text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import networkx as nx
import numpy as np
def project3d(points, direction):
    """Project a set of 3D points onto the plane through the origin with the
    given normal vector.

    The first in-plane axis is derived from the z axis (or from the x axis
    when the plane normal is the z axis itself); the second axis is its cross
    product with the normal.

    :param points: mapping of key -> 3D coordinate
    :param direction: normal vector (u, v, w) of the projection plane, which
        passes through the origin (0, 0, 0)
    :return: dict mapping each key to its 2D in-plane coordinate pair
    """
    normal = direction / np.linalg.norm(direction)
    # Seed for the first in-plane axis; fall back to the x axis when the
    # normal coincides with z.
    seed = np.array([1, 0, 0]) if np.array([0, 0, 1]).dot(normal) == 1 else np.array([0, 0, 1])
    axis_a = seed - np.dot(normal, seed) * normal
    axis_a = axis_a / np.linalg.norm(axis_a)
    axis_b = np.cross(axis_a, normal)
    axis_b = axis_b / np.linalg.norm(axis_b)
    projected = {}
    for key in points:
        coord = np.array(points[key])
        in_plane = coord - np.dot(normal, coord) * normal
        projected[key] = (np.dot(axis_a, in_plane), np.dot(axis_b, in_plane))
    return projected
class Graph:
    """A thin wrapper around ``networkx.Graph``.

    Keeps a display name and a free-form ``info`` dict alongside the wrapped
    graph, and adds helpers for drawing 2D-projected and 3D views of the
    node 'location' attributes with matplotlib.
    """
    def __init__(self, name, nx_graph=None):
        # nx.Graph(nx_graph) copies the given graph (or creates an empty one
        # when nx_graph is None).
        self.name = name
        self.info = {}
        self.g = nx.Graph(nx_graph)
    def __len__(self):
        """Return the number of nodes in the wrapped graph."""
        return len(self.nodes())
    def __getitem__(self, node):
        """Return the adjacency view of ``node`` in the wrapped graph."""
        return self.g[node]
    def copy(self):
        """Return a new Graph wrapping a copy of the underlying nx.Graph."""
        return Graph(self.name, self.g)
    def add_node(self, node, **attr):
        self.g.add_node(node, **attr)
    def add_edge(self, node1, node2, **attr):
        self.g.add_edge(node1, node2, **attr)
    def remove_node(self, node):
        self.g.remove_node(node)
    def nodes(self):
        return self.g.nodes
    def edges(self):
        return self.g.edges
    def degree(self, node=None):
        """Return the degree of ``node``, or the full degree view if None."""
        if node is not None:
            return self.g.degree[node]
        return self.g.degree
    def subgraph(self, nodes):
        """Return a Graph wrapping the induced subgraph on ``nodes``."""
        return Graph(self.name, self.g.subgraph(nodes))
    def max_subgraph(self):
        """Return a Graph over the largest connected component."""
        mc = max(nx.connected_components(self.g), key=len)
        return Graph(self.name, self.g.subgraph(mc))
    def is_connected(self):
        return nx.is_connected(self.g)
    def get_node_attributes(self, attr):
        return nx.get_node_attributes(self.g, attr)
    def get_edge_attributes(self, attr):
        return nx.get_edge_attributes(self.g, attr)
    def draw_graph(self, axes, highlight=None, direction=(0, 0, 1), rotation=None):
        """Draw a 2D projection of the graph with matplotlib.

        :param axes: matplotlib axes to draw on (cleared first)
        :param highlight: optional list of nodes drawn in red
        :param direction: normal vector of the projection plane
        :param rotation: optional 3x3 matrix applied to node locations
            before projecting
        """
        axes.clear()
        points = self.get_node_attributes('location')
        if rotation is not None:
            for k in points:
                points[k] = np.dot(points[k], rotation)
        pos = project3d(points, np.array(direction))
        label = self.get_node_attributes('label')
        edge_label = self.get_edge_attributes('dist')
        nx.draw_networkx(self.g, pos, alpha=0.7, with_labels=False, edge_color='.4', ax=axes)
        if highlight is not None:
            nx.draw_networkx_nodes(self.g, pos=pos, nodelist=highlight, node_color='r', ax=axes)
        nx.draw_networkx_labels(self.g, pos, labels=label, ax=axes)
        nx.draw_networkx_edge_labels(self.g, pos, edge_labels=edge_label, ax=axes)
        axes.axis('off')
    def draw_3d_graph(self, axes, highlight=None):
        """Draw the graph in 3D with matplotlib.

        :param axes: a 3D matplotlib axes (cleared first)
        :param highlight: optional collection of nodes drawn in red
        """
        axes.clear()
        points = self.get_node_attributes('location')
        label = self.get_node_attributes('label')
        if highlight is None:
            highlight = []
        for key, value in points.items():
            c = 'blue'  # ordinary atoms are drawn in blue
            if key in highlight:
                c = 'red'  # highlighted atoms are drawn in red
            xi, yi, zi = value
            # NOTE(review): ``label[key]`` is passed as the fourth positional
            # argument of Axes3D.scatter (the ``zdir``/size slot), which looks
            # unintended — confirm against matplotlib's scatter signature.
            axes.scatter(xi, yi, zi, label[key], c=c, alpha=0.9)
        for i, j in enumerate(self.edges()):
            # Draw each chemical bond as a segment between its two endpoints.
            x = np.array((points[j[0]][0], points[j[1]][0]))
            y = np.array((points[j[0]][1], points[j[1]][1]))
            z = np.array((points[j[0]][2], points[j[1]][2]))
            axes.plot(x, y, z, c='black', alpha=0.9)
    def number_of_edges(self, u, v):
        return self.g.number_of_edges(u, v)
|
{"hexsha": "06563550a45959f7b7c60ff0e3d503579d489536", "size": 3759, "ext": "py", "lang": "Python", "max_stars_repo_path": "crystalsearch/graph/graph.py", "max_stars_repo_name": "jingshenSN2/CrystalTool", "max_stars_repo_head_hexsha": "18f07963ff5f2a54ac2c93e2fa59fada51346232", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "crystalsearch/graph/graph.py", "max_issues_repo_name": "jingshenSN2/CrystalTool", "max_issues_repo_head_hexsha": "18f07963ff5f2a54ac2c93e2fa59fada51346232", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crystalsearch/graph/graph.py", "max_forks_repo_name": "jingshenSN2/CrystalTool", "max_forks_repo_head_hexsha": "18f07963ff5f2a54ac2c93e2fa59fada51346232", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.325, "max_line_length": 96, "alphanum_fraction": 0.5833998404, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1105}
|
"""
Defines a set of regressions tests that should be run succesfully after all
major modification to the code.
"""
import sys
import math
import numpy as np
import unittest
import time
from describe.descriptors import MBTR
from describe.descriptors import CoulombMatrix
from describe.descriptors import SortedCoulombMatrix
from describe.descriptors import SineMatrix
from describe.descriptors import SortedSineMatrix
from describe.core import System
from describe.data.element_data import numbers_to_symbols
import matplotlib.pyplot as mpl
from ase import Atoms
from ase.lattice.cubic import SimpleCubicFactory
# --- Shared test fixtures --------------------------------------------------
# Water molecule in a 5 Angstrom cubic box: O-H bond length 0.95 Angstrom,
# H-O-H angle 104 degrees (76 degrees below is the supplement of 104).
H2O = System(
    cell=[[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
    positions=[
        [0, 0, 0],
        [0.95, 0, 0],
        [0.95*(1+math.cos(76/180*math.pi)), 0.95*math.sin(76/180*math.pi), 0.0],
    ],
    symbols=["H", "O", "H"],
)
H2O.set_initial_charges(H2O.numbers)

# The same molecule with the first two atoms swapped; used to check that
# descriptors are invariant with respect to atom indexing.
H2O_2 = System(
    cell=[[5.0, 0.0, 0], [0, 5, 0], [0, 0, 5.0]],
    positions=[
        [0.95, 0, 0],
        [0, 0, 0],
        [0.95*(1+math.cos(76/180*math.pi)), 0.95*math.sin(76/180*math.pi), 0.0],
    ],
    symbols=["O", "H", "H"],
)

# Primitive (two-atom) rocksalt NaCl cell.
NaCl_prim = System(
    cell=[
        [0.0, 2.8201, 2.8201],
        [2.8201, 0.0, 2.8201],
        [2.8201, 2.8201, 0.0],
    ],
    scaled_positions=[[0.5, 0.5, 0.5], [0, 0, 0]],
    symbols=["Na", "Cl"],
)

# Conventional (cubic, eight-atom) rocksalt NaCl cell.
NaCl_conv = System(
    cell=[
        [5.6402, 0.0, 0.0],
        [0.0, 5.6402, 0.0],
        [0.0, 0.0, 5.6402],
    ],
    scaled_positions=[
        [0.0, 0.5, 0.0],
        [0.0, 0.5, 0.5],
        [0.0, 0.0, 0.5],
        [0.0, 0.0, 0.0],
        [0.5, 0.5, 0.5],
        [0.5, 0.5, 0.0],
        [0.5, 0.0, 0.0],
        [0.5, 0.0, 0.5],
    ],
    symbols=["Na", "Cl", "Na", "Cl", "Na", "Cl", "Na", "Cl"],
)
class GeometryTests(unittest.TestCase):
    """Tests for the geometry helpers on the System class."""

    def test_distances(self):
        """Periodicity must be taken into account when calculating
        displacements between atoms.
        """
        triclinic_cell = [
            [5, 5, 0],
            [0, -5, -5],
            [5, 0, 5]
        ]
        system = System(
            scaled_positions=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
            symbols=["H", "H"],
            cell=triclinic_cell,
        )
        # Without periodic boundary conditions the raw displacement is used,
        # even though a cell is defined.
        expected = np.array([
            [[0.0, 0.0, 0.0], [-5, 0, 0]],
            [[5, 0, 0], [0.0, 0.0, 0.0]]])
        self.assertTrue(np.allclose(expected, system.get_displacement_tensor()))
        # With periodic boundary conditions the nearest periodic copy is
        # considered, both for neighbours and for the atom itself.
        system.set_pbc([True, True, True])
        expected = np.array([
            [[5.0, 5.0, 0.0], [-5, 0, 0]],
            [[5, 0, 0], [5.0, 5.0, 0.0]]])
        self.assertTrue(np.allclose(expected, system.get_displacement_tensor()))

    def test_transformations(self):
        """A scaled -> cartesian -> scaled round trip must be lossless."""
        system = System(
            scaled_positions=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
            symbols=["H", "H"],
            cell=[
                [5, 5, 0],
                [0, -5, -5],
                [5, 0, 5]
            ],
        )
        original = np.array([[2, 1.45, -4.8]])
        roundtripped = system.to_cartesian(system.to_scaled(original))
        self.assertTrue(np.allclose(original, roundtripped))
class GaussianTests(unittest.TestCase):
    def test_cdf(self):
        """Test that the implementation of the gaussian value through the
        cumulative distribution function works as expected.

        The broadened value on a grid can be obtained either by sampling the
        probability density function (pdf) directly, or by differencing the
        cumulative distribution function (cdf) across each grid cell. Both
        discretizations of a single normalized gaussian should integrate to
        ~1 once the grid is reasonably dense.

        The original version of this test only plotted the two areas with
        matplotlib and asserted nothing; the blocking ``mpl.show()`` call
        also stalled non-interactive test runs. It now asserts convergence
        of both integrals instead.
        """
        from scipy.special import erf
        start = -5
        stop = 5
        centers = np.array([0])
        sigma = 1
        area_cum = []
        area_pdf = []
        # Calculate integrated areas for different numbers of grid points
        for n_points in range(2, 10):
            axis = np.linspace(start, stop, n_points)
            # Calculate with the cumulative function: difference the cdf
            # across each cell and divide by the cell width.
            dx = (stop - start)/(n_points-1)
            x = np.linspace(start-dx/2, stop+dx/2, n_points+1)
            pos = x[np.newaxis, :] - centers[:, np.newaxis]
            y = 1/2*(1 + erf(pos/(sigma*np.sqrt(2))))
            f = np.sum(y, axis=0)
            f_rolled = np.roll(f, -1)
            pdf_cum = (f_rolled - f)[0:-1]/dx
            # Calculate by evaluating the probability density directly.
            dist2 = axis[np.newaxis, :] - centers[:, np.newaxis]
            dist2 *= dist2
            f = np.sum(np.exp(-dist2/(2*sigma**2)), axis=0)
            f *= 1/math.sqrt(2*sigma**2*math.pi)
            pdf_pdf = f
            # Trapezoidal integral of each discretization
            sum_cum = np.sum(0.5*dx*(pdf_cum[:-1]+pdf_cum[1:]))
            sum_pdf = np.sum(0.5*dx*(pdf_pdf[:-1]+pdf_pdf[1:]))
            area_cum.append(sum_cum)
            area_pdf.append(sum_pdf)
        # On the densest grid both discretizations must integrate to ~1.
        self.assertTrue(abs(area_cum[-1] - 1) < 1e-2)
        self.assertTrue(abs(area_pdf[-1] - 1) < 1e-2)
class ASETests(unittest.TestCase):
    """Tests for conversion between ASE objects and System objects."""

    def test_atoms_to_system(self):
        """Tests that an ASE Atoms is successfully converted to a System
        object: every property read from the ASE object must survive the
        conversion unchanged.
        """
        class NaClFactory(SimpleCubicFactory):
            "A factory for creating NaCl (B1, Rocksalt) lattices."
            bravais_basis = [[0, 0, 0], [0, 0, 0.5], [0, 0.5, 0], [0, 0.5, 0.5],
                             [0.5, 0, 0], [0.5, 0, 0.5], [0.5, 0.5, 0],
                             [0.5, 0.5, 0.5]]
            element_basis = (0, 1, 1, 0, 1, 0, 0, 1)

        nacl = NaClFactory()(symbol=["Na", "Cl"], latticeconstant=5.6402)
        system = System.from_atoms(nacl)
        getters = [
            "get_positions",
            "get_initial_charges",
            "get_atomic_numbers",
            "get_chemical_symbols",
            "get_cell",
            "get_pbc",
            "get_scaled_positions",
        ]
        for getter in getters:
            ase_value = getattr(nacl, getter)()
            system_value = getattr(system, getter)()
            self.assertTrue(np.array_equal(ase_value, system_value))
class CoulombMatrixTests(unittest.TestCase):
    def test_matrix(self):
        """The unflattened Coulomb matrix must match the textbook formula:
        M_ii = 0.5 * Z_i^2.4 and M_ij = Z_i * Z_j / |R_i - R_j|, zero-padded
        up to n_atoms_max.
        """
        desc = CoulombMatrix(n_atoms_max=5, flatten=False)
        cm = desc.create(H2O)
        charges = H2O.get_initial_charges()
        positions = H2O.get_positions()
        n_atoms = 3
        # Build the expected matrix directly from the definition.
        expected = np.zeros((5, 5))
        for i in range(n_atoms):
            for j in range(n_atoms):
                if i == j:
                    expected[i, j] = 0.5*charges[i]**2.4
                else:
                    expected[i, j] = charges[i]*charges[j]/(np.linalg.norm(positions[i]-positions[j]))
        self.assertTrue(np.array_equal(cm, expected))
class SortedCoulombMatrixTests(unittest.TestCase):
    def test_matrix(self):
        """Rows/columns of the sorted Coulomb matrix must be ordered by
        descending column norm.
        """
        desc = SortedCoulombMatrix(n_atoms_max=5, flatten=False)
        cm = desc.create(H2O)
        norms = np.linalg.norm(cm, axis=0)
        for previous, current in zip(norms[:-1], norms[1:]):
            self.assertTrue(current <= previous)
class SineMatrixTests(unittest.TestCase):
    def test_matrix(self):
        """The unflattened sine matrix must match the analytic sine-matrix
        formula on a simple two-atom toy system.
        """
        # Create simple toy system
        test_sys = System(
            cell=[[1, 0.0, 0.0], [1, 1, 0.0], [0.0, 0.0, 1.0]],
            positions=[[0, 0, 0], [2, 1, 1]],
            symbols=["H", "H"],
        )
        # NOTE(review): assigns the ``charges`` attribute directly but reads
        # get_initial_charges() below — confirm System keeps these in sync.
        test_sys.charges = np.array([1, 1])
        desc = SineMatrix(n_atoms_max=5, flatten=False)
        # Visual check of the interaction on a 2D slice of the cell.
        # Disabled by default: it performed 10000 descriptor evaluations and
        # ended in a blocking mpl.show() call, which stalled non-interactive
        # test runs while asserting nothing.
        # size = 100
        # x_min = 0.0
        # x_max = 3
        # y_min = 0.0
        # y_max = 3
        # x_axis = np.linspace(x_min, x_max, size)
        # y_axis = np.linspace(y_min, y_max, size)
        # interaction = np.empty((size, size))
        # for i, x in enumerate(x_axis):
        #     for j, y in enumerate(y_axis):
        #         temp_sys = System(
        #             cell=[[1, 0.0, 0.0], [1, 1, 0.0], [0.0, 0.0, 1.0]],
        #             positions=[[0, 0, 0], [x, y, 0]],
        #             symbols=["H", "H"],
        #         )
        #         temp_sys.set_initial_charges(np.array([1, 1]))
        #         value = desc.create(temp_sys)
        #         interaction[i, j] = value[0, 1]
        # mpl.imshow(interaction, cmap='RdBu', vmin=0, vmax=5,
        #            extent=[x_min, x_max, y_min, y_max],
        #            interpolation='nearest', origin='lower')
        # mpl.colorbar()
        # mpl.show()
        # Test against assumed values: 0.5*Z^2.4 on the diagonal and the
        # periodic sine-matrix surrogate for the Coulomb term off-diagonal.
        q = test_sys.get_initial_charges()
        p = test_sys.get_positions()
        cell = test_sys.get_cell()
        cell_inv = test_sys.get_reciprocal_cell()
        sin = np.sin
        pi = np.pi
        dot = np.dot
        norm = np.linalg.norm
        assumed = np.array(
            [
                [0.5*q[0]**2.4, q[0]*q[1]/(norm(dot(cell, sin(pi*dot(p[0]-p[1], cell_inv))**2)))],
                [q[0]*q[1]/(norm(dot(cell, sin(pi*dot(p[1]-p[0], cell_inv))**2))), 0.5*q[1]**2.4],
            ]
        )
        zeros = np.zeros((5, 5))
        zeros[:2, :2] = assumed
        assumed = zeros
        sm = desc.create(test_sys)
        self.assertTrue(np.array_equal(sm, assumed))
class SortedSineMatrixTests(unittest.TestCase):
    def test_matrix(self):
        """Rows/columns of the sorted sine matrix must be ordered by
        descending column norm.
        """
        desc = SortedSineMatrix(n_atoms_max=5, flatten=False)
        sm = desc.create(H2O)
        norms = np.linalg.norm(sm, axis=0)
        for previous, current in zip(norms[:-1], norms[1:]):
            self.assertTrue(current <= previous)
class MBTRTests(unittest.TestCase):
    """Regression tests for the MBTR descriptor."""

    def test_invalid_parameters(self):
        """Test that invalid parameters raise the correct exception.
        """
        with self.assertRaises(ValueError):
            MBTR(
                atomic_numbers=[1],
                k=0,
                periodic=False,
            )
        with self.assertRaises(ValueError):
            MBTR(
                atomic_numbers=[1],
                k=[-1, 2],
                periodic=False,
            )
        with self.assertRaises(ValueError):
            MBTR(
                atomic_numbers=[1],
                k=1,
                periodic=False,
            )
        with self.assertRaises(ValueError):
            MBTR(
                atomic_numbers=[1],
                k={1, 4},
                periodic=False,
            )

    def test_flattening(self):
        """Test that the flattened version equals the unflattened one.
        """
        # NOTE(review): this test has no body — it currently passes without
        # checking anything. Implement it or remove it.

    def test_number_of_features(self):
        """The flattened feature count should follow the combinatorial
        formulas for the k=1, k=2 and k=3 terms checked below.
        """
        # K = 1
        n = 100
        atomic_numbers = [1, 8]
        n_elem = len(atomic_numbers)
        mbtr = MBTR(
            atomic_numbers=atomic_numbers,
            k=[1],
            grid={
                "k1": {
                    "min": 1,
                    "max": 8,
                    "sigma": 0.1,
                    "n": 100,
                }
            },
            periodic=False,
            flatten=True
        )
        n_features = mbtr.get_number_of_features()
        # One length-n spectrum per element
        expected = n_elem*n
        self.assertEqual(n_features, expected)
        # K = 2
        mbtr = MBTR(
            atomic_numbers=atomic_numbers,
            k={1, 2},
            grid={
                "k1": {
                    "min": 1,
                    "max": 8,
                    "sigma": 0.1,
                    "n": 100,
                },
                "k2": {
                    "min": 0,
                    "max": 1/0.7,
                    "sigma": 0.1,
                    "n": n,
                }
            },
            periodic=False,
            flatten=True
        )
        n_features = mbtr.get_number_of_features()
        # k=1 part plus one spectrum per unordered element pair
        expected = n_elem*n + 1/2*(n_elem)*(n_elem+1)*n
        self.assertEqual(n_features, expected)
        # K = 3
        mbtr = MBTR(
            atomic_numbers=atomic_numbers,
            k={1, 2, 3},
            grid={
                "k1": {
                    "min": 1,
                    "max": 8,
                    "sigma": 0.1,
                    "n": 100,
                },
                "k2": {
                    "min": 0,
                    "max": 1/0.7,
                    "sigma": 0.1,
                    "n": n,
                },
                "k3": {
                    "min": -1,
                    "max": 1,
                    "sigma": 0.1,
                    "n": n,
                }
            },
            periodic=False,
            flatten=True
        )
        n_features = mbtr.get_number_of_features()
        expected = n_elem*n + 1/2*(n_elem)*(n_elem+1)*n + n_elem*1/2*(n_elem)*(n_elem+1)*n
        self.assertEqual(n_features, expected)

    def test_counts(self):
        """The k=1 element counts must match the H2O composition and must be
        independent of atom indexing.
        """
        mbtr = MBTR([1, 8], k=[1], periodic=False)
        mbtr.create(H2O)
        counts = mbtr._counts
        # Test against the assumed values
        self.assertTrue(np.array_equal(counts, np.array([2, 1])))
        # Test against system with different indexing
        mbtr = MBTR([1, 8], k=[1], periodic=False)
        mbtr.create(H2O_2)
        counts2 = mbtr._counts
        self.assertTrue(np.array_equal(counts, counts2))

    def test_periodic(self):
        """Smoke test: a periodic k=2 MBTR can be created for a single-atom
        periodic system without raising.
        """
        test_sys = System(
            cell=[[5.0, 0.0, 0.0], [0, 5.0, 0.0], [0.0, 0.0, 5.0]],
            positions=[[0, 0, 0]],
            symbols=["H"],
        )
        mbtr = MBTR([1], k=[2], weighting="exponential", periodic=True)
        desc = mbtr.create(test_sys)

    def test_inverse_distances(self):
        """The k=2 inverse-distance lists must match the H2O geometry and
        must be independent of atom indexing.
        """
        mbtr = MBTR([1, 8], k=[2], periodic=False)
        mbtr.create(H2O)
        inv_dist = mbtr._inverse_distances
        # Test against the assumed values
        pos = H2O.get_positions()
        assumed = {
            0: {
                0: [1/np.linalg.norm(pos[0] - pos[2])],
                1: 2*[1/np.linalg.norm(pos[0] - pos[1])]
            }
        }
        self.assertEqual(assumed, inv_dist)
        # Test against system with different indexing
        mbtr = MBTR([1, 8], k=[2], periodic=False)
        mbtr.create(H2O_2)
        inv_dist_2 = mbtr._inverse_distances
        self.assertEqual(inv_dist, inv_dist_2)

    def test_cosines(self):
        """The k=3 angle cosines must match the H2O geometry (104 degree
        H-O-H angle, 38 degree O-H-H angles) and must be independent of atom
        indexing.
        """
        mbtr = MBTR([1, 8], k=[3], periodic=False)
        mbtr.create(H2O)
        angles = mbtr._angles
        # Test against the assumed values.
        assumed = {
            0: {
                1: {
                    0: 2*[math.cos(104/180*math.pi)]
                },
                0: {
                    1: 2*[math.cos(38/180*math.pi)]
                },
            }
        }
        # Compare entry-by-entry: missing keys on either side must be
        # missing on both; present lists must agree element-wise.
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    try:
                        assumed_elem = assumed[i][j][k]
                    except KeyError:
                        assumed_elem = None
                    try:
                        true_elem = angles[i][j][k]
                    except KeyError:
                        true_elem = None
                    if assumed_elem is None:
                        self.assertIsNone(true_elem)
                    else:
                        self.assertEqual(len(assumed_elem), len(true_elem))
                        for i_elem, val_assumed in enumerate(assumed_elem):
                            val_true = true_elem[i_elem]
                            self.assertAlmostEqual(val_assumed, val_true, places=6)
        # Test against system with different indexing
        mbtr = MBTR([1, 8], k=[3], periodic=False)
        mbtr.create(H2O_2)
        angles2 = mbtr._angles
        # print(angles)
        # print(angles2)
        self.assertEqual(angles, angles2)

    def test_gaussian_distribution(self):
        """Check that the broadening follows gaussian distribution.
        """
        std = 1
        start = -3
        stop = 11
        n = 500
        mbtr = MBTR(
            [1, 8],
            k=[1],
            grid={
                "k1": {
                    "min": start,
                    "max": stop,
                    "sigma": std,
                    "n": n
                }
            },
            periodic=False,
            flatten=False)
        y = mbtr.create(H2O)
        k1_axis = mbtr._axis_k1
        # Find the location of the peaks
        peak1_x = np.searchsorted(k1_axis, 1)
        peak1_y = y[0][0, peak1_x]
        peak2_x = np.searchsorted(k1_axis, 8)
        peak2_y = y[0][1, peak2_x]
        # Check against the analytical value
        gaussian = lambda x, mean, sigma: np.exp(-(x-mean)**2/(2*sigma**2))
        self.assertTrue(np.allclose(peak1_y, 2*gaussian(1, 1, std), rtol=0, atol=0.001))
        self.assertTrue(np.allclose(peak2_y, gaussian(8, 8, std), rtol=0, atol=0.001))
        # Check the integral
        pdf = y[0][0, :]
        # mpl.plot(pdf)
        # mpl.show()
        dx = (stop-start)/(n-1)
        sum_cum = np.sum(0.5*dx*(pdf[:-1]+pdf[1:]))
        exp = 2/(1/math.sqrt(2*math.pi*std**2))
        self.assertTrue(np.allclose(sum_cum, exp, rtol=0, atol=0.001))

    def test_k1(self):
        """Visual sanity check for the k=1 term (plotting disabled)."""
        mbtr = MBTR([1, 8], k=[1], periodic=False, flatten=False)
        desc = mbtr.create(H2O)
        x1 = mbtr._axis_k1
        imap = mbtr.index_to_atomic_number
        smap = {}
        for index, number in imap.items():
            smap[index] = numbers_to_symbols(number)
        # Visually check the contents
        # mpl.plot(y)
        # mpl.ylim(0, y.max())
        # mpl.show()
        # mpl.plot(x1, desc[0][0, :], label="{}".format(smap[0]))
        # mpl.plot(x1, desc[0][1, :], linestyle=":", linewidth=3, label="{}".format(smap[1]))
        # mpl.ylabel("$\phi$ (arbitrary units)", size=20)
        # mpl.xlabel("Inverse distance (1/angstrom)", size=20)
        # mpl.legend()
        # mpl.show()

    def test_k2(self):
        """Visual sanity check for the k=2 term (plotting disabled)."""
        mbtr = MBTR([1, 8], k=[2], periodic=False, flatten=False)
        desc = mbtr.create(H2O)
        x2 = mbtr._axis_k2
        imap = mbtr.index_to_atomic_number
        smap = {}
        for index, number in imap.items():
            smap[index] = numbers_to_symbols(number)
        # Visually check the contents
        # mpl.plot(x2, desc[1][0, 1, :], label="{}-{}".format(smap[0], smap[1]))
        # mpl.plot(x2, desc[1][1, 0, :], linestyle=":", linewidth=3, label="{}-{}".format(smap[1], smap[0]))
        # mpl.plot(x2, desc[1][1, 1, :], label="{}-{}".format(smap[1], smap[1]))
        # mpl.plot(x2, desc[1][0, 0, :], label="{}-{}".format(smap[0], smap[0]))
        # mpl.ylabel("$\phi$ (arbitrary units)", size=20)
        # mpl.xlabel("Inverse distance (1/angstrom)", size=20)
        # mpl.legend()
        # mpl.show()
        # mbtr = MBTR([1, 8], k=2, periodic=False, flatten=True)
        # desc = mbtr.create(H2O)
        # y = desc.todense().T
        # mpl.plot(y)
        # mpl.show()

    # def test_k3(self):
    #     mbtr = MBTR([1, 8], k=3, periodic=False)
    #     desc = mbtr.create(H2O)
    #     y = desc.todense().T
    #     # Visually check the contents
    #     mpl.plot(y)
    #     mpl.show()

    # def test_counts_duplicate(self):
    #     mbtr = MBTR([1, 8], k=1, periodic=False)
    #     mbtr.create(H2O)
    #     # Check that there are correct number of counts. The counts are
    #     # calculated only from the original cell that is assumed to be
    #     # primitive
    #     # self.assertTrue(np.array_equal(mbtr._counts, [2, 1]))

    # def test_distances_duplicate(self):
    #     mbtr = MBTR([1, 8], k=2, periodic=False)
    #     mbtr.create(H2O)
    #     # Check that there are correct number of inverse distances
    #     n_atoms = len(H2O)
    #     n_ext_atoms = (1+2*1)**3*n_atoms
    #     n_inv_dist_analytic = sum([n_ext_atoms-i for i in range(1, n_atoms+1)])
    #     inv_dist = mbtr._inverse_distances
    #     n_inv_dist = 0
    #     for dict1 in inv_dist.values():
    #         for val in dict1.values():
    #             n_inv_dist += len(val)
    #     self.assertEqual(n_inv_dist_analytic, n_inv_dist)

    # def test_angles_duplicate(self):
    #     mbtr = MBTR([1, 8], n_atoms_max=2, k=3, periodic=False)
    #     mbtr.create(H2O)
    #     # Check that there are correct number of angles
    #     n_atoms = len(H2O)
    #     n_ext_atoms = (1+2*n_copies)**3*n_atoms
    #     n_angles_analytic = ?  # Did not have the energy to figure out the correct analytic formula... :)
    #     angles = mbtr._angles
    #     n_angles = 0
    #     for dict1 in angles.values():
    #         for dict2 in dict1.values():
    #             for val in dict2.values():
    #                 n_angles += len(val)
    #     self.assertEqual(n_angles_analytic, n_angles)

    # def test_haoyan_nacl(self):
    #     # Test periodic NaCl crystal
    #     cutoff_x = 12
    #     cutoff_y = 0.01
    #     rate = -math.log(cutoff_y)/cutoff_x
    #     # rate = 0.5
    #     mbtr = MBTR(
    #         [11, 17],
    #         n_atoms_max=8,
    #         k=3,
    #         periodic=True,
    #         # grid={
    #         #     "k3": [0, np.pi, np.pi/200, 0.07]
    #         # },
    #         weighting={
    #             "k2": {
    #                 "function": lambda x: np.exp(-rate*x),
    #                 "threshold": 1e-2
    #             },
    #             "k3": {
    #                 "function": lambda x: np.exp(-rate*x),
    #                 "threshold": 1e-2
    #             },
    #         },
    #         flatten=False)
    #     start = time.time()
    #     # desc = mbtr.create(NaCl_prim)
    #     desc = mbtr.create(NaCl_conv)
    #     end = time.time()
    #     print("DONE: {}".format(end-start))
    #     x = mbtr._axis_k3/np.pi*180
    #     imap = mbtr.index_to_atomic_number
    #     smap = {}
    #     for index, number in imap.items():
    #         smap[index] = numbers_to_symbols(number)
    #     mpl.rcParams['text.usetex'] = True
    #     mpl.rcParams['font.family'] = 'serif'
    #     mpl.rcParams['font.serif'] = ['cm']
    #     mpl.rcParams['xtick.labelsize'] = 18
    #     mpl.rcParams['ytick.labelsize'] = 18
    #     mpl.rcParams['legend.fontsize'] = 18
    #     print(desc[0].shape)
    #     print(desc[1].shape)
    #     print(desc[2].shape)
    #     mpl.plot(x, desc[2][0, 0, 0, :], label="NaNaNa, ClClCl".format(smap[0], smap[0], smap[0]), color="blue")
    #     mpl.plot(x, desc[2][0, 0, 1, :], label="NaNaCl, NaClCl".format(smap[0], smap[0], smap[1]), color="orange")
    #     mpl.plot(x, desc[2][1, 0, 1, :], label="NaClNa, ClNaCl".format(smap[1], smap[0], smap[1]), color="green")
    #     mpl.ylabel("$\phi$ (arbitrary units)", size=25)
    #     mpl.xlabel("angle (degree)", size=25)
    #     mpl.title("The exponentially weighted angle distribution in NaCl crystal.", size=30)
    #     mpl.legend()
    #     mpl.show()
if __name__ == '__main__':
    # Collect every regression test case into a single suite and run it.
    test_cases = [
        ASETests,
        GeometryTests,
        GaussianTests,
        MBTRTests,
        CoulombMatrixTests,
        SortedCoulombMatrixTests,
        SineMatrixTests,
        SortedSineMatrixTests,
    ]
    loader = unittest.TestLoader()
    alltests = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case) for case in test_cases]
    )
    result = unittest.TextTestRunner(verbosity=0).run(alltests)
    # We need to return a non-zero exit code for the gitlab CI to detect errors
    sys.exit(not result.wasSuccessful())
|
{"hexsha": "c7f3f5be7d0f156875b0a4d997803dddbb4788d6", "size": 25203, "ext": "py", "lang": "Python", "max_stars_repo_path": "describe/describe/regtests/regtests.py", "max_stars_repo_name": "MadsAW/machine-learning-on-materials", "max_stars_repo_head_hexsha": "6101c7e3d12be54b12391c78442294198a39cc9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-10-10T09:32:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-28T08:42:31.000Z", "max_issues_repo_path": "describe/describe/regtests/regtests.py", "max_issues_repo_name": "MadsAW/machine-learning-on-materials", "max_issues_repo_head_hexsha": "6101c7e3d12be54b12391c78442294198a39cc9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "describe/describe/regtests/regtests.py", "max_forks_repo_name": "MadsAW/machine-learning-on-materials", "max_forks_repo_head_hexsha": "6101c7e3d12be54b12391c78442294198a39cc9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6620603015, "max_line_length": 116, "alphanum_fraction": 0.494385589, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6901}
|
/*
* Copyright 2013 Matthew Harvey
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GUARD_sql_statement_impl_hpp_8187575260264601
#define GUARD_sql_statement_impl_hpp_8187575260264601
// Hide from Doxygen
/// @cond
/** @file
*
* @brief Header file pertaining to SQLStatementImpl class.
*/
#include "sqlite3.h" // Compiling directly into build
#include "../sqloxx_exceptions.hpp"
#include <boost/filesystem/path.hpp>
#include <jewel/assert.hpp>
#include <jewel/checked_arithmetic.hpp>
#include <climits>
#include <string>
#include <type_traits>
#include <vector>
namespace sqloxx
{
namespace detail
{
// Forward declaration
class SQLiteDBConn;
/**
 * Wrapper class for sqlite_stmt*. This class should not be
 * used except internally by the Sqloxx library. SQLStatementImpl instances
 * are themselves encapsulated by SQLStatement instances.
 */
class SQLStatementImpl
{
public:

    /**
     * Creates an object encapsulating a SQL statement.
     *
     * @param p_sqlite_dbconn the database connection on which to prepare
     * the statement.
     *
     * @param str is the text of a single SQL statement. It can be terminated
     * with any mixture of semicolons and/or spaces (but not other forms
     * of whitespace).
     *
     * @throws InvalidConnection if the database connection passed to
     * \c p_sqlite_dbconn is invalid.
     *
     * @throws SQLiteException or an exception derived therefrom, if
     * the database connection is valid, but the statement could not
     * be properly prepared by SQLite.
     *
     * @throws TooManyStatements if the first purported SQL statement
     * in str is syntactically acceptable to SQLite, <em>but</em> there
     * are characters in str after this statement, other than ';' and ' '.
     * This includes the case where there are further syntactically
     * acceptable SQL statements after the first one - as each
     * SQLStatementImpl can encapsulate only one statement.
     */
    SQLStatementImpl(SQLiteDBConn& p_sqlite_dbconn, std::string const& str);

    // Owns a raw statement handle, so copying and moving are disabled.
    SQLStatementImpl(SQLStatementImpl const&) = delete;
    SQLStatementImpl(SQLStatementImpl&&) = delete;
    SQLStatementImpl& operator=(SQLStatementImpl const&) = delete;
    SQLStatementImpl& operator=(SQLStatementImpl&&) = delete;
    ~SQLStatementImpl();

    /**
     * Wrapper around SQLite bind functions.
     *
     * These throw \c SQLiteException, or an exception derived therefrom,
     * if SQLite could not properly bind the statement.
     *
     * Currently the following types for T are supported:\n
     * int\n
     * long\n
     * long long\n
     * double\n
     * std::string\n
     * char const*
     *
     * <b>NOTE</b>
     * If x is of an integral type that is wider than 64 bits, then any
     * attempt to instantiate this function with x will result in compilation
     * failure. This is done to rule out any overflow within SQLite.
     */
    template <typename T>
    void bind(std::string const& parameter_name, T const& x);

    /**
     * Where a SQLStatementImpl has a result set available,
     * this function (template) can be used to extract the value at
     * the \c indexth column of the current row (where \c index starts
     * counting at 0).
     *
     * Currently the following types for T are supported:\n
     * long\n
     * long long\n
     * int\n
     * double\n
     * std::string\n
     *
     * @param index is the column number (starting at 0) from which to
     * read the value.
     *
     * @throws ResultIndexOutOfRange if \c index is out of range.
     *
     * @throws ValueTypeException if the requested column contains a type that
     * is incompatible with T.
     */
    template <typename T>
    T extract(int index);

    /**
     * Wraps sqlite3_step.
     * Returns true only as long as there are further steps to go (i.e. result
     * rows to examine).
     *
     * On stepping beyond the last result row, step() will return false.
     * The statement will then be automatically reset (see reset()).
     *
     * @throws SQLiteException or some exception deriving therefrom, if an
     * error occurs. This function should almost never throw, but it is
     * possible something will fail as the statement is being executed, in
     * which case the resulting SQLite error condition will trigger the
     * corresponding exception class.
     */
    bool step();

    /**
     * Wraps sqlite3_step. Similar to \c step except that it throws an
     * exception if a result row still remains after calling. That is,
     * it is equivalent to calling:\n
     * \c if (step()) throw UnexpectedResultRow("...");\n
     *
     * @throws UnexpectedResultRow if a result set is returned.
     *
     * @throws SQLiteException or an exception derived therefrom if there
     * is any other error in executing the statement.
     */
    void step_final();

    /**
     * Resets the statement, freeing bound parameters ready for
     * subsequent re-binding and re-execution.
     *
     * Does not throw.
     */
    void reset();

    /**
     * Clears the parameter bindings from the statement, setting all
     * to NULL. This is a wrapper for sqlite3_clear_bindings.
     * Does not throw.
     */
    void clear_bindings();

    /**
     * @returns true if and only if the statement is currently
     * in use by way of a SQLStatement. Does not throw.
     */
    bool is_locked() const;

    /**
     * Locks the statement, indicating that it is currently in
     * use. Does not throw.
     */
    void lock();

    /**
     * Unlocks the statement, indicating that it is now available
     * for use. Does not throw.
     */
    void unlock();

    /**
     * Mirrors sqloxx::detail::SQLiteDBConn::throw_on_failure, and
     * throws the same exceptions under the same circumstances.
     */
    void throw_on_failure(int errcode);

private:

    /**
     * @param column_name the name to look up.
     *
     * NOTE(review): despite this doc's original wording ("a column in the
     * result set"), do_bind uses the returned index with the sqlite3_bind_*
     * functions — confirm whether this maps bind-parameter names rather
     * than result columns.
     *
     * @throws NoMatchingColumnException if \c column_name does not
     * name a column in the result set.
     */
    int parameter_index(std::string const& column_name) const;

    /**
     * Checks whether a column is available for extraction at
     * index \c index, of type \c value_type, and throws an
     * exception if not.
     *
     * @param index Position of column (starts from zero) in result
     * row.
     *
     * @param value_type Should be a SQLite value type code, i.e. one of:\n
     * SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, SQLITE_NULL.
     *
     * @throws NoResultRowException if there are no results available for
     * extraction.
     *
     * @throws ResultIndexOutOfRange if \c index is negative or is otherwise
     * out of range.
     *
     * @throws ValueTypeException if the value at position \c index is not of
     * value type \c value_type.
     */
    void check_column(int index, int value_type);

    template <typename T>
    void do_bind(std::string const& parameter_name, T x);

    sqlite3_stmt* m_statement;      // owned prepared-statement handle
    SQLiteDBConn& m_sqlite_dbconn;  // connection the statement was prepared on
    bool m_is_locked;               // whether a SQLStatement holds this impl
};
// FUNCTION TEMPLATE DEFINITIONS AND INLINE FUNCTIONS
// Forwards to the type-specific do_bind overload; on failure the statement
// is restored to a clean state before the exception propagates, so a
// half-bound statement is never left behind.
template <typename T>
inline
void
SQLStatementImpl::bind(std::string const& parameter_name, T const& x)
{
    try
    {
        do_bind(parameter_name, x);
    }
    catch (SQLiteException&)
    {
        reset();
        clear_bindings();
        throw;
    }
    return;
}
template <>
inline
int
SQLStatementImpl::extract<int>(int index)
{
    // Throws unless column `index` exists and holds an SQLite integer.
    check_column(index, SQLITE_INTEGER);
    return sqlite3_column_int(m_statement, index);
}
template <>
inline
long
SQLStatementImpl::extract<long>(int index)
{
    check_column(index, SQLITE_INTEGER);
    // SQLite stores integers as 64 bits; converted to long on return.
    return sqlite3_column_int64(m_statement, index);
}
template <>
inline
long long
SQLStatementImpl::extract<long long>(int index)
{
    check_column(index, SQLITE_INTEGER);
    // sqlite3_column_int64 returns SQLite's native 64-bit integer type.
    return sqlite3_column_int64(m_statement, index);
}
template <>
inline
double
SQLStatementImpl::extract<double>(int index)
{
    // Throws unless column `index` exists and holds an SQLite float.
    check_column(index, SQLITE_FLOAT);
    return sqlite3_column_double(m_statement, index);
}
template <>
inline
std::string
SQLStatementImpl::extract<std::string>(int index)
{
    check_column(index, SQLITE_TEXT);
    // sqlite3_column_text yields a NUL-terminated buffer; copy everything
    // up to (but not including) the terminator.
    const unsigned char* first = sqlite3_column_text(m_statement, index);
    const unsigned char* last = first;
    while (*last != '\0')
    {
        ++last;
    }
    return std::string(first, last);
}
inline
void
SQLStatementImpl::reset()
{
    // No-op when the statement handle is null.
    if (m_statement) sqlite3_reset(m_statement);
}
inline
void
SQLStatementImpl::clear_bindings()
{
    // No-op when the statement handle is null.
    if (m_statement) sqlite3_clear_bindings(m_statement);
}
inline
bool
SQLStatementImpl::is_locked() const
{
    // Report the current state of the lock flag set via lock()/unlock().
    return m_is_locked;
}
inline
void
SQLStatementImpl::lock()
{
    // Raise the lock flag; see is_locked().
    m_is_locked = true;
}
inline
void
SQLStatementImpl::unlock()
{
    // Clear the lock flag; see is_locked().
    m_is_locked = false;
}
#if INT_MAX <= 9223372036854775807
// Binds an int parameter, selecting the 32-bit or 64-bit SQLite binding
// function at compile time from the platform's INT_MAX. The enclosing
// #if excludes (exotic) platforms where int could overflow SQLite's
// 64-bit integer column type.
template <>
inline
void
SQLStatementImpl::do_bind(std::string const& parameter_name, int x)
{
# if INT_MAX <= 2147483647
    // int fits in 32 bits on this platform.
    JEWEL_ASSERT (CHAR_BIT * sizeof(x) <= 32);
    throw_on_failure
    (   sqlite3_bind_int
        (   m_statement,
            parameter_index(parameter_name),
            x
        )
    );
# else
    // int is wider than 32 bits but still fits SQLite's 64-bit integer.
    JEWEL_ASSERT (CHAR_BIT * sizeof(x) <= 64);
    throw_on_failure
    (   sqlite3_bind_int64
        (   m_statement,
            parameter_index(parameter_name),
            x
        )
    );
# endif
    return;
}
#endif
#if LONG_MAX <= 9223372036854775807
// Binds a long parameter, selecting the 32-bit or 64-bit SQLite binding
// function at compile time from the platform's LONG_MAX. The enclosing
// #if excludes platforms where long could overflow SQLite's 64-bit
// integer column type.
template <>
inline
void
SQLStatementImpl::do_bind(std::string const& parameter_name, long x)
{
# if LONG_MAX <= 2147483647
    // long fits in 32 bits on this platform (e.g. 32-bit and Win64 ABIs).
    JEWEL_ASSERT (CHAR_BIT * sizeof(x) <= 32);
    throw_on_failure
    (   sqlite3_bind_int
        (   m_statement,
            parameter_index(parameter_name),
            x
        )
    );
# else
    // long is wider than 32 bits but still fits SQLite's 64-bit integer.
    JEWEL_ASSERT (CHAR_BIT * sizeof(x) <= 64);
    throw_on_failure
    (   sqlite3_bind_int64
        (   m_statement,
            parameter_index(parameter_name),
            x
        )
    );
# endif
    return;
}
#endif
#if LLONG_MAX <= 9223372036854775807
// Binds a long long parameter via SQLite's 64-bit binding function.
template <>
inline
void
SQLStatementImpl::do_bind
(   std::string const& parameter_name,
    long long x
)
{
    // long long is guaranteed to be at least 64 bits. If it were wider
    // than 64 bits there would be a danger of overflowing SQLite's
    // 64-bit integer column type, in which values of long long type are
    // stored; the surrounding #if excludes such platforms from
    // compiling this specialization, so this assertion should always
    // hold at runtime.
    JEWEL_ASSERT (CHAR_BIT * sizeof(x) <= 64);
    throw_on_failure
    (   sqlite3_bind_int64
        (   m_statement,
            parameter_index(parameter_name),
            x
        )
    );
    return;
}
# endif
template <>
inline
void
SQLStatementImpl::do_bind(std::string const& parameter_name, double x)
{
    // Bind a floating-point value; throw_on_failure converts any
    // non-OK SQLite result code into an exception.
    int const code =
        sqlite3_bind_double(m_statement, parameter_index(parameter_name), x);
    throw_on_failure(code);
}
template <>
inline
void
SQLStatementImpl::do_bind(std::string const& parameter_name, char const* x)
{
    // Bind a NUL-terminated string. Passing -1 as the length tells
    // SQLite to use strlen(x); SQLITE_TRANSIENT makes SQLite take its
    // own copy, so the caller's buffer need not outlive this call.
    int const code = sqlite3_bind_text
    (   m_statement,
        parameter_index(parameter_name),
        x,
        -1,
        SQLITE_TRANSIENT
    );
    throw_on_failure(code);
}
} // namespace detail
} // namespace sqloxx
/// @endcond
// End hiding from Doxygen
#endif // GUARD_sql_statement_impl.hpp
|
{"hexsha": "5bb4b8ecc48077c9ee07539d79d7ae9a20f07983", "size": 12345, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/detail/sql_statement_impl.hpp", "max_stars_repo_name": "matt-harvey/sqloxx", "max_stars_repo_head_hexsha": "4bc74b8992ebf735b96d512ee879b8035659fe60", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/detail/sql_statement_impl.hpp", "max_issues_repo_name": "matt-harvey/sqloxx", "max_issues_repo_head_hexsha": "4bc74b8992ebf735b96d512ee879b8035659fe60", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/detail/sql_statement_impl.hpp", "max_forks_repo_name": "matt-harvey/sqloxx", "max_forks_repo_head_hexsha": "4bc74b8992ebf735b96d512ee879b8035659fe60", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6652806653, "max_line_length": 79, "alphanum_fraction": 0.6385581207, "num_tokens": 2893}
|
import unittest
import numpy as np
import spladtool.spladtool_forward as stf
class TestBasic(unittest.TestCase):
    """Elementwise-operation tests for the spladtool forward-mode tensor.

    Each test builds the same data as a plain numpy array and as an
    ``stf`` tensor, applies one arithmetic op, and checks both the
    resulting values and the forward-mode gradient (``.grad``).
    """

    def test_add(self):
        """Scalar addition: values match numpy; d(x+4)/dx == 1."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = x + 4
        sf_x = stf.tensor([[1.0], [2.0], [3.0]])
        sf_z = sf_x + 4
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_x.data + 4 == z).all())
        self.assertTrue((4 + sf_x.data == 4 + x).all())
        self.assertTrue((sf_z.grad == np.array([[1.],[1.],[1.]])).all())

    def test_sub(self):
        """Scalar subtraction (both orders); d(x-4)/dx == 1."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = x - 4
        sf_x = stf.tensor([[1.0], [2.0], [3.0]])
        sf_z = sf_x - 4
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_x.data - 4 == z).all())
        self.assertTrue((4 - sf_x.data == 4 - x).all())
        self.assertTrue((sf_z.grad == np.array([[1.],[1.],[1.]])).all())

    def test_mult(self):
        """Scalar multiplication; d(3x)/dx == 3."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = 3 * x
        sf_x = stf.tensor([[1.0], [2.0], [3.0]])
        sf_z = 3 * sf_x
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_z.data == z).all())
        self.assertTrue((sf_x.data * 3 == x * 3).all())
        self.assertTrue((sf_z.grad == np.array([[3.],[3.],[3.]])).all())

    def test_div(self):
        """Scalar division (both orders); d(x/4)/dx == 0.25."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = x / 4
        sf_x = stf.tensor([[1.0], [2.0], [3.0]])
        sf_z = sf_x / 4
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_z.data == z).all())
        self.assertTrue((4 / sf_x.data == 4 / x).all())
        self.assertTrue((sf_z.grad == np.array([[0.25],[0.25],[0.25]])).all())

    def test_pow_consf(self):
        """Power with a constant exponent; d(x**3)/dx == 3*x**2."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = x ** 3
        sf_x = stf.tensor(x)
        sf_z = sf_x ** 3
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_z.data == z).all())
        self.assertTrue((sf_z.grad == (3 * x ** 2)).all())

    def test_neg(self):
        """Unary negation; d(-x)/dx == -1."""
        x = np.array([[1.0], [2.0], [3.0]])
        z = -x
        sf_x = stf.tensor(x)
        sf_z = -sf_x
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue((sf_z.data == z).all())
        self.assertTrue(((-sf_z).data == -z).all())
        self.assertTrue((sf_z.grad == np.array([[-1.],[-1.],[-1.]])).all())

    def test_synthesis(self):
        """Composite expression z = -(2x+1)/x**3: values and chain-rule grads."""
        x = stf.tensor([[1., 2.], [3., 4.]])
        y = 2 * x + 1
        z = - y / (x ** 3)
        print('x : ', x)
        print('y : ', y)
        print('y.grad : ', y.grad)
        print('z: ', z)
        print('z.grad: ', z.grad)
        self.assertTrue((y.data == np.array([[3., 5.], [7., 9.]])).all())
        self.assertTrue((y.grad == np.array([[2., 2.], [2., 2.]])).all())
        self.assertTrue((z.data == np.array([[-3., -5. / 8], [-7. / 27, -9. / 64]])).all())
        self.assertTrue((z.grad == np.array([[7., 11./16], [15. / 81, 19. / 256]])).all())

    def test_repr(self):
        """repr() of a tensor follows the documented fixed format."""
        x = stf.tensor([[1., 2.], [3., 4.]])
        self.assertTrue(repr(x) == 'spladtool.Tensor(\n[[1. 2.]\n [3. 4.]]\n)')

    def test_seed(self):
        """Custom seed vector is honored by the forward pass.

        NOTE(review): this test constructs ``stf.Tensor`` directly while
        every other test uses the ``stf.tensor`` factory — confirm both
        entry points accept ``seed``.
        """
        seed = [1.0, 0, 0]
        sf_x = stf.Tensor([1.0, 2.0, 3.0], seed=seed)
        sf_z = sf_x + 4
        print('x : ', sf_x)
        print('z : ', sf_z)
        print('z.grad: ', sf_z.grad)
        self.assertTrue(sf_z.grad == 1.0)
# Allow running this test module directly: ``python test_basic_ops.py``.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "429f690863202058257ed6dde8a8cfc41bd9fe10", "size": 3688, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_basic_ops.py", "max_stars_repo_name": "cs107-rysr/cs107-FinalProject", "max_stars_repo_head_hexsha": "df4814948374dbb5defe28e8d318e43d33a2d1cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_basic_ops.py", "max_issues_repo_name": "cs107-rysr/cs107-FinalProject", "max_issues_repo_head_hexsha": "df4814948374dbb5defe28e8d318e43d33a2d1cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_basic_ops.py", "max_forks_repo_name": "cs107-rysr/cs107-FinalProject", "max_forks_repo_head_hexsha": "df4814948374dbb5defe28e8d318e43d33a2d1cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1481481481, "max_line_length": 91, "alphanum_fraction": 0.4468546638, "include": true, "reason": "import numpy", "num_tokens": 1251}
|
from typing import Optional, Tuple, Union
from grgr import _R
from grgr.dev import dict_to_rargs
from grgr.dev.typing import T, U
from grgr.ggplot2.basic import Aesthetic, GGPlot
from grgr.ggplot2.facet import Facet
from grgr.ggplot2.layer import Layer
from grgr.ggplot2.scale import Appearance
from grgr.ggplot2.theme import Theme, ThemeElement
from numpy import array, ndarray, str_
from numpy.typing import NDArray
from pandas import DataFrame
# Basics
def ggplot(data: Optional[DataFrame] = None,
           mapping: Optional[Aesthetic] = None,
           **kwargs) -> GGPlot:
    """Create a ``GGPlot``, mirroring R's ``ggplot(data, mapping, ...)``."""
    return GGPlot(data, mapping, **kwargs)


def aes(x: Optional[Union[str, ndarray]] = None,
        y: Optional[Union[str, ndarray]] = None,
        **kwargs) -> Aesthetic:
    """Build an aesthetic mapping, mirroring R's ``aes(x, y, ...)``."""
    return Aesthetic(x, y, **kwargs)
def ggsave(filename: str,
           plot: Optional[GGPlot] = None,
           width: Optional[int] = None,
           height: Optional[int] = None,
           dpi: Optional[int] = None,
           **kwargs):
    """Save a plot by emitting an R ``ggsave(...)`` call.

    The function captures its own arguments via ``locals()`` and renders
    them into an R argument list.

    NOTE(review): ``s`` appears to exist only to be excluded again in
    ``dict_to_rargs(pyargs, ["s"])``; after ``pyargs.update(**kwargs)``
    the ``kwargs`` key itself also remains in ``pyargs`` — presumably
    ``dict_to_rargs`` drops it, but confirm against its implementation.
    """
    s = str()
    # locals() must be read before further temporaries are bound.
    pyargs = locals()
    pyargs.update(**kwargs)
    rargs = dict_to_rargs(pyargs, ["s"])
    rcode = f"ggsave({rargs})"
    _R(rcode)
# Layer
def geom_abline(slope: float = 1., intercept: float = 0.) -> Layer:
    """Reference line with given slope and intercept (R ``geom_abline``)."""
    return Layer("geom_abline", slope=slope, intercept=intercept)


def geom_hline(yintercept: float) -> Layer:
    """Horizontal reference line at ``yintercept`` (R ``geom_hline``)."""
    return Layer("geom_hline", yintercept=yintercept)
def geom_vline(xintercept: float) -> Layer:
    """Vertical reference line at ``xintercept`` (R ``geom_vline``).

    Fixes a copy-paste bug: this previously emitted ``geom_hline`` with
    an ``xintercept`` argument, which R's ``geom_hline`` does not accept.
    """
    return Layer("geom_vline", xintercept=xintercept)
def geom_bar(data: Optional[DataFrame] = None,
             mapping: Optional[Aesthetic] = None,
             **kwargs) -> Layer:
    """Bar-chart layer (R ``geom_bar``)."""
    return Layer("geom_bar", data, mapping, **kwargs)


def geom_boxplot(data: Optional[DataFrame] = None,
                 mapping: Optional[Aesthetic] = None,
                 **kwargs) -> Layer:
    """Box-and-whisker layer (R ``geom_boxplot``)."""
    return Layer("geom_boxplot", data, mapping, **kwargs)


def geom_density(data: Optional[DataFrame] = None,
                 mapping: Optional[Aesthetic] = None,
                 **kwargs) -> Layer:
    """1-D kernel-density layer (R ``geom_density``)."""
    return Layer("geom_density", data, mapping, **kwargs)


def geom_density_2d(data: Optional[DataFrame] = None,
                    mapping: Optional[Aesthetic] = None,
                    **kwargs) -> Layer:
    """2-D density-contour layer (R ``geom_density_2d``)."""
    return Layer("geom_density_2d", data, mapping, **kwargs)


def geom_histogram(data: Optional[DataFrame] = None,
                   mapping: Optional[Aesthetic] = None,
                   **kwargs) -> Layer:
    """Histogram layer (R ``geom_histogram``)."""
    return Layer("geom_histogram", data, mapping, **kwargs)


def geom_errorbar(data: Optional[DataFrame] = None,
                  mapping: Optional[Aesthetic] = None,
                  **kwargs) -> Layer:
    """Error-bar layer (R ``geom_errorbar``)."""
    return Layer("geom_errorbar", data, mapping, **kwargs)


def geom_line(data: Optional[DataFrame] = None,
              mapping: Optional[Aesthetic] = None,
              **kwargs) -> Layer:
    """Line layer (R ``geom_line``)."""
    return Layer("geom_line", data, mapping, **kwargs)


def geom_point(data: Optional[DataFrame] = None,
               mapping: Optional[Aesthetic] = None,
               **kwargs) -> Layer:
    """Scatter-point layer (R ``geom_point``)."""
    return Layer("geom_point", data, mapping, **kwargs)


def geom_ribbon(data: Optional[DataFrame] = None,
                mapping: Optional[Aesthetic] = None,
                **kwargs) -> Layer:
    """Ribbon (band between ymin/ymax) layer (R ``geom_ribbon``)."""
    return Layer("geom_ribbon", data, mapping, **kwargs)


def geom_area(data: Optional[DataFrame] = None,
              mapping: Optional[Aesthetic] = None,
              **kwargs) -> Layer:
    """Filled-area layer (R ``geom_area``)."""
    return Layer("geom_area", data, mapping, **kwargs)


def geom_violin(data: Optional[DataFrame] = None,
                mapping: Optional[Aesthetic] = None,
                **kwargs) -> Layer:
    """Violin-plot layer (R ``geom_violin``)."""
    return Layer("geom_violin", data, mapping, **kwargs)
# Scales
def labs(title: Optional[str] = None,
         subtitle: Optional[str] = None,
         caption: Optional[str] = None,
         tag: Optional[str] = None,
         alt: Optional[str] = None,
         alt_insight: Optional[str] = None,
         **kwargs) -> Appearance:
    """Set plot labels (title, subtitle, caption, ...) — R ``labs``."""
    return Appearance("labs",
                      title=title,
                      subtitle=subtitle,
                      caption=caption,
                      tag=tag,
                      alt=alt,
                      alt_insight=alt_insight,
                      **kwargs)


def xlab(label: str) -> Appearance:
    """Set the x-axis label (R ``xlab``)."""
    return Appearance("xlab", label=label)
def ylab(label: str) -> Appearance:
    """Set the y-axis label (R ``ylab``).

    Fixes a copy-paste bug: this previously emitted ``xlab``, so calling
    ``ylab`` silently overwrote the x-axis label instead.
    """
    return Appearance("ylab", label=label)
def ggtitle(label, subtitle: Optional[str] = None) -> Appearance:
    """Set the plot title and optional subtitle (R ``ggtitle``)."""
    return Appearance("ggtitle", label=label, subtitle=subtitle)


def lims(x: Optional[Tuple[T, T]], y: Optional[Tuple[U, U]]) -> Appearance:
    """Set both axis limits at once (R ``lims``).

    NOTE(review): both parameters are annotated Optional but have no
    defaults, so callers must always pass both — confirm intent.
    """
    return Appearance("lims", x=array(x), y=array(y))


def xlim(x: Tuple[T, T]) -> Appearance:
    """Set the x-axis limits (R ``xlim``)."""
    return Appearance("xlim", array(x))


def ylim(y: Tuple[T, T]) -> Appearance:
    """Set the y-axis limits (R ``ylim``)."""
    return Appearance("ylim", array(y))


def scale_color_continuous(colorscale: str = '"gradient"') -> Appearance:
    """Continuous color scale; default type is R's ``"gradient"``."""
    return Appearance("scale_color_continuous", type=colorscale)


def scale_fill_continuous(colorscale: str = '"gradient"') -> Appearance:
    """Continuous fill scale; default type is R's ``"gradient"``."""
    return Appearance("scale_fill_continuous", type=colorscale)


def scale_color_discrete(colorscale: str = '"gradient"') -> Appearance:
    """Discrete color scale (R ``scale_color_discrete``)."""
    return Appearance("scale_color_discrete", type=colorscale)


def scale_fill_discrete(colorscale: str = '"gradient"') -> Appearance:
    """Discrete fill scale (R ``scale_fill_discrete``)."""
    return Appearance("scale_fill_discrete", type=colorscale)


def scale_color_gradient(low: str, high: str, **kwargs) -> Appearance:
    """Two-color continuous color gradient (R ``scale_color_gradient``)."""
    return Appearance("scale_color_gradient", low=low, high=high, **kwargs)


def scale_fill_gradient(low: str, high: str, **kwargs) -> Appearance:
    """Two-color continuous fill gradient (R ``scale_fill_gradient``)."""
    return Appearance("scale_fill_gradient", low=low, high=high, **kwargs)


def scale_color_gradient2(low: str, mid: str, high: str,
                          **kwargs) -> Appearance:
    """Diverging color gradient with a midpoint color."""
    return Appearance("scale_color_gradient2",
                      low=low,
                      mid=mid,
                      high=high,
                      **kwargs)


def scale_fill_gradient2(low: str, mid: str, high: str,
                         **kwargs) -> Appearance:
    """Diverging fill gradient with a midpoint color."""
    return Appearance("scale_fill_gradient2",
                      low=low,
                      mid=mid,
                      high=high,
                      **kwargs)


def scale_color_gradientn(colors: NDArray[str_], **kwargs) -> Appearance:
    """N-color color gradient (R ``scale_color_gradientn``)."""
    return Appearance("scale_color_gradientn", colors=colors, **kwargs)


def scale_fill_gradientn(colors: NDArray[str_], **kwargs) -> Appearance:
    """N-color fill gradient (R ``scale_fill_gradientn``)."""
    return Appearance("scale_fill_gradientn", colors=colors, **kwargs)
# Facets
def facet_grid(*args, **kwargs) -> Facet:
    """Grid facetting; arguments are forwarded to R ``facet_grid``."""
    return Facet("facet_grid", *args, **kwargs)


def facet_wrap(*args, **kwargs) -> Facet:
    """Wrapped-ribbon facetting; forwarded to R ``facet_wrap``."""
    return Facet("facet_wrap", *args, **kwargs)
# Themes
def theme(**kwargs) -> Theme:
    """Customize theme components (R ``theme``)."""
    return Theme("theme", **kwargs)


def theme_bw(**kwargs) -> Theme:
    """Black-and-white complete theme (R ``theme_bw``)."""
    return Theme("theme_bw", **kwargs)


def theme_classic(**kwargs) -> Theme:
    """Classic complete theme without gridlines (R ``theme_classic``)."""
    return Theme("theme_classic", **kwargs)


def margin(top: float = 0.,
           right: float = 0.,
           bottom: float = 0.,
           left: float = 0.,
           unit: str = "pt") -> ThemeElement:
    """Margin specification; maps to R ``margin(t, r, b, l, unit)``."""
    return ThemeElement("margin", t=top, r=right, b=bottom, l=left, unit=unit)


def element_blank() -> ThemeElement:
    """Theme element that draws nothing (R ``element_blank``)."""
    return ThemeElement("element_blank")


def element_rect(fill: Optional[str] = None,
                 color: Optional[str] = None,
                 size: Optional[float] = None,
                 linetype: Optional[str] = None,
                 **kwargs) -> ThemeElement:
    """Rectangle theme element for borders and backgrounds."""
    return ThemeElement("element_rect",
                        fill=fill,
                        color=color,
                        size=size,
                        linetype=linetype,
                        **kwargs)


def element_line(color: Optional[str] = None,
                 size: Optional[float] = None,
                 linetype: Optional[str] = None,
                 **kwargs) -> ThemeElement:
    """Line theme element for axes and gridlines."""
    return ThemeElement("element_line",
                        color=color,
                        size=size,
                        linetype=linetype,
                        **kwargs)


def element_text(family: Optional[str] = None,
                 color: Optional[str] = None,
                 size: Optional[float] = None,
                 angle: Optional[float] = None,
                 **kwargs) -> ThemeElement:
    """Text theme element for titles, labels and captions."""
    return ThemeElement("element_text",
                        family=family,
                        color=color,
                        size=size,
                        angle=angle,
                        **kwargs)
|
{"hexsha": "e299568d35da27e4a44008b2f0ad125e4b9f62b6", "size": 8398, "ext": "py", "lang": "Python", "max_stars_repo_path": "grgr/ggplot2/__init__.py", "max_stars_repo_name": "7cm-diameter/grgr", "max_stars_repo_head_hexsha": "514a8f56e0fae9b4374d921006aaf3fb08fa2d09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grgr/ggplot2/__init__.py", "max_issues_repo_name": "7cm-diameter/grgr", "max_issues_repo_head_hexsha": "514a8f56e0fae9b4374d921006aaf3fb08fa2d09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grgr/ggplot2/__init__.py", "max_forks_repo_name": "7cm-diameter/grgr", "max_forks_repo_head_hexsha": "514a8f56e0fae9b4374d921006aaf3fb08fa2d09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6749116608, "max_line_length": 78, "alphanum_fraction": 0.5890688259, "include": true, "reason": "from numpy", "num_tokens": 1883}
|
MODULE IMSReorderingModule
use KindModule, only: DP, I4B
private
public :: ims_genrcm, ims_odrv, ims_dperm, ims_vperm
contains
!----- subroutine ims_genrcm
!
! purpose - ims_genrcm finds the reverse cuthill-mckee
! ordering for a general graph. for each connected
! component in the graph, ims_genrcm obtains the ordering
! by calling the subroutine ims_rcm.
!
! input parameters -
! neqns - number of equations
! (xadj0, adjncy) - array pair containing the adjacency
! structure of the graph of the matrix.
!
! output parameter -
! perm - vector that contains the rcm ordering.
!
! working parameters -
! xadj - working ia of the matrix
! mask - is used to mark variables that have been
! numbered during the ordering process. it is
! initialized to 1, and set to zero as each node
! is numbered.
! xls - the index vector for a level structure. the
! level structure is stored in the currently
! unused spaces in the permutation vector perm.
!
! program subroutines -
! ims_fnroot, ims_rcm.
!
!***************************************************************
!
subroutine ims_genrcm(neqns, nja, xadj0, adjncy, perm, mask, xls)
!
!***************************************************************
!
  implicit none
  ! -- dummy variables
  integer(I4B), intent(in) :: neqns, nja
  integer(I4B), dimension(neqns+1), intent(in) :: xadj0
  integer(I4B), dimension(nja), intent(in) :: adjncy
  integer(I4B), dimension(neqns), intent(inout) :: perm
  integer(I4B), dimension(neqns), intent(inout) :: mask
  integer(I4B), dimension(neqns+1), intent(inout) :: xls
  ! -- locals
  integer(I4B) :: i
  integer(I4B) :: ccsize
  integer(I4B) :: lperm
  integer(I4B) :: nlvl
  integer(I4B) :: num
  integer(I4B) :: root
  integer(I4B), allocatable, dimension(:) :: xadj
  !
  !***************************************************************
  !
  ! allocate local storage
  allocate(xadj(neqns+1))
  !
  ! initialize mask and working xadj.  xadj0 is copied into a local
  ! working array because ims_degree (called via ims_rcm) temporarily
  ! negates xadj entries as visit markers.
  do i = 1, neqns
    mask(i) = 1
    xadj(i) = xadj0(i)
  end do
  xadj(neqns+1) = xadj0(neqns+1)
  num = 1
  louter: do i = 1, neqns
    !
    ! for each masked connected component
    if (mask(i) == 0) cycle
    root = i
    !
    ! first find a pseudo-peripheral node root.
    ! note that the level structure found by
    ! ims_fnroot is stored starting at perm(num).
    ! then ims_rcm is called to order the component
    ! using root as the starting node.
    !
    ! lperm is the space remaining in perm for this component
    lperm = neqns - num + 1
    call ims_fnroot(lperm, neqns, nja, root, xadj, adjncy, mask, &
                    nlvl, xls, perm(num))
    call ims_rcm(lperm, neqns, nja, root, xadj, adjncy, mask, &
                 perm(num), ccsize, xls )
    num = num + ccsize
    ! all nodes numbered -- done
    if (num > neqns) exit louter
  end do louter
  !
  ! deallocate local storage
  deallocate(xadj)
  return
end subroutine ims_genrcm
! subroutine ims_fnroot
!
! find pseudo-peripheral node
!
! purpose - ims_fnroot implements a modified version of the
! scheme by gibbs, poole, and stockmeyer to find pseudo-
! peripheral nodes. it determines such a node for the
! section subgraph specified by mask and root.
!
! input parameters -
! (xadj, adjncy) - adjacency structure pair for the graph.
! mask - specifies a section subgraph. nodes for which
! mask is zero are ignored by ims_fnroot.
!
! updated parameter -
! root - on input, it (along with mask) defines the
! component for which a pseudo-peripheral node is
! to be found. on output, it is the node obtained.
!
! output parameters -
! nlvl - is the number of levels in the level structure
! rooted at the node root.
! (xls,ls) - the level structure array pair containing
! the level structure found.
!
! program subroutines -
! ims_rootls.
!
!***************************************************************
!
subroutine ims_fnroot (lls, neqns, nja, root, xadj, adjncy, mask, &
                       nlvl, xls, ls )
  implicit none
  ! -- dummy variables
  integer(I4B), intent(in) :: lls
  integer(I4B), intent(in) :: neqns
  integer(I4B), intent(in) :: nja
  integer(I4B), intent(inout) :: root
  integer(I4B), dimension(neqns+1), intent(in) :: xadj
  integer(I4B), dimension(nja), intent(in) :: adjncy
  integer(I4B), dimension(neqns), intent(inout) :: mask
  integer(I4B), intent(inout) :: nlvl
  integer(I4B), dimension(neqns+1), intent(inout) :: xls
  integer(I4B), dimension(lls), intent(inout) :: ls
  ! -- local
  integer(I4B) :: ccsize
  integer(I4B) :: j
  integer(I4B) :: k
  integer(I4B) :: jstrt
  integer(I4B) :: kstrt
  integer(I4B) :: kstop
  integer(I4B) :: mindeg
  integer(I4B) :: nabor
  integer(I4B) :: ndeg
  integer(I4B) :: node
  integer(I4B) :: nunlvl
  !
  ! determine the level structure rooted at root.
  call ims_rootls(lls, neqns, nja, root, xadj, adjncy, mask, &
                  nlvl, xls, ls)
  ccsize = xls(nlvl+1) - 1
  ! trivial cases: a single level, or one node per level (a path)
  if ( nlvl == 1 .or. nlvl == ccsize ) return
  !
  ! pick a node with minimum degree from the last level.
  ! (iteration point of the Gibbs-Poole-Stockmeyer scheme)
  100 jstrt = xls(nlvl)
  mindeg = ccsize
  root = ls(jstrt)
  if ( ccsize == jstrt ) go to 400
  louter: do j = jstrt, ccsize
    node = ls(j)
    ndeg = 0
    kstrt = xadj(node)
    kstop = xadj(node+1) - 1
    linner: do k = kstrt, kstop
      nabor = adjncy(k)
      ! only neighbors inside the masked subgraph count toward degree
      if (mask(nabor) > 0) ndeg = ndeg + 1
    end do linner
    if (ndeg >= mindeg) cycle louter
    root = node
    mindeg = ndeg
  end do louter
  !
  ! and generate its rooted level structure.
  400 call ims_rootls(lls, neqns, nja, root, xadj, adjncy, mask, &
                      nunlvl, xls, ls)
  ! stop when the number of levels no longer grows (pseudo-peripheral)
  if (nunlvl <= nlvl) return
  nlvl = nunlvl
  if (nlvl < ccsize) go to 100
  return
end subroutine ims_fnroot
! subroutine ims_rcm
!
! reverse cuthill-mckee ordering
!
! purpose - rcm numbers a connected component specified by
! mask and root, using the rcm algorithm.
! the numbering is to be started at the node root.
!
! input parameters -
! root - is the node that defines the connected
! component and it is used as the starting
! node for the rcm ordering.
! (xadj, adjncy) - adjacency structure pair for
! the graph.
!
! updated parameters -
! mask - only those nodes with nonzero input mask
! values are considered by the routine. the
! nodes numbered by rcm will have their
! mask values set to zero.
!
! output parameters -
! perm - will contain the rcm ordering.
! ccsize - is the size of the connected component
! that has been numbered by rcm.
!
! working parameter -
! deg - is a temporary vector used to hold the degree
! of the nodes in the section graph specified
! by mask and root.
!
! program subroutines -
! ims_degree.
!
!***************************************************************
!
subroutine ims_rcm(llperm, neqns, nja, root, xadj, adjncy, &
                   mask, perm, ccsize, deg)
!
  implicit none
  ! -- dummy variables
  integer(I4B), intent(in) :: llperm
  integer(I4B), intent(in) :: neqns
  integer(I4B), intent(in) :: nja
  integer(I4B), intent(in) :: root
  integer(I4B), dimension(neqns+1), intent(inout) :: xadj
  integer(I4B), dimension(nja), intent(in) :: adjncy
  integer(I4B), dimension(neqns), intent(inout) :: mask
  integer(I4B), dimension(llperm), intent(inout) :: perm
  integer(I4B), intent(inout) :: ccsize
  integer(I4B), dimension(neqns), intent(inout) :: deg
  ! -- local
  integer(I4B) :: fnbr
  integer(I4B) :: i
  integer(I4B) :: j
  integer(I4B) :: jstop
  integer(I4B) :: jstrt
  integer(I4B) :: k
  integer(I4B) :: l
  integer(I4B) :: lbegin
  integer(I4B) :: lnbr
  integer(I4B) :: lperm
  integer(I4B) :: lvlend
  integer(I4B) :: nbr
  integer(I4B) :: node
  ! code
  ! find the degrees of the nodes in the
  ! component specified by mask and root.
  ! (ims_degree also places root at perm(1) and sets ccsize)
  call ims_degree(llperm, neqns, nja, root, xadj, adjncy, mask, &
                  deg, ccsize, perm)
  mask(root) = 0
  if (ccsize <= 1) return
  lvlend = 0
  lnbr = 1
  !
  ! lbegin and lvlend point to the beginning and
  ! the end of the current level respectively.
  100 lbegin = lvlend + 1
  lvlend = lnbr
  lbegend: do i = lbegin, lvlend
    !
    ! for each node in current level ...
    node = perm(i)
    jstrt = xadj(node)
    jstop = xadj(node+1) - 1
    !
    ! find the unnumbered neighbors of node.
    ! fnbr and lnbr point to the first and last
    ! unnumbered neighbors respectively of the current
    ! node in perm.
    fnbr = lnbr + 1
    lunn: do j = jstrt, jstop
      nbr = adjncy(j)
      if (mask(nbr) == 0) cycle lunn
      lnbr = lnbr + 1
      ! mark nbr as numbered and append it to perm
      mask(nbr) = 0
      perm(lnbr) = nbr
    end do lunn
    if (fnbr >= lnbr) cycle lbegend
    !
    ! sort the neighbors of node in increasing
    ! order by degree. linear insertion is used.
    k = fnbr
    300 l = k
    k = k + 1
    nbr = perm(k)
    400 if (l < fnbr) go to 500
    lperm = perm(l)
    if (deg(lperm) <= deg(nbr)) go to 500
    ! shift larger-degree entries right to make room for nbr
    perm(l+1) = lperm
    l = l - 1
    go to 400
    500 perm(l+1) = nbr
    if (k < lnbr) go to 300
  end do lbegend
  ! continue breadth-first while new nodes were added this level
  if (lnbr > lvlend) go to 100
  !
  ! we now have the cuthill mckee ordering.
  ! reverse it below (this gives the "reverse" in RCM) ...
  k = ccsize/2
  l = ccsize
  do i = 1, k
    lperm = perm(l)
    perm(l) = perm(i)
    perm(i) = lperm
    l = l - 1
  end do
  return
end subroutine ims_rcm
!----- subroutine ims_degree
! degree in masked component ********
!
! purpose - this routine computes the degrees of the nodes
! in the connected component specified by mask and root.
! nodes for which mask is zero are ignored.
!
! input parameter -
! root - is the input node that defines the component.
! (xadj, adjncy) - adjacency structure pair.
! mask - specifies a section subgraph.
!
! output parameters -
! deg - array containing the degrees of the nodes in
! the component.
! ccsize-size of the component specified by mask and root
!
! working parameter -
! ls - a temporary vector used to store the nodes of the
! component level by level.
!
!***************************************************************
!
subroutine ims_degree(lls, neqns, nja, root, xadj, adjncy, mask, &
                      deg, ccsize, ls)
!
!***************************************************************
!
  implicit none
  ! -- dummy variables
  integer(I4B), intent(in) :: lls
  integer(I4B), intent(in) :: neqns
  integer(I4B), intent(in) :: nja
  integer(I4B), intent(in) :: root
  integer(I4B), dimension(neqns+1), intent(inout) :: xadj
  integer(I4B), dimension(nja), intent(in) :: adjncy
  integer(I4B), dimension(neqns), intent(in) :: mask
  integer(I4B), dimension(neqns), intent(inout) :: deg
  integer(I4B), intent(inout) :: ccsize
  integer(I4B), dimension(lls), intent(inout) :: ls
  ! -- local
  integer(I4B) :: i
  integer(I4B) :: ideg
  integer(I4B) :: j
  integer(I4B) :: jstop
  integer(I4B) :: jstrt
  integer(I4B) :: lbegin
  integer(I4B) :: lvlend
  integer(I4B) :: lvsize
  integer(I4B) :: nbr
  integer(I4B) :: node
  ! code
  !
  ! initialization ...
  ! the array xadj is used as a temporary marker to
  ! indicate which nodes have been considered so far:
  ! a negated xadj entry means "already enqueued".
  ls(1) = root
  xadj(root) = -xadj(root)
  lvlend = 0
  ccsize = 1
  !
  ! lbegin is the pointer to the beginning of the current
  ! level, and lvlend points to the end of this level.
  100 lbegin = lvlend + 1
  lvlend = ccsize
  !
  ! find the degrees of nodes in the current level,
  ! and at the same time, generate the next level.
  louter: do i = lbegin, lvlend
    node = ls(i)
    ! jstrt/jstop recover the true row pointers despite the sign trick
    jstrt = -xadj(node)
    jstop = iabs(xadj(node + 1)) - 1
    ideg = 0
    if (jstop < jstrt) go to 300
    linner: do j = jstrt, jstop
      nbr = adjncy(j)
      if (mask(nbr) == 0) cycle linner
      ideg = ideg + 1
      ! skip neighbors already enqueued (negative marker)
      if (xadj(nbr) < 0) cycle linner
      xadj(nbr) = -xadj(nbr)
      ccsize = ccsize + 1
      ls(ccsize) = nbr
    end do linner
    300 deg(node) = ideg
  end do louter
  !
  ! compute the current level width.
  ! if it is nonzero , generate another level.
  lvsize = ccsize - lvlend
  if (lvsize > 0) go to 100
  !
  ! reset xadj to its correct sign and return.
  do i = 1, ccsize
    node = ls(i)
    xadj(node) = -xadj(node)
  end do
  return
end subroutine ims_degree
! subroutine ims_rootls
!
! rooted level structure
!
! purpose - ims_rootls generates the level structure rooted
! at the input node called root. only those nodes for
! which mask is nonzero will be considered.
!
! input parameters -
! root - the node at which the level structure is to
! be rooted.
! (xadj, adjncy) - adjacency structure pair for the
! given graph.
! mask - is used to specify a section subgraph. nodes
! with mask(i)=0 are ignored.
!
! output parameters -
! nlvl - is the number of levels in the level structure.
! (xls, ls) - array pair for the rooted level structure.
!
!***************************************************************
!
subroutine ims_rootls(lls, neqns, nja, root, xadj, adjncy, mask, &
                      nlvl, xls, ls )
  implicit none
  ! -- dummy variables
  integer(I4B), intent(in) :: lls
  integer(I4B), intent(in) :: neqns
  integer(I4B), intent(in) :: nja
  integer(I4B), intent(in) :: root
  integer(I4B), dimension(neqns+1), intent(in) :: xadj
  integer(I4B), dimension(nja), intent(in) :: adjncy
  integer(I4B), dimension(neqns), intent(inout) :: mask
  integer(I4B), intent(inout) :: nlvl
  integer(I4B), dimension(neqns+1), intent(inout) :: xls
  integer(I4B), dimension(lls), intent(inout) :: ls
  ! -- local
  integer(I4B) :: i
  integer(I4B) :: j
  integer(I4B) :: jstop
  integer(I4B) :: jstrt
  integer(I4B) :: lbegin
  integer(I4B) :: ccsize
  integer(I4B) :: lvlend
  integer(I4B) :: lvsize
  integer(I4B) :: nbr
  integer(I4B) :: node
  !
  ! code
  !
  ! initialization ... mask doubles as the "not yet visited" flag and
  ! is restored to 1 for all visited nodes before returning.
  mask(root) = 0
  ls(1) = root
  nlvl = 0
  lvlend = 0
  ccsize = 1
  !
  ! lbegin is the pointer to the beginning of the current
  ! level, and lvlend points to the end of this level.
  200 lbegin = lvlend + 1
  lvlend = ccsize
  nlvl = nlvl + 1
  xls(nlvl) = lbegin
  !
  ! generate the next level by finding all the masked
  ! neighbors of nodes in the current level.
  louter: do i = lbegin, lvlend
    node = ls(i)
    jstrt = xadj(node)
    jstop = xadj(node + 1) - 1
    if (jstop < jstrt) cycle louter
    linner: do j = jstrt, jstop
      nbr = adjncy(j)
      if (mask(nbr) == 0) cycle linner
      ccsize = ccsize + 1
      ls(ccsize) = nbr
      mask(nbr) = 0
    end do linner
  end do louter
  !
  ! compute the current level width.
  ! if it is nonzero, generate the next level.
  lvsize = ccsize - lvlend
  if (lvsize > 0 ) go to 200
  !
  ! reset mask to one for the nodes in the level structure.
  xls(nlvl+1) = lvlend + 1
  do i = 1, ccsize
    node = ls(i)
    mask(node) = 1
  end do
  return
end subroutine ims_rootls
subroutine ims_odrv(n, nja, nsp, ia, ja, p, ip, isp, flag)
!
! 3/12/82
!***********************************************************************
! odrv -- driver for sparse matrix reordering routines
!***********************************************************************
!
! description
!
! odrv finds a minimum degree ordering of the rows and columns
! of a matrix m stored in (ia,ja,a) format (see below). for the
! reordered matrix, the work and storage required to perform
! gaussian elimination is (usually) significantly less.
!
! note.. odrv and its subordinate routines have been modified to
! compute orderings for general matrices, not necessarily having any
! symmetry. the minimum degree ordering is computed for the
! structure of the symmetric matrix m + m-transpose.
! modifications to the original odrv module have been made in
! the coding in subroutine mdi, and in the initial comments in
! subroutines odrv and md.
!
! if only the nonzero entries in the upper triangle of m are being
! stored, then odrv symmetrically reorders (ia,ja,a), (optionally)
! with the diagonal entries placed first in each row. this is to
! ensure that if m(i,j) will be in the upper triangle of m with
! respect to the new ordering, then m(i,j) is stored in row i (and
! thus m(j,i) is not stored), whereas if m(i,j) will be in the
! strict lower triangle of m, then m(j,i) is stored in row j (and
! thus m(i,j) is not stored).
!
!
! storage of sparse matrices
!
! the nonzero entries of the matrix m are stored row-by-row in the
! array a. to identify the individual nonzero entries in each row,
! we need to know in which column each entry lies. these column
! indices are stored in the array ja. i.e., if a(k) = m(i,j), then
! ja(k) = j. to identify the individual rows, we need to know where
! each row starts. these row pointers are stored in the array ia.
! i.e., if m(i,j) is the first nonzero entry (stored) in the i-th row
! and a(k) = m(i,j), then ia(i) = k. moreover, ia(n+1) points to
! the first location following the last element in the last row.
! thus, the number of entries in the i-th row is ia(i+1) - ia(i),
! the nonzero entries in the i-th row are stored consecutively in
!
! a(ia(i)), a(ia(i)+1), ..., a(ia(i+1)-1),
!
! and the corresponding column indices are stored consecutively in
!
! ja(ia(i)), ja(ia(i)+1), ..., ja(ia(i+1)-1).
!
! since the coefficient matrix is symmetric, only the nonzero entries
! in the upper triangle need be stored. for example, the matrix
!
! ( 1 0 2 3 0 )
! ( 0 4 0 0 0 )
! m = ( 2 0 5 6 0 )
! ( 3 0 6 7 8 )
! ( 0 0 0 8 9 )
!
! could be stored as
!
! - 1 2 3 4 5 6 7 8 9 10 11 12 13
! ---+--------------------------------------
! ia - 1 4 5 8 12 14
! ja - 1 3 4 2 1 3 4 1 3 4 5 4 5
! a - 1 2 3 4 2 5 6 3 6 7 8 8 9
!
! or (symmetrically) as
!
! - 1 2 3 4 5 6 7 8 9
! ---+--------------------------
! ia - 1 4 5 7 9 10
! ja - 1 3 4 2 3 4 4 5 5
! a - 1 2 3 4 5 6 7 8 9 .
!
!
! parameters
!
! n - order of the matrix
!
! nja - number of nonzeroes in the matrix
!
! nsp - declared dimension of the one-dimensional array isp. nsp
! must be at least 3n+4k, where k is the number of nonzeroes
! in the strict upper triangle of m
!
! ia - integer one-dimensional array containing pointers to delimit
! rows in ja and a. dimension = n+1
!
! ja - integer one-dimensional array containing the column indices
! corresponding to the elements of a. dimension = number of
! nonzero entries in (the upper triangle of) m
!
! a - real one-dimensional array containing the nonzero entries in
! (the upper triangle of) m, stored by rows. dimension =
! number of nonzero entries in (the upper triangle of) m
!
! p - integer one-dimensional array used to return the permutation
! of the rows and columns of m corresponding to the minimum
! degree ordering. dimension = n
!
! ip - integer one-dimensional array used to return the inverse of
! the permutation returned in p. dimension = n
!
! isp - integer one-dimensional array used for working storage.
! dimension = nsp
!
! path - integer path specification. values and their meanings are -
! 1 find minimum degree ordering only
! 2 find minimum degree ordering and reorder symmetrically
! stored matrix (used when only the nonzero entries in
! the upper triangle of m are being stored)
! 3 reorder symmetrically stored matrix as specified by
! input permutation (used when an ordering has already
! been determined and only the nonzero entries in the
! upper triangle of m are being stored)
! 4 same as 2 but put diagonal entries at start of each row
! 5 same as 3 but put diagonal entries at start of each row
!
! flag - integer error flag. values and their meanings are -
! 0 no errors detected
! 9n+k insufficient storage in md
! 10n+1 insufficient storage in odrv
! 11n+1 illegal path specification
!
!
! conversion from real to double precision
!
! change the real declarations in odrv and sro to double precision
! declarations.
!
!-----------------------------------------------------------------------
!
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: nja
integer(I4B), intent(in) :: nsp
integer(I4B), dimension(n+1), intent(in) :: ia
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), dimension(n), intent(inout) :: p
integer(I4B), dimension(n), intent(inout) :: ip
integer(I4B), dimension(nsp), intent(inout) :: isp
integer(I4B), intent(inout) :: flag
! -- local
integer(I4B) :: v
integer(I4B) :: l
integer(I4B) :: head
integer(I4B) :: mmax
integer(I4B) :: next
integer(I4B) :: path
!
! set path for finding ordering only
!
path = 1
!
!
! initialize error flag and validate path specification
flag = 0
if (path < 1 .or. 5 < path) go to 111
!
! find minimum degree ordering
mmax = (nsp-n)/2
v = 1
l = v + mmax
head = l + mmax
next = head + n
if (mmax < n) go to 110
!
call ims_md(n, nja, ia, ja, mmax, isp(v), isp(l), isp(head), p, &
ip, isp(v), flag)
if (flag.ne.0) go to 100
!
2 return
!
! ** error -- error detected in md
! flag = 9 * n + vi from routine mdi.
!
100 return
! ** error -- insufficient storage
110 flag = 10*n + 1
return
! ** error -- illegal path specified
111 flag = 11*n + 1
return
end subroutine ims_odrv
subroutine ims_md(n, nja, ia, ja, mmax, v, l, head, last, next, &
mark, flag)
!
!*****************************************************************
! ims_md -- minimum degree algorithm (based on element model)
!*****************************************************************
!
! description
!
! ims_md finds a minimum degree ordering of the rows and
! columns of a general sparse matrix m stored in (ia,ja,a)
! format. when the structure of m is nonsymmetric, the ordering
! is that obtained for the symmetric matrix m + m-transpose.
!
!
! additional parameters
!
! mmax - declared dimension of the one-dimensional arrays v and l.
! mmax must be at least n+2k, where k is the number of
! nonzeroes in the strict upper triangle of m
!
! v - integer one-dimensional work array. dimension = mmax
!
! l - integer one-dimensional work array. dimension = mmax
!
! head - integer one-dimensional work array. dimension = n
!
! last - integer one-dimensional array used to return the permutation
! of the rows and columns of m corresponding to the minimum
! degree ordering. dimension = n
!
! next - integer one-dimensional array used to return the inverse of
! the permutation returned in last. dimension = n
!
! mark - integer one-dimensional work array (may be the same as v).
! dimension = n
!
! flag - integer error flag. values and their meanings are -
! 0 no errors detected
! 11n+1 insufficient storage in md
!
!
! definitions of internal parameters
!
! ---------+---------------------------------------------------------
! v(s) - value field of list entry
! ---------+---------------------------------------------------------
! l(s) - link field of list entry (0 =) end of list)
! ---------+---------------------------------------------------------
! l(vi) - pointer to element list of uneliminated vertex vi
! ---------+---------------------------------------------------------
! l(ej) - pointer to boundary list of active element ej
! ---------+---------------------------------------------------------
! head(d) - vj =) vj head of d-list d
! - 0 =) no vertex in d-list d
!
!
! - vi uneliminated vertex
! - vi in ek - vi not in ek
! ---------+-----------------------------+---------------------------
! next(vi) - undefined but nonnegative - vj =) vj next in d-list
! - - 0 =) vi tail of d-list
! ---------+-----------------------------+---------------------------
! last(vi) - (not set until mdp) - -d =) vi head of d-list d
! --vk =) compute degree - vj =) vj last in d-list
! - ej =) vi prototype of ej - 0 =) vi not in any d-list
! - 0 =) do not compute degree -
! ---------+-----------------------------+---------------------------
! mark(vi) - mark(vk) - nonneg. tag .lt. mark(vk)
!
!
! - vi eliminated vertex
! - ei active element - otherwise
! ---------+-----------------------------+---------------------------
! next(vi) - -j =) vi was j-th vertex - -j =) vi was j-th vertex
! - to be eliminated - to be eliminated
! ---------+-----------------------------+---------------------------
! last(vi) - m =) size of ei = m - undefined
! ---------+-----------------------------+---------------------------
! mark(vi) - -m =) overlap count of ei - undefined
! - with ek = m -
! - otherwise nonnegative tag -
! - .lt. mark(vk) -
!
!-----------------------------------------------------------------------
!
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: nja
integer(I4B), dimension(n+1), intent(in) :: ia
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), intent(in) :: mmax
integer(I4B), dimension(mmax), intent(inout) :: v
integer(I4B), dimension(mmax), intent(inout) :: l
integer(I4B), dimension(n), intent(inout) :: head
integer(I4B), dimension(n), intent(inout) :: last
integer(I4B), dimension(n), intent(inout) :: next
integer(I4B), dimension(n), intent(inout) :: mark
integer(I4B), intent(inout) :: flag
! -- local
integer(I4B) :: tag
integer(I4B) :: dmin
integer(I4B) :: vk
integer(I4B) :: ek
integer(I4B) :: tail
integer(I4B) :: k
! -- vk and ek deliberately share storage: in the element model the
!    vertex vk being eliminated becomes the new element ek
equivalence(vk, ek)
!
! initialization: build element lists and degree lists from (ia,ja)
tag = 0
call ims_mdi(n, nja, ia, ja, mmax ,v, l, head, last, next, &
mark, tag, flag)
if (flag.ne.0) return
!
! k counts eliminated vertices; dmin is a lower bound on the smallest
! degree currently present in the degree lists
k = 0
dmin = 1
!
! while k .lt. n do
1 if (k >= n) go to 4
!
! search for vertex of minimum degree
! (scan upward from dmin; ims_mdu lowers dmin again whenever a
! degree update produces a smaller degree)
2 if (head(dmin) > 0) go to 3
dmin = dmin + 1
go to 2
!
! remove vertex vk of minimum degree from degree list
3 vk = head(dmin)
head(dmin) = next(vk)
if (head(dmin) > 0) last(head(dmin)) = -dmin
!
! number vertex vk, adjust tag, and tag vk
! (next(vk) = -k records the elimination order; last(ek) holds the
! boundary size of the element that vk becomes)
k = k+1
next(vk) = -k
last(ek) = dmin - 1
tag = tag + last(ek)
mark(vk) = tag
!
! form element ek from uneliminated neighbors of vk
call ims_mdm(n, mmax, vk, tail, v, l, last, next, mark)
!
! purge inactive elements and do mass elimination
call ims_mdp(n, mmax, k, ek, tail, v, l, head, last, next, mark)
!
! update degrees of uneliminated vertices in ek
call ims_mdu(n, mmax, ek, dmin, v, l, head, last, next, mark)
!
go to 1
!
! generate inverse permutation from permutation
! (next(vi) holds -order at this point; flip the sign and scatter
! to build last as the forward permutation)
4 do k = 1, n
next(k) = -next(k)
last(next(k)) = k
end do
!
return
end subroutine ims_md
subroutine ims_mdi(n, nja, ia, ja, mmax, v, l, head, last, next, &
mark, tag, flag)
!
!***********************************************************************
! ims_mdi -- initialization
!***********************************************************************
! builds, for the symmetric structure of m + m-transpose, the vertex
! element lists (v,l) and the degree lists (head,next,last), and
! finally resets the mark vector to tag.  on exit flag = 0 on
! success, or 9*n + vi if the v/l storage pool was exhausted while
! processing row vi.
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: nja
integer(I4B), dimension(n+1), intent(in) :: ia
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), intent(in) :: mmax
integer(I4B), dimension(mmax), intent(inout) :: v
integer(I4B), dimension(mmax), intent(inout) :: l
integer(I4B), dimension(n), intent(inout) :: head
integer(I4B), dimension(n), intent(inout) :: last
integer(I4B), dimension(n), intent(inout) :: next
integer(I4B), dimension(n), intent(inout) :: mark
integer(I4B), intent(in) :: tag
integer(I4B), intent(inout) :: flag
! -- local
integer(I4B) :: sfs
integer(I4B) :: vi
integer(I4B) :: dvi
integer(I4B) :: vj
integer(I4B) :: jmin
integer(I4B) :: jmax
integer(I4B) :: j
integer(I4B) :: lvk
integer(I4B) :: kmax
integer(I4B) :: k
integer(I4B) :: nextvi
integer(I4B) :: ieval
!
! initialize degrees, element lists, and degree lists
! (mark(vi) temporarily holds 1 + degree of vi while the adjacency
! lists are being built)
do vi = 1, n
mark(vi) = 1
l(vi) = 0
head(vi) = 0
end do
! sfs = first free slot in the list storage pool v(n+1:mmax)
sfs = n + 1
!
! create nonzero structure
! for each nonzero entry a(vi,vj)
louter: do vi = 1, n
jmin = ia(vi)
jmax = ia(vi+1) - 1
if (jmin > jmax) cycle louter
linner1: do j = jmin, jmax !5
vj = ja(j)
!if (vj-vi) 2, 5, 4
! three-way branch on vj-vi (replaces the old arithmetic if):
! zero -> diagonal entry, skip; positive -> upper triangle, enter;
! negative -> lower triangle, first check for a prior a(vj,vi)
ieval = vj - vi
if (ieval == 0) cycle linner1 !5
if (ieval > 0) go to 4
!
! if a(vi,vj) is in strict lower triangle
! check for previous occurrence of a(vj,vi)
! (walk the mark(vi)-1 entries already chained onto vi's list)
2 lvk = vi
kmax = mark(vi) - 1
if (kmax == 0) go to 4
linner2: do k = 1, kmax
lvk = l(lvk)
if (v(lvk) == vj) cycle linner1 !5
end do linner2
! for unentered entries a(vi,vj)
4 if (sfs >= mmax) go to 101
!
! enter vj in element list for vi
mark(vi) = mark(vi) + 1
v(sfs) = vj
l(sfs) = l(vi)
l(vi) = sfs
sfs = sfs+1
!
! enter vi in element list for vj
! (the symmetric entry, so m + m-transpose is represented)
mark(vj) = mark(vj) + 1
v(sfs) = vi
l(sfs) = l(vj)
l(vj) = sfs
sfs = sfs + 1
end do linner1
end do louter
!
! create degree lists and initialize mark vector
! (vertices of equal degree d are chained through next/last with
! head(d) pointing at the first; the list head stores -d in last)
do vi = 1, n
dvi = mark(vi)
next(vi) = head(dvi)
head(dvi) = vi
last(vi) = -dvi
nextvi = next(vi)
if (nextvi > 0) last(nextvi) = vi
mark(vi) = tag
end do
!
return
!
! ** error- insufficient storage
101 flag = 9*n + vi
return
end subroutine ims_mdi
subroutine ims_mdm(n, mmax, vk, tail, v, l, last, next, mark)
!
!***********************************************************************
! ims_mdm -- form element from uneliminated neighbors of vk
!***********************************************************************
! walks the element list of the vertex vk being eliminated and
! gathers every uneliminated neighbor -- directly adjacent vertices
! and vertices on the boundary of adjacent active elements -- into a
! single list chained through l, starting at l(vk) and terminated by
! l(tail) = 0.  each visited vertex is stamped with tag = mark(vk)
! so it is appended at most once; absorbed elements are marked
! inactive with the same tag.
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: mmax
integer(I4B), intent(in) :: vk
integer(I4B), intent(inout) :: tail
integer(I4B), dimension(mmax), intent(inout) :: v
integer(I4B), dimension(mmax), intent(inout) :: l
integer(I4B), dimension(n), intent(inout) :: last
integer(I4B), dimension(n), intent(inout) :: next
integer(I4B), dimension(n), intent(inout) :: mark
! -- local
integer(I4B) :: tag
integer(I4B) :: s
integer(I4B) :: ls
integer(I4B) :: vs
integer(I4B) :: es
integer(I4B) :: b
integer(I4B) :: lb
integer(I4B) :: vb
integer(I4B) :: blp
integer(I4B) :: blpmax
! -- vs/es share storage: entry v(s) denotes a vertex when
!    next(v(s)) >= 0 and an eliminated element otherwise
equivalence (vs, es)
!
! initialize tag and list of uneliminated neighbors
tag = mark(vk)
tail = vk
!
! for each vertex/element vs/es in element list of vk
ls = l(vk)
1 s = ls
if (s == 0) go to 5
ls = l(s)
vs = v(s)
! next(vs) < 0 means vs was already eliminated, i.e. es is an element
if (next(vs) < 0) go to 2
!
! if vs is uneliminated vertex, then tag and append to list of
! uneliminated neighbors
mark(vs) = tag
l(tail) = s
tail = s
go to 4
!
! if es is active element, then ...
! for each vertex vb in boundary list of element es
! (last(es) holds the boundary size, so the walk is bounded)
2 lb = l(es)
blpmax = last(es)
louter: do blp = 1, blpmax !3
b = lb
lb = l(b)
vb = v(b)
!
! if vb is untagged vertex, then tag and append to list of
! uneliminated neighbors
if (mark(vb) >= tag) cycle louter !3
mark(vb) = tag
l(tail) = b
tail = b
end do louter
!
! mark es inactive
! (its boundary has been absorbed into the new element)
mark(es) = tag
!
4 go to 1
!
! terminate list of uneliminated neighbors
5 l(tail) = 0
!
return
end subroutine ims_mdm
subroutine ims_mdp(n, mmax, k, ek, tail, v, l, head, last, next, mark)
!
!***********************************************************************
! ims_mdp -- purge inactive elements and do mass elimination
!***********************************************************************
! scans the boundary list of the newly formed element ek (built by
! ims_mdm): each boundary vertex vi is unlinked from its degree list
! and its element list is stripped of elements absorbed into ek.
! vertices whose entire remaining adjacency is ek (interior
! vertices) are eliminated immediately under the same pivot step
! (mass elimination); the rest are classified as prototype,
! duplicate, or ordinary so ims_mdu can update degrees cheaply.
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: mmax
integer(I4B), intent(inout) :: k
integer(I4B), intent(in) :: ek
integer(I4B), intent(inout) :: tail
integer(I4B), dimension(mmax), intent(inout) :: v
integer(I4B), dimension(mmax), intent(inout) :: l
integer(I4B), dimension(n), intent(inout) :: head
integer(I4B), dimension(n), intent(inout) :: last
integer(I4B), dimension(n), intent(inout) :: next
integer(I4B), dimension(n), intent(inout) :: mark
! -- local
integer(I4B) :: tag
integer(I4B) :: free
integer(I4B) :: li
integer(I4B) :: vi
integer(I4B) :: lvi
integer(I4B) :: evi
integer(I4B) :: s
integer(I4B) :: ls
integer(I4B) :: es
integer(I4B) :: ilp
integer(I4B) :: ilpmax
integer(I4B) :: i
!
! initialize tag
tag = mark(ek)
!
! for each vertex vi in ek
li = ek
ilpmax = last(ek)
if (ilpmax <= 0) go to 12
louter: do ilp = 1, ilpmax !11
i = li
li = l(i)
vi = v(li)
!
! remove vi from degree list
! (last(vi) < 0: vi heads d-list -last(vi); last(vi) > 0: its
! predecessor is last(vi); last(vi) == 0: vi is in no d-list)
if (last(vi) == 0) go to 3
if (last(vi) > 0) go to 1
head(-last(vi)) = next(vi)
go to 2
1 next(last(vi)) = next(vi)
2 if (next(vi) > 0) last(next(vi)) = last(vi)
!
! remove inactive items from element list of vi
! (elements absorbed into ek carry mark >= tag; "free" remembers
! the storage slot vacated last, for reuse at label 10 below)
3 ls = vi
4 s = ls
ls = l(s)
if (ls == 0) go to 6
es = v(ls)
if (mark(es) < tag) go to 5
free = ls
l(s) = l(ls)
ls = s
5 go to 4
!
! if vi is interior vertex, then remove from list and eliminate
! (mass elimination: vi is numbered now without forming an element)
6 lvi = l(vi)
if (lvi.ne.0) go to 7
l(i) = l(li)
li = i
!
k = k + 1
next(vi) = -k
last(ek) = last(ek) - 1
cycle louter !11
!
! else ...
! classify vertex vi
! (single-element adjacency to an eliminated element evi makes vi a
! prototype or duplicate of evi; everything else gets a fresh degree)
7 if (l(lvi).ne.0) go to 9
evi = v(lvi)
if (next(evi) >= 0) go to 9
if (mark(evi) < 0) go to 8
!
! if vi is prototype vertex, then mark as such, initialize
! overlap count for corresponding element, and move vi to end
! of boundary list
last(vi) = evi
mark(evi) = -1
l(tail) = li
tail = li
l(i) = l(li)
li = i
go to 10
!
! else if vi is duplicate vertex, then mark as such and adjust
! overlap count for corresponding element
8 last(vi) = 0
mark(evi) = mark(evi) - 1
go to 10
!
! else mark vi to compute degree
9 last(vi) = -ek
!
! insert ek in element list of vi
! (reuses the slot recorded in "free" by the purge loop above)
10 v(free) = ek
l(free) = l(vi)
l(vi) = free
end do louter !11
!
! terminate boundary list
12 l(tail) = 0
!
return
end subroutine ims_mdp
subroutine ims_mdu(n, mmax, ek, dmin, v, l, head, last, next, mark)
!
!***********************************************************************
! ims_mdu -- update degrees of uneliminated vertices in ek
!***********************************************************************
! computes the external degree of every vertex on the boundary of
! the newly formed element ek and reinserts each vertex into the
! appropriate degree list, lowering dmin whenever a smaller degree
! appears.  prototype vertices use the inclusion/exclusion shortcut
! prepared by ims_mdp; outmatched vertices only adjust overlap
! counts and are not reinserted.
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), intent(in) :: mmax
integer(I4B), intent(in) :: ek
integer(I4B), intent(inout) :: dmin
integer(I4B), dimension(mmax), intent(inout) :: v
integer(I4B), dimension(mmax), intent(inout) :: l
integer(I4B), dimension(n), intent(inout) :: head
integer(I4B), dimension(n), intent(inout) :: last
integer(I4B), dimension(n), intent(inout) :: next
integer(I4B), dimension(n), intent(inout) :: mark
! -- local
integer(I4B) :: tag
integer(I4B) :: vi
integer(I4B) :: evi
integer(I4B) :: dvi
integer(I4B) :: s
integer(I4B) :: vs
integer(I4B) :: es
integer(I4B) :: b
integer(I4B) :: vb
integer(I4B) :: ilp
integer(I4B) :: ilpmax
integer(I4B) :: blp
integer(I4B) :: blpmax
integer(I4B) :: i
! -- vs/es share storage: entry v(s) denotes a vertex when
!    next(v(s)) >= 0 and an element otherwise
equivalence (vs, es)
!
! initialize tag
! (a fresh tag per merged vertex avoids clearing mark between
! merges; start below mark(ek) so stamps from ims_mdm don't collide)
tag = mark(ek) - last(ek)
!
! for each vertex vi in ek
i = ek
ilpmax = last(ek)
if (ilpmax <= 0) go to 11
louter: do ilp = 1, ilpmax !10
i = l(i)
vi = v(i)
!if (last(vi)) 1, 10, 8
! three-way branch on last(vi) (replaces the old arithmetic if):
! zero -> duplicate vertex, already covered, skip; positive ->
! prototype, shortcut at label 8; negative -> ordinary, merge below
if (last(vi) == 0) cycle louter !10
if (last(vi) > 0) goto 8
!
! if vi neither prototype nor duplicate vertex, then merge elements
! to compute degree
1 tag = tag + 1
dvi = last(ek)
!
! for each vertex/element vs/es in element list of vi
s = l(vi)
2 s = l(s)
if (s == 0) go to 9
vs = v(s)
if (next(vs) < 0) go to 3
!
! if vs is uneliminated vertex, then tag and adjust degree
mark(vs) = tag
dvi = dvi + 1
go to 5
!
! if es is active element, then expand
! check for outmatched vertex
3 if (mark(es) < 0) go to 6
!
! for each vertex vb in es
b = es
blpmax = last(es)
linner: do blp = 1, blpmax !4
b = l(b)
vb = v(b)
!
! if vb is untagged, then tag and adjust degree
if (mark(vb) >= tag) cycle linner !4
mark(vb) = tag
dvi = dvi + 1
end do linner !4
!
5 go to 2
!
! else if vi is outmatched vertex, then adjust overlaps but do not
! compute degree
! (finish walking the rest of vi's element list at label 7)
6 last(vi) = 0
mark(es) = mark(es) - 1
7 s = l(s)
if (s == 0) cycle louter !10
es = v(s)
if (mark(es) < 0) mark(es) = mark(es) - 1
go to 7
!
! else if vi is prototype vertex, then calculate degree by
! inclusion/exclusion and reset overlap count
8 evi = last(vi)
dvi = last(ek) + last(evi) + mark(evi)
mark(evi) = 0
!
! insert vi in appropriate degree list
! (and lower dmin so the caller's min-degree scan stays correct)
9 next(vi) = head(dvi)
head(dvi) = vi
last(vi) = -dvi
if (next(vi) > 0) last(next(vi)) = vi
if (dvi < dmin) dmin = dvi
!
end do louter !10
!
11 return
end subroutine ims_mdu
!
! ROUTINES FROM SPARSKIT TO PERMUTE A LINEAR SYSTEM OF EQUATIONS
! IN ORDER TO REORDER THE MATRIX TO MINIMIZE THE BANDWIDTH USING
! THE REVERSE CUTHILL-McKEE OR MINIMUM DEGREE ORDERING ALGORITHMS
!
subroutine ims_dperm(nrow, nja, a, ja, ia, ao, jao, iao, &
perm, qperm, job)
implicit none
! -- dummy variables
integer(I4B), intent(in) :: nrow
integer(I4B), intent(in) :: nja
real(DP), dimension(nja), intent(in) :: a
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), dimension(nrow+1), intent(in) :: ia
real(DP), dimension(nja), intent(inout) :: ao
integer(I4B), dimension(nja), intent(inout) :: jao
integer(I4B), dimension(nrow+1), intent(inout) :: iao
integer(I4B), dimension(nrow), intent(inout) :: perm
integer(I4B), dimension(nrow), intent(inout) :: qperm
integer(I4B), intent(in) :: job
!-----------------------------------------------------------------------
! This routine permutes the rows and columns of a matrix stored in CSR
! format. i.e., it computes P A Q, where P, Q are permutation matrices.
! P maps row i into row perm(i) and Q maps column j into column qperm(j)
! a(i,j) becomes a(perm(i),qperm(j)) in new matrix
! In the particular case where Q is the transpose of P (symmetric
! permutation of A) then qperm is not needed.
! note that qperm should be of length ncol (number of columns) but this
! is not checked.
!-----------------------------------------------------------------------
! Y. Saad, Sep. 21 1989 / recoded Jan. 28 1991.
!-----------------------------------------------------------------------
! on entry:
!----------
! nrow = dimension of the matrix
! a, ja,
! ia = input matrix in a, ja, ia format
! perm = integer array of length nrow containing the permutation array
! for the rows: perm(i) is the destination of row i in the
! permuted matrix -- also the destination of column i in case
! permutation is symmetric (job .le. 2)
!
! qperm = same thing for the columns. This should be provided only
! if job=3 or job=4, i.e., only in the case of a nonsymmetric
! permutation of rows and columns. Otherwise qperm is a dummy
!
! job = integer indicating the work to be done:
! * job = 1,2 permutation is symmetric Ao :== P * A * transp(P)
! job = 1 permute a, ja, ia into ao, jao, iao
! job = 2 permute matrix ignoring real values.
! * job = 3,4 permutation is non-symmetric Ao :== P * A * Q
! job = 3 permute a, ja, ia into ao, jao, iao
! job = 4 permute matrix ignoring real values.
!
! on return:
!-----------
! ao, jao, iao = output matrix in a, ja, ia format
!
! in case job .eq. 2 or job .eq. 4, a and ao are never referred to
! and can be dummy arguments.
! Notes:
!-------
! 1) algorithm is in place
! 2) column indices may not be sorted on return even though they may be
! on entry.
!----------------------------------------------------------------------
! -- local
! note: a local integer named "mod" was previously declared here as
! well; it needlessly shadowed the name of the MOD intrinsic and has
! been removed (MOD needs no declaration under implicit none).
integer(I4B) :: locjob
!
! locjob indicates whether or not real values must be copied:
! odd job values (1 and 3) request copying of a into ao
locjob = mod(job, 2)
!
! permute rows first: (ao,jao,iao) <- P * (a,ja,ia)
!
call ims_rperm(nrow, nja, a, ja, ia, ao, jao, iao, perm, locjob)
!
! then permute columns in place on (ao,jao,iao); the values were
! already handled by the row pass, so the column pass copies nothing
!
locjob = 0
!
if (job .le. 2) then
! -- symmetric permutation: columns use the row permutation perm
call ims_cperm(nrow, nja, ao, jao, iao, ao, jao, iao, perm, locjob)
else
! -- nonsymmetric permutation: columns use qperm
call ims_cperm(nrow, nja, ao, jao, iao, ao, jao, iao, qperm, locjob)
endif
!
return
!-------end-of-ims_dperm----------------------------------------------------
end subroutine ims_dperm
!-----------------------------------------------------------------------
subroutine ims_rperm (nrow, nja, a, ja, ia, ao, jao, iao, perm, job)
implicit none
! -- dummy variables
integer(I4B), intent(in) :: nrow
integer(I4B), intent(in) :: nja
real(DP), dimension(nja), intent(in) :: a
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), dimension(nrow+1), intent(in) :: ia
real(DP), dimension(nja), intent(inout) :: ao
integer(I4B), dimension(nja), intent(inout) :: jao
integer(I4B), dimension(nrow+1), intent(inout) :: iao
integer(I4B), dimension(nrow), intent(inout) :: perm
integer(I4B), intent(in) :: job
!-----------------------------------------------------------------------
! permute the rows of a CSR matrix: computes B = P A, where the
! permutation P sends row j of (a,ja,ia) to row perm(j) of
! (ao,jao,iao).  when job = 1 the real values are copied along with
! the structure; for any other job only (jao,iao) are produced and
! the arrays a and ao are never touched.
! based on Y. Saad's SPARSKIT rperm (recoded Jan 28, 1991).
!-----------------------------------------------------------------------
! -- local
logical :: copy_values
integer(I4B) :: irow
integer(I4B) :: idest
integer(I4B) :: ipos
integer(I4B) :: icol
!
copy_values = (job == 1)
!
! -- pass 1: record each source row's length in the slot of its
!    destination row (shifted by one for the running sum below)
do irow = 1, nrow
idest = perm(irow)
iao(idest+1) = ia(irow+1) - ia(irow)
end do
!
! -- pass 2: running sum turns the lengths into CSR row pointers
iao(1) = 1
do irow = 1, nrow
iao(irow+1) = iao(irow+1) + iao(irow)
end do
!
! -- pass 3: move every row to its destination, copying the real
!    values only when requested
do irow = 1, nrow
ipos = iao(perm(irow))
do icol = ia(irow), ia(irow+1)-1
jao(ipos) = ja(icol)
if (copy_values) ao(ipos) = a(icol)
ipos = ipos + 1
end do
end do
!
return
!---------end-of-ims_rperm -------------------------------------------------
end subroutine ims_rperm
!-----------------------------------------------------------------------
subroutine ims_cperm (nrow, nja, a, ja, ia, ao, jao, iao, perm, job)
implicit none
! -- dummy variables
integer(I4B), intent(in) :: nrow
integer(I4B), intent(in) :: nja
real(DP), dimension(nja), intent(in) :: a
integer(I4B), dimension(nja), intent(in) :: ja
integer(I4B), dimension(nrow+1), intent(in) :: ia
real(DP), dimension(nja), intent(inout) :: ao
integer(I4B), dimension(nja), intent(inout) :: jao
integer(I4B), dimension(nrow+1), intent(inout) :: iao
integer(I4B), dimension(nrow), intent(inout) :: perm
integer(I4B), intent(in) :: job
!-----------------------------------------------------------------------
! permute the columns of a CSR matrix: computes B = A P, where the
! permutation P maps column j into column perm(j), i.e. a(i,j)
! becomes a(i,perm(j)).  row pointers are unaffected by a column
! permutation, so when job = 1 the pointers and values are simply
! replicated into (iao,ao); for any other job only jao is written.
! the routine works in place: ja and jao may be the same array.
! note that on return the column indices of a row may no longer be
! sorted, even if they were sorted on entry.
! based on Y. Saad's SPARSKIT cperm (May 2, 1990 / Jan 28, 1991).
!-----------------------------------------------------------------------
! -- local
integer(I4B) :: ipos
!
! -- relabel every stored column index through the permutation
do ipos = 1, nja
jao(ipos) = perm(ja(ipos))
end do
!
! -- structure-only request: leave iao and ao untouched
if (job .ne. 1) return
!
! -- full request: replicate the row pointers ...
do ipos = 1, nrow+1
iao(ipos) = ia(ipos)
end do
!
! -- ... and the real values (a column permutation reorders nothing
!    within the storage, so a straight copy is correct)
do ipos = 1, nja
ao(ipos) = a(ipos)
end do
!
return
!---------end-of-ims_cperm--------------------------------------------------
end subroutine ims_cperm
!-----------------------------------------------------------------------
subroutine ims_vperm (n, x, perm)
implicit none
! -- dummy variables
integer(I4B), intent(in) :: n
integer(I4B), dimension(n), intent(inout) :: perm
real(DP), dimension(n), intent(inout) :: x
!-----------------------------------------------------------------------
! this subroutine performs an in-place permutation of a real vector x
! according to the permutation array perm(*), i.e., on return,
! the vector x satisfies,
!
! x(perm(j)) :== x(j), j=1,2,.., n
!
!-----------------------------------------------------------------------
! on entry:
!---------
! n = length of vector x.
! perm = integer array of length n containing the permutation array.
! x = input vector
!
! on return:
!----------
! x = vector x permuted according to x(perm(*)) := x(*)
!
! implementation note: the permutation is applied cycle by cycle.
! each entry visited has its perm value negated as a "done" flag;
! when a cycle closes, the scan at label 65 locates the start of the
! next unvisited cycle.  all signs are restored at label 101, so
! perm is returned unchanged even though it is intent(inout).
!----------------------------------------------------------------------c
! Y. Saad, Sep. 21 1989 c
!----------------------------------------------------------------------c
! -- local
integer(I4B) :: j
integer(I4B) :: k
integer(I4B) :: ii
integer(I4B) :: init
integer(I4B) :: next
real(DP) :: tmp, tmp1
!
! start the first cycle at position 1; tmp carries the value being
! chased around the cycle and k counts the elements placed so far
init = 1
tmp = x(init)
ii = perm(init)
perm(init)= -perm(init)
k = 0
!
! loop
!
6 k = k + 1
!
! save the chased element --
!
tmp1 = x(ii)
x(ii) = tmp
next = perm(ii)
! a negative link means we are back at an already-visited entry:
! the current cycle is complete
if (next < 0 ) goto 65
!
! test for end
!
if (k > n) go to 101
tmp = tmp1
perm(ii) = -perm(ii)
ii = next
!
! end loop
!
go to 6
!
! reinitialize cycle --
! (skip entries whose perm is already negated, i.e. already placed)
65 init = init + 1
if (init > n) go to 101
if (perm(init) < 0) go to 65
tmp = x(init)
ii = perm(init)
perm(init)= -perm(init)
go to 6
!
! restore the signs of perm so the permutation array is returned
! unmodified
101 continue
do j = 1, n
perm(j) = -perm(j)
end do
!
return
!-------------------end-of-ims_vperm---------------------------------------
!-----------------------------------------------------------------------
end subroutine ims_vperm
end module IMSReorderingModule
|
{"hexsha": "740324485e9dc29010b828bf6058d81f3ad41c13", "size": 60817, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/Solution/SparseMatrixSolver/ims8reordering.f90", "max_stars_repo_name": "maseology/mmMODFLOW6", "max_stars_repo_head_hexsha": "c4cfd2a5b52a80886142c2048ab44e0486516ae9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-07-04T12:20:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-30T02:56:07.000Z", "max_issues_repo_path": "src/Solution/SparseMatrixSolver/ims8reordering.f90", "max_issues_repo_name": "maseology/mmMODFLOW6", "max_issues_repo_head_hexsha": "c4cfd2a5b52a80886142c2048ab44e0486516ae9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-13T18:34:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-02T15:27:53.000Z", "max_forks_repo_path": "src/Solution/SparseMatrixSolver/ims8reordering.f90", "max_forks_repo_name": "maseology/mmMODFLOW6", "max_forks_repo_head_hexsha": "c4cfd2a5b52a80886142c2048ab44e0486516ae9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6588306209, "max_line_length": 84, "alphanum_fraction": 0.4492987158, "num_tokens": 15850}
|
import io
import matplotlib.pyplot as plt
import numpy as np
import telegram
import torch
import torchvision
from PIL import Image
from trixi.logger.plt.numpyseabornplotlogger import NumpySeabornPlotLogger
class TelegramLogger(NumpySeabornPlotLogger):
"""
Telegram logger, inherits the AbstractLogger and sends plots/logs to a chat via a Telegram bot.
"""
def __init__(self, token, chat_id, exp_name=None, **kwargs):
"""
Creates a new TelegramLogger object.
Args:
token (str): The token of the Telegram bot used.
chat_id (str): The chat ID for the chat between the user and the Telegram bot.
"""
super(TelegramLogger, self).__init__(**kwargs)
self.token = token
self.chat_id = chat_id
self.bot = telegram.Bot(token=self.token)
self.exp_name = exp_name
def show_text(self, text, **kwargs):
"""
Sends a text to a chat using an existing Telegram bot.
Args:
text (str): Text message to be sent to the bot.
"""
if self.exp_name is not None:
text = self.exp_name + ":\n" + text
try:
self.bot.send_message(chat_id=self.chat_id, text=text)
except:
print("Could not send text to telegram")
def show_image(self, image_path, **kwargs):
"""
Sends an image file to a chat using an existing Telegram bot.
Args:
image_path (str): Path to the image file to be sent to the chat.
"""
try:
self.bot.send_photo(chat_id=self.chat_id, photo=open(image_path, 'rb'))
except:
print("Could not send image to telegram")
def show_image_grid(self, image_array, name=None, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0, **kwargs):
"""
Sends an array of images to a chat using an existing Telegram bot. (Requires torch and torchvision)
Args:
image_array (np.narray / torch.tensor): Image array/ tensor which will be sent as an image grid
make_grid_kargs: Key word arguments for the torchvision make grid method
"""
caption = ""
if self.exp_name is not None:
caption += self.exp_name + " "
if name is not None:
caption += name + " "
if isinstance(image_array, np.ndarray):
image_array = torch.from_numpy(image_array)
buf = io.BytesIO()
image_array = image_array.cpu()
grid = torchvision.utils.make_grid(image_array, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
im = Image.fromarray(ndarr)
im.save(buf, format="png")
buf.seek(0)
try:
self.bot.send_photo(chat_id=self.chat_id, photo=buf, caption=caption)
except:
print("Could not send image_grid to telegram")
def show_value(self, value, name, counter=None, tag=None, **kwargs):
"""
Sends a value to a chat using an existing Telegram bot.
Args:
value: Value to be plotted sent to the chat.
name: Name for the plot.
counter: Optional counter to be sent in conjunction with the value.
tag: Tag to be used as a label for the plot.
"""
caption = ""
if self.exp_name is not None:
caption += self.exp_name + " "
if name is not None:
caption += name + " "
buf = io.BytesIO()
figure = NumpySeabornPlotLogger.show_value(self, value, name, counter, tag)
figure.savefig(buf, format='png')
buf.seek(0)
try:
self.bot.send_photo(chat_id=self.chat_id, photo=buf, caption=caption)
except:
print("Could not send plot to telegram")
plt.close(figure)
    def show_barplot(self, *args, **kwargs):
        """Not supported by the Telegram logger; intentionally a no-op."""
        pass
    def show_lineplot(self, *args, **kwargs):
        """Not supported by the Telegram logger; intentionally a no-op."""
        pass
    def show_scatterplot(self, *args, **kwargs):
        """Not supported by the Telegram logger; intentionally a no-op."""
        pass
    def show_piechart(self, *args, **kwargs):
        """Not supported by the Telegram logger; intentionally a no-op."""
        pass
def print(self, text, **kwargs):
"""Just calls show_text()"""
self.show_text(text, **kwargs)
|
{"hexsha": "b95c3f20d1d43ad359f23c3a0fe6e654d579ad8d", "size": 4402, "ext": "py", "lang": "Python", "max_stars_repo_path": "trixi/logger/message/telegramlogger.py", "max_stars_repo_name": "pfjaeger/trixi", "max_stars_repo_head_hexsha": "53f5c03ea3a955805c26037e0e9e4d135aec7652", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-08T05:00:25.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-08T05:00:25.000Z", "max_issues_repo_path": "trixi/logger/message/telegramlogger.py", "max_issues_repo_name": "pfjaeger/trixi", "max_issues_repo_head_hexsha": "53f5c03ea3a955805c26037e0e9e4d135aec7652", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trixi/logger/message/telegramlogger.py", "max_forks_repo_name": "pfjaeger/trixi", "max_forks_repo_head_hexsha": "53f5c03ea3a955805c26037e0e9e4d135aec7652", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8507462687, "max_line_length": 108, "alphanum_fraction": 0.5954111767, "include": true, "reason": "import numpy", "num_tokens": 997}
|
from numpy import inf, nan
from sklearn.linear_model import RANSACRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _RANSACRegressorImpl:
    """Thin wrapper adapting sklearn's RANSACRegressor to the lale operator API."""

    def __init__(self, **hyperparams):
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped estimator; ``y`` is forwarded only when provided."""
        if y is None:
            self._wrapped_model.fit(X)
        else:
            self._wrapped_model.fit(X, y)
        return self

    def predict(self, X):
        """Delegate prediction to the wrapped estimator."""
        return self._wrapped_model.predict(X)
# JSON schema for the constructor hyperparameters.
# NOTE(review): this module looks auto-generated; the "XXX TODO XXX" entries
# and the oversized `loss` enum below are unresolved generator artifacts --
# verify against sklearn's RANSACRegressor docs before tightening them.
_hyperparams_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "inherited docstring for RANSACRegressor RANSAC (RANdom SAmple Consensus) algorithm.",
    "allOf": [
        {
            "type": "object",
            "required": [
                "base_estimator",
                "min_samples",
                "residual_threshold",
                "is_data_valid",
                "is_model_valid",
                "max_trials",
                "max_skips",
                "stop_n_inliers",
                "stop_score",
                "stop_probability",
                "loss",
                "random_state",
            ],
            # Subset of hyperparameters a hyperopt-style optimizer may tune.
            "relevantToOptimizer": [
                "min_samples",
                "max_trials",
                "max_skips",
                "stop_n_inliers",
                "loss",
            ],
            "additionalProperties": False,
            "properties": {
                "base_estimator": {
                    "anyOf": [{"type": "object"}, {"enum": [None]}],
                    "default": None,
                    "description": "Base estimator object which implements the following methods: * `fit(X, y)`: Fit model to given training data and target values",
                },
                "min_samples": {
                    "XXX TODO XXX": "int (>= 1) or float ([0, 1]), optional",
                    "description": "Minimum number of samples chosen randomly from original data",
                    "anyOf": [
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.0,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                        {"enum": [None]},
                    ],
                    "default": None,
                },
                "residual_threshold": {
                    "anyOf": [{"type": "number"}, {"enum": [None]}],
                    "default": None,
                    "description": "Maximum residual for a data sample to be classified as an inlier",
                },
                "is_data_valid": {
                    "anyOf": [{"laleType": "callable"}, {"enum": [None]}],
                    "default": None,
                    "description": "This function is called with the randomly selected data before the model is fitted to it: `is_data_valid(X, y)`",
                },
                "is_model_valid": {
                    "anyOf": [{"laleType": "callable"}, {"enum": [None]}],
                    "default": None,
                    "description": "This function is called with the estimated model and the randomly selected data: `is_model_valid(model, X, y)`",
                },
                "max_trials": {
                    "type": "integer",
                    # NOTE(review): [100, 101] is a degenerate search range --
                    # presumably a generator artifact; confirm before widening.
                    "minimumForOptimizer": 100,
                    "maximumForOptimizer": 101,
                    "distribution": "uniform",
                    "default": 100,
                    "description": "Maximum number of iterations for random sample selection.",
                },
                "max_skips": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.0,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                    ],
                    "default": inf,
                    "description": "Maximum number of iterations that can be skipped due to finding zero inliers or invalid data defined by ``is_data_valid`` or invalid models defined by ``is_model_valid``",
                },
                "stop_n_inliers": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.0,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                    ],
                    "default": inf,
                    "description": "Stop iteration if at least this number of inliers are found.",
                },
                "stop_score": {
                    "type": "number",
                    "default": inf,
                    "description": "Stop iteration if score is greater equal than this threshold.",
                },
                "stop_probability": {
                    "XXX TODO XXX": "float in range [0, 1], optional",
                    "description": "RANSAC iteration stops if at least one outlier-free set of the training data is sampled in RANSAC",
                    "type": "number",
                    "default": 0.99,
                },
                "loss": {
                    # NOTE(review): sklearn's RANSACRegressor only documents
                    # "absolute_loss", "squared_loss", or a callable; the extra
                    # enum values below appear to be generator noise.
                    "anyOf": [
                        {"laleType": "callable", "forOptimizer": False},
                        {
                            "enum": [
                                "X[i]",
                                "absolute_loss",
                                "deviance",
                                "epsilon_insensitive",
                                "exponential",
                                "hinge",
                                "huber",
                                "lad",
                                "linear",
                                "log",
                                "ls",
                                "modified_huber",
                                "perceptron",
                                "quantile",
                                "residual_threshold",
                                "square",
                                "squared_epsilon_insensitive",
                                "squared_hinge",
                                "squared_loss",
                            ]
                        },
                    ],
                    "default": "absolute_loss",
                    "description": 'String inputs, "absolute_loss" and "squared_loss" are supported which find the absolute loss and squared loss per sample respectively',
                },
                "random_state": {
                    "anyOf": [
                        {"type": "integer"},
                        {"laleType": "numpy.random.RandomState"},
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "The generator used to initialize the centers",
                },
            },
        },
        {
            "XXX TODO XXX": "Parameter: base_estimator > only supports regression estimators"
        },
        {
            "XXX TODO XXX": "Parameter: is_model_valid > only be used if the estimated model is needed for making the rejection decision"
        },
    ],
}
# Schema for the arguments of fit().
_input_fit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Fit estimator using RANSAC algorithm.",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix, shape [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "Training data.",
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "Target values.",
        },
        "sample_weight": {
            "type": "array",
            "items": {"type": "number"},
            "description": "Individual weights for each sample raises error if sample_weight is passed and base_estimator fit method does not support it.",
        },
    },
}
# Schema for the arguments of predict().
_input_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict using the estimated model.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
    },
}
# Schema for the return value of predict().
_output_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Returns predicted values.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
    ],
}
# Bundle of all schemas above; consumed by make_operator below.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Combined schema for expected data and hyperparameters.",
    "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.RANSACRegressor#sklearn-linear_model-ransacregressor",
    "import_from": "sklearn.linear_model",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}
# Lale operator assembled from the impl class and the combined schemas.
RANSACRegressor = make_operator(_RANSACRegressorImpl, _combined_schemas)
# Attach schema-derived docstrings to the operator.
set_docstrings(RANSACRegressor)
|
{"hexsha": "bc7c2c03ea76adb549b97f1f9d27aec56d603745", "size": 10317, "ext": "py", "lang": "Python", "max_stars_repo_path": "lale/lib/autogen/ransac_regressor.py", "max_stars_repo_name": "mfeffer/lale", "max_stars_repo_head_hexsha": "57b58843c7c14dc2e5658244280f2c1918bf030b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-24T20:35:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T20:35:18.000Z", "max_issues_repo_path": "lale/lib/autogen/ransac_regressor.py", "max_issues_repo_name": "mfeffer/lale", "max_issues_repo_head_hexsha": "57b58843c7c14dc2e5658244280f2c1918bf030b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lale/lib/autogen/ransac_regressor.py", "max_forks_repo_name": "mfeffer/lale", "max_forks_repo_head_hexsha": "57b58843c7c14dc2e5658244280f2c1918bf030b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-16T08:20:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-16T08:20:30.000Z", "avg_line_length": 40.9404761905, "max_line_length": 207, "alphanum_fraction": 0.436367161, "include": true, "reason": "from numpy", "num_tokens": 2008}
|
#include <boost/noncopyable.hpp>
#include "burger/base/Singleton.h"
#include <thread>
#include <iostream>
#include <string>
// Helper type that logs its construction and destruction together with the
// calling thread id and its own address, so per-thread singleton lifetime can
// be observed. Inherits boost::noncopyable to forbid copies.
class Test :boost::noncopyable {
public:
    Test() {
        std::cout << "Test tid = " << std::this_thread::get_id() << " Address = "
            << static_cast<const void *>(this) << std::endl;
    }
    ~Test() {
        std::cout << "~Test tid = " << std::this_thread::get_id() << " Address = "
            << static_cast<const void *>(this) << " name = " << name_ <<std::endl;
    }
    const std::string& getName() const { return name_; }
    void setName(const std::string& name) { name_ = name; }
private:
    std::string name_;  // label set by whichever thread owns this instance
};
using ST = burger::SingletonPerThread<Test>;
void print() {
std::cout << std::this_thread::get_id() << " obj1 Address = "
<< static_cast<const void *>(&ST::Instance()) << " name = " << ST::Instance().getName() <<std::endl;
}
// Report the thread-local singleton, rename it, sleep so the threads overlap,
// then report again to show each thread kept its own instance.
// Fix: take the string by const reference instead of by const value -- the
// original copied the argument on every call for no benefit (std::thread
// already stores its own decayed copy, which binds fine to a const ref).
void ThreadFunc(const std::string& changeTo) {
    print();
    ST::Instance().setName(changeTo);
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
    print();
}
int main() {
    // Name the main thread's own singleton first.
    ST::Instance().setName("main one");
    // Each worker renames *its own* thread-local instance.
    std::thread t1(ThreadFunc, "thread1");
    std::thread t2(ThreadFunc, "thread2");
    t1.join();
    // Printed after t1 finished but while t2 may still run: main's instance
    // must still read "main one".
    print();
    t2.join();
}
|
{"hexsha": "a11d42eb664e450d73f061d268e2fed5d2e2adb5", "size": 1280, "ext": "cc", "lang": "C++", "max_stars_repo_path": "burger/base/tests/SingletonThreadLocal_test.cc", "max_stars_repo_name": "BurgerGroup/Burger", "max_stars_repo_head_hexsha": "b9159ea8855122a32091d40eb24439456f8879a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30.0, "max_stars_repo_stars_event_min_datetime": "2021-05-07T16:54:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T01:55:49.000Z", "max_issues_repo_path": "burger/base/tests/SingletonThreadLocal_test.cc", "max_issues_repo_name": "chanchann/Burger", "max_issues_repo_head_hexsha": "b9159ea8855122a32091d40eb24439456f8879a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-07-27T16:27:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-27T16:27:14.000Z", "max_forks_repo_path": "burger/base/tests/SingletonThreadLocal_test.cc", "max_forks_repo_name": "chanchann/Burger", "max_forks_repo_head_hexsha": "b9159ea8855122a32091d40eb24439456f8879a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2021-06-07T09:20:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T06:30:36.000Z", "avg_line_length": 27.2340425532, "max_line_length": 110, "alphanum_fraction": 0.58203125, "num_tokens": 348}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Jérôme Eberhardt 2016-2020
# Unrolr
#
# pSPE CPU
# Author: Jérôme Eberhardt <qksoneo@gmail.com>
#
# License: MIT
import os
import sys
import numpy as np
from scipy.spatial.distance import cdist
__author__ = "Jérôme Eberhardt"
__copyright__ = "Copyright 2020, Jérôme Eberhardt"
# Fix: the module previously only defined the misspelled ``__lience__``.
# The correctly spelled dunder is added; the old name is kept so any code
# reading the misspelling keeps working.
__license__ = "MIT"
__lience__ = "MIT"  # deprecated misspelling, kept for backward compatibility
__maintainer__ = "Jérôme Eberhardt"
__email__ = "qksoneo@gmail.com"
def _spe_dihedral(r, d, r_neighbor, n_iter=10000, learning_rate=1, verbose=0):
"""
The CPU implementation of pSPE with dihedral_distance
Args:
r (ndarray): n-dimensional dataset (rows: frame; columns: angle/intramolecular distance)
d (ndarray): projected embedding in low dim space
r_neighbor (float): neighbor radius cutoff
n_iter (int): number of optimization iteration (default: 10000)
learning_rate (float): learning rate, aka computational temperature (default: 1)
verbose (int): turn on:off verbose (default: False)
"""
alpha = float(learning_rate) / float(n_iter)
freq_progression = float(n_iter) / 100.
epsilon = 1e-10
l = 1. / r.shape[1]
for c in range(0, n_iter + 1):
if c % freq_progression == 0 and verbose:
percentage = float(c) / float(n_iter) * 100.
sys.stdout.write("\rUnrolr Optimization : %8.3f %%" % percentage)
sys.stdout.flush()
# Choose random embedding (pivot)
i = np.random.randint(r.shape[0])
# Euclidean distance
dijs = cdist([d[i]], d)[0]
# Dihedral distance
rijs = np.sqrt(l * 0.5 * np.sum((1. - np.cos(r[i] - r)), axis=1))
# SPE
j = (rijs <= r_neighbor) | ((rijs > r_neighbor) & (dijs < rijs))
d[j] += (learning_rate * ((rijs[j] - dijs[j]) / (dijs[j] + epsilon)))[:, None] * (d[j] - d[i])
learning_rate -= alpha
if verbose:
print()
return d
def _spe_intramolecular(r, d, r_neighbor, n_iter=10000, learning_rate=1, verbose=0):
"""
The CPU implementation of pSPE with intermolecular_distance
Args:
r (ndarray): n-dimensional dataset (rows: frame; columns: angle/intramolecular distance)
d (ndarray): projected embedding in low dim space
r_neighbor (float): neighbor radius cutoff
n_iter (int): number of optimization iteration (default: 10000)
learning_rate (float): learning rate, aka computational temperature (default: 1)
verbose (int): turn on:off verbose (default: False)
"""
alpha = float(learning_rate) / float(n_iter)
freq_progression = float(n_iter) / 100.
epsilon = 1e-10
for c in range(0, n_iter + 1):
j = 0
if c % freq_progression == 0 and verbose:
percentage = float(c) / float(n_iter) * 100.
sys.stdout.write("\rUnrolr Optimization : %8.3f %%" % percentage)
sys.stdout.flush()
# Choose random embedding (pivot)
i = np.random.randint(r.shape[0])
# Euclidean distance
dijs = cdist([d[i]], d)[0]
# Intramolecular distance
rijs = np.sqrt(np.mean((r[i] - r)**2, axis=1))
# SPE
j = (rijs <= r_neighbor) | ((rijs > r_neighbor) & (dijs < rijs))
d[j] += (learning_rate * ((rijs[j] - dijs[j]) / (dijs[j] + epsilon)))[:, None] * (d[j] - d[i])
learning_rate -= alpha
if verbose:
print()
return d
def _spe_cpu(r, d, r_neighbor, metric="dihedral", n_iter=10000, learning_rate=1., verbose=0):
    """
    The CPU implementation of pSPE (dihedral_distance/intramolecular_distance).

    Args:
        r (ndarray): n-dimensional dataset (rows: frame; columns: angle/intramolecular distance)
        d (ndarray): projected embedding in low dim space
        r_neighbor (float): neighbor radius cutoff
        metric (str): distance metric (choices: dihedral or intramolecular) (default: dihedral)
        n_iter (int): number of optimization iteration (default: 10000)
        learning_rate (float): learning rate, aka computational temperature (default: 1)
        verbose (int): turn on:off verbose (default: 0)

    Returns:
        ndarray: the optimized embedding ``d``

    Raises:
        ValueError: if ``metric`` is neither "dihedral" nor "intramolecular".
    """
    if metric == "dihedral":
        d = _spe_dihedral(r, d, r_neighbor, n_iter, learning_rate, verbose)
    elif metric == "intramolecular":
        d = _spe_intramolecular(r, d, r_neighbor, n_iter, learning_rate, verbose)
    else:
        # Fail loudly: the original silently returned the untouched embedding
        # for an unknown metric, which masks caller typos.
        raise ValueError("Unknown metric %r (choices: dihedral, intramolecular)" % metric)
    return d
def _evaluate_embedding_cpu(r, d, r_neighbor, metric="dihedral", epsilon=1e-4):
    """
    Evaluate the final embedding by calculating the stress and correlation.

    Random pivots are drawn until both running estimates change by less than
    ``epsilon`` between consecutive iterations.

    Args:
        r (ndarray): n-dimensional dataset (rows: frame; columns: angle/intramolecular distance)
        d (ndarray): the final projected embedding in low dim space
        r_neighbor (float): neighbor radius cutoff
        metric (str): distance metric (choices: dihedral or intramolecular) (default: dihedral)
        epsilon (float): convergence criteria when computing final stress and correlation (default: 1e-4)

    Returns:
        tuple: (correlation, stress) of the embedding.

    NOTE(review): if ``metric`` is neither "dihedral" nor "intramolecular",
    ``rijs`` is never assigned and a NameError is raised -- callers must pass
    a valid metric.
    """
    # Ignore divide per zeros
    np.seterr(divide='ignore', invalid='ignore')

    tmp_correl = []
    sij = []
    tmp_sij_sum = 0.0
    tmp_rij_sum = 0.0
    # Sentinels guaranteeing at least two iterations before convergence.
    old_stress = 999.
    old_correl = 999.
    correlation = None
    stress = None
    l = 1. / r.shape[1]

    while True:
        # Choose random conformation as pivot
        i = np.random.randint(r.shape[0])
        # Euclidean distance
        dijs = cdist([d[i]], d)[0]
        if metric == 'dihedral':
            rijs = np.sqrt(l * 0.5 * np.sum((1. - np.cos(r[i] - r)), axis=1))
        elif metric == 'intramolecular':
            rijs = np.sqrt(np.mean((r[i] - r)**2, axis=1))
        # Compute current correlation (running mean of per-pivot Pearson
        # correlation between original and embedded distances)
        tmp = (np.dot(rijs.T, dijs) / rijs.shape[0]) - (np.mean(rijs) * np.mean(dijs))
        tmp_correl.append(tmp / (np.std(rijs) * np.std(dijs)))
        correlation = np.mean(tmp_correl)
        # Compute current stress (Sammon-like, restricted to neighbors and
        # too-close embedded points)
        j = (rijs <= r_neighbor) | (dijs < rijs)
        sij = ((dijs[j] - rijs[j]) * (dijs[j] - rijs[j])) / (rijs[j])
        tmp_sij_sum += np.nansum(sij)
        tmp_rij_sum += np.sum(rijs)
        stress = tmp_sij_sum / tmp_rij_sum
        # Test for convergence
        if (np.abs(old_stress - stress) < epsilon) and (np.abs(old_correl - correlation) < epsilon):
            break
        old_stress = stress
        old_correl = correlation

    # Restore numpy warnings
    np.seterr(divide='warn', invalid='warn')

    return correlation, stress
|
{"hexsha": "789dd8577494614b3ae0afc74034431883cc744d", "size": 6473, "ext": "py", "lang": "Python", "max_stars_repo_path": "unrolr/core/spe_cpu.py", "max_stars_repo_name": "jeeberhardt/unrolr", "max_stars_repo_head_hexsha": "76d432643525a1999a6b14d6af500b9ffb296b82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-06-05T19:44:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T04:42:25.000Z", "max_issues_repo_path": "unrolr/core/spe_cpu.py", "max_issues_repo_name": "jeeberhardt/unrolr", "max_issues_repo_head_hexsha": "76d432643525a1999a6b14d6af500b9ffb296b82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unrolr/core/spe_cpu.py", "max_forks_repo_name": "jeeberhardt/unrolr", "max_forks_repo_head_hexsha": "76d432643525a1999a6b14d6af500b9ffb296b82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-21T16:57:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-21T16:57:52.000Z", "avg_line_length": 33.0255102041, "max_line_length": 105, "alphanum_fraction": 0.615325197, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1804}
|
import pytest
import numpy as np
from numpy import testing as npt
import pandas.util.testing as pdt
from ixmp import Platform
from message_ix import Scenario
from message_ix.testing import (
make_dantzig,
models,
TS_DF,
TS_DF_CLEARED,
TS_DF_SHIFT
)
def test_run_clone(tmpdir):
    """Solve the Dantzig model and check both clone variants end-to-end."""
    # this test is designed to cover the full functionality of the GAMS API
    # - initialize a new ixmp platform instance
    # - create a new scenario based on Dantzigs tutorial transport model
    # - solve the model and read back the solution from the output
    # - perform tests on the objective value and the timeseries data
    mp = Platform(driver='hsqldb', path=tmpdir / 'db')
    scen = make_dantzig(mp, solve=True)
    # 153.675 is the known optimal objective of the Dantzig transport model
    assert np.isclose(scen.var('OBJ')['lvl'], 153.675)
    assert scen.firstmodelyear == 1963
    pdt.assert_frame_equal(scen.timeseries(iamc=True), TS_DF)

    # cloning with `keep_solution=True` keeps all timeseries and the solution
    # (same behaviour as `ixmp.Scenario`)
    scen2 = scen.clone(keep_solution=True)
    assert np.isclose(scen2.var('OBJ')['lvl'], 153.675)
    assert scen2.firstmodelyear == 1963
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)

    # cloning with `keep_solution=False` drops the solution and only keeps
    # timeseries set as `meta=True` or prior to the first model year
    # (DIFFERENT behaviour from `ixmp.Scenario`)
    scen3 = scen.clone(keep_solution=False)
    assert np.isnan(scen3.var('OBJ')['lvl'])
    assert scen3.firstmodelyear == 1963
    pdt.assert_frame_equal(scen3.timeseries(iamc=True), TS_DF_CLEARED)
def test_run_remove_solution(test_mp):
    """Check `remove_solution` semantics specific to `message_ix.Scenario`."""
    # create a new instance of the transport problem and solve it
    scen = make_dantzig(test_mp, solve=True)
    assert np.isclose(scen.var('OBJ')['lvl'], 153.675)

    # check that re-solving the model will raise an error if a solution exists
    pytest.raises(ValueError, scen.solve)

    # check that removing solution with a first-model-year arg raises an error
    # (DIFFERENT behaviour from `ixmp.Scenario`)
    pytest.raises(TypeError, scen.remove_solution, first_model_year=1964)

    # check that removing solution does not delete timeseries data
    # before first model year (DIFFERENT behaviour from `ixmp.Scenario`)
    scen.remove_solution()
    assert scen.firstmodelyear == 1963
    pdt.assert_frame_equal(scen.timeseries(iamc=True), TS_DF_CLEARED)
def test_shift_first_model_year(test_mp):
    """Check that cloning with `shift_first_model_year` rebases the horizon."""
    scen = make_dantzig(test_mp, solve=True, multi_year=True)

    # assert that `historical_activity` is empty in the source scenario
    assert scen.par('historical_activity').empty

    # clone and shift first model year
    clone = scen.clone(shift_first_model_year=1964)

    # check that solution and timeseries in new model horizon have been removed
    assert np.isnan(clone.var('OBJ')['lvl'])
    pdt.assert_frame_equal(clone.timeseries(iamc=True), TS_DF_SHIFT)
    assert clone.firstmodelyear == 1964
    # check that the variable `ACT` is now the parameter `historical_activity`
    assert not clone.par('historical_activity').empty
def scenario_list(mp):
    """Return the (model, scenario) columns of *mp*'s non-default scenario list."""
    listing = mp.scenario_list(default=False)
    return listing[['model', 'scenario']]
def assert_multi_db(mp1, mp2):
    """Assert both platforms list identical (model, scenario) pairs."""
    left = scenario_list(mp1)
    right = scenario_list(mp2)
    pdt.assert_frame_equal(left, right)
def test_multi_db_run(tmpdir):
    """Check cloning a solved scenario across two hsqldb platforms."""
    # create a new instance of the transport problem and solve it
    mp1 = Platform(driver='hsqldb', path=tmpdir / 'mp1')
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(driver='hsqldb', path=tmpdir / 'mp2')
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit('wrong_unit')
    mp2.add_region('wrong_region', 'country')

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(NotImplementedError, scen1.clone, keep_solution=False,
                  **dest)
    pytest.raises(NotImplementedError, scen1.clone,
                  shift_first_model_year=1964, **dest)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = Platform(driver='hsqldb', path=tmpdir / 'mp2')
    scen2 = Scenario(_mp2, **models['dantzig'])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set('node'), scen2.set('node'))
    # Fix: the original wrote `scen2.firstmodelyear == 1963` as a bare
    # expression, so the comparison result was silently discarded.
    assert scen2.firstmodelyear == 1963
    pdt.assert_frame_equal(scen1.par('var_cost'), scen2.par('var_cost'))
    assert np.isclose(scen2.var('OBJ')['lvl'], 153.675)
    pdt.assert_frame_equal(scen1.var('ACT'), scen2.var('ACT'))

    # check that custom unit, region and timeseries are migrated correctly
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
|
{"hexsha": "d78cb744d2ed60a04768297317f31599bde429f1", "size": 4937, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_integration.py", "max_stars_repo_name": "GamzeUnlu95/message_ix", "max_stars_repo_head_hexsha": "ccf80600991362874424b786e10a688d8d24ca74", "max_stars_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_integration.py", "max_issues_repo_name": "GamzeUnlu95/message_ix", "max_issues_repo_head_hexsha": "ccf80600991362874424b786e10a688d8d24ca74", "max_issues_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-08-09T13:26:01.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-13T09:04:45.000Z", "max_forks_repo_path": "tests/test_integration.py", "max_forks_repo_name": "GamzeUnlu95/message_ix", "max_forks_repo_head_hexsha": "ccf80600991362874424b786e10a688d8d24ca74", "max_forks_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5703125, "max_line_length": 79, "alphanum_fraction": 0.7239214098, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1248}
|
PROGRAM file_pos
   ! Collect nonnegative reals from stdin into a new scratch file, then
   ! re-read and display a user-chosen line of that file.
   IMPLICIT NONE
   REAL :: r
   INTEGER :: status, line, index
   ! Fix: a bare CHARACTER holds a single character, so any IOMSG text was
   ! truncated; give the message buffer a realistic length.
   CHARACTER(len=256) :: msg

   OPEN(UNIT=1, FILE='temp.dat', STATUS='NEW', ACTION='READWRITE', IOSTAT=status, IOMSG=msg)
   ! Fix: report a failed OPEN (e.g. temp.dat already exists with STATUS='NEW')
   ! instead of continuing with an invalid unit.
   IF (status .ne. 0) THEN
      WRITE(*, *) "Error opening temp.dat: ", TRIM(msg)
      STOP
   END IF

   WRITE(*, *) "Enter nonnegative real numbers to store in a temporary file."
   WRITE(*, *) "Enter a negative real number to stop."
   DO
      ! Fix: IOSTAT on the READ itself -- the original checked the status of
      ! the previous WRITE, so end-of-input or bad data was not detected and
      ! r could be tested while undefined.
      READ(*, '(F3.1)', IOSTAT=status) r
      IF (status .ne. 0) THEN
         EXIT
      END IF
      IF (r .lt. 0.0) THEN
         EXIT
      END IF
      WRITE(1, *, IOSTAT=status) r
   END DO

   REWIND(1)
   WRITE(*, *) "Enter a line number to review."
   READ(*, *) line
   ! Read forward to the requested line; r holds the last value read.
   DO index = 1, line
      READ(1, *, IOSTAT=status) r
   END DO
   WRITE(*, *) r

   CLOSE(1)
END PROGRAM file_pos
|
{"hexsha": "b86dbcb8d3e0537437d53ceef27d4c3e2f8f0193", "size": 658, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/chap5/file_pos.f90", "max_stars_repo_name": "evanmacbride/fortran-practice", "max_stars_repo_head_hexsha": "1d9d851c35baedf52444db65157bd9a987dec60d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/chap5/file_pos.f90", "max_issues_repo_name": "evanmacbride/fortran-practice", "max_issues_repo_head_hexsha": "1d9d851c35baedf52444db65157bd9a987dec60d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/chap5/file_pos.f90", "max_forks_repo_name": "evanmacbride/fortran-practice", "max_forks_repo_head_hexsha": "1d9d851c35baedf52444db65157bd9a987dec60d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3703703704, "max_line_length": 91, "alphanum_fraction": 0.6048632219, "num_tokens": 228}
|
#include <iostream>
#include <iomanip>
#include <fstream>
#include <chrono>
#include <boost/algorithm/string.hpp>
#include "estimation.hpp"
#include "utils.hpp"
#include "pointmatcher/PointMatcher.h"
typedef PointMatcher<double> PM;
typedef PM::DataPoints DP;
using namespace PointMatcherSupport; // NOLINT
// Lidar odometry: register each scan against a sliding-window submap with
// libpointmatcher ICP (seeded by GPS/INS ground truth) and append the
// relative pose per scan pair to a CSV file.
int main(int argc, const char *argv[]) {
    std::string root, config;
    if (validateArgs(argc, argv, root, config) != 0) {
        return 1;
    }
    std::vector<std::string> lidar_files;
    get_file_names(root + "lidar/", lidar_files, "bin");
    std::string lidar_pose_file = root + "applanix/lidar_poses_ypr_ref2.csv";
    std::string lidar_odom_file = root + "applanix/lidar_odom_poses.csv";
    // Truncate the output file and write the CSV header once.
    std::ofstream ofs;
    ofs.open(lidar_odom_file, std::ios::out);
    ofs << "TIME1,TIME2,x,y,z,yaw,pitch,roll\n";
    ofs.close();
    // ICP configuration: defaults unless a YAML config was supplied.
    PM::ICP icp;
    if (config.empty()) {
        icp.setDefault();
    } else {
        std::ifstream ifs(config.c_str());
        icp.loadFromYaml(ifs);
    }
    // Feature labels for the homogeneous point clouds (x, y, z, w).
    DP::Labels labels;
    labels.push_back(DP::Label("x", 1));
    labels.push_back(DP::Label("y", 1));
    labels.push_back(DP::Label("z", 1));
    labels.push_back(DP::Label("w", 1));
    std::shared_ptr<PM::Transformation> rigidTrans;
    rigidTrans = PM::get().REG(Transformation).create("RigidTransformation");
    // Drop points within 2 m of the scanner (self-hits on the vehicle).
    std::shared_ptr<PM::DataPointsFilter> removeScanner =
        PM::get().DataPointsFilterRegistrar.create("MinDistDataPointsFilter",
                                                   {{"minDist", "2.0"}});
    std::shared_ptr<PM::DataPointsFilter> randSubsample =
        PM::get().DataPointsFilterRegistrar.create("RandomSamplingDataPointsFilter",
                                                   {{"prob", toParam(0.90)}});
    // Sliding window of the most recent registered clouds (the ICP submap).
    std::deque<DP> sliding_window;
    uint window_size = 10;
    Eigen::Matrix4d T_enu_map = Eigen::Matrix4d::Identity();
    Eigen::Matrix4d T_prev = Eigen::Matrix4d::Identity();
    // NOTE(review): hard-coded start index 92 -- presumably skips an unusable
    // prefix of this particular dataset; confirm before reusing elsewhere.
    uint start = 92;
    for (uint i = start; i < lidar_files.size(); ++i) {
        std::cout << i << " / " << lidar_files.size() - 1 << std::endl;
        Eigen::MatrixXd pc, intensities;
        std::vector<float> times;
        load_velodyne(root + "lidar/" + lidar_files[i], pc, intensities, times);
        std::vector<double> gt;
        // GPSTime,x,y,z,vel_x,vel_y,vel_z,roll,pitch,heading,ang_vel_z
        assert(get_groundtruth_data(lidar_pose_file, lidar_files[i], gt));
        Eigen::Matrix4d T_enu_sensor = getTransformFromGT(gt);
        removeMotionDistortion(pc, times, T_enu_sensor, gt);
        DP newCloud(pc, labels);
        newCloud = removeScanner->filter(newCloud);
        // First scan seeds the map frame; nothing to register against yet.
        if (i == start) {
            T_enu_map = T_enu_sensor;
            sliding_window.push_back(newCloud);
            continue;
        }
        // Ground-truth pose in the map frame, used as the ICP initial guess.
        Eigen::Matrix4d T_map_sensor = get_inverse_tf(T_enu_map) * T_enu_sensor;
        Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
        try {
            // Build the submap by concatenating the windowed clouds.
            DP submap = sliding_window[0];
            for (uint j = 1; j < sliding_window.size(); ++j) {
                submap.concatenate(sliding_window[j]);
            }
            submap = randSubsample->filter(submap);  // random subsample to speed up ICP
            T = icp(randSubsample->filter(newCloud), submap, T_map_sensor);
        } catch (PM::ConvergenceError& error) {
            std::cout << "ERROR PM::ICP failed to converge: " << error.what() << std::endl;
            return 1;
        }
        std::cout << T << std::endl;
        // Keep the *registered* cloud in the window so the submap stays in
        // the map frame.
        DP transformed = rigidTrans->compute(newCloud, T);
        sliding_window.push_back(transformed);
        if (sliding_window.size() > window_size)
            sliding_window.pop_front();
        // Relative motion between consecutive scans, appended as CSV.
        Eigen::Matrix4d T_rel = get_inverse_tf(T_prev) * T;
        T_prev = T;
        ofs.open(lidar_odom_file, std::ios::app);
        std::string time1, time2;
        get_name_from_file(lidar_files[i - 1], time1);
        get_name_from_file(lidar_files[i], time2);
        double yaw = 0, pitch = 0, roll = 0;
        Eigen::Matrix3d C = T_rel.block(0, 0, 3, 3);
        rotToYawPitchRoll(C, yaw, pitch, roll);
        ofs << time1 << "," << time2 << "," << T_rel(0, 3) << "," << T_rel(1, 3) << "," << T_rel(2, 3) << ","
            << yaw << "," << pitch << "," << roll << "\n";
        ofs.close();
    }
}
|
{"hexsha": "24a99c3310f4c7d25ff37fc4fbe8f70e96e45ab3", "size": 4209, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/odometry.cpp", "max_stars_repo_name": "keenan-burnett/leslie_lidar_mapping", "max_stars_repo_head_hexsha": "004f3b552c27aa87931b3e3a851a836d703682e7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2021-01-05T01:17:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T21:51:30.000Z", "max_issues_repo_path": "src/odometry.cpp", "max_issues_repo_name": "keenan-burnett/leslie_lidar_mapping", "max_issues_repo_head_hexsha": "004f3b552c27aa87931b3e3a851a836d703682e7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/odometry.cpp", "max_forks_repo_name": "keenan-burnett/leslie_lidar_mapping", "max_forks_repo_head_hexsha": "004f3b552c27aa87931b3e3a851a836d703682e7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2021-02-02T15:42:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T16:59:35.000Z", "avg_line_length": 37.9189189189, "max_line_length": 109, "alphanum_fraction": 0.6060822048, "num_tokens": 1168}
|
# third party imports
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
# custom imports
import cobra.utils as utils
class LogisticRegressionModel:
    """Wrapper around the LogisticRegression class, with additional methods
    implemented such as evaluation (using AUC), getting a list of
    coefficients, a dictionary of coefficients per predictor, ... for
    convenience

    Attributes
    ----------
    logit : LogisticRegression
        scikit-learn logistic regression model
    predictors : list
        List of predictors used in the model
    """

    def __init__(self):
        # Very large C effectively disables regularization; fixed
        # random_state keeps fits reproducible.
        self.logit = LogisticRegression(fit_intercept=True, C=1e9,
                                        solver='liblinear', random_state=42)
        self._is_fitted = False
        # placeholder to keep track of a list of predictors
        self.predictors = []
        # cache of performance scores keyed by data-split name
        self._eval_metrics_by_split = {}

    def serialize(self) -> dict:
        """Serialize model as JSON

        Returns
        -------
        dict
            dictionary containing the serialized JSON
        """
        serialized_model = {
            "meta": "logistic-regression",
            "predictors": self.predictors,
            "_eval_metrics_by_split": self._eval_metrics_by_split,
            "params": self.logit.get_params()
        }

        # Fitted attributes only exist once fit() has been called.
        if self._is_fitted:
            serialized_model.update({
                "classes_": self.logit.classes_.tolist(),
                "coef_": self.logit.coef_.tolist(),
                "intercept_": self.logit.intercept_.tolist(),
                "n_iter_": self.logit.n_iter_.tolist(),
            })

        return serialized_model

    def deserialize(self, model_dict: dict):
        """Deserialize a model previously stored as JSON

        Parameters
        ----------
        model_dict : dict
            Serialized JSON file as a dict

        Raises
        ------
        ValueError
            In case JSON file is no valid serialized model
        """
        if not self._is_valid_dict(model_dict):
            raise ValueError("No valid serialized model")

        self.logit = LogisticRegression()
        self.logit.set_params(**model_dict["params"])
        self.logit.classes_ = np.array(model_dict["classes_"])
        self.logit.coef_ = np.array(model_dict["coef_"])
        self.logit.intercept_ = np.array(model_dict["intercept_"])
        # BUG FIX: n_iter_ was previously (incorrectly) restored from the
        # "intercept_" entry.
        self.logit.n_iter_ = np.array(model_dict["n_iter_"])
        self.predictors = model_dict["predictors"]
        self._eval_metrics_by_split = model_dict["_eval_metrics_by_split"]
        # A valid serialized dict always contains fitted attributes (see
        # _is_valid_dict), so the restored model counts as fitted; without
        # this flag a deserialize -> serialize round-trip dropped them.
        self._is_fitted = True

    def get_coef(self) -> np.array:
        """Returns the model coefficients

        Returns
        -------
        np.array
            array of model coefficients
        """
        return self.logit.coef_[0]

    def get_intercept(self) -> float:
        """Returns the intercept of the model

        Returns
        -------
        float
            intercept of the model
        """
        return self.logit.intercept_[0]

    def get_coef_by_predictor(self) -> dict:
        """Returns a dictionary mapping predictor (key) to coefficient (value)

        Returns
        -------
        dict
            map ``{predictor: coefficient}``
        """
        return dict(zip(self.predictors, self.logit.coef_[0]))

    def fit(self, X_train: pd.DataFrame, y_train: pd.Series):
        """Fit the model

        Parameters
        ----------
        X_train : pd.DataFrame
            predictors of train data
        y_train : pd.Series
            target of train data
        """
        self.predictors = list(X_train.columns)
        self.logit.fit(X_train, y_train)
        self._is_fitted = True

    def score_model(self, X: pd.DataFrame) -> np.ndarray:
        """Score a model on a (new) dataset

        Parameters
        ----------
        X : pd.DataFrame
            dataset of predictors to score the model

        Returns
        -------
        np.ndarray
            score of the model for each observation
        """
        # We select predictor columns (self.predictors) here to
        # ensure we have the proper predictors and the proper order!!!
        return self.logit.predict_proba(X[self.predictors])[:, 1]

    def evaluate(self, X: pd.DataFrame, y: pd.Series,
                 split: str=None) -> float:
        """Evaluate the model on a given data set (X, y). The optional split
        parameter is to indicate that the data set belongs to
        (train, selection, validation), so that the computation on these sets
        can be cached!

        Parameters
        ----------
        X : pd.DataFrame
            dataset containing the predictor values for each observation
        y : pd.Series
            dataset containing the target of each observation
        split : str, optional
            split of the dataset (e.g. train-selection-validation)

        Returns
        -------
        float
            the performance score of the model (e.g. AUC)
        """
        if (split is None) or (split not in self._eval_metrics_by_split):
            y_pred = self.score_model(X)
            performance = roc_auc_score(y_true=y, y_score=y_pred)

            if split is None:
                return performance

            # cache the score so repeated calls for the same split are free
            self._eval_metrics_by_split[split] = performance

        return self._eval_metrics_by_split[split]

    def compute_variable_importance(self, data: pd.DataFrame) -> pd.DataFrame:
        """Compute the importance of each predictor in the model and return
        it as a DataFrame

        Parameters
        ----------
        data : pd.DataFrame
            data to score the model

        Returns
        -------
        pd.DataFrame
            DataFrame containing columns predictor and importance
        """
        y_pred = self.score_model(data)

        # Importance proxy: Pearson correlation between a predictor's values
        # and the model scores on the same data.
        importance_by_variable = {
            utils.clean_predictor_name(predictor): stats.pearsonr(
                data[predictor],
                y_pred
            )[0]
            for predictor in self.predictors
        }

        df = pd.DataFrame.from_dict(importance_by_variable,
                                    orient='index').reset_index()
        df.columns = ["predictor", "importance"]

        return (df.sort_values(by="importance", ascending=False)
                .reset_index(drop=True))

    def _is_valid_dict(self, model_dict: dict) -> bool:
        """Check whether a dict is a valid serialized logistic regression
        model (as produced by :meth:`serialize`).

        Parameters
        ----------
        model_dict : dict
            candidate serialized model

        Returns
        -------
        bool
            True if the dict contains all required keys with list-typed
            fitted attributes, False otherwise
        """
        if ("meta" not in model_dict
                or model_dict["meta"] != "logistic-regression"):
            return False

        attr = ["classes_", "coef_", "intercept_", "n_iter_", "predictors"]
        for key in attr:
            # Each fitted attribute must be present AND stored as a list.
            # BUG FIX: the previous condition short-circuited incorrectly
            # and could raise a KeyError for a missing key instead of
            # returning False.
            if key not in model_dict or not isinstance(model_dict[key], list):
                return False

        if ("params" not in model_dict
                or "_eval_metrics_by_split" not in model_dict):
            return False

        return True
|
{"hexsha": "b3261119a35a7581d9ce9e9c04a60c89c8b335a4", "size": 6994, "ext": "py", "lang": "Python", "max_stars_repo_path": "cobra/model_building/models.py", "max_stars_repo_name": "MatthiasRoels/cobra", "max_stars_repo_head_hexsha": "474650f1ba3f36aa87f3fd7e77724a10d5484401", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-16T09:40:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T04:11:12.000Z", "max_issues_repo_path": "cobra/model_building/models.py", "max_issues_repo_name": "MatthiasRoels/cobra", "max_issues_repo_head_hexsha": "474650f1ba3f36aa87f3fd7e77724a10d5484401", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cobra/model_building/models.py", "max_forks_repo_name": "MatthiasRoels/cobra", "max_forks_repo_head_hexsha": "474650f1ba3f36aa87f3fd7e77724a10d5484401", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6754385965, "max_line_length": 79, "alphanum_fraction": 0.5810694881, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1441}
|
###Differential Evolution Validation###
##Brief explanation of the method##
"""
Validation will be done for various functions.
"""
__author__ = "Yarilis Gómez Martínez (yarilisgm@gmail.com)"
__date__ = "2021"
__copyright__ = "Copyright (C) 2021 Yarilis Gómez Martínez"
__license__ = "GNU GPL Version 3.0"
##Modules##
import numpy as np
from scipy.optimize import differential_evolution
import time
import Grafics as graf
##Name of the files to save outputs##
#Logger modes: 'w' erase previous file, 'a' appending to the end of the file
# Base name shared by the log file and the PDF of figures saved at the end.
output_namefile='DE_Test_Function'
# Mirror all console output into a log file (graf.Logger is a project helper).
log_console = graf.Logger('Results/Log_'+output_namefile+'.log', mode="w")
##Define the objective function##
#val=variable of the function to be optimized
# Ackley function: known global minimum f(0, 0) = 0.
solA = np.array([0, 0])       # known minimizer
fA = 0                        # objective value at the minimizer (shift)
boundsA = [(-5, 5), (-5, 5)]  # search box per variable
def Ackley(valA):
    """Shifted two-variable Ackley test function (zero at ``solA``)."""
    x0, x1 = valA[0], valA[1]
    radial = np.sqrt(0.5 * (x0 ** 2 + x1 ** 2))
    cosine = 0.5 * (np.cos(2. * np.pi * x0) + np.cos(2. * np.pi * x1))
    value = -20. * np.exp(-0.2 * radial) + 20. + np.e - np.exp(cosine)
    return value - fA
# Rastrigin function: known global minimum f(0, ..., 0) = 0.
solR = np.array([0, 0])                  # known minimizer
fR = 0                                   # objective value at the minimizer
n = 2                                    # problem dimension
boundsR = [(-5.12, 5.12) for _ in range(n)]
def Rastrigin(valR):
    """Shifted Rastrigin test function (zero at ``solR``), any dimension."""
    dim = len(valR)
    total = 10 * dim + np.sum(valR ** 2 - 10 * np.cos(2 * np.pi * valR))
    return total - fR
# Rosenbrock function (spelled "Rosenbrok" throughout this script):
# known global minimum f(1, ..., 1) = 0.
solB = np.array([1, 1])  # known minimizer
fB = 0                   # objective value at the minimizer
def Rosenbrok(valB):
    """Shifted Rosenbrock test function (zero at ``solB``), any dimension."""
    leading = valB[:-1]
    trailing = valB[1:]
    total = np.sum(100 * (trailing - leading ** 2) ** 2 + (1 - leading) ** 2)
    return total - fB
##Method input parameters##
#args
strategy='best1bin' #Default strategy
max_generations=300 #maxiter passed to differential_evolution
population_size=30 #popsize multiplier (individuals per dimension)
tolerance=1e-16 #relative convergence tolerance (tol)
mutation=1 #between (0,2)
recombination=0.5 #Probability
#seed
disp=False #do not print the evaluated objective each iteration
##Callback##
# Halt the optimizer early once the objective drops below this threshold.
epsilon = 1e-16
eA = []         # squared distance to the known Ackley minimizer, per generation
end_timeA = []  # elapsed wall-clock time, per generation
def callback_A(xk, convergence):
    """Record per-generation progress for Ackley; True halts the optimizer."""
    deviation = xk - solA
    eA.append(np.sum(deviation * deviation))
    end_timeA.append(time.time() - start_time)
    return True if Ackley(xk) < epsilon else None
eR = []         # squared distance to the known Rastrigin minimizer, per generation
end_timeR = []  # elapsed wall-clock time, per generation
def callback_R(xk, convergence):
    """Record per-generation progress for Rastrigin; True halts the optimizer."""
    deviation = xk - solR
    eR.append(np.sum(deviation * deviation))
    end_timeR.append(time.time() - start_time)
    return True if Rastrigin(xk) < epsilon else None
eB = []         # squared distance to the known Rosenbrock minimizer, per generation
end_timeB = []  # elapsed wall-clock time, per generation
def callback_B(xk, convergence):
    """Record per-generation progress for Rosenbrok; True halts the optimizer."""
    deviation = xk - solB
    eB.append(np.sum(deviation * deviation))
    end_timeB.append(time.time() - start_time)
    return True if Rosenbrok(xk) < epsilon else None
#polish (The L-BFGS-B minimization method is used to polish the last member of the population.)
initial='latinhypercube'#It can be 'latinhypercube' (default), 'random' or array.
#atol
updating='deferred' #required when workers != 1 (parallel evaluation)
workers=-1 #use all available CPU cores
#constraints
# Echo the full parameter set to the console (and, via the Logger, the log file).
print("----------------------------------------------------------------------------------")
print("Differential_evolution parameters: ")
print("----------------------------------------------------------------------------------")
print("Objective Function: Peaks, Ackley, Rastrigin and Rosenbrok Functions")
print("Bounds for the variables of the Ackley function =", boundsA[0])
print("Bounds for the variables of the Rastrigin and Rosenbrok function =", boundsR[0])
#print("args =", args)
print("Strategy =", strategy)
print("Maximum number of generations =", max_generations)
print("Total population size =", population_size)
print("Relative tolerance =", tolerance)
print("Mutation constant =", mutation)
print("Recombination constant =", recombination)
#print("Seed =", seed)
print("Prints the evaluated func at every iteration. =", disp)
print("Minimization halted value =", epsilon)
#print("polish =", polish)
print("Type of population initialization =", initial)
#print("atol =", atol)
print("Updating =", updating)
print("Workers =", workers)
#print("constraints =", constraints)
print("----------------------------------------------------------------------------------")
##Result##
# One timed optimization per test function; each callback also reads the
# module-level start_time set just before its run.
#Result for the Ackley function
start_time = time.time()
ResultA = differential_evolution(Ackley,boundsA,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_A,polish=False,
                        init=initial,updating=updating,workers=workers)
end_timeA_Mem=time.time() - start_time
#Result for the Rastrigin function
start_time = time.time()
ResultR = differential_evolution(Rastrigin,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_R,polish=False,
                        init=initial,updating=updating,workers=workers)
end_timeR_Mem=time.time() - start_time
#Result for the Rosenbrok function
start_time = time.time()
ResultB = differential_evolution(Rosenbrok,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_B,polish=False,
                        init=initial,updating=updating,workers=workers)
end_timeB_Mem=time.time() - start_time
print("----------------------------------------------------------------------------------")
print("Differential_evolution result: ")
print("----------------------------------------------------------------------------------")
print("Result for the Ackley function\n", ResultA)
print("----------------------------------------------------------------------------------")
print("Result for the Rastrigin function\n", ResultR)
print("----------------------------------------------------------------------------------")
print("Result for the Rosenbrok function\n", ResultB)
print("----------------------------------------------------------------------------------")
##Error graph##
# Plot every 10th recorded error/time sample against the generation index.
max_generations_A=np.array(range(10,len(eA),10))
eA_list=eA[10::10]
endtimeA_list=end_timeA[10::10]
text=graf.message_convert(ResultA,epsilon)
graf.Two_axes_plot(max_generations_A,eA_list,endtimeA_list,"Error graph for the Ackley function",
                   'max_generations',y1label='error', y2label='Time (s)', text=text)
max_generations_R=np.array(range(10,len(eR),10))
eR_list=eR[10::10]
endtimeR_list=end_timeR[10::10]
text=graf.message_convert(ResultR,epsilon)
graf.Two_axes_plot(max_generations_R,eR_list,endtimeR_list,"Error graph for the Rastrigin function",
                   'max_generations',y1label='error', y2label='Time (s)', text=text)
max_generations_B=np.array(range(10,len(eB),10))
eB_list=eB[10::10]
endtimeB_list=end_timeB[10::10]
text=graf.message_convert(ResultB,epsilon)
graf.Two_axes_plot(max_generations_B,eB_list,endtimeB_list,"Error graph for the Rosenbrok function",
                   'max_generations',y1label='error', y2label='Time (s)', text=text)
##Table##
##Analyzing max_generations##
#Initialize#
# Repeat each optimization n times to gather run-to-run statistics.
# NOTE(review): this rebinds the module-level ``n`` (previously the problem
# dimension); ``boundsR`` was already constructed above, so the rebind is
# harmless here.
n=50
resultAT=[0]*n
end_timeAT=[0]*n
resultRT=[0]*n
end_timeRT=[0]*n
resultBT=[0]*n
end_timeBT=[0]*n
#Iterating max_generations#
# The original loop kept a second counter ``j`` that always equalled the
# loop index ``i``; it has been removed and ``i`` is used directly.
for i in range(0,n):
    #Result for the Ackley function
    start_time = time.time()
    resultAT[i] = differential_evolution(Ackley,boundsA,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_A,polish=False,
                        init=initial,updating=updating,workers=workers)
    end_timeAT[i]=time.time() - start_time
    #Result for the Rastrigin function
    start_time = time.time()
    resultRT[i] = differential_evolution(Rastrigin,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_R,polish=False,
                        init=initial,updating=updating,workers=workers)
    end_timeRT[i]=time.time() - start_time
    #Result for the Rosenbrok function
    start_time = time.time()
    resultBT[i] = differential_evolution(Rosenbrok,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
                        tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_B,polish=False,
                        init=initial,updating=updating,workers=workers)
    end_timeBT[i]=time.time() - start_time
# Per-run statistics: squared distance to the known minimizer, final objective
# value, iteration count and wall-clock time for each of the n repetitions.
xA=np.array([res.x for res in resultAT])
eAT=np.sum((xA-solA)**2, axis=1)
funA=np.array([res.fun for res in resultAT])
nitA=np.array([res.nit for res in resultAT])
TeA=graf.Stats_Univariate(eAT)
TfunA=graf.Stats_Univariate(funA)
TnitA=graf.Stats_Univariate(nitA)
Tend_timeA=graf.Stats_Univariate(end_timeAT)
xR=np.array([res.x for res in resultRT])
eRT=np.sum((xR-solR)**2, axis=1)
funR=np.array([res.fun for res in resultRT])
nitR=np.array([res.nit for res in resultRT])
TeR=graf.Stats_Univariate(eRT)
TfunR=graf.Stats_Univariate(funR)
TnitR=graf.Stats_Univariate(nitR)
Tend_timeR=graf.Stats_Univariate(end_timeRT)
xB=np.array([res.x for res in resultBT])
eBT=np.sum((xB-solB)**2, axis=1)
funB=np.array([res.fun for res in resultBT])
nitB=np.array([res.nit for res in resultBT])
TeB=graf.Stats_Univariate(eBT)
TfunB=graf.Stats_Univariate(funB)
TnitB=graf.Stats_Univariate(nitB)
Tend_timeB=graf.Stats_Univariate(end_timeBT)
# Print one descriptive-statistics table per metric and per test function.
print("-------------------------------------------------------------------------")
print("Descriptive statistics ")
print("-------------------------------------------------------------------------")
print("Ackley Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeA.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunA.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitA.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeA.Table()
print("-------------------------------------------------------------------------")
print("Rastrigin Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeR.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunR.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitR.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeR.Table()
print("-------------------------------------------------------------------------")
print("Rosenbrok Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeB.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunB.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitB.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeB.Table()
#Save figures and log to files#
# Collect every open figure into a single PDF, then restore stdout.
graf.multipage('Results/Figures_'+ output_namefile +'.pdf')
log_console.close()
|
{"hexsha": "eb5294325a5f2af9521c451a0c37891225596f22", "size": 11468, "ext": "py", "lang": "Python", "max_stars_repo_path": "DE_Test_Function.py", "max_stars_repo_name": "Yarapy/Copula_Cosimulation", "max_stars_repo_head_hexsha": "90731920463a77952adf46b8444bba060362b9e4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-28T00:32:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T00:43:18.000Z", "max_issues_repo_path": "DE_Test_Function.py", "max_issues_repo_name": "Yarapy/Copula_Cosimulation", "max_issues_repo_head_hexsha": "90731920463a77952adf46b8444bba060362b9e4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DE_Test_Function.py", "max_forks_repo_name": "Yarapy/Copula_Cosimulation", "max_forks_repo_head_hexsha": "90731920463a77952adf46b8444bba060362b9e4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2739726027, "max_line_length": 136, "alphanum_fraction": 0.5749912801, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2663}
|
using FrameFun.FrameFunInterface, FrameFun.Platforms, FrameFun.ApproximationProblems,
Test, LinearAlgebra, BasisFunctions, FrameFun.ParameterPaths, FrameFun.WeightedSumPlatforms,
FrameFun.ExtensionFramePlatforms
# Reference objects used throughout the testsets: a length-10 Fourier
# dictionary and its 2-D tensor product, each available as a raw dictionary,
# as a (platform, parameter) pair, and as an approximation problem.
ap1 = approximationproblem(platform(Fourier(10)),10)
ap2 = approximationproblem(platform(Fourier(10)^2),(10,10))
dict1 = Fourier(10)
dict2 = Fourier(10)^2
plat1 = (platform(Fourier(10)),10)
plat2 = (platform(Fourier(10)^2),(10,10))
aps = (ap1,ap2,)
alls = (dict1, dict2, aps...,)
# The sampling parameter of each interface flavour should match the value
# it was constructed with.
@testset "samplingparameter" begin
    # adaptive approximations has no samplingparameter
    for (a,sol) in zip(alls ,(10,(10,10),10,(10,10)))
        @test samplingparameter(a)==sol
    end
    @test samplingparameter(plat1...) == 10
    @test samplingparameter(plat2...) == (10,10)
end
# Platform/approximation-problem interpolation grids must agree with the
# underlying dictionary's grid.
@testset "interpolation_grid" begin
    @test interpolation_grid(plat1...) == interpolation_grid(dict1)
    @test interpolation_grid(plat2...) == interpolation_grid(dict2)
    @test interpolation_grid(ap1) == interpolation_grid(dict1)
    @test interpolation_grid(ap2) == interpolation_grid(dict2)
end
@testset "oversampling_grid" begin
    @test oversampling_grid(dict1) == FourierGrid(10)
    @test oversampling_grid(dict1, 11) == FourierGrid(11)
    @test oversampling_grid(dict2) == FourierGrid(10)^2
    @test oversampling_grid(dict2, (11,11)) == FourierGrid(11)^2
    @test oversampling_grid(plat1...) == FourierGrid(10)
    @test oversampling_grid(ap1) == FourierGrid(10)
    @test oversampling_grid(ap2) == FourierGrid(10)^2
end
# Sampling operators should factor into a grid-evaluation part and an
# identity part, and tensor products should commute with construction.
@testset "samplingoperator" begin
    op1 = samplingoperator(dict1)
    op2 = samplingoperator(dict2)
    op3 = samplingoperator(plat1...)
    op4 = samplingoperator(plat2...)
    op5 = samplingoperator(ap1)
    op6 = samplingoperator(ap2)
    op1a, op1b = elements(op1)
    @test grid(op1a) == FourierGrid(10)
    @test op1b ≈ IdentityOperator(dict1)
    op2a, op2b = elements(op3)
    @test grid(op2a) == FourierGrid(10)
    @test op2b ≈ IdentityOperator(dict1)
    op3a, op3b = elements(op5)
    @test grid(op3a) == FourierGrid(10)
    @test op3b ≈ IdentityOperator(dict1)
    f2 = (x,y)->exp(x*y)
    @test tensorproduct(op1,op1)*f2≈op2*f2
    @test tensorproduct(op3,op3)*f2≈op4*f2
    @test tensorproduct(op5,op5)*f2≈op6*f2
end
@testset "sampling_grid" begin
    @test sampling_grid(dict1) == FourierGrid(10)
    @test sampling_grid(dict2) == FourierGrid(10)^2
    @test sampling_grid(plat1...) == FourierGrid(10)
    @test sampling_grid(plat2...) == FourierGrid(10)^2
    @test sampling_grid(ap1) == FourierGrid(10)
    @test sampling_grid(ap2) == FourierGrid(10)^2
end
# A user-supplied grid (GridStyle) should be passed through unchanged, and
# product sampling styles should produce the corresponding product grid.
@testset "platform_grid" begin
    @test platform_grid(dict1;samplingstyle=GridStyle(),grid=FourierGrid(1)) == FourierGrid(1)
    @test platform_grid(dict2;samplingstyle=ProductSamplingStyle(GridStyle(),GridStyle()),grid=FourierGrid(1)) == FourierGrid(1)^2
    @test platform_grid(dict2;samplingstyle=GridStyle(),grid=FourierGrid(1)) == FourierGrid(1)
    @test platform_grid(plat1...,grid=FourierGrid(1)) == FourierGrid(1)
    @test platform_grid(plat2...,grid=FourierGrid(1)) == FourierGrid(1)^2
    @test platform_grid(ap1;samplingstyle=GridStyle(),grid=FourierGrid(1)) == FourierGrid(1)
    @test platform_grid(ap2;samplingstyle=GridStyle(),grid=FourierGrid(1)) == FourierGrid(1)
    @test sampling_grid(dict2;samplingstyle=GridStyle(),grid=FourierGrid(1))==FourierGrid(1)
    @test sampling_grid(dict2;samplingstyle=ProductSamplingStyle(GridStyle(),GridStyle()),grid=FourierGrid(1)) == FourierGrid(1)^2
end
# The discrete measure of every interface flavour should be the measure of
# its sampling grid.
@testset "discretemeasure" begin
    @test discretemeasure(dict1) == discretemeasure(sampling_grid(dict1))
    @test discretemeasure(dict2)== discretemeasure(sampling_grid(dict2))
    @test discretemeasure(plat1...)== discretemeasure(sampling_grid(plat1...))
    @test discretemeasure(plat2...)== discretemeasure(sampling_grid(plat2...))
    @test discretemeasure(ap1)== discretemeasure(sampling_grid(ap1))
    @test discretemeasure(ap2)== discretemeasure(sampling_grid(ap2))
end
@testset "measure" begin
    # NOTE(review): the next two tests compare a value to itself and always
    # pass — they were probably meant to compare against the platform's
    # measure like the lines below; confirm the intent.
    @test measure(dict1) == measure(dict1)
    @test measure(dict2)== measure(dict2)
    @test measure(plat1...)== measure(dict1)
    @test measure(plat2...)== measure(dict2)
    @test measure(ap1)== measure(dict1)
    @test measure(ap2)== measure(dict2)
end
# Dual dictionaries for the AZ algorithm: product dictionaries should yield
# tensor-product duals whose factors agree with the 1-D dual.
@testset "azdual_dict" begin
    @test operator(azdual_dict(dict1))≈operator(azdual_dict(ap1))≈operator(azdual_dict(plat1...))
    op = azdual_dict(dict2)
    @test op isa TensorProductDict
    @test ≈(operator.(elements(op))...)
    op = azdual_dict(plat2...)
    @test op isa TensorProductDict
    @test ≈(operator.(elements(op))...)
    op = azdual_dict(ap2)
    @test op isa TensorProductDict
    @test ≈(operator.(elements(op))...)
    @test operator(element(azdual_dict(dict2),1))≈operator(azdual_dict(dict1))
    @test operator(azdual_dict(dict1;samplingstyle=GramStyle()))≈
        operator(element(azdual_dict(ap2;samplingstyle=ProductSamplingStyle(GramStyle(),GramStyle())),1))
    @test operator(azdual_dict(dict1;samplingstyle=GramStyle()))≈
        operator(element(azdual_dict(plat2...;samplingstyle=ProductSamplingStyle(GramStyle(),GramStyle())),1))
    @test operator(azdual_dict(dict1;samplingstyle=GramStyle()))≈
        operator(element(azdual_dict(dict2;samplingstyle=ProductSamplingStyle(GramStyle(),GramStyle())),1))
end
# Discretization of the dictionaries themselves and of right-hand-side
# functions; all interface flavours should agree.
@testset "discretization" begin
    op1 = discretization(ap1)
    op2 = discretization(ap2)
    op3 = discretization(plat1...)
    op4 = discretization(plat2...)
    op5 = discretization(dict1)
    op6 = discretization(dict2)
    @test op2 isa TensorProductOperator
    @test op4 isa TensorProductOperator
    @test op6 isa TensorProductOperator
    @test op1 ≈ op3 ≈ op5
    @test op2 ≈ op4 ≈ op6
    f1 = exp
    f2 = (x,y)->exp(x*y)
    op1, b1 = discretization(f1,ap1)
    op2, b2 = discretization(f2,ap2)
    op3, b3 = discretization(f1,plat1...)
    op4, b4 = discretization(f2,plat2...)
    op5, b5 = discretization(f1,dict1)
    op6, b6 = discretization(f2,dict2)
    @test op2 isa TensorProductOperator
    @test op4 isa TensorProductOperator
    @test op6 isa TensorProductOperator
    @test op1 ≈ op3 ≈ op5
    @test op2 ≈ op4 ≈ op6
    @test b1≈b3≈b5
    @test b2≈b4≈b6
end
@testset "dualdiscretization" begin
    op1 = dualdiscretization(ap1)
    op2 = dualdiscretization(ap2)
    op3 = dualdiscretization(plat1...)
    op4 = dualdiscretization(plat2...)
    op5 = dualdiscretization(dict1)
    op6 = dualdiscretization(dict2)
    @test op2 isa TensorProductOperator
    @test op4 isa TensorProductOperator
    @test op6 isa TensorProductOperator
    @test op1 ≈ op3 ≈ op5
    @test op2 ≈ op4 ≈ op6
end
# Solvers built from each interface flavour and for every solver style
# should coincide, and product problems should yield product solvers.
@testset "solver" begin
    op1 = solver(dict1)
    op2 = solver(dict2)
    op3 = solver(plat1...)
    op4 = solver(plat2...)
    op5 = solver(ap1)
    op6 = solver(ap2)
    @test op2 isa TensorProductOperator
    @test op4 isa TensorProductOperator
    @test op6 isa TensorProductOperator
    @test op1 ≈ op3 ≈ op5
    @test op2 ≈ op4 ≈ op6
    @test element(op2,1) ≈ op1 ≈ element(op2,2)
    for STYLE in (InverseStyle, DirectStyle, DualStyle, IterativeStyle,AZStyle,AZSmoothStyle)
        op1 = solver(dict1;solverstyle=STYLE())
        op2 = solver(dict2;solverstyle=ProductSolverStyle(STYLE(),STYLE()))
        op3 = solver(plat1...;solverstyle=STYLE())
        op4 = solver(plat2...;solverstyle=ProductSolverStyle(STYLE(),STYLE()))
        op5 = solver(ap1;solverstyle=STYLE())
        op6 = solver(ap2;solverstyle=ProductSolverStyle(STYLE(),STYLE()))
        @test op2 isa TensorProductOperator
        @test op4 isa TensorProductOperator
        @test op6 isa TensorProductOperator
        @test op1 ≈ op3 ≈ op5
        @test op2 ≈ op4 ≈ op6
        @test element(op2,1) ≈ op1 ≈ element(op2,2)
    end
end
# Building blocks of the AZ algorithm: A, Z, Z', the plunge operator
# I - A*Z' and the first AZ step A*(I - A*Z'); for Fourier on its own grid
# the plunge region is (numerically) empty, hence rank <= 1.
@testset "AZ_A, AZ_Z, AZ_Zt, plungeoperator, firstAZstepoperator, plungerank" begin
    A1 = AZ_A(dict1)
    A2 = AZ_A(dict2)
    @test tensorproduct(A1,A1) ≈ A2
    Z1 = AZ_Z(dict1)
    Z2 = AZ_Z(dict2)
    @test tensorproduct(Z1,Z1) ≈ Z2
    Zt1 = AZ_Zt(dict1)
    Zt2 = AZ_Zt(dict2)
    @test tensorproduct(Zt1,Zt1) ≈ Zt2
    @test Zt2 ≈ Z2'
    P1 = plungeoperator(dict1)
    P2 = plungeoperator(dict2)
    @test 1+norm(tensorproduct(P1,P1) -P2)≈1
    @test I-A2*Zt2 ≈ P2
    M1 = firstAZstepoperator(dict1)
    M2 = firstAZstepoperator(dict2)
    @test 1+norm(tensorproduct(M1,M1) - M2)≈1
    @test 1+norm(A2*P2 - M2)≈1
    @test plungerank(dict1) <= 1
    @test plungerank(dict2) <= 1
end
# A parametrized platform (platform + parameter path) must behave exactly
# like the underlying platform evaluated at the corresponding parameter.
# BUG FIX: the testset was written `@testset begin "ParameterPath"`, which
# leaves the testset unnamed and turns the string into a no-op statement;
# the standard form is `@testset "name" begin ... end`.
@testset "ParameterPath" begin
    p = IncrementalCartesianParameterPath{2}()
    P = ExtensionFramePlatform(WeightedSumPlatform(platform(ChebyshevT(10)^2), (x,y)->1.,
        (x,y)->sqrt(x^2+y^2)), .9 .* UnitDisk())
    path = HierarchyPath(p,ProductPath(p,p))
    paramP = parametrizedplatform(P, path)
    paramPparam=45
    Pparam = path[paramPparam]
    @test SamplingStyle(P)==SamplingStyle(paramP)
    @test SolverStyle(P, SamplingStyle(P)) == SolverStyle(paramP, SamplingStyle(P))
    @test sampling_grid(P,Pparam)≈sampling_grid(paramP, paramPparam)
    @test dimensions(dictionary(P,Pparam)) == dimensions(dictionary(paramP,paramPparam))
    @test dimensions(azdual_dict(P,Pparam))==dimensions(azdual_dict(paramP, paramPparam))
    @test AZ_A(P,Pparam)≈AZ_A(paramP, paramPparam)
    @test AZ_Zt(P,Pparam)≈AZ_Zt(paramP, paramPparam)
    @test AZ_A(P,Pparam)≈AZ_A(paramP, paramPparam)
    @test typeof(solver(P,Pparam))==typeof(solver(paramP,paramPparam))
end
|
{"hexsha": "3fe8fc00d2b3d23038e2ee4efc36d3ae92116203", "size": 9477, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_framefuninterface.jl", "max_stars_repo_name": "GeorgAUT/FrameFun.jl", "max_stars_repo_head_hexsha": "769c342ae76de06fa986662862ab448e48c1849c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_framefuninterface.jl", "max_issues_repo_name": "GeorgAUT/FrameFun.jl", "max_issues_repo_head_hexsha": "769c342ae76de06fa986662862ab448e48c1849c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_framefuninterface.jl", "max_forks_repo_name": "GeorgAUT/FrameFun.jl", "max_forks_repo_head_hexsha": "769c342ae76de06fa986662862ab448e48c1849c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5875912409, "max_line_length": 130, "alphanum_fraction": 0.6934683972, "num_tokens": 2979}
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from neupy import layers, algorithms, environment
# Make runs repeatable and enable backend speed-ups
# (see neupy.environment for the exact effects).
environment.reproducible()
environment.speedup()
def make_dataset():
    """Create a synthetic binary-classification dataset whose first three
    columns are discretized into categories, split 90%/10%.

    Returns
    -------
    tuple
        ``(x_train, x_test, y_train, y_test)`` as returned by
        ``train_test_split``.
    """
    data, target = make_classification(n_samples=10000, n_features=20)
    # Bin edges used to discretize the leading columns with np.digitize:
    # [0, 0.2) -> 1, [0.2, 0.5) -> 2, [0.5, 0.8) -> 3, [0.8, 1) -> 4.
    # NOTE(review): digitize also maps values < 0 to 0 and values >= 1 to 5,
    # so more than four category IDs can occur — confirm that is intended.
    edges = np.array([0, 0.2, 0.5, 0.8, 1])
    # number of categorical columns
    n_categorical = 3
    for column in range(n_categorical):
        data[:, column] = np.digitize(data[:, column], bins=edges)
    return train_test_split(data, target, test_size=0.1)
def only_numerical(data):
    """Drop the three leading (categorical) columns, keeping the numeric ones."""
    numerical_part = data[:, 3:]
    return numerical_part
class ConvertCategorical(object):
    """Shift per-column category IDs so each column occupies its own disjoint
    ID range (required when all columns index one shared embedding matrix).

    For per-column category counts ``[4, 6, 6]`` the columns are shifted by
    ``[0, 4, 10]``, so IDs never collide across columns.
    """

    def fit_transform(self, data):
        """Learn per-column shifts from ``data`` and return the shifted data.

        Parameters
        ----------
        data : np.ndarray
            2-D integer array of zero-based category IDs, one column per
            categorical feature.
        """
        n_categories = np.max(data, axis=0) + 1
        # Shift for column i = total number of categories in all previous
        # columns. BUG FIX: the original subtracted n_categories[0] from
        # every cumulative sum, which is only correct when all columns have
        # the same number of categories.
        self.index_shifts = np.cumsum(n_categories) - n_categories
        return self.transform(data)

    def transform(self, data):
        """Apply the column shifts learned by :meth:`fit_transform`."""
        return data + self.index_shifts
convert_categorical = ConvertCategorical()
x_train, x_test, y_train, y_test = make_dataset()
# First three columns are categorical; remap them to non-overlapping ID ranges.
x_train_cat = convert_categorical.fit_transform(x_train[:, :3])
x_train_num = only_numerical(x_train)
# We use .max(), because each category has a unique identifier.
# Maximum value defines last category ID. Since first category has
# 0 identifier, we need to +1 to obtain total result
n_unique_categories = int(x_train_cat.max() + 1)
# NOTE(review): the test split is transformed with shifts learned on the
# train split; a test category ID larger than anything seen in training
# would index past the embedding table — confirm upstream guarantees.
x_test_cat = convert_categorical.transform(x_test[:, :3])
x_test_num = only_numerical(x_test)
network = algorithms.Momentum(
    [
        [[
            # 3 categorical inputs
            layers.Input(3),
            # Train embedding matrix for categorical inputs.
            # One row per unique (shifted) category ID across all three
            # columns; each ID is projected into a 4-dimensional space, so
            # the output shape from the layer is (batch_size, 3, 4).
            layers.Embedding(n_unique_categories, 4),
            # Reshape (batch_size, 3, 4) to (batch_size, 12)
            layers.Reshape(),
        ], [
            # 17 numerical inputs
            layers.Input(17),
        ]],
        # Concatenate (batch_size, 12) and (batch_size, 17)
        # into one matrix with shape (batch_size, 29)
        layers.Concatenate(),
        layers.Relu(128),
        layers.Relu(32) > layers.Dropout(0.5),
        layers.Sigmoid(1)
    ],
    step=0.2,
    verbose=True,
    momentum=0.9,
    nesterov=True,
    error='binary_crossentropy',
    # Applied max-norm regularizer to prevent overfitting.
    # Maximum possible norm for any weight is specified by
    # the `max_norm` parameter.
    addons=[algorithms.MaxNormRegularization],
    max_norm=10,
)
# Categorical input should be first, because input layer
# for categorical matrices was defined first.
network.train([x_train_cat, x_train_num], y_train,
              [x_test_cat, x_test_num], y_test,
              epochs=40)
y_predicted = network.predict([x_test_cat, x_test_num])
accuracy = accuracy_score(y_test, y_predicted.round())
print("Accuracy: {:.2%}".format(accuracy))
|
{"hexsha": "00c813945787ce4f7617d0bee0ca928ca17450ac", "size": 3326, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mlp/mix_categorical_numerical_inputs.py", "max_stars_repo_name": "FGDBTKD/neupy", "max_stars_repo_head_hexsha": "1f5e1ae9364e8c7816df79678a4648c689d2a5d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/mlp/mix_categorical_numerical_inputs.py", "max_issues_repo_name": "FGDBTKD/neupy", "max_issues_repo_head_hexsha": "1f5e1ae9364e8c7816df79678a4648c689d2a5d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/mlp/mix_categorical_numerical_inputs.py", "max_forks_repo_name": "FGDBTKD/neupy", "max_forks_repo_head_hexsha": "1f5e1ae9364e8c7816df79678a4648c689d2a5d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.963963964, "max_line_length": 70, "alphanum_fraction": 0.6683704149, "include": true, "reason": "import numpy", "num_tokens": 836}
|
"""
This example shows how to connect events in one window, for example, a mouse
press, to another figure window.
If you click on a point in the first window, the z and y limits of the
second will be adjusted so that the center of the zoom in the second
window will be the x,y coordinates of the clicked point.
Note the diameter of the circles in the scatter are defined in
points**2, so their size is independent of the zoom
"""
from matplotlib.pyplot import figure, show
import numpy
figsrc = figure()
figzoom = figure()
axsrc = figsrc.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False)
axzoom = figzoom.add_subplot(111, xlim=(0.45,0.55), ylim=(0.4,.6),
autoscale_on=False)
axsrc.set_title('Click to zoom')
axzoom.set_title('zoom window')
x,y,s,c = numpy.random.rand(4,200)
s *= 200
axsrc.scatter(x,y,s,c)
axzoom.scatter(x,y,s,c)
def onpress(event):
    """Recenter the zoom axes on a left-button click in the source figure."""
    if event.button != 1:
        return
    half_width = 0.1
    cx, cy = event.xdata, event.ydata
    axzoom.set_xlim(cx - half_width, cx + half_width)
    axzoom.set_ylim(cy - half_width, cy + half_width)
    figzoom.canvas.draw()
# Route mouse presses in the source figure to the zoom handler, then block on GUI.
figsrc.canvas.mpl_connect('button_press_event', onpress)
show()
|
{"hexsha": "6a6826011e441146ba5bc54963801f22945cdcbe", "size": 1146, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/event_handling/zoom_window.py", "max_stars_repo_name": "nkoep/matplotlib", "max_stars_repo_head_hexsha": "6ed04252994443a4cecf95f0da0efedb6d514b38", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-04-11T08:55:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T04:31:26.000Z", "max_issues_repo_path": "examples/event_handling/zoom_window.py", "max_issues_repo_name": "epgauss/matplotlib", "max_issues_repo_head_hexsha": "c9898ea9a30c67c579ab27cd61b68e2abae0fb0e", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/event_handling/zoom_window.py", "max_forks_repo_name": "epgauss/matplotlib", "max_forks_repo_head_hexsha": "c9898ea9a30c67c579ab27cd61b68e2abae0fb0e", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-10-05T04:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-11T18:06:02.000Z", "avg_line_length": 29.3846153846, "max_line_length": 76, "alphanum_fraction": 0.6928446771, "include": true, "reason": "import numpy", "num_tokens": 313}
|
# This file simulates omics data for a nonlinear system.
# The QTLs ("kk"), QTL effects ("u.txt"), QTL positions ("qtl.txt") and omics
# effects ("alpha.txt") from Christensen et al. (2021) are required.
# The data can be found in Christensen et al. (2021) or at:
# http://genoweb.toulouse.inra.fr/~alegarra/GOBLUP/
using JWAS,DataFrames,CSV,Statistics,JWAS.Datasets,DelimitedFiles,Random, LinearAlgebra, StatsBase

cd("/Users/tianjing/Box/Omicsdata")
Random.seed!(123)

### step1. get genetic value of omics (it is later transformed to get the true breeding value)
# simulation dimensions and variance components
nQTL=20_000   # there are 20000 QTLs in Christensen et al. (2021), but only 5000 will be used
inter=4       # select 1 of every 4 QTLs (5000/20000 QTLs)
nOmics=1200   # number of omics features
nInd=21100    # number of individuals
σ2_mi=2       # total variance of each omics feature
h2_m=0.61     # proportion of omics variance that is genetic
σ2_gi=h2_m*σ2_mi      # genetic variance of each omics feature
σ2_ei=(1-h2_m)*σ2_mi  # residual variance of each omics feature

# read QTL genotypes from Christensen et al. (2021)
QTL=readdlm("/Users/tianjing/Box/Omicsdata/andrewlegarra_simulated_omics/andrew_code/kk")
QTL=QTL[:,2:end]               # drop the ID column
pos=collect(inter:inter:nQTL)  # evenly spaced subset of QTL columns
QTL=QTL[:,pos]
writedlm("nonlinear/QTL5000.txt",QTL)  # evenly selected 5000 QTLs as in Christensen et al. (2021)

# read the 500 selected QTL positions for all 1200 omics features
QTL_select_pos=Int.(readdlm("/Users/tianjing/Box/Omicsdata/andrewlegarra_simulated_omics/andrew_code/qtl.txt"))
maximum(QTL_select_pos)  # interactive sanity check of the index range
minimum(QTL_select_pos)

# read the 500 selected QTL effects for all 1200 omics features
QTL_select_effect=readdlm("/Users/tianjing/Box/Omicsdata/andrewlegarra_simulated_omics/andrew_code/u.txt")

# calculate omics values: 1st layer (genotype -> omics)
G=zeros(nInd,nOmics)  # genetic value of each omics feature
E=zeros(nInd,nOmics)  # residual of each omics feature
Random.seed!(1)
for i in 1:nOmics
    W=QTL[:,QTL_select_pos[:,i]]    # genotype matrix of the QTLs acting on feature i
    @show size(W),W[1:2,1:2]
    Gi=W*QTL_select_effect[:,i]
    Gi=Gi/std(Gi)*sqrt(σ2_gi)       # scale so the genetic variance equals σ2_gi
    G[:,i]=Gi
    E[:,i]=randn(nInd)*sqrt(σ2_ei)
end
var(G,dims=1)  # interactive checks of realized variances
var(E,dims=1)
writedlm("nonlinear/varmi_2/omics_bv.txt",G)
writedlm("nonlinear/varmi_2/omics_residual.txt",E)
omics=G+E
writedlm("nonlinear/varmi_2/omics.txt",omics)
var(omics,dims=1)
# elementwise nonlinearity applied between the omics layer and the phenotype
mysigmoid(x) = 1/(1+exp(-x))

# save g(omics genetic value)
G_nonlinear=mysigmoid.(G)
writedlm("nonlinear/varmi_2/omics_bv_nonlinear.txt",G_nonlinear)

# save nonlinear-transformed omics
omics_nonlinear=mysigmoid.(omics)
writedlm("nonlinear/varmi_2/omics_nonlinear.txt",omics_nonlinear)

# read the neural-network weights between omics and phenotype (omics effects on phenotype)
w1=vec(readdlm("/Users/tianjing/Box/Omicsdata/andrewlegarra_simulated_omics/andrew_code/alpha.txt"))

# save the true breeding value (nonlinear genetic values weighted by w1)
tbv=G_nonlinear*w1
writedlm("nonlinear/varmi_2/tbv.txt",tbv)

# create the nonlinear phenotype y
omics_contribution=omics_nonlinear*w1
σ2_z=var(omics_contribution)
σ2_g_nonlinear=var(G_nonlinear*w1)
# residual variance chosen so that h2 = σ2_g_nonlinear/(σ2_z+σ2_e)
h2=0.337
σ2_e=σ2_g_nonlinear/h2 - σ2_z  # residual variance of y
Random.seed!(1)
y_residual=randn(nInd)*sqrt(σ2_e)
var(y_residual)  # interactive checks
y_nonlinear=omics_contribution+y_residual
var(y_nonlinear)
σ2_g_nonlinear/var(y_nonlinear)  # realized heritability
writedlm("nonlinear/varmi_2/y.txt",y_nonlinear)

# create one output folder per replicate dataset
for i in 1:20
    mkdir("nonlinear/varmi_2/data$i");
end
# create the 20 replicate datasets of 1055 individuals each
y_nonlinear=vec(readdlm("nonlinear/varmi_2/y.txt"))
omics=readdlm("nonlinear/varmi_2/omics.txt")
tbv=vec(readdlm("nonlinear/varmi_2/tbv.txt"))
for i in 1:20
    # indices of the 1055 selected individuals for replicate i
    id=Int.(vec(readdlm("andrew1055/data$i/select_1055_index_data$i.txt")))
    # save y_nonlinear
    yi=y_nonlinear[id]
    writedlm("nonlinear/varmi_2/data$i/y.txt",yi)
    yi_df=DataFrame(ID=id,y=yi)
    CSV.write("nonlinear/varmi_2/data$i/y.csv", yi_df)
    # save omics (the model should use omics, not g(omics))
    omicsi=omics[id,:]
    writedlm("nonlinear/varmi_2/data$i/omics.txt",omicsi)
    names=["gene$i" for i in 1:1200]   # comprehension's i is local, does not clash with the loop i
    omicsi_df=DataFrame(omicsi,names)
    insertcols!(omicsi_df, 1, :ID => id)
    CSV.write("nonlinear/varmi_2/data$i/omics.csv", omicsi_df)
    # save true breeding values
    bvi=tbv[id]
    writedlm("nonlinear/varmi_2/data$i/tbv.txt",bvi)
    bvi_df=DataFrame(ID=id,tbv=bvi)
    CSV.write("nonlinear/varmi_2/data$i/tbv.csv", bvi_df)
end
|
{"hexsha": "c27a1bc9a67b499d1c730132115e35da46a5fae7", "size": 3981, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Part2_NNLMM_nonlinear/data/data_simu_nonlinear.jl", "max_stars_repo_name": "zhaotianjing/NN-LMM", "max_stars_repo_head_hexsha": "cac8e597d7a783b64bde9b897408be500e1615a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-18T19:01:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T15:52:30.000Z", "max_issues_repo_path": "Part2_NNLMM_nonlinear/data/data_simu_nonlinear.jl", "max_issues_repo_name": "zhaotianjing/NN-LMM", "max_issues_repo_head_hexsha": "cac8e597d7a783b64bde9b897408be500e1615a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Part2_NNLMM_nonlinear/data/data_simu_nonlinear.jl", "max_forks_repo_name": "zhaotianjing/NN-LMM", "max_forks_repo_head_hexsha": "cac8e597d7a783b64bde9b897408be500e1615a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9008264463, "max_line_length": 135, "alphanum_fraction": 0.7696558654, "num_tokens": 1418}
|
import logging
import os
import coloredlogs
import imageio
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from skimage.color import rgb2gray
from skimage.exposure import rescale_intensity
from skimage.transform import rescale, rotate
from skimage.util import pad
logging.getLogger("tifffile").setLevel(logging.ERROR)
coloredlogs.install(
level="DEBUG", fmt="%(asctime)s %(levelname)s %(message)s", datefmt="%H:%M:%S"
)
src_path = "hashimoto_2.jpg"
dst_shape = (1536, 2048)
scale = 3
image = imageio.imread(src_path)
# rotate +90d
# image = rotate(image, 90, resize=True, preserve_range=True)
# rescale
src_shape = image.shape
image = rescale(
image, scale, preserve_range=True, multichannel=True, anti_aliasing=True
)
logging.info(f"rescale image from {src_shape} to {image.shape}")
# pad to destination shape
src_shape = image.shape[:2] # potentially colored
pad_shape = tuple(d - s for d, s in zip(dst_shape, src_shape))
# update pad shape to before/after
pad_shape = tuple((p // 2, p - p // 2) for p in pad_shape)
logging.info(f"pad width: {pad_shape}")
if len(image.shape) == 3:
# color dimension does not need padding
pad_shape += ((0, 0),)
image = pad(image, pad_shape, mode="constant", constant_values=0)
# convert to u8
image = image.astype(np.uint8)
# gray scale
image_gray = rgb2gray(image)
image_gray = rescale_intensity(image_gray, out_range=np.uint8)
image_gray = image_gray.astype(np.uint8)
imageio.imwrite("hashimoto_slm_gray.bmp", image_gray)
# write RGB
for c, cname in zip(range(3), ("r", "g", "b")):
imageio.imwrite(f"hashimoto_slm_rgb_{cname}.bmp", np.squeeze(image[..., c]))
|
{"hexsha": "bb9d0c0d14f72d253f486ba5b7398716900d1534", "size": 1722, "ext": "py", "lang": "Python", "max_stars_repo_path": "workspace/generate_hashimoto_image_pattern.py", "max_stars_repo_name": "cbc-group/pattern", "max_stars_repo_head_hexsha": "84c3c69015d860430c84114aabdd9ceea151704d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-23T14:15:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-23T14:15:24.000Z", "max_issues_repo_path": "workspace/generate_hashimoto_image_pattern.py", "max_issues_repo_name": "cbc-group/pattern", "max_issues_repo_head_hexsha": "84c3c69015d860430c84114aabdd9ceea151704d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "workspace/generate_hashimoto_image_pattern.py", "max_forks_repo_name": "cbc-group/pattern", "max_forks_repo_head_hexsha": "84c3c69015d860430c84114aabdd9ceea151704d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-23T14:15:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-23T14:15:33.000Z", "avg_line_length": 28.7, "max_line_length": 83, "alphanum_fraction": 0.7102206736, "include": true, "reason": "import numpy", "num_tokens": 460}
|
import struct
import numpy as np
import pandas as pd
# Load the three numerai splits; each CSV holds feature columns followed by
# the target in the last column.
df_train = pd.read_csv('data/train_data.csv')
df_valid = pd.read_csv('data/valid_data.csv')
df_test = pd.read_csv('data/test_data.csv')

feature_cols = list(df_train.columns[:-1])
target_col = df_train.columns[-1]

X_train = df_train[feature_cols].values
y_train = df_train[target_col].values
X_valid = df_valid[feature_cols].values
y_valid = df_valid[target_col].values
X_test = df_test[feature_cols].values

# Stack all splits: bh_tsne embeds the full matrix in one run.
X = np.concatenate([X_train, X_valid, X_test], axis=0)

N = X.shape[0]     # row count, written as int32
D = X.shape[1]     # feature count, written as int32
theta = 0.5        # Barnes-Hut accuracy/speed trade-off, double
perplexity = 30.0  # t-SNE perplexity, double
no_dims = 3        # embedding dimensionality, int32

# Header uses native ('@') byte order and alignment, then the raw matrix bytes.
header_fields = (
    ('@i', N),
    ('@i', D),
    ('@d', theta),
    ('@d', perplexity),
    ('@i', no_dims),
)
with open('data.dat', 'wb') as fh:
    for fmt, value in header_fields:
        fh.write(struct.pack(fmt, value))
    fh.write(X.tobytes())
|
{"hexsha": "5e5bac8ed85072e3318e3570b00ed728402e3f71", "size": 884, "ext": "py", "lang": "Python", "max_stars_repo_path": "bh_tsne/prep_data.py", "max_stars_repo_name": "mr4jay/numerai", "max_stars_repo_head_hexsha": "a07b2dcafe9f078df8578d150d585f239fe73c51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 306, "max_stars_repo_stars_event_min_datetime": "2016-09-18T07:32:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T16:30:26.000Z", "max_issues_repo_path": "bh_tsne/prep_data.py", "max_issues_repo_name": "mikekosk/numerai", "max_issues_repo_head_hexsha": "2a09c648c66143ee101cd80de4827108aaf218fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-01-04T02:17:20.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-18T11:43:59.000Z", "max_forks_repo_path": "bh_tsne/prep_data.py", "max_forks_repo_name": "mikekosk/numerai", "max_forks_repo_head_hexsha": "2a09c648c66143ee101cd80de4827108aaf218fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 94, "max_forks_repo_forks_event_min_datetime": "2016-09-17T03:48:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T11:54:25.000Z", "avg_line_length": 26.0, "max_line_length": 54, "alphanum_fraction": 0.6968325792, "include": true, "reason": "import numpy", "num_tokens": 263}
|
SUBROUTINE classico(uint,vint,wint)
! Advective term of the momentum equations in classical (convective) form,
! (V.grad)V, on a staggered grid; the result is returned per component in
! uint (u faces), vint (v faces) and wint (w faces).
! Velocities u,v,w, spacings dx,dy,dz and the scheme flag "der" come from
! the modules velpre/parametros.
!   der == 1 : first-order upwind
!   der == 2 : centred derivatives (deriva* helpers) with transverse
!              velocities interpolated via interp*_fc / interp*_cf
!   der == 3 : second-order upwind (deriva?u2n = backward, deriva?u2p = forward)

USE velpre
USE parametros

IMPLICIT NONE
!===================================================================================================================
real(8), dimension(nx1,ny,nz) :: uint
real(8), dimension(nx,ny1,nz) :: vint
real(8), dimension(nx,ny,nz1) :: wint

! d<comp>d<dir> : directional derivative contributions;
! bma,dma : v,w averaged onto u faces; amb,dmb : u,w onto v faces;
! amd,bmd : u,v onto w faces; *a arrays hold backward-sided derivatives (der==3).
real(8), dimension(nx1,ny,nz) :: dudx, dudy, dudz, bma, dma, dudxa, dudya, dudza
real(8), dimension(nx,ny1,nz) :: dvdx, dvdy, dvdz, amb, dmb, dvdxa, dvdya, dvdza
real(8), dimension(nx,ny,nz1) :: dwdx, dwdy, dwdz, amd, bmd, dwdxa, dwdya, dwdza
real(8), dimension(nx,ny,nz) :: aux

! loop counters
integer :: i, j, k
! positive/negative parts of the advecting velocity (upwind split)
real(8) :: aux1, aux2

!===================================================================================================================
! SOLUTION
!===================================================================================================================
if (der == 1) then
   ! first-order upwind
   do k = 1, nz
      do j = 1, ny
         do i = 1, nx1
            ! transverse velocities averaged onto the u face
            bma(i,j,k) = (v(i,j,k) + v(i-1,j,k) + v(i,j+1,k) + v(i-1,j+1,k)) * 0.25
            dma(i,j,k) = (w(i,j,k) + w(i-1,j,k) + w(i,j,k+1) + w(i-1,j,k+1)) * 0.25
            aux1 = max(u(i,j,k),0.)
            aux2 = min(u(i,j,k),0.)
            dudx(i,j,k) = aux1*(u(i,j,k)-u(i-1,j,k))/dx + aux2*(u(i+1,j,k)-u(i,j,k))/dx
            aux1 = max(bma(i,j,k),0.)
            aux2 = min(bma(i,j,k),0.)
            dudy(i,j,k) = aux1*(u(i,j,k)-u(i,j-1,k))/dy + aux2*(u(i,j+1,k)-u(i,j,k))/dy
            aux1 = max(dma(i,j,k),0.)
            aux2 = min(dma(i,j,k),0.)
            dudz(i,j,k) = aux1*(u(i,j,k)-u(i,j,k-1))/dz + aux2*(u(i,j,k+1)-u(i,j,k))/dz
            uint(i,j,k) = dudx(i,j,k) + dudy(i,j,k) + dudz(i,j,k)
         enddo
      enddo
   enddo

   do k = 1, nz
      do j = 1, ny1
         do i = 1, nx
            ! transverse velocities averaged onto the v face
            amb(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j-1,k) + u(i+1,j-1,k)) * 0.25
            dmb(i,j,k) = (w(i,j,k) + w(i,j-1,k) + w(i,j,k+1) + w(i,j-1,k+1)) * 0.25
            aux1 = max(amb(i,j,k),0.)
            aux2 = min(amb(i,j,k),0.)
            dvdx(i,j,k) = aux1*(v(i,j,k)-v(i-1,j,k))/dx + aux2*(v(i+1,j,k)-v(i,j,k))/dx
            aux1 = max(v(i,j,k),0.)
            aux2 = min(v(i,j,k),0.)
            dvdy(i,j,k) = aux1*(v(i,j,k)-v(i,j-1,k))/dy + aux2*(v(i,j+1,k)-v(i,j,k))/dy
            aux1 = max(dmb(i,j,k),0.)
            aux2 = min(dmb(i,j,k),0.)
            dvdz(i,j,k) = aux1*(v(i,j,k)-v(i,j,k-1))/dz + aux2*(v(i,j,k+1)-v(i,j,k))/dz
            vint(i,j,k) = dvdx(i,j,k) + dvdy(i,j,k) + dvdz(i,j,k)
         enddo
      enddo
   enddo

   do k = 1, nz1
      do j = 1, ny
         do i = 1, nx
            ! transverse velocities averaged onto the w face
            amd(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j,k-1) + u(i+1,j,k-1)) * 0.25
            bmd(i,j,k) = (v(i,j,k) + v(i,j+1,k) + v(i,j,k-1) + v(i,j+1,k-1)) * 0.25
            aux1 = max(amd(i,j,k),0.)
            aux2 = min(amd(i,j,k),0.)
            dwdx(i,j,k) = aux1*(w(i,j,k)-w(i-1,j,k))/dx + aux2*(w(i+1,j,k)-w(i,j,k))/dx
            aux1 = max(bmd(i,j,k),0.)
            aux2 = min(bmd(i,j,k),0.)
            dwdy(i,j,k) = aux1*(w(i,j,k)-w(i,j-1,k))/dy + aux2*(w(i,j+1,k)-w(i,j,k))/dy
            aux1 = max(w(i,j,k),0.)
            aux2 = min(w(i,j,k),0.)
            dwdz(i,j,k) = aux1*(w(i,j,k)-w(i,j,k-1))/dz + aux2*(w(i,j,k+1)-w(i,j,k))/dz
            wint(i,j,k) = dwdx(i,j,k) + dwdy(i,j,k) + dwdz(i,j,k)
         enddo
      enddo
   enddo

elseif (der == 2) then
   ! centred derivatives of each component in each direction
   CALL derivax(u,nx1,ny,nz,dudx)
   CALL derivay(u,nx1,ny,nz,dudy)
   CALL derivaz(u,nx1,ny,nz,dudz)
   CALL derivax(v,nx,ny1,nz,dvdx)
   CALL derivay(v,nx,ny1,nz,dvdy)
   CALL derivaz(v,nx,ny1,nz,dvdz)
   CALL derivax(w,nx,ny,nz1,dwdx)
   CALL derivay(w,nx,ny,nz1,dwdy)
   CALL derivaz(w,nx,ny,nz1,dwdz)

   ! interpolate transverse velocities face -> centre -> target face
   call interpy_fc(v(1:nx,1:ny1,1:nz),nx,ny1,nz,aux) !(nx,ny,nz)
   call interpx_cf(aux,nx,ny,nz,bma) !(nx1,ny,nz)
   call interpz_fc(w(1:nx,1:ny,1:nz1),nx,ny,nz1,aux) !(nx,ny,nz)
   call interpx_cf(aux,nx,ny,nz,dma) !(nx1,ny,nz)
   call interpx_fc(u(1:nx1,1:ny,1:nz),nx1,ny,nz,aux) !(nx,ny,nz)
   call interpy_cf(aux,nx,ny,nz,amb) !(nx,ny1,nz)
   call interpz_fc(w(1:nx,1:ny,1:nz1),nx,ny,nz1,aux) !(nx,ny,nz)
   call interpy_cf(aux,nx,ny,nz,dmb) !(nx,ny1,nz)
   call interpx_fc(u(1:nx1,1:ny,1:nz),nx1,ny,nz,aux) !(nx,ny,nz)
   call interpz_cf(aux,nx,ny,nz,amd) !(nx,ny,nz1)
   call interpy_fc(v(1:nx,1:ny1,1:nz),nx,ny1,nz,aux) !(nx,ny,nz)
   call interpz_cf(aux,nx,ny,nz,bmd) !(nx,ny,nz1)

   do k = 1, nz
      do j = 1, ny
         do i = 1, nx1
            uint(i,j,k) = u(i,j,k)*dudx(i,j,k) + bma(i,j,k)*dudy(i,j,k) + dma(i,j,k)*dudz(i,j,k)
         enddo
      enddo
   enddo
   do k = 1, nz
      do j = 1, ny1
         do i = 1, nx
            vint(i,j,k) = amb(i,j,k)*dvdx(i,j,k) + v(i,j,k)*dvdy(i,j,k) + dmb(i,j,k)*dvdz(i,j,k)
         enddo
      enddo
   enddo
   do k = 1, nz1
      do j = 1, ny
         do i = 1, nx
            wint(i,j,k) = amd(i,j,k)*dwdx(i,j,k) + bmd(i,j,k)*dwdy(i,j,k) + w(i,j,k)*dwdz(i,j,k)
         enddo
      enddo
   enddo

elseif (der == 3) then
   ! second-order upwind
   call interpy_fc(v(1:nx,1:ny1,1:nz),nx,ny1,nz,aux) !(nx,ny,nz)
   call interpx_cf(aux,nx,ny,nz,bma) !(nx1,ny,nz)
   call interpz_fc(w(1:nx,1:ny,1:nz1),nx,ny,nz1,aux) !(nx,ny,nz)
   call interpx_cf(aux,nx,ny,nz,dma) !(nx1,ny,nz)
   call interpx_fc(u(1:nx1,1:ny,1:nz),nx1,ny,nz,aux) !(nx,ny,nz)
   call interpy_cf(aux,nx,ny,nz,amb) !(nx,ny1,nz)
   call interpz_fc(w(1:nx,1:ny,1:nz1),nx,ny,nz1,aux) !(nx,ny,nz)
   call interpy_cf(aux,nx,ny,nz,dmb) !(nx,ny1,nz)
   call interpx_fc(u(1:nx1,1:ny,1:nz),nx1,ny,nz,aux) !(nx,ny,nz)
   call interpz_cf(aux,nx,ny,nz,amd) !(nx,ny,nz1)
   call interpy_fc(v(1:nx,1:ny1,1:nz),nx,ny1,nz,aux) !(nx,ny,nz)
   call interpz_cf(aux,nx,ny,nz,bmd) !(nx,ny,nz1)

   ! one-sided derivatives: *u2n = backward (negative side), *u2p = forward
   CALL derivaxu2n(u,nx1,ny,nz,dudxa)
   CALL derivaxu2p(u,nx1,ny,nz,dudx)
   CALL derivayu2n(u,nx1,ny,nz,dudya)
   CALL derivayu2p(u,nx1,ny,nz,dudy)
   CALL derivazu2n(u,nx1,ny,nz,dudza)
   CALL derivazu2p(u,nx1,ny,nz,dudz)
   CALL derivaxu2n(v,nx,ny1,nz,dvdxa)
   CALL derivaxu2p(v,nx,ny1,nz,dvdx)
   CALL derivayu2n(v,nx,ny1,nz,dvdya)
   CALL derivayu2p(v,nx,ny1,nz,dvdy)
   CALL derivazu2n(v,nx,ny1,nz,dvdza)
   CALL derivazu2p(v,nx,ny1,nz,dvdz)
   CALL derivaxu2n(w,nx,ny,nz1,dwdxa)
   CALL derivaxu2p(w,nx,ny,nz1,dwdx)
   CALL derivayu2n(w,nx,ny,nz1,dwdya)
   CALL derivayu2p(w,nx,ny,nz1,dwdy)
   CALL derivazu2n(w,nx,ny,nz1,dwdza)
   CALL derivazu2p(w,nx,ny,nz1,dwdz)

   do k = 1, nz
      do j = 1, ny
         do i = 1, nx1
            ! pick the backward derivative for positive advecting velocity,
            ! the forward one for negative
            aux1 = max(u(i,j,k),0.)
            aux2 = min(u(i,j,k),0.)
            dudx(i,j,k) = aux1*dudxa(i,j,k) + aux2*dudx(i,j,k)
            aux1 = max(bma(i,j,k),0.)
            aux2 = min(bma(i,j,k),0.)
            dudy(i,j,k) = aux1*dudya(i,j,k) + aux2*dudy(i,j,k)
            aux1 = max(dma(i,j,k),0.)
            aux2 = min(dma(i,j,k),0.)
            dudz(i,j,k) = aux1*dudza(i,j,k) + aux2*dudz(i,j,k)
            uint(i,j,k) = dudx(i,j,k) + dudy(i,j,k) + dudz(i,j,k)
         enddo
      enddo
   enddo
   do k = 1, nz
      do j = 1, ny1
         do i = 1, nx
            aux1 = max(amb(i,j,k),0.)
            aux2 = min(amb(i,j,k),0.)
            dvdx(i,j,k) = aux1*dvdxa(i,j,k) + aux2*dvdx(i,j,k)
            aux1 = max(v(i,j,k),0.)
            aux2 = min(v(i,j,k),0.)
            dvdy(i,j,k) = aux1*dvdya(i,j,k) + aux2*dvdy(i,j,k)
            aux1 = max(dmb(i,j,k),0.)
            aux2 = min(dmb(i,j,k),0.)
            dvdz(i,j,k) = aux1*dvdza(i,j,k) + aux2*dvdz(i,j,k)
            vint(i,j,k) = dvdx(i,j,k) + dvdy(i,j,k) + dvdz(i,j,k)
         enddo
      enddo
   enddo
   do k = 1, nz1
      do j = 1, ny
         do i = 1, nx
            aux1 = max(amd(i,j,k),0.)
            aux2 = min(amd(i,j,k),0.)
            dwdx(i,j,k) = aux1*dwdxa(i,j,k) + aux2*dwdx(i,j,k)
            aux1 = max(bmd(i,j,k),0.)
            aux2 = min(bmd(i,j,k),0.)
            dwdy(i,j,k) = aux1*dwdya(i,j,k) + aux2*dwdy(i,j,k)
            aux1 = max(w(i,j,k),0.)
            aux2 = min(w(i,j,k),0.)
            dwdz(i,j,k) = aux1*dwdza(i,j,k) + aux2*dwdz(i,j,k)
            wint(i,j,k) = dwdx(i,j,k) + dwdy(i,j,k) + dwdz(i,j,k)
         enddo
      enddo
   enddo
endif
!==================================================================================================================
END SUBROUTINE classico
SUBROUTINE rotacional(uint,vint,wint)
! Advective term in conservative (flux) upwind form: for each component the
! flux a*phi is split into positive (ap,bp,dp) and negative (an,bn,dn)
! advecting-velocity parts and differenced one-sidedly.  Results are
! returned in uint (u faces), vint (v faces) and wint (w faces).
! Only der == 1 (first-order upwind) is implemented; any other value stops.
!
! FIX: the top-boundary fill for the w component previously wrote
! min(w,0.) into dp(:,:,nz1+1), clobbering the positive part instead of
! setting dn — now it sets dn(:,:,nz1+1), matching the u boundary pair
! (ap(0)/an(nx1+1)) and the v boundary pair (bp(:,0)/bn(:,ny1+1)).
! Locals that were declared but never used here were removed.

USE velpre
USE parametros

IMPLICIT NONE
!===================================================================================================================
real(8), dimension(nx1,ny,nz) :: uint
real(8), dimension(nx,ny1,nz) :: vint
real(8), dimension(nx,ny,nz1) :: wint

! ?p/?n : positive/negative parts of the advecting velocity in x, y, z;
! bma,dma / amb,dmb / amd,bmd : transverse velocities averaged onto the
! u, v and w face stencils respectively.
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: ap, an, bma, dma
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: bp, bn, amb, dmb
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: dp, dn, amd, bmd
!
real(8), dimension(nx1,ny1,nz1) :: dudx, dvdx, dwdx
real(8), dimension(nx1,ny1,nz1) :: dudy, dvdy, dwdy
real(8), dimension(nx1,ny1,nz1) :: dudz, dvdz, dwdz

! loop counters
integer :: i, j, k

!===================================================================================================================
! SOLUTION
!===================================================================================================================
if (der == 1) then
   ! first-order upwind, flux form

   ! --- u component: split advecting velocities on u faces ---
   do k = 0, nz+1
      do j = 0, ny+1
         do i = 1, nx+1
            bma(i,j,k) = (v(i,j,k) + v(i-1,j,k) + v(i,j+1,k) + v(i-1,j+1,k)) * 0.25
            dma(i,j,k) = (w(i,j,k) + w(i-1,j,k) + w(i,j,k+1) + w(i-1,j,k+1)) * 0.25
            ap(i,j,k) = max(u(i,j,k),0.)
            an(i,j,k) = min(u(i,j,k),0.)
            bp(i,j,k) = max(bma(i,j,k),0.)
            bn(i,j,k) = min(bma(i,j,k),0.)
            dp(i,j,k) = max(dma(i,j,k),0.)
            dn(i,j,k) = min(dma(i,j,k),0.)
         enddo
      enddo
   enddo
   ! boundary halo values in the split direction
   ap(0,0:ny+1,0:nz+1) = max(u(0,0:ny+1,0:nz+1),0.)
   an(nx1+1,0:ny+1,0:nz+1) = min(u(nx1+1,0:ny+1,0:nz+1),0.)

   do k = 1, nz
      do j = 1, ny
         do i = 1, nx1
            dudx(i,j,k) = (ap(i,j,k)*u(i,j,k)-ap(i-1,j,k)*u(i-1,j,k))/dx + (an(i+1,j,k)*u(i+1,j,k)-an(i,j,k)*u(i,j,k))/dx
            dudy(i,j,k) = (bp(i,j,k)*u(i,j,k)-bp(i,j-1,k)*u(i,j-1,k))/dy + (bn(i,j+1,k)*u(i,j+1,k)-bn(i,j,k)*u(i,j,k))/dy
            dudz(i,j,k) = (dp(i,j,k)*u(i,j,k)-dp(i,j,k-1)*u(i,j,k-1))/dz + (dn(i,j,k+1)*u(i,j,k+1)-dn(i,j,k)*u(i,j,k))/dz
            uint(i,j,k) = dudx(i,j,k) + dudy(i,j,k) + dudz(i,j,k)
         enddo
      enddo
   enddo

   ! --- v component: split advecting velocities on v faces ---
   do k = 0, nz+1
      do j = 1, ny+1
         do i = 0, nx+1
            amb(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j-1,k) + u(i+1,j-1,k)) * 0.25
            dmb(i,j,k) = (w(i,j,k) + w(i,j-1,k) + w(i,j,k+1) + w(i,j-1,k+1)) * 0.25
            ap(i,j,k) = max(amb(i,j,k),0.)
            an(i,j,k) = min(amb(i,j,k),0.)
            bp(i,j,k) = max(v(i,j,k),0.)
            bn(i,j,k) = min(v(i,j,k),0.)
            dp(i,j,k) = max(dmb(i,j,k),0.)
            dn(i,j,k) = min(dmb(i,j,k),0.)
         enddo
      enddo
   enddo
   bp(0:nx+1,0,0:nz+1) = max(v(0:nx+1,0,0:nz+1),0.)
   bn(0:nx+1,ny1+1,0:nz+1) = min(v(0:nx+1,ny1+1,0:nz+1),0.)

   do k = 1, nz
      do j = 1, ny1
         do i = 1, nx
            dvdx(i,j,k) = (ap(i,j,k)*v(i,j,k)-ap(i-1,j,k)*v(i-1,j,k))/dx + (an(i+1,j,k)*v(i+1,j,k)-an(i,j,k)*v(i,j,k))/dx
            dvdy(i,j,k) = (bp(i,j,k)*v(i,j,k)-bp(i,j-1,k)*v(i,j-1,k))/dy + (bn(i,j+1,k)*v(i,j+1,k)-bn(i,j,k)*v(i,j,k))/dy
            dvdz(i,j,k) = (dp(i,j,k)*v(i,j,k)-dp(i,j,k-1)*v(i,j,k-1))/dz + (dn(i,j,k+1)*v(i,j,k+1)-dn(i,j,k)*v(i,j,k))/dz
            vint(i,j,k) = dvdx(i,j,k) + dvdy(i,j,k) + dvdz(i,j,k)
         enddo
      enddo
   enddo

   ! --- w component: split advecting velocities on w faces ---
   do k = 1, nz+1
      do j = 0, ny+1
         do i = 0, nx+1
            amd(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j,k-1) + u(i+1,j,k-1)) * 0.25
            bmd(i,j,k) = (v(i,j,k) + v(i,j+1,k) + v(i,j,k-1) + v(i,j+1,k-1)) * 0.25
            ap(i,j,k) = max(amd(i,j,k),0.)
            an(i,j,k) = min(amd(i,j,k),0.)
            bp(i,j,k) = max(bmd(i,j,k),0.)
            bn(i,j,k) = min(bmd(i,j,k),0.)
            dp(i,j,k) = max(w(i,j,k),0.)
            dn(i,j,k) = min(w(i,j,k),0.)
         enddo
      enddo
   enddo
   dp(0:nx+1,0:ny+1,0) = max(w(0:nx+1,0:ny+1,0),0.)
   ! bug fix: was "dp(...,nz1+1) = min(...)", overwriting the positive part
   dn(0:nx+1,0:ny+1,nz1+1) = min(w(0:nx+1,0:ny+1,nz1+1),0.)

   do k = 1, nz1
      do j = 1, ny
         do i = 1, nx
            dwdx(i,j,k) = (ap(i,j,k)*w(i,j,k)-ap(i-1,j,k)*w(i-1,j,k))/dx + (an(i+1,j,k)*w(i+1,j,k)-an(i,j,k)*w(i,j,k))/dx
            dwdy(i,j,k) = (bp(i,j,k)*w(i,j,k)-bp(i,j-1,k)*w(i,j-1,k))/dy + (bn(i,j+1,k)*w(i,j+1,k)-bn(i,j,k)*w(i,j,k))/dy
            dwdz(i,j,k) = (dp(i,j,k)*w(i,j,k)-dp(i,j,k-1)*w(i,j,k-1))/dz + (dn(i,j,k+1)*w(i,j,k+1)-dn(i,j,k)*w(i,j,k))/dz
            wint(i,j,k) = dwdx(i,j,k) + dwdy(i,j,k) + dwdz(i,j,k)
         enddo
      enddo
   enddo

else
   write(*,*) "não possui este esquema para o rotacional"
   STOP
endif
!==================================================================================================================
END SUBROUTINE rotacional
SUBROUTINE antissim(uint,vint,wint)
! Advective term in skew-symmetric form: the average of the conservative
! (flux) form and the classical (convective) form, each upwinded via the
! positive/negative velocity split.  Results go to uint/vint/wint per
! staggered component.  Only der == 1 (first-order upwind) is implemented.

USE velpre
USE parametros

IMPLICIT NONE
!===================================================================================================================
real(8), dimension(nx1,ny,nz) :: uint
real(8), dimension(nx,ny1,nz) :: vint
real(8), dimension(nx,ny,nz1) :: wint

! ?p/?n : positive/negative parts of the advecting velocity in x, y, z
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: ap, an
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: bp, bn
real(8), dimension(0:nx1+1,0:ny1+1,0:nz1+1) :: dp, dn
!
real(8), dimension(nx1,ny1,nz1) :: dudx, dvdx, dwdx
real(8), dimension(nx1,ny1,nz1) :: dudy, dvdy, dwdy
real(8), dimension(nx1,ny1,nz1) :: dudz, dvdz, dwdz
! transverse velocities averaged onto the u, v and w face stencils
real(8), dimension(nx1,ny1,nz1) :: bma, dma,amb, dmb, amd, bmd
!
! (the following locals are declared but not used in this subroutine)
real(8) :: aa, bb, dd
! loop counters (ai, bi, di unused)
integer :: i, j, k, ai, bi, di
! diagnostics placeholders (unused)
real(8) :: acont, bcont, dcont
integer :: loca(3), locb(3), locd(3)
! (unused here)
real(8) :: aux1, aux2

!===================================================================================================================
! SOLUTION
!===================================================================================================================
if (der == 1) then
   ! first-order upwind

   ! --- u component: velocity splits on u faces ---
   do k = 1, nz+1
      do j = 1, ny+1
         do i = 1, nx+1
            bma(i,j,k) = (v(i,j,k) + v(i-1,j,k) + v(i,j+1,k) + v(i-1,j+1,k)) * 0.25
            dma(i,j,k) = (w(i,j,k) + w(i-1,j,k) + w(i,j,k+1) + w(i-1,j,k+1)) * 0.25
            ap(i,j,k) = max(u(i,j,k),0.)
            an(i,j,k) = min(u(i,j,k),0.)
            bp(i,j,k) = max(bma(i,j,k),0.)
            bn(i,j,k) = min(bma(i,j,k),0.)
            dp(i,j,k) = max(dma(i,j,k),0.)
            dn(i,j,k) = min(dma(i,j,k),0.)
         enddo
      enddo
   enddo
   ! extend the split arrays into the halos by copying the first/last plane
   ap(0,:,:) = ap(1,:,:)
   bp(:,0,:) = bp(:,1,:)
   dp(:,:,0) = dp(:,:,1)
   an(nx1+1,:,:)=an(nx1,:,:)

   do k = 1, nz
      do j = 1, ny
         do i = 1, nx1
            ! half (flux form) + half (convective form), both upwinded
            dudx(i,j,k) = ((ap(i,j,k)*u(i,j,k)-ap(i-1,j,k)*u(i-1,j,k))/dx + (an(i+1,j,k)*u(i+1,j,k)-an(i,j,k)*u(i,j,k))/dx &
                         + ap(i,j,k)*(u(i,j,k)-u(i-1,j,k))/dx + an(i,j,k)*(u(i+1,j,k)-u(i,j,k))/dx) * 0.5
            dudy(i,j,k) = ((bp(i,j,k)*u(i,j,k)-bp(i,j-1,k)*u(i,j-1,k))/dy + (bn(i,j+1,k)*u(i,j+1,k)-bn(i,j,k)*u(i,j,k))/dy &
                         + bp(i,j,k)*(u(i,j,k)-u(i,j-1,k))/dy + bn(i,j,k)*(u(i,j+1,k)-u(i,j,k))/dy) * 0.5
            dudz(i,j,k) = ((dp(i,j,k)*u(i,j,k)-dp(i,j,k-1)*u(i,j,k-1))/dz+(dn(i,j,k+1)*u(i,j,k+1)-dn(i,j,k)*u(i,j,k))/dz &
                         + dp(i,j,k)*(u(i,j,k)-u(i,j,k-1))/dz + dn(i,j,k)*(u(i,j,k+1)-u(i,j,k))/dz) * 0.5
            uint(i,j,k) = dudx(i,j,k) + dudy(i,j,k) + dudz(i,j,k)
         enddo
      enddo
   enddo

   ! --- v component: velocity splits on v faces ---
   do k = 1, nz+1
      do j = 1, ny+1
         do i = 1, nx+1
            amb(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j-1,k) + u(i+1,j-1,k)) * 0.25
            dmb(i,j,k) = (w(i,j,k) + w(i,j-1,k) + w(i,j,k+1) + w(i,j-1,k+1)) * 0.25
            ap(i,j,k) = max(amb(i,j,k),0.)
            an(i,j,k) = min(amb(i,j,k),0.)
            bp(i,j,k) = max(v(i,j,k),0.)
            bn(i,j,k) = min(v(i,j,k),0.)
            dp(i,j,k) = max(dmb(i,j,k),0.)
            dn(i,j,k) = min(dmb(i,j,k),0.)
         enddo
      enddo
   enddo
   ap(0,:,:) = ap(1,:,:)
   bp(:,0,:) = bp(:,1,:)
   dp(:,:,0) = dp(:,:,1)
   bn(:,ny1+1,:)=bn(:,ny1,:)

   do k = 1, nz
      do j = 1, ny1
         do i = 1, nx
            dvdx(i,j,k) = ((ap(i,j,k)*v(i,j,k)-ap(i-1,j,k)*v(i-1,j,k))/dx + (an(i+1,j,k)*v(i+1,j,k)-an(i,j,k)*v(i,j,k))/dx &
                         + ap(i,j,k)*(v(i,j,k)-v(i-1,j,k))/dx + an(i,j,k)*(v(i+1,j,k)-v(i,j,k))/dx) * 0.5
            dvdy(i,j,k) = ((bp(i,j,k)*v(i,j,k)-bp(i,j-1,k)*v(i,j-1,k))/dy + (bn(i,j+1,k)*v(i,j+1,k)-bn(i,j,k)*v(i,j,k))/dy &
                         + bp(i,j,k)*(v(i,j,k)-v(i,j-1,k))/dy + bn(i,j,k)*(v(i,j+1,k)-v(i,j,k))/dy) * 0.5
            dvdz(i,j,k) = ((dp(i,j,k)*v(i,j,k)-dp(i,j,k-1)*v(i,j,k-1))/dz+(dn(i,j,k+1)*v(i,j,k+1)-dn(i,j,k)*v(i,j,k))/dz &
                         + dp(i,j,k)*(v(i,j,k)-v(i,j,k-1))/dz + dn(i,j,k)*(v(i,j,k+1)-v(i,j,k))/dz) * 0.5
            vint(i,j,k) = dvdx(i,j,k) + dvdy(i,j,k) + dvdz(i,j,k)
         enddo
      enddo
   enddo

   ! --- w component: velocity splits on w faces ---
   do k = 1, nz+1
      do j = 1, ny+1
         do i = 1, nx+1
            amd(i,j,k) = (u(i,j,k) + u(i+1,j,k) + u(i,j,k-1) + u(i+1,j,k-1)) * 0.25
            bmd(i,j,k) = (v(i,j,k) + v(i,j+1,k) + v(i,j,k-1) + v(i,j+1,k-1)) * 0.25
            ap(i,j,k) = max(amd(i,j,k),0.)
            an(i,j,k) = min(amd(i,j,k),0.)
            bp(i,j,k) = max(bmd(i,j,k),0.)
            bn(i,j,k) = min(bmd(i,j,k),0.)
            dp(i,j,k) = max(w(i,j,k),0.)
            dn(i,j,k) = min(w(i,j,k),0.)
         enddo
      enddo
   enddo
   ap(0,:,:) = ap(1,:,:)
   bp(:,0,:) = bp(:,1,:)
   dp(:,:,0) = dp(:,:,1)
   dn(:,:,nz1+1)=dn(:,:,nz1)

   do k = 1, nz1
      do j = 1, ny
         do i = 1, nx
            dwdx(i,j,k) = ((ap(i,j,k)*w(i,j,k)-ap(i-1,j,k)*w(i-1,j,k))/dx + (an(i+1,j,k)*w(i+1,j,k)-an(i,j,k)*w(i,j,k))/dx &
                         + ap(i,j,k)*(w(i,j,k)-w(i-1,j,k))/dx + an(i,j,k)*(w(i+1,j,k)-w(i,j,k))/dx) * 0.5
            dwdy(i,j,k) = ((bp(i,j,k)*w(i,j,k)-bp(i,j-1,k)*w(i,j-1,k))/dy + (bn(i,j+1,k)*w(i,j+1,k)-bn(i,j,k)*w(i,j,k))/dy &
                         + bp(i,j,k)*(w(i,j,k)-w(i,j-1,k))/dy + bn(i,j,k)*(w(i,j+1,k)-w(i,j,k))/dy) * 0.5
            dwdz(i,j,k) = ((dp(i,j,k)*w(i,j,k)-dp(i,j,k-1)*w(i,j,k-1))/dz+(dn(i,j,k+1)*w(i,j,k+1)-dn(i,j,k)*w(i,j,k))/dz &
                         + dp(i,j,k)*(w(i,j,k)-w(i,j,k-1))/dz + dn(i,j,k)*(w(i,j,k+1)-w(i,j,k))/dz) * 0.5
            wint(i,j,k) = dwdx(i,j,k) + dwdy(i,j,k) + dwdz(i,j,k)
         enddo
      enddo
   enddo

else
   write(*,*) "não possui esquema para o antissimétrico"
   STOP
endif
!==================================================================================================================
END SUBROUTINE antissim
|
{"hexsha": "1f58d5afd3e49df649874d79224d875f1efaf687", "size": 16655, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "SuLi_ic/7_advectivo.f90", "max_stars_repo_name": "pemiguell/SuLi", "max_stars_repo_head_hexsha": "8d9feb8f89bc44e2f870fb68023ef3b90e5ecfa6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SuLi_ic/7_advectivo.f90", "max_issues_repo_name": "pemiguell/SuLi", "max_issues_repo_head_hexsha": "8d9feb8f89bc44e2f870fb68023ef3b90e5ecfa6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SuLi_ic/7_advectivo.f90", "max_forks_repo_name": "pemiguell/SuLi", "max_forks_repo_head_hexsha": "8d9feb8f89bc44e2f870fb68023ef3b90e5ecfa6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4663608563, "max_line_length": 117, "alphanum_fraction": 0.481717202, "num_tokens": 7851}
|
import caffe
import numpy as np
import sys
import triplet.config as cfg
# Path of the Caffe binaryproto mean file to convert to .npy.
# (The original module-level `global mean_file` statement was a no-op —
# module scope is already global — and has been removed.)
mean_file = '/home/frank/triplet-master/data/models/softmax/mean.binaryproto'

if __name__ == '__main__':
    # `with` guarantees the handle is closed; the original leaked it via
    # open(mean_file, "rb").read().
    with open(mean_file, "rb") as f:
        proto_data = f.read()
    mean_blob = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
    arr = np.array(caffe.io.blobproto_to_array(mean_blob))
    # Keep the first entry, as in the original (presumably the batch axis —
    # verify against the caffe.io docs for the deployed version).
    out = arr[0]
    np.save(cfg.MEAN_NPY, out)
|
{"hexsha": "97ea27932bbda84bef2951e398669adc1b55aa13", "size": 468, "ext": "py", "lang": "Python", "max_stars_repo_path": "meanproto2npy.py", "max_stars_repo_name": "gustavkkk/image-classifier", "max_stars_repo_head_hexsha": "4991c9e828daf793b5b8378bf989d8fb89519204", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2017-07-23T03:22:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T23:56:41.000Z", "max_issues_repo_path": "meanproto2npy.py", "max_issues_repo_name": "deep2essence/image-classifier", "max_issues_repo_head_hexsha": "4991c9e828daf793b5b8378bf989d8fb89519204", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "meanproto2npy.py", "max_forks_repo_name": "deep2essence/image-classifier", "max_forks_repo_head_hexsha": "4991c9e828daf793b5b8378bf989d8fb89519204", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-10-23T14:19:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-30T14:57:19.000Z", "avg_line_length": 29.25, "max_line_length": 75, "alphanum_fraction": 0.7435897436, "include": true, "reason": "import numpy", "num_tokens": 127}
|
[STATEMENT]
lemma perp_inter_perp_in_n:
assumes "A B Perp C D"
shows "\<exists> P. Col A B P \<and> Col C D P \<and> P PerpAt A B C D"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>P. Col A B P \<and> Col C D P \<and> P PerpAt A B C D
[PROOF STEP]
by (simp add: assms perp_inter_perp_in)
|
{"llama_tokens": 135, "file": "IsaGeoCoq_Tarski_Neutral", "length": 1}
|
[STATEMENT]
lemma tabulate_parametric:
assumes [transfer_rule]: "bi_unique A"
shows "(list_all2 A ===> (A ===> B) ===> A ===> rel_option B)
(\<lambda>ks f. (map_of (map (\<lambda>k. (k, f k)) ks))) (\<lambda>ks f. (map_of (map (\<lambda>k. (k, f k)) ks)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_all2 A ===> (A ===> B) ===> A ===> rel_option B) (\<lambda>ks f. map_of (map (\<lambda>k. (k, f k)) ks)) (\<lambda>ks f. map_of (map (\<lambda>k. (k, f k)) ks))
[PROOF STEP]
by transfer_prover
|
{"llama_tokens": 222, "file": null, "length": 1}
|
import sys
import numpy as np
import pandas as pd
def kese_indicators():
    """Placeholder for the KESE indicator pipeline; not implemented yet."""
    return None
def _neb_raw_data_merge(df_bfs, df_pep, df_bds, df_bfs_march):
return df_bfs. \
merge(df_pep.drop('region', 1), how='left', on=['fips', 'time']).\
merge(df_bds.drop('region', 1), how='left', on=['fips', 'time']).\
merge(df_bfs_march.drop('region', 1), how='left', on=['fips', 'time'])
def neb_indicators(df_bfs, df_pep, df_bds, df_bfs_march):
    """Build the NEB indicator frame: merge the raw inputs, then derive ratios."""
    # todo: velocity?
    merged = _neb_raw_data_merge(df_bfs, df_pep, df_bds, df_bfs_march)
    renamed = merged.rename(columns={'avg_speed_annual': 'velocity'})
    return renamed.assign(
        actualization=lambda df: df['bf'] / df['ba'],
        bf_per_capita=lambda df: df['bf'] / df['population'] * 100,
        newness=lambda df: df['bf_march_shift'] / df['firms'],
    )
def _mpj_raw_data_merge(df_qwi, df_pep, df_earnbeg_us):
return df_qwi. \
merge(df_pep[['fips', 'time', 'population']], how='left', on=['fips', 'time']).\
merge(df_earnbeg_us, how='left', on='time')
def _missing_obs(df):
df.loc[df['EmpTotal'] == 0, 'constancy'] = np.nan
df.loc[df['EarnBeg'] == 0, 'compensation'] = np.nan
df.loc[df['emp_mid'] == 0, 'contribution'] = np.nan
return df
def _total_emp_create(df, contribution_by):
covars = ['fips', 'time']
if contribution_by:
covars = ['fips', 'time'] + contribution_by.split('_')
return df.\
assign(
within_count=lambda x: x[['emp_mid'] + covars].groupby(covars).transform('count'),
max_count=lambda x: x['within_count'].max(),
total_emp=lambda x: x[['emp_mid'] + covars].groupby(covars).transform(sum, min_count=int(x['max_count'].iloc[0]))
)
def mpj_indicators(df_qwi, df_pep, df_earnbeg_us, contribution_by=None, constancy_mult=1):
    """
    Merge the QWI, PEP, and national EarnBeg data and derive the MPJ indicators.

    contribution_by: str, None
        None: unconditional employment count, across all strata
        covariate name: covariate name to condition on
    constancy_mult: multiplier applied to the constancy ratio (default 1)

    Returns the merged dataframe with derived 'contribution', 'compensation',
    'constancy' and 'creation' columns; intermediate raw columns are dropped.

    Fix: the final ``drop(cols, 1)`` used the positional axis argument, which
    was removed in pandas 2.0; ``drop(columns=...)`` is the supported spelling.
    """
    # todo: need some restrictions for variable names in each of the dfs
    #   each row needs to be unique category within fips/time
    # todo: I want to drop the variables from pep and earnbeg_us
    intermediate_cols = [
        'ownercode', 'emp_mid', 'within_count', 'max_count', 'total_emp',
        'Emp', 'EmpEnd', 'EarnBeg', 'EmpS', 'EmpTotal', 'FrmJbC',
        'population', 'EarnBeg_us',
    ]
    return (
        _mpj_raw_data_merge(
            df_qwi,
            df_pep,
            df_earnbeg_us.rename(columns={'EarnBeg': 'EarnBeg_us'}),
        )
        # employment at the midpoint of the quarter
        .assign(emp_mid=lambda x: (x['Emp'] + x['EmpEnd']) / 2)
        .pipe(_total_emp_create, contribution_by)
        .assign(
            contribution=lambda x: x['emp_mid'] / x['total_emp'],
            compensation=lambda x: x['EarnBeg'] / x['EarnBeg_us'],
            constancy=lambda x: (x['EmpS'] / x['EmpTotal']) * constancy_mult,
            creation=lambda x: x['FrmJbC'] / x['population'] * 1000,
        )
        .pipe(_missing_obs)
        .drop(columns=intermediate_cols)
    )
|
{"hexsha": "6bf2a6baf936101837e20422a2d699c84b2c4544", "size": 3051, "ext": "py", "lang": "Python", "max_stars_repo_path": "kauffman/tools/_indicators.py", "max_stars_repo_name": "EMKF/downwardata", "max_stars_repo_head_hexsha": "60e4f547df69796f52a7ac7dd9417c44a733b396", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kauffman/tools/_indicators.py", "max_issues_repo_name": "EMKF/downwardata", "max_issues_repo_head_hexsha": "60e4f547df69796f52a7ac7dd9417c44a733b396", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kauffman/tools/_indicators.py", "max_forks_repo_name": "EMKF/downwardata", "max_forks_repo_head_hexsha": "60e4f547df69796f52a7ac7dd9417c44a733b396", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-28T17:15:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-01T14:21:31.000Z", "avg_line_length": 37.2073170732, "max_line_length": 169, "alphanum_fraction": 0.6073418551, "include": true, "reason": "import numpy", "num_tokens": 887}
|
from Adafruit_IO import *
import RPi.GPIO as GPIO
import time as yotimma
import numpy as np
import sounddevice as sd
# Connect to the Adafruit IO API.
# NOTE(review): the AIO key argument is empty here -- presumably stripped
# before publishing; the client cannot authenticate without it. Confirm.
aio = Client('Nizari' , '')
# Configure the GPIO pins (BCM numbering) for the PIR motion sensor.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
PIR_PIN = 3  # BCM pin number the PIR sensor's output is wired to
GPIO.setup(PIR_PIN, GPIO.IN)
# Announce that the script is ready.
print('Starting up the PIR Module (click on STOP to exit)')
print('Ready')
totalDb = []  # volume readings collected during the current measurement window
# How many seconds to average the sound level over after motion is detected.
duration = 3 #in seconds
# Stream callback that samples the current sound level.
def audio_callback(indata, frames, time, status):
    """sounddevice InputStream callback: record one volume reading.

    The reading is the scaled norm of the input buffer, truncated to an int;
    it is appended to the module-level ``totalDb`` accumulator and echoed.
    """
    level = int(np.linalg.norm(indata) * 10)
    totalDb.append(level)
    print(level)
# Upload the averaged sound level to the Adafruit IO dashboard.
def send_data(dbArray):
    """Send the average of the collected volume readings to Adafruit IO.

    Fix: guards against an empty reading list, which previously raised a
    ZeroDivisionError (possible when the stream produced no callbacks during
    the recording window); an empty window is reported as silence instead.
    Clears the shared ``totalDb`` accumulator for the next window.
    """
    if not dbArray:
        # Nothing was recorded; report 0 rather than crashing the main loop.
        aio.send("sound-levels", 0)
        return
    average = int(sum(dbArray) / len(dbArray))
    aio.send("sound-levels", average)
    totalDb.clear()
# Main loop: poll the PIR sensor; when motion is detected, record audio for
# `duration` seconds and upload the average sound level, otherwise report 0.
while True:
    if GPIO.input(PIR_PIN):
        print('Motion Detected')
        # audio_callback accumulates readings into totalDb while the stream is open
        stream = sd.InputStream(callback=audio_callback)
        with stream:
            sd.sleep(duration * 1000)
        send_data(totalDb)
    else:
        print('No Motion Detected')
        aio.send("sound-levels", 0)
        yotimma.sleep(3)
    yotimma.sleep(1)
|
{"hexsha": "03cfe4cf6e3bc0bae0bc9cf8073d808aba937b93", "size": 1464, "ext": "py", "lang": "Python", "max_stars_repo_path": "geluidleves.py", "max_stars_repo_name": "stijnana/druktemeter", "max_stars_repo_head_hexsha": "cdb9030e54985028ef75677c477c11caf7989a12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "geluidleves.py", "max_issues_repo_name": "stijnana/druktemeter", "max_issues_repo_head_hexsha": "cdb9030e54985028ef75677c477c11caf7989a12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geluidleves.py", "max_forks_repo_name": "stijnana/druktemeter", "max_forks_repo_head_hexsha": "cdb9030e54985028ef75677c477c11caf7989a12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6129032258, "max_line_length": 133, "alphanum_fraction": 0.7015027322, "include": true, "reason": "import numpy", "num_tokens": 403}
|
#Create line graph of loss chart. x-axis = # of epochs, y-axis = loss
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
# Total number of per-batch loss lines expected in the two logs combined.
train_num = 940+2350 #760 for flickr, 1880 for mscoco outdoor decoder
val_num = 90+220 #140 for flickr, 180 for mscoco outdoor decoder
# Spread the per-batch losses evenly across the 1..21 epoch x-axis range.
x_data_train = np.arange(1, 21, step=20/train_num) #1, 21 normally
x_data_val = np.arange(1, 21, step=20/val_num)
x_data_avgval = np.arange(1, 21, step=1)  # one point per epoch
print('x_data', x_data_train)
# Pre-sized loss buffers, filled by the two parsing passes below.
y_data_train = np.zeros((train_num))
y_data_val = np.zeros((val_num))
y_data_avgval = np.zeros((20))
#TOCHANGE:
infile = "output_mscoco_encoder_finetune.txt" #"output_outdoor.txt"
outfile = "loss_mscoco_outdoor_encoder_finetune.png" #"loss_mscoco_outdoor_decoder.png"
# First pass: parse per-batch train/val losses and per-epoch average
# validation losses out of the decoder-training log.
# NOTE(review): the filename is hard-coded to "output_outdoor.txt" instead of
# using `infile`; looks intentional (the `infile` log is parsed in the second
# pass below) -- confirm.
with open("output_outdoor.txt", "r") as output:
    train_i=0    # next write index into y_data_train
    val_i=0      # next write index into y_data_val
    valavg_i=0   # next write index into y_data_avgval
    for line in output:
        # per-batch training loss lines: "Epoch: ... Loss <value> ..."
        if line.startswith("Epoch:"):
            splitLine = line.split("Loss ")
            try:
                splitLine = splitLine[1].split(" ")
            except:
                # NOTE(review): bare except; after printing, float(...) below
                # will still raise on an unparseable line.
                print(splitLine)
            loss = float(splitLine[0])
            #print(loss)
            y_data_train[train_i] = loss
            train_i+=1
        # per-batch validation loss lines
        if line.startswith("Validation:"):
            splitLine = line.split("Loss ")
            try:
                splitLine = splitLine[1].split(" ")
            except:
                print(splitLine)
            loss = float(splitLine[0])
            #print(loss)
            y_data_val[val_i] = loss
            val_i+=1
        # per-epoch average validation loss lines: " * LOSS - <value>,"
        if line.startswith(" * LOSS"):
            splitLine = line.split("LOSS - ")
            try:
                splitLine = splitLine[1].split(",")
            except:
                print(splitLine)
            loss = float(splitLine[0])
            print('LOSS', loss)
            y_data_avgval[valavg_i] = loss
            valavg_i+=1
            # only the first 10 epochs of this log are used for the finetune graph
            if valavg_i == 10: break #FOR ENCODER FINETUNE GRAPH ONLY
    print(valavg_i)
# Second pass: append losses from the finetune log (`infile`), continuing at
# the buffer indices where the first pass stopped (train_i/val_i/valavg_i
# carry over from above).
with open(infile, "r") as output:
    for line in output:
        # per-batch training loss lines
        if line.startswith("Epoch:"):
            splitLine = line.split("Loss ")
            try:
                splitLine = splitLine[1].split(" ")
            except:
                print(splitLine)
            loss = float(splitLine[0])
            #print(loss)
            y_data_train[train_i] = loss
            train_i+=1
        # per-batch validation loss lines
        if line.startswith("Validation:"):
            splitLine = line.split("Loss ")
            try:
                splitLine = splitLine[1].split(" ")
            except:
                print(splitLine)
            loss = float(splitLine[0])
            #print(loss)
            y_data_val[val_i] = loss
            val_i+=1
        # per-epoch average validation loss lines
        if line.startswith(" * LOSS"):
            splitLine = line.split("LOSS - ")
            try:
                splitLine = splitLine[1].split(",")
            except:
                print(splitLine)
            loss = float(splitLine[0])
            print('LOSS', loss)
            y_data_avgval[valavg_i] = loss
            valavg_i+=1
    print(valavg_i)
# utility function: wrap a string in an ANSI terminal color escape
def color(s, color=None, lightness=0):
    """Return *s* wrapped in an ANSI color escape sequence.

    color: one of None, "r", "g", "br", "b", "p", "c", "gy"
           (red green brown blue purple cyan gray); None selects code 0.
    lightness: ANSI style prefix (0 = normal, 1 = bright).
    """
    palette = (None, "r", "g", "br", "b", "p", "c", "gy")
    prefix = "\033[{:d};3{:d}m".format(lightness, palette.index(color))
    return prefix + s + "\033[0m"
# Define a function for the line plot
def lineplot(x_label, y_label, title):
    """Plot the train/val/avg-val loss curves and save the figure.

    Reads the module-level globals x_data_train/val/avgval and
    y_data_train/val/avgval; shows the figure interactively and then writes
    it to ./lossgraphs/<outfile>.
    """
    # Create the plot object
    _, ax = plt.subplots()
    # Plot the best fit line, set the linewidth (lw), color and
    # transparency (alpha) of the line
    ax.plot(x_data_train, y_data_train, lw = 1, color = '#539caf', alpha = 1)
    ax.plot(x_data_val, y_data_val, lw=1, color = 'r', alpha = 1)
    ax.plot(x_data_avgval, y_data_avgval, lw=1, color='g', alpha=1)
    #ax.plot(x_data, lasso_y, lw=2, color = 'r', alpha = 1)
    #ax.fill_between(x_data, low_CIs, upper_CIs, color = '#539caf', alpha = 0.4, label = '95% CI for LinUCB')
    #ax.fill_between(x_data, lasso_low, lasso_upper, color = 'r', alpha = 0.4, label = '95% CI for Lasso')
    # Label the axes and provide a title
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    plt.legend(('train loss', 'validation loss', 'average validation loss'))
    # NOTE(review): savefig after show() can write a blank image with some
    # backends (show() may clear the figure) -- confirm the intended order.
    plt.show()
    plt.savefig('./lossgraphs/'+outfile)
# Numpy print/seed configuration, then render and save the plot.
np.set_printoptions(linewidth=999, edgeitems=10, suppress=True)
#warnings.filterwarnings("error")
np.random.seed(0)
lineplot(x_label = 'epochs', y_label = 'loss', title = 'MSCOCO Outdoor Encoder Finetuning Loss')
|
{"hexsha": "cc75ef3e5d1d5e91c1fb6dfaab9aa4b13756ff91", "size": 4611, "ext": "py", "lang": "Python", "max_stars_repo_path": "metrics_graphs.py", "max_stars_repo_name": "mayankiitg/a-PyTorch-Tutorial-to-Image-Captioning", "max_stars_repo_head_hexsha": "13b2ea499b1ad11d308640df82f1502ddde0e6f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-13T03:25:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-13T03:25:41.000Z", "max_issues_repo_path": "metrics_graphs.py", "max_issues_repo_name": "mayankiitg/a-PyTorch-Tutorial-to-Image-Captioning", "max_issues_repo_head_hexsha": "13b2ea499b1ad11d308640df82f1502ddde0e6f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "metrics_graphs.py", "max_forks_repo_name": "mayankiitg/a-PyTorch-Tutorial-to-Image-Captioning", "max_forks_repo_head_hexsha": "13b2ea499b1ad11d308640df82f1502ddde0e6f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4692307692, "max_line_length": 109, "alphanum_fraction": 0.5662546085, "include": true, "reason": "import numpy", "num_tokens": 1200}
|
from functools import lru_cache
import numpy as np
import pytest
from copulae import GaussianCopula
from copulae.core import cov2corr
@lru_cache(3)
def gen_corr(d=3) -> np.ndarray:
    """Return a deterministic random d x d correlation matrix (memoized)."""
    np.random.seed(10)
    draws = np.random.uniform(size=d * d).reshape(d, d)
    return cov2corr(draws @ draws.T)
def test_set_parameter():
    """Exercise every supported __setitem__ form on the correlation matrix."""
    corr = gen_corr()
    copula = GaussianCopula(3)
    # whole-matrix and slice assignments
    copula[:] = corr
    copula[:, 0] = corr[:, 0]
    copula[1, :] = corr[1, :]
    copula[:, :] = corr
    # single-entry assignments
    copula[1, 2] = 0.5
    copula[0] = 0.3
@pytest.mark.parametrize("category, value", [
    ["full", gen_corr() + 2],  # entries above 1
    ["full", gen_corr() - 2],  # entries below -1
    ["full", gen_corr(4)],  # wrong dimension
    ["slice", np.repeat(0.1, 4)],  # one more element than the slice holds
])
def test_set_parameter_value_error(category, value):
    """Ill-shaped or out-of-range values must be rejected with ValueError."""
    copula = GaussianCopula(3)
    with pytest.raises(ValueError):
        if category == 'full':
            copula[:] = value
        elif category == 'slice':
            copula[:, 0] = value
@pytest.mark.parametrize("index", [
    (0, 1, 2),  # too many axes
    (-1, 0),
    (0, 4),  # out of bounds
    (1, 1)  # diagonal entry
])
def test_set_parameter_index_error(index):
    """Invalid matrix indices must be rejected with IndexError on assignment."""
    copula = GaussianCopula(3)
    with pytest.raises(IndexError):
        copula[index] = 0.3
def test_get_parameter():
    """__getitem__ yields arrays for slices and a scalar for a single entry."""
    copula = GaussianCopula(3)
    copula[:] = gen_corr()
    for key in (slice(None), (slice(None), 0)):
        assert isinstance(copula[key], np.ndarray)
    assert isinstance(copula[0, 0], (float, int))
def test_get_parameter_index_error():
    """Invalid index types or arity on __getitem__ must raise IndexError."""
    copula = GaussianCopula(3)
    copula[:] = gen_corr()
    for bad_key in ((0, 0, 0), {}):
        with pytest.raises(IndexError):
            copula[bad_key]
|
{"hexsha": "cdaa696373cd2973a25aac966e3a3fef64ea594e", "size": 1614, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/elliptical/test_elliptical.py", "max_stars_repo_name": "CrisDS81/copulae", "max_stars_repo_head_hexsha": "2a312c2b849f95cfb2b40b381d34bc790d9d80c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 100, "max_stars_repo_stars_event_min_datetime": "2019-01-30T19:52:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T10:00:17.000Z", "max_issues_repo_path": "tests/elliptical/test_elliptical.py", "max_issues_repo_name": "CrisDS81/copulae", "max_issues_repo_head_hexsha": "2a312c2b849f95cfb2b40b381d34bc790d9d80c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2019-07-14T00:30:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-24T08:59:14.000Z", "max_forks_repo_path": "tests/elliptical/test_elliptical.py", "max_forks_repo_name": "CrisDS81/copulae", "max_forks_repo_head_hexsha": "2a312c2b849f95cfb2b40b381d34bc790d9d80c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2019-03-10T21:12:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T11:54:16.000Z", "avg_line_length": 21.8108108108, "max_line_length": 59, "alphanum_fraction": 0.5954151177, "include": true, "reason": "import numpy", "num_tokens": 482}
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
from enum import Enum
from typing import List
import numpy as np
import pandas as pd
from scipy.stats import t
class Prediction:
    """General Prediction class used to capture output from surrogate model .predict() methods.

    LegalColumnNames defines the known universe of .predict() dataframe columns. Column names
    are restricted to the enum values.
    """

    class LegalColumnNames(Enum):
        """Enum standardizing the data columns returned by a surrogate model's predict method.

        The class defines the "universe" of data returned by a predict() method, but not
        all surrogate models must return all columns.
        """
        # boolean indicating if predict() feature row could be used to make a prediction
        IS_VALID_INPUT = 'is_valid_input'

        # given an instance of the independent variable(s), what's the predicted dependent variable's value
        PREDICTED_VALUE = 'predicted_value'

        # References:
        #   https://en.wikipedia.org/wiki/Prediction_interval
        #   https://stats.stackexchange.com/questions/16493/difference-between-confidence-intervals-and-prediction-intervals
        #   https://haozhestat.github.io/files/manuscript_RFIntervals_FinalVersion.pdf
        #   https://www.theoj.org/joss-papers/joss.00124/10.21105.joss.00124.pdf
        PREDICTED_VALUE_VARIANCE = 'predicted_value_variance'
        PREDICTED_VALUE_DEGREES_OF_FREEDOM = 'predicted_value_degrees_of_freedom'
        PREDICTED_VALUE_STANDARD_DEVIATION = 'predicted_value_standard_deviation'

        # https://en.wikipedia.org/wiki/Sample_mean_and_covariance#Sample_mean
        SAMPLE_MEAN = 'sample_mean'

        # https://en.wikipedia.org/wiki/Variance#Sample_variance
        SAMPLE_VARIANCE = 'sample_variance'
        SAMPLE_SIZE = 'sample_size'
        DEGREES_OF_FREEDOM = 'degrees_of_freedom'

    @classmethod
    def create_prediction_from_dataframe(cls, objective_name: str, dataframe: pd.DataFrame):
        """Build a Prediction whose schema is inferred from the dataframe's columns.

        :param objective_name: name of the objective the prediction refers to (must not be None)
        :param dataframe: frame whose columns must all be LegalColumnNames values
        """
        assert objective_name is not None
        predictor_outputs = [
            Prediction.LegalColumnNames(column_name)
            for column_name
            in dataframe.columns.values
        ]
        return Prediction(
            objective_name=objective_name,
            predictor_outputs=predictor_outputs,
            dataframe=dataframe
        )

    def __init__(
            self,
            objective_name: str,
            predictor_outputs: List[LegalColumnNames],
            dataframe_index: pd.Index = None,
            dataframe: pd.DataFrame = None,
            num_head_rows_to_print: int = 1,
            allow_extra_columns: bool = False
    ):
        """Create a prediction container for one objective.

        :param objective_name: name of the predicted objective
        :param predictor_outputs: which LegalColumnNames columns this prediction carries
        :param dataframe_index: optional index used to pre-allocate the empty frame
        :param dataframe: optional initial data, validated via set_dataframe()
        :param num_head_rows_to_print: rows shown by __repr__
        :param allow_extra_columns: if False, reject columns outside the declared schema
        """
        self.objective_name = objective_name
        self.num_head_rows_to_print = num_head_rows_to_print

        # validate passed args
        for output_enum in predictor_outputs:
            assert output_enum in set(column_name for column_name in Prediction.LegalColumnNames), \
                f'PredictionSchema Error: Passed PredictionSchema enum "{output_enum}" not in Prediction.PredictionSchema'
        self.predictor_outputs = predictor_outputs

        # expect dataframe column names to be values from Enum above
        self.expected_column_names = [output_enum.value for output_enum in self.predictor_outputs]

        self.allow_extra_columns = allow_extra_columns

        self._dataframe = pd.DataFrame(columns=self.expected_column_names, index=dataframe_index)
        if dataframe is not None:
            self.set_dataframe(dataframe)

    def set_dataframe(self, dataframe: pd.DataFrame):
        """Validate and store the dataframe, replacing or updating the current one.

        If the existing frame is empty or shares the incoming index, the frame is
        replaced wholesale; otherwise only the rows at the incoming index are updated.
        """
        self.validate_dataframe(dataframe)
        if self._dataframe.index.empty or (len(self._dataframe.index) == len(dataframe.index) and self._dataframe.index.equals(dataframe.index)):
            self._dataframe = dataframe
        else:
            self._dataframe.loc[dataframe.index, self.expected_column_names] = dataframe[self.expected_column_names]

    def validate_dataframe(self, dataframe: pd.DataFrame):
        """Assert the dataframe matches the declared schema and variances are non-negative."""
        if not self.allow_extra_columns:
            # validate passed columns exist in LegalColumnNames enum
            for column_name in dataframe.columns.values:
                assert column_name in self.expected_column_names, \
                    f'PredictionSchema Error: Failed to find "{column_name}" in Prediction.PredictionSchema class'

        # validate all declared columns (in model's SCHEMA) are present in the dataframe
        for expected_column_name in self.expected_column_names:
            assert expected_column_name in dataframe.columns.values, \
                f'PredictionSchema Error: Failed to find expected column name "{expected_column_name}" in passed dataframe'

        mean_variance_col = self.LegalColumnNames.PREDICTED_VALUE_VARIANCE.value
        sample_variance_col = self.LegalColumnNames.SAMPLE_VARIANCE.value

        if mean_variance_col in self.expected_column_names:
            if dataframe[mean_variance_col].notnull().any():
                if not (dataframe[dataframe[mean_variance_col].notnull()][mean_variance_col] >= 0).all():
                    # Fix: filter on a full-length mask (NaN < 0 evaluates False, so
                    # null rows are excluded automatically). The previous code indexed
                    # the full frame with a partial-index boolean Series, which raises
                    # an alignment error instead of printing the diagnostics.
                    violated_rows_df = dataframe[dataframe[mean_variance_col] < 0]
                    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
                        print(violated_rows_df)
                        print(f"Num invalid rows: {len(violated_rows_df.index)}")
                        print(f"Index: {violated_rows_df.index}")
                        print(f"{mean_variance_col}: {violated_rows_df[mean_variance_col]}")
                    assert False

        if sample_variance_col in self.expected_column_names:
            if dataframe[sample_variance_col].notnull().any():
                assert (dataframe[dataframe[sample_variance_col].notnull()][sample_variance_col] >= 0).all()

    @classmethod
    def get_enum_by_column_name(cls, column_name):
        """Map a schema column name string back to its LegalColumnNames member."""
        return Prediction.LegalColumnNames(column_name)

    def get_dataframe(self):
        """Return the underlying prediction dataframe."""
        return self._dataframe

    @classmethod
    def dataframe_from_json(cls, json_string):
        """Deserialize a prediction dataframe from its 'index'-oriented JSON form."""
        return pd.read_json(json_string, orient='index')

    def dataframe_to_json(self):
        """Serialize the prediction dataframe to 'index'-oriented JSON."""
        return self.get_dataframe().to_json(orient='index', double_precision=15)

    def __repr__(self):
        rows_as_dict = self._dataframe.head(self.num_head_rows_to_print).to_dict(orient='records')
        return 'objective_name: {name}, dataframe.head({num_rows}): {rows_as_dict}'.format(
            name=self.objective_name,
            num_rows=self.num_head_rows_to_print,
            rows_as_dict=rows_as_dict
        )

    def add_invalid_rows_at_missing_indices(self, desired_index):
        """Pad the frame with invalid-prediction rows so its index matches desired_index."""
        assert self._dataframe.index.intersection(desired_index).equals(self._dataframe.index),\
            "Desired index must be a superset of the existing index."
        invalid_predictions_index = desired_index.difference(self._dataframe.index)
        self.add_invalid_prediction_rows(invalid_predictions_index)

    def add_invalid_prediction_rows(self, invalid_predictions_index):
        """ Inserts rows with LegalColumnNames.IS_VALID_INPUT column set to False, and all other columns set to NaN at specified index.

        This is useful if a model can only produce valid predictions for a subset of rows, but the caller expects a dataframe
        with index matching the index of the features dataframe.

        :param invalid_predictions_index: index at which to insert the invalid rows;
            must be disjoint from the existing index
        :return:
        """
        if not invalid_predictions_index.empty:
            assert invalid_predictions_index.intersection(self._dataframe.index).empty, "Valid and invalid indices cannot overlap."

            if self.LegalColumnNames.IS_VALID_INPUT.value not in self.expected_column_names:
                self.expected_column_names.append(self.LegalColumnNames.IS_VALID_INPUT.value)

            invalid_predictions_df = pd.DataFrame(columns=self.expected_column_names, index=invalid_predictions_index)
            invalid_predictions_df[self.LegalColumnNames.IS_VALID_INPUT.value] = False
            all_predictions_df = pd.concat([self._dataframe, invalid_predictions_df])
            all_predictions_df.sort_index(inplace=True)
            self.validate_dataframe(all_predictions_df)
            self._dataframe = all_predictions_df

    def add_standard_deviation_column(self) -> str:
        """Appends a standard deviation column to the prediction dataframe and returns the new column's name.

        This is a convenience function - many users of the Prediction object need to know the standard deviation rather than
        variance so it makes sense to add it as a feature here.

        :return: the new column's name
        """
        # Fix: use .value (the schema column name) rather than .name (the enum
        # member name 'PREDICTED_VALUE_STANDARD_DEVIATION'), keeping the new
        # column consistent with the rest of the schema.
        std_dev_col_name = self.LegalColumnNames.PREDICTED_VALUE_STANDARD_DEVIATION.value
        variance_col_name = self.LegalColumnNames.PREDICTED_VALUE_VARIANCE.value
        self._dataframe[std_dev_col_name] = np.sqrt(self._dataframe[variance_col_name])
        return std_dev_col_name

    def add_t_values_column(self, alpha: float) -> str:
        """Appends a t-values column for a given alpha to the prediction dataframe and returns the new column's name.

        :param alpha: significance level, strictly between 0 and 1
        :return: the new column's name
        """
        assert 0.0 < alpha < 1.0
        t_values_column_name = f"t_value_{(1-alpha)*100:.1f}".replace(".", "_point_")
        # Fix: index the dataframe with the enum's .value (the column name string);
        # the enum member itself is not a valid column key and raised a KeyError.
        dof_col_name = self.LegalColumnNames.PREDICTED_VALUE_DEGREES_OF_FREEDOM.value
        self._dataframe[t_values_column_name] = t.ppf(1 - alpha / 2.0, self._dataframe[dof_col_name])
        return t_values_column_name
|
{"hexsha": "f6dcb77a4e99835ccb82b4c19abc5514b9305b75", "size": 9847, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/Mlos.Python/mlos/Optimizers/RegressionModels/Prediction.py", "max_stars_repo_name": "amueller/MLOS", "max_stars_repo_head_hexsha": "8f79bfa27a6fd09c3e00187bae8d7177eaf55247", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-19T17:16:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T19:41:49.000Z", "max_issues_repo_path": "source/Mlos.Python/mlos/Optimizers/RegressionModels/Prediction.py", "max_issues_repo_name": "amueller/MLOS", "max_issues_repo_head_hexsha": "8f79bfa27a6fd09c3e00187bae8d7177eaf55247", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-05T20:17:10.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-06T03:10:02.000Z", "max_forks_repo_path": "source/Mlos.Python/mlos/Optimizers/RegressionModels/Prediction.py", "max_forks_repo_name": "amueller/MLOS", "max_forks_repo_head_hexsha": "8f79bfa27a6fd09c3e00187bae8d7177eaf55247", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-30T12:24:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T12:24:52.000Z", "avg_line_length": 48.2696078431, "max_line_length": 146, "alphanum_fraction": 0.6871128262, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1983}
|
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \file formic/utils/numeric.cpp
///
/// \brief implementation file for miscellaneous functions related to numbers
///
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "numeric.h"
#include "formic/utils/mpi_interface.h"
#include <boost/scoped_array.hpp>
#include <boost/format.hpp>
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief get an offset array used in compounding pairs of distinct indices
///
/// For two indices i,j with i < j, we have: compound(i,j) = i + ioff[j];
///
/// \param[in] n desired length of the array
/// \param[in,out] ioff on exit, the offset array
///
///////////////////////////////////////////////////////////////////////////////////////////////////
void formic::get_pair_ioff(int n, std::vector<int> & ioff) {

  // Fix: guard against non-positive n BEFORE resizing. Previously a negative n
  // was compared against / passed to size()-typed operations, so it converted
  // to a huge unsigned value and resize() attempted an enormous allocation.
  if ( n <= 0 ) {
    ioff.clear(); // matches the old behavior of resizing to length 0 for n == 0
    return;
  }

  // n is known positive here, so the cast avoids a signed/unsigned mismatch
  if (ioff.size() != static_cast<std::vector<int>::size_type>(n))
    ioff.resize(n);

  // ioff[j] = 0, 0, 1, 3, 6, ... so that compound(i,j) = i + ioff[j] for i < j
  ioff.at(0) = 0;
  for (int i = 1; i < n; i++)
    ioff.at(i) = ioff.at(i-1) + i - 1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief computes the binomial coefficient
///
/// \param[in] n number of things
/// \param[in] m how many things to take at a time
///
/// \return the number of ways n things can be taken m at a time
///
///////////////////////////////////////////////////////////////////////////////////////////////////
int formic::binom_coeff(int n, int m) {

  // out-of-range requests have zero combinations
  if (n < 0 || m < 0 || m > n) return 0;

  // multiply by n, n-1, ... while dividing by m, m-1, ...; interleaving the
  // division keeps the floating-point accumulator's magnitude small
  double acc = 1.0;
  for ( ; m > 0; --m, --n)
    acc = acc * n / m;

  // round to the nearest integer to undo any floating-point drift
  return int(acc + 0.5);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief Returns the number of solutions to the equation
/// x(1) + x(2) + ... + x(n) = r
/// when the variables x(i) are constrained to be
/// integers in the range (0, 1, 2, ..., k)
///
/// \param[in] n number of variables
/// \param[in] r sum of variables
/// \param[in] k range of each variable
/// \param[out] work integer workspace, either null or size >= k+1
///
///////////////////////////////////////////////////////////////////////////////////////////////////
int formic::n_integer_solutions(const int n, const int r, const int k, int * work) {

  assert( n >= 0 );
  assert( r >= 0 );
  assert( k >= 0 );

  // if requested, dynamically allocate the work space
  boost::scoped_array<int> dynamic_work;
  if (work == 0) {
    dynamic_work.reset( new int[k+1] );
    work = dynamic_work.get();
  }

  // initialize an array to hold the number of variables having each allowed value
  // (n_with_value[i] = how many of the variables are equal to i)
  int * const n_with_value = work;
  work += (k+1);
  for (int i = 0; i <= k; i++)
    n_with_value[i] = 0;

  // initialize the return value
  int retval = 0;

  // Loop over all possible distributions of variables among the values.
  // Note that we do not directly track of how many variables are equal to zero,
  // as this is known by how many variables take on other values.
  while (true) {

    // compute the number of nonzero variables
    int n_nonzero = 0;
    for (int i = 1; i <= k; i++)
      n_nonzero += n_with_value[i];

    // compute the sum of the variables
    int sum = 0;
    for (int i = 1; i <= k; i++)
      sum += i * n_with_value[i];

    // if this distribution solves the equation, count how many ways it can occur
    if (sum == r && n_nonzero <= n) {

      // determine how many variables are nonzero
      int t = 0;
      for (int i = 1; i <= k; i++)
        t += n_with_value[i];

      // count how many ways the variables can satisfy this distribution:
      // choose which variables are nonzero, then which of those hold each value
      int occurrences = formic::binom_coeff(n, t);
      for (int i = 1; i < k; i++) {
        occurrences *= formic::binom_coeff(t, n_with_value[i]);
        t -= n_with_value[i]; // t is now equal to the number of variables greater than i
      }

      // record how many ways the variables satisfy this distribution
      retval += occurrences;
    }

    // increment to the next distribution of variables
    // (odometer-style: each counter runs 0..n, carrying into the next slot)
    int p;
    for (p = k; p > 0; p--)
      if (++n_with_value[p] > n)
        n_with_value[p] = 0;
      else
        break;

    // stop iterating if all distributions have been processed
    if (p == 0) break;

  }

  // return the result
  return retval;

}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief formats a real number into a string
///
/// \param[in] f the formatting string used by boost::format
/// \param[in] value the number to be formatted
///
/// \return the string containing the formatted number
///
///////////////////////////////////////////////////////////////////////////////////////////////////
std::string formic::format_number(const std::string & f, const double value) {
return (boost::format(f) % value).str();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief formats a complex number into a string
///
/// \param[in] f the formatting string used by boost::format
/// \param[in] value the number to be formatted
///
/// \return the string containing the formatted number
///
///////////////////////////////////////////////////////////////////////////////////////////////////
std::string formic::format_number(const std::string & f, const std::complex<double> value) {
  // format the real and imaginary parts separately with the same pattern,
  // then assemble them as "( re, im )"
  const std::string real_part = (boost::format(f) % value.real()).str();
  const std::string imag_part = (boost::format(f) % value.imag()).str();
  return "( " + real_part + ", " + imag_part + " )";
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief computes the unbiased estimate of a ratio of means: <f>_p / <g>_p in which the
/// numerator and denominator values are sampled from the same probability distribution p
///
/// \param[in] n the number of samples
/// \param[in] p the probability weight for each sample
/// \param[in] f the numerator samples
/// \param[in] g the denominator samples
/// \param[out] r on exit, the estimate of the ratio <f>_p / <g>_p
/// \param[out] v on exit, the estimate of the variance in the ratio
///
///////////////////////////////////////////////////////////////////////////////////////////////////
void formic::unbiased_ratio_of_means(const int n, const double * const p, const double * const f, const double * const g, double & r, double & v) {

  // compute the normalization, the numerator and denominator means, the means of the squares, and the mean of the products
  // (all accumulated in a single weighted pass over the n samples)
  double nm = 0.0; // normalization constant (sum of weights p)
  double mf = 0.0; // mean of numerator
  double mg = 0.0; // mean of denominator
  double sf = 0.0; // mean of the square of the numerator terms
  double sg = 0.0; // mean of the square of the denominator terms
  double mp = 0.0; // mean of the product of numerator times denominator
  for (int i = 0; i < n; i++) {
    nm += p[i];
    double x = p[i] * f[i];
    mf += x;
    sf += x * f[i];
    mp += x * g[i];
    x = p[i] * g[i];
    mg += x;
    sg += x * g[i];
  }
  mf /= nm;
  mg /= nm;
  sf /= nm;
  sg /= nm;
  mp /= nm;

  // compute the numerator and denominator variances and the covariance
  // (n/(n-1) is the standard sample-variance bias correction; n must be >= 2)
  const double vf = ( sf - mf * mf ) * double(n) / double(n-1);
  const double vg = ( sg - mg * mg ) * double(n) / double(n-1);
  const double cv = ( mp - mf * mg ) * double(n) / double(n-1);

  // compute the unbiased estimate of the ratio of means
  // (mf/mg rescaled by a 1/n correction term built from vg and cv)
  r = ( mf / mg ) / ( 1.0 + ( vg / mg / mg - cv / mf / mg ) / double(n) );

  // compute the unbiased estimate of the variance of the ratio of means
  v = ( mf * mf / mg / mg / double(n) ) * ( vf / mf / mf + vg / mg / mg - 2.0 * cv / mf / mg );

}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief computes the unbiased estimate of a ratio of means: <f>_p / <g>_p in which the
/// numerator and denominator values are sampled from the same probability distribution p
/// and samples are combined across all processors
///
/// \param[in] n the number of samples on this process
/// \param[in] p the probability weight for each sample
/// \param[in] f the numerator samples
/// \param[in] g the denominator samples
/// \param[out] r on exit, the estimate of the ratio <f>_p / <g>_p
/// \param[out] v on exit, the estimate of the variance in the ratio
///
///////////////////////////////////////////////////////////////////////////////////////////////////
void formic::mpi_unbiased_ratio_of_means(const int n, const double * const p, const double * const f, const double * const g, double & r, double & v) {
  // compute the normalization, the numerator and denominator means, the means of the squares, and the mean of the products
  // (local partial sums; combined across processes by the allreduce below)
  double y[7];
  y[0] = 0.0; // normalization constant
  y[1] = 0.0; // mean of numerator
  y[2] = 0.0; // mean of denominator
  y[3] = 0.0; // mean of the square of the numerator terms
  y[4] = 0.0; // mean of the square of the denominator terms
  y[5] = 0.0; // mean of the product of numerator times denominator
  y[6] = double(n); // number of samples
  for (int i = 0; i < n; i++) {
    y[0] += p[i];
    double x = p[i] * f[i];
    y[1] += x;
    y[3] += x * f[i];
    y[5] += x * g[i];
    x = p[i] * g[i];
    y[2] += x;
    y[4] += x * g[i];
  }
  // sum the partial accumulators over all processes
  double z[7];
  formic::mpi::allreduce(&y[0], &z[0], 7, MPI_SUM);
  const double mf = z[1] / z[0]; // mean of numerator
  const double mg = z[2] / z[0]; // mean of denominator
  const double sf = z[3] / z[0]; // mean of the square of the numerator terms
  const double sg = z[4] / z[0]; // mean of the square of the denominator terms
  const double mp = z[5] / z[0]; // mean of the product of numerator times denominator
  const double ns = z[6]; // number of samples
  // compute the numerator and denominator variances and the covariance
  // (the ns/(ns-1) factor applies Bessel's correction to the biased estimates)
  const double vf = ( sf - mf * mf ) * ns / ( ns - 1.0 );
  const double vg = ( sg - mg * mg ) * ns / ( ns - 1.0 );
  const double cv = ( mp - mf * mg ) * ns / ( ns - 1.0 );
  // compute the unbiased estimate of the ratio of means
  r = ( mf / mg ) / ( 1.0 + ( vg / mg / mg - cv / mf / mg ) / ns );
  // compute the unbiased estimate of the variance of the ratio of means
  // NOTE(review): unlike the serial unbiased_ratio_of_means above, this
  // expression omits a final division by ns -- confirm whether the variance
  // of the mean (divide by ns) or of the samples is intended here.
  v = ( mf * mf / mg / mg ) * ( vf / mf / mf + vg / mg / mg - 2.0 * cv / mf / mg );
}
|
{"hexsha": "049012874b5c0ed79eb3bc2ab7d38447b3026926", "size": 10736, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/formic/utils/numeric.cpp", "max_stars_repo_name": "eugeneswalker/qmcpack", "max_stars_repo_head_hexsha": "352ff27f163bb92e0c232c48bec8ae7951ed9d8c", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/formic/utils/numeric.cpp", "max_issues_repo_name": "eugeneswalker/qmcpack", "max_issues_repo_head_hexsha": "352ff27f163bb92e0c232c48bec8ae7951ed9d8c", "max_issues_repo_licenses": ["NCSA"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2020-05-09T20:57:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-10T00:00:17.000Z", "max_forks_repo_path": "src/formic/utils/numeric.cpp", "max_forks_repo_name": "williamfgc/qmcpack", "max_forks_repo_head_hexsha": "732b473841e7823a21ab55ff397eed059f0f2e96", "max_forks_repo_licenses": ["NCSA"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7581227437, "max_line_length": 151, "alphanum_fraction": 0.506147541, "num_tokens": 2693}
|
import numpy as np
def get_board(state, b0=None):
    """Decode a base-3 encoded game state into a 3x3 board.

    Cell i (in flat/raveled order) holds the base-3 digit of ``state`` at
    weight 3**i. If ``b0`` is given, its raveled view is filled in place and
    nothing is returned; otherwise a fresh int8 (3, 3) array is returned.
    """
    if b0 is None:
        cells = np.empty(9, dtype=np.int8)
    else:
        cells = b0.ravel()
    remainder = state
    # Peel off the base-3 digits from the most significant (index 8) down.
    for idx in range(8, 0, -1):
        digit = remainder // (3 ** idx)
        cells[idx] = digit
        remainder -= digit * (3 ** idx)
    cells[0] = remainder
    if b0 is None:
        return cells.reshape((3, 3))
def get_state(board):
    """Encode a 3x3 board into a single base-3 integer (inverse of get_board)."""
    weights = 3 ** np.arange(9)
    return board.ravel().dot(weights)
def decide_win(board):
    """Judge a tic-tac-toe position.

    Returns 1 or 2 if that player has a full row, column, or diagonal;
    -1 for a draw (no empty cells, no winner); 0 if the game is still open.
    """
    for player in (1, 2):
        hits = board == player
        won_row = np.any(np.all(hits, axis=1))
        won_col = np.any(np.all(hits, axis=0))
        won_diag = np.all(np.diag(hits))
        won_anti = np.all(np.diag(np.fliplr(hits)))
        if won_row or won_col or won_diag or won_anti:
            return player
    if np.any(board == 0):
        return 0
    return -1
def list_empty_indices(board):
    """Return the flat indices of all empty (zero) cells on the board."""
    flat = board.ravel()
    return np.flatnonzero(flat == 0)
|
{"hexsha": "08bfa7f3c855337f7c5851a915b51d387ed38993", "size": 806, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/functions.py", "max_stars_repo_name": "Ollehto/ox_lib", "max_stars_repo_head_hexsha": "2326aad94be4635b43d70fbedc63e669b2643019", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/functions.py", "max_issues_repo_name": "Ollehto/ox_lib", "max_issues_repo_head_hexsha": "2326aad94be4635b43d70fbedc63e669b2643019", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/functions.py", "max_forks_repo_name": "Ollehto/ox_lib", "max_forks_repo_head_hexsha": "2326aad94be4635b43d70fbedc63e669b2643019", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 166, "alphanum_fraction": 0.635235732, "include": true, "reason": "import numpy", "num_tokens": 282}
|
from typing import Dict, List, NamedTuple, Tuple
from bs4 import BeautifulSoup, Tag
from matplotlib.path import Path
from numpy import ndarray
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import unary_union
from svgpath2mpl import parse_path
from .logs.log import get_logger
LOGGER = get_logger(__name__)
RUSSIA_COLOR = "#ebc0b3"
UKRAINE_COLOR = "#e3d975"
class _UkraineMapData(NamedTuple):
    """SVG path strings extracted from the war-map SVG, grouped by category."""

    water: List[str]
    ukraine_controlled: List[str]  # russia should be rendered OVER ukraine
    russia_controlled: List[str]
    cities: Dict[str, Tuple[float, float]]  # names to coords
def _get_basemap_and_filled_in_water(svg_xml: str) -> _UkraineMapData:
    """Extract SVG path strings for water and controlled areas from the map.

    Paths are classified by their inline ``style`` attribute: river paths
    whose style is not ``fill:none`` count as rendered water; basemap paths
    are matched on their fill color (UKRAINE_COLOR / RUSSIA_COLOR).

    Args:
        svg_xml: SVG data in the form of an XML string.

    Returns:
        _UkraineMapData with the collected path strings; ``cities`` is not
        populated yet (TODO carried over from the original code).
    """
    soup = BeautifulSoup(svg_xml, features="html.parser")
    filled_water: List[str] = []
    ukraine_controlled: List[str] = []
    russia_controlled: List[str] = []
    cities: Dict[str, Tuple[float, float]] = {}  # TODO: parse city markers
    # Rivers layer: anything with an actual fill is rendered water.
    for path in soup.find(
        "g", {"id": "layer2", "inkscape:label": "Rivers"}
    ).findChildren("path"):
        if "fill:none" not in path.get("style").split(";"):
            filled_water.append(path.get("d"))
    # Basemap layer: classify controlled areas by fill color.
    for path in soup.find(
        "g", {"id": "layer1", "inkscape:label": "Basemap"}
    ).findChildren("path"):
        style_parts = path.get("style").split(";")
        if f"fill:{UKRAINE_COLOR}" in style_parts:
            ukraine_controlled.append(path.get("d"))
        if f"fill:{RUSSIA_COLOR}" in style_parts:
            russia_controlled.append(path.get("d"))
    return _UkraineMapData(filled_water, ukraine_controlled, russia_controlled, cities)
def _convert_svg_path_string_to_shapely(svg_paths: List[str]) -> MultiPolygon:
    """Convert SVG path strings into a single merged shapely geometry.

    Each path string is parsed to a matplotlib path, broken into polygon
    rings, repaired with a zero-width buffer when invalid, and finally
    merged via unary_union.
    """
    polygons: List[Polygon] = []
    for svg_path in svg_paths:
        matplotlib_path: Path = parse_path(svg_path)
        for coords in matplotlib_path.to_polygons():
            polygons.append(Polygon(coords))
    # buffer(0) is the standard shapely trick to fix self-intersections
    repaired = [poly if poly.is_valid else poly.buffer(0) for poly in polygons]
    merged: MultiPolygon = unary_union(repaired)
    return merged
def get_areas(svg_xml: str) -> Tuple[float, float]:
    """Compute the land areas controlled by each side from the SVG map.

    Args:
        svg_xml (str): SVG data in the form of an XML string.

    Returns:
        Tuple[float, float]: Russia- and separatist-controlled area,
        Ukraine-controlled area.
    """
    map_data = _get_basemap_and_filled_in_water(svg_xml)
    water_poly = _convert_svg_path_string_to_shapely(map_data.water)
    # Ukraine is a base layer in the SVG, so it overlaps the Russian areas.
    ukraine_poly = _convert_svg_path_string_to_shapely(map_data.ukraine_controlled)
    russian_poly = _convert_svg_path_string_to_shapely(map_data.russia_controlled)
    # Subtract water from both, then remove the Russian overlap from Ukraine.
    russian_land = russian_poly.difference(water_poly)
    ukraine_land = ukraine_poly.difference(water_poly).difference(russian_land)
    return float(russian_land.area), float(ukraine_land.area)
|
{"hexsha": "9ce226a348ece8c08b9c5de73ca34490dd85d0e8", "size": 3026, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ukraine_war_map_twitter_bot/analyze.py", "max_stars_repo_name": "a2435191/ukraine-war-map-twitter-bot", "max_stars_repo_head_hexsha": "3218e0d1754b5a21bc995781c572e05e97151959", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ukraine_war_map_twitter_bot/analyze.py", "max_issues_repo_name": "a2435191/ukraine-war-map-twitter-bot", "max_issues_repo_head_hexsha": "3218e0d1754b5a21bc995781c572e05e97151959", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ukraine_war_map_twitter_bot/analyze.py", "max_forks_repo_name": "a2435191/ukraine-war-map-twitter-bot", "max_forks_repo_head_hexsha": "3218e0d1754b5a21bc995781c572e05e97151959", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4578313253, "max_line_length": 115, "alphanum_fraction": 0.6953073364, "include": true, "reason": "from numpy", "num_tokens": 796}
|
include 'VICMAIN_FOR'
      subroutine main44
c Revision History:
c 02 Jan 1995 ... CRI ... MSTP S/W Conversion (VICAR Porting)
c------ program CAMPARAM
c------ Program CAMPARAM will fill the LOCAL variables;
c------ "sc", "scan", "camera", "filter", "fds" and "exprng"
c------ and return the variables to the calling proc. It will
c------ acquire the data via an able77 call on the VGR label.
c
c------ cam: camera name lookup (index 1 = NA narrow angle, 2 = WA wide)
c------ flt: filter name lookup, indexed by (3-camera, filter position+1)
      character*2 cam(2)
      character*4 irange,ISECCHR
      integer*4 able(19),parb(500)
      character*80 MSG
      character*4 ISECSTR
      character*3 flt(2,8)
      character*1 virgule
      integer ist, unit, ind, isec, nc
      real exp
      DATA FLT / 'CHJ','CLR','BLU','VIO',
     1 'CLR','BLU','VIO','ORG',
     2 'NAD','CLR','GRN','GRN',
     3 'CH4','GRN','ORG','UV '/
C
      data cam /'NA','WA'/
      data irange /' '/
      data isecchr /' '/
      data isecstr /' '/
      data virgule /'/'/
c------ initialize message buffer and the able/parameter work arrays
      msg = ' '
      call zia (able,19)
      call zia (parb,500)
      call ifmessage ('CAMPARAM version 02-Jan-95')
c------ open the input image and read its VGR label via able77v2
      call xvunit(unit,'INP',1,ist,' ')
      call xvopen(unit,ist,' ')
      able(1) = 19
      call able77v2(ind,unit,able)
c
c------ able(3) holds the exposure time; copy its bits into the real exp
      call mve (4,1,able(3),exp,1,1)
c-------calculate the exposure range
      NC = 4
      if (exp .lt. 23000.) then
         irange = 'A'
      else if(exp .lt. 62000.) then
         irange = 'B'
      else
c------ range C: also encode the exposure time in whole seconds
         irange = 'C'
         ISEC = IFIX(EXP/1000.)
         IF (ISEC .LT. 100) then
            NC = 2
            write (ISECSTR(1:2),'(I2)') ISEC
         else IF (ISEC .LT. 1000) then
            NC = 3
            write (ISECSTR(1:3),'(I3)') ISEC
         else
            write (ISECSTR(1:4),'(I4)') ISEC
         end if
         write (ISECCHR(1:4),'(A4)') ISECSTR(1:4)
      end if
c------ report the extracted label values to the user
      call xvmessage(' ',' ')
      call xvmessage('From the Voyager Label:',' ')
      call xvmessage
     &(' FDS SC CAMERA FILTER EXP(SEC)/RANGE SCAN',' ')
      MSG = ' '
      WRITE (MSG(2:8), '(I7)') ABLE(2)
      WRITE (MSG(12:12),'(I1)') ABLE(19)
      WRITE (MSG(36:43),'(F8.3)') EXP/1000.
      WRITE (MSG(53:54),'(I2)') ABLE(5)
      write (msg(18:19),'(A2)') cam(able(7))
      write (msg(27:29),'(A3)') flt(3-able(7),able(4)+1)
      write (msg(44:44),'(A1)') virgule
      IF (IRANGE .NE. 'C') THEN
        write (msg(45:48),'(A4)') irange
      ELSE
        write (msg(45:48),'(A4)') ISECCHR
      END IF
      call xvmessage (msg,' ')
c
c------ return the extracted values to the calling proc through the
c------ output parameter block (xq* TAE parameter-block routines)
      call xqini(parb,500,xabort)
      call xqintg(parb,'SC',1,able(19),xadd,ist)
      call xqintg(parb,'SCAN',1,able(5),xadd,ist)
      call xqstr(parb,'CAMERA',1,cam(able(7)),xadd,ist)
      call xqstr(parb,'FILTER',1,flt(3-able(7),able(4)+1),xadd,ist)
      call xqintg(parb,'FDS',1,able(2),xadd,ist)
      IF (IRANGE .NE. 'C') THEN
        call xqstr(parb,'EXPRNG',1,irange,xadd,ist)
      ELSE
        call xqstr(parb,'EXPRNG',1,ISECCHR,xadd,ist)
      END IF
      call xvqout(parb,ist)
      call xvclose(unit,ist,' ')
      return
      end
|
{"hexsha": "8de21e955bc1c7746814c4073baacee9f9d22146", "size": 3044, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p2/prog/camparam/camparam.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p2/prog/camparam/camparam.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p2/prog/camparam/camparam.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 28.9904761905, "max_line_length": 69, "alphanum_fraction": 0.5180683311, "num_tokens": 1110}
|
using Dates: Hour, Minute, Second, Millisecond, days, hour, minute, second, millisecond
"""
timezone(::ZonedDateTime) -> TimeZone
Returns the `TimeZone` used by the `ZonedDateTime`.
"""
timezone(zdt::ZonedDateTime) = zdt.timezone
Dates.days(zdt::ZonedDateTime) = days(DateTime(zdt))
for period in (:Hour, :Minute, :Second, :Millisecond)
accessor = Symbol(lowercase(string(period)))
@eval begin
Dates.$accessor(zdt::ZonedDateTime) = $accessor(DateTime(zdt))
Dates.$period(zdt::ZonedDateTime) = $period($accessor(zdt))
end
end
Base.eps(::ZonedDateTime) = Millisecond(1)
|
{"hexsha": "edf36494ab85a4970d4b961c4b1cf64a612252cd", "size": 605, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/accessors.jl", "max_stars_repo_name": "NHDaly/TimeZones.jl", "max_stars_repo_head_hexsha": "71178fefd23a8ad00f43aacfcde74720f1abfd07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2017-01-18T02:16:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T22:13:43.000Z", "max_issues_repo_path": "src/accessors.jl", "max_issues_repo_name": "NHDaly/TimeZones.jl", "max_issues_repo_head_hexsha": "71178fefd23a8ad00f43aacfcde74720f1abfd07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 321, "max_issues_repo_issues_event_min_datetime": "2016-06-16T13:56:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T20:59:53.000Z", "max_forks_repo_path": "src/accessors.jl", "max_forks_repo_name": "NHDaly/TimeZones.jl", "max_forks_repo_head_hexsha": "71178fefd23a8ad00f43aacfcde74720f1abfd07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 52, "max_forks_repo_forks_event_min_datetime": "2016-06-21T00:42:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-21T18:24:31.000Z", "avg_line_length": 28.8095238095, "max_line_length": 87, "alphanum_fraction": 0.7008264463, "num_tokens": 157}
|
"""
Tests of neo.io.axonaio
"""
import unittest
from neo.io.axonaio import AxonaIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.io.proxyobjects import (AnalogSignalProxy,
SpikeTrainProxy, EventProxy, EpochProxy)
from neo import (AnalogSignal, SpikeTrain)
import quantities as pq
import numpy as np
class TestAxonaIO(BaseTestIO, unittest.TestCase, ):
    """Run neo's common IO test-suite (BaseTestIO) against AxonaIO."""

    ioclass = AxonaIO
    # Datasets fetched by the common IO test machinery before the tests run.
    entities_to_download = [
        'axona'
    ]
    # Files exercised by the generic read tests.
    entities_to_test = [
        'axona/axona_raw.set'
    ]
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "cea32570871534de50c0c0e762dc23ba74a7d7c2", "size": 572, "ext": "py", "lang": "Python", "max_stars_repo_path": "neo/test/iotest/test_axonaio.py", "max_stars_repo_name": "teogale/python-neo", "max_stars_repo_head_hexsha": "cd4226ddcfbace080c4734f562f706423979f2dc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neo/test/iotest/test_axonaio.py", "max_issues_repo_name": "teogale/python-neo", "max_issues_repo_head_hexsha": "cd4226ddcfbace080c4734f562f706423979f2dc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neo/test/iotest/test_axonaio.py", "max_forks_repo_name": "teogale/python-neo", "max_forks_repo_head_hexsha": "cd4226ddcfbace080c4734f562f706423979f2dc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.724137931, "max_line_length": 56, "alphanum_fraction": 0.7027972028, "include": true, "reason": "import numpy", "num_tokens": 147}
|
"""
Library Features:
Name: lib_snowblending_generic
Author(s): Francesco Avanzi (francesco.avanzi@cimafoundation.org), Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20210525'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import os
import re
import numpy as np
from shutil import rmtree
from random import randint
from copy import deepcopy
from datetime import datetime
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to search root path
def get_root_path(generic_path):
    """Return generic_path with every '{tag}' placeholder replaced by ''."""
    placeholders = re.findall(r"\{([A-Za-z0-9_]+)\}", generic_path)
    empty_map = {name: '' for name in placeholders}
    return generic_path.format(**empty_map)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to list sub-folders
def list_folder(main_path, reverse=True):
    """List every folder under main_path (os.walk order, reversed by default)."""
    folders = [entry[0] for entry in os.walk(main_path)]
    if reverse:
        folders.reverse()
    return folders
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create a random string
def random_string(string_root='temporary', string_separetor='_', rand_min=0, rand_max=1000):
    """Build a quasi-unique name: root + timestamp + random int, joined by separator."""
    # Timestamp down to microseconds to keep collisions unlikely.
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S_%f')
    suffix = str(randint(rand_min, rand_max))
    return string_separetor.join([string_root, stamp, suffix])
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to delete folder (and check if folder exists)
def delete_folder(path_folder):
    """Remove a folder tree if present (read-only files included); no-op otherwise."""
    if not os.path.exists(path_folder):
        return
    # ignore_errors also forces removal of read-only entries
    rmtree(path_folder, ignore_errors=True)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to make folder
def make_folder(path_folder):
    """Create a folder (with parents) unless it already exists."""
    if os.path.exists(path_folder):
        return
    os.makedirs(path_folder)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to fill tags in an unfilled string (path or filename)
def fill_tags2string(string_raw, tags_format=None, tags_filling=None):
    """Fill '{tag}' placeholders in a path/filename template.

    Two-stage substitution: first each '{tag}' found in ``string_raw`` is
    replaced by its format string from ``tags_format`` (e.g. '%Y%m%d' or
    '{:02d}'); then each format string is replaced by the corresponding value
    from ``tags_filling`` (datetime values are strftime-formatted, numeric
    values are str.format-formatted). Double slashes are collapsed at the end
    (NOTE: this would also mangle URL schemes like 'http://' -- the function
    is intended for filesystem paths).

    Args:
        string_raw: template string (or None)
        tags_format: mapping tag name -> format string; None entries are skipped
        tags_filling: mapping tag name -> value used to render the format string

    Returns:
        The filled string, or ``string_raw`` unchanged when it is None or
        contains no known tag.
    """
    # Fixes vs. original: guard against tags_format=None (previously crashed on
    # .keys()), and initialize string_filled so that a template whose tags all
    # have None formats no longer raises UnboundLocalError.
    apply_tags = False
    if string_raw is not None and tags_format is not None:
        for tag in list(tags_format.keys()):
            if tag in string_raw:
                apply_tags = True
                break
    if not apply_tags:
        return string_raw

    string_filled = string_raw  # defensive init (see note above)
    tags_format_tmp = deepcopy(tags_format)
    # Stage 1: '{tag}' -> format string; drop tags with no format.
    for tag_key, tag_value in tags_format.items():
        tag_key_tmp = '{' + tag_key + '}'
        if tag_value is not None:
            if tag_key_tmp in string_raw:
                string_filled = string_raw.replace(tag_key_tmp, tag_value)
                string_raw = string_filled
        else:
            tags_format_tmp.pop(tag_key, None)
    # Stage 2: format string -> rendered value.
    for tag_format_name, tag_format_value in list(tags_format_tmp.items()):
        if tag_format_name in list(tags_filling.keys()):
            tag_filling_value = tags_filling[tag_format_name]
            if tag_filling_value is not None:
                if isinstance(tag_filling_value, datetime):
                    tag_filling_value = tag_filling_value.strftime(tag_format_value)
                if isinstance(tag_filling_value, (float, int)):
                    tag_filling_value = tag_format_value.format(tag_filling_value)
                string_filled = string_filled.replace(tag_format_value, tag_filling_value)
    string_filled = string_filled.replace('//', '/')
    return string_filled
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get dictionary values using a key
def get_dict_values(d, key, value=None):
    """Recursively collect the value(s) stored under ``key`` in a nested dict.

    If the matched value is itself a dict, its items are appended to the
    accumulator as [k, v] pairs. A top-level non-dict match rebinds and
    returns the bare value instead of appending.

    Args:
        d: (possibly nested) dictionary to search
        key: key to look for
        value: accumulator list; a fresh list is created per call when omitted
            (fix: the original used the mutable default ``value=[]``, which
            leaked accumulated results across separate calls)

    Returns:
        The accumulator (list), a bare matched value, or a numpy array.
    """
    if value is None:
        value = []
    for k, v in iter(d.items()):
        if isinstance(v, dict):
            if k == key:
                # The match is itself a dict: record its items as [k, v] pairs.
                for kk, vv in iter(v.items()):
                    temp = [kk, vv]
                    value.append(temp)
            else:
                # Recurse into the sub-dictionary, sharing the accumulator.
                vf = get_dict_values(v, key, value)
                if isinstance(vf, list):
                    if vf:
                        vf_end = vf[0]
                    else:
                        vf_end = None
                elif isinstance(vf, np.ndarray):
                    vf_end = vf.tolist()
                else:
                    vf_end = vf
                # NOTE: falsy results (0, '', empty containers) are silently
                # dropped here -- behavior kept from the original.
                if vf_end not in value:
                    if vf_end:
                        if isinstance(value, list):
                            value.append(vf_end)
                        elif isinstance(value, str):
                            value = [value, vf_end]
                        else:
                            pass
                    else:
                        pass
        else:
            if k == key:
                # Top-level non-dict hit: rebind (returned as a bare value).
                value = v
    return value
# -------------------------------------------------------------------------------------
|
{"hexsha": "7c8bcff54c1487a239398e23bd6cf108eef55e9b", "size": 6035, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/satellite/blending/snow/lib_snowblending_generic.py", "max_stars_repo_name": "c-hydro/hyde", "max_stars_repo_head_hexsha": "3a3ff92d442077ce353b071d5afe726fc5465201", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/satellite/blending/snow/lib_snowblending_generic.py", "max_issues_repo_name": "c-hydro/hyde", "max_issues_repo_head_hexsha": "3a3ff92d442077ce353b071d5afe726fc5465201", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-04-07T16:34:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T07:32:39.000Z", "max_forks_repo_path": "apps/satellite/blending/snow/lib_snowblending_generic.py", "max_forks_repo_name": "c-hydro/fp-hyde", "max_forks_repo_head_hexsha": "b0728397522aceebec3e7ff115aff160a10efede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7989130435, "max_line_length": 117, "alphanum_fraction": 0.4422535211, "include": true, "reason": "import numpy", "num_tokens": 1019}
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Relation.Binary.Base where
open import Cubical.Core.Everything
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Data.Sigma
open import Cubical.HITs.SetQuotients.Base
open import Cubical.HITs.PropositionalTruncation.Base
-- Heterogeneous binary relations between A and B, valued in Type ℓ'.
Rel : ∀ {ℓ} (A B : Type ℓ) (ℓ' : Level) → Type (ℓ-max ℓ (ℓ-suc ℓ'))
Rel A B ℓ' = A → B → Type ℓ'

-- Relations whose values are propositions (proof-irrelevant relations).
PropRel : ∀ {ℓ} (A B : Type ℓ) (ℓ' : Level) → Type (ℓ-max ℓ (ℓ-suc ℓ'))
PropRel A B ℓ' = Σ[ R ∈ Rel A B ℓ' ] ∀ a b → isProp (R a b)

-- Identity relation: the propositional truncation of the path type.
idPropRel : ∀ {ℓ} (A : Type ℓ) → PropRel A A ℓ
idPropRel A .fst a a' = ∥ a ≡ a' ∥
idPropRel A .snd _ _ = squash

-- Converse of a propositional relation.
invPropRel : ∀ {ℓ ℓ'} {A B : Type ℓ}
  → PropRel A B ℓ' → PropRel B A ℓ'
invPropRel R .fst b a = R .fst a b
invPropRel R .snd b a = R .snd a b

-- Relational composition, truncated to stay proposition-valued.
compPropRel : ∀ {ℓ ℓ' ℓ''} {A B C : Type ℓ}
  → PropRel A B ℓ' → PropRel B C ℓ'' → PropRel A C (ℓ-max ℓ (ℓ-max ℓ' ℓ''))
compPropRel R S .fst a c = ∥ Σ[ b ∈ _ ] (R .fst a b × S .fst b c) ∥
compPropRel R S .snd _ _ = squash

-- The graph of a function, as a relation.
graphRel : ∀ {ℓ} {A B : Type ℓ} → (A → B) → Rel A B ℓ
graphRel f a b = f a ≡ b

-- Standard properties of a homogeneous relation R on A.
module BinaryRelation {ℓ ℓ' : Level} {A : Type ℓ} (R : Rel A A ℓ') where
  isRefl : Type (ℓ-max ℓ ℓ')
  isRefl = (a : A) → R a a

  isSym : Type (ℓ-max ℓ ℓ')
  isSym = (a b : A) → R a b → R b a

  isTrans : Type (ℓ-max ℓ ℓ')
  isTrans = (a b c : A) → R a b → R b c → R a c

  -- Bundled equivalence-relation structure.
  record isEquivRel : Type (ℓ-max ℓ ℓ') where
    constructor equivRel
    field
      reflexive : isRefl
      symmetric : isSym
      transitive : isTrans

  isPropValued : Type (ℓ-max ℓ ℓ')
  isPropValued = (a b : A) → isProp (R a b)

  -- R is effective when set-quotient paths are equivalent to R itself.
  isEffective : Type (ℓ-max ℓ ℓ')
  isEffective =
    (a b : A) → isEquiv (eq/ {R = R} a b)

-- A relation bundled with a proof that it is an equivalence relation.
EquivRel : ∀ {ℓ} (A : Type ℓ) (ℓ' : Level) → Type (ℓ-max ℓ (ℓ-suc ℓ'))
EquivRel A ℓ' = Σ[ R ∈ Rel A A ℓ' ] BinaryRelation.isEquivRel R

-- A proposition-valued relation bundled with an equivalence-relation proof.
EquivPropRel : ∀ {ℓ} (A : Type ℓ) (ℓ' : Level) → Type (ℓ-max ℓ (ℓ-suc ℓ'))
EquivPropRel A ℓ' = Σ[ R ∈ PropRel A A ℓ' ] BinaryRelation.isEquivRel (R .fst)
|
{"hexsha": "be51c02b7d27169d2918a6db3250cfc876bc93e6", "size": 2028, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Cubical/Relation/Binary/Base.agda", "max_stars_repo_name": "apabepa10/cubical", "max_stars_repo_head_hexsha": "3a9bb56260c25a6f2e9c20af8d278de0fe8d9e05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cubical/Relation/Binary/Base.agda", "max_issues_repo_name": "apabepa10/cubical", "max_issues_repo_head_hexsha": "3a9bb56260c25a6f2e9c20af8d278de0fe8d9e05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-27T02:07:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T02:07:48.000Z", "max_forks_repo_path": "Cubical/Relation/Binary/Base.agda", "max_forks_repo_name": "apabepa10/cubical", "max_forks_repo_head_hexsha": "3a9bb56260c25a6f2e9c20af8d278de0fe8d9e05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2, "max_line_length": 78, "alphanum_fraction": 0.5892504931, "num_tokens": 879}
|
"""
Example usage:
$ python issmile.py --show %userprofile%\scikit_learn_data\lfw_home\lfw_funneled\Arnold_Schwarzenegger\Arnold_Schwarzenegger_0006.jpg
$ python issmile.py %userprofile%\scikit_learn_data\lfw_home\lfw_funneled\Yoko_Ono\Yoko_Ono_0003.jpg
"""
import argparse
import numpy as np
from keras.models import load_model
from utils import vectorizeimg
if __name__ == '__main__':
    # Command-line interface: classify a single image as smile / not-smile.
    parser = argparse.ArgumentParser()
    parser.add_argument('image_path', help='path to the image you want to classify')
    parser.add_argument('--show', help='if present, show the image', action='store_true')
    parser.add_argument('--train', help='if present, every other parameter is ignored and training starts', action='store_true')
    args = parser.parse_args()
    if args.train:
        # Importing the trainer module runs the training as a side effect.
        import trainer
        print('Training finished.')
        exit(0)
    try:
        # Scale pixel values to [0, 1] (vectorizeimg presumably returns
        # 0-255 pixel data -- confirm against utils.vectorizeimg).
        img = vectorizeimg(args.image_path) / 255
    except FileNotFoundError:
        print('The given image "%s" is not found' % args.image_path)
        exit(1)
    if args.show:
        from matplotlib import pyplot as plt
        plt.imshow(img)
        plt.show()
    try:
        model = load_model('./trained/cnn.h5')
    except FileNotFoundError:
        # NOTE(review): keras/h5py may raise OSError rather than
        # FileNotFoundError for a missing model file -- confirm this clause
        # actually catches the failure. exit(0) on failure also looks
        # unintended (nonzero would signal the error to callers).
        print('Before usage you must download the trained models from github or train the model yourself')
        exit(0)
    # Batch of one image; argmax over the class axis: 0 = not smile, 1 = smile.
    input_data = np.array([img])
    prediction = model.predict(input_data)
    prediction = np.argmax(prediction, axis=1)
    print('This is NOT a smile.' if prediction == 0 else 'This is a smile.')
|
{"hexsha": "bd46f1d024e381ee7034d6b72656693173aa1448", "size": 1556, "ext": "py", "lang": "Python", "max_stars_repo_path": "issmile.py", "max_stars_repo_name": "MartinKondor/IsRealSmile", "max_stars_repo_head_hexsha": "b45a57589b9ef8ccded5e475c81f4786a935c177", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "issmile.py", "max_issues_repo_name": "MartinKondor/IsRealSmile", "max_issues_repo_head_hexsha": "b45a57589b9ef8ccded5e475c81f4786a935c177", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-19T01:29:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T01:05:30.000Z", "max_forks_repo_path": "issmile.py", "max_forks_repo_name": "MartinKondor/IsRealSmile", "max_forks_repo_head_hexsha": "b45a57589b9ef8ccded5e475c81f4786a935c177", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4166666667, "max_line_length": 133, "alphanum_fraction": 0.6940874036, "include": true, "reason": "import numpy", "num_tokens": 375}
|
// [[Rcpp::depends(RcppArmadillo)]]
#define ARMA_DONT_PRINT_ERRORS
#include <iostream>
#include <fstream>
#include <cmath>
#include <armadillo>
#include <errno.h>
#include <RcppArmadillo.h>
//' Get observation location in 2D space
//'
//' @param time time to return observer position
//' @param strip_size size of strip in (x, y) dimensions
//' @param buffer buffer size
//' @param delta (dx, dt) vector (unused in this routine)
//' @param transect_type 0 = line, 1 = point
//' @param observer_speed speed of observer
//'
//' @return (x, y) location of observer at time t
// [[Rcpp::export]]
arma::vec GetObserverPosition(const double time,
                              const arma::vec strip_size,
                              const double buffer,
                              const arma::vec delta,
                              const int transect_type,
                              const double observer_speed) {
  arma::vec observer(2);
  // The observer always sits on the mid-line of the strip in x.
  observer(0) = 0.5 * strip_size(0);
  // A point transect stays at the grid centre; a line transect advances
  // along y at the observer speed, offset by the buffer.
  observer(1) = (transect_type == 1)
      ? 0.5 * strip_size(1)
      : observer_speed * time + buffer;
  return observer;
}
//' Calculates the sparse transition rate matrix
//'
//' @param num_cells vector with number of cells in (total space, x-direction,
//' y-direction)
//' @param sd vector of diffusive standard deviation for each behavioural state
//' @param dx grid cell size in the space (c.f. delta(0))
//'
//' @return sparse transition rate matrix
// [[Rcpp::export]]
arma::sp_mat CalcTrm(const arma::vec num_cells, const double sd, const double dx) {
  // Off-diagonal hopping rate of the discretized 2-D diffusion.
  const double rate = sd * sd / (2.0 * dx * dx);
  // NOTE: element-wise filling of sp_mat is convenient but slow on large
  // grids; Armadillo's batch-insertion constructors avoid repeated shifts.
  arma::sp_mat trm = arma::zeros<arma::sp_mat>(num_cells(0), num_cells(0));
  for (int ix = 0; ix < num_cells(1); ++ix) {
    for (int iy = 0; iy < num_cells(2); ++iy) {
      const int cell = ix + num_cells(1) * iy;
      // Couple to the four nearest neighbours where they exist.
      if (ix < num_cells(1) - 1) trm(cell, cell + 1) = rate;
      if (ix > 0) trm(cell, cell - 1) = rate;
      if (iy < num_cells(2) - 1) trm(cell, cell + num_cells(1)) = rate;
      if (iy > 0) trm(cell, cell - num_cells(1)) = rate;
      // Constant diagonal of -4*rate (also at boundary cells, as before).
      trm(cell, cell) = -4 * rate;
    }
  }
  return trm.t();
}
//' Diffuse probability distribution over space
//'
//' @description Calculate product of v with matrix exponential of a using
//' the Arnoldi process. Thereby diffusing the probability distribution
//' according to Brownian motion. Code is transcribed from Expokit package.
//'
//' @note NOTICE
//' Permission to use, copy, modify, and distribute EXPOKIT and its
//' supporting documentation for non-commercial purposes, is hereby
//' granted without fee, provided that this permission message and
//' copyright notice appear in all copies. Approval must be sought for
//' commercial purposes as testimony of its usage in applications.
//'
//' Neither the Institution (University of Queensland) nor the Author
//' make any representations about the suitability of this software for
//' any purpose. This software is provided ``as is'' without express or
//' implied warranty.
//'
//' The work resulting from EXPOKIT has been published in ACM-Transactions
//' on Mathematical Software, 24(1):130-156, 1998.
//'
//' The bibtex record of the citation:
//'
//' ARTICLE{EXPOKIT,
//' AUTHOR = {Sidje, R. B.},
//' TITLE = {{Expokit.} {A} Software Package for
//' Computing Matrix Exponentials},
//' JOURNAL = {ACM Trans. Math. Softw.},
//' VOLUME = {24},
//' NUMBER = {1},
//' PAGES = {130-156}
//' YEAR = {1998}
//' }
//'
//' Certain elements of the current software may include inadequacies
//' that may be corrected at any time, as they are discovered. The Web
//' always contains the latest updates.
//'
//' Original Author:
//' Roger B. Sidje <rbs@maths.uq.edu.au>
//' Department of Mathematics, University of Queensland
//' Brisbane, QLD-4072, Australia, (c) 1996-2006 All Rights Reserved
//'
//' @param a transition rate matrix
//' @param v vector to be multiplied
//' @param t time to diffuse over
//' @param num_cells vector with number of cells in (total space, x-direction,
//' y-direction)
//' @param krylov_dim dimension of the approximating Krylov space
//' @param tol tolerance in error
//'
//' @return diffused probability distribution
// [[Rcpp::export]]
arma::rowvec Diffuse(const arma::sp_mat a,
                     const arma::rowvec v,
                     const double t,
                     const arma::vec num_cells,
                     const int& krylov_dim = 30,
                     const double& tol = 1e-10) {
  // Computes v * expmat(t * a) via the Krylov/Arnoldi method; this is a
  // transcription of Expokit's expv routine (Sidje 1998) — see the NOTICE
  // above. Statement order follows the original closely; do not reorder.
  // Krylov dimension cannot exceed the matrix order
  double m = fmin(a.n_rows, krylov_dim);
  double anorm = norm(a, "Inf");
  double mxrej = 10;          // maximum number of step-size rejections
  double mx;
  double btol = 1e-7;         // "happy breakdown" tolerance
  double gamma = 0.9;         // step-size safety factor
  double mb = m;
  int nstep = 0;
  double t_now = 0;
  double t_step;
  double delta = 1.2;         // local-error acceptance factor
  double t_out = fabs(t);
  double s_error = 0;         // accumulated local error estimate
  double rndoff = anorm * 1e-16;
  int k1 = 1;
  double xm = 1 / m;
  double normv = norm(v);
  double avnorm;
  double beta = normv;
  // initial step-size heuristic from the Expokit paper
  double fact = std::pow((m + 1) / std::exp(1), m + 1) * std::sqrt(2 * M_PI * (m + 1));
  double t_new = (1.0 / anorm) * std::pow((fact * tol) / (4 * beta * anorm), xm);
  // round the step to two significant figures
  double s = std::pow(10, std::floor(std::log10(t_new)) - 1);
  t_new = std::ceil(t_new / s) * s;
  double sgn = t > 0 ? 1 : -1;
  int ireject;
  double err_loc;
  double phi1;
  double phi2;
  arma::vec w = v.t();        // current solution vector
  double hump = normv;
  // Krylov basis (columns) and Hessenberg matrix
  arma::mat vmat = arma::zeros<arma::mat>(a.n_rows, m + 1);
  arma::mat hmat = arma::zeros<arma::mat>(m + 2, m + 2);
  arma::mat fmat;
  arma::vec p;
  // march the solution from 0 to t_out in adaptively-sized steps
  while (t_now < t_out) {
    Rcpp::checkUserInterrupt();
    ++nstep;
    t_step = fmin(t_out - t_now, t_new);
    vmat.zeros();
    hmat.zeros();
    vmat.col(0) = (1 / beta) * w;
    // Arnoldi iteration: build orthonormal Krylov basis of {w, aw, a^2 w, ...}
    for (int j = 0; j < m; ++j) {
      p = a * vmat.col(j);
      // modified Gram-Schmidt orthogonalisation
      for (int i = 0; i <= j; ++i) {
        hmat(i, j) = dot(vmat.col(i), p);
        p -= hmat(i, j) * vmat.col(i);
      }
      s = norm(p);
      // "happy breakdown": Krylov space is invariant, result will be exact
      if (s < btol) {
        k1 = 0;
        mb = j;
        t_step = t_out - t_now;
        break;
      }
      hmat(j + 1, j) = s;
      vmat.col(j + 1) = (1 / s) * p;
    }
    if (k1 != 0) {
      // augment Hessenberg matrix for the error estimator
      hmat(m + 1, m) = 1;
      avnorm = norm(a * vmat.col(m));
    }
    ireject = 0;
    // propose steps until the local error estimate is acceptable
    while (ireject <= mxrej) {
      mx = mb + k1;
      // small dense matrix exponential of the projected operator
      fmat = expmat(sgn * t_step * hmat.submat(0, 0, mx, mx));
      if (k1 == 0) {
        err_loc = btol;
        break;
      }
      else {
        // two-term local error estimate (Expokit)
        phi1 = fabs(beta * fmat(m, 0));
        phi2 = fabs(beta * fmat(m + 1, 0) * avnorm);
        if (phi1 > 10 * phi2) {
          err_loc = phi2;
          xm = 1 / m;
        }
        else if (phi1 > phi2) {
          err_loc = (phi1 * phi2) / (phi1 - phi2);
          xm = 1 / m;
        }
        else {
          err_loc = phi1;
          xm = 1 / (m - 1);
        }
      }
      if (err_loc <= delta * t_step * tol) break;
      else {
        // reject: shrink the step and round to two significant figures
        t_step = gamma * t_step * std::pow(t_step * tol / err_loc, xm);
        s = std::pow(10, std::floor(std::log10(t_step)) - 1);
        t_step = std::ceil(t_step / s) * s;
        if (ireject == mxrej) {
          Rcpp::Rcout << "error: requested tolerance too high for Krylov approximation" << std::endl;
        }
        ++ireject;
      }
    }
    // advance the solution: w <- beta * V * f(:,1)
    mx = mb + fmax(0, k1 - 1);
    w = vmat.cols(0, mx) * beta * fmat.col(0).rows(0, mx);
    beta = norm(w);
    hump = fmax(hump, beta);
    t_now = t_now + t_step;
    // suggest the next step size
    t_new = gamma * t_step * std::pow(t_step * tol / err_loc, xm);
    s = std::pow(10, std::floor(std::log10(t_new) - 1));
    t_new = std::ceil(t_new / s) * s;
    err_loc = fmax(err_loc, rndoff);
    s_error += err_loc;
  }
  // NOTE(review): err and hump are computed but not returned — Expokit
  // reports them as diagnostics; kept here for parity with the original.
  double err = s_error;
  hump = hump / normv;
  return w.t();
}
//' Calculates the initial distribution of animal locations.
//' Assumes uniform distribution relative to transect.
//'
//' @param num_cells vector with number of cells in (total space, x-direction,
//' y-direction)
//' @param delta spatial and temporal increments (dx, dt)
//' @param region_size size of survey region in (x,y) extents
//'
//' @return Row vector with i^th entry probability animal in i^th grid cell initially
// [[Rcpp::export]]
arma::rowvec CalcInitialDistribution(const arma::vec num_cells,
                                     const arma::vec delta,
                                     const arma::vec region_size) {
  // Uniform over space: each grid cell of area dx * dx receives probability
  // mass equal to its share of the total survey region area.
  const double cell_area = delta(0) * delta(0);
  const double region_area = prod(region_size);
  arma::rowvec initial_phi = arma::ones<arma::rowvec>(num_cells(0));
  initial_phi *= cell_area / region_area;
  return initial_phi;
}
//' Transform working parameters (for the optimiser) to natural parameters
//' @param working_parameter working parameters
//' @param hzfn hazard function type
//' @return natural parameters
// [[Rcpp::export]]
arma::vec Working2Natural(arma::vec working_parameter, int hzfn = 1) {
  // Parameters are optimised on the log scale; the natural scale is
  // recovered by exponentiating element-wise (hzfn is currently unused).
  return arma::exp(working_parameter);
}
//' Transform natural parameters to unconstrained working parameters
//' @param parameter natural parameters
//' @param hzfn hazard function type
//' @return working parameters
// [[Rcpp::export]]
arma::vec Natural2Working(arma::vec parameter, int hzfn = 1) {
  // Inverse of Working2Natural: map the strictly positive natural
  // parameters onto the unconstrained log scale (hzfn is currently unused).
  return arma::log(parameter);
}
//' Calculates hazard of detection
//'
//' @param x relative x coordinate
//' @param y relative y coordinate
//' @param dt time increment
//' @param observer_speed speed of the observer
//' @param parameter vector of (detection shape, detection scale)
//' @param type transect type (0 = line, 1 = point)
//' @param hzfn hazard function code (see ?hazardfns)
//'
//' @return hazard of detection
// [[Rcpp::export]]
double CalcHazard(const double x,
                  const double y,
                  const double dt,
                  const double observer_speed,
                  const arma::vec parameter,
                  const int type,
                  const int hzfn) {
  // Hazard of detection accumulated over one time increment for an animal
  // at (x, y) relative to the observer. For point transects (type == 1) the
  // observer is stationary and the hazard is h(r) * dt; for line transects
  // the hazard is integrated analytically along the observer's track from
  // y down to y1 = y - observer_speed * dt (clamped at 0), using the
  // incomplete-beta representation of the path integral when x != 0.
  double hazard = 0;
  double r0, r1, abeta, y1;          // r0/r1: squared distances at step start/end
  double s, sx, sy, d, c, k;         // hazard-shape parameters (per hzfn)
  switch(hzfn) {
  case 0:
    // Hayes and Buckland isotropic hazard with fixed shape: h(r) = (s/r)^d, d = 1
    // parameter = (s)
    s = parameter(0);
    d = 1;
    c = pow(s, d);
    r0 = x * x + y * y;
    if (type == 1) {
      hazard = dt * c / pow(r0, 0.5 * d);
    }
    else {
      // assume cannot detect behind observer
      if (y < 0) return 0;
      abeta = 0.5 * (d - 1.0);
      y1 = y - observer_speed * dt;
      if (y1 < 0) y1 = 0;
      r1 = x * x + y1 * y1;
      if (r1 < 1e-10) return arma::datum::inf;
      if (fabs(x) < 1e-10) {
        if (fabs(d - 1) < 1e-10) {
          // FIX: original had log(sqrt(r1)) - log(sqrt(r0)), which is
          // negative since r1 <= r0 and inconsistent with the identical
          // branch in case 1; the path integral of c/u from y1 to y is
          // c * log(sqrt(r0) / sqrt(r1)).
          hazard = log(sqrt(r0)) - log(sqrt(r1));
          hazard *= c;
        } else {
          // unreachable for d == 1; kept for symmetry with case 1
          hazard = 1.0 / pow(r1, abeta) - 1.0 / pow(r0, abeta);
          hazard *= c / (d - 1.0);
        }
      } else {
        hazard = R::pbeta(x * x / r1, abeta, 0.5, 1, 0) - R::pbeta(x * x / r0,
                 abeta, 0.5, 1, 0);
        hazard *= R::beta(abeta, 0.5) * c / (2.0 * pow(fabs(x),
                  d - 1.0));
      }
    }
    return hazard;
  case 1:
    // Hayes and Buckland isotropic h(r) = (r/s)^(-d)
    // parameter = (s, d)
    s = parameter(0);
    d = 1 + parameter(1);
    c = pow(s, d);
    r0 = x * x + y * y;
    if (type == 1) {
      hazard = dt * c / pow(r0, 0.5 * d);
    }
    else {
      // assume cannot detect behind observer
      if (y < 0) return 0;
      abeta = 0.5 * (d - 1.0);
      y1 = y - observer_speed * dt;
      if (y1 < 0) y1 = 0;
      r1 = x * x + y1 * y1;
      if (r1 < 1e-10) return arma::datum::inf;
      if (fabs(x) < 1e-10) {
        if (fabs(d - 1) < 1e-10) {
          // limiting case d -> 1: logarithmic integral
          hazard = log(sqrt(r0)) - log(sqrt(r1));
          hazard *= c;
        } else {
          hazard = 1.0 / pow(r1, abeta) - 1.0 / pow(r0, abeta);
          hazard *= c / (d - 1.0);
        }
      } else {
        hazard = R::pbeta(x * x / r1, abeta, 0.5, 1, 0) - R::pbeta(x * x / r0,
                 abeta, 0.5, 1, 0);
        hazard *= R::beta(abeta, 0.5) * c / (2.0 * pow(fabs(x),
                  d - 1.0));
      }
    }
    return hazard;
  case 2:
    // Hayes and Buckland anisotropic h(r) = (x^2/sx^2 + y^2/sy^2)^(-d/2)
    // parameter = (sx, sy, d)
    sx = parameter(0);
    sy = parameter(1);
    d = 1 + parameter(2);
    r0 = (x * x) / (sx * sx) + (y * y) / (sy * sy);
    if (type == 1) {
      hazard = dt * pow(r0, -0.5 * d);
    }
    else {
      // assume cannot detect behind observer
      if (y < 0) return 0;
      abeta = 0.5 * (d - 1.0);
      y1 = y - observer_speed * dt;
      if (y1 < 0) y1 = 0;
      r1 = (x * x) / (sx * sx) + (y1 * y1) / (sy * sy);
      if (r1 < 1e-10) return arma::datum::inf;
      if (fabs(x) < 1e-10) {
        if (fabs(d - 1) < 1e-10) {
          hazard = log(sqrt(y)) - log(sqrt(y1));
          hazard *= pow(sy, d);
        } else {
          hazard = 1.0 / pow(y1, abeta) - 1.0 / pow(y, abeta);
          hazard *= pow(sy, d) / (d - 1.0);
        }
      } else {
        hazard = R::pbeta(x * x / (sx * sx * r1), abeta, 0.5, 1, 0) - R::pbeta(x * x / (sx * sx * r0),
                 abeta, 0.5, 1, 0);
        hazard *= R::beta(abeta, 0.5) * pow(sx, d - 1) * sy / (2.0 * pow(fabs(x),
                  d - 1.0));
      }
    }
    return hazard;
  case 3:
    // Hayes and Buckland shape-anisotropic h(r) = (x^2+(y+k)^2)^(-d/2)
    // parameter = (s, d, k)
    s = parameter(0);
    d = 1 + parameter(1);
    k = parameter(2);
    c = pow(s, d);
    r0 = x * x / (s * s) + (y / s + k) * (y / s + k);
    if (type == 1) {
      hazard = dt * c * (1 + k * y / sqrt(r0)) / pow(r0, 0.5 * d);
    }
    else {
      // assume cannot detect behind observer
      if (y < 0) return 0;
      abeta = 0.5 * (d - 1.0);
      y1 = y - observer_speed * dt;
      if (y1 < 0) y1 = 0;
      r1 = x * x / (s * s) + (y1 / s + k) * (y1 / s + k);
      if (r1 < 1e-10) return arma::datum::inf;
      if (fabs(x) < 1e-10) {
        if (fabs(d - 1) < 1e-10) {
          hazard = log(sqrt(y + s * k)) - log(sqrt(y1 + s * k));
          hazard *= c;
        } else {
          hazard = 1.0 / pow(y1 + s * k, abeta) - 1.0 / pow(y + s * k, abeta);
          hazard *= c / (d - 1.0);
        }
      } else {
        hazard = R::pbeta(x * x / (s * s * r1), abeta, 0.5, 1, 0) - R::pbeta(x * x / (s * s * r0),
                 abeta, 0.5, 1, 0);
        hazard *= R::beta(abeta, 0.5) * c / (2.0 * pow(fabs(x), d - 1.0));
      }
    }
    return hazard;
  case 4:
    // Hayes and Buckland anisotropic h(r) = (x^2/sx^2 + (y/sy + k)^2)^(-d/2)
    // parameter = (sx, sy, d, k)
    sx = parameter(0);
    sy = parameter(1);
    d = 1 + parameter(2);
    k = parameter(3);
    r0 = (x * x) / (sx * sx) + pow(y / sy + k, 2.0);
    if (type == 1) {
      hazard = dt * pow(r0, -0.5 * d);
    }
    else {
      // assume cannot detect behind observer
      if (y < 0) return 0;
      abeta = 0.5 * (d - 1.0);
      y1 = y - observer_speed * dt;
      if (y1 < 0) y1 = 0;
      r1 = (x * x) / (sx * sx) + pow(y1 / sy + k, 2.0);
      if (r1 < 1e-10) return arma::datum::inf;
      if (fabs(x) < 1e-10) {
        if (fabs(d - 1) < 1e-10) {
          hazard = log(sqrt(y)) - log(sqrt(y1));
          hazard *= pow(sy, d);
        } else {
          hazard = 1.0 / pow(y1, abeta) - 1.0 / pow(y, abeta);
          hazard *= pow(sy, d) / (d - 1.0);
        }
      } else {
        hazard = R::pbeta(x * x / (sx * sx * r1), abeta, 0.5, 1, 0) - R::pbeta(x * x / (sx * sx * r0),
                 abeta, 0.5, 1, 0);
        hazard *= R::beta(abeta, 0.5) * pow(sx, d - 1) * sy / (2.0 * pow(fabs(x),
                  d - 1.0));
      }
    }
    return hazard;
  default:
    Rcpp::Rcout << "error: no hazard specified." << std::endl;
    return -arma::datum::inf;
  }
}
//' Computes the probability of survival for each spatial location
//'
//' @param t time step
//' @param parameter (scale, shape, diffusion) parameter
//' @param num_cells number of cells in (x, y, all) dimensions
//' @param delta (dx, dt) vector
//' @param strip_size size of strip in (x, y) dimensions
//' @param buffer buffer size
//' @param observer_speed speed of the observer
//' @param type transect type
//' @param hzfn hazard function code
//' @param nint not used
//'
//' @return row vector of survival probabilities over space
// [[Rcpp::export]]
arma::rowvec CalcSurvivalPr(const int t,
                            const arma::vec parameter,
                            const arma::vec num_cells,
                            const arma::vec delta,
                            const arma::vec strip_size,
                            const double buffer,
                            const double observer_speed,
                            const int type,
                            const int hzfn) {
  // Probability of NOT being detected during time step t for each grid cell:
  // exp(-mean hazard over the cell). Removed unused local `iy` and the
  // commented-out 2-d quadrature that it belonged to.
  arma::rowvec pr_survive = arma::ones<arma::rowvec>(num_cells(0));
  // observer location at the start of this time step
  arma::vec observer_position(GetObserverPosition(t * delta(1), strip_size, buffer, delta, type,
                                                  observer_speed));
  double x, ix;
  double y;
  int s;
  // for line transects, cells behind the observer are never detected
  // (see CalcHazard), so skip them entirely
  int ymin = 0;
  if (type == 0) ymin = floor(observer_position(1) / delta(0));
  for (int x_cell = 0; x_cell < num_cells(1); ++x_cell) {
    for (int y_cell = ymin; y_cell < num_cells(2); ++y_cell) {
      s = x_cell + num_cells(1) * y_cell;
      pr_survive(s) = 0;
      // cell corner position relative to the observer
      x = x_cell * delta(0) - observer_position(0);
      y = y_cell * delta(0) - observer_position(1);
      // average the hazard across the cell in the x-direction with a
      // left-rectangle rule using nint nodes
      for (int i = 0; i < nint; ++i) {
        ix = x + (i * delta(0)) / nint;
        pr_survive(s) += CalcHazard(ix, y, delta(1), observer_speed, parameter, type, hzfn);
      }
      pr_survive(s) /= 1.0*nint;
      // survival probability under a Poisson detection process
      pr_survive(s) = exp(-pr_survive(s));
    }
  }
  return pr_survive;
}
//' Thins probability distribution by the proportion detected in each grid cell
//'
//' @param t time step
//' @param pr probability distribution over finite grid
//' @param parameter (scale, shape, diffusion) parameter
//' @param num_cells number of cells in (x, y, all) dimensions
//' @param delta (dx, dt) vector
//' @param strip_size size of strip in (x, y) dimensions
//' @param buffer buffer width
//' @param observer_speed speed of the observer
//' @param type transect type
//' @param hzfn hazard function code
//'
//' @return thinned probability distribution
// [[Rcpp::export]]
arma::rowvec Detect(const int t,
                    const arma::rowvec pr,
                    const arma::vec parameter,
                    const arma::vec num_cells,
                    const arma::vec delta,
                    const arma::vec strip_size,
                    const double buffer,
                    const double observer_speed,
                    const int type,
                    const int hzfn) {
  // Per-cell probability of surviving (not being detected in) time step t
  arma::rowvec thinned = CalcSurvivalPr(t, parameter, num_cells, delta,
                                        strip_size, buffer, observer_speed,
                                        type, hzfn);
  // element-wise product: probability of being in the cell AND undetected
  thinned %= pr;
  return(thinned);
}
//' Compute hazard of each detection within time-step
//'
//' @param data (x, y, t) data matrix
//' @param dt time step
//' @param transdat transect data matrix
//' @param parameter (scale, shape, diffusion) parameters
//' @param observer_speed speed of observer
//' @param type 1 = point, 0 = line transect
//' @param hzfn hazard function code (see ?hazardfns)
//' @return PDF for within-timestep detection
// [[Rcpp::export]]
double CalcHazardDetected(const arma::mat data,
                          double dt,
                          arma::mat transdat,
                          arma::vec parameter,
                          double observer_speed,
                          int type,
                          const int hzfn) {
  // Sum over all detections of the log hazard at the moment of detection,
  // minus the hazard accumulated during the part of the time step before
  // each detection. Assumes data column 4 holds the detection time and
  // columns 2-3 the (x, y) position relative to the observer.
  // Removed unused local `cosang`; loop index changed to arma::uword to
  // match the unsigned type of data.n_rows.
  arma::vec r2;
  // time elapsed within the current time step for each detection
  arma::vec t_remaining = data.col(4) - floor((data.col(4)) / dt) * dt;
  double log_hazard = 0;
  for (arma::uword i = 0; i < data.n_rows; ++i) {
    log_hazard -= CalcHazard(data(i, 2), data(i, 3) + t_remaining(i) *
      observer_speed, t_remaining(i), observer_speed, parameter, type, hzfn);
  }
  double s, sx, sy, d, c, k;
  // add log h(x, y) for each detection under the chosen hazard form
  switch(hzfn) {
  case 0:
    s = parameter(0);
    d = 1;
    c = pow(s, d);
    r2 = sum(data.cols(2, 3) % data.cols(2, 3), 1);
    log_hazard += arma::accu(log(c) - 0.5 * d * log(r2));
    break;
  case 1:
    s = parameter(0);
    d = 1 + parameter(1);
    c = pow(s, d);
    r2 = sum(data.cols(2, 3) % data.cols(2, 3), 1);
    log_hazard += arma::accu(log(c) - 0.5 * d * log(r2));
    break;
  case 2:
    sx = parameter(0);
    sy = parameter(1);
    d = 1 + parameter(2);
    log_hazard += -0.5 * d * arma::accu(log(data.col(2) % data.col(2) / (sx * sx) + data.col(3) % data.col(3) / (sy * sy)));
    break;
  case 3:
    s = parameter(0);
    d = 1 + parameter(1);
    k = parameter(2);
    r2 = data.col(2) % data.col(2) / (s * s) + pow(data.col(3) / s + k, 2.0);
    log_hazard += arma::accu(- 0.5 * d * log(r2));
    break;
  case 4:
    sx = parameter(0);
    sy = parameter(1);
    d = 1 + parameter(2);
    k = parameter(3);
    r2 = data.col(2) % data.col(2) / (sx * sx) + pow(data.col(3) / sy + k, 2.0);
    log_hazard += arma::accu(- 0.5 * d * log(r2));
    break;
  default:
    Rcpp::Rcout << "error: no hazard specified." << std::endl;
    return -arma::datum::inf;
  }
  return log_hazard;
}
//' Calculates movement model log-likelihood
//'
//' @param sd diffusion
//' @param data Rcpp List where each component represents an individual path
//' and contains a matrix where each row is an observed location (x,y,t)
//'
//' @return log-likelihood
// [[Rcpp::export]]
double CalcMovementLogLikelihood(const double sd, const Rcpp::List data) {
  // Brownian-motion log-likelihood: each of the n - 1 increments of a tag
  // track is bivariate normal with variance sd^2 * tdiff per coordinate,
  // density (2 * pi * sd^2 * tdiff)^(-1) * exp(-(dx^2 + dy^2)/(2 sd^2 tdiff)).
  double log_likelihood = 0;
  int ntags = data.size();
  arma::mat tag;
  for (int i = 0; i < ntags; ++i) {
    tag = Rcpp::as<arma::mat>(data(i));
    // increments between consecutive fixes (n - 1 of them);
    // removed the shadowed outer declarations of xdiff/ydiff
    arma::vec xdiff = arma::diff(tag.col(0));
    arma::vec ydiff = arma::diff(tag.col(1));
    arma::vec tdiff = arma::diff(tag.col(2));
    // FIX: the normalising constant applies once per increment, i.e.
    // n - 1 times, not n times as in the original (the extra term biased
    // the sd estimate since it depends on sd)
    log_likelihood -= (tag.n_rows - 1) * log(2 * M_PI * sd * sd);
    log_likelihood -= accu(log(tdiff) + (xdiff % xdiff + ydiff % ydiff) / (2 * sd * sd * tdiff));
  }
  return log_likelihood;
}
//' Computes what grid cells are inside and outside transect
//'
//' @param num_cells number of cells in (total, x, y) direction
//' @param strip_size size of strip in (x,y) directions
//' @param dx grid cell size
//' @param w for lines, half-width, for points radius
//' @param ymax maximum forward distance for lines
//' @param buffer distance
//' @param type =0 for lines, =1 for points
//'
//' @return vector with 1 for each grid cell inside and 0 otherwise
// [[Rcpp::export]]
arma::rowvec InTransect(const arma::vec num_cells,
                        const arma::vec strip_size,
                        const double dx,
                        const double w,
                        const double ymax,
                        const double buffer,
                        const int type) {
  arma::rowvec inside = arma::zeros<arma::rowvec>(num_cells(0));
  if (type == 0) {
    // Line transect: the covered region is the rectangle [-w, w] x [0, ymax]
    // relative to the transect; each cell's entry is the fraction of its
    // area that overlaps this rectangle.
    for (int ix = 0; ix < num_cells(1); ++ix) {
      const double xlo = ix * dx - strip_size(0) * 0.5;
      double overlap_x = fmin(xlo + dx, w) - fmax(xlo, -w);
      if (overlap_x < 0) overlap_x = 0;
      for (int iy = 0; iy < num_cells(2); ++iy) {
        const double ylo = iy * dx - buffer;
        double overlap_y = fmin(ylo + dx, ymax) - fmax(ylo, 0);
        if (overlap_y < 0) overlap_y = 0;
        inside(ix + iy * num_cells(1)) = overlap_x * overlap_y / (dx * dx);
      }
    }
  } else {
    // Point transect: a cell is covered (indicator 1) when its reference
    // point lies within radius w of the strip centre.
    for (int ix = 0; ix < num_cells(1); ++ix) {
      const double xc = ix * dx - strip_size(0) * 0.5;
      for (int iy = 0; iy < num_cells(2); ++iy) {
        const double yc = iy * dx - strip_size(1) * 0.5;
        if (xc * xc + yc * yc <= w * w) inside(ix + iy * num_cells(1)) = 1;
      }
    }
  }
  return inside;
}
//' Computes negative log-likelihood of moveDs model
//'
//' @param working_parameter unconstrained version of parameter vector containing
//' (detection shape, detection scale, diffusion sd)
//' @param start start value for parameters on natural scale
//' @param data matrix with (trans id, grid cell,t) distance sampling survey data (assumed to be ordered by transect and time)
//' @param transdat matrix with (stripsize(1), numcells in y, totaltimestep, number of observations)
//' @param auxiliary_data vector containing (area x extent, area y extent, strip width, transect_type)
//' @param delta vector of (dx, dt) spacetime increments
//' @param num_cells number of cells in (total space, x-direction, y-direction)
//' @param T total time of survey for longest transect
//' @param ymax maximum length of a transect
//' @param buffer buffer distance
//' @param movement_data field object where each component represents an individual
//' path and contains a matrix where each row is an observed location (x,y,t)
//' @param fixed_sd if move_method = 2
//' @param hzfn hazard function code (see ?hazardfns)
//' @param move_method 0 = 2d CDS model, 1 = 2d MDS model (movement estimated),
//' 2 = 2d MDS model (movement fixed)
//' @param print if TRUE then print likelihood and parameters after evaluation
//' @param con parameters are constrained to be between 1/con * start value and
//' con * start value
//'
//' @return negative log-likelihood
// [[Rcpp::export]]
double NegativeLogLikelihood(const arma::vec working_parameter,
                             const arma::vec start,
                             const arma::mat data,
                             const arma::mat transdat,
                             const arma::vec auxiliary_data,
                             const arma::vec delta,
                             const arma::vec num_cells,
                             const int T,
                             const double ymax,
                             const double buffer,
                             const Rcpp::List movement_data,
                             const double fixed_sd = 0,
                             const int hzfn = 1,
                             const int move_method = 1,
                             const bool print = false,
                             const double con = 100) {
  // HMM filter over the discretised strip: alternates detection thinning,
  // diffusion, and renormalisation, accumulating the log-likelihood of the
  // observed detections conditional on detection per transect.
  // unpack auxiliary data
  arma::vec region_size(auxiliary_data.rows(0, 1));
  arma::vec strip_size(2);
  strip_size(0) = 2 * auxiliary_data(2) + 2 * buffer;
  strip_size(1) = ymax + 2 * buffer;
  double observer_speed = auxiliary_data(3);
  // NOTE(review): num_transects is not used in this function
  int num_transects = transdat.n_rows;
  int transect_type = auxiliary_data(4);
  double dx = delta(0);
  double dt = delta(1);
  // unpack parameters
  arma::vec parameter = Working2Natural(working_parameter, hzfn);
  int npar = parameter.n_elem;
  // constraints: reject parameters outside [start/con, start*con]
  for (int p = 0; p < npar; ++p) {
    if (parameter(p) > con * start(p)) return arma::datum::inf;
    if (parameter(p) < start(p) / con) return arma::datum::inf;
  }
  double sd = 0;
  if (move_method == 1) sd = parameter(npar - 1);
  if (move_method == 2) sd = fixed_sd;
  // setup variables
  int curtrans = 0;
  int curobs = 0;
  double pr_survived;
  double pr_outside;
  double accu_hazard;
  double pdet;
  double llk = 0;
  // calculate initial probability in each grid cell
  arma::rowvec pr_t = CalcInitialDistribution(num_cells, delta, region_size);
  arma::rowvec old_pr_t(pr_t);
  // probability outside buffer region at t = 0
  pr_outside = 1.0 - arma::prod(strip_size) / arma::prod(region_size);
  // compute movement matrices for survey
  arma::sp_mat trm;
  // flux(s): probability that mass in cell s leaks over the grid boundary
  // within one time step (complement of the column sums of exp(dt * trm))
  arma::rowvec flux = arma::ones<arma::rowvec>(num_cells(0));
  if (move_method > 0) {
    trm = CalcTrm(num_cells, sd, dx);
    flux = Diffuse(trm.t(), flux, dt, num_cells);
    flux = 1.0 - flux;
  }
  double num_boundary_states = floor(prod(region_size) / (dx * dx)) - num_cells(0);
  // intialise variables
  double curt = floor((data(curobs, 4)) / dt);
  if (curt < 0) curt = 0;
  // clamp detections in the final partial step onto the last time step
  // (NOTE(review): bitwise & used on boolean operands — works, but && intended)
  if ((curt > T - 1) & (curt < T + 1)) curt = T - 1;
  int endtime = transdat(curtrans, 2);
  arma::rowvec intrans = InTransect(num_cells, strip_size, dx, auxiliary_data(2), ymax, buffer, transect_type);
  accu_hazard = 0;
  pdet = 0;
  double diff = 0;
  // compute HMM approximation
  for (int t = 0; t < T; ++t) {
    Rcpp::checkUserInterrupt();
    // add to pr_obs, the observations that occur during time interval t
    while (t == curt) {
      if (data(curobs, 1) > num_cells(0)) {
        Rcpp::Rcout << "Warning: buffer region too small to include all detections." << std::endl;
      } else {
        // log density of a detection in this grid cell (area dx * dx),
        // corrected by the accumulated survival normalisation
        llk += log(pr_t(data(curobs, 1))) + accu_hazard - log(dx * dx);
      }
      ++curobs;
      if (curobs > data.n_rows - 1) {
        curt = T + 1;
      } else {
        curt = floor((data(curobs, 4)) / dt);
        if (curt < 0) curt = 0;
        if ((curt > T - 1) & (curt < T + 1)) curt = T - 1;
      }
    }
    // thin pr_t by those that are detected
    diff = 0;
    diff += arma::accu(pr_t % intrans);
    pr_t = Detect(t, pr_t, parameter, num_cells, delta, strip_size, buffer, observer_speed, transect_type, hzfn);
    diff -= arma::accu(pr_t % intrans);
    // unconditional detection probability gained this step
    pdet += diff * exp(accu_hazard);
    if (arma::accu(pr_t) < 1e-10) return(arma::datum::inf);
    // move animals
    if (move_method > 0) {
      old_pr_t = pr_t;
      //move animals that are inside strip
      try {
        pr_t = Diffuse(trm, pr_t, dt, num_cells);
      } catch(...) {
        return arma::datum::inf;
      }
      //move animals outside strip that come into strip
      pr_t += flux * pr_outside / num_boundary_states;
      //account for transversal of boundary
      pr_outside += accu(old_pr_t % flux) - accu(flux) * pr_outside / num_boundary_states;
    }
    // add contribution to accu_hazard
    pr_survived = accu(pr_t) + pr_outside;
    accu_hazard += log(pr_survived);
    // scale to avoid underflow
    pr_t /= pr_survived;
    pr_outside /= pr_survived;
    // if transect ends divide by conditional probability
    while (endtime == t) {
      // condition on detection: subtract n_obs * log(p_detect) per transect
      llk -= transdat(curtrans, 1) * log(pdet);
      ++curtrans;
      if (curtrans > transdat.n_rows - 1) {
        endtime = T + 1;
      } else {
        endtime = transdat(curtrans, 2);
      }
    }
  }
  // add hazard of detections
  llk += CalcHazardDetected(data, dt, transdat, parameter, observer_speed, transect_type, hzfn);
  double movement_log_likelihood = 0;
  if (move_method == 1) movement_log_likelihood = CalcMovementLogLikelihood(sd, movement_data);
  double negative_log_likelihood = -llk - movement_log_likelihood;
  if (print) {
    int old_precision = Rcpp::Rcout.precision();
    Rcpp::Rcout.precision(4);
    Rcpp::Rcout << -negative_log_likelihood << " ";
    for (int par = 0; par < npar; ++par) Rcpp::Rcout << parameter(par) << " ";
    Rcpp::Rcout << std::endl;
  }
  return negative_log_likelihood;
}
//' Computes covered area for entire survey
//'
//' @param working_parameter unconstrained version of parameter vector containing
//' (detection shape, detection scale, diffusion sd)
//' @param transdat matrix with (stripsize(1), numcells in y, totaltimestep, number of observations)
//' @param auxiliary_data vector containing (area x extent, area y extent, strip width, transect_type)
//' @param delta vector of (dx, dt) spacetime increments
//' @param num_cells number of cells in (total space, x-direction, y-direction)
//' @param T total time of survey for longest transect
//' @param ymax maximum length of a transect
//' @param buffer buffer distance
//' @param fixed_sd if move_method = 2
//' @param hzfn hazard function code (see ?hazardfns)
//' @param move_method 0 = 2d CDS model, 1 = 2d MDS model (movement estimated),
//' 2 = 2d MDS model (movement fixed)
//'
//' @return total encounter probability summed over transects
// [[Rcpp::export]]
double GetPenc(const arma::vec working_parameter,
               const arma::mat transdat,
               const arma::vec auxiliary_data,
               const arma::vec delta,
               const arma::vec num_cells,
               const int T,
               const double ymax,
               const double buffer,
               const double fixed_sd,
               const int hzfn,
               int move_method) {
  // Runs the same HMM filter as NegativeLogLikelihood but without the
  // detection-data terms, recording the per-transect encounter probability
  // (pdet at transect end) and returning their sum.
  // unpack auxiliary data
  arma::vec region_size(auxiliary_data.rows(0, 1));
  arma::vec strip_size(2);
  strip_size(0) = 2 * auxiliary_data(2) + 2 * buffer;
  strip_size(1) = ymax + 2 * buffer;
  double observer_speed = auxiliary_data(3);
  int num_transects = transdat.n_rows;
  int transect_type = auxiliary_data(4);
  double dx = delta(0);
  double dt = delta(1);
  // unpack parameters
  arma::vec parameter = Working2Natural(working_parameter, hzfn);
  int npar = parameter.n_elem;
  double sd = 0;
  if (move_method == 1) sd = parameter(npar - 1);
  if (move_method == 2) sd = fixed_sd;
  // setup variables
  int curtrans = 0;
  // NOTE(review): curobs is not used in this function
  int curobs = 0;
  double pr_survived;
  double pr_outside;
  double accu_hazard;
  double pdet;
  // per-transect encounter probabilities
  arma::vec penc(num_transects); penc.zeros();
  // calculate initial probability in each grid cell
  arma::rowvec pr_t = CalcInitialDistribution(num_cells, delta, region_size);
  arma::rowvec old_pr_t(pr_t);
  // probability outside buffer region at t = 0
  pr_outside = 1.0 - arma::prod(strip_size) / arma::prod(region_size);
  // compute movement matrices for survey
  arma::sp_mat trm;
  arma::rowvec flux = arma::ones<arma::rowvec>(num_cells(0));
  if (move_method > 0) {
    trm = CalcTrm(num_cells, sd, dx);
    flux = Diffuse(trm.t(), flux, dt, num_cells);
    flux = 1.0 - flux;
  }
  double num_boundary_states = floor(prod(region_size) / (dx * dx)) - num_cells(0);
  // intialise variables
  int endtime = transdat(curtrans, 2);
  arma::rowvec intrans = InTransect(num_cells, strip_size, dx, auxiliary_data(2), ymax, buffer, transect_type);
  accu_hazard = 0;
  pdet = 0;
  double diff;
  // compute HMM approximation
  for (int t = 0; t < T; ++t) {
    Rcpp::checkUserInterrupt();
    // thin pr_t by those that are detected
    diff = 0;
    diff += arma::accu(pr_t % intrans);
    pr_t = Detect(t, pr_t, parameter, num_cells, delta, strip_size, buffer, observer_speed, transect_type, hzfn);
    diff -= arma::accu(pr_t % intrans);
    // unconditional detection probability gained this step
    pdet += diff * exp(accu_hazard);
    // move animals
    if (move_method > 0) {
      old_pr_t = pr_t;
      // move animals that are inside strip
      pr_t = Diffuse(trm, pr_t, dt, num_cells);
      // move animals outside strip that come into strip
      pr_t += flux * pr_outside / num_boundary_states;
      // account for transversal of boundary
      pr_outside += accu(old_pr_t % flux) - accu(flux) * pr_outside / num_boundary_states;
    }
    // add contribution to accu_hazard
    pr_survived = accu(pr_t) + pr_outside;
    accu_hazard += log(pr_survived);
    // scale to avoid underflow
    pr_t /= pr_survived;
    pr_outside /= pr_survived;
    // if transect ends divide by conditional probability
    while (endtime == t) {
      // record the cumulative encounter probability for this transect
      penc(curtrans) = pdet;
      ++curtrans;
      if (curtrans > transdat.n_rows - 1) {
        endtime = T + 1;
      } else {
        endtime = transdat(curtrans, 2);
      }
    }
  }
  return arma::accu(penc);
}
//' Computes PDF of observed detections for each (x,y) cell around the observer.
//'
//' @param working_parameter unconstrained version of parameter vector containing
//' (detection shape, detection scale, diffusion sd)
//' @param range to compute out to in x and y directions
//' @param transdat matrix with (stripsize(1), numcells in y, totaltimestep, number of observations)
//' @param auxiliary_data vector containing (area x extent, area y extent, strip width, transect_type)
//' @param delta vector of (dx, dt) spacetime increments
//' @param num_cells number of cells in (total space, x-direction, y-direction)
//' @param T total time of survey for longest transect
//' @param ymax maximum length of a transect
//' @param buffer buffer distance
//' @param fixed_sd if move_method = 2
//' @param hzfn hazard function code (see ?hazardfns)
//' @param move_method 0 = 2d CDS model, 1 = 2d MDS model (movement estimated),
//' 2 = 2d MDS model (movement fixed)
//'
//' @return matrix where (i,j) entry is cell i*dx perpendicular and j*dx forward of
//' the observer
// [[Rcpp::export]]
arma::mat GetHist(const arma::vec working_parameter,
                  const arma::vec range,
                  const arma::mat transdat,
                  const arma::vec auxiliary_data,
                  const arma::vec delta,
                  const arma::vec num_cells,
                  const int T,
                  const double ymax,
                  const double buffer,
                  const double fixed_sd = 0,
                  const int hzfn = 1,
                  int move_method = 1) {
  // Runs the HMM filter and accumulates, per grid cell relative to the
  // observer, the expected number of detections — a model-predicted
  // histogram of detection locations for goodness-of-fit plots.
  arma::vec region_size(auxiliary_data.rows(0, 1));
  arma::vec strip_size(2);
  strip_size(0) = 2 * auxiliary_data(2) + 2 * buffer;
  strip_size(1) = ymax + 2 * buffer;
  double observer_speed = auxiliary_data(3);
  int num_transects = transdat.n_rows;
  int transect_type = auxiliary_data(4);
  double dx = delta(0);
  double dt = delta(1);
  // unpack parameters
  arma::vec parameter = Working2Natural(working_parameter, hzfn);
  int npar = parameter.n_elem;
  double sd = 0;
  if (move_method == 1) sd = parameter(npar - 1);
  if (move_method == 2) sd = fixed_sd;
  // setup variables
  int curtrans = 0;
  // NOTE(review): curobs and Nperp are not used in this function
  int curobs = 0;
  double pr_survived;
  double pr_outside;
  double accu_hazard;
  arma::vec obspos;
  int sobs;
  int smax;
  int Nperp = floor(2 * range(0) / dx);
  int Nforw = floor(range(1) / dx);
  // count: expected detections per cell for the current transect;
  // accum: running total over completed transects
  arma::rowvec count(num_cells(1) * Nforw); count.zeros();
  arma::rowvec accum(num_cells(1) * Nforw); accum.zeros();
  // nalive is the number of transect still being surveyed in the meta-transect
  int nalive = num_transects;
  // calculate initial probability in each grid cell
  arma::rowvec pr_t = CalcInitialDistribution(num_cells, delta, region_size);
  arma::rowvec old_pr_t(pr_t);
  // probability outside buffer region at t = 0
  pr_outside = 1.0 - arma::prod(strip_size) / arma::prod(region_size);
  // compute movement matrices for survey
  arma::sp_mat trm;
  arma::rowvec flux = arma::ones<arma::rowvec>(num_cells(0));
  if (move_method > 0) {
    trm = CalcTrm(num_cells, sd, dx);
    flux = Diffuse(trm.t(), flux, dt, num_cells);
    flux = 1.0 - flux;
  }
  double num_boundary_states = floor(prod(region_size) / (dx * dx)) - num_cells(0);
  arma::rowvec intrans = InTransect(num_cells, strip_size, dx, auxiliary_data(2), ymax, buffer, transect_type);
  // intialise variables
  int endtime = transdat(curtrans, 2);
  accu_hazard = 0;
  // compute HMM approximation
  for (int t = 0; t < T; ++t) {
    Rcpp::checkUserInterrupt();
    obspos = GetObserverPosition(t * dt, strip_size, buffer, delta, transect_type, observer_speed);
    // observer grid cell
    sobs = num_cells(1) * floor(obspos(1) / dx);
    // sum from that point to 2 * Nperp * Nforw state or max size
    smax = sobs + num_cells(1) * Nforw - 1;
    if (smax >= num_cells(0)) smax = num_cells(0) - 1;
    if (sobs >= num_cells(0)) sobs = num_cells(0) - 1;
    // add in all animals present in cell
    count.cols(0, smax - sobs) += exp(log(pr_t.cols(sobs, smax) % intrans.cols(sobs, smax)) + accu_hazard);
    // thin pr_t by those that are detected
    pr_t = Detect(t, pr_t, parameter, num_cells, delta, strip_size, buffer, observer_speed, transect_type, hzfn);
    // subtract those animals still present (failed to be detected)
    count.cols(0, smax - sobs) -= exp(log(pr_t.cols(sobs, smax) % intrans.cols(sobs, smax)) + accu_hazard);
    if (move_method > 0) {
      old_pr_t = pr_t;
      // move animals that are inside strip
      pr_t = Diffuse(trm, pr_t, dt, num_cells);
      // move animals outside strip that come into strip
      pr_t += flux * pr_outside / num_boundary_states;
      // account for transversal of boundary
      pr_outside += accu(old_pr_t % flux) - accu(flux) * pr_outside / num_boundary_states;
    }
    // add contribution to accu_hazard
    pr_survived = accu(pr_t) + pr_outside;
    accu_hazard += log(pr_survived);
    // scale to avoid underflow
    pr_t /= pr_survived;
    pr_outside /= pr_survived;
    // if transect ends divide by conditional probability
    while (endtime == t) {
      // NOTE(review): count is added to accum here but never reset between
      // transects — confirm whether completed-transect counts should carry over
      accum += count;
      --nalive;
      ++curtrans;
      if (curtrans > transdat.n_rows - 1) {
        endtime = T + 1;
      } else {
        endtime = transdat(curtrans, 2);
      }
    }
  }
  return accum;
}
|
{"hexsha": "436cd11ff3f0bb55efe44f950287843a0f7340e2", "size": 41050, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/moveds.cc", "max_stars_repo_name": "r-glennie/moveds", "max_stars_repo_head_hexsha": "3fb04969cd0548e65b230ee4dcfb750ee1560b46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/moveds.cc", "max_issues_repo_name": "r-glennie/moveds", "max_issues_repo_head_hexsha": "3fb04969cd0548e65b230ee4dcfb750ee1560b46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/moveds.cc", "max_forks_repo_name": "r-glennie/moveds", "max_forks_repo_head_hexsha": "3fb04969cd0548e65b230ee4dcfb750ee1560b46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3274336283, "max_line_length": 127, "alphanum_fraction": 0.570864799, "num_tokens": 12426}
|
# MNIST digit classification demo with Keras: load the data, flatten and
# normalise the images, one-hot encode the labels, then train a dense
# network with dropout and plot the loss/accuracy curves.
from keras.datasets import mnist
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
######################################### load the dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
################################################## inspect the dataset
from keras.utils import to_categorical
print('Training data shape : ', train_images.shape, train_labels.shape)
print('Testing data shape : ', test_images.shape, test_labels.shape)
# Find the unique numbers from the train labels
classes = np.unique(train_labels)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output classes : ', classes)
plt.figure(figsize=[10,5])
# Plot the first image of the training set
plt.subplot(121)
plt.imshow(train_images[0, :, :], cmap='gray')
plt.title("Ground Truth : {}".format(train_labels[0]))
# Plot the first image of the test set
plt.subplot(122)
plt.imshow(test_images[0, :, :], cmap='gray')
plt.title("Ground Truth : {}".format(test_labels[0]))
plt.show()
#########################################
# Change each 28x28 image matrix into a flat array of dimension 784
dimData = np.prod(train_images.shape[1:])
train_data = train_images.reshape(train_images.shape[0], dimData)
test_data = test_images.reshape(test_images.shape[0], dimData)
# #########################################
# Change to float datatype
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
# Scale the data to lie between 0 to 1
train_data /= 255
test_data /= 255
# #########################################
# Change the labels from integer to categorical (one-hot) data
train_labels_one_hot = to_categorical(train_labels)
test_labels_one_hot = to_categorical(test_labels)
# Display the change for category label using one-hot encoding
print('Original label 0 : ', train_labels[0])
print('After conversion to categorical ( one-hot ) : ', train_labels_one_hot[0])
# # #########################################
# from keras.models import Sequential
# from keras.layers import Dense
#
# model = Sequential()
# model.add(Dense(512, activation='relu', input_shape=(dimData,)))
# model.add(Dense(512, activation='relu'))
# model.add(Dense(nClasses, activation='softmax'))
# # # #########################################
# model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# # # #########################################
# history = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=20, verbose=1,
#                     validation_data=(test_data, test_labels_one_hot))
# # # #########################################
# print(history.history.keys())
# [test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
# print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss, test_acc))
# # # #########################################
# # Plot the Loss Curves
# plt.subplot(121)
# plt.plot(history.history['loss'], 'r')
# plt.plot(history.history['val_loss'], 'b')
# plt.legend(['Training loss', 'Validation Loss'])
# plt.xlabel('Epochs ')
# plt.ylabel('Loss')
# plt.title('Loss Curves')
#
# # Plot the Accuracy Curves
# plt.subplot(122)
# plt.plot(history.history['acc'], 'r')
# plt.plot(history.history['val_acc'], 'b')
# plt.legend(['Training Accuracy', 'Validation Accuracy'])
# plt.xlabel('Epochs ')
# plt.ylabel('Accuracy')
# plt.title('Accuracy Curves')
#
# plt.show()
# #########################################
# Rebuild the network with dropout regularization
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers import Dense
model_reg = Sequential()
model_reg.add(Dense(512, activation='relu', input_shape=(dimData,)))
model_reg.add(Dropout(0.5))
model_reg.add(Dense(512, activation='relu'))
model_reg.add(Dropout(0.5))
model_reg.add(Dense(nClasses, activation='softmax'))
# #########################################
model_reg.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history_reg = model_reg.fit(train_data, train_labels_one_hot, batch_size=256, epochs=20, verbose=1,
                            validation_data=(test_data, test_labels_one_hot))
# Plot the Loss Curves
plt.figure(figsize=[8, 6])
plt.plot(history_reg.history['loss'], 'r', linewidth=3.0)
plt.plot(history_reg.history['val_loss'], 'b', linewidth=3.0)
plt.legend(['Training loss', 'Validation Loss'], fontsize=18)
plt.xlabel('Epochs ', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves', fontsize=16)
# Plot the Accuracy Curves
plt.figure(figsize=[8, 6])
plt.plot(history_reg.history['acc'], 'r', linewidth=3.0)
plt.plot(history_reg.history['val_acc'], 'b', linewidth=3.0)
plt.legend(['Training Accuracy', 'Validation Accuracy'], fontsize=18)
plt.xlabel('Epochs ', fontsize=16)
plt.ylabel('Accuracy', fontsize=16)
plt.title('Accuracy Curves', fontsize=16)
plt.show()
# #########################################
# Predict the most likely class
# NOTE(review): predict_classes was removed from Keras/TF >= 2.6; this code
# assumes an older Keras version — confirm against the pinned dependency.
model_reg.predict_classes(test_data[[0],:])
# Predict the probabilities for each class
model_reg.predict(test_data[[0],:])
#
# img = image.load_img(path="test.png",grayscale=True,target_size=(28,28,1))
# img = image.img_to_array(img)
# test_img = img.reshape((1,784))
# img_class = model_reg.predict_classes(test_img)
# prediction = img_class[0]
#
# print("Class: ",prediction)
# img = img.reshape((28,28))
# plt.imshow(img)
# plt.title(prediction)
# plt.show()
model_reg.save("model.h5")
|
{"hexsha": "189d5a4e3f61f56ca5cadf51956d84e9e26457ce", "size": 5481, "ext": "py", "lang": "Python", "max_stars_repo_path": "mnist_karakter_tanima/mnist_karakter_tanima.py", "max_stars_repo_name": "seyfullahuysal/Derin-renme-Uygulamalar-", "max_stars_repo_head_hexsha": "8c09f5caed9fd0eaf56d348ba32f3752a5fb3b9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-24T15:19:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-24T15:19:31.000Z", "max_issues_repo_path": "mnist_karakter_tanima/mnist_karakter_tanima.py", "max_issues_repo_name": "seyfullahuysal/Derin-renme-Uygulamalar-", "max_issues_repo_head_hexsha": "8c09f5caed9fd0eaf56d348ba32f3752a5fb3b9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mnist_karakter_tanima/mnist_karakter_tanima.py", "max_forks_repo_name": "seyfullahuysal/Derin-renme-Uygulamalar-", "max_forks_repo_head_hexsha": "8c09f5caed9fd0eaf56d348ba32f3752a5fb3b9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-10T08:03:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-24T15:19:40.000Z", "avg_line_length": 38.3286713287, "max_line_length": 99, "alphanum_fraction": 0.6600985222, "include": true, "reason": "import numpy", "num_tokens": 1275}
|
import base64
import io
import tarfile
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import tiledb
from tiledb.tests.common import DiskTestCase
# This test writes to local filesystem, skip
# TODO: unskip if we support transparent file ops on a VFS
@pytest.mark.skipif(
    pytest.tiledb_vfs != "file", reason="Do not run compat test against non-file VFS"
)
class TestBackwardCompatibility(DiskTestCase):
    """Read arrays written by older TileDB-Py versions to verify that
    anonymous attributes (stored internally as "__attr") are still readable
    and are presented to the user under the empty name "".

    Each test embeds a base64-encoded tgz of an array directory produced by
    an older TileDB-Py release, extracts it to a scratch path, and opens it
    with the current library.
    """

    def test_compat_tiledb_py_0_5_anon_attr_dense(self):
        # array written with the following script:
        """
        import tiledb, numpy as np
        dom = tiledb.Domain(tiledb.Dim(name="d", domain=(0, 0), tile=1, dtype=np.uint8))
        attrs = (tiledb.Attr(name="_attr_", dtype=np.uint8),)
        schema = tiledb.ArraySchema(domain=dom, attrs=attrs, sparse=False)
        path = "py0.5.9-test"
        tiledb.DenseArray.create(path, schema)
        with tiledb.open(path, "w") as A:
        A[0] = 1
        """
        # save and print tgz of array directory:
        # f = open("/tmp/py0.5.9-testa2.tgz",'rb').read()
        # s = base64.encodebytes(f)
        # print(f"{s.decode():>32}")
        array_tgz = b"""H4sIADjvS2AAA+2YzW4TMRCA7fIX0SJVFdz9AAg8XtubvbR9AF6gEpLjJg4FmgRttwJuReKAuFFe
        oUcO9A165NJ7jxWPwBOwXq3RZgnNtmkiBPNJ2bEnY89uRjMZrzGgQal2ArFUXNZm0sa8D7GL2tpJ
        SKIk6XIFTiVxlIg4UY9JEzjnMeeskFoVkpfzAAPJhYh1LLVmXIDgQJhqtPuM7O9lNs1v5flwlGaj
        4R/tXu84t3vBPuMPxa79PueEmS3+xvRT+2zghpkZuMz2bGYfZb3tcR9T4g8AuhZ/paOYML6IH+A/
        j//N/KPL8b2go+HbteJKiVfQW/5SjCr23mK1nNOK7g3t9jqd86Vtzfr59JCseU+hXoQVTT15++Wa
        p6DznjbzFYwsoYtLuPi1Y2X8gFzMi1KelpKXCz/TSdbI38/M9d9mWfp7yR9j6v+/ULX6H4GUWP8X
        Aa1IWtMh/z55AqepfWv2ujtuMKF3uw6m5b+AWv6DiiTH/F8EvhPYKsdPg65hs+Ht/Rmt2mwEXd5s
        WHKD7rdOT05a71dWnnxh3zdWOx+/vrt/8Oruh9twdtBeXz8+Omo9vPPJdQj58W15Y47PiUzGmN1R
        9+V88j5w6fM/RFoIzP9FYIpze7P3OFflCvGHSOL7HwRBEARBEARBEARBEARBkFn4CRFQSoEAKAAA"""
        path = self.path("tiledb_py_0_6_anon_attr")
        with tarfile.open(fileobj=io.BytesIO(base64.b64decode(array_tgz))) as tf:
            tf.extractall(path)
        # The attribute was stored as "__attr"; it must surface as "".
        with tiledb.open(path) as A:
            self.assertEqual(A.schema.attr(0).name, "")
            self.assertEqual(A.schema.attr(0)._internal_name, "__attr")
            self.assertEqual(A[0], 1)
            # multi_index must expose the anonymous attribute under "" too
            mres = A.multi_index[0]
            self.assertEqual(mres[""], 1)
            # coords query must return the dimension under its real name
            qres = A.query(coords=True).multi_index[0]
            self.assertEqual(qres["d"], 0)

    def test_compat_py_0_5_anon_attr_sparse(self):
        # This array was written with TileDB-Py 0.5.9:
        # - using the invocation below, followed by
        """
        tiledb.Array.create("path", tiledb.ArraySchema(
        domain=tiledb.Domain(*[
        tiledb.Dim(name='d', domain=(0, 2), tile=2, dtype='uint64'),]),
        attrs=[tiledb.Attr(name='', dtype='int64'),], sparse=True,))
        with tiledb.open("path", 'w') as A:
        A[[0,1,2]] = np.array([1.0,2.0,5.0])
        """
        # - followed by `tar czf array.tgz -C path`
        # - followed by `base64.encodebytes(open("sp6.tgz", 'rb').read())`
        test_array = b"""H4sIANDnmV8AA+2Xz2vUQBTHJ6mLlnpYBGkRD0EQBGV3ZpLJdBFk9bBnj3pKJpvESrsbmo2otyoI
        Pe/JSy9ePXnwruJBPPYv0P4VRRDNhAxm07o/dBN6eJ9lMpmXSd6Eb96bt602qhyMMcfYyHqbZT3O
        xwqDmNyyzfRnWwYmFDOCDFb90hB6MkpEnC7l8TCKk2j413lPt4JgZ8pzJl/KWPo6K6LVdpxBkIgq
        P4OF9Gck1d+kHPSvBan/TtTfbiW+V5WPf9CfM44MXNWCioD+johj8dwZ9beCgajiO5ilP6V2SX9m
        cdC/Fs6lTQm+q2yaunopO2pIGrSGPGRnhfl30tbMx1rB9kzrC9d1fbd5//yh++HCEcXvXu7/6qJx
        7/J3fffuZmP/497qgTYOVo6Ojz+Px9d6zfU3r15o6O322r0q3xgoIuOf2NjsULJppVHHSiOPh6Hn
        9ZnAWFicsk4YspCEOOAd7jFO56kbFq7/KCXEhv2/Dv5bf8cJY/FoEAyTrI70RXJiD5mhPyEWKelv
        M0Yh/9eBzP+38/PryjZn/pfz19Fk/le2NP/7rvtNFz1D+/Rlb/WrhvQf6Ip0p1KGum1ed3L+Wsmd
        skl33fQOA+ngYgEXf9ALkyUreX8r77vodKK8P8x7lj/gtXbabOCMsYT8L5Iknvq3Yeb+z6xS/rew
        bUL+rwMVpRt5K9pUSmjUuiKgTpYQ//0oiv3RlAwwK/7JifrfMjnUf7VQjP+raLJmULYb79s/jY0D
        hB6kdpUUdHTz4cWspAAAAAAAAAAAAAAA4IzzG7vsp0oAKAAA"""
        path = self.path("test_tiledb_py_0_5_anon_attr_sparse")
        with tarfile.open(fileobj=io.BytesIO(base64.b64decode(test_array))) as tf:
            tf.extractall(path)
        # Anonymous attribute of the sparse array must be readable under "".
        with tiledb.open(path) as A:
            assert_array_equal(A[:][""], np.array([1.0, 2.0, 5.0]))

    def test_tiledb_py_0_6_anon_attr(self):
        # same creation steps as above for 0.5
        tgz_sparse = b"""H4sIAJKNpWAAA+2aPW/TQBjHz2nTFlGJClUoAxIuA0ICpXf2vdhbGWBiYEIgihI7MRT1JVKairKh
        qgNfgA2kDnwFVga+ABtfgE8AEwsS5/ROTUzBjWpbKv3/JPexLxc/l/59zz3PJc1lUjqUUiWEO7Ty
        0GqsPbxgnArmUymk71LmUc6JK8ofGiE724Oor4fyYqvXH/S2/trv5VqSbPzjPuMfyi18nCXRXG61
        NpNBVOZjMIH+XEip9fc9xaB/FaT6b/Q6681BNy7Lh/5/SM4n0l8JPf9pWQMaBfq3on4/etXa7qwl
        m1EZz0Ge/p6X1V9wKaF/FdT1sWrOXxs77dhXLw//OiRtcNKuzvBspH+gjwVz7Yy07TqdhNTuzcw4
        OwtT0407qzM3Hi58vzZH7678cN99rl9f2ji40JZ77T0Wzb+JD/rdp8SZnfta2gcFx5LOfyY9xqXn
        ByoIVeYqDJMu44GOyGHCeRIGKuHCF1HsRRGLaacl8jOHifM/z2M+8r9KOL3+zd56jo8J1n+rPxcC
        8b8KjvRnvlSh8rJXcRJ2Euor7gne8XgsJdVPhAoSFXZFogrWX6//aqg/p9C/Ck6vf6Hx3+rPmEL8
        r4IC9G+1nvWj55vJ1mC4k9CNBpkqImf+a7VFRn8phI/5XwVpUh+Yc9fYk+b/af9FMp7/27Zd51vc
        brf3Y7c+e//BFeJ8IJfSG9hoYd9zUl9p/4sZX7ZN1xrdlXrquwYXcAEXx7s4ojbSOWXK2NtknBVy
        Mmxc/GKsZ2781tifxj4xjj8Zu2Qc79sBgKopYP3v5u0Z5uX/7I/8z6ce9n8rwYaAhj6ukvE4Yttu
        flz+5TbeE/JIt9vYUSO3Hs8Pwww4wxQw/3O/Msit/wXP1n9Sof6vhNH538i02ak+njyA/4kC9v+L
        rP/N/q8UmP/VgPofLuDiXLg4AvU/MBSw/hdZ/5v1XxcCDOt/FaD+P98UMP+LrP/t7z8Uxe8/KgH1
        PwAAAAAAAAAAAAAAAAAAAAAAAHD2+Q18oX51AFAAAA=="""
        path = self.path("0_6_anon_sparse")
        with tarfile.open(fileobj=io.BytesIO(base64.b64decode(tgz_sparse))) as tf:
            tf.extractall(path)
        with tiledb.open(path) as A:
            if A.schema.sparse:
                assert_array_equal(A[:][""], np.array([1.0, 2.0, 5.0]))
        ###########################################################################################
        # This test checks that anonymous attributes internally stored as "__attr" are presented
        # as "".
        # The following steps were run under TileDB-Py 0.6
        # Normally, we can't actually write an attribute named "__attr" anymore, so
        # restored a schema written by a patched libtiledb, and rename the attr file.
        # schema_data = b"\x05\x00\x00\x00]\x00\x00\x00\x00\x00\x00\x00q\x00\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x01\x05\x00\x00\x00\x01\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00q\x00\x00\x009\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00q\x00\x00\x009\x00\x00\x00x\x01ce\x80\x00\x01u(\x83\x81\x11\x08\x19\x18\x98XA\xc4\x7f `\xc0\x10\x01\xc9\x83p\n\x1b\x88\x84\xb0\x81\x8a\xc1l\x88\x00H\x9c\r\x88\xe3\xe3\x13KJ\x8aP\x94\x01\x00\xa2c\x0bD"
        # path = self.path("tiledb_py_0_6_anon_attr")
        # ctx = tiledb.default_ctx()
        # dom = tiledb.Domain(tiledb.Dim(name="d", domain=(0, 0), tile=1, dtype=np.uint8))
        # attrs = (tiledb.Attr(name="_attr_", dtype=np.uint8, ctx=ctx),)
        # schema = tiledb.ArraySchema(domain=dom, attrs=attrs, sparse=False, ctx=ctx)
        # tiledb.DenseArray.create(path, schema, ctx=ctx)
        # with tiledb.open(path, "w") as A:
        #     A[0] = 1
        #     fragment_name = os.path.split(list(A.last_write_info.keys())[0])[-1]
        #     fragment_path = os.path.join(path, fragment_name)
        ## fix up the array the override schema
        # with open(os.path.join(path, "__array_schema.tdb"), "wb") as f:
        #     f.write(schema_data)
        # shutil.move(
        #     os.path.join(fragment_path, "_attr_.tdb"),
        #     os.path.join(fragment_path, "__attr.tdb"),
        # )
        tgz_dense = b"""H4sIAL6RpWAAA+2YPW/TQBjH71qQKiKkAEIqYvEIS3p3uRd5A4kB0QUxdUHm/AJFzQu4rlrUoa3K
        EFWMDB2Y+AQs7CAhJD5HPgBfgXNyRq4pdVNyHtDzk5z/3fni55y/L8+TdFaQcwghSghvonKqhkKn
        HcqJoF1KpOx6hDLCCfKE+6UhtLWZ6dQs5eVgmGbDwV/nba8nSe+M65y8KW/u63REZyUI+kmmXT4G
        s/vfZZKD/02Q+98bRhudLA5dxTCfh+R8Jv8VV8gjrhZUBvwPdJrqN8FmtJ70tYvnoM5/xmjFf8El
        A/+b4LI5ntr2a6uXcHH2+uQVo3wA51PxpFWa75ujbfu4NLaDo2Qf4a07hwfXlm4tH6/d/7bnPfvS
        xj8OX125PXr76eDoa2+EHn64OhqPb6w+Onr8HqOPUeuBy5sF/iDf/1QyymWXK6GYqvS4r3gcR2Gi
        lc9JSLTvKxVqbRK6r0jsB6Iz3KiJMfP3P2OCwf5vhH/3v75ynLn+Y4wRCvVfE8zB/yB4nuoX/WSQ
        TX5JxDqrVBE1+59RKSv+S8lh/zdCntSLHbxk9bz5P5/fQifzfzG2g8fhvtE11CqHKKaeN0T7lBDF
        mCkx4nvmHR5agBAQAkKcHuL3FUvtm+hiRFa/W71rL/jO6k+rTxam+tnq8uJUdxcvGBhwxFzyv86y
        9Iw/DmrrfyYq+Z9TTiH/NwEuKa6MAQAAAAAAAAAAAADwf/ALzPk2VwAoAAA="""
        path = self.path("0_6_anon_dense")
        with tarfile.open(fileobj=io.BytesIO(base64.b64decode(tgz_dense))) as tf:
            tf.extractall(path)
        # Same assertions as the 0.5 dense case: "__attr" surfaces as "".
        with tiledb.open(path) as A:
            self.assertEqual(A.schema.attr(0).name, "")
            self.assertEqual(A.schema.attr(0)._internal_name, "__attr")
            self.assertEqual(A[0], 1)
            mres = A.multi_index[0]
            self.assertEqual(mres[""], 1)
            qres = A.query(coords=True).multi_index[0]
            self.assertEqual(qres["d"], 0)
|
{"hexsha": "f01249c6a28718f16deced1950898a313e394bea", "size": 9893, "ext": "py", "lang": "Python", "max_stars_repo_path": "tiledb/tests/test_compat.py", "max_stars_repo_name": "vishalbelsare/TileDB-Py", "max_stars_repo_head_hexsha": "9b1bf3c18fbe9d0de27ab26915f57779d3ea3635", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 136, "max_stars_repo_stars_event_min_datetime": "2018-02-26T05:17:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:59:31.000Z", "max_issues_repo_path": "tiledb/tests/test_compat.py", "max_issues_repo_name": "ihnorton/TileDB-Py", "max_issues_repo_head_hexsha": "bb4d5ea4d07e02721e431956363d3b9d59c3b9e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 578, "max_issues_repo_issues_event_min_datetime": "2018-02-20T02:07:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:24:34.000Z", "max_forks_repo_path": "tiledb/tests/test_compat.py", "max_forks_repo_name": "ihnorton/TileDB-Py", "max_forks_repo_head_hexsha": "bb4d5ea4d07e02721e431956363d3b9d59c3b9e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2018-03-22T04:13:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T13:24:43.000Z", "avg_line_length": 58.1941176471, "max_line_length": 534, "alphanum_fraction": 0.6501566764, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4038}
|
\chapter{crand Example of Extraction/Switch-Level Simulation}
\section{Introduction}
\label{PEintro}
In this example, we will be studying a random counter circuit.
We will see how Space is used for circuit extraction.
And how you can do a switch-level simulation of the circuit.
\\[1 ex]
The layout looks as follows, using the layout editor \io{dali} (see \manualpage{dali}):
\begin{figure}[h]
\centerline{\epsfig{figure=crand/crand.eps, width=15cm}}
\end{figure}
\section{Files}
This tutorial is located in the directory \CACDTOP{demo/crand}.
Initially, it contains the following files:
\begin{filelist}
\item[README] A file containing information about the demo.
\item[crand.cmd] Command file for circuit simulation.
\item[crand.gds] GDS2 file of the layout of the crand design.
\item[script.sh] Batch file for running all commands of the demo in sequence.
\end{filelist}
\section{Running the Extractor}
First, use the following command to change the current working directory '.' into a project directory:
\small
\begin{Verbatim}
% mkpr -p scmos_n -l 0.2 .
\end{Verbatim}
\normalsize
The command specifies the \io{scmos\_n} process from the technology library
and a lambda (design unit) of $0.2 \mu m$.
We use the mask names as defined in the \io{maskdata} file of the library.
And we are using the default technology file \io{space.def.s}
and parameter file \io{space.def.p} of the library.
\small
\begin{Verbatim}
% cgi crand.gds
\end{Verbatim}
\normalsize
Now, we can extract a circuit description for the layout of the \io{crand} cell, as follows:
\small
\begin{Verbatim}
% space -vFc crand
\end{Verbatim}
\normalsize
\small \begin{Verbatim}[frame=single]
Version 5.3.1, compiled on Fri Feb 03 12:45:53 GMT 2006
See http://www.space.tudelft.nl
parameter file: $ICDPATH/share/lib/process/scmos_n/space.def.p
technology file: $ICDPATH/share/lib/process/scmos_n/space.def.t
preprocessing crand (phase 1 - flattening layout)
preprocessing crand (phase 2 - removing overlap)
extracting crand
extraction statistics for layout crand:
capacitances : 221
resistances : 0
nodes : 222
mos transistors : 419
bipolar vertical : 0
bipolar lateral : 0
substrate nodes : 0
overall resource utilization:
memory allocation : 0.287 Mbyte
user time : 0.0
system time : 0.0
real time : 1.5 5%
space: --- Finished ---
\end{Verbatim}
\normalsize
You can show the resulting circuit with one of the circuit listing tools.
For example, to list the circuit in a SLS description, use \io{xsls} (see \io{icdman}).
\small
\begin{Verbatim}
% xsls crand
\end{Verbatim}
\normalsize
The output is default going to "stdout", a part is shown below:
\small \begin{Verbatim}[frame=single]
...
network crand (terminal out_7, out_6, out_5, out_4, out_3, out_2, out_1, out_0,
inpar_7, inpar_6, inpar_5, inpar_4, inpar_3, inpar_2, inpar_1,
inpar_0, serial, vss_lb, vss_lo, sc_l, nsc_l, vdd_lb, vdd_lo,
nphi1_l, phi1_l, nphi2_l, phi2_l, phi1_r, phi2_r, nphi2_r,
nphi1_r, vss_ro, vss_rb, vdd_rb, nsc_r, sc_r, vdd_ro)
{
net {vdd_lo, vdd_ro};
net {phi1_l, phi1_r};
net {phi2_l, phi2_r};
net {nphi2_l, nphi2_r};
net {nphi1_l, nphi1_r};
net {sc_l, sc_r};
net {vdd_lb, vdd_rb};
net {nsc_l, nsc_r};
net {SUBSTR, vss_lb};
net {SUBSTR, vss_rb};
net {SUBSTR, vss_ro};
net {SUBSTR, vss_lo};
cap 2.8f (1, GND);
nenh w=4u l=1.2u (14, 1, 14);
cap 11.44f (2, GND);
penh w=6.8u l=1.2u (14, 2, 14);
cap 11.44f (3, GND);
penh w=6.8u l=1.2u (vdd_lb, 3, 12);
cap 2.8f (4, GND);
nenh w=4u l=1.2u (SUBSTR, 4, SUBSTR);
nenh w=4u l=1.2u (phi1_l, 13, 14);
penh w=6.8u l=1.2u (nphi1_l, 13, 14);
penh w=6.8u l=1.2u (serial, 12, vdd_lb);
nenh w=4u l=1.2u (serial, 5, SUBSTR);
nenh w=4u l=1.2u (phi2_l, 10, 13);
penh w=6.8u l=1.2u (nphi2_l, 10, 13);
penh w=6.8u l=1.2u (nsc_l, 12, vdd_lb);
cap 3.2f (5, GND);
nenh w=4u l=1.2u (nsc_l, 5, 14);
cap 72.16f (6, GND);
penh w=6.8u l=1.2u (vdd_lo, 6, 8);
cap 16f (7, GND);
nenh w=4u l=1.2u (SUBSTR, 7, 9);
nenh w=4u l=1.2u (10, 10, 10);
penh w=6.8u l=1.2u (10, 10, 10);
penh w=6.8u l=1.2u (inpar_0, 12, 14);
nenh w=4u l=1.2u (inpar_0, 11, 14);
nenh w=7.2u l=1.2u (inpar_0, SUBSTR, SUBSTR);
...
...
}
\end{Verbatim}
\normalsize
\section{Running the Switch-Level Simulation}
For this simulation you are using the switch-level simulator \io{sls}.
See the "SLS: Switch-Level Simulator User's Manual" and the manual page \manualpage{sls}.
This simulator is started from the simulation GUI \io{simeye} and
the results are shown in the output window (see \manualpage{simeye}).
\\[1 ex]
First, start the simulation GUI \io{simeye}.
\small
\begin{Verbatim}
% simeye
\end{Verbatim}
\normalsize
Second, prepare the simulation:
\\[1 ex]
Click on the "Simulate" menu and choose the "Prepare" item.
Select in the "Circuit:" field cell name "crand" and
in the "Stimuli:" field file name "crand.cmd" (click on it).
To inspect or edit the input signals, click on the "Edit" button.
\\[1 ex]
Third, start the switch-level simulation:
\\[1 ex]
Go back to the "Simulate" menu and choose the "Prepare" dialog item again:
\begin{figure}[h]
\centerline{\epsfig{figure=crand/dialog.eps, width=9cm}}
\end{figure}
\noindent
In the dialog window, choose simulation "Type: sls-timing" and set "Read: Analog".
Now, start the switch-level timing simulation by clicking on the "Run" button and wait for simulation results.
Below, you see the output waveforms.
\begin{figure}[h]
\centerline{\epsfig{figure=crand/simeye.eps, width=14cm}}
\end{figure}
Note, to exit \io{simeye},
go to the "File" menu and click on "Exit" and "Yes".
|
{"hexsha": "f8b64b0c99e055f86f076ccd8cc7a654fcfa5579", "size": 5821, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/manuals/examples/crand/demo.tex", "max_stars_repo_name": "yrrapt/cacd", "max_stars_repo_head_hexsha": "696f5a22cb71b83eabbb9de199f1972d458fa9e9", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-10-16T11:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T19:46:12.000Z", "max_issues_repo_path": "doc/manuals/examples/crand/demo.tex", "max_issues_repo_name": "yrrapt/cacd", "max_issues_repo_head_hexsha": "696f5a22cb71b83eabbb9de199f1972d458fa9e9", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/manuals/examples/crand/demo.tex", "max_forks_repo_name": "yrrapt/cacd", "max_forks_repo_head_hexsha": "696f5a22cb71b83eabbb9de199f1972d458fa9e9", "max_forks_repo_licenses": ["ISC"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-29T18:15:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T18:15:17.000Z", "avg_line_length": 33.8430232558, "max_line_length": 110, "alphanum_fraction": 0.6856210273, "num_tokens": 2061}
|
# Pandas join/merge exercises on a hotel-reservation dataset: inner joins
# against a master table, conditional master selection, window functions over
# past reservations, and a cross join for per-month summaries.
import gc
print("############################################")
print("## 4.1. 결합, 마스터 테이블에서 정보 얻기 ")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
# Join first, then filter (filtering before the join is cheaper — see below).
result=pd.merge(reserve_tb, hotel_tb, on='hotel_id', how='inner')\
    .query('people_num == 1 & is_business')
print(hotel_tb.head())
print(reserve_tb.head())
print('------------------')
print(result)
# Same result, but each side is filtered before the join.
result=pd.merge(reserve_tb.query('people_num == 1'),
                hotel_tb.query('is_business'),
                on='hotel_id', how='inner')
print('------------------')
print(result)
print("############################################")
print("## 4.2. 결합, 조건에 따라 결합할 마스터 테이블 변경하기")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
print(hotel_tb.head())
# Count hotels per (big_area, small_area) pair.
small_area_mst=hotel_tb\
    .groupby(['big_area_name', 'small_area_name'], as_index=False)\
    .size().reset_index()
small_area_mst.columns=['index','big_area_name', 'small_area_name', 'hotel_cnt']
print(small_area_mst.head())
# Join key: use the small area when it has at least 20 other hotels,
# otherwise fall back to the (coarser) big area.
small_area_mst['join_area_id']=\
    np.where(small_area_mst['hotel_cnt']-1>=20,
             small_area_mst['small_area_name'],
             small_area_mst['big_area_name'])
small_area_mst.drop(['hotel_cnt', 'big_area_name'], axis=1, inplace=True)
print('-------------------------')
print(small_area_mst.head())
# Attach the chosen join key to each hotel.
base_hotel_mst=pd.merge(hotel_tb, small_area_mst, on='small_area_name')\
    .loc[:, ['hotel_id', 'join_area_id']]
print('-------------------------')
print(base_hotel_mst.head())
del small_area_mst
gc.collect()
print('1------------------------')
# Candidate recommendations: every hotel listed under both its small-area
# and big-area key, so either key granularity can be joined against.
recommend_hotel_mst=pd.concat([\
    hotel_tb[['small_area_name', 'hotel_id']]\
    .rename(columns={'small_area_name': 'join_area_id'}, inplace=False),
    hotel_tb[['big_area_name', 'hotel_id']]\
    .rename(columns={'big_area_name': 'join_area_id'}, inplace=False)\
])
print(recommend_hotel_mst.head())
print('2------------------------')
recommend_hotel_mst.rename(columns={'hotel_id':'rec_hotel_id'}, inplace=True)
print(recommend_hotel_mst.head())
print('3------------------------')
# Pair each hotel with recommendation candidates in the same area,
# excluding the hotel itself.
result=pd.merge(base_hotel_mst, recommend_hotel_mst, on='join_area_id')\
    .loc[:,['hotel_id', 'rec_hotel_id']]\
    .query('hotel_id != rec_hotel_id')
print('4------------------------')
print('-------------------------')
print(base_hotel_mst.head())
print('-------------------------')
print(recommend_hotel_mst.head())
print('-------------------------')
print(result)
print("############################################")
print("## 4.3. 과거의 데이터 정보 얻기 (n번 이전 까지의 데이터")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
print(hotel_tb.head())
# Sort each customer's reservations chronologically.
result=reserve_tb.groupby('customer_id')\
    .apply(lambda x: x.sort_values(by='reserve_datetime', ascending=True))\
    .reset_index(drop=True)
print(result)
# Rolling mean of the last 3 total_price values per customer
# (min_periods=1 so early rows still get a value).
result['price_avg']=pd.Series(
    result.groupby('customer_id')
    ['total_price'].rolling(center=False, window=3, min_periods=1).mean()
    .reset_index(drop=True)
)
print('-----------------')
print(result)
# Shift by one so each row sees only strictly-previous reservations.
result['price_avg']=\
    result.groupby('customer_id')['price_avg'].shift(periods=1)
print('-----------------')
print(result)
print("############################################")
print("## 4.4. 과거의 데이터 정보 얻기 (과거 n일의 합계)")
print("############################################")
import pandas as pd
import numpy as np
import pandas.tseries.offsets as offsets
import operator
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
print(hotel_tb.head())
reserve_tb['reserve_datetime']=\
    pd.to_datetime(reserve_tb['reserve_datetime'], format='%Y-%m-%d %H:%M:%S')
# Self-join on customer_id: pair each reservation with every other
# reservation of the same customer.
sum_table=pd.merge(
    reserve_tb[['reserve_id', 'customer_id', 'reserve_datetime']],
    reserve_tb[['customer_id', 'reserve_datetime', 'total_price']]
    .rename(columns={'reserve_datetime':'reserve_datetime_before'}),
    on='customer_id')
print('--------------')
print(sum_table)
print('--------------')
print(reserve_tb[['reserve_id', 'customer_id', 'reserve_datetime']])
print('--------------')
print(reserve_tb[['customer_id', 'reserve_datetime', 'total_price']])
# Keep only the partner reservations made within the previous 90 days,
# then sum their total_price per reservation.
sum_table=sum_table[operator.and_(
    sum_table['reserve_datetime'] > sum_table['reserve_datetime_before'],
    sum_table['reserve_datetime']+offsets.Day(-90) <= sum_table['reserve_datetime_before'])].groupby('reserve_id')['total_price'].sum().reset_index()
print('--------------')
print(sum_table)
sum_table.columns=['reserve_id','total_price_sum']
print('--------------')
print(sum_table)
# Left join back; customers with no prior 90-day spend get 0.
result=pd.merge(reserve_tb, sum_table, on='reserve_id', how='left').fillna(0)
print('--------------')
print(result)
print("############################################")
print("## 4.5. 상호 결합")
print("############################################")
import pandas as pd
import numpy as np
import pandas.tseries.offsets as offsets
import operator
import datetime
from dateutil.relativedelta import relativedelta
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
customer_tb=pd.read_csv('./data/customer.csv', encoding='UTF-8')
print(customer_tb.head())
# Month master: "201701", "201702", "201703".
month_mst=pd.DataFrame({
    'year_month':
    [(datetime.date(2017,1,1) + relativedelta(months=x)).strftime("%Y%m")
     for x in range(0,3)]
})
print('---------------')
print(month_mst)
# Constant join key on both sides produces a cross join.
customer_tb['join_key']=0
month_mst['join_key']=0
print('---------------')
print(month_mst)
customer_mst=pd.merge(
    customer_tb[['customer_id', 'join_key']], month_mst, on='join_key'
)
print('---------------')
print(customer_mst)
reserve_tb['year_month']=reserve_tb['checkin_date']\
    .apply(lambda x: pd.to_datetime(x, format='%Y-%m-%d').strftime("%Y%m"))
print('---------------')
print(reserve_tb)
# Monthly spend per customer, with every (customer, month) pair present.
summary_result=pd.merge(
    customer_mst,
    reserve_tb[['customer_id', 'year_month', 'total_price']],
    on=['customer_id', 'year_month'], how='left').groupby(['customer_id','year_month'])['total_price']\
    .sum().reset_index()
summary_result.fillna(0, inplace=True)
print('---------------')
print(summary_result)
|
{"hexsha": "5a11d846fd7699b693ea13789d714a520e2da535", "size": 6886, "ext": "py", "lang": "Python", "max_stars_repo_path": "chapter4_study.py", "max_stars_repo_name": "gusdyd98/py_datapreprocessingwar", "max_stars_repo_head_hexsha": "907e4e8e2a9619bd33a34bcb4af1760f6f7ca3ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapter4_study.py", "max_issues_repo_name": "gusdyd98/py_datapreprocessingwar", "max_issues_repo_head_hexsha": "907e4e8e2a9619bd33a34bcb4af1760f6f7ca3ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter4_study.py", "max_forks_repo_name": "gusdyd98/py_datapreprocessingwar", "max_forks_repo_head_hexsha": "907e4e8e2a9619bd33a34bcb4af1760f6f7ca3ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7661290323, "max_line_length": 149, "alphanum_fraction": 0.6115306419, "include": true, "reason": "import numpy", "num_tokens": 1701}
|
import cv2 as cv
import numpy as np
class StartState(object):
    """Ball state before launch: the ball rides on top of the paddle.

    ``action`` launches the ball by switching to :class:`PlayState`;
    ``update`` keeps the ball glued to the paddle; ``die`` is a no-op.
    """

    def action(self, ball):
        # Launch: hand control over to the in-play state.
        ball.state = PlayState()

    def update(self, ball, player, bricks, walls):
        # Pin the ball to the horizontal centre of the paddle, resting on top.
        centre_x = player.x + player.w // 2
        top_y = player.y - ball.rad
        ball.pos = np.float32([centre_x, top_y])

    def die(self, ball):
        # The ball cannot be lost before it is launched.
        pass
class PlayState(object):
    """Ball state while in play: advances the ball and handles the walls.

    Side walls only clamp the position; the ceiling bounces the ball straight
    down; crossing the bottom edge loses the ball (``die``).
    """

    def action(self, ball):
        # No launch action while the ball is already in flight.
        pass

    def update(self, ball, player, bricks, shape):
        # Candidate position one step along the current direction.
        candidate = ball.pos + ball.vec * ball.speed
        height, width = shape
        # Left wall: clamp position (direction unchanged).
        if candidate[0] - ball.rad <= 0:
            candidate[0] = ball.rad
        # Right wall: clamp position (direction unchanged).
        if candidate[0] + ball.rad >= width:
            candidate[0] = width - ball.rad
        # Ceiling: clamp and send the ball straight down.
        if candidate[1] - ball.rad <= 0:
            candidate[1] = ball.rad
            ball.vec = np.array([0, 1])
        # Bottom edge: the ball is lost; do not commit the move.
        if candidate[1] + ball.rad >= height:
            ball.die()
            return
        ball.pos = candidate

    def die(self, ball):
        # Reset: point upward again and park the ball back on the paddle.
        ball.vec = np.array([0, -1])
        ball.state = StartState()
class Ball(object):
    """The game ball: position, radius, unit direction vector and speed.

    Movement logic is delegated to a state object (StartState before launch,
    PlayState in flight) — the State pattern.
    """

    def __init__(self, pos, rad=5, color=(0, 0, 255), speed=0.75):
        self.pos = pos                 # centre of the ball (array-like [x, y])
        self.rad = rad                 # radius in pixels
        self.vec = np.array([0, -1])   # unit direction; starts pointing up
        self.color = color             # BGR colour used by render()
        self.state = StartState()      # ball begins parked on the paddle
        self.speed = speed             # pixels advanced per update step

    def render(self, grid):
        """Draw the ball as a filled circle on the BGR image ``grid``."""
        x, y = np.int_(self.pos)
        return cv.circle(grid, (x, y), self.rad, self.color, -1)

    def update(self, player, bricks, walls):
        """Advance one frame by delegating to the current state."""
        self.state.update(self, player, bricks, walls)

    def intersect(self, brick):
        """Return True if the ball centre lies within ``brick``'s bounds.

        NOTE(review): this reads ``brick.pos``, but the Brick class in this
        file defines x/y/a/b/c/d and no ``pos`` attribute — passing a Brick
        here would raise AttributeError. Confirm the intended argument type.
        """
        return self.pos[0] <= brick.pos[0] + brick.w and \
               self.pos[0] >= brick.pos[0] - brick.w and \
               self.pos[1] <= brick.pos[1] + brick.h and \
               self.pos[1] >= brick.pos[1] - brick.h

    def die(self):
        """Lose the ball (delegated to the state)."""
        self.state.die(self)

    def action(self):
        """Launch the ball (delegated to the state)."""
        self.state.action(self)

    def get_normal(self, brick):
        # TODO: unimplemented stub — should return the unit normal of the
        # brick face the ball hit, for use by rebound().
        pass

    def rebound(self, normal):
        """Reflect the velocity off a surface with unit normal ``normal``.

        Uses the standard reflection r = v - 2*(v.n)*n. (Bug fix: the
        previous formula 2*(n.v)*n - v mirrors the vector across the normal
        axis, which reverses the tangential component and produces an
        unphysical bounce.)
        """
        self.vec = self.vec - 2 * np.dot(self.vec, normal) * normal
        self.pos += self.vec * self.speed
def point_in_rect(p, rect):
    """Return True when point ``p`` lies inside the parallelogram with origin
    corner ``rect.a`` and adjacent corners ``rect.b`` and ``rect.d``.

    Standard projection test: p is inside iff the projection of the
    corner-to-point vector onto each edge falls within that edge's own
    squared length.
    """
    to_point = p - rect.a
    edge_ab = rect.b - rect.a
    edge_ad = rect.d - rect.a
    along_ab = np.dot(to_point, edge_ab)
    along_ad = np.dot(to_point, edge_ad)
    return (0 <= along_ab <= np.dot(edge_ab, edge_ab)
            and 0 <= along_ad <= np.dot(edge_ad, edge_ad))
def line_circle_intersect(circle, a, b):
    """Unfinished stub for a segment/circle intersection test.

    Computes the segment direction and the circle-center offset but never
    uses them; the function implicitly returns None.  TODO: complete it
    (the usual approach solves |f + t*d|^2 = r^2 for t in [0, 1]).
    """
    d = b - a
    f = a - circle.pos
class Brick(object):
    """A rectangular brick (also reused for the paddle) with hit points."""
    # Color per remaining hit-point count (BGR): red, green, blue.
    __brick_colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
    def __init__(self, rect, hp):
        self.x, self.y, self.w, self.h = rect
        # Corner vectors, clockwise from the top-left: a, b, c, d.
        self.a = np.array([self.x, self.y])
        self.b = np.array([self.x + self.w, self.y])
        self.c = np.array([self.x + self.w, self.y + self.h])
        self.d = np.array([self.x, self.y + self.h])
        self.rect = rect
        self.hp = hp
    def render(self, grid):
        """Paint the brick's rectangle into ``grid`` in its hp color."""
        rows = slice(self.y, self.y + self.h)
        cols = slice(self.x, self.x + self.w)
        grid[rows, cols] = self.__brick_colors[self.hp]
    def hit(self):
        """Register one hit against this brick."""
        self.hp -= 1
    def dead(self):
        """Return True once hit points have dropped below zero."""
        return self.hp < 0
class App(object):
    """Game shell: owns the grid, paddle, ball and bricks and runs the
    blocking OpenCV render/input loop."""
    # Brick color cycle used when laying out rows (BGR).
    __level_colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
    def mainloop(self):
        """Create the game objects and run the event loop until 'q' quits."""
        self.state = StartState()
        self.running = True
        self.reset_grid()
        # Paddle: a 40x10 zero-hp brick centered near the bottom of the grid.
        self.player = Brick((self.grid.shape[1]//2-20, self.grid.shape[0]-15, 40, 10), 0)
        # NOTE(review): this passes a 1-element position array; it only works
        # because StartState.update() overwrites ball.pos with a proper (x, y)
        # pair before the first render -- confirm before relying on it.
        self.ball = Ball(np.array([self.player.x + self.player.w//2]))
        self.setupBricks()
        cv.namedWindow('window', cv.WINDOW_NORMAL)
        while self.running:
            # Update physics, redraw, present the frame, then poll input.
            self.ball.update(self.player, self.bricks, self.grid.shape[:2])
            self.render()
            cv.imshow('window', self.grid)
            c = cv.waitKey(1)
            self.handleInput(c)
        cv.destroyAllWindows()
    def drawPlayer(self):
        # NOTE(review): unused; would raise because Brick instances have no
        # ``pos`` attribute (the paddle is drawn via Brick.render instead).
        x, y = self.player.pos
        self.grid[y-5:y+5, x-20:x+20] = (0, 0, 255)
    def drawBricks(self):
        # Render every brick onto the current grid.
        for brick in self.bricks:
            brick.render(self.grid)
    def reset_grid(self):
        # Fresh black 400x300 BGR canvas.
        self.grid = np.zeros((300, 400, 3))
    def render(self):
        """Redraw the frame: clear, then paddle, then ball."""
        self.reset_grid()
        self.player.render(self.grid)
        self.grid = self.ball.render(self.grid)
        # self.drawBricks()
    def setupBricks(self):
        """Lay out rows of bricks across the top, cycling hp (and thus color)."""
        self.bricks = []
        i = 2
        for x in range(0, 400, 40):
            for y in range(5, 30, 10):
                self.bricks.append(Brick((x, y, 40, 10), i))
                i = (i - 1) % len(self.__level_colors)
    def handleInput(self, c):
        """React to one key code: q quits, a/d move the paddle, space launches."""
        if c == ord('q'):
            self.running = False
        if c == ord('d'):
            self.player.x = min(self.player.x + 10, 380)
        elif c == ord('a'):
            self.player.x = max(self.player.x - 10, 20)
        elif c == ord(' '):
            self.ball.action()
def main():
    """Entry point: run the game loop."""
    App().mainloop()
if __name__ == "__main__":
    main()
|
{"hexsha": "0297692e3350c8efb337eb3dd3343ecf8b21037e", "size": 4856, "ext": "py", "lang": "Python", "max_stars_repo_path": "breakout.py", "max_stars_repo_name": "mikeswhitney33/face-detection-breakout-python", "max_stars_repo_head_hexsha": "6d6b0202564ba9585d46eb971fd0b8179caf5bc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "breakout.py", "max_issues_repo_name": "mikeswhitney33/face-detection-breakout-python", "max_issues_repo_head_hexsha": "6d6b0202564ba9585d46eb971fd0b8179caf5bc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "breakout.py", "max_forks_repo_name": "mikeswhitney33/face-detection-breakout-python", "max_forks_repo_head_hexsha": "6d6b0202564ba9585d46eb971fd0b8179caf5bc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2808988764, "max_line_length": 89, "alphanum_fraction": 0.5273887974, "include": true, "reason": "import numpy", "num_tokens": 1374}
|
from styx_msgs.msg import TrafficLight
import cv2
#import rospy
import numpy as np
#from std_msgs.msg import Int32
class TLClassifier(object):
    """Classifies the color of a traffic light by HSV thresholding."""
    def __init__(self):
        #TODO load classifier
        pass
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # (lower HSV bound, upper HSV bound, resulting state), checked in order.
        color_ranges = [
            ((0, 120, 120), (10, 255, 255), TrafficLight.RED),
            ((28, 120, 120), (43, 255, 255), TrafficLight.YELLOW),
            ((64, 120, 120), (99, 255, 255), TrafficLight.GREEN),
        ]
        for lower, upper, state in color_ranges:
            mask = cv2.inRange(hsv, np.array(lower, np.uint8), np.array(upper, np.uint8))
            # More than 50 matching pixels counts as a detection.
            if cv2.countNonZero(mask) > 50:
                return state
        return TrafficLight.UNKNOWN
|
{"hexsha": "6ca51c4d2ae00de721a8d2886a1e8626862f3c82", "size": 1263, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_stars_repo_name": "truongconghiep/CarND-Capstone-1", "max_stars_repo_head_hexsha": "e523adeaa7525f8d0cef94810db855d8331cddec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_issues_repo_name": "truongconghiep/CarND-Capstone-1", "max_issues_repo_head_hexsha": "e523adeaa7525f8d0cef94810db855d8331cddec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_forks_repo_name": "truongconghiep/CarND-Capstone-1", "max_forks_repo_head_hexsha": "e523adeaa7525f8d0cef94810db855d8331cddec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.575, "max_line_length": 114, "alphanum_fraction": 0.6286619161, "include": true, "reason": "import numpy", "num_tokens": 340}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
from bg import Multicolor, KBreak, BreakpointGraph, GRIMMReader, NewickReader, BGGenome
import itertools
import networkx as nx
import os
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)gwu.edu"
################################################################################################################
#
# START OF supporting functions that implement some functionality, that BreakpointGraph class lacks in the bg package
#
################################################################################################################
def get_vertex_surrounding_multicolor(graph, vertex):
    """Accumulate and return the combined multicolor (with multiplicities)
    over every edge incident to ``vertex`` in ``graph``.
    """
    accumulated = Multicolor()
    for incident_edge in graph.get_edges_by_vertex(vertex):
        accumulated += incident_edge.multicolor
    return accumulated
def get_irregular_edge_by_vertex(graph, vertex):
    """Return the first irregular edge incident to ``vertex``, or None.

    In the "no repeat" scenario a vertex can have at most one irregular
    edge, so "first" is unambiguous.
    """
    return next((candidate for candidate in graph.get_edges_by_vertex(vertex)
                 if candidate.is_irregular_edge), None)
def get_irregular_vertex(bgedge):
    """Return the irregular endpoint of ``bgedge``.

    Only called on irregular edges in the current implementation, where at
    least one endpoint is guaranteed to be irregular.
    """
    if bgedge.vertex1.is_irregular_vertex:
        return bgedge.vertex1
    return bgedge.vertex2
def supports_a_pair_of_irregular_edges(graph, edge):
    """Return True when both endpoints of ``edge`` have an incident irregular
    edge, i.e. ``edge`` "supports" a pair of irregular edges."""
    return all(get_irregular_edge_by_vertex(graph, endpoint) is not None
               for endpoint in (edge.vertex1, edge.vertex2))
################################################################################################################
#
# END OF supporting functions that implement some functionality, that BreakpointGraph class lacks in the bg package
#
################################################################################################################
################################################################################################################
#
# START OF main scaffolding algorithm functions
#
################################################################################################################
def get_support_edge_scores(graph, subnet, target_multicolor, tree, verbose=False, verbose_destination=None):
    """
    For a supplied connected component (assumed to be pre-filtered beforehand)
    for every regular edge in it (which after pre-filtration corresponds to a "supporting" edge only)
    computes summands ("before" and "after") for its "assembly score" (defined as "before" - "after")

        where "before" is a score for the pair of irregular edges under observation and their support edge
        (every support edge corresponds to a pair of irregular edges), as if the assembly DID NOT happen

        where "after" is the same score, as if the assembly DID happen

    Returns a list of tuples ((v1, v2), before, after, ex_data).

    Fixes over the original:
    * the "score after" verbose print now goes to ``verbose_destination``
      like every other verbose message (it previously went to stdout);
    * loop-invariant ``full_multicolor``/``guidance`` are hoisted out of the loop;
    * redundant ``sorted(sorted(...))`` collapsed to a single sort.
    """
    result = []
    if verbose:
        print(">Getting support edge scores", file=verbose_destination)
    # we need a full multicolor to determine which colors are missing (indels) at the vertices;
    # it and the guidance list depend only on the tree, so compute them once
    full_multicolor = max(tree.consistent_multicolors, key=lambda multicolor: len(list(multicolor.multicolors.elements())))
    full_multicolor = Multicolor(*full_multicolor.colors)
    # a list of tree consistent multicolors, used to split any given multicolor into a smallest set of
    # T-consistent multicolors
    guidance = tree.consistent_multicolors
    # we iterate over all regular edges in the pre-filtered connected component,
    # as all of them correspond to possible assembly points
    for edge in filter(lambda e: not e.is_irregular_edge, subnet.edges()):
        ex_data = {}
        if verbose:
            print("Possible assembly point", edge.vertex1.name, edge.vertex2.name,
                  [color.name for color in edge.multicolor.multicolors.elements()], file=verbose_destination)
        # basic references to the most important parts of the observed possible assembly point
        v1, v2 = edge.vertex1, edge.vertex2
        sedge = edge
        # since each vertex has at most 1 irregular edge incident to it, we can safely retrieve
        # the first one for each vertex
        iedge1 = get_irregular_edge_by_vertex(subnet, v1)
        iedge2 = get_irregular_edge_by_vertex(subnet, v2)
        if verbose:
            print("irregular edge 1:", iedge1.vertex1.name, iedge1.vertex2.name,
                  [color.name for color in iedge1.multicolor.multicolors.elements()], file=verbose_destination)
            print("irregular edge 2:", iedge2.vertex1.name, iedge2.vertex2.name,
                  [color.name for color in iedge2.multicolor.multicolors.elements()], file=verbose_destination)
        # we accumulate multicolors for both vertices, combined over all edges incident to them;
        # this information must come from the original whole graph, and not from the filtered one
        surrounding1 = Multicolor(*get_vertex_surrounding_multicolor(graph, v1).colors)
        surrounding2 = Multicolor(*get_vertex_surrounding_multicolor(graph, v2).colors)
        # complementary multicolors for each vertex
        # (multicolors that are not present in edges incident to the respective vertices)
        c_1_multicolor = full_multicolor - surrounding1
        c_2_multicolor = full_multicolor - surrounding2
        # their intersection corresponds to multicolors that are absent at both vertices
        c = c_1_multicolor.intersect(c_2_multicolor)
        # and these correspond to colors lacking uniquely at each vertex
        c_a = c_1_multicolor - c
        c_b = c_2_multicolor - c
        if verbose:
            print("\tfull multicolor:", [color.name for color in full_multicolor.multicolors.elements()],
                  file=verbose_destination)
            print("\ts1 multicolor:", [color.name for color in surrounding1.multicolors.elements()],
                  file=verbose_destination)
            print("\ts2 multicolor:", [color.name for color in surrounding2.multicolors.elements()],
                  file=verbose_destination)
            print("\tc_1 multicolor:", [color.name for color in c_1_multicolor.multicolors.elements()],
                  file=verbose_destination)
            print("\tc_2 multicolor:", [color.name for color in c_2_multicolor.multicolors.elements()],
                  file=verbose_destination)
            print("\tc multicolor:", [color.name for color in c.multicolors.elements()],
                  file=verbose_destination)
            print("\tc_a multicolor:", [color.name for color in c_a.multicolors.elements()],
                  file=verbose_destination)
            print("\tc_b multicolor:", [color.name for color in c_b.multicolors.elements()],
                  file=verbose_destination)
        # we compute summands for the "before" score for the respective three edges,
        # adding the lacking colors to the edges' multicolors during this computation
        ###################################################
        # we don't account for multiplicity in guidance, as in guidance each color is present exactly once,
        # as it is when no information about whole genome duplication is available,
        # but since there might be duplications in the multicolors on the edges, we would like to interpret
        # those multicolors as multicolors where each present color has multiplicity 1
        ###################################################
        ie1_score = len(Multicolor.split_colors(Multicolor(*iedge1.multicolor.colors) + c_a, guidance=guidance,
                                                account_for_color_multiplicity_in_guidance=False))
        ie2_score = len(Multicolor.split_colors(Multicolor(*iedge2.multicolor.colors) + c_b, guidance=guidance,
                                                account_for_color_multiplicity_in_guidance=False))
        s_multicolor = (Multicolor(*sedge.multicolor.colors))
        ex_data["s_support"] = sorted([color.name for color in target_multicolor.intersect(s_multicolor).colors])
        if target_multicolor <= s_multicolor:
            s_multicolor -= target_multicolor
        se_score = len(Multicolor.split_colors(s_multicolor + c, guidance=guidance,
                                               account_for_color_multiplicity_in_guidance=False))
        before = ie1_score + ie2_score + se_score
        if verbose:
            print("score before: iedge1 score =", ie1_score,
                  "iedge2 score =", ie2_score,
                  "support edge score =", se_score,
                  "\"before\" score =", before,
                  file=verbose_destination)
        # we compute summands for the "after" score for the respective three edges,
        # as if the assembly was performed (same multiplicity remarks as above apply)
        new_ie1_score = len(Multicolor.split_colors(Multicolor(*iedge1.multicolor.colors) - target_multicolor + c_a, guidance=guidance,
                                                    account_for_color_multiplicity_in_guidance=False))
        new_ie2_score = len(Multicolor.split_colors(Multicolor(*iedge2.multicolor.colors) - target_multicolor + c_b, guidance=guidance,
                                                    account_for_color_multiplicity_in_guidance=False))
        new_se_score = len(Multicolor.split_colors(s_multicolor + target_multicolor + c, guidance=guidance,
                                                   account_for_color_multiplicity_in_guidance=False))
        after = new_ie1_score + new_ie2_score + new_se_score
        if verbose:
            # bug fix: this message previously went to stdout instead of verbose_destination
            print("score after: iedge1 score =", new_ie1_score,
                  "iedge2 score =", new_ie2_score,
                  "support edge score =", new_se_score,
                  "\"after\" score =", after,
                  file=verbose_destination)
        result.append(((v1, v2), before, after, ex_data))
    # result: list of tuples with a possible assembly point (vertex pair)
    # and its "before"/"after" scores plus extra reporting data
    return result
def get_irregular_subnets(cc, target_multicolor, exclude, verbose=False, verbose_destination=None):
    """
    For a supplied connected component (which is assumed to be a deepcopy from an original breakpoint graph)
    we filter out all edges that are of no interest for the scaffolding purposes with the current target multicolor:
        1. target multicolor must be fully present in the irregular edge
        2. all colors from the target multicolor must be present exactly once in the irregular edge multicolor
        3. no colors from the "exclude" set must be present in the irregular edge multicolor
        4. a regular edge must "support" two irregular edges that survived the filtration above

    Returns the non-empty connected components the filtered graph falls apart into.

    Fixes over the original: log-message and comment typos corrected; the
    final loop no longer shadows the ``cc`` argument.
    """
    to_remove = []
    if verbose:
        print(">Getting irregular subnets", file=verbose_destination)
        print("Graph contains", len(list(cc.edges())), "edges", file=verbose_destination)
        print("Removing uninteresting irregular edges", file=verbose_destination)
    ####################################################################################################
    # we work with a supplied connected component,
    # assuming that it is a deepcopy of the connected component in the original graph,
    # so we can do with it whatever we want without affecting data in the original graph
    ####################################################################################################
    for bgedge, key in cc.edges(keys=True):
        ####################################################################################################
        # infinity edges must fully contain the target multicolor
        ####################################################################################################
        if bgedge.is_irregular_edge and not target_multicolor <= bgedge.multicolor:
            if verbose:
                print("    removing", bgedge.vertex1.name, "--", bgedge.vertex2.name,
                      [color.name for color in bgedge.multicolor.multicolors.elements()], ": no target multicolor",
                      file=verbose_destination)
            to_remove.append((bgedge, key))
            continue
        ####################################################################################################
        # infinity edges must not contain colors from the excluded group
        ####################################################################################################
        if bgedge.is_irregular_edge and any(map(lambda color: color in bgedge.multicolor.colors, exclude)):
            if verbose:
                print("    removing", bgedge.vertex1.name, "--", bgedge.vertex2.name,
                      [color.name for color in bgedge.multicolor.multicolors.elements()],
                      ": contains exclude multicolor", file=verbose_destination)
            to_remove.append((bgedge, key))
            continue
        ####################################################################################################
        # in an infinity edge multicolor all colors from the targeted multicolor must have multiplicity one
        ####################################################################################################
        if bgedge.is_irregular_edge and any(map(lambda color: bgedge.multicolor.multicolors[color] > 1, target_multicolor.colors)):
            if verbose:
                print("    removing", bgedge.vertex1.name, "--", bgedge.vertex2.name,
                      [color.name for color in bgedge.multicolor.multicolors.elements()],
                      ": multiplicity of some target multicolor is greater than 1", file=verbose_destination)
            to_remove.append((bgedge, key))
            continue
        ####################################################################################################
    ##########################################################################################################
    # once we've gathered all uninteresting / ambiguous (from the target multicolor scaffolding standpoint)
    # irregular edges, we remove them
    ##########################################################################################################
    for bgedge, key in to_remove:
        cc.delete_bgedge(bgedge, key)
    if verbose:
        print("Graph contains", len(list(cc.edges())), "edges", file=verbose_destination)
    to_remove = []
    ################################################################################
    # after only the infinity edges of interest for the scaffolding purposes are left,
    # we filter regular edges to leave only those that support these infinity edges
    ################################################################################
    if verbose:
        print("Removing uninteresting regular edges", file=verbose_destination)
    for bgedge, key in cc.edges(keys=True):
        ################################################################################
        # we only need edges that are either infinity edges left during the previous step,
        # or support edges (have infinity edges at both vertices they are incident to)
        ################################################################################
        if not bgedge.is_irregular_edge and not supports_a_pair_of_irregular_edges(cc, bgedge):
            if verbose:
                print("    removing", bgedge.vertex1.name, "--", bgedge.vertex2.name,
                      [color.name for color in bgedge.multicolor.multicolors.elements()],
                      ": does not support any irregular edge", file=verbose_destination)
            to_remove.append((bgedge, key))
    ##########################################################################################################
    # remove all regular edges that do not support pairs of interesting
    # (from the scaffolding standpoint) irregular edges
    ##########################################################################################################
    for bgedge, key in to_remove:
        cc.delete_bgedge(bgedge, key)
    if verbose:
        print("Graph contains", len(list(cc.edges())), "edges", file=verbose_destination)
    ##########################################################################################################
    # once all the uninteresting infinity and regular edges are deleted, the connected component
    # is torn apart into multiple connected components, each of which can be processed independently
    # and which contain only edges of interest for us
    ##########################################################################################################
    result = []
    for subgraph in cc.connected_components_subgraphs(copy=False):
        if len(list(subgraph.edges())) > 0:
            result.append(subgraph)
    return result
def identify_assembly_points(graph, bgtree, target_multicolor, exclude=None, verbose=False, verbose_destination=None):
    """
    The main granular assembling function, that IDENTIFIES assembly points, but does not perform the assembly on its own
    It DOES NOT change the supplied breakpoint graph in any way!!!

    Returns a list of tuples (v1, v2, (before, after, ex_data)) where (v1, v2)
    is an identified assembly point and before/after are its score summands.
    """
    if verbose:
        print(">>Identifying assemblies for target multicolor:",
              [e.name for e in target_multicolor.multicolors.elements()], file=verbose_destination)
    guidance = bgtree.consistent_multicolors[:]
    # "offset" compensates for a non-T-consistent target: it is the number of
    # extra T-consistent chunks the target multicolor splits into
    offset = len(Multicolor.split_colors(target_multicolor, guidance=guidance,
                                         account_for_color_multiplicity_in_guidance=False)) - 1
    threshold = 1 if offset == 0 else 2
    assemblies = []  # the overall result
    if exclude is None:
        exclude = []  # a container with single colors of genomes, that are to be considered fully assembled
    p_t_consistent_multicolors_in_target = Multicolor.split_colors(target_multicolor, guidance=guidance,
                                                                   account_for_color_multiplicity_in_guidance=False)
    # sorted color-name lists, kept only for reporting (stored into ex_data below)
    t_consistent_multicolors_in_target = []
    for tcmc in p_t_consistent_multicolors_in_target:
        t_consistent_multicolors_in_target.append(sorted(color.name for color in tcmc.colors))
    # we work with each connected component separately, as connected components usually preserve fragmentation points
    ################################################################################################
    #
    # its important that we iterate over connected components making each particular one a deepcopy of an
    # underlying breakpoint graph connected component
    #
    ################################################################################################
    for i, cc in enumerate(graph.connected_components_subgraphs(copy=True)):
        # we filter current connected component of uninteresting / ambiguous edges and retrieve a list of
        # connected components that are left in the original connected components after filtration
        irregular_subnets = get_irregular_subnets(cc, target_multicolor, exclude)
        if len(irregular_subnets) > 0 and verbose:
            print(">>Processing", str(i) + "th", "connected component", file=verbose_destination)
            print("\tcontains", len(irregular_subnets), "subnet groups", file=verbose_destination)
        # each subnet can be processed separately
        for subnet in irregular_subnets:
            supporting_edge_scores = get_support_edge_scores(graph, subnet, target_multicolor, bgtree)
            # we create a new dummy graph for the purpose of computing maximum weight matching for support edges in it
            new_graph = nx.Graph()
            if verbose:
                print("\tcontains", len(supporting_edge_scores), "possible assembly points", file=verbose_destination)
            # we'll keep track of possible assembly points for future reference
            support_edge_dict = {}
            for (v1, v2), before, after, ex_data in supporting_edge_scores:
                ex_data["tcmc"] = t_consistent_multicolors_in_target
                ##########################################################################################
                #
                # INSERT YOUR CODE ASSEMBLY SCORE THRESHOLD FILTRATION HERE IF NEED BE
                #
                ##########################################################################################
                if before - after - offset < threshold:
                    continue
                ##########################################################################################
                #
                # by default networkx assumes all edges, that have weight >= 0 are good
                #
                ##########################################################################################
                new_graph.add_edge(v1, v2, weight=before - after - offset)
                support_edge_dict[(v1, v2)] = (before, after + offset, ex_data)
                support_edge_dict[(v2, v1)] = (before, after + offset, ex_data)
            maximal_matching = nx.max_weight_matching(new_graph)
            if verbose:
                print("\t", len(maximal_matching) // 2, "assembly points are identified", file=verbose_destination)
            # as networkx provides a maximum matching in a form of adjacency list, every identified edge
            # (pair of vertices) is present their twice (i.e. matching[v1]=v2 and matching[v2]=v1)
            # we need to make sure we only add every edge only once
            # NOTE(review): the dict-style return of max_weight_matching is the
            # networkx 1.x API; networkx >= 2.x returns a set of vertex pairs,
            # which would break the .items() call below -- confirm the pinned
            # networkx version before upgrading.
            visited = set()
            for v1, v2 in maximal_matching.items():
                if v1 in visited or v2 in visited:
                    continue
                visited.add(v1)
                visited.add(v2)
                assemblies.append((v1, v2, support_edge_dict[(v1, v2)]))
    # we return the result as a list of assembly points that were identified for the targeted multicolor
    # as a list of tuples (v1, v2, ("before", "after"))
    # where v1 and v2 correspond to assembly point and "before" and "after" are used to compute the assembly score
    return assemblies
def assemble_points(graph, assemblies, multicolor, verbose=False, verbose_destination=None):
    """Apply every identified assembly point to ``graph`` (in place).

    Each assembly point is realized as a 2-break (k-break with k=2) that
    glues the two regular vertices together and merges their irregular
    extremities for the supplied multicolor.
    """
    if verbose:
        print(">>Assembling for multicolor", [e.name for e in multicolor.multicolors.elements()],
              file=verbose_destination)
    for v1, v2, (before, after, ex_data) in assemblies:
        # each endpoint has exactly one irregular edge; fetch its irregular extremity
        irregular1 = get_irregular_vertex(get_irregular_edge_by_vertex(graph, vertex=v1))
        irregular2 = get_irregular_vertex(get_irregular_edge_by_vertex(graph, vertex=v2))
        two_break = KBreak(start_edges=[(v1, irregular1), (v2, irregular2)],
                           result_edges=[(v1, v2), (irregular1, irregular2)],
                           multicolor=multicolor)
        if verbose:
            print("(", v1.name, ",", irregular1.name, ")x(", v2.name, ",", irregular2.name, ")", " score=", before - after, sep="",
                  file=verbose_destination)
        graph.apply_kbreak(kbreak=two_break, merge=True)
def assemble_scaffolds(graph, bgtree, target_organisms, exclude=None, verbose=False, verbose_destination=None):
    """Top-level driver: identify and apply assembly points for every relevant
    multicolor derived from the targeted organisms.

    Fixes over the original:
    * the "Expanding target multicolors..." status line is now written to
      ``verbose_destination`` like every other verbose message;
    * ``verbose`` is now forwarded to identify_assembly_points() and
      assemble_points(), whose verbose output was previously always suppressed
      even though ``verbose_destination`` was being passed down.
    """
    overall_assembling_result = []
    # all genomes stacked up together
    overall_target_multicolor = Multicolor(*target_organisms)
    # all of them combined might not be a tree consistent set, so we separate it into smallest number
    # of tree consistent chunks
    tree_consistent_target_multicolors = Multicolor.split_colors(overall_target_multicolor,
                                                                 guidance=bgtree.consistent_multicolors,
                                                                 account_for_color_multiplicity_in_guidance=False)
    if verbose:
        print("Supplied set of targeted for scaffolding genomes has been split into",
              len(tree_consistent_target_multicolors), "T-consistent sets:", file=verbose_destination)
        for multicolor in tree_consistent_target_multicolors:
            print("\t", [color.name for color in multicolor.multicolors.elements()], file=verbose_destination)
        print("Expanding target multicolors to include all T-consistent subcolors", file=verbose_destination)
    # now we need to expand that list into a larger list to include every possible tree consistent sub-color,
    # of whatever is already in the list
    #
    # we will change it as we go, so better iterate over a copy
    for target_multicolor in tree_consistent_target_multicolors[:]:
        for tree_c_multicolor in deepcopy(bgtree.consistent_multicolors):
            if tree_c_multicolor <= target_multicolor \
                    and tree_c_multicolor not in tree_consistent_target_multicolors \
                    and len(tree_c_multicolor.colors) > 0:
                tree_consistent_target_multicolors.append(tree_c_multicolor)
    tree_consistent_target_multicolors = sorted(tree_consistent_target_multicolors,
                                                key=lambda mc: len(mc.hashable_representation),
                                                reverse=True)
    all_target_multicolors = tree_consistent_target_multicolors[:]
    # additionally include every union of pairwise-disjoint combinations of the T-consistent chunks
    for i in range(2, len(tree_consistent_target_multicolors) + 1):
        for comb in itertools.combinations(tree_consistent_target_multicolors[:], i):
            comb = list(comb)
            for mc1, mc2 in itertools.combinations(comb, 2):
                if len(mc1.intersect(mc2).colors) > 0:
                    break
            else:
                new_mc = Multicolor()
                for mc in comb:
                    new_mc += mc
                all_target_multicolors.append(new_mc)
    # deduplicate via the hashable representation, then process larger multicolors first
    hashed_vertex_tree_consistent_multicolors = {mc.hashable_representation for mc in all_target_multicolors}
    all_target_multicolors = [Multicolor(*hashed_multicolor) for hashed_multicolor in
                              hashed_vertex_tree_consistent_multicolors]
    all_target_multicolors = sorted(all_target_multicolors,
                                    key=lambda mc: len(mc.hashable_representation),
                                    reverse=True)
    if verbose:
        print("Determined full list of targeted for scaffolding multicolors of length",
              len(all_target_multicolors), file=verbose_destination)
        for multicolor in all_target_multicolors:
            print("\t", [color.name for color in multicolor.multicolors.elements()], file=verbose_destination)
    for i, multicolor in enumerate(all_target_multicolors):
        # NOTE(review): this progress line always goes to stdout regardless of the
        # verbose flag; kept as-is since it may be intentional progress output.
        print("working with multicolor", i)
        assembly_points = identify_assembly_points(graph, bgtree, target_multicolor=multicolor, exclude=exclude,
                                                   verbose=verbose, verbose_destination=verbose_destination)
        for v1, v2, (before, after, ex_data) in assembly_points:
            overall_assembling_result.append((v1, v2, (before, after, ex_data), multicolor))
        assemble_points(graph, assemblies=assembly_points, multicolor=multicolor, verbose=verbose,
                        verbose_destination=verbose_destination)
    return overall_assembling_result
################################################################################################################
#
# END OF main scaffolding algorithm functions
#
################################################################################################################
################################################################################################################
#
# START OF experiment set up (data)
#
################################################################################################################
# Experiment input location: <root>/<rel dir>/<test case> holds the GRIMM files.
DATA_SOURCE_ROOT_DIR = "/volumes/data/projects/2015"
DATA_SOURCE_REL_DIR = "plants/data"
DATA_TEST_CASE = "grimm"
FULL_SOURCE_DIR = os.path.join(DATA_SOURCE_ROOT_DIR, DATA_SOURCE_REL_DIR, DATA_TEST_CASE)
# Every *.grimm file in the test-case directory is an input genome file.
SOURCE_GRIMM_FILES = [os.path.join(FULL_SOURCE_DIR, file) for file in os.listdir(FULL_SOURCE_DIR) if file.endswith(".grimm")]
GRIMM_FILES = SOURCE_GRIMM_FILES
# The "F_"-prefixed constants describe an alternative fish data set that is
# not referenced below; the active experiment uses the plant tree/organisms.
F_NEWICK_STRING_TREE = "((((((fugu,Stickleback),Medaka),Tetraodon),zebra_fish),Coelacanth),Anguilla_japonica);"
NEWICK_STRING_TREE = "(vvi,(ptr,(egr,(cpa,(tpa,(cru,(ath,aly)))))));"
F_TARGET_ORGANISM_NAMES = ["Anguilla_japonica", "Coelacanth", "fugu", "Medaka"]
# Genomes targeted for scaffolding, and genomes considered fully assembled.
TARGET_ORGANISM_NAMES = ["aly", "tpa"]
COMPLETE_ORGANISM_NAMES = ["vvi", "ath"]
F_COMPLETE_ORGANISM_NAMES = ["Stickleback", "Tetraodon"]
################################################################################################################
#
# END OF experiment set up (data)
#
################################################################################################################
if __name__ == "__main__":
    # Driver script: build the breakpoint graph, run the scaffolding pipeline,
    # and write a per-genome report of the identified assembly points.
    print("Reading data into breakpoint graph...")
    graph = BreakpointGraph()
    # merge every input GRIMM file into a single multi-genome breakpoint graph
    for file in GRIMM_FILES:
        with open(file, "rt") as source:
            graph.update(GRIMMReader.get_breakpoint_graph(source), merge_edges=True)
    print("Getting a tree...")
    bgtree = NewickReader.from_string(NEWICK_STRING_TREE)
    print("Preparing organisms for assembling...")
    target_organisms = [BGGenome(organism) for organism in TARGET_ORGANISM_NAMES]
    exclude = [BGGenome(organism) for organism in COMPLETE_ORGANISM_NAMES]
    print("Starting the assembly process...")
    result = assemble_scaffolds(graph=graph, bgtree=bgtree, target_organisms=target_organisms, exclude=exclude,
                                verbose=True)
    print("Finished assembling!")
    print("Were identified", sum([len(multicolor.colors) for _, _, (_, _, ex_data), multicolor in result]), "assembly points")
    with open("assemblies.txt", "wt") as destination:
        for v1, v2, (before, after, ex_data), multicolor in result:
            for color in multicolor.colors:
                # bug fix: write the report into the opened file; the original
                # print omitted file=destination, leaving assemblies.txt empty
                print(color.name, before - after, v1.name, v2.name, ex_data["s_support"], ex_data["tcmc"],
                      file=destination)
|
{"hexsha": "0816aacbfcc5dbf846f23a48de2be9831b705330", "size": 31507, "ext": "py", "lang": "Python", "max_stars_repo_path": "gos/tmp/scaffolding_no_repeats.py", "max_stars_repo_name": "sergey-aganezov-jr/gos", "max_stars_repo_head_hexsha": "fb4d210284f3037c5321250cb95f3901754feb6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gos/tmp/scaffolding_no_repeats.py", "max_issues_repo_name": "sergey-aganezov-jr/gos", "max_issues_repo_head_hexsha": "fb4d210284f3037c5321250cb95f3901754feb6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gos/tmp/scaffolding_no_repeats.py", "max_forks_repo_name": "sergey-aganezov-jr/gos", "max_forks_repo_head_hexsha": "fb4d210284f3037c5321250cb95f3901754feb6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9746835443, "max_line_length": 135, "alphanum_fraction": 0.5845050306, "include": true, "reason": "import networkx", "num_tokens": 6049}
|
#ifndef N_BODY_RANDOM_BODY_HPP
#define N_BODY_RANDOM_BODY_HPP
#include "communication.hpp"
#include "data.hpp"
#include "logging.hpp"
#include <boost/mpi/collectives.hpp>
#include <cstddef>
#include <functional>
namespace n_body::random::body {
// Signature of a body factory: given a global body index, produce a body.
template <typename T, std::size_t Dimension>
using BodyGenerator = std::function<data::Body<T, Dimension>(std::size_t)>;
// Return a single body produced by *generator* (always invoked with index 0).
template <typename T, std::size_t Dimension>
data::Body<T, Dimension> random_body(BodyGenerator<T, Dimension> &generator) {
  return generator(0);
}
// Generate `number` bodies cooperatively across all ranks of `comm`:
// each rank generates its own contiguous slice
// [division.begin, division.begin + division.count) locally, then the
// slices are combined on every rank via MPI all-gather into `bodies`.
template <typename T, std::size_t Dimension>
void random_bodies(const boost::mpi::communicator &comm,
                   BodyGenerator<T, Dimension> &generator,
                   data::Bodies<T, Dimension> &bodies, std::size_t number) {
  communication::Division division(comm, number);
  data::Bodies<T, Dimension> local_bodies;
  // generate all data in local bodies
  for (std::size_t i = 0; i < division.count; ++i) {
    local_bodies.push_back(generator(i + division.begin));
  }
  logging::logger(logging::Level::Debug)
      << "random_bodies() main task done, about to gather" << std::endl;
  // send and receive all data
  logging::logger(logging::Level::Debug) << "gathering masses" << std::endl;
  // BUG FIX: &local_bodies[0] is undefined behavior when this rank's slice
  // is empty (division.count == 0); data() is well-defined for an empty
  // container.
  // NOTE(review): all_gather assumes division.count is identical on every
  // rank; if Division hands out uneven slices, a gatherv-style collective
  // is required instead — confirm against communication::Division.
  boost::mpi::all_gather(comm, local_bodies.data(), division.count, bodies);
}
} // namespace n_body::random::body
#endif
|
{"hexsha": "226bc757240335ebc536caf1f341a85383f88d57", "size": 1352, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/random_body.hpp", "max_stars_repo_name": "linyinfeng/n-body", "max_stars_repo_head_hexsha": "e40c859689d76a3f36cd08e072d7ee24685e8be4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-11-28T15:13:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T15:13:06.000Z", "max_issues_repo_path": "src/random_body.hpp", "max_issues_repo_name": "linyinfeng/n-body", "max_issues_repo_head_hexsha": "e40c859689d76a3f36cd08e072d7ee24685e8be4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/random_body.hpp", "max_forks_repo_name": "linyinfeng/n-body", "max_forks_repo_head_hexsha": "e40c859689d76a3f36cd08e072d7ee24685e8be4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-11-10T14:01:55.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-10T14:01:55.000Z", "avg_line_length": 30.7272727273, "max_line_length": 78, "alphanum_fraction": 0.7071005917, "num_tokens": 340}
|
[STATEMENT]
lemma conemem_expansion_estimate:
fixes u v u' v'::"'a::euclidean_space"
assumes "t \<in> {0 .. pi / 2}"
assumes angle_pos: "0 < vangle u v" "vangle u v < pi / 2"
assumes angle_le: "(vangle u' v') \<le> (vangle u v)"
assumes "norm u = 1" "norm v = 1"
shows "norm (conemem u' v' t) \<ge> min (norm u') (norm v') * norm (conemem u v t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
define e_pre where "e_pre = min (norm u') (norm v')"
[PROOF STATE]
proof (state)
this:
e_pre = min (norm u') (norm v')
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
let ?w = "conemem u v"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
let ?w' = "conemem u' v'"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have cos_angle_le: "cos (vangle u' v') \<ge> cos (vangle u v)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos (vangle u v) \<le> cos (vangle u' v')
[PROOF STEP]
using angle_pos vangle_bounds
[PROOF STATE]
proof (prove)
using this:
0 < vangle u v
vangle u v < pi / 2
0 \<le> vangle ?u ?v
vangle ?u ?v \<le> pi
goal (1 subgoal):
1. cos (vangle u v) \<le> cos (vangle u' v')
[PROOF STEP]
by (auto intro!: cos_monotone_0_pi_le angle_le)
[PROOF STATE]
proof (state)
this:
cos (vangle u v) \<le> cos (vangle u' v')
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have e_pre_le: "e_pre\<^sup>2 \<le> norm u' * norm v'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e_pre\<^sup>2 \<le> norm u' * norm v'
[PROOF STEP]
by (auto simp: e_pre_def min_def power2_eq_square intro: mult_left_mono mult_right_mono)
[PROOF STATE]
proof (state)
this:
e_pre\<^sup>2 \<le> norm u' * norm v'
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have lt: "0 < 1 + 2 * (u \<bullet> v) * sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
have "\<bar>u \<bullet> v\<bar> < norm u * norm v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<bar>u \<bullet> v\<bar> < norm u * norm v
[PROOF STEP]
apply (rule Cauchy_Schwarz_strict_ineq2)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. v \<noteq> (0::'a)
2. \<And>k. u \<noteq> k *\<^sub>R v
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
t \<in> {0..pi / 2}
0 < vangle u v
vangle u v < pi / 2
vangle u' v' \<le> vangle u v
norm u = 1
norm v = 1
goal (2 subgoals):
1. v \<noteq> (0::'a)
2. \<And>k. u \<noteq> k *\<^sub>R v
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>k. \<lbrakk>0 < vangle (k *\<^sub>R v) v; vangle (k *\<^sub>R v) v * 2 < pi; vangle u' v' \<le> vangle (k *\<^sub>R v) v; \<bar>k\<bar> = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi; u = k *\<^sub>R v\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (subst (asm) vangle_scaleR_self)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>k. \<lbrakk>0 < (if k = 0 \<or> v = (0::'a) then pi / 2 else if 0 < k then 0 else pi); (if k = 0 \<or> v = (0::'a) then pi / 2 else if 0 < k then 0 else pi) * 2 < pi; vangle u' v' \<le> (if k = 0 \<or> v = (0::'a) then pi / 2 else if 0 < k then 0 else pi); \<bar>k\<bar> = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi; u = k *\<^sub>R v\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by (auto simp: split: if_splits)
[PROOF STATE]
proof (state)
this:
\<bar>u \<bullet> v\<bar> < norm u * norm v
goal (1 subgoal):
1. 0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<bar>u \<bullet> v\<bar> < norm u * norm v
[PROOF STEP]
have "abs (u \<bullet> v * sin (2 * t)) < 1"
[PROOF STATE]
proof (prove)
using this:
\<bar>u \<bullet> v\<bar> < norm u * norm v
goal (1 subgoal):
1. \<bar>u \<bullet> v * sin (2 * t)\<bar> < 1
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<bar>u \<bullet> v\<bar> < norm u * norm v
t \<in> {0..pi / 2}
0 < vangle u v
vangle u v < pi / 2
vangle u' v' \<le> vangle u v
norm u = 1
norm v = 1
goal (1 subgoal):
1. \<bar>u \<bullet> v * sin (2 * t)\<bar> < 1
[PROOF STEP]
apply (auto simp add: abs_mult)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<bar>u \<bullet> v\<bar> < 1; 0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi\<rbrakk> \<Longrightarrow> \<bar>u \<bullet> v\<bar> * \<bar>sin (2 * t)\<bar> < 1
[PROOF STEP]
apply (subst mult.commute)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<bar>u \<bullet> v\<bar> < 1; 0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi\<rbrakk> \<Longrightarrow> \<bar>sin (2 * t)\<bar> * \<bar>u \<bullet> v\<bar> < 1
[PROOF STEP]
apply (rule less_one_multI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>\<bar>u \<bullet> v\<bar> < 1; 0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi\<rbrakk> \<Longrightarrow> \<bar>sin (2 * t)\<bar> \<le> 1
2. \<lbrakk>\<bar>u \<bullet> v\<bar> < 1; 0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi\<rbrakk> \<Longrightarrow> 0 < \<bar>u \<bullet> v\<bar>
3. \<lbrakk>\<bar>u \<bullet> v\<bar> < 1; 0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi\<rbrakk> \<Longrightarrow> \<bar>u \<bullet> v\<bar> < 1
[PROOF STEP]
apply (auto simp add: abs_mult inner_eq_vangle )
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; 0 \<le> t; t * 2 \<le> pi; cos (vangle u v) = 0\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by (auto simp: cos_vangle_eq_zero_iff_vangle dest!: ortho_imp_angle_pi_half)
[PROOF STATE]
proof (state)
this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> < 1
goal (1 subgoal):
1. 0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> < 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> < 1
goal (1 subgoal):
1. 0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (subst mult.assoc sin_times_cos)+ auto
[PROOF STATE]
proof (state)
this:
0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have le: "0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
have "\<bar>u \<bullet> v\<bar> \<le> norm u * norm v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<bar>u \<bullet> v\<bar> \<le> norm u * norm v
[PROOF STEP]
by (rule Cauchy_Schwarz_ineq2)
[PROOF STATE]
proof (state)
this:
\<bar>u \<bullet> v\<bar> \<le> norm u * norm v
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<bar>u \<bullet> v\<bar> \<le> norm u * norm v
[PROOF STEP]
have "abs (u \<bullet> v * sin (2 * t)) \<le> 1"
[PROOF STATE]
proof (prove)
using this:
\<bar>u \<bullet> v\<bar> \<le> norm u * norm v
goal (1 subgoal):
1. \<bar>u \<bullet> v * sin (2 * t)\<bar> \<le> 1
[PROOF STEP]
by (auto simp add: abs_mult assms intro!: mult_le_one)
[PROOF STATE]
proof (state)
this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> \<le> 1
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> \<le> 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<bar>u \<bullet> v * sin (2 * t)\<bar> \<le> 1
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (subst mult.assoc sin_times_cos)+ auto
[PROOF STATE]
proof (state)
this:
0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "(norm (?w t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (norm (conemem u v t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (auto simp: conemem_def algebra_simps power2_norm_eq_inner)
(auto simp: power2_eq_square inner_commute)
[PROOF STATE]
proof (state)
this:
(norm (conemem u v t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(norm (conemem u v t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "\<dots> = 1 + 2 * (u \<bullet> v) * sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t = 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (auto simp: sin_squared_eq algebra_simps assms)
[PROOF STATE]
proof (state)
this:
(cos t)\<^sup>2 *\<^sub>R (norm u)\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v)\<^sup>2 + 2 * (u \<bullet> v) * sin t * cos t = 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
have "(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t"
[PROOF STATE]
proof (prove)
using this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. (norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "(norm (?w' t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (norm (conemem u' v' t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t
[PROOF STEP]
by (auto simp: conemem_def algebra_simps power2_norm_eq_inner)
(auto simp: power2_eq_square inner_commute)
[PROOF STATE]
proof (state)
this:
(norm (conemem u' v' t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
(norm (conemem u' v' t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t
[PROOF STEP]
have "(norm (?w' t) / norm (?w t))\<^sup>2 =
((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) /
(1 + 2 * (u \<bullet> v) * sin t * cos t)"
(is "_ = (?a + ?b) / ?c")
[PROOF STATE]
proof (prove)
using this:
(norm (conemem u v t))\<^sup>2 = 1 + 2 * (u \<bullet> v) * sin t * cos t
(norm (conemem u' v' t))\<^sup>2 = (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t
goal (1 subgoal):
1. (norm (conemem u' v' t) / norm (conemem u v t))\<^sup>2 = ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
[PROOF STEP]
by (auto simp: divide_inverse power_mult_distrib) (auto simp: inverse_eq_divide power2_eq_square)
[PROOF STATE]
proof (state)
this:
(norm (conemem u' v' t) / norm (conemem u v t))\<^sup>2 = ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(norm (conemem u' v' t) / norm (conemem u v t))\<^sup>2 = ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "\<dots> \<ge> (e_pre\<^sup>2 + ?b) / ?c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
[PROOF STEP]
apply (rule divide_right_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t \<le> (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t
2. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
apply (rule add_right_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. e_pre\<^sup>2 \<le> (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2
2. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e_pre\<^sup>2 \<le> (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2
[PROOF STEP]
using assms e_pre_def
[PROOF STATE]
proof (prove)
using this:
t \<in> {0..pi / 2}
0 < vangle u v
vangle u v < pi / 2
vangle u' v' \<le> vangle u v
norm u = 1
norm v = 1
e_pre = min (norm u') (norm v')
goal (1 subgoal):
1. e_pre\<^sup>2 \<le> (cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2
[PROOF STEP]
apply (auto simp: min_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; e_pre = norm u'; 0 \<le> t; t * 2 \<le> pi; norm u' \<le> norm v'\<rbrakk> \<Longrightarrow> (norm u')\<^sup>2 \<le> (cos t)\<^sup>2 * (norm u')\<^sup>2 + (sin t)\<^sup>2 * (norm v')\<^sup>2
2. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; e_pre = norm v'; 0 \<le> t; t * 2 \<le> pi; \<not> norm u' \<le> norm v'\<rbrakk> \<Longrightarrow> (norm v')\<^sup>2 \<le> (cos t)\<^sup>2 * (norm u')\<^sup>2 + (sin t)\<^sup>2 * (norm v')\<^sup>2
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; e_pre = norm u'; 0 \<le> t; t * 2 \<le> pi; norm u' \<le> norm v'\<rbrakk> \<Longrightarrow> (norm u')\<^sup>2 \<le> (cos t)\<^sup>2 * (norm u')\<^sup>2 + (sin t)\<^sup>2 * (norm v')\<^sup>2
[PROOF STEP]
by (auto simp: algebra_simps cos_squared_eq intro!: mult_right_mono power_mono)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; e_pre = norm v'; 0 \<le> t; t * 2 \<le> pi; \<not> norm u' \<le> norm v'\<rbrakk> \<Longrightarrow> (norm v')\<^sup>2 \<le> (cos t)\<^sup>2 * (norm u')\<^sup>2 + (sin t)\<^sup>2 * (norm v')\<^sup>2
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < vangle u v; vangle u v * 2 < pi; vangle u' v' \<le> vangle u v; norm u = 1; norm v = 1; e_pre = norm v'; 0 \<le> t; t * 2 \<le> pi; \<not> norm u' \<le> norm v'\<rbrakk> \<Longrightarrow> (norm v')\<^sup>2 \<le> (cos t)\<^sup>2 * (norm u')\<^sup>2 + (sin t)\<^sup>2 * (norm v')\<^sup>2
[PROOF STEP]
by (auto simp: algebra_simps sin_squared_eq intro!: mult_right_mono power_mono)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (rule le)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
also (xtrans)
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> ((cos t)\<^sup>2 *\<^sub>R (norm u')\<^sup>2 + (sin t)\<^sup>2 *\<^sub>R (norm v')\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have inner_nonneg: "u' \<bullet> v' \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> u' \<bullet> v'
[PROOF STEP]
using angle_le(1) angle_pos vangle_bounds[of u' v']
[PROOF STATE]
proof (prove)
using this:
vangle u' v' \<le> vangle u v
0 < vangle u v
vangle u v < pi / 2
0 \<le> vangle u' v'
vangle u' v' \<le> pi
goal (1 subgoal):
1. 0 \<le> u' \<bullet> v'
[PROOF STEP]
by (auto simp: inner_eq_vangle intro!: mult_nonneg_nonneg cos_ge_zero)
[PROOF STATE]
proof (state)
this:
0 \<le> u' \<bullet> v'
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
from vangle_bounds[of u' v'] vangle_le_pi2[OF this]
[PROOF STATE]
proof (chain)
picking this:
0 \<le> vangle u' v'
vangle u' v' \<le> pi
vangle u' v' \<le> pi / 2
[PROOF STEP]
have u'v'e_pre: "u' \<bullet> v' \<ge> cos (vangle u' v') * e_pre\<^sup>2"
[PROOF STATE]
proof (prove)
using this:
0 \<le> vangle u' v'
vangle u' v' \<le> pi
vangle u' v' \<le> pi / 2
goal (1 subgoal):
1. cos (vangle u' v') * e_pre\<^sup>2 \<le> u' \<bullet> v'
[PROOF STEP]
apply (subst inner_eq_vangle)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> cos (vangle u' v') * e_pre\<^sup>2 \<le> cos (vangle u' v') * (norm u' * norm v')
[PROOF STEP]
apply (rule mult_left_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> e_pre\<^sup>2 \<le> norm u' * norm v'
2. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> 0 \<le> cos (vangle u' v')
[PROOF STEP]
apply (rule e_pre_le)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> 0 \<le> cos (vangle u' v')
[PROOF STEP]
apply (rule cos_ge_zero)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> - (pi / 2) \<le> vangle u' v'
2. \<lbrakk>0 \<le> vangle u' v'; vangle u' v' \<le> pi; vangle u' v' \<le> pi / 2\<rbrakk> \<Longrightarrow> vangle u' v' \<le> pi / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cos (vangle u' v') * e_pre\<^sup>2 \<le> u' \<bullet> v'
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "(e_pre\<^sup>2 + ?b) / ?c \<ge> (e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / ?c"
(is "_ \<ge> ?ddd")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> (e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
[PROOF STEP]
apply (intro divide_right_mono add_left_mono mult_right_mono mult_left_mono u'v'e_pre)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. 0 \<le> 2
2. 0 \<le> sin t
3. 0 \<le> cos t
4. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
using \<open>t \<in> _\<close>
[PROOF STATE]
proof (prove)
using this:
t \<in> {0..pi / 2}
goal (4 subgoals):
1. 0 \<le> 2
2. 0 \<le> sin t
3. 0 \<le> cos t
4. 0 \<le> 1 + 2 * (u \<bullet> v) * sin t * cos t
[PROOF STEP]
by (auto intro!: mult_right_mono sin_ge_zero divide_right_mono le cos_ge_zero
simp: sin_times_cos u'v'e_pre)
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> (e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
also (xtrans)
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> (e_pre\<^sup>2 + 2 * (u' \<bullet> v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "?ddd = e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / ?c)" (is "_ = ?ddd")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) = e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t))
[PROOF STEP]
by (auto simp add: divide_simps algebra_simps)
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) = e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t))
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
also (xtrans)
[PROOF STATE]
proof (state)
this:
(e_pre\<^sup>2 + 2 * (cos (vangle u' v') * e_pre\<^sup>2) * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) = e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t))
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have sc_ge_0: "0 \<le> sin t * cos t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> sin t * cos t
[PROOF STEP]
using \<open>t \<in> _\<close>
[PROOF STATE]
proof (prove)
using this:
t \<in> {0..pi / 2}
goal (1 subgoal):
1. 0 \<le> sin t * cos t
[PROOF STEP]
by (auto simp: assms cos_angle_le intro!: mult_nonneg_nonneg sin_ge_zero cos_ge_zero)
[PROOF STATE]
proof (state)
this:
0 \<le> sin t * cos t
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
have "?ddd \<ge> e_pre\<^sup>2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e_pre\<^sup>2 \<le> e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t))
[PROOF STEP]
apply (subst mult_le_cancel_left1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (0 < e_pre\<^sup>2 \<longrightarrow> 1 \<le> (1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t)) \<and> (e_pre\<^sup>2 < 0 \<longrightarrow> (1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t) \<le> 1)
[PROOF STEP]
apply (auto simp add: divide_simps split: if_splits)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>1 + 2 * (u \<bullet> v) * sin t * cos t < 0; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> cos (vangle u' v') * (sin t * cos t) \<le> u \<bullet> v * (sin t * cos t)
2. \<lbrakk>0 < 1 + 2 * (u \<bullet> v) * sin t * cos t; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> u \<bullet> v * (sin t * cos t) \<le> cos (vangle u' v') * (sin t * cos t)
3. 1 + 2 * (u \<bullet> v) * sin t * cos t = 0 \<Longrightarrow> e_pre = 0
[PROOF STEP]
apply (rule mult_right_mono)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>1 + 2 * (u \<bullet> v) * sin t * cos t < 0; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> cos (vangle u' v') \<le> u \<bullet> v
2. \<lbrakk>1 + 2 * (u \<bullet> v) * sin t * cos t < 0; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> 0 \<le> sin t * cos t
3. \<lbrakk>0 < 1 + 2 * (u \<bullet> v) * sin t * cos t; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> u \<bullet> v * (sin t * cos t) \<le> cos (vangle u' v') * (sin t * cos t)
4. 1 + 2 * (u \<bullet> v) * sin t * cos t = 0 \<Longrightarrow> e_pre = 0
[PROOF STEP]
using lt
[PROOF STATE]
proof (prove)
using this:
0 < 1 + 2 * (u \<bullet> v) * sin t * cos t
goal (4 subgoals):
1. \<lbrakk>1 + 2 * (u \<bullet> v) * sin t * cos t < 0; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> cos (vangle u' v') \<le> u \<bullet> v
2. \<lbrakk>1 + 2 * (u \<bullet> v) * sin t * cos t < 0; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> 0 \<le> sin t * cos t
3. \<lbrakk>0 < 1 + 2 * (u \<bullet> v) * sin t * cos t; e_pre \<noteq> 0\<rbrakk> \<Longrightarrow> u \<bullet> v * (sin t * cos t) \<le> cos (vangle u' v') * (sin t * cos t)
4. 1 + 2 * (u \<bullet> v) * sin t * cos t = 0 \<Longrightarrow> e_pre = 0
[PROOF STEP]
by (auto simp: assms inner_eq_vangle intro!: mult_right_mono sc_ge_0 cos_angle_le)
[PROOF STATE]
proof (state)
this:
e_pre\<^sup>2 \<le> e_pre\<^sup>2 * ((1 + 2 * cos (vangle u' v') * sin t * cos t) / (1 + 2 * (u \<bullet> v) * sin t * cos t))
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
finally (xtrans)
[PROOF STATE]
proof (chain)
picking this:
e_pre\<^sup>2 \<le> (norm (conemem u' v' t) / norm (conemem u v t))\<^sup>2
[PROOF STEP]
have "(norm (conemem u' v' t))\<^sup>2 \<ge> (e_pre * norm (conemem u v t))\<^sup>2"
[PROOF STATE]
proof (prove)
using this:
e_pre\<^sup>2 \<le> (norm (conemem u' v' t) / norm (conemem u v t))\<^sup>2
goal (1 subgoal):
1. (e_pre * norm (conemem u v t))\<^sup>2 \<le> (norm (conemem u' v' t))\<^sup>2
[PROOF STEP]
by (simp add: divide_simps power_mult_distrib split: if_splits)
[PROOF STATE]
proof (state)
this:
(e_pre * norm (conemem u v t))\<^sup>2 \<le> (norm (conemem u' v' t))\<^sup>2
goal (1 subgoal):
1. min (norm u') (norm v') * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(e_pre * norm (conemem u v t))\<^sup>2 \<le> (norm (conemem u' v' t))\<^sup>2
[PROOF STEP]
show "norm (conemem u' v' t) \<ge> e_pre * norm (conemem u v t)"
[PROOF STATE]
proof (prove)
using this:
(e_pre * norm (conemem u v t))\<^sup>2 \<le> (norm (conemem u' v' t))\<^sup>2
goal (1 subgoal):
1. e_pre * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
using norm_imp_pos_and_ge power2_le_imp_le
[PROOF STATE]
proof (prove)
using this:
(e_pre * norm (conemem u v t))\<^sup>2 \<le> (norm (conemem u' v' t))\<^sup>2
norm ?x \<equiv> ?n \<Longrightarrow> 0 \<le> norm ?x \<and> norm ?x \<le> ?n
\<lbrakk>?x\<^sup>2 \<le> ?y\<^sup>2; (0::?'a) \<le> ?y\<rbrakk> \<Longrightarrow> ?x \<le> ?y
goal (1 subgoal):
1. e_pre * norm (conemem u v t) \<le> norm (conemem u' v' t)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
e_pre * norm (conemem u v t) \<le> norm (conemem u' v' t)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 14032, "file": "Ordinary_Differential_Equations_IVP_Cones", "length": 105}
|
# Benchmark DataFrame construction and row-indexing performance across
# element types (integer / string / pooled) and frame sizes.
using BenchmarkTools
using DataFrames
using PooledArrays
using Random
@show Threads.nthreads()
Random.seed!(1234)
ref_dfi = DataFrame(rand(1:10^4, 10^7, 4), :auto)
ref_dfs = string.(ref_dfi)
ref_dfp = mapcols(PooledArray, ref_dfs)
res = DataFrame(rows=Int[],cols=Int[], type=String[], op=String[], time=Float64[])
for x in (10, 10^6-1, 10^6, 10^7), y in 1:4
    dfi = ref_dfi[1:x, 1:y]
    dfs = ref_dfs[1:x, 1:y]
    dfp = ref_dfp[1:x, 1:y]
    @show (x, y) # ping that the process is alive
    push!(res, (x, y, "integer", "copy", @belapsed DataFrame($dfi)))
    push!(res, (x, y, "string", "copy", @belapsed DataFrame($dfs)))
    push!(res, (x, y, "pooled", "copy", @belapsed DataFrame($dfp)))
    push!(res, (x, y, "integer", ":", @belapsed $dfi[:, :]))
    push!(res, (x, y, "string", ":", @belapsed $dfs[:, :]))
    push!(res, (x, y, "pooled", ":", @belapsed $dfp[:, :]))
    push!(res, (x, y, "integer", "1:end-5", @belapsed $dfi[1:end-5, :]))
    push!(res, (x, y, "string", "1:end-5", @belapsed $dfs[1:end-5, :]))
    push!(res, (x, y, "pooled", "1:end-5", @belapsed $dfp[1:end-5, :]))
    push!(res, (x, y, "integer", "1:5", @belapsed $dfi[1:5, :]))
    # was `1:1:5` (a StepRange) for the string/pooled rows, which exercises a
    # different indexing code path than the UnitRange used for the integer row;
    # use `1:5` consistently so the three element types are comparable
    push!(res, (x, y, "string", "1:5", @belapsed $dfs[1:5, :]))
    push!(res, (x, y, "pooled", "1:5", @belapsed $dfp[1:5, :]))
end
res.time *= 1_000 # convert seconds to milliseconds
@show Threads.nthreads()
@show unstack(res, [:cols, :type, :op], :rows, :time)
|
{"hexsha": "920297ed60f30c80d6c21d63f469f41a3a234ecd", "size": 1400, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "benchmarks/constructor_and_indexing/constructor_and_indexing_performance.jl", "max_stars_repo_name": "ericphanson/DataFrames.jl", "max_stars_repo_head_hexsha": "2f5536e92ed9d7002e3a19fa573ffad1d20dc343", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1006, "max_stars_repo_stars_event_min_datetime": "2017-09-13T02:13:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:06:39.000Z", "max_issues_repo_path": "benchmarks/constructor_and_indexing/constructor_and_indexing_performance.jl", "max_issues_repo_name": "ericphanson/DataFrames.jl", "max_issues_repo_head_hexsha": "2f5536e92ed9d7002e3a19fa573ffad1d20dc343", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1698, "max_issues_repo_issues_event_min_datetime": "2017-09-09T21:53:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:48:23.000Z", "max_forks_repo_path": "benchmarks/constructor_and_indexing/constructor_and_indexing_performance.jl", "max_forks_repo_name": "ericphanson/DataFrames.jl", "max_forks_repo_head_hexsha": "2f5536e92ed9d7002e3a19fa573ffad1d20dc343", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 251, "max_forks_repo_forks_event_min_datetime": "2017-09-12T01:28:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T03:37:54.000Z", "avg_line_length": 35.8974358974, "max_line_length": 82, "alphanum_fraction": 0.5678571429, "num_tokens": 565}
|
[STATEMENT]
lemma unit\<^sub>0_simp:
assumes "C.obj a"
shows "EQ\<^sub>DoEQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 a =
C\<^sub>U.E (F\<^sub>U\<^sub>V.G (D\<^sub>V.\<eta>\<^sub>0 (D\<^sub>V.src (F (C\<^sub>U.P a))))) \<star>\<^sub>C C\<^sub>U.E (C\<^sub>U.P\<^sub>0 (src\<^sub>C a))
\<star>\<^sub>C EQ\<^sub>C'.unit\<^sub>0 a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. EQ\<^sub>DoEQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 a = C\<^sub>U.E (F\<^sub>U\<^sub>V.G (D\<^sub>V.e (D\<^sub>V.src (F (C\<^sub>U.P a))))) \<star>\<^sub>C C\<^sub>U.E (C\<^sub>U.P\<^sub>0 (src\<^sub>C a)) \<star>\<^sub>C EQ\<^sub>C'.unit\<^sub>0 a
[PROOF STEP]
using assms EQ\<^sub>DoEQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0_simp EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0_simp
EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.FH.map\<^sub>0_def C\<^sub>U.src_def
[PROOF STATE]
proof (prove)
using this:
C.obj a
C.obj ?a \<Longrightarrow> EQ\<^sub>DoEQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 ?a = EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.right_map (D\<^sub>V.e (EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.FH.map\<^sub>0 ?a)) \<star>\<^sub>C EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 ?a
C.obj ?a \<Longrightarrow> EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 ?a = C\<^sub>U.E (C\<^sub>U.prj.map\<^sub>0 ?a) \<star>\<^sub>C EQ\<^sub>C'.unit\<^sub>0 ?a
EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.FH.map\<^sub>0 ?a \<equiv> D\<^sub>V.src (EQ\<^sub>U\<^sub>VoEQ\<^sub>C'.left_map ?a)
C\<^sub>U.src ?\<mu> = (if C\<^sub>U.arr ?\<mu> then src\<^sub>C ?\<mu> else C\<^sub>U.null)
goal (1 subgoal):
1. EQ\<^sub>DoEQ\<^sub>U\<^sub>VoEQ\<^sub>C'.unit\<^sub>0 a = C\<^sub>U.E (F\<^sub>U\<^sub>V.G (D\<^sub>V.e (D\<^sub>V.src (F (C\<^sub>U.P a))))) \<star>\<^sub>C C\<^sub>U.E (C\<^sub>U.P\<^sub>0 (src\<^sub>C a)) \<star>\<^sub>C EQ\<^sub>C'.unit\<^sub>0 a
[PROOF STEP]
by auto
|
{"llama_tokens": 904, "file": "Bicategory_EquivalenceOfBicategories", "length": 2}
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import schema
from nni import ClassArgsValidator
from nni.common.hpo_utils import format_search_space, deformat_parameters
from nni.tuner import Tuner
class RandomTuner(Tuner):
    """Tuner that draws each trial's parameters uniformly at random.

    Args:
        seed: Optional seed for the NumPy random generator, allowing a
            reproducible sequence of sampled configurations.
    """

    def __init__(self, seed=None):
        self.rng = np.random.default_rng(seed)
        self.space = None

    def update_search_space(self, space):
        """Store the formatted search space to sample from."""
        self.space = format_search_space(space)

    def generate_parameters(self, *args, **kwargs):
        """Sample one random configuration from the current search space."""
        sampled = suggest(self.rng, self.space)
        return deformat_parameters(sampled, self.space)

    def receive_trial_result(self, *args, **kwargs):
        # Random search does not learn from results; nothing to do.
        pass
class RandomClassArgsValidator(ClassArgsValidator):
    """Validates ``RandomTuner`` constructor arguments from experiment config."""
    def validate_class_args(self, **kwargs):
        """Accept only an optional integer ``seed``; raise a schema error otherwise."""
        schema.Schema({schema.Optional('seed'): int}).validate(kwargs)
def suggest(rng, space):
    """Draw one random value for every activated parameter in *space*.

    Returns a dict mapping each spec's key to either a category index
    (for categorical specs) or a numeric sample (for numeric specs).
    """
    params = {}
    for spec in space.values():
        if not spec.is_activated(params):
            # Conditional parameter whose guard is not satisfied yet.
            continue
        if spec.categorical:
            # Categorical: sample an index into the choice list.
            params[spec.key] = rng.integers(spec.size)
            continue
        if spec.normal_distributed:
            sample = (rng.lognormal(spec.mu, spec.sigma)
                      if spec.log_distributed
                      else rng.normal(spec.mu, spec.sigma))
        elif spec.log_distributed:
            # Uniform in log space, mapped back to the original scale.
            sample = np.exp(rng.uniform(np.log(spec.low), np.log(spec.high)))
        else:
            sample = rng.uniform(spec.low, spec.high)
        if spec.q is not None:
            # Quantize to the nearest multiple of q.
            sample = np.round(sample / spec.q) * spec.q
        params[spec.key] = sample
    return params
|
{"hexsha": "30318cf27b0ab5277b0988adc150245459279f6a", "size": 1627, "ext": "py", "lang": "Python", "max_stars_repo_path": "nni/algorithms/hpo/random_tuner.py", "max_stars_repo_name": "chyan0411/nni", "max_stars_repo_head_hexsha": "2064bba03da468ee9093b9015e98e62ca4262113", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-20T08:50:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T08:50:18.000Z", "max_issues_repo_path": "nni/algorithms/hpo/random_tuner.py", "max_issues_repo_name": "chyan0411/nni", "max_issues_repo_head_hexsha": "2064bba03da468ee9093b9015e98e62ca4262113", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-08T22:18:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-08T22:18:41.000Z", "max_forks_repo_path": "nni/algorithms/hpo/random_tuner.py", "max_forks_repo_name": "chyan0411/nni", "max_forks_repo_head_hexsha": "2064bba03da468ee9093b9015e98e62ca4262113", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1296296296, "max_line_length": 76, "alphanum_fraction": 0.6275353411, "include": true, "reason": "import numpy", "num_tokens": 357}
|
C$Attribute setting:
C+PGCOLOUR -- set standard colour tables
      SUBROUTINE PGCOLOUR
C-----------------------------------------------------------------------
C Sets standard colour tables, for devices supporting colour graphics.
C
C 16-Dec-1988 - new routine for Lexidata image processor [DJT].
C-----------------------------------------------------------------------
      EXTERNAL PGCT01
C
C  PGCT01 supplies the standard colour-table definition; passing it to
C  PGSCT installs that table on the current device.
      CALL PGSCT(PGCT01)
C
      END
|
{"hexsha": "cdce3aa7e13b4a5a3c07f39fb48e8e160e29ebb1", "size": 436, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "graphic_lib/pgcolour.f", "max_stars_repo_name": "CavendishAstrophysics/anmap", "max_stars_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-09-01T12:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-01T12:40:45.000Z", "max_issues_repo_path": "graphic_lib/pgcolour.f", "max_issues_repo_name": "CavendishAstrophysics/anmap", "max_issues_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphic_lib/pgcolour.f", "max_forks_repo_name": "CavendishAstrophysics/anmap", "max_forks_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6470588235, "max_line_length": 72, "alphanum_fraction": 0.4839449541, "num_tokens": 88}
|
"""Find the synapse's pulse extender speed
Derived from check_max_synapse_rates.py
For each synapse:
Set up the Tag Action Table to send +1 and -1 spikes to an individual synapse for each input spike
generated from one of the FPGA's spike generators.
Send a high rate to the synapse, well above its maximum possible input consumption rate
The input -> fifo -> tat -> synapse path will quickly back up
and, due to the datapath operation, the rate of overflows will match the synapse's consumption
rate
"""
from time import sleep
from time import time as get_time
import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pystorm.hal import HAL
from pystorm.hal.neuromorph import graph
from pystorm.PyDriver import bddriver as bd
HAL = HAL()  # instantiate the hardware abstraction layer (rebinds the imported name to the instance)
from utils.exp import clear_overflows, compute_spike_gen_rates
from utils.file_io import set_data_dir
np.set_printoptions(precision=2)
CORE = 0  # core index passed to all driver calls
NRN_N = 4096  # number of somas on the core
# SYN_N = 1
# SYN_N = 4
# SYN_N = 8
# SYN_N = 16
# SYN_N = 64
SYN_N = 1024  # number of synapses to characterize
DEFAULT_TEST_TIME = 1.0 # time to collect overflow data
DEFAULT_SLOP_TIME = 0.2 # time to allow traffic to flush at the start and end of an experiment
SPIKE_GEN_TIME_UNIT_NS = 10000 # time unit of fpga spike generator
SPIKE_GEN_IDX = 0 # FPGA spike generator index
# Tag Action Table settings
TAT_IDX = 0  # tag routed to the TAT entry programmed in set_tat
TAT_START_ADDR = 0  # memory address of the single TAT entry used
TAT_STOP_BIT = 1  # marks the entry as the last one for this tag
TAT_SIGN_0 = 0  # sign bit for the first synapse event in the entry
TAT_SIGN_1 = 1  # sign bit for the second synapse event in the entry
FIFO_BUFFER_SIZE = 255
VALIDATE_HIGH_BUF_RATE = 500 # upper bound padding to test high side of max_rate
SYN_PU = HAL.get_DAC_value('DAC_SYN_PU') # analog bias setting
SYN_PD = HAL.get_DAC_value('DAC_SYN_PD') # analog bias setting
RATE = 20000 # maximum rate to test
SPIKE_GEN_RATE = compute_spike_gen_rates(RATE-1, RATE, SPIKE_GEN_TIME_UNIT_NS)[0]  # nearest rate the generator can realize
def parse_args():
    """Parse the command line arguments for this calibration script."""
    parser = argparse.ArgumentParser(
        description='Characterize the synapse pulse extender')
    parser.add_argument(
        "--syn_pd", dest="syn_pd", type=int, default=SYN_PD,
        help="Set DAC_SYN_PD bias. Default {}".format(SYN_PD))
    return parser.parse_args()
def build_net():
    """Builds the HAL-level network for testing

    Creates a single 1-dimensional pool covering all NRN_N neurons with an
    all-zero tap matrix (spikes are injected directly through the TAT, so
    no tap points are needed), then maps it onto the hardware.
    """
    dim = 1
    tap_matrix = np.zeros((NRN_N, dim))  # no tap points; input arrives via the TAT
    net = graph.Network("net")
    net.create_pool("p", tap_matrix)
    HAL.map(net)  # allocate hardware resources for the network
def set_analog(syn_pd):
    """Sets the synapse config bits and the bias currents

    :param syn_pd: DAC count for the synapse pull-down bias (DAC_SYN_PD);
        the pull-up bias is always set to the module-level SYN_PU value.
    """
    HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD, syn_pd)
    HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU, SYN_PU)
    # disable all somas so no neuron activity interferes with the measurement
    for n_idx in range(NRN_N):
        HAL.driver.SetSomaEnableStatus(CORE, n_idx, bd.bdpars.SomaStatusId.DISABLED)
    # enable every synapse under test
    for s_idx in range(SYN_N):
        HAL.driver.SetSynapseEnableStatus(CORE, s_idx, bd.bdpars.SynapseStatusId.ENABLED)
    HAL.flush()
def set_hal():
    """Set the HAL traffic settings

    Matches the downstream time resolution to the spike generator's time
    unit, starts traffic, and turns off spike/output recording (only FIFO
    overflow counts are read in this experiment).
    """
    # clear queues
    HAL.set_time_resolution(downstream_ns=SPIKE_GEN_TIME_UNIT_NS, upstream_ns=10000000)
    HAL.start_traffic(flush=False)
    HAL.disable_spike_recording(flush=False)
    HAL.disable_output_recording(flush=True)
def set_tat(syn_idx):
    """Set up the tag action table to send spikes to an individual synapse

    Programs a single TAT entry that fans each incoming tag out to the same
    synapse twice, once with each sign (the +1/-1 spike pair described in
    the module docstring).

    :param syn_idx: index of the synapse to target
    """
    # pause traffic while reprogramming, giving in-flight events time to drain
    HAL.stop_traffic(flush=True)
    sleep(DEFAULT_SLOP_TIME)
    HAL.start_traffic(flush=True)
    addr = HAL.driver.BDPars.GetSynAERAddr(syn_idx)  # AER address of the target synapse
    tat_entry = bd.PackWord([
        (bd.TATSpikeWord.STOP, TAT_STOP_BIT),
        (bd.TATSpikeWord.SYNAPSE_ADDRESS_0, addr),
        (bd.TATSpikeWord.SYNAPSE_SIGN_0, TAT_SIGN_0),
        (bd.TATSpikeWord.SYNAPSE_ADDRESS_1, addr),
        (bd.TATSpikeWord.SYNAPSE_SIGN_1, TAT_SIGN_1)])
    HAL.driver.SetMem(CORE, bd.bdpars.BDMemId.TAT0, [tat_entry], TAT_START_ADDR)
    HAL.flush()
def toggle_spk_generator(rate, test_time, slop_time):
    """Toggle the spike generator and check for overflow

    Schedules the FPGA spike generator to run at `rate` for `test_time`
    seconds (delayed by `slop_time` so traffic can settle), waits out the
    window, and returns the FIFO overflow count accumulated during it.

    :param rate: spike generator rate (spikes/s)
    :param test_time: measurement duration in seconds
    :param slop_time: settle time in seconds before and after the window
    :returns: overflow count from the first FIFO counter
    """
    clear_overflows(HAL, slop_time)  # zero the overflow counters before measuring
    test_time_ns = int(test_time*1E9)
    slop_time_ns = int(slop_time*1E9)
    cur_time_ns = HAL.get_time()
    # turn the generator on at cur+slop ...
    HAL.driver.SetSpikeGeneratorRates(
        CORE, [SPIKE_GEN_IDX], [TAT_IDX], [rate], time=cur_time_ns+slop_time_ns)
    # ... and off again test_time later
    HAL.driver.SetSpikeGeneratorRates(
        CORE, [SPIKE_GEN_IDX], [TAT_IDX], [0], time=cur_time_ns+test_time_ns+slop_time_ns)
    sleep(test_time + 2*slop_time)  # wait out the scheduled window plus trailing slop
    overflow_0, _ = HAL.driver.GetFIFOOverflowCounts(CORE)
    return overflow_0
def test_syn(syn_idx, test_time, slop_time):
    """Deliver spikes to a synapse to find its spike consumption rate

    Drives the synapse well above its possible consumption rate so the
    datapath backs up; per the module docstring, the overflow rate then
    reflects the synapse's consumption rate.

    :returns: estimated maximum synapse input rate in spikes/s
    """
    set_tat(syn_idx)
    # check overflow rate at max spike gen rate to predict max synapse rate
    overflows = toggle_spk_generator(SPIKE_GEN_RATE, test_time, slop_time)
    overflow_rate = overflows / test_time
    max_rate = 2 * overflow_rate # 2 spikes per TAT entry
    print("Synapse {}, Input Rate {:.1f} overflow_count {} overflow rate {:.1f}".format(
        syn_idx, SPIKE_GEN_RATE, overflows, overflow_rate))
    return max_rate
def report_time_remaining(start_time, syn_idx):
    """Occasionally estimate and report the remaining time.

    Every 4th synapse, extrapolates the mean per-synapse duration so far
    to the synapses that remain and prints the estimate.
    """
    if syn_idx % 4 != 0 or SYN_N <= 1:
        return
    completed = syn_idx + 1
    elapsed = get_time() - start_time
    est_remaining = elapsed / completed * (SYN_N - completed)
    print("\tEstimated time remaining: {:.0f} s = {:.1f} min = {:.2f} hr...".format(
        est_remaining, est_remaining/60., est_remaining/60./60.))
def plot_data(max_rates, data_dir):
    """Plot the data

    Saves a scatter of per-synapse max rates; when more than one synapse was
    tested, also saves histogram/CDF figures, and when the full synapse array
    was tested (SYN_N == NRN_N//4), additionally saves 2D heatmaps and
    per-tile-position comparison plots.

    :param max_rates: 1D array of per-synapse max input rates (spks/s)
    :param data_dir: directory path (with trailing slash) for output PDFs
    """
    syn_n = len(max_rates)
    max_rates_mean = np.mean(max_rates)
    max_rates_min = np.min(max_rates)
    max_rates_max = np.max(max_rates)
    # scatter of max rate vs synapse index
    fig_1d, axs = plt.subplots(figsize=(8, 6))
    axs.plot(max_rates, 'o', markersize=1.5)
    axs.set_xlabel("Synapse Index")
    axs.set_ylabel("Max Input Rate (spks/s)")
    fig_1d.savefig(data_dir + "syn_idx_vs_max_rate.pdf")
    if syn_n > 1: # make histograms
        fig_hist, axs = plt.subplots(ncols=2, figsize=(12, 6))
        axs[0].hist(max_rates, bins=80)
        axs[0].axvline(max_rates_mean, color="k", linewidth=1, label="mean")
        axs[0].set_xlabel("Max Input Rate (spks/s)")
        axs[0].set_ylabel("Counts")
        axs[0].set_title(
            "Full Rate Histogram\n"+
            "Min:{:,.0f} Mean:{:,.0f} Max:{:,.0f}".format(
                max_rates_min, max_rates_mean, max_rates_max))
        # empirical CDF: sorted rates vs cumulative fraction
        axs[1].plot(np.sort(max_rates), (np.arange(syn_n)+1)/syn_n)
        axs[1].axvline(max_rates_mean, color="k", linewidth=1, label="mean")
        axs[1].set_xlabel("Max Input Rate (spks/s)")
        axs[1].set_ylabel("Cumulative Probability")
        axs[1].set_title("Full Rate Cumulative Distribution Function")
        fig_hist.suptitle("All Synapses")
        fig_hist.savefig(data_dir + "histogram.pdf")
    if syn_n == NRN_N//4: # all syn_n tested
        sqrt_n = int(np.ceil(np.sqrt(syn_n)))
        max_rates_2d = max_rates.reshape((sqrt_n, -1))
        # square heatmap of the synapse array
        fig_heatmap, axs = plt.subplots()
        ims = axs.imshow(max_rates_2d)
        plt.colorbar(ims)
        axs.set_xlabel("Synapse X Coordinate")
        axs.set_ylabel("Synapse Y Coordinate")
        axs.set_title("Max Input Rate (spks/s)")
        fig_heatmap.savefig(data_dir + "2d_heatmap.pdf")
        # staggered ("hex") layout: each value is doubled horizontally, with
        # odd rows shifted one slot right; NaN cells render as background
        max_rates_hex = np.nan*np.ones((sqrt_n, sqrt_n*2+1))
        max_rates_hex[0::2, 0:sqrt_n*2:2] = max_rates_2d[0::2, :]
        max_rates_hex[0::2, 1:sqrt_n*2:2] = max_rates_2d[0::2, :]
        max_rates_hex[1::2, 1:sqrt_n*2:2] = max_rates_2d[1::2, :]
        max_rates_hex[1::2, 2:sqrt_n*2+1:2] = max_rates_2d[1::2, :]
        fig_hex_heatmap, axs = plt.subplots()
        matplotlib.cm.get_cmap().set_bad(color='w')  # show NaN cells as white
        ims = axs.imshow(max_rates_hex, aspect=2)
        axs.set_xticks([])
        axs.set_yticks([])
        plt.colorbar(ims)
        axs.set_title("Max Input Rate (spks/s)")
        fig_hex_heatmap.savefig(data_dir + "2d_hex_heatmap.pdf")
        # group synapses by their (row parity, column parity) position
        # within each 2x2 tile
        tile_max_rates = dict(
            upper_left=max_rates_2d[0::2, 0::2].flatten(),
            upper_right=max_rates_2d[0::2, 1::2].flatten(),
            lower_left=max_rates_2d[1::2, 0::2].flatten(),
            lower_right=max_rates_2d[1::2, 1::2].flatten())
        fig_tile, axs = plt.subplots(ncols=3, figsize=(20, 6))
        offset = 0
        # stacked histograms, one band per tile position
        for pos in tile_max_rates:
            hist_values, bin_edges = np.histogram(tile_max_rates[pos], bins=50)
            bin_centers = bin_edges[:-1] + np.diff(bin_edges)/2
            offset -= np.max(hist_values)
            axs[0].fill_between(
                bin_centers, np.ones_like(hist_values)*offset, hist_values+offset, label=pos)
        axs[0].set_xlabel("Max Input Rate (spks/s)")
        axs[0].set_yticks([])
        axs[0].set_title("Histograms")
        axs[0].legend()
        cdf_idxs = {}
        # per-position empirical CDFs
        for pos in tile_max_rates:
            n_syn = len(tile_max_rates[pos])
            cdf_values = (np.arange(n_syn)+1) / n_syn
            cdf_idxs[pos] = np.sort(tile_max_rates[pos])
            axs[1].plot(cdf_idxs[pos], cdf_values, alpha=1.0, label=pos)
        axs[1].set_xlabel("Max Input Rate (spks/s)")
        axs[1].set_ylabel("Probability")
        axs[1].set_title("Cumulative Distribution Functions")
        axs[1].legend()
        # quantile-quantile comparison of every pair of tile positions
        positions = list(tile_max_rates.keys())
        for idx0, pos0 in enumerate(positions):
            for pos1 in positions[idx0+1:]:
                axs[2].plot(cdf_idxs[pos0], cdf_idxs[pos1],
                            linewidth=1, label="{} : {}".format(pos0, pos1))
        xlim = axs[2].get_xlim()
        ylim = axs[2].get_ylim()
        min_val = np.min(xlim + ylim)
        max_val = np.max(xlim + ylim)
        # y = x reference line for the Q-Q plot
        axs[2].plot([min_val, max_val], [min_val, max_val], 'k', linewidth=1)
        axs[2].set_xlim(xlim)
        axs[2].set_ylim(ylim)
        axs[2].set_xlabel("Position 0's Max Input Rate (spks/s)")
        axs[2].set_ylabel("Position 1's Max Input Rate (spks/s)")
        axs[2].set_title("Quantile-Quantiles")
        axs[2].legend(title="Position 0 : Position 1")
        fig_tile.suptitle("Dividing Synapses by Position in Tile")
        fig_tile.savefig(data_dir + "syn_tile.pdf")
def calibrate_syn_pulse_extender(parsed_args):
    """Run the calibration

    Measures each synapse's maximum input consumption rate, saves the raw
    rates to disk, stores the reciprocal rates as per-synapse pulse widths
    in the HAL calibration database, and plots the results.

    :param parsed_args: argparse.Namespace carrying the syn_pd DAC setting
    """
    syn_pd = parsed_args.syn_pd
    data_dir = set_data_dir(__file__, "dac_syn_pd_" + str(syn_pd) + "/")
    max_rates = np.zeros(SYN_N)
    build_net()
    set_analog(syn_pd)
    set_hal()
    start_time = get_time()
    for syn_idx in range(SYN_N):
        max_rates[syn_idx] = test_syn(syn_idx, DEFAULT_TEST_TIME, DEFAULT_SLOP_TIME)
        report_time_remaining(start_time, syn_idx)
    np.savetxt(data_dir + "max_rates.txt", max_rates)
    # pulse width is the reciprocal of the max consumption rate
    # NOTE(review): the hard-coded (32, 32) shape assumes SYN_N == 1024; this
    # line raises ValueError for the smaller SYN_N values commented out above
    # -- confirm whether other sizes are ever used with calibration storage
    pulse_widths = (1./max_rates).reshape((32, 32))
    HAL.add_calibration("synapse", "pulse_width_dac_{}".format(syn_pd), pulse_widths)
    plot_data(max_rates, data_dir)
    plt.show()
if __name__ == "__main__":
    calibrate_syn_pulse_extender(parse_args())
|
{"hexsha": "aa3a9553ad564db8b60bb9379b416af8e5f2155f", "size": 10815, "ext": "py", "lang": "Python", "max_stars_repo_path": "pystorm/calibration/syn_pulse_extender.py", "max_stars_repo_name": "Stanford-BIS/pystorm", "max_stars_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-12-19T06:46:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-08T18:53:20.000Z", "max_issues_repo_path": "pystorm/calibration/syn_pulse_extender.py", "max_issues_repo_name": "Stanford-BIS/pystorm", "max_issues_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-13T00:30:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-14T18:55:44.000Z", "max_forks_repo_path": "pystorm/calibration/syn_pulse_extender.py", "max_forks_repo_name": "Stanford-BIS/pystorm", "max_forks_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0555555556, "max_line_length": 131, "alphanum_fraction": 0.6736014794, "include": true, "reason": "import numpy", "num_tokens": 3072}
|
import copy
import numpy as np
import hashlib
import collections
class Individual(object):
    """A candidate solution in the NSGA-Net population.

    Holds a binary genome together with its evaluation results (accuracy,
    FLOPs) and NSGA-II bookkeeping (non-domination rank, crowding distance).
    """

    def __init__(self, id, params, n_var, genome=None):
        """
        Args:
            id: unique string identifier, e.g. 'indi00001_00003'.
            params: global search settings (stored, not interpreted here).
            n_var: number of binary decision variables in the genome.
            genome: optional pre-built genome; when omitted or empty, a
                random binary genome of length n_var is generated.
                (The previous ``genome=[]`` was a mutable default argument,
                a shared object across all calls -- replaced with None.)
        """
        self.id = id
        self.acc = -1          # accuracy; -1 means "not evaluated yet"
        self.flop = -1         # FLOP count; -1 means "not evaluated yet"
        self.params = params
        self.n_var = n_var
        self.rank = np.inf     # non-domination rank (lower is better)
        self.crowding = -1     # crowding distance within its front
        if genome is None or len(genome) == 0:
            self.genome = self.random_generate_genome()
        else:
            self.genome = genome

    def random_generate_genome(self):
        """Return a uniform random binary genome of length n_var."""
        val = np.random.random(self.n_var)
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `int` is the equivalent dtype.
        return (val < 0.5).astype(int)

    def uuid(self):
        """Return (sha224 hex digest, canonical string) identifying this genome."""
        _str = 'genome:' + str(self.genome)
        _final_utf8_str_ = _str.encode('utf-8')
        _hash_key = hashlib.sha224(_final_utf8_str_).hexdigest()
        return _hash_key, _str

    def __str__(self):
        """Human-readable summary: id, fitness values, and genome."""
        _str = []
        _str.append('indi:%s' % (self.id))
        _str.append('Acc:%.5f' % (self.acc))
        _str.append('flop:%.5f' % (self.flop))
        _str.append('genome:' + str(self.genome))
        return '\n'.join(_str)

    def reset(self):
        """Clear evaluation results and NSGA-II bookkeeping for re-evaluation."""
        self.acc = -1
        self.flop = -1
        self.rank = np.inf
        self.crowding = -1
class Population(object):
    """A generation of individuals in the NSGA-Net search.

    Tracks the generation number and hands out sequential ids of the form
    'indiGGGGG_NNNNN' to every individual it creates or adopts.
    """

    def __init__(self, gen_no, params):
        self.gen_no = gen_no
        self.number_id = 0            # next sequence number for id assignment
        self.pop_size = params['pop_size']
        self.individuals = []
        self.params = params
        self.n_var = params['n_var']

    def _next_id(self):
        """Return the next 'indiGGGGG_NNNNN' id and advance the counter."""
        new_id = 'indi%05d_%05d' % (self.gen_no, self.number_id)
        self.number_id += 1
        return new_id

    def initialize(self):
        """Fill the population with pop_size randomly-generated individuals."""
        for _ in range(self.pop_size):
            self.individuals.append(
                Individual(self._next_id(), self.params, self.n_var))

    def __str__(self):
        """One block per individual, separated by a dashed rule."""
        lines = []
        for member in self.individuals:
            lines.append(str(member))
            lines.append("-"*100)
        return '\n'.join(lines)

    def create_from_offspring(self, offspring):
        """Adopt deep copies of *offspring*, re-labelling each with a fresh id."""
        for candidate in copy.deepcopy(offspring):
            adopted = copy.deepcopy(candidate)
            adopted.id = self._next_id()
            self.individuals.append(adopted)
|
{"hexsha": "63a7cb936cabdbbb3d4a37e97659f91fe21c8152", "size": 2248, "ext": "py", "lang": "Python", "max_stars_repo_path": "BenchENAS_linux_platform/algs/nsga_net/genetic/population.py", "max_stars_repo_name": "benchenas/BenchENAS", "max_stars_repo_head_hexsha": "776cd1dd035d73c4af369d0106d010b932f64782", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-21T07:56:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T07:56:13.000Z", "max_issues_repo_path": "BenchENAS_linux_platform/algs/nsga_net/genetic/population.py", "max_issues_repo_name": "benchenas/BenchENA", "max_issues_repo_head_hexsha": "cfb18a8e4bc6cd654df1011c94e3b8a61cd64da3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-24T06:07:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T06:07:27.000Z", "max_forks_repo_path": "BenchENAS_linux_platform/algs/nsga_net/genetic/population.py", "max_forks_repo_name": "benchenas/BenchENAS", "max_forks_repo_head_hexsha": "776cd1dd035d73c4af369d0106d010b932f64782", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-20T07:36:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T07:36:37.000Z", "avg_line_length": 28.8205128205, "max_line_length": 69, "alphanum_fraction": 0.5658362989, "include": true, "reason": "import numpy", "num_tokens": 596}
|
# Scan over Hubbard-U values: for each u, set up the Gutzwiller model, run
# the CyGutz solver, and keep the converged result as the next starting
# point (plus a per-u archive copy).
import numpy as np
from pyglib.model import circauxi
import shutil,subprocess
cmd = ['/home/ykent/WIEN_GUTZ/bin2/CyGutz', '-r', '-1']
# NOTE(review): arange(1.0, 0.9, -10) yields only the single value 1.0 -- the
# step of -10 overshoots the interval. If an actual scan was intended the step
# was presumably something like -0.01; confirm before relying on this range.
for i,u in enumerate(np.arange(1.0,0.9,-10)):
    print(' Running with u = {}'.format(u))
    circauxi.gutz_model_setup(u=u, nmesh=5000, norb=3, tiny=0.0, mu=0.0)
    subprocess.call(cmd)
    # reuse the best solution as the initial guess for the next run
    shutil.copyfile('WH_RL_BEST.h5', 'WH_RL_INIT.h5')
    # archive the solution for this particular u
    shutil.copyfile('WH_RL_BEST.h5', 'WH_RL_INIT.h5_{}'.format(u))
|
{"hexsha": "6e41bba2f326ea490fdd75eeb08fad2e8a9cafab", "size": 446, "ext": "py", "lang": "Python", "max_stars_repo_path": "ComRISB/pyglib/pyglib/model/test/test_semicircular_metal_2g/REF/scan.py", "max_stars_repo_name": "comscope/comsuite", "max_stars_repo_head_hexsha": "d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-06-15T18:08:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T05:01:29.000Z", "max_issues_repo_path": "ComRISB/pyglib/pyglib/model/test/test_semicircular_metal_2g/REF/scan_u.py", "max_issues_repo_name": "comscope/Comsuite", "max_issues_repo_head_hexsha": "b80ca9f34c519757d337487c489fb655f7598cc2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ComRISB/pyglib/pyglib/model/test/test_semicircular_metal_2g/REF/scan_u.py", "max_forks_repo_name": "comscope/Comsuite", "max_forks_repo_head_hexsha": "b80ca9f34c519757d337487c489fb655f7598cc2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-06-05T02:57:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T02:54:25.000Z", "avg_line_length": 31.8571428571, "max_line_length": 72, "alphanum_fraction": 0.6816143498, "include": true, "reason": "import numpy", "num_tokens": 161}
|
function kern = nddisimKernParamInit(kern)
% NDDISIMKERNPARAMINIT NDDISIM kernel parameter initialisation.
% The driven input single input motif (DISIM) kernel is specifically designed for
% working with gene networks where there is assumed to be a single
% transcription factor controlling several genes. This transcription
% factor, in turn, is driven by its own gene's RNA. The model takes the
% following form: each gene is
% related to the transcription factor through the following
% differential equation,
%
% dx(t)/dt = B + C f(t-delta) - D x(t),
%
% where D is a decay term, C is a response term, delta is a time delay
% and B is an initial level. Then if f(t) is assumed to be the result of
% a further differential equation,
%
% df(t)/dt = Sx'(t) - D' x'(t)
%
% where x'(t) is assumed to come from a Gaussian process with an RBF
% covariance function f(t) is a Gaussian process with a covariance function
% provided by the single input motif kernel (SIM) and x(t) is a Gaussian
% process with covariance function provided by this kernel, the DISIM kernel.
%
% The kernel is designed to interoperate with the multiple output
% block kernel so that f(t) can be inferred given several different
% instantiations of x(t) (associated with different genes).
%
% The parameters (B, C, delta, S, D and D') are constrained positive.
%
% FORMAT
% DESC initialises the single input motif
% kernel structure with some default parameters.
% ARG kern : the kernel structure which requires initialisation.
% RETURN kern : the kernel structure with the default parameters placed in.
%
% SEEALSO : kernCreate, kernParamInit, simKernCompute
%
% COPYRIGHT : Neil D. Lawrence, 2006
%
% COPYRIGHT : Antti Honkela, 2007, 2009
%
% COPYRIGHT: Jaakko Peltonen, 2011
% KERN
% The kernel is defined only over scalar (time) inputs.
if kern.inputDimension > 1
  error('NDDISIM kernel is only valid for one-dimensional input.')
end
% Optionally model a Gaussian-distributed initial condition, which adds one
% extra parameter (initialVariance) to the kernel.
if isfield(kern, 'options') && isfield(kern.options, 'gaussianInitial') && ...
      kern.options.gaussianInitial,
  kern.gaussianInitial = 1;
  kern.initialVariance = 1;
else
  kern.gaussianInitial = 0;
end
% Default parameter values; delay starts near (but not exactly) zero.
kern.inverseWidth = 1;
kern.di_variance = 1;
kern.decay = 1;
kern.variance = 1;
kern.delay = 1e-7;
% Parameter count depends on whether the Gaussian initial condition is used.
if kern.gaussianInitial,
  kern.nParams = 6;
else
  kern.nParams = 5;
end
% Choose the positivity/bounding transform applied to all parameters;
% defaults to 'sigmoidab' when no option is given.
if isfield(kern, 'options') && isfield(kern.options, 'paramTransform'),
  paramTransform = kern.options.paramTransform;
else
  paramTransform = 'sigmoidab';
end
% Install the same transform (with range [0, 1e6] where applicable) for
% every parameter; 'none' leaves parameters untransformed.
switch paramTransform,
  case 'sigmoidab',
    for k=1:kern.nParams,
      kern.transforms(k).index = k;
      kern.transforms(k).type = 'sigmoidab';
      kern.transforms(k).transformsettings = [0 1e6];
    end;
  case 'bounded',
    for k=1:kern.nParams,
      kern.transforms(k).index = k;
      kern.transforms(k).type = optimiDefaultConstraint('bounded');
      kern.transforms(k).transformsettings = [0 1e6];
    end;
  case 'identity',
    for k=1:kern.nParams,
      kern.transforms(k).index = k;
      kern.transforms(k).type = 'identity';
      kern.transforms(k).transformsettings = [0 1e6];
    end;
  case 'positive',
    for k=1:kern.nParams,
      kern.transforms(k).index = k;
      kern.transforms(k).type = optimiDefaultConstraint('positive');
    end;
  case 'none',
  otherwise,
    error('Unknown paramTransform');
end
% Kernel depends on absolute times, not just time differences.
kern.isStationary = false;
|
{"author": "SheffieldML", "repo": "GPmat", "sha": "4b5914a38ecbad9fb7a13a3392970bfc28c9d911", "save_path": "github-repos/MATLAB/SheffieldML-GPmat", "path": "github-repos/MATLAB/SheffieldML-GPmat/GPmat-4b5914a38ecbad9fb7a13a3392970bfc28c9d911/kern/nddisimKernParamInit.m"}
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Generate Number Partitioning (Partition) instances, and convert them
into a Hamiltonian given as a Pauli list.
"""
import logging
import warnings
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator
logger = logging.getLogger(__name__)
def get_operator(values):
    """Construct the Hamiltonian for a given Partition instance.

    Given a list of numbers for the Number Partitioning problem, we
    construct the Hamiltonian described as a list of Pauli gates.

    Args:
        values (numpy.ndarray): array of values.

    Returns:
        tuple(WeightedPauliOperator, float): operator for the Hamiltonian and a
        constant shift for the obj function.
    """
    n = len(values)
    # The Hamiltonian is:
    # \sum_{i,j=1,\dots,n} ij z_iz_j + \sum_{i=1,\dots,n} i^2
    pauli_list = []
    for i in range(n):
        for j in range(i):
            # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin `bool` is the equivalent dtype.
            x_p = np.zeros(n, dtype=bool)
            z_p = np.zeros(n, dtype=bool)
            z_p[i] = True
            z_p[j] = True
            pauli_list.append([2. * values[i] * values[j], Pauli(z_p, x_p)])
    return WeightedPauliOperator(paulis=pauli_list), sum(values*values)
def partition_value(x, number_list):
    """Compute the value of a partition.

    Args:
        x (numpy.ndarray): binary string as numpy array.
        number_list (numpy.ndarray): list of numbers in the instance.

    Returns:
        float: difference squared between the two sides of the number
            partition.
    """
    # Sum each side of the partition as selected by the binary assignment.
    side_zero = number_list[x == 0].sum()
    side_one = number_list[x == 1].sum()
    gap = side_zero - side_one
    return gap * gap
def random_number_list(n, weight_range=100, savefile=None):
    """Generate a random number list (deprecated passthrough).

    Deprecated: the implementation moved to
    ``qiskit.optimization.ising.common``; this shim warns and forwards.
    """
    # pylint: disable=import-outside-toplevel
    from .common import random_number_list as redirect_func
    # fixed the doubled comma (",,") in the original warning text
    warnings.warn("random_number_list function has been moved to "
                  "qiskit.optimization.ising.common, "
                  "the method here will be removed after Aqua 0.7+",
                  DeprecationWarning)
    return redirect_func(n=n, weight_range=weight_range, savefile=savefile)
def read_numbers_from_file(filename):
    """ read numbers from file """
    # Deprecated passthrough: the implementation now lives in
    # qiskit.optimization.ising.common; this shim warns and forwards.
    # pylint: disable=import-outside-toplevel
    from .common import read_numbers_from_file as redirect_func
    warnings.warn("read_numbers_from_file function has been moved to "
                  "qiskit.optimization.ising.common, "
                  "the method here will be removed after Aqua 0.7+",
                  DeprecationWarning)
    return redirect_func(filename)
def sample_most_likely(state_vector):
    """Sample the most likely bitstring (deprecated passthrough).

    Deprecated: the implementation moved to
    ``qiskit.optimization.ising.common``; this shim warns and forwards.
    """
    # pylint: disable=import-outside-toplevel
    from .common import sample_most_likely as redirect_func
    # fixed the doubled comma (",,") in the original warning text
    warnings.warn("sample_most_likely function has been moved "
                  "to qiskit.optimization.ising.common, "
                  "the method here will be removed after Aqua 0.7+",
                  DeprecationWarning)
    return redirect_func(state_vector=state_vector)
def get_partition_qubitops(values):
    """Deprecated: build the partition qubit operator.

    Forwards to :func:`get_operator` after emitting a DeprecationWarning.
    """
    # Fixed missing separator between the two concatenated string literals:
    # the message previously rendered as "...changed to get_operatorthe method...".
    warnings.warn("get_partition_qubitops function has been changed to get_operator, "
                  "the method here will be removed after Aqua 0.7+",
                  DeprecationWarning)
    return get_operator(values)
|
{"hexsha": "db6fa0ee3be1332d3e2e622fda3a687844557315", "size": 3858, "ext": "py", "lang": "Python", "max_stars_repo_path": "qiskit/optimization/ising/partition.py", "max_stars_repo_name": "IanJoel/qiskit-aqua", "max_stars_repo_head_hexsha": "7707172d01f0539358f1ce2406f307e830105303", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qiskit/optimization/ising/partition.py", "max_issues_repo_name": "IanJoel/qiskit-aqua", "max_issues_repo_head_hexsha": "7707172d01f0539358f1ce2406f307e830105303", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qiskit/optimization/ising/partition.py", "max_forks_repo_name": "IanJoel/qiskit-aqua", "max_forks_repo_head_hexsha": "7707172d01f0539358f1ce2406f307e830105303", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-13T02:17:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-09T07:56:25.000Z", "avg_line_length": 34.1415929204, "max_line_length": 84, "alphanum_fraction": 0.6765163297, "include": true, "reason": "import numpy", "num_tokens": 895}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import sys
from elq.index.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer, DenseIVFFlatIndexer
import logging
import torch
import numpy as np
from colorama import init
from termcolor import colored
import torch.nn.functional as F
import blink.ner as NER
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from elq.biencoder.biencoder import BiEncoderRanker, load_biencoder, to_bert_input
from elq.biencoder.data_process import (
process_mention_data,
get_context_representation_single_mention,
get_candidate_representation,
)
import elq.candidate_ranking.utils as utils
import math
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
from elq.biencoder.utils import batch_reshape_mask_left
import os
import sys
from tqdm import tqdm
import pdb
import time
# Background color names (termcolor "on_<color>" styles) cycled through when
# rendering predicted mention spans in the console display helpers below.
HIGHLIGHTS = [
    "on_red",
    "on_green",
    "on_yellow",
    "on_blue",
    "on_magenta",
    "on_cyan",
]
from transformers import BertTokenizer
# tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Module-level tokenizer (Italian cased BERT vocabulary) used by the display
# and prediction helpers below to decode token ids back to text.
# NOTE(review): this is separate from biencoder.tokenizer used for encoding --
# confirm the two vocabularies match.
tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-italian-cased")
def _print_colorful_text(input_tokens, tokenizer, pred_triples):
    """
    Print the decoded ``input_tokens`` with each predicted mention span
    highlighted in a distinct background color.

    pred_triples:
        Assumes no overlapping triples
    """
    init()  # colorful output
    order = sorted(range(len(pred_triples)), key=lambda k: pred_triples[k][1])
    if not pred_triples or len(pred_triples) == 0:
        # nothing predicted: print the plain decoded text
        msg = tokenizer.decode(input_tokens)
    else:
        # leading text before the first mention
        pieces = [tokenizer.decode(input_tokens[0: int(pred_triples[order[0]][1])])]
        for rank, k in enumerate(order):
            triple = pred_triples[k]
            # the mention itself, highlighted
            pieces.append(" " + colored(
                tokenizer.decode(input_tokens[int(triple[1]): int(triple[2])]),
                "grey",
                HIGHLIGHTS[k % len(HIGHLIGHTS)],
            ))
            if rank < len(order) - 1:
                # gap between this mention and the next one
                nxt = pred_triples[order[rank + 1]]
                pieces.append(" " + tokenizer.decode(input_tokens[
                    int(triple[2]): int(nxt[1])
                ]))
            else:
                # trailing text after the last mention
                pieces.append(" " + tokenizer.decode(input_tokens[int(triple[2]):]))
        msg = "".join(pieces)
    print("\n" + str(msg) + "\n")
def _print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata):
    """Print one linked-entity block per predicted mention, ordered by span start."""
    order = sorted(range(len(pred_triples)), key=lambda k: pred_triples[k][1])
    preds = all_entity_preds[0]
    for k in order:
        entity_id = pred_triples[k][0]
        # mention surface form, highlighted with the same color as in the text
        print(colored(preds['pred_tuples_string'][k][1], "grey", HIGHLIGHTS[k % len(HIGHLIGHTS)]))
        if entity_id in id2wikidata:
            print("    Wikidata ID: {}".format(id2wikidata[entity_id]))
        print("    Title: {}".format(preds['pred_tuples_string'][k][0]))
        print("    Score: {}".format(str(preds['scores'][k])))
        print("    Triple: {}".format(str(pred_triples[k])))
        print("    Text: {}".format(id2text[entity_id]))
def _load_candidates(
entity_catalogue, entity_encoding,
faiss_index="none", index_path=None,
logger=None,
):
if faiss_index == "none":
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
elif faiss_index == "ivfflat":
indexer = DenseIVFFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw,ivfflat.")
indexer.deserialize_from(index_path)
candidate_encoding = torch.load(entity_encoding)
if not os.path.exists("models/id2title.json"):
id2title = {}
id2text = {}
id2wikidata = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
id2title[str(local_idx)] = entity["title"]
id2text[str(local_idx)] = entity["text"]
if "kb_idx" in entity:
id2wikidata[str(local_idx)] = entity["kb_idx"]
local_idx += 1
json.dump(id2title, open("models/id2title.json", "w"))
json.dump(id2text, open("models/id2text.json", "w"))
json.dump(id2wikidata, open("models/id2wikidata.json", "w"))
else:
if logger: logger.info("Loading id2title")
id2title = json.load(open("models/id2title.json"))
if logger: logger.info("Loading id2text")
id2text = json.load(open("models/id2text.json"))
if logger: logger.info("Loading id2wikidata")
id2wikidata = json.load(open("models/id2wikidata.json"))
return (
candidate_encoding, indexer,
id2title, id2text, id2wikidata,
)
def _get_test_samples(
test_filename, test_entities_path, logger,
):
"""
Parses jsonl format with one example per line
Each line of the following form
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if logger: logger.info("Loading test samples")
test_samples = []
unknown_entity_samples = []
num_unknown_entity_samples = 0
num_no_gold_entity = 0
ner_errors = 0
with open(test_filename, "r") as fin:
lines = fin.readlines()
sample_idx = 0
do_setup_samples = True
for i, line in enumerate(lines):
record = json.loads(line)
test_samples.append(record)
return test_samples, num_unknown_entity_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params, logger):
"""
Samples: list of examples, each of the form--
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if 'label_id' in samples[0]:
# have labels
tokens_data, tensor_data_tuple, _ = process_mention_data(
samples=samples,
tokenizer=tokenizer,
max_context_length=biencoder_params["max_context_length"],
max_cand_length=biencoder_params["max_cand_length"],
silent=False,
logger=logger,
debug=biencoder_params["debug"],
add_mention_bounds=(not biencoder_params.get("no_mention_bounds", False)),
params=biencoder_params,
)
else:
samples_text_tuple = []
max_seq_len = 0
for sample in samples:
samples_text_tuple
# truncate the end if the sequence is too long...
encoded_sample = [101] + tokenizer.encode(sample['text'])[:biencoder_params["max_context_length"] - 2] + [
102]
max_seq_len = max(len(encoded_sample), max_seq_len)
samples_text_tuple.append(
encoded_sample + [0 for _ in range(biencoder_params["max_context_length"] - len(encoded_sample))])
# print(samples_text_tuple)
tensor_data_tuple = [torch.tensor(samples_text_tuple)]
tensor_data = TensorDataset(*tensor_data_tuple)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
def _run_biencoder(
    args, biencoder, dataloader, candidate_encoding, samples,
    num_cand_mentions=50, num_cand_entities=10,
    device="cpu", sample_to_all_context_inputs=None,
    threshold=0.0, indexer=None,
):
    """
    Run mention detection and entity-candidate retrieval over ``dataloader``,
    scoring either against the dense ``candidate_encoding`` matrix or a faiss
    ``indexer`` (used when ``indexer`` is not None).

    NOTE(review): the previous docstring also listed a ``labels`` return
    value; the function actually returns the five values below.

    Returns: tuple
        nns (List[Array[int]]) [(# of pred mentions, cands_per_mention) x exs]: predicted entity IDs in each example
        dists (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: scores of each entity in nns
        pred_mention_bounds (List[Array[int]]) [(# of pred mentions, 2) x exs]: predicted mention boundaries in each examples
        mention_scores (List[Array[float]]) [(# of pred mentions,) x exs]: mention score logit
        cand_scores (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: candidate score logit
    """
    biencoder.model.eval()
    # Unwrap DataParallel if present. NOTE(review): biencoder_model is never
    # used after this -- dead code kept as-is.
    biencoder_model = biencoder.model
    if hasattr(biencoder.model, "module"):
        biencoder_model = biencoder.model.module
    context_inputs = []  # NOTE(review): populated but never returned
    nns = []
    dists = []
    mention_dists = []  # NOTE(review): unused
    pred_mention_bounds = []
    mention_scores = []
    cand_scores = []
    sample_idx = 0  # NOTE(review): unused
    ctxt_idx = 0  # NOTE(review): unused
    label_ids = None  # NOTE(review): unused; labels are handled in get_predictions
    for step, batch in enumerate(tqdm(dataloader)):
        context_input = batch[0].to(device)
        # mask of non-NULL positions (presumably non-padding -- NULL_IDX is
        # defined by the biencoder)
        mask_ctxt = context_input != biencoder.NULL_IDX
        with torch.no_grad():
            context_outs = biencoder.encode_context(
                context_input, num_cand_mentions=num_cand_mentions, topK_threshold=threshold,
            )
            embedding_ctxt = context_outs['mention_reps']
            left_align_mask = context_outs['mention_masks']
            chosen_mention_logits = context_outs['mention_logits']
            chosen_mention_bounds = context_outs['mention_bounds']
        '''
        GET TOP CANDIDATES PER MENTION
        '''
        # (all_pred_mentions_batch, embed_dim)
        embedding_ctxt = embedding_ctxt[left_align_mask]
        if indexer is None:
            try:
                cand_logits, _, _ = biencoder.score_candidate(
                    context_input, None,
                    text_encs=embedding_ctxt,
                    cand_encs=candidate_encoding.to(device),
                )
                # DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
                top_cand_logits_shape, top_cand_indices_shape = cand_logits.topk(num_cand_entities, dim=-1,
                                                                                 sorted=True)
            except:
                # NOTE(review): bare except -- presumably intended to catch
                # CUDA OOM when scoring all candidates at once; confirm.
                # for memory savings, go through one chunk of candidates at a time
                SPLIT_SIZE = 1000000
                done = False
                while not done:
                    top_cand_logits_list = []
                    top_cand_indices_list = []
                    max_chunk = int(len(candidate_encoding) / SPLIT_SIZE)
                    for chunk_idx in range(max_chunk):
                        try:
                            # DIM (num_total_mentions, num_cand_entities); (num_total_mention, num_cand_entities)
                            top_cand_logits, top_cand_indices = embedding_ctxt.mm(
                                candidate_encoding[chunk_idx * SPLIT_SIZE:(chunk_idx + 1) * SPLIT_SIZE].to(
                                    device).t().contiguous()).topk(10, dim=-1, sorted=True)
                            top_cand_logits_list.append(top_cand_logits)
                            top_cand_indices_list.append(top_cand_indices + chunk_idx * SPLIT_SIZE)
                            if len((top_cand_indices_list[chunk_idx] < 0).nonzero()) > 0:
                                import pdb
                                pdb.set_trace()
                        except:
                            # chunk still too large: halve and restart the whole pass
                            SPLIT_SIZE = int(SPLIT_SIZE / 2)
                            break
                    if len(top_cand_indices_list) == max_chunk:
                        # DIM (num_total_mentions, num_cand_entities); (num_total_mentions, num_cand_entities) -->
                        # top_top_cand_indices_shape indexes into top_cand_indices
                        top_cand_logits_shape, top_top_cand_indices_shape = torch.cat(
                            top_cand_logits_list, dim=-1).topk(num_cand_entities, dim=-1, sorted=True)
                        # make indices index into candidate_encoding
                        # DIM (num_total_mentions, max_chunk*num_cand_entities)
                        all_top_cand_indices = torch.cat(top_cand_indices_list, dim=-1)
                        # DIM (num_total_mentions, num_cand_entities)
                        top_cand_indices_shape = all_top_cand_indices.gather(-1, top_top_cand_indices_shape)
                        done = True
        else:
            # DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
            top_cand_logits_shape, top_cand_indices_shape = indexer.search_knn(embedding_ctxt.cpu().numpy(),
                                                                               num_cand_entities)
            top_cand_logits_shape = torch.tensor(top_cand_logits_shape).to(embedding_ctxt.device)
            top_cand_indices_shape = torch.tensor(top_cand_indices_shape).to(embedding_ctxt.device)
        # Scatter the flat per-mention results back into padded per-example
        # tensors using left_align_mask.
        # DIM (bs, max_num_pred_mentions, num_cand_entities)
        top_cand_logits = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1),
                                      top_cand_logits_shape.size(-1)).to(
            top_cand_logits_shape.device, top_cand_logits_shape.dtype)
        top_cand_logits[left_align_mask] = top_cand_logits_shape
        top_cand_indices = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1),
                                       top_cand_indices_shape.size(-1)).to(
            top_cand_indices_shape.device, top_cand_indices_shape.dtype)
        top_cand_indices[left_align_mask] = top_cand_indices_shape
        '''
        COMPUTE FINAL SCORES FOR EACH CAND-MENTION PAIR + PRUNE USING IT
        '''
        # Has NAN for impossible mentions...
        # log p(entity && mb) = log [p(entity|mention bounds) * p(mention bounds)] = log p(e|mb) + log p(mb)
        # DIM (bs, max_num_pred_mentions, num_cand_entities)
        scores = torch.log_softmax(top_cand_logits, -1) + torch.sigmoid(chosen_mention_logits.unsqueeze(-1)).log()
        '''
        DON'T NEED TO RESORT BY NEW SCORE -- DISTANCE PRESERVING (largest entity score still be largest entity score)
        '''
        for idx in range(len(batch[0])):
            # [(seqlen) x exs] <= (bsz, seqlen)
            context_inputs.append(context_input[idx][mask_ctxt[idx]].data.cpu().numpy())
            # [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
            nns.append(top_cand_indices[idx][left_align_mask[idx]].data.cpu().numpy())
            # [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
            dists.append(scores[idx][left_align_mask[idx]].data.cpu().numpy())
            # [(max_num_mentions, 2) x exs] <= (bsz, max_num_mentions=num_cand_mentions, 2)
            pred_mention_bounds.append(chosen_mention_bounds[idx][left_align_mask[idx]].data.cpu().numpy())
            # [(max_num_mentions,) x exs] <= (bsz, max_num_mentions=num_cand_mentions)
            mention_scores.append(chosen_mention_logits[idx][left_align_mask[idx]].data.cpu().numpy())
            # [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
            cand_scores.append(top_cand_logits[idx][left_align_mask[idx]].data.cpu().numpy())
    return nns, dists, pred_mention_bounds, mention_scores, cand_scores
def get_predictions(
    args, dataloader, biencoder_params, samples, nns, dists, mention_scores, cand_scores,
    pred_mention_bounds, id2title, threshold=-2.9, mention_threshold=-0.6931,
):
    """
    Threshold, sort and overlap-prune the raw biencoder outputs into final
    per-example predictions; when gold labels are present, also accumulate
    weak/strong matching statistics.

    Arguments:
        args, dataloader, biencoder_params, samples, nns, dists, pred_mention_bounds
    Returns:
        all_entity_preds,
        num_correct_weak, num_correct_strong, num_predicted, num_gold,
        num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
    """
    # save biencoder predictions and print precision/recalls
    num_correct_weak = 0
    num_correct_strong = 0
    num_predicted = 0
    num_gold = 0
    num_correct_weak_from_input_window = 0
    num_correct_strong_from_input_window = 0
    num_gold_from_input_window = 0
    all_entity_preds = []
    f = errors_f = None
    if getattr(args, 'save_preds_dir', None) is not None:
        # both files are opened together (or neither) -- see the close at the end
        save_biencoder_file = os.path.join(args.save_preds_dir, 'biencoder_outs.jsonl')
        f = open(save_biencoder_file, 'w')
        errors_f = open(os.path.join(args.save_preds_dir, 'biencoder_errors.jsonl'), 'w')
    # nns (List[Array[int]]) [(num_pred_mentions, cands_per_mention) x exs])
    # dists (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
    # pred_mention_bounds (List[Array[int]]) [(num_pred_mentions, 2) x exs]
    # cand_scores (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
    # mention_scores (List[Array[float]]) [(num_pred_mentions,) x exs])
    for batch_num, batch_data in enumerate(dataloader):
        batch_context = batch_data[0]
        if len(batch_data) > 1:
            # labeled dataloader also carries candidates, gold ids and mention idxs
            _, batch_cands, batch_label_ids, batch_mention_idxs, batch_mention_idx_masks = batch_data
        for b in range(len(batch_context)):
            i = batch_num * biencoder_params['eval_batch_size'] + b
            sample = samples[i]
            input_context = batch_context[b][batch_context[b] != 0].tolist()  # filter out padding
            # (num_pred_mentions, cands_per_mention)
            scores = dists[i] if args.threshold_type == "joint" else cand_scores[i]
            # NaN != NaN, so this keeps only mention rows whose top score is not NaN
            cands_mask = (scores[:, 0] == scores[:, 0])
            pred_entity_list = nns[i][cands_mask]
            if len(pred_entity_list) > 0:
                e_id = pred_entity_list[0]  # NOTE(review): unused
            distances = scores[cands_mask]
            # (num_pred_mentions, 2)
            entity_mention_bounds_idx = pred_mention_bounds[i][cands_mask]
            utterance = sample['text']
            # NOTE(review): an unrecognized threshold_type would leave
            # top_mentions_mask unbound (UnboundLocalError below)
            if args.threshold_type == "joint":
                # THRESHOLDING
                assert utterance is not None
                top_mentions_mask = (distances[:, 0] > threshold)
            elif args.threshold_type == "top_entity_by_mention":
                top_mentions_mask = (mention_scores[i] > mention_threshold)
            elif args.threshold_type == "thresholded_entity_by_mention":
                top_mentions_mask = (distances[:, 0] > threshold) & (mention_scores[i] > mention_threshold)
            _, sort_idxs = torch.tensor(distances[:, 0][top_mentions_mask]).sort(descending=True)
            # cands already sorted by score
            all_pred_entities = pred_entity_list[:, 0][top_mentions_mask]
            e_mention_bounds = entity_mention_bounds_idx[top_mentions_mask]
            chosen_distances = distances[:, 0][top_mentions_mask]
            if len(all_pred_entities) >= 2:
                all_pred_entities = all_pred_entities[sort_idxs]
                e_mention_bounds = e_mention_bounds[sort_idxs]
                chosen_distances = chosen_distances[sort_idxs]
            # prune mention overlaps
            e_mention_bounds_pruned = []
            all_pred_entities_pruned = []
            chosen_distances_pruned = []
            mention_masked_utterance = np.zeros(len(input_context))
            # ensure well-formed-ness, prune overlaps
            # greedily pick highest scoring, then prune all overlapping
            for idx, mb in enumerate(e_mention_bounds):
                mb[1] += 1  # prediction was inclusive, now make exclusive
                # check if in existing mentions
                if args.threshold_type != "top_entity_by_mention" and mention_masked_utterance[mb[0]:mb[1]].sum() >= 1:
                    continue
                e_mention_bounds_pruned.append(mb)
                all_pred_entities_pruned.append(all_pred_entities[idx])
                chosen_distances_pruned.append(float(chosen_distances[idx]))
                mention_masked_utterance[mb[0]:mb[1]] = 1
            input_context = input_context[1:-1]  # remove BOS and sep
            pred_triples = [(
                str(all_pred_entities_pruned[j]),
                int(e_mention_bounds_pruned[j][0]) - 1,  # -1 for BOS
                int(e_mention_bounds_pruned[j][1]) - 1,
            ) for j in range(len(all_pred_entities_pruned))]
            entity_results = {
                "id": sample["id"],
                "text": sample["text"],
                "scores": chosen_distances_pruned,
            }
            if 'label_id' in sample:
                # Get LABELS
                input_mention_idxs = batch_mention_idxs[b][batch_mention_idx_masks[b]].tolist()
                input_label_ids = batch_label_ids[b][batch_label_ids[b] != -1].tolist()
                assert len(input_label_ids) == len(input_mention_idxs)
                # NOTE(review): gold_mention_bounds is built but never used
                # (presumably a debugging aid)
                gold_mention_bounds = [
                    sample['text'][ment[0] - 10:ment[0]] + "[" + sample['text'][ment[0]:ment[1]] + "]" + sample['text'][
                        ment[1]:ment[
                            1] + 10]
                    for ment in sample['mentions']
                ]
                # GET ALIGNED MENTION_IDXS (input is slightly different to model) between ours and gold labels -- also have to account for BOS
                gold_input = sample['tokenized_text_ids']
                # return first instance of my_input in gold_input
                for my_input_start in range(len(gold_input)):
                    if (
                        gold_input[my_input_start] == input_context[0] and
                        gold_input[my_input_start:my_input_start + len(input_context)] == input_context
                    ):
                        break
                # add alignment factor (my_input_start) to predicted mention triples
                pred_triples = [(
                    triple[0],
                    triple[1] + my_input_start, triple[2] + my_input_start,
                ) for triple in pred_triples]
                gold_triples = [(
                    str(sample['label_id'][j]),
                    sample['tokenized_mention_idxs'][j][0], sample['tokenized_mention_idxs'][j][1],
                ) for j in range(len(sample['label_id']))]
                num_overlap_weak, num_overlap_strong = entity_linking_tp_with_overlap(gold_triples, pred_triples)
                num_correct_weak += num_overlap_weak
                num_correct_strong += num_overlap_strong
                num_predicted += len(all_pred_entities_pruned)
                num_gold += len(sample["label_id"])
                # compute number correct given the input window
                pred_input_window_triples = [(
                    str(all_pred_entities_pruned[j]),
                    int(e_mention_bounds_pruned[j][0]), int(e_mention_bounds_pruned[j][1]),
                ) for j in range(len(all_pred_entities_pruned))]
                gold_input_window_triples = [(
                    str(input_label_ids[j]),
                    input_mention_idxs[j][0], input_mention_idxs[j][1] + 1,
                ) for j in range(len(input_label_ids))]
                num_overlap_weak_window, num_overlap_strong_window = entity_linking_tp_with_overlap(
                    gold_input_window_triples, pred_input_window_triples)
                num_correct_weak_from_input_window += num_overlap_weak_window
                num_correct_strong_from_input_window += num_overlap_strong_window
                num_gold_from_input_window += len(input_mention_idxs)
                entity_results.update({
                    "pred_tuples_string": [
                        [id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
                        for triple in pred_triples
                    ],
                    "gold_tuples_string": [
                        [id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
                        for triple in gold_triples
                    ],
                    "pred_triples": pred_triples,
                    "gold_triples": gold_triples,
                    "tokens": input_context,
                })
                # log examples with any missed/spurious mention to the errors file
                if errors_f is not None and (
                        num_overlap_weak != len(gold_triples) or num_overlap_weak != len(pred_triples)):
                    errors_f.write(json.dumps(entity_results) + "\n")
            else:
                entity_results.update({
                    "pred_tuples_string": [
                        [id2title[triple[0]], tokenizer.decode(input_context[triple[1]:triple[2]])]
                        for triple in pred_triples
                    ],
                    "pred_triples": pred_triples,
                    "tokens": input_context,
                })
            all_entity_preds.append(entity_results)
            if f is not None:
                f.write(
                    json.dumps(entity_results) + "\n"
                )
    # f and errors_f are opened together above, so guarding on f alone is safe
    if f is not None:
        f.close()
        errors_f.close()
    return (
        all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
        num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
    )
def _save_biencoder_outs(save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime):
np.save(os.path.join(save_preds_dir, "biencoder_nns.npy"), nns)
np.save(os.path.join(save_preds_dir, "biencoder_dists.npy"), dists)
np.save(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), pred_mention_bounds)
np.save(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), cand_scores)
np.save(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), mention_scores)
with open(os.path.join(save_preds_dir, "runtime.txt"), "w") as wf:
wf.write(str(runtime))
def _load_biencoder_outs(save_preds_dir):
nns = np.load(os.path.join(save_preds_dir, "biencoder_nns.npy"), allow_pickle=True)
dists = np.load(os.path.join(save_preds_dir, "biencoder_dists.npy"), allow_pickle=True)
pred_mention_bounds = np.load(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), allow_pickle=True)
cand_scores = np.load(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), allow_pickle=True)
mention_scores = np.load(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), allow_pickle=True)
runtime = float(open(os.path.join(args.save_preds_dir, "runtime.txt")).read())
return nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime
def display_metrics(
    num_correct, num_predicted, num_gold, prefix="",
):
    """Print precision, recall and F1 computed from raw counts.

    Zero denominators yield 0 instead of raising ZeroDivisionError.
    """
    precision = float(num_correct) / float(num_predicted) if num_predicted != 0 else 0
    recall = float(num_correct) / float(num_gold) if num_gold != 0 else 0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0
    print("{0}precision = {1} / {2} = {3}".format(prefix, num_correct, num_predicted, precision))
    print("{0}recall = {1} / {2} = {3}".format(prefix, num_correct, num_gold, recall))
    print("{0}f1 = {1}".format(prefix, f1))
def load_models(args, logger):
    """Load the biencoder model, its config, and the candidate-entity data.

    Returns:
        (biencoder, biencoder_params, candidate_encoding, indexer,
         id2title, id2text, id2wikidata)
    """
    # load biencoder model
    if logger: logger.info("Loading biencoder model")
    try:
        with open(args.biencoder_config) as json_file:
            biencoder_params = json.load(json_file)
    except json.decoder.JSONDecodeError:
        # Config is not valid JSON -- assume a one-line Python-literal dict
        # and rewrite quotes/booleans/None into JSON before parsing.
        with open(args.biencoder_config) as json_file:
            for line in json_file:
                line = line.replace("'", "\"")
                line = line.replace("True", "true")
                line = line.replace("False", "false")
                line = line.replace("None", "null")
                biencoder_params = json.loads(line)
                break  # only the first line is parsed
    # Override the saved config with runtime arguments.
    biencoder_params["path_to_model"] = args.biencoder_model
    biencoder_params["cand_token_ids_path"] = args.cand_token_ids_path
    biencoder_params["eval_batch_size"] = getattr(args, 'eval_batch_size', 8)
    biencoder_params["no_cuda"] = (not getattr(args, 'use_cuda', False) or not torch.cuda.is_available())
    if biencoder_params["no_cuda"]:
        biencoder_params["data_parallel"] = False
    biencoder_params["load_cand_enc_only"] = False
    if getattr(args, 'max_context_length', None) is not None:
        biencoder_params["max_context_length"] = args.max_context_length
    biencoder = load_biencoder(biencoder_params)
    # Keep the DataParallel wrapping consistent with the device setup.
    if biencoder_params["no_cuda"] and type(biencoder.model).__name__ == 'DataParallel':
        biencoder.model = biencoder.model.module
    elif not biencoder_params["no_cuda"] and type(biencoder.model).__name__ != 'DataParallel':
        biencoder.model = torch.nn.DataParallel(biencoder.model)
    # load candidate entities
    if logger: logger.info("Loading candidate entities")
    (
        candidate_encoding,
        indexer,
        id2title,
        id2text,
        id2wikidata,
    ) = _load_candidates(
        args.entity_catalogue, args.entity_encoding,
        args.faiss_index, args.index_path, logger=logger,
    )
    return (
        biencoder,
        biencoder_params,
        candidate_encoding,
        indexer,
        id2title,
        id2text,
        id2wikidata,
    )
def run(
    args,
    logger,
    biencoder,
    biencoder_params,
    candidate_encoding,
    indexer,
    id2title,
    id2text,
    id2wikidata,
    test_data=None,
):
    """Entry point for entity-linking inference.

    Interactive mode (args.interactive) reads utterances from stdin in a
    loop; otherwise runs over args.test_mentions (or ``test_data``),
    optionally caching biencoder outputs under args.save_preds_dir, and
    prints precision/recall when gold labels are available.

    Returns:
        all_entity_preds (batch mode only; interactive mode loops forever).
    """
    if not test_data and not getattr(args, 'test_mentions', None) and not getattr(args, 'interactive', None):
        # NOTE(review): missing space between "(--test_mentions)" and "and"
        # in the concatenated message below -- left unchanged here.
        msg = (
            "ERROR: either you start BLINK with the "
            "interactive option (-i) or you pass in input test mentions (--test_mentions)"
            "and test entities (--test_entities) or manually pass in test data"
        )
        raise ValueError(msg)
    if getattr(args, 'save_preds_dir', None) is not None and not os.path.exists(args.save_preds_dir):
        os.makedirs(args.save_preds_dir)
        print("Saving preds in {}".format(args.save_preds_dir))
    # NOTE(review): stopping_condition is never set to True in interactive
    # mode, so that loop only ends on KeyboardInterrupt/EOF.
    stopping_condition = False
    threshold = float(args.threshold)
    if args.threshold_type == "top_entity_by_mention":
        assert args.mention_threshold is not None
        mention_threshold = float(args.mention_threshold)
    else:
        mention_threshold = threshold
    if args.interactive:
        while not stopping_condition:
            if logger: logger.info("interactive mode")
            # Interactive
            text = input("insert text: ")
            # Prepare data
            samples = [{"id": "-1", "text": text}]
            dataloader = _process_biencoder_dataloader(
                samples, biencoder.tokenizer, biencoder_params, logger,
            )
            # Run inference
            nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
                args, biencoder, dataloader, candidate_encoding, samples=samples,
                num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
                device="cpu" if biencoder_params["no_cuda"] else "cuda",
                threshold=mention_threshold, indexer=indexer,
            )
            # Re-threshold and redisplay until the user moves on ("n").
            action = "c"
            while action == "c":
                all_entity_preds = get_predictions(
                    args, dataloader, biencoder_params,
                    samples, nns, dists, mention_scores, cand_scores,
                    pred_mention_bounds, id2title, threshold=threshold,
                    mention_threshold=mention_threshold,
                )[0]
                pred_triples = all_entity_preds[0]['pred_triples']
                _print_colorful_text(all_entity_preds[0]['tokens'], tokenizer, pred_triples)
                _print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata)
                action = input("Next question [n] / change threshold [c]: ")
                while action != "n" and action != "c":
                    action = input("Next question [n] / change threshold [c]: ")
                if action == "c":
                    print("Current threshold {}".format(threshold))
                    while True:
                        threshold = input("New threshold (increase for less cands, decrease for more cands): ")
                        try:
                            threshold = float(threshold)
                            break
                        except:
                            # non-numeric input: prompt again
                            print("Error! Expected float, got {}. Try again.".format(threshold))
    else:
        if not test_data:
            samples, num_unk = _get_test_samples(
                args.test_mentions, args.test_entities, logger,
            )
        else:
            samples = test_data
        if logger: logger.info("Preparing data for biencoder")
        dataloader = _process_biencoder_dataloader(
            samples, biencoder.tokenizer, biencoder_params, None,
        )
        stopping_condition = True
        # prepare the data for biencoder
        # run biencoder if predictions not saved
        if not getattr(args, 'save_preds_dir', None) or not os.path.exists(
                os.path.join(args.save_preds_dir, 'biencoder_mention_bounds.npy')):
            # run biencoder
            if logger: logger.info("Running biencoder...")
            start_time = time.time()
            nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
                args, biencoder, dataloader, candidate_encoding, samples=samples,
                num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
                device="cpu" if biencoder_params["no_cuda"] else "cuda",
                threshold=mention_threshold, indexer=indexer,
            )
            end_time = time.time()
            if logger: logger.info("Finished running biencoder")
            runtime = end_time - start_time
            if getattr(args, 'save_preds_dir', None):
                _save_biencoder_outs(
                    args.save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime,
                )
        else:
            # cached run: reload outputs saved by _save_biencoder_outs
            nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime = _load_biencoder_outs(
                args.save_preds_dir)
        assert len(samples) == len(nns) == len(dists) == len(pred_mention_bounds) == len(cand_scores) == len(
            mention_scores)
        (
            all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
            num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window,
        ) = get_predictions(
            args, dataloader, biencoder_params,
            samples, nns, dists, mention_scores, cand_scores,
            pred_mention_bounds, id2title, threshold=threshold,
            mention_threshold=mention_threshold,
        )
        print("*--------*")
        if num_gold > 0:
            print("WEAK MATCHING")
            display_metrics(num_correct_weak, num_predicted, num_gold)
            print("Just entities within input window...")
            display_metrics(num_correct_weak_from_input_window, num_predicted, num_gold_from_input_window)
            print("*--------*")
            print("STRONG MATCHING")
            display_metrics(num_correct_strong, num_predicted, num_gold)
            print("Just entities within input window...")
            display_metrics(num_correct_strong_from_input_window, num_predicted, num_gold_from_input_window)
            print("*--------*")
        print("biencoder runtime = {}".format(runtime))
        print("*--------*")
        return all_entity_preds
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug_biencoder", "-db", action="store_true", default=False, help="Debug biencoder"
)
# evaluation mode
parser.add_argument(
"--get_predictions", "-p", action="store_true", default=False,
help="Getting predictions mode. Does not filter at crossencoder step."
)
parser.add_argument(
"--interactive", "-i", action="store_true", help="Interactive mode."
)
# test_data
parser.add_argument(
"--test_mentions", dest="test_mentions", type=str, help="Test Dataset."
)
parser.add_argument(
"--test_entities", dest="test_entities", type=str, help="Test Entities.",
default="models/entity.jsonl", # ALL WIKIPEDIA!
)
parser.add_argument(
"--save_preds_dir", type=str, help="Directory to save model predictions to."
)
parser.add_argument(
"--mention_threshold", type=str, default=None,
dest="mention_threshold",
help="Used if threshold type is `top_entity_by_mention`. "
"Threshold for mention score, for which mentions will be pruned if they fall under that threshold. "
"Set to '-inf' to get all mentions."
)
parser.add_argument(
"--threshold", type=str, default="-4.5",
dest="threshold",
help="Threshold for final joint score, for which examples will be pruned if they fall under that threshold. "
"Set to `-inf` to get all entities."
)
parser.add_argument(
"--num_cand_mentions", type=int, default=50,
help="Number of mention candidates to consider per example (at most)"
)
parser.add_argument(
"--num_cand_entities", type=int, default=10,
help="Number of entity candidates to consider per mention (at most)"
)
parser.add_argument(
"--threshold_type", type=str, default="joint",
choices=["joint", "top_entity_by_mention"],
help="How to threshold the final candidates. "
"`top_entity_by_mention`: get top candidate (with entity score) for each predicted mention bound. "
"`joint`: by thresholding joint score."
)
# biencoder
parser.add_argument(
"--biencoder_model",
dest="biencoder_model",
type=str,
default="models/elq_wiki_large.bin",
help="Path to the biencoder model.",
)
parser.add_argument(
"--biencoder_config",
dest="biencoder_config",
type=str,
default="models/elq_large_params.txt",
help="Path to the biencoder configuration.",
)
parser.add_argument(
"--cand_token_ids_path",
dest="cand_token_ids_path",
type=str,
default="models/entity_token_ids_128.t7", # ALL WIKIPEDIA!
help="Path to tokenized entity catalogue",
)
parser.add_argument(
"--entity_catalogue",
dest="entity_catalogue",
type=str,
default="models/entity.jsonl", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--entity_encoding",
dest="entity_encoding",
type=str,
default="models/all_entities_large.t7", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--eval_batch_size",
dest="eval_batch_size",
type=int,
default=8,
help="Crossencoder's batch size for evaluation",
)
parser.add_argument(
"--faiss_index",
dest="faiss_index",
type=str,
default="hnsw",
choices=["hnsw", "flat", "ivfflat", "none"],
help="whether to use faiss index",
)
parser.add_argument(
"--index_path",
dest="index_path",
type=str,
default="models/faiss_hnsw_index.pkl",
help="path to load indexer",
)
parser.add_argument(
"--max_context_length",
dest="max_context_length",
type=int,
help="Maximum length of context. (Don't set to inherit from training config)",
)
# output folder
parser.add_argument(
"--output_path",
dest="output_path",
type=str,
default="output",
help="Path to the output.",
)
parser.add_argument(
"--use_cuda", dest="use_cuda", action="store_true", default=False, help="run on gpu"
)
parser.add_argument(
"--no_logger", dest="no_logger", action="store_true", default=False, help="don't log progress"
)
args = parser.parse_args()
logger = None
if not args.no_logger:
logger = utils.get_logger(args.output_path)
logger.setLevel(10)
models = load_models(args, logger)
run(args, logger, *models)
|
{"hexsha": "54e52bd548e9bb31c2a4e505fe1dee058b37bb1d", "size": 42181, "ext": "py", "lang": "Python", "max_stars_repo_path": "elq/main_dense.py", "max_stars_repo_name": "lorenzo-sasso/BLINK", "max_stars_repo_head_hexsha": "ec26995da10a30d3ab0c520b0377faef329a7620", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "elq/main_dense.py", "max_issues_repo_name": "lorenzo-sasso/BLINK", "max_issues_repo_head_hexsha": "ec26995da10a30d3ab0c520b0377faef329a7620", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "elq/main_dense.py", "max_forks_repo_name": "lorenzo-sasso/BLINK", "max_forks_repo_head_hexsha": "ec26995da10a30d3ab0c520b0377faef329a7620", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3069815195, "max_line_length": 142, "alphanum_fraction": 0.6043953439, "include": true, "reason": "import numpy", "num_tokens": 9406}
|
Require Import Coq.Bool.Bool.
Require Import Coq.ZArith.ZArith.
Require Import Coq.Lists.List. Import ListNotations.
Require Import bedrock2.MetricLogging.
Require Import coqutil.Macros.unique.
Require Import bedrock2.Memory.
Require Import compiler.util.Common.
Require Import coqutil.Decidable.
Require Import coqutil.Datatypes.PropSet.
Require Import coqutil.Byte.
Require Import bedrock2.Syntax.
Require Import coqutil.Z.Lia.
Require Import coqutil.Tactics.Simp.
Require Import bedrock2.Semantics.
Require Import coqutil.Datatypes.ListSet.
Require Import coqutil.Map.OfListWord.
Require Import coqutil.Word.Bitwidth.
Require Import coqutil.Word.Interface.
Local Hint Mode Word.Interface.word - : typeclass_instances.
(* Binary comparison operators allowed in branch conditions.
   The names match RISC-V conditional-branch mnemonics (eq/ne, signed and
   unsigned lt/ge) -- presumably chosen so each maps to one branch
   instruction; confirm against the instruction-emission code. *)
Inductive bbinop: Type :=
| BEq
| BNe
| BLt
| BGe
| BLtu
| BGeu.
(* Abstract syntax of FlatImp, parameterized over the type of variable
   names, plus size estimates, modified-variable computation, and
   "all variables satisfy P" predicates (in both Prop and bool form). *)
Section Syntax.
  Context {varname: Type}.

  (* Right-hand operand of SOp: a variable or an immediate constant. *)
  Inductive operand: Type :=
  | Var (v: varname)
  | Const (c: Z)
  .

  (* Branch conditions: binary comparison of two variables, or a
     "is nonzero" test of a single variable. *)
  Inductive bcond: Type :=
  | CondBinary (op: bbinop) (x y: varname)
  | CondNez (x: varname)
  .

  (* FlatImp statements. "Flat" means expressions are not nested: every
     operand is a variable or a constant, so each statement corresponds to
     a small, fixed shape of target code. *)
  Inductive stmt: Type :=
  | SLoad(sz: Syntax.access_size)(x: varname)(a: varname)(offset: Z)
  | SStore(sz: Syntax.access_size)(a: varname)(v: varname)(offset: Z)
  | SInlinetable(sz: Syntax.access_size)(x: varname)(t: list Byte.byte)(i: varname)
  | SStackalloc(x : varname)(nbytes: Z)(body: stmt)
  | SLit(x: varname)(v: Z)
  | SOp(x: varname)(op: bopname)(y: varname)(z: operand)
  | SSet(x y: varname)
  | SIf(cond: bcond)(bThen bElse: stmt)
  | SLoop(body1: stmt)(cond: bcond)(body2: stmt)
  | SSeq(s1 s2: stmt)
  | SSkip
  | SCall(binds: list varname)(f: String.string)(args: list varname)
  | SInteract(binds: list varname)(a: String.string)(args: list varname).

  (* One unfolding step of the statement-size estimate. The numbers appear
     to bound the number of target instructions emitted per statement
     (e.g. 8 for SLit, 1 extra per branch/jump) -- confirm against the
     compiler's emit functions. [rec] is the recursive call, supplied by
     [stmt_size] below so that the body can also be reasoned about
     non-recursively via [stmt_size_unfold]. *)
  Definition stmt_size_body(rec: stmt -> Z)(s: stmt): Z :=
    match s with
    | SLoad sz x a o => 1
    | SStore sz a v o => 1
    | SInlinetable sz x t i => 1 + (Z.of_nat (length t) + 3) / 4 + 2
    | SStackalloc x a body => 1 + rec body
    | SLit x v => 8
    | SOp x op y z => 2
    | SSet x y => 1
    | SIf cond bThen bElse => 1 + (rec bThen) + 1 + (rec bElse)
    | SLoop body1 cond body2 => (rec body1) + 1 + (rec body2) + 1
    | SSeq s1 s2 => (rec s1) + (rec s2)
    | SSkip => 0
    (* TODO only works because all registers are callee-saved.
       And we still need to account for the code emitted by compile_function somewhere. *)
    | SCall binds f args => Z.of_nat (List.length args) + 1 + Z.of_nat (List.length binds)
    | SInteract binds f args => 7 (* TODO don't hardcode a magic number *)
    end.

  Fixpoint stmt_size(s: stmt): Z := stmt_size_body stmt_size s.

  (* Fixpoint unfolding lemma, useful because [simpl] on a Fixpoint is
     often too eager or too reluctant. *)
  Lemma stmt_size_unfold : forall s, stmt_size s = stmt_size_body stmt_size s.
  Proof. destruct s; reflexivity. Qed.

  Local Arguments Z.add _ _ : simpl never.

  (* Size estimates are never negative. *)
  Lemma stmt_size_nonneg: forall s, 0 <= stmt_size s.
  Proof.
    induction s; simpl; try blia.
    (* SInlinetable case: the division needs a separate positivity fact. *)
    assert (0 <= (Z.of_nat (Datatypes.length t) + 3) / 4). {
      apply Z.div_pos; blia.
    }
    blia.
  Qed.

  (* Duplicate-free list of variables a statement may write, using [veq]
     to decide variable equality for the union operations. *)
  Fixpoint modVars_as_list(veq: varname -> varname -> bool)(s: stmt): list varname :=
    match s with
    | SSkip | SStore _ _ _ _ => []
    | SStackalloc x n body => list_union veq [x] (modVars_as_list veq body)
    | SLoad _ x _ _ | SLit x _ | SOp x _ _ _ | SSet x _ | SInlinetable _ x _ _ => [x]
    | SIf _ s1 s2 | SLoop s1 _ s2 | SSeq s1 s2 =>
        list_union veq (modVars_as_list veq s1) (modVars_as_list veq s2)
    | SCall binds _ _ | SInteract binds _ _ => list_union veq binds []
    end.

  (* Generic "every variable of a condition satisfies P" combinator,
     abstracted over the result type R so it can be instantiated with
     both Prop//\ and bool/andb below. *)
  Definition ForallVars_bcond_gen{R: Type}(and: R -> R -> R)(P: varname -> R)(cond: bcond): R :=
    match cond with
    | CondBinary _ x y => and (P x) (P y)
    | CondNez x => P x
    end.

  (* Same idea for statements; T is the "trivially true" element, and
     P_calls handles the bind/arg lists of calls and external interactions. *)
  Definition Forall_vars_stmt_gen{R: Type}(T: R)(and: R -> R -> R)
             (P_bcond: bcond -> R)
             (P_vars: varname -> R)
             (P_calls: list varname -> list varname -> R): stmt -> R :=
    fix rec s :=
      match s with
      | SLoad _ x a _ => and (P_vars x) (P_vars a)
      | SStore _ a x _ => and (P_vars a) (P_vars x)
      | SInlinetable _ x _ i => and (P_vars x) (P_vars i)
      | SStackalloc x n body => and (P_vars x) (rec body)
      | SLit x _ => P_vars x
      | SOp x _ y z => let op_z := match z with
                                   | Var v => P_vars v
                                   | Const _ => T
                                   end in
                       and (P_vars x) (and (P_vars y) (op_z))
      | SSet x y => and (P_vars x) (P_vars y)
      | SIf c s1 s2 => and (P_bcond c) (and (rec s1) (rec s2))
      | SLoop s1 c s2 => and (P_bcond c) (and (rec s1) (rec s2))
      | SSeq s1 s2 => and (rec s1) (rec s2)
      | SSkip => T
      | SCall binds _ args => P_calls binds args
      | SInteract binds _ args => P_calls binds args
      end.

  (* Prop-valued instantiations. Note the call case additionally bounds
     the number of binds/args by 8 (presumably the number of argument
     registers available -- confirm in the register-allocation phase). *)
  Definition ForallVars_bcond(P_vars: varname -> Prop)(cond: bcond): Prop :=
    Eval unfold ForallVars_bcond_gen in ForallVars_bcond_gen and P_vars cond.

  Definition Forall_vars_stmt(P_vars: varname -> Prop): stmt -> Prop :=
    Eval unfold Forall_vars_stmt_gen in
      Forall_vars_stmt_gen True and (ForallVars_bcond P_vars) P_vars
        (fun binds args => (List.length binds <= 8)%nat /\
                           (List.length args <= 8)%nat /\
                           Forall P_vars binds /\
                           Forall P_vars args).

  (* Boolean (decidable) instantiations, mirroring the Prop versions. *)
  Definition forallbVars_bcond(P_vars: varname -> bool)(cond: bcond): bool :=
    Eval unfold ForallVars_bcond_gen in ForallVars_bcond_gen andb P_vars cond.

  Definition forallb_vars_stmt(P_vars: varname -> bool): stmt -> bool :=
    Eval unfold Forall_vars_stmt_gen in
      Forall_vars_stmt_gen true andb (forallbVars_bcond P_vars) P_vars
        (fun binds args => (List.length binds <=? 8)%nat &&
                           (List.length args <=? 8)%nat &&
                           forallb P_vars binds &&
                           forallb P_vars args).

  (* The boolean check agrees with the Prop predicate whenever the
     per-variable checker [p] reflects [P]. *)
  Lemma forallb_vars_stmt_correct
        (P: varname -> Prop)(p: varname -> bool)
        (p_correct: forall x, p x = true <-> P x):
    forall s, forallb_vars_stmt p s = true <-> Forall_vars_stmt P s.
  Proof.
    assert (p_correct_fw: forall x, p x = true -> P x). {
      intros. eapply p_correct. assumption.
    }
    assert (p_correct_bw: forall x, P x -> p x = true). {
      intros. eapply p_correct. assumption.
    }
    clear p_correct.
    induction s; split; simpl; intros; unfold ForallVars_bcond, forallbVars_bcond in *;
      repeat match goal with
             | c: bcond |- _ => destruct c
             | H: andb _ _ = true |- _ => eapply Bool.andb_true_iff in H
             | H: (_ <=? _)%nat = true |- _ => eapply Nat.leb_le in H
             | H: _ /\ _ |- _ => destruct H
             | H: _ <-> _ |- _ => destruct H
             | |- andb _ _ = true => apply Bool.andb_true_iff
             | |- _ /\ _ => split
             | |- (_ <=? _)%nat = true => eapply Nat.leb_le
             | y: operand |- _ => destruct y
             end;
      eauto using List.Forall_to_forallb, List.forallb_to_Forall.
  Qed.

  (* Both predicates are monotone in P. *)
  Lemma ForallVars_bcond_impl: forall (P Q: varname -> Prop),
      (forall x, P x -> Q x) ->
      forall s, ForallVars_bcond P s -> ForallVars_bcond Q s.
  Proof.
    intros. destruct s; simpl in *; intuition eauto.
  Qed.

  Lemma Forall_vars_stmt_impl: forall (P Q: varname -> Prop),
      (forall x, P x -> Q x) ->
      forall s, Forall_vars_stmt P s -> Forall_vars_stmt Q s.
  Proof.
    induction s; intros; simpl in *;
      repeat match goal with
             | y : operand |- _ => destruct y
             | _ => intuition eauto using ForallVars_bcond_impl, Forall_impl
             end.
  Qed.
End Syntax.

(* Outside the section, make the varname parameter explicit. *)
Arguments bcond: clear implicits.
Arguments stmt: clear implicits.

Local Open Scope Z_scope.
(* Evaluation of branch conditions and the set of variables a statement
   may modify, over an abstract word/memory/locals interface. *)
Section FlatImp1.
  Context {varname: Type} {varname_eqb: varname -> varname -> bool}.
  Context {width: Z} {BW: Bitwidth width} {word: word.word width}.
  Context {mem: map.map word byte} {locals: map.map varname word}
          {env: map.map String.string (list varname * list varname * stmt varname)}.
  Context {ext_spec: ExtSpec}.

  Section WithEnv.
    Variable (e: env).

    (* Evaluate a comparison operator on two word values. *)
    Definition eval_bbinop(op: bbinop)(x y: word): bool :=
      match op with
      | BEq => word.eqb x y
      | BNe => negb (word.eqb x y)
      | BLt => word.lts x y
      | BGe => negb (word.lts x y)
      | BLtu => word.ltu x y
      | BGeu => negb (word.ltu x y)
      end.

    (* Evaluate a branch condition in a locals map; None if some variable
       the condition mentions is unbound. *)
    Definition eval_bcond(st: locals)(cond: bcond varname): option bool :=
      match cond with
      | CondBinary op x y =>
          match map.get st x, map.get st y with
          | Some mx, Some my => Some (eval_bbinop op mx my)
          | _, _ => None
          end
      | CondNez x =>
          match map.get st x with
          | Some mx => Some (negb (word.eqb mx (word.of_Z 0)))
          | None => None
          end
      end.
  End WithEnv.

  (* returns the set of modified vars *)
  Fixpoint modVars(s: stmt varname): set varname :=
    match s with
    | SLoad sz x y o => singleton_set x
    | SStore sz x y o => empty_set
    | SInlinetable sz x t i => singleton_set x
    | SStackalloc x n body => union (singleton_set x) (modVars body)
    | SLit x v => singleton_set x
    | SOp x op y z => singleton_set x
    | SSet x y => singleton_set x
    | SIf cond bThen bElse =>
        union (modVars bThen) (modVars bElse)
    | SLoop body1 cond body2 =>
        union (modVars body1) (modVars body2)
    | SSeq s1 s2 =>
        union (modVars s1) (modVars s2)
    | SSkip => empty_set
    | SCall binds funcname args => of_list binds
    | SInteract binds funcname args => of_list binds
    end.

  (* Set of variables read by a branch condition. *)
  Definition accessedVarsBcond(cond: bcond varname): set varname :=
    match cond with
    | CondBinary _ x y =>
        union (singleton_set x) (singleton_set y)
    | CondNez x =>
        singleton_set x
    end.
End FlatImp1.
(* Big-step "omni-semantics" of FlatImp: [exec s t m l mc post] holds when
   every execution of statement s from trace t, memory m, locals l, and
   metrics mc terminates in a state satisfying the postcondition [post].
   Metrics (instruction/load/store/jump counts) are threaded through to
   support cost reasoning. *)
Module exec.
  Section FlatImpExec.
    Context {varname: Type} {varname_eqb: varname -> varname -> bool}.
    Context {width: Z} {BW: Bitwidth width} {word: word.word width}.
    Context {mem: map.map word byte} {locals: map.map varname word}
            {env: map.map String.string (list varname * list varname * stmt varname)}.
    Context {ext_spec: ExtSpec}.
    Context {varname_eq_spec: EqDecider varname_eqb}
            {word_ok: word.ok word}
            {mem_ok: map.ok mem}
            {locals_ok: map.ok locals}
            {env_ok: map.ok env}
            {ext_spec_ok: ext_spec.ok ext_spec}.
    Variable (e: env).
    Local Notation metrics := MetricLog.

    (* COQBUG(unification finds Type instead of Prop and fails to downgrade *)
    Implicit Types post : trace -> mem -> locals -> metrics -> Prop.

    (* Look up an operand's value: variables in the locals map, constants
       converted to words. *)
    Definition lookup_op_locals (l: locals) (o: operand) :=
      match o with
      | Var vo => map.get l vo
      | Const co => Some (word.of_Z co)
      end.

    (* alternative semantics which allow non-determinism *)
    Inductive exec:
      stmt varname ->
      trace -> mem -> locals -> metrics ->
      (trace -> mem -> locals -> metrics -> Prop)
      -> Prop :=
    (* External interaction: give away mGive, then for every possible
       answer of the external spec, the received memory and result values
       must lead to a post-state. *)
    | interact: forall t m mKeep mGive l mc action argvars argvals resvars outcome post,
        map.split m mKeep mGive ->
        map.getmany_of_list l argvars = Some argvals ->
        ext_spec t mGive action argvals outcome ->
        (forall mReceive resvals,
            outcome mReceive resvals ->
            exists l', map.putmany_of_list_zip resvars resvals l = Some l' /\
                       forall m', map.split m' mKeep mReceive ->
                                  post (((mGive, action, argvals), (mReceive, resvals)) :: t) m' l'
                                       (addMetricInstructions 1
                                          (addMetricStores 1
                                             (addMetricLoads 2 mc)))) ->
        exec (SInteract resvars action argvars) t m l mc post
    (* Function call: run the body in a fresh locals map built from the
       arguments; every outcome must yield return values that can be bound
       back into the caller's locals. *)
    | call: forall t m l mc binds fname args params rets fbody argvs st0 post outcome,
        map.get e fname = Some (params, rets, fbody) ->
        map.getmany_of_list l args = Some argvs ->
        map.putmany_of_list_zip params argvs map.empty = Some st0 ->
        exec fbody t m st0 (addMetricInstructions 100 (addMetricJumps 100 (addMetricLoads 100 (addMetricStores 100 mc)))) outcome ->
        (forall t' m' mc' st1,
            outcome t' m' st1 mc' ->
            exists retvs l',
              map.getmany_of_list st1 rets = Some retvs /\
              map.putmany_of_list_zip binds retvs l = Some l' /\
              post t' m' l' (addMetricInstructions 100 (addMetricJumps 100 (addMetricLoads 100 (addMetricStores 100 mc'))))) ->
        exec (SCall binds fname args) t m l mc post
    (* TODO think about a non-fixed bound on the cost of function preamble and postamble *)
    | load: forall t m l mc sz x a o v addr post,
        map.get l a = Some addr ->
        load sz m (word.add addr (word.of_Z o)) = Some v ->
        post t m (map.put l x v)
             (addMetricLoads 2
                (addMetricInstructions 1 mc)) ->
        exec (SLoad sz x a o) t m l mc post
    | store: forall t m m' mc l sz a o addr v val post,
        map.get l a = Some addr ->
        map.get l v = Some val ->
        store sz m (word.add addr (word.of_Z o)) val = Some m' ->
        post t m' l
             (addMetricLoads 1
                (addMetricInstructions 1
                   (addMetricStores 1 mc))) ->
        exec (SStore sz a v o) t m l mc post
    | inlinetable: forall sz x table i v index t m l mc post,
        (* compiled riscv code uses x as a tmp register and this shouldn't overwrite i *)
        x <> i ->
        map.get l i = Some index ->
        load sz (map.of_list_word table) index = Some v ->
        post t m (map.put l x v)
             (addMetricLoads 4
                (addMetricInstructions 3
                   (addMetricJumps 1 mc))) ->
        exec (SInlinetable sz x table i) t m l mc post
    (* Stack allocation: for every possible placement of the fresh stack
       bytes, the body must run and give back a memory that splits into a
       small part (kept) and the stack part (freed). *)
    | stackalloc: forall t mSmall l mc x n body post,
        n mod (bytes_per_word width) = 0 ->
        (forall a mStack mCombined,
            anybytes a n mStack ->
            map.split mCombined mSmall mStack ->
            exec body t mCombined (map.put l x a) (addMetricLoads 1 (addMetricInstructions 1 mc))
                 (fun t' mCombined' l' mc' =>
                    exists mSmall' mStack',
                      anybytes a n mStack' /\
                      map.split mCombined' mSmall' mStack' /\
                      post t' mSmall' l' mc')) ->
        exec (SStackalloc x n body) t mSmall l mc post
    | lit: forall t m l mc x v post,
        post t m (map.put l x (word.of_Z v))
             (addMetricLoads 8
                (addMetricInstructions 8 mc)) ->
        exec (SLit x v) t m l mc post
    | op: forall t m l mc x op y y' z z' post,
        map.get l y = Some y' ->
        lookup_op_locals l z = Some z' ->
        post t m (map.put l x (interp_binop op y' z'))
             (addMetricLoads 2
                (addMetricInstructions 2 mc)) ->
        exec (SOp x op y z) t m l mc post
    | set: forall t m l mc x y y' post,
        map.get l y = Some y' ->
        post t m (map.put l x y')
             (addMetricLoads 1
                (addMetricInstructions 1 mc)) ->
        exec (SSet x y) t m l mc post
    | if_true: forall t m l mc cond bThen bElse post,
        eval_bcond l cond = Some true ->
        exec bThen t m l
             (addMetricLoads 2
                (addMetricInstructions 2
                   (addMetricJumps 1 mc))) post ->
        exec (SIf cond bThen bElse) t m l mc post
    | if_false: forall t m l mc cond bThen bElse post,
        eval_bcond l cond = Some false ->
        exec bElse t m l
             (addMetricLoads 2
                (addMetricInstructions 2
                   (addMetricJumps 1 mc))) post ->
        exec (SIf cond bThen bElse) t m l mc post
    | loop: forall t m l mc cond body1 body2 mid1 mid2 post,
        (* This case is carefully crafted in such a way that recursive uses of exec
           only appear under forall and ->, but not under exists, /\, \/, to make sure the
           auto-generated induction principle contains an IH for all recursive uses. *)
        exec body1 t m l mc mid1 ->
        (forall t' m' l' mc',
            mid1 t' m' l' mc' ->
            eval_bcond l' cond <> None) ->
        (forall t' m' l' mc',
            mid1 t' m' l' mc' ->
            eval_bcond l' cond = Some false ->
            post t' m' l'
                 (addMetricLoads 1
                    (addMetricInstructions 1
                       (addMetricJumps 1 mc')))) ->
        (forall t' m' l' mc',
            mid1 t' m' l' mc' ->
            eval_bcond l' cond = Some true ->
            exec body2 t' m' l' mc' mid2) ->
        (forall t'' m'' l'' mc'',
            mid2 t'' m'' l'' mc'' ->
            exec (SLoop body1 cond body2) t'' m'' l''
                 (addMetricLoads 2
                    (addMetricInstructions 2
                       (addMetricJumps 1 mc''))) post) ->
        exec (SLoop body1 cond body2) t m l mc post
    | seq: forall t m l mc s1 s2 mid post,
        exec s1 t m l mc mid ->
        (forall t' m' l' mc', mid t' m' l' mc' -> exec s2 t' m' l' mc' post) ->
        exec (SSeq s1 s2) t m l mc post
    | skip: forall t m l mc post,
        post t m l mc ->
        exec SSkip t m l mc post.

    (* Sequencing where the first statement's outcome is a single known
       deterministic state. *)
    Lemma det_step: forall t0 m0 l0 mc0 s1 s2 t1 m1 l1 mc1 post,
        exec s1 t0 m0 l0 mc0 (fun t1' m1' l1' mc1' => t1' = t1 /\ m1' = m1 /\ l1' = l1 /\ mc1 = mc1') ->
        exec s2 t1 m1 l1 mc1 post ->
        exec (SSeq s1 s2) t0 m0 l0 mc0 post.
    Proof.
      intros.
      eapply seq; [eassumption|].
      intros. simpl in *. simp.
      assumption.
    Qed.

    (* Continuation-passing form of [seq]: fold the second statement's
       execution into the first one's postcondition. *)
    Lemma seq_cps: forall s1 s2 t m (l: locals) mc post,
        exec s1 t m l mc (fun t' m' l' mc' => exec s2 t' m' l' mc' post) ->
        exec (SSeq s1 s2) t m l mc post.
    Proof.
      intros. eapply seq. 1: eassumption. simpl. clear. auto.
    Qed.

    (* Continuation-passing form of [call]. *)
    Lemma call_cps: forall fname params rets binds args fbody argvs t (l: locals) m mc st post,
        map.get e fname = Some (params, rets, fbody) ->
        map.getmany_of_list l args = Some argvs ->
        map.putmany_of_list_zip params argvs map.empty = Some st ->
        exec fbody t m st (addMetricInstructions 100 (addMetricJumps 100 (addMetricLoads 100 (addMetricStores 100 mc))))
             (fun t' m' st' mc' =>
                exists retvs l',
                  map.getmany_of_list st' rets = Some retvs /\
                  map.putmany_of_list_zip binds retvs l = Some l' /\
                  post t' m' l' (addMetricInstructions 100 (addMetricJumps 100 (addMetricLoads 100 (addMetricStores 100 mc'))))) ->
        exec (SCall binds fname args) t m l mc post.
    Proof.
      intros. eapply call; try eassumption.
      cbv beta. intros *. exact id.
    Qed.

    (* Continuation-passing form of [loop]: a single postcondition for
       body1 that decides whether to exit or to keep looping. *)
    Lemma loop_cps: forall body1 cond body2 t m l mc post,
        exec body1 t m l mc (fun t m l mc => exists b,
                                 eval_bcond l cond = Some b /\
                                 (b = false -> post t m l (addMetricLoads 1 (addMetricInstructions 1 (addMetricJumps 1 mc)))) /\
                                 (b = true -> exec body2 t m l mc (fun t m l mc =>
                                                                     exec (SLoop body1 cond body2) t m l
                                                                          (addMetricLoads 2 (addMetricInstructions 2 (addMetricJumps 1 mc))) post))) ->
        exec (SLoop body1 cond body2) t m l mc post.
    Proof.
      intros. eapply loop. 1: eapply H. all: cbv beta; intros; simp.
      - congruence.
      - replace b with false in * by congruence. clear b. eauto.
      - replace b with true in * by congruence. clear b. eauto.
      - assumption.
    Qed.

    (* The postcondition can be weakened (exec is monotone in post). *)
    Lemma weaken: forall t l m mc s post1,
        exec s t m l mc post1 ->
        forall post2,
          (forall t' m' l' mc', post1 t' m' l' mc' -> post2 t' m' l' mc') ->
          exec s t m l mc post2.
    Proof.
      induction 1; intros; try solve [econstructor; eauto].
      - eapply interact; try eassumption.
        intros; simp.
        edestruct H2; [eassumption|].
        simp. eauto 10.
      - eapply call.
        4: eapply IHexec.
        all: eauto.
        intros. simp.
        specialize H3 with (1 := H5).
        simp. eauto 10.
      - eapply stackalloc. 1: assumption.
        intros.
        eapply H1; eauto.
        intros. simp. eauto 10.
    Qed.

    Lemma seq_assoc: forall s1 s2 s3 t m l mc post,
        exec (SSeq s1 (SSeq s2 s3)) t m l mc post ->
        exec (SSeq (SSeq s1 s2) s3) t m l mc post.
    Proof.
      intros. simp.
      eapply seq_cps.
      eapply seq_cps.
      eapply weaken. 1: eassumption. intros.
      specialize H8 with (1 := H). simp.
      eapply weaken. 1: eassumption. intros.
      eauto.
    Qed.

    Lemma seq_assoc_bw: forall s1 s2 s3 t m l mc post,
        exec (SSeq (SSeq s1 s2) s3) t m l mc post ->
        exec (SSeq s1 (SSeq s2 s3)) t m l mc post.
    Proof. intros. simp. eauto 10 using seq. Qed.

    (* Merge pairs of hypotheses about the same expression and discard
       duplicate hypotheses; used by [intersect] below. *)
    Ltac equalities :=
      repeat match goal with
             | H1: ?e = ?e1, H2: ?e = ?e2 |- _ =>
                 progress (let H := fresh in assert (e1 = e2) as H by congruence; ensure_new H; simp)
             | H1: ?P, H2: ?P |- _ => clear H2
             end;
      simp.

    (* Two exec facts about the same configuration can be combined into
       one with the conjunction of the postconditions. *)
    Lemma intersect: forall t l m mc s post1,
        exec s t m l mc post1 ->
        forall post2,
          exec s t m l mc post2 ->
          exec s t m l mc (fun t' m' l' mc' => post1 t' m' l' mc' /\ post2 t' m' l' mc').
    Proof.
      induction 1; intros;
        match goal with
        | H: exec _ _ _ _ _ _ |- _ => inversion H; subst; clear H
        end;
        equalities;
        try solve [econstructor; eauto | exfalso; congruence].
      - (* SInteract *)
        pose proof ext_spec.unique_mGive_footprint as P.
        specialize P with (1 := H1) (2 := H14).
        destruct (map.split_diff P H H7). subst mKeep0 mGive0.
        eapply @interact.
        + eassumption.
        + eassumption.
        + eapply ext_spec.intersect; [exact H1|exact H14].
        + simpl. intros. simp.
          edestruct H2 as (? & ? & ?); [eassumption|].
          edestruct H15 as (? & ? & ?); [eassumption|].
          simp.
          equalities.
          eauto 10.
      - (* SCall *)
        rename IHexec into IH.
        specialize IH with (1 := H16).
        eapply @call; [..|exact IH|]; eauto.
        rename H3 into Ex1.
        rename H17 into Ex2.
        move Ex1 before Ex2.
        intros. simpl in *. simp.
        edestruct Ex1; [eassumption|].
        edestruct Ex2; [eassumption|].
        simp.
        equalities.
        eauto 10.
      - (* SStackalloc *)
        eapply @stackalloc. 1: eassumption.
        intros.
        rename H0 into Ex1, H12 into Ex2.
        eapply weaken. 1: eapply H1. 1,2: eassumption.
        1: eapply Ex2. 1,2: eassumption.
        cbv beta.
        intros. simp.
        lazymatch goal with
        | A: map.split _ _ _, B: map.split _ _ _ |- _ =>
            specialize @map.split_diff with (4 := A) (5 := B) as P
        end.
        edestruct P; try typeclasses eauto. 2: subst; eauto 10.
        eapply anybytes_unique_domain; eassumption.
      - (* SLoop *)
        eapply @loop.
        + eapply IHexec. exact H10.
        + simpl. intros. simp. eauto.
        + simpl. intros. simp. eauto.
        + simpl. intros. simp. eapply H3; [eassumption..|]. (* also an IH *)
          eapply H18; eassumption.
        + simpl. intros. simp. eapply H5; [eassumption..|]. (* also an IH *)
          eapply H19; eassumption.
      - (* SSeq *)
        pose proof IHexec as IH1.
        specialize IH1 with (1 := H5).
        eapply @seq; [exact IH1|].
        intros; simpl in *.
        destruct H2.
        eauto.
    Qed.
  End FlatImpExec.
End exec.

Notation exec := exec.exec.
(* Simulation-state packaging and the soundness of [modVars]: executing a
   statement only changes the variables that modVars predicts. *)
Section FlatImp2.
  Context (varname: Type).
  Context {varname_eqb: varname -> varname -> bool}.
  Context {width: Z} {BW: Bitwidth width} {word: word.word width}.
  Context {mem: map.map word byte} {locals: map.map varname word}
          {env: map.map String.string (list varname * list varname * stmt varname)}.
  Context {ext_spec: ExtSpec}.
  Context {varname_eq_spec: EqDecider varname_eqb}
          {word_ok: word.ok word}
          {mem_ok: map.ok mem}
          {locals_ok: map.ok locals}
          {env_ok: map.ok env}
          {ext_spec_ok: ext_spec.ok ext_spec}.

  (* The state tuple threaded through a simulation: trace, memory,
     locals, metrics. *)
  Definition SimState: Type := trace * mem * locals * MetricLog.

  (* exec, uncurried over SimState. *)
  Definition SimExec(e: env)(c: stmt varname): SimState -> (SimState -> Prop) -> Prop :=
    fun '(t, m, l, mc) post =>
      exec e c t m l mc (fun t' m' l' mc' => post (t', m', l', mc')).

  (* modVars is sound: any terminating execution of s only differs from
     the initial locals on variables in (modVars s). *)
  Lemma modVarsSound: forall e s initialT (initialSt: locals) initialM (initialMc: MetricLog) post,
      exec e s initialT initialM initialSt initialMc post ->
      exec e s initialT initialM initialSt initialMc
           (fun finalT finalM finalSt _ => map.only_differ initialSt (modVars s) finalSt).
  Proof.
    induction 1;
      try solve [ econstructor; [eassumption..|simpl; map_solver locals_ok] ].
    - eapply exec.interact; try eassumption.
      intros; simp.
      edestruct H2; try eassumption. simp.
      eexists; split; [eassumption|].
      simpl. try split; eauto.
      intros.
      eapply map.only_differ_putmany. eassumption.
    - eapply exec.call. 4: exact H2. (* don't pick IHexec! *) all: try eassumption.
      intros; simpl in *; simp.
      edestruct H3; try eassumption. simp.
      do 2 eexists; split; [|split]; try eassumption.
      eapply map.only_differ_putmany. eassumption.
    - eapply exec.stackalloc; try eassumption.
      intros.
      eapply exec.weaken.
      + eapply exec.intersect.
        * eapply H0; eassumption.
        * eapply H1; eassumption.
      + simpl. intros. simp.
        do 2 eexists. split; [eassumption|]. split; [eassumption|]. map_solver locals_ok.
    - eapply exec.if_true; try eassumption.
      eapply exec.weaken; [eassumption|].
      simpl; intros. map_solver locals_ok.
    - eapply exec.if_false; try eassumption.
      eapply exec.weaken; [eassumption|].
      simpl; intros. map_solver locals_ok.
    - eapply @exec.loop with
        (mid1 := fun t' m' l' mc' => mid1 t' m' l' mc' /\
                                     map.only_differ l (modVars body1) l')
        (mid2 := fun t' m' l' mc' => mid2 t' m' l' mc' /\
                                     map.only_differ l (modVars (SLoop body1 cond body2)) l').
      + eapply exec.intersect; eassumption.
      + intros. simp. eauto.
      + intros. simp. simpl. map_solver locals_ok.
      + intros. simp. simpl in *.
        eapply exec.intersect; [eauto|].
        eapply exec.weaken.
        * eapply H3; eassumption.
        * simpl. intros. map_solver locals_ok.
      + intros. simp. simpl in *.
        eapply exec.weaken.
        * eapply H5; eassumption.
        * simpl. intros. map_solver locals_ok.
    - eapply @exec.seq with
        (mid := fun t' m' l' mc' => mid t' m' l' mc' /\ map.only_differ l (modVars s1) l').
      + eapply exec.intersect; eassumption.
      + simpl; intros. simp.
        eapply exec.weaken; [eapply H1; eauto|].
        simpl; intros.
        map_solver locals_ok.
  Qed.
End FlatImp2.
|
{"author": "mit-plv", "repo": "bedrock2", "sha": "7f2d764ed79f394fe715505a04301d0fb502407f", "save_path": "github-repos/coq/mit-plv-bedrock2", "path": "github-repos/coq/mit-plv-bedrock2/bedrock2-7f2d764ed79f394fe715505a04301d0fb502407f/compiler/src/compiler/FlatImp.v"}
|
//
// server.cpp
// ~~~~~~~~~~
//
// Copyright 2012 Red Hat, Inc.
// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <stdio.h>
#include <cstdlib>
#include <iostream>
#include <boost/lexical_cast.hpp>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <vector>
#include <boost/filesystem/operations.hpp>
#include <ctime>
#include <iostream>
#include "../include/server.hpp"
extern std::string as_proxyd_uptime;
namespace http {
namespace server3 {
// Every time the timer fires we will generate a new frame and send it to all subscribers.
// Periodic health-check callback: polls the backing AS servers' status and
// re-arms the timer, until handle_stop() has raised isHandleStop.
void server::handle_timer() {
    if ( isHandleStop )
        return;
    asOP_->CheckAsServersStatus();
    // Schedule the next poll AS_HEALTH_TIMER milliseconds from now.
    timer_.expires_from_now(boost::posix_time::milliseconds(AS_HEALTH_TIMER));
    timer_.async_wait(boost::bind(&server::handle_timer, this));
}
// Construct the proxy server: register exit-signal handlers, open and tune
// the listening acceptor, record the process start time (exported via
// as_proxyd_uptime), arm the periodic AS-health timer, and queue the first
// asynchronous accept. No handler actually runs until run() starts the
// io_service threads.
//
// Fix: isHandleStop is now initialized FIRST. It was previously assigned as
// the very last statement, after the timer/accept/signal handlers had been
// registered, so the member was uninitialized for most of the constructor.
server::server(const std::string& address, unsigned int port, AS_CONTROL *pasOP, std::size_t thread_pool_size)
    : asOP_(pasOP),
      thread_pool_size_(thread_pool_size),
      signals_(io_service_),
      acceptor_(io_service_),
      new_connection_(),
      request_handler_(pasOP),
      timer_(io_service_) {
    // Initialize before any handler that reads this flag is registered.
    isHandleStop = false;

    // Register to handle the signals that indicate when the server should exit.
    // It is safe to register for the same signal multiple times in a program,
    // provided all registration for the specified signal is made through Asio.
    signals_.add(SIGINT);
    signals_.add(SIGTERM);
#if defined(SIGQUIT)
    signals_.add(SIGQUIT);
#endif // defined(SIGQUIT)
    signals_.async_wait(boost::bind(&server::handle_stop, this));

    // Open the acceptor with the option to reuse the address (i.e. SO_REUSEADDR).
    char portstr[10];
    snprintf(portstr, sizeof(portstr), "%u", port);
    boost::asio::ip::tcp::resolver resolver(io_service_);
    boost::asio::ip::tcp::resolver::query query(address, portstr);
    boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve(query);
    acceptor_.open(endpoint.protocol());
    acceptor_.set_option(boost::asio::ip::tcp::acceptor::linger(false, 0));
    acceptor_.set_option(boost::asio::ip::tcp::no_delay(true));
    acceptor_.set_option(boost::asio::ip::tcp::acceptor::keep_alive(true));
    acceptor_.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
    acceptor_.bind(endpoint);
    acceptor_.listen();

    // Record the server start time (local time) as the uptime string.
    std::time_t t;
    std::time(&t);
    const std::tm* timeinfo = std::localtime ( &t ) ;
    char yyyymmdd[28] ;
    std::strftime( yyyymmdd, sizeof(yyyymmdd), "%Y-%m-%d %H:%M:%S", timeinfo );
    as_proxyd_uptime = std::string( yyyymmdd );

    // Start the timer used to generate outgoing frames.
    //timer_.expires_from_now(boost::posix_time::seconds(1));
    timer_.expires_from_now(boost::posix_time::milliseconds(AS_HEALTH_TIMER));
    timer_.async_wait(boost::bind(&server::handle_timer, this));
    start_accept();
}
// Start the worker pool: each thread runs the shared io_service's event
// loop, so accept/timer/connection handlers may execute on any of them.
// Blocks until the io_service is stopped (see handle_stop) and all
// threads have joined.
void server::run() {
    // Create a pool of threads to run all of the io_services.
    std::vector<boost::shared_ptr<boost::thread> > threads;
    for (std::size_t i = 0; i < thread_pool_size_; ++i) {
        boost::shared_ptr<boost::thread> thread(new boost::thread( boost::bind(&boost::asio::io_service::run, &io_service_)) );
        threads.push_back(thread);
        // NOTE(review): this map is written here after the thread has already
        // started running handlers; presumably mthreadList is only read for
        // per-thread bookkeeping -- confirm AS_CONTROL guards this access.
        asOP_->mthreadList[thread->get_id()] = i;
    }
    asOP_->Initialize();
    WriteLogFile("%-40s : %s", "Started..........", "OK");
    fflush(stdout);
    // Wait for all threads in the pool to exit.
    for (std::size_t i = 0; i < threads.size(); ++i) {
        threads[i]->join();
    }
}
// Queue an asynchronous accept for the next client; the accepted socket
// lives in a freshly allocated connection object held by new_connection_.
void server::start_accept() {
    new_connection_.reset(new connection(io_service_, request_handler_));
    acceptor_.async_accept(new_connection_->socket(), new_connection_->peer, boost::bind(&server::handle_accept, this, boost::asio::placeholders::error));
}
// Completion handler for async_accept: start serving the new connection on
// success, then immediately queue the next accept.
void server::handle_accept(const boost::system::error_code& e) {
    if (!e) {
        new_connection_->start();
    }
    // NOTE(review): a new accept is queued even when e is an error (including
    // operation_aborted after handle_stop) -- confirm this cannot spin or
    // throw once the acceptor/io_service is shutting down.
    start_accept();
}
// Exit-signal handler (SIGINT/SIGTERM/SIGQUIT): stop re-arming the health
// timer (via the flag read in handle_timer) and stop the io_service, which
// unblocks every thread spinning in run().
void server::handle_stop() {
    isHandleStop = true;
    io_service_.stop();
}
} // namespace server3
} // namespace http
|
{"hexsha": "e9f6b3a095f984a76f1574f422e6cdeeba144394", "size": 4265, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/server.cpp", "max_stars_repo_name": "dancal/as_proxyd", "max_stars_repo_head_hexsha": "a8f4eeb41dbffb49071d55b966f0ecf705104d2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-07-15T06:22:08.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-15T06:30:22.000Z", "max_issues_repo_path": "src/server.cpp", "max_issues_repo_name": "dancal/as_proxyd", "max_issues_repo_head_hexsha": "a8f4eeb41dbffb49071d55b966f0ecf705104d2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/server.cpp", "max_forks_repo_name": "dancal/as_proxyd", "max_forks_repo_head_hexsha": "a8f4eeb41dbffb49071d55b966f0ecf705104d2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4137931034, "max_line_length": 151, "alphanum_fraction": 0.7015240328, "num_tokens": 1125}
|
import numpy as np
from collections import OrderedDict
from sklearn.metrics import average_precision_score
def str2ind(categoryname, classlist):
    """Return the index of *categoryname* in *classlist*.

    *classlist* holds UTF-8 byte strings; raises IndexError if the name
    is not present (same behavior as the original list-comprehension form).
    """
    matches = [idx for idx, cname in enumerate(classlist)
               if cname.decode('utf-8') == categoryname]
    return matches[0]
def strlist2indlist(strlist, classlist):
    """Map each category name in *strlist* to its class index."""
    return list(map(lambda name: str2ind(name, classlist), strlist))
def strlist2multihot(strlist, classlist):
    """Return a multi-hot vector over all classes for the names in *strlist*."""
    onehots = np.eye(len(classlist))[strlist2indlist(strlist, classlist)]
    return onehots.sum(axis=0)
def idx2multihot(id_list, num_class):
    """Multi-hot encode *id_list* over *num_class* classes.

    Duplicate ids accumulate (matching the original one-hot-row summation),
    so the result is really a count vector of dtype float64.
    """
    encoding = np.zeros(num_class)
    for idx in id_list:
        encoding[idx] += 1.0
    return encoding
def random_extract(feat, t_max):
    """Crop a random contiguous window of `t_max` rows from `feat`.

    :param feat: array-like of shape (T, ...) with T >= t_max
    :param t_max: window length
    :return: (window, start, end) where window == feat[start:end]
    """
    # np.random.randint's upper bound is exclusive; the +1 makes every valid
    # start offset reachable (including len(feat) - t_max) and fixes the
    # ValueError the original raised when len(feat) == t_max (randint(0)).
    r = np.random.randint(len(feat) - t_max + 1)
    res = feat[r:r+t_max]
    return res, r, r+t_max
def pad(feat, min_len):
    """Zero-pad `feat` along axis 0 up to `min_len` rows; longer inputs pass through unchanged."""
    n_rows = np.shape(feat)[0]
    if n_rows > min_len:
        return feat
    fill = min_len - n_rows
    return np.pad(feat, ((0, fill), (0, 0)), mode='constant', constant_values=0)
def process_feat(feat, length):
    """Normalize `feat` to exactly `length` rows: random crop when longer,
    zero-pad when shorter or equal.  Returns (features, start, end)."""
    if len(feat) <= length:
        return pad(feat, length), 0, length
    cropped, start, end = random_extract(feat, length)
    return cropped, start, end
def compute_FAP_result(num_classes, score_metrics, target_metrics, ignore_class=(), verbose=False):
    """Frame-level average precision (AP) per class, plus the mean AP.

    :param num_classes: number of classes scored
    :param score_metrics: (n_frames, num_classes) prediction scores
    :param target_metrics: (n_frames, num_classes) ground-truth labels (1 = positive)
    :param ignore_class: classes excluded from AP / mAP (immutable tuple
        default replaces the original mutable ``[]`` default; membership
        semantics are unchanged)
    :param verbose: print the mAP when True
    :return: OrderedDict with per-class 'AP' dict and scalar 'mAP'
    """
    result = OrderedDict()
    score_metrics = np.array(score_metrics)
    target_metrics = np.array(target_metrics)
    # Compute AP
    result['AP'] = OrderedDict()
    for cls in range(num_classes):
        if cls not in ignore_class:
            # np.int was removed in NumPy 1.24; the builtin int is the
            # documented replacement and is behaviorally identical here.
            result['AP'][cls] = average_precision_score(
                (target_metrics[:, cls]==1).astype(int),
                score_metrics[:, cls])
    # Compute mAP
    result['mAP'] = np.mean(list(result['AP'].values()))
    if verbose:
        print('mAP: {:.5f}'.format(result['mAP']))
    return result
def voc_ap(rec, prec, use_07_metric=True, rec_th=1.0):
    """PASCAL VOC average precision from recall/precision arrays.

    With `use_07_metric` (the only implemented branch) the 11-point
    interpolation of VOC 2007 is applied, sampling recall thresholds from
    0 to `rec_th` in steps of rec_th/10.  Returns None when
    `use_07_metric` is False, matching the original behavior.
    """
    if use_07_metric:
        # 11-point interpolated AP
        step = rec_th / 10.0
        ap = 0.
        for threshold in np.arange(0., rec_th + step, step):
            reachable = rec >= threshold
            if np.sum(reachable) == 0:
                interp_p = 0
            else:
                interp_p = np.max(prec[reachable])
            ap = ap + interp_p / 11.0
        return ap
def point_average_precision(GTs, videoLen, cls, scores, times, videoIds, dist_th, rec_th =1.0):
    """Point-level average precision for action-start detection of one class.

    :param GTs: dict, GTs[videoId][cls] = list of ground-truth action-start
        times in seconds; index 0 is the ambiguous class and is ignored
    :param videoLen: dict mapping videoId -> video length in seconds
    :param cls: class of interest (column index into `scores`)
    :param scores: per-frame class-activation scores, shape (N, num_classes)
    :param times: per-frame timestamps in seconds, shape (N,)
    :param videoIds: per-frame numeric video ids aligned with `scores`/`times`
    :param dist_th: max |prediction - ground truth| in seconds for a true positive
    :param rec_th: recall limit forwarded to voc_ap
    :return: (recall array, precision array, average precision)
    """
    # Count valid positives and build per-video "already matched" flags R.
    npos = 0
    R = dict()
    for k, v in enumerate(GTs):
        posct = 0
        for ct in range(len(GTs[v][cls])):
            if v == 'video_test_0001292': #ignore videos contain only ambiguous class
                continue
            # Only starts that fall inside the video's duration count.
            if GTs[v][cls][ct] <= videoLen[v]:
                posct += 1
        npos += posct
        R[v] = [0 for _ in range(len(GTs[v][cls]))]
    # Rank all frames by descending confidence for this class.
    confidence = scores[:,cls]
    sorted_ind = np.argsort(-confidence)
    times = times[sorted_ind]
    videoIds = ['video_test_'+str(int(videoIds[x])).zfill(7) for x in sorted_ind]
    # Greedy matching: each ground-truth start absorbs at most one detection.
    nd = len(videoIds)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        ASs = np.array(GTs[videoIds[d]][cls]).astype(float)
        time = times[d].astype(float)
        dist_min = np.inf
        if len(ASs) > 0:
            # compute absolute distance
            dists = np.abs(time - ASs)
            dist_min = np.min(dists)
            jmin = np.argmin(dists)
        if dist_min <= dist_th:
            if R[videoIds[d]][jmin] == 0:
                tp[d] = 1.
                R[videoIds[d]][jmin] = 1
            else:
                fp[d] = 1.  # duplicate detection of an already-matched start
        else:
            fp[d] = 1.
    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, True, rec_th)
    return rec, prec, ap
def compute_PAP_result_thumos14(GTs, videoLen, Scores, times, videoIds, dist_th, classnum, ignore=[], rec_th=1.0):
    """Point-level AP for every non-ignored class on THUMOS'14, plus the mean AP."""
    result = OrderedDict()
    result['pointAP'] = OrderedDict()
    result['mAP'] = OrderedDict()
    for cls_idx in range(classnum):
        if cls_idx in ignore:
            continue
        # recall/precision curves are discarded; only the AP is kept
        _, _, ap = point_average_precision(
            GTs, videoLen, cls_idx, Scores, times, videoIds, dist_th, rec_th)
        result['pointAP'][cls_idx] = ap
    result['mAP'] = np.mean(list(result['pointAP'].values()))
    return result
def getASfromCAS(frameScores, videoIds, fps):
    '''Derive action-start scores from per-frame class-activation scores.

    inputs: per-frame scores for all classes, shape (N, class_num);
            per-frame numeric video ids aligned with the scores;
            fps: frames per second
    outputs: action-start scores, shape (N, class_num);
             per-frame times in seconds within each video;
             dict of each video's length in seconds
    '''
    scores = np.zeros(frameScores.shape)
    times = np.zeros(frameScores.shape[0])
    videoLen = dict()
    # A start fires at frame i when the argmax class changes relative to the
    # previous frame AND the new class is non-background (class 0); its score
    # is that frame's score for the new class.  Everything else stays 0.
    for i in range(0, frameScores.shape[0]):
        prev_cls = 0 if i == 0 else np.argmax(frameScores[i - 1, :])
        curr_cls = np.argmax(frameScores[i, :])
        if curr_cls != prev_cls and curr_cls != 0:
            scores[i, curr_cls] = frameScores[i, curr_cls]
    # Assign within-video timestamps; when the video id changes, record the
    # finished video's length (timestamp of its last frame).
    prev_vid = videoIds[0]
    frame_in_vid = 0
    for i in range(0, times.shape[0]):
        curr_vid = videoIds[i]
        if curr_vid != prev_vid:
            frame_in_vid = 0
            prev_vid = curr_vid
            videoLen['video_test_'+str(int(videoIds[i-1])).zfill(7)] = times[i-1]
        times[i] = frame_in_vid*1.0/fps
        frame_in_vid += 1
    # the final video's length
    videoLen['video_test_'+str(int(videoIds[-1])).zfill(7)] = times[-1]
    return scores, times, videoLen
|
{"hexsha": "9a858a6fac1314665e070adebc27dd6bec506e81", "size": 6322, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "salesforce/woad-pytorch", "max_stars_repo_head_hexsha": "405fa0c56271ae241f4789d9e1150330fddca3e5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-06-21T17:22:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T19:32:25.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "salesforce/woad-pytorch", "max_issues_repo_head_hexsha": "405fa0c56271ae241f4789d9e1150330fddca3e5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "salesforce/woad-pytorch", "max_forks_repo_head_hexsha": "405fa0c56271ae241f4789d9e1150330fddca3e5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-23T10:46:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T02:57:19.000Z", "avg_line_length": 35.7175141243, "max_line_length": 128, "alphanum_fraction": 0.5847832964, "include": true, "reason": "import numpy", "num_tokens": 1713}
|
(*****************************************************************************
* Featherweight-OCL --- A Formal Semantics for UML-OCL Version OCL 2.5
* for the OMG Standard.
* http://www.brucker.ch/projects/hol-testgen/
*
* Design_OCL.thy --- OCL Contracts and an Example.
* This file is part of HOL-TestGen.
*
* Copyright (c) 2012-2015 Université Paris-Saclay, Univ. Paris-Sud, France
* 2013-2015 IRT SystemX, France
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)
theory
Design_OCL
imports
Design_UML
begin
text \<open>\label{ex:employee-design:ocl}\<close>
section\<open>OCL Part: Invariant\<close>
text\<open>These recursive predicates can be defined conservatively
by greatest fix-point
constructions---automatically. See~\cite{brucker.ea:hol-ocl-book:2006,brucker:interactive:2007}
for details. For the purpose of this example, we state them as axioms
here.
\begin{ocl}
context Person
inv label : self .boss <> null implies (self .salary \<le> ((self .boss) .salary))
\end{ocl}
\<close>
(* Invariant "label" on a single Person in the current state: if the boss
   link is non-null, the employee's salary is bounded by the boss's. *)
definition Person_label\<^sub>i\<^sub>n\<^sub>v :: "Person \<Rightarrow> Boolean"
where "Person_label\<^sub>i\<^sub>n\<^sub>v (self) \<equiv>
(self .boss <> null implies (self .salary \<le>\<^sub>i\<^sub>n\<^sub>t ((self .boss) .salary)))"
(* The same invariant evaluated in the pre-state (@pre accessors). *)
definition Person_label\<^sub>i\<^sub>n\<^sub>v\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e :: "Person \<Rightarrow> Boolean"
where "Person_label\<^sub>i\<^sub>n\<^sub>v\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e (self) \<equiv>
(self .boss@pre <> null implies (self .salary@pre \<le>\<^sub>i\<^sub>n\<^sub>t ((self .boss@pre) .salary@pre)))"
(* Global form: the invariant holds for all instances in both states. *)
definition Person_label\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>i\<^sub>n\<^sub>v :: "Boolean"
where "Person_label\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>i\<^sub>n\<^sub>v \<equiv> (Person .allInstances()->forAll\<^sub>S\<^sub>e\<^sub>t(x | Person_label\<^sub>i\<^sub>n\<^sub>v (x)) and
(Person .allInstances@pre()->forAll\<^sub>S\<^sub>e\<^sub>t(x | Person_label\<^sub>i\<^sub>n\<^sub>v\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e (x))))"
(* Sanity check: a defined .boss link should force both objects to occur
   in the state's object universe.  Left unproven (oops). *)
lemma "\<tau> \<Turnstile> \<delta> (X .boss) \<Longrightarrow> \<tau> \<Turnstile> Person .allInstances()->includes\<^sub>S\<^sub>e\<^sub>t(X .boss) \<and>
\<tau> \<Turnstile> Person .allInstances()->includes\<^sub>S\<^sub>e\<^sub>t(X) "
oops
(* To be generated generically ... hard, but crucial lemma that should hold.
It means that X and its successor are object representations that actually
occur in the state. *)
lemma REC_pre : "\<tau> \<Turnstile> Person_label\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>i\<^sub>n\<^sub>v
\<Longrightarrow> \<tau> \<Turnstile> Person .allInstances()->includes\<^sub>S\<^sub>e\<^sub>t(X) \<comment> \<open>\<open>X\<close> represented object in state\<close>
\<Longrightarrow> \<exists> REC. \<tau> \<Turnstile> REC(X) \<triangleq> (Person_label\<^sub>i\<^sub>n\<^sub>v (X) and (X .boss <> null implies REC(X .boss)))"
oops (* Attempt to alleviate the burden of the following axiomatizations: could be
a witness for a constant specification ...*)
text\<open>This allows to state a predicate:\<close>
(* Recursive invariant, stated axiomatically: "label" holds for self and,
   transitively, along the .boss chain (current state). *)
axiomatization inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l :: "Person \<Rightarrow> Boolean"
where inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l_def:
"(\<tau> \<Turnstile> Person .allInstances()->includes\<^sub>S\<^sub>e\<^sub>t(self)) \<Longrightarrow>
(\<tau> \<Turnstile> (inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l(self) \<triangleq> (self .boss <> null implies
(self .salary \<le>\<^sub>i\<^sub>n\<^sub>t ((self .boss) .salary)) and
inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l(self .boss))))"
(* Pre-state variant of the recursive invariant (@pre accessors). *)
axiomatization inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e :: "Person \<Rightarrow> Boolean"
where inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e_def:
"(\<tau> \<Turnstile> Person .allInstances@pre()->includes\<^sub>S\<^sub>e\<^sub>t(self)) \<Longrightarrow>
(\<tau> \<Turnstile> (inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e(self) \<triangleq> (self .boss@pre <> null implies
(self .salary@pre \<le>\<^sub>i\<^sub>n\<^sub>t ((self .boss@pre) .salary@pre)) and
inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e(self .boss@pre))))"
(* Intended unfolding characterization of the recursive invariant: either
   the boss link is null, or the salary constraint holds and the invariant
   recurses on the boss.  Left unproven (oops). *)
lemma inv_1 :
"(\<tau> \<Turnstile> Person .allInstances()->includes\<^sub>S\<^sub>e\<^sub>t(self)) \<Longrightarrow>
(\<tau> \<Turnstile> inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l(self) = ((\<tau> \<Turnstile> (self .boss \<doteq> null)) \<or>
( \<tau> \<Turnstile> (self .boss <> null) \<and>
\<tau> \<Turnstile> ((self .salary) \<le>\<^sub>i\<^sub>n\<^sub>t (self .boss .salary)) \<and>
\<tau> \<Turnstile> (inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l(self .boss))))) "
oops (* Let's hope that this holds ... *)
text\<open>A very first attempt to characterize the axiomatization by an inductive
definition - this can not be the last word since too weak (should be equality!)\<close>
(* Coinductive approximation of the invariant; the surrounding text notes
   it is weaker than the intended equality characterization. *)
coinductive inv :: "Person \<Rightarrow> (\<AA>)st \<Rightarrow> bool" where
"(\<tau> \<Turnstile> (\<delta> self)) \<Longrightarrow> ((\<tau> \<Turnstile> (self .boss \<doteq> null)) \<or>
(\<tau> \<Turnstile> (self .boss <> null) \<and> (\<tau> \<Turnstile> (self .boss .salary \<le>\<^sub>i\<^sub>n\<^sub>t self .salary)) \<and>
( (inv(self .boss))\<tau> )))
\<Longrightarrow> ( inv self \<tau>)"
section\<open>OCL Part: The Contract of a Recursive Query\<close>
text\<open>This part is analogous to the Analysis Model and skipped here.\<close>
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Featherweight_OCL/examples/Employee_Model/Design/Design_OCL.thy"}
|
# 10.1.3 Variational inference for a one-variable Gaussian distribution
#%%
# Libraries used in section 10.1.3
import numpy as np
from scipy.stats import norm, gamma # 1-D Gaussian distribution, gamma distribution
import matplotlib.pyplot as plt
#%%
## Setup of the true (data-generating) distribution: a 1-D Gaussian
# Specify the true mean parameter
mu_truth = 5.0
# Specify the true precision parameter
tau_truth = 0.5
print(np.sqrt(1.0 / tau_truth)) # standard deviation
# Create x values for plotting (mean +/- 4 standard deviations)
x_line = np.linspace(
    mu_truth - 4.0 * np.sqrt(1.0 / tau_truth),
    mu_truth + 4.0 * np.sqrt(1.0 / tau_truth),
    num=1000
)
# Evaluate the true density
model_dens = norm.pdf(x=x_line, loc=mu_truth, scale=np.sqrt(1.0 / tau_truth))
#%%
# Plot the true distribution
plt.figure(figsize=(12, 9))
plt.plot(x_line, model_dens, label='true model') # true distribution
plt.xlabel('x')
plt.ylabel('density')
plt.suptitle('Gaussian Distribution', fontsize=20)
plt.title('$\mu=' + str(mu_truth) + ', \\tau=' + str(tau_truth) + '$', loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
## Generate the observed data
# Specify the number of (observed) data points
N = 50
# Draw the data from the true Gaussian distribution
x_n = np.random.normal(loc=mu_truth, scale=np.sqrt(1 / tau_truth), size=N)
#%%
# Plot a histogram of the observed data
plt.figure(figsize=(12, 9))
#plt.hist(x=x_n, bins=50, label='data') # observed data: (counts)
plt.hist(x=x_n, density=True, bins=50, label='data') # observed data: (relative frequency)
plt.plot(x_line, model_dens, color='red', linestyle='--', label='true model') # true distribution
plt.xlabel('x')
#plt.ylabel('count') # (for counts)
plt.ylabel('density') # (for relative frequency)
plt.suptitle('Gaussian Distribution', fontsize=20)
plt.title('$N=' + str(N) + ', \mu=' + str(mu_truth) + ', \\tau=' + str(tau_truth) + '$', loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
## Setup of the prior distributions (Gaussian-gamma)
# Specify the parameters of mu's prior
mu_0 = 0.0
lambda_0 = 0.1
# Specify the parameters of tau's prior
a_0 = 1.0
b_0 = 1.0
# Create mu values for plotting
mu_line = np.linspace(
    mu_truth - 4.0 * np.sqrt(1.0 / tau_truth),
    mu_truth + 4.0 * np.sqrt(1.0 / tau_truth),
    num=500
)
# Create tau values for plotting
tau_line = np.linspace(0.0, 4 * tau_truth, num=500)
# Create the grid points
mu_grid, tau_grid = np.meshgrid(mu_line, tau_line)
# Remember the grid shape (used to reshape flattened densities for contour plots)
point_dims = mu_grid.shape
print(point_dims)
# Evaluate mu's prior (the 1e-7 guards against division by zero at tau = 0)
mu_prior_dens = norm.pdf(
    x=mu_grid.flatten(), loc=mu_0, scale=np.sqrt(1.0 / (lambda_0 * tau_grid.flatten() + 1e-7))
)
# Evaluate tau's prior
tau_prior_dens = gamma.pdf(x=tau_grid.flatten(), a=a_0, scale=1.0 / b_0)
# Evaluate the joint prior
prior_dens = mu_prior_dens * tau_prior_dens
#%%
# Plot the prior distribution
plt.figure(figsize=(12, 9))
plt.scatter(x=mu_truth, y=tau_truth, color='red', s=100, marker='x', label='true val') # true value
plt.contour(mu_grid, tau_grid, prior_dens.reshape(point_dims)) # prior distribution
plt.xlabel('$\mu$')
plt.ylabel('$\\tau$')
plt.suptitle('Gaussian-Gamma Distribution', fontsize=20)
plt.title('$\mu_0=' + str(mu_0)+ ', \lambda_0=' + str(lambda_0) +
          ', a_0=' + str(a_0) + ', b_0=' + str(b_0) + '$',
          loc='left')
plt.colorbar() # contour values
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
## Compute the exact (true) posterior
# Parameters of mu's exact posterior
lambda_hat = lambda_0 + N
mu_hat = (lambda_0 * mu_0 + sum(x_n)) / lambda_hat
# Parameters of lambda's (tau's) exact posterior
a_hat = a_0 + 0.5 * N
b_hat = b_0 + 0.5 * (sum(x_n**2) + lambda_0 * mu_0**2 - lambda_hat * mu_hat**2)
# Evaluate mu's exact posterior over the grid
mu_true_posterior_dens = norm.pdf(
    x=mu_grid.flatten(), loc=mu_hat, scale=np.sqrt(1.0 / (lambda_hat * tau_grid.flatten() + 1e-7))
)
# Evaluate tau's exact posterior
tau_true_posterior_dens = gamma.pdf(x=tau_grid.flatten(), a=a_hat, scale=1.0 / b_hat)
# Evaluate the exact joint posterior
posterior_truth_dens = mu_true_posterior_dens * tau_true_posterior_dens
#%%
# Plot the exact posterior
plt.figure(figsize=(12, 9))
plt.scatter(x=mu_truth, y=tau_truth, color='red', s=100, marker='x', label='true val') # true value
plt.contour(mu_grid, tau_grid, posterior_truth_dens.reshape(point_dims)) # exact posterior
plt.xlabel('$\mu$')
plt.ylabel('$\\tau$')
plt.suptitle('Gaussian-Gamma Distribution', fontsize=20)
plt.title('$N=' + str(N) +
          ', \hat{\mu}=' + str(np.round(mu_hat, 1))+ ', \hat{\lambda}=' + str(np.round(lambda_hat, 5)) +
          ', \hat{a}=' + str(a_hat) + ', \hat{b}=' + str(np.round(b_hat, 1)) + '$',
          loc='left')
plt.xlim(mu_truth - np.sqrt(1.0 / tau_truth), mu_truth + np.sqrt(1.0 / tau_truth))
plt.ylim(0.0, 2.0 * tau_truth)
plt.colorbar() # contour values
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Evaluate mu's exact marginal posterior (tau fixed at its posterior mean a_hat/b_hat)
mu_true_posterior_dens = norm.pdf(
    x=mu_line, loc=mu_hat, scale=np.sqrt(1.0 / (lambda_hat * a_hat / b_hat))
)
# Plot mu's exact posterior
plt.figure(figsize=(12, 9))
plt.plot(mu_line, mu_true_posterior_dens, label='$\mu$ posterior') # mu's posterior
plt.vlines(x=mu_truth, ymin=0.0, ymax=np.nanmax(mu_true_posterior_dens),
           color='red', linestyle='--', label='true val') # true value of mu
plt.xlabel('$\mu$')
plt.ylabel('density')
plt.suptitle('Gaussian Distribution', fontsize=20)
plt.title('$N=' + str(N) +
          ', \hat{\mu}=' + str(np.round(mu_hat, 1)) +
          ', \hat{\\lambda}=' + str(lambda_hat) +
          ', E[\\tau]=' + str(np.round(a_hat / b_hat, 5)) + '$',
          loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Evaluate tau's exact posterior over the 1-D grid
tau_true_posterior_dens = gamma.pdf(x=tau_line, a=a_hat, scale=1.0 / b_hat)
# Plot lambda's (tau's) exact posterior
plt.figure(figsize=(12, 9))
plt.plot(tau_line, tau_true_posterior_dens, label='$\\tau$ posterior') # tau's posterior
plt.vlines(x=tau_truth, ymin=0.0, ymax=np.nanmax(tau_true_posterior_dens),
           color='red', linestyle='--', label='true val') # true value of tau
plt.xlabel('$\lambda$')
plt.ylabel('density')
plt.suptitle('Gamma Distribution', fontsize=20)
plt.title('$N=' + str(N) +
          ', \hat{a}=' + str(a_hat) + ', \hat{b}=' + str(np.round(b_hat, 1)) + '$',
          loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
## Inference
# Specify the number of iterations
MaxIter = 5
# Initial values of the variational parameters
mu_N = mu_0
# E[tau] under the prior is a_0 / b_0, so the initial precision of q(mu) is
# lambda_0 * E[tau] = lambda_0 * a_0 / b_0.  (The original divided by a_0,
# which only gave the right value because a_0 == b_0 in this script.)
lambda_N = lambda_0 * a_0 / b_0
a_N = a_0
b_N = b_0
# Containers tracking the parameter trajectory (one entry per half update,
# so the animation can show each factor's update separately)
trace_mu_i = [mu_0]
trace_lambda_i = [lambda_N]
trace_a_i = [a_N]
trace_b_i = [b_N]
# Variational inference
for i in range(MaxIter):
    # Update the parameters of mu's approximate posterior: eqs. (10.26)(10.27)
    mu_N = (lambda_0 * mu_0 + np.sum(x_n)) / (lambda_0 + N)
    lambda_N = (lambda_0 + N) * a_N / b_N
    # Record the state after the i-th mu update
    trace_mu_i.append(mu_N)
    trace_lambda_i.append(lambda_N)
    trace_a_i.append(a_N)
    trace_b_i.append(b_N)
    # Update the parameters of tau's approximate posterior: eqs. (10.29)(10.30)
    a_N = a_0 + 0.5 * (N + 1)
    b_N = b_0 + 0.5 * (lambda_0 * mu_0**2 + np.sum(x_n**2))
    b_N += 0.5 * (lambda_0 + N) * (mu_N**2 + 1.0 / lambda_N)
    b_N -= (lambda_0 * mu_0 + np.sum(x_n)) * mu_N
    # Record the state after the i-th tau update
    trace_mu_i.append(mu_N)
    trace_lambda_i.append(lambda_N)
    trace_a_i.append(a_N)
    trace_b_i.append(b_N)
    # Progress report
    print(str(i + 1) + ' (' + str(np.round((i + 1) / MaxIter * 100, 1)) + ')%')
#%%
## Inspect the inference result
# Evaluate mu's approximate posterior over the grid
E_tau = a_N / b_N # expected value of tau
mu_posterior_dens = norm.pdf(
    x=mu_grid.flatten(),
    loc=mu_N,
    scale=np.sqrt(1.0 / (lambda_N / E_tau * tau_grid.flatten() + 1e-7))
)
# Evaluate tau's approximate posterior
tau_posterior_dens = gamma.pdf(x=tau_grid.flatten(), a=a_N, scale=1.0 / b_N)
# Evaluate the approximate joint posterior
posterior_dens = mu_posterior_dens * tau_posterior_dens
#%%
# Plot the approximate posterior against the exact one
plt.figure(figsize=(12, 9))
plt.scatter(x=mu_truth, y=tau_truth, color='red', s=100, marker='x', label='true val') # true value
plt.contour(mu_grid, tau_grid, posterior_truth_dens.reshape(point_dims),
            alpha=0.5, linestyles='--') # exact posterior
plt.contour(mu_grid, tau_grid, posterior_dens.reshape(point_dims)) # approximate posterior
plt.xlabel('$\mu$')
plt.ylabel('$\\tau$')
plt.suptitle('Gaussian-Gamma Distribution', fontsize=20)
plt.title('$N=' + str(N) +
          ', \mu_N=' + str(np.round(mu_N, 1))+
          ', \lambda_N\ /\ E[\\tau]=' + str(np.round(lambda_N / E_tau, 1)) +
          ', a_N=' + str(a_N) +
          ', b_N=' + str(np.round(b_N, 1)) + '$',
          loc='left')
plt.xlim(mu_truth - np.sqrt(1.0 / tau_truth), mu_truth + np.sqrt(1.0 / tau_truth))
plt.ylim(0.0, 2.0 * tau_truth)
plt.colorbar() # contour values
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Evaluate mu's approximate marginal posterior
mu_posterior_dens = norm.pdf(
    x=mu_line, loc=mu_N, scale=np.sqrt(1.0 / lambda_N)
)
# Plot mu's approximate posterior
plt.figure(figsize=(12, 9))
plt.plot(mu_line, mu_posterior_dens, label='$\mu$ posterior') # mu's approximate posterior
plt.vlines(x=mu_truth, ymin=0.0, ymax=np.nanmax(mu_posterior_dens),
           color='red', linestyle='--', label='true val') # true value of mu
plt.xlabel('$\mu$')
plt.ylabel('density')
plt.suptitle('Gaussian Distribution', fontsize=20)
plt.title('$N=' + str(N) +
          ', \mu_N=' + str(np.round(mu_N, 1)) +
          ', \\lambda_N=' + str(np.round(lambda_N, 5)) + '$',
          loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Evaluate tau's approximate posterior over the 1-D grid
tau_posterior_dens = gamma.pdf(x=tau_line, a=a_N, scale=1.0 / b_N)
# Plot lambda's (tau's) approximate posterior
plt.figure(figsize=(12, 9))
plt.plot(tau_line, tau_posterior_dens, label='$\\tau$ posterior') # tau's posterior
plt.vlines(x=tau_truth, ymin=0.0, ymax=np.nanmax(tau_posterior_dens),
           color='red', linestyle='--', label='true val') # true value of tau
plt.xlabel('$\lambda$')
plt.ylabel('density')
plt.suptitle('Gamma Distribution', fontsize=20)
# fixed: the title labelled the value 'a_N' but printed a_hat (the exact
# posterior's parameter); it now shows the variational parameter a_N
plt.title('$N=' + str(N) +
          ', a_N=' + str(a_N) + ', b_N=' + str(np.round(b_N, 1)) + '$',
          loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
## Trace of the hyperparameters over the updates
# Plot the trace of mu_N (x axis counts half iterations: mu update, tau update)
plt.figure(figsize=(12, 9))
plt.plot(np.arange(0.0, MaxIter + 0.1, 0.5), trace_mu_i)
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Variational Inference', fontsize=20)
plt.title('$\mu_N$', loc='left')
plt.grid() # grid lines
plt.show()
#%%
# Plot the trace of lambda_N
plt.figure(figsize=(12, 9))
plt.plot(np.arange(0.0, MaxIter + 0.1, 0.5), trace_lambda_i)
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Variational Inference', fontsize=20)
plt.title('$\lambda_N$', loc='left')
plt.grid() # grid lines
plt.show()
#%%
# Plot the trace of a_N
plt.figure(figsize=(12, 9))
plt.plot(np.arange(0.0, MaxIter + 0.1, 0.5), trace_a_i)
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Variational Inference', fontsize=20)
plt.title('$a_N$', loc='left')
plt.grid() # grid lines
plt.show()
#%%
# Plot the trace of b_N
plt.figure(figsize=(12, 9))
plt.plot(np.arange(0.0, MaxIter + 0.1, 0.5), trace_b_i)
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Variational Inference', fontsize=20)
plt.title('$b_N$', loc='left')
plt.grid() # grid lines
plt.show()
#%%
## Visualize the evolution with an animation
# Additional library
import matplotlib.animation as animation
#%%
## Save the evolution of the approximate posterior as a gif image
# Figure size
fig = plt.figure(figsize=(12, 9))
# Frame-drawing callback (one frame per recorded half update)
def update_posterior(i):
    # Evaluate the i-th approximate joint posterior
    E_tau = trace_a_i[i] / trace_b_i[i] # expected value of tau
    posterior_dens = norm.pdf(
        x=mu_grid.flatten(),
        loc=trace_mu_i[i],
        scale=np.sqrt(1.0 / (trace_lambda_i[i] / E_tau * tau_grid.flatten() + 1e-7))
    )
    posterior_dens *= gamma.pdf(
        x=tau_grid.flatten(), a=trace_a_i[i], scale=1.0 / trace_b_i[i]
    )
    # Clear the previous frame
    plt.cla()
    # Draw the approximate posterior
    plt.scatter(x=mu_truth, y=tau_truth, color='red', s=100, marker='x', label='true val') # true value
    plt.contour(mu_grid, tau_grid, posterior_truth_dens.reshape(point_dims),
                alpha=0.5, linestyles='--') # exact posterior
    plt.contour(mu_grid, tau_grid, posterior_dens.reshape(point_dims)) # approximate posterior
    plt.xlabel('$\mu$')
    plt.ylabel('$\\tau$')
    plt.suptitle('Gaussian-Gamma Distribution', fontsize=20)
    plt.title('$iter:' + str(i * 0.5) + ', N=' + str(N) +
              ', \mu_N=' + str(np.round(trace_mu_i[i], 1))+
              ', \lambda_N=' + str(np.round(trace_lambda_i[i], 5)) +
              ', a_N=' + str(trace_a_i[i]) +
              ', b_N=' + str(np.round(trace_b_i[i], 1)) + '$',
              loc='left')
    plt.xlim(mu_truth - np.sqrt(1.0 / tau_truth), mu_truth + np.sqrt(1.0 / tau_truth))
    plt.ylim(0.0, 2.0 * tau_truth)
    plt.legend() # legend
    plt.grid() # grid lines
# Build and save the gif image
posterior_anime = animation.FuncAnimation(fig, update_posterior, frames=MaxIter * 2 + 1, interval=200)
posterior_anime.save("ch10_1_3_Posterior.gif")
#%%
print('end')
|
{"hexsha": "501c8510523a51bc7e4ac96ef1f47bc45f2777a4", "size": 11859, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code_Python/ch10_1_3.py", "max_stars_repo_name": "anemptyarchive/PRML", "max_stars_repo_head_hexsha": "58cbb35ae65d66b6faf436c70a6cbc9d54d4589f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code_Python/ch10_1_3.py", "max_issues_repo_name": "anemptyarchive/PRML", "max_issues_repo_head_hexsha": "58cbb35ae65d66b6faf436c70a6cbc9d54d4589f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code_Python/ch10_1_3.py", "max_forks_repo_name": "anemptyarchive/PRML", "max_forks_repo_head_hexsha": "58cbb35ae65d66b6faf436c70a6cbc9d54d4589f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8930131004, "max_line_length": 105, "alphanum_fraction": 0.6343705203, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4922}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import re
from os import walk
globaldict = dict()
def print_dict(d):
    """Dump every key/value pair of `d` to stdout, one per line."""
    for key, count in d.items():
        print(key, ":", count)
def read_data(textfile):
    """Count word frequencies in a text file.

    Words are lower-cased and stripped of non-alphanumeric characters
    (underscores survive, since \\W keeps them); tokens consisting solely
    of digits are skipped.

    :param textfile: path of the file to read
    :return: dict mapping word -> occurrence count
    """
    d = dict()
    # 'with' closes the file even on error (the original leaked the handle)
    with open(textfile) as f:
        text = f.read().split()
    for word in text:
        word = word.lower()
        # make sure that word has only letters and numbers and _
        word = re.sub(r'\W+', '', word)
        # check if word contains only numbers
        word_wo_numbers = re.sub(r'[0-9]+', '', word)
        if word_wo_numbers == "":
            continue
        # dict.get with a default replaces the explicit membership test
        d[word] = d.get(word, 0) + 1
    return d
# Trailing-'s' is NOT stripped after these letters (e.g. "bus", "gas", "is").
special_letters = ['a', 'e', 'i', 'o', 'u', 'y', 's']
def update_global_dict(d):
    """Merge a per-document word-count dict into `globaldict`.

    `globaldict` maps word -> [total occurrences, number of documents].
    Plural-looking words (length > 3, trailing 's' whose preceding letter
    is not in `special_letters`) are folded into their singular form.
    """
    for word, count in list(d.items()):
        # crude plural folding
        if len(word) > 3 and word.endswith("s") and word[-2] not in special_letters:
            word = word[:-1]
        entry = globaldict.get(word)
        if entry is None:
            globaldict[word] = [count, 1]
        else:
            entry[0] += count
            entry[1] += 1
def prepare_data(path, limit=0):
    """Scan the files directly under `path` (non-recursively) and fold each
    file's word counts into the global dictionary.

    :param path: directory containing the text files
    :param limit: maximum number of files to process (0 means all)
    """
    selected = []
    for (_dirpath, _dirnames, names) in walk(path):
        take = len(names) if limit == 0 else min(limit, len(names))
        selected.extend(names[:take])
        break  # only the top-level directory
    # update global dictionary with data from each file
    for name in selected:
        word_counts = read_data(path + '/' + name)
        update_global_dict(word_counts)
# prepare_data("./sport", 100)
# print_dict(globaldict)
# read_data()
|
{"hexsha": "bbd78fc5032f0369fa1fbb527bc627d38b5b3c60", "size": 2046, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/hi_clustering/prepare_data.py", "max_stars_repo_name": "piotrkoziar/AGH-python-labs", "max_stars_repo_head_hexsha": "a474c5f6ebe17b625d449ccbb0e6e9ef8a3755cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/hi_clustering/prepare_data.py", "max_issues_repo_name": "piotrkoziar/AGH-python-labs", "max_issues_repo_head_hexsha": "a474c5f6ebe17b625d449ccbb0e6e9ef8a3755cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:54:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:08:32.000Z", "max_forks_repo_path": "src/hi_clustering/prepare_data.py", "max_forks_repo_name": "piotrkoziar/AGH-python-labs", "max_forks_repo_head_hexsha": "a474c5f6ebe17b625d449ccbb0e6e9ef8a3755cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3571428571, "max_line_length": 64, "alphanum_fraction": 0.518572825, "include": true, "reason": "import numpy", "num_tokens": 526}
|
[STATEMENT]
lemma chain_subdiv_path_singleton:
shows "chain_subdiv_path \<gamma> {(1,\<gamma>)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. chain_subdiv_path \<gamma> {(1, \<gamma>)}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. chain_subdiv_path \<gamma> {(1, \<gamma>)}
[PROOF STEP]
have "rec_join [(1,\<gamma>)] = \<gamma>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rec_join [(1, \<gamma>)] = \<gamma>
[PROOF STEP]
by (simp add: joinpaths_def)
[PROOF STATE]
proof (state)
this:
rec_join [(1, \<gamma>)] = \<gamma>
goal (1 subgoal):
1. chain_subdiv_path \<gamma> {(1, \<gamma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
rec_join [(1, \<gamma>)] = \<gamma>
[PROOF STEP]
have "set [(1,\<gamma>)] = {(1, \<gamma>)}" "distinct [(1,\<gamma>)]" "rec_join [(1,\<gamma>)] = \<gamma>" "valid_chain_list [(1,\<gamma>)]"
[PROOF STATE]
proof (prove)
using this:
rec_join [(1, \<gamma>)] = \<gamma>
goal (1 subgoal):
1. (set [(1::'a, \<gamma>)] = {(1::'a, \<gamma>)} &&& distinct [(1::'b, \<gamma>)]) &&& rec_join [(1, \<gamma>)] = \<gamma> &&& valid_chain_list [(1, \<gamma>)]
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
set [(1::?'a, \<gamma>)] = {(1::?'a, \<gamma>)}
distinct [(1::?'b, \<gamma>)]
rec_join [(1, \<gamma>)] = \<gamma>
valid_chain_list [(1, \<gamma>)]
goal (1 subgoal):
1. chain_subdiv_path \<gamma> {(1, \<gamma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
set [(1::?'a, \<gamma>)] = {(1::?'a, \<gamma>)}
distinct [(1::?'b, \<gamma>)]
rec_join [(1, \<gamma>)] = \<gamma>
valid_chain_list [(1, \<gamma>)]
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
set [(1::?'a, \<gamma>)] = {(1::?'a, \<gamma>)}
distinct [(1::?'b, \<gamma>)]
rec_join [(1, \<gamma>)] = \<gamma>
valid_chain_list [(1, \<gamma>)]
goal (1 subgoal):
1. chain_subdiv_path \<gamma> {(1, \<gamma>)}
[PROOF STEP]
by (metis (no_types) chain_subdiv_path.intros)
[PROOF STATE]
proof (state)
this:
chain_subdiv_path \<gamma> {(1, \<gamma>)}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 910, "file": "Green_Paths", "length": 10}
|
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
inst✝ : SemilatticeSup M
w : σ → M
p : MvPolynomial σ R
⊢ weightedTotalDegree' w p = ⊥ ↔ p = 0
[PROOFSTEP]
simp only [weightedTotalDegree', Finset.sup_eq_bot_iff, mem_support_iff, WithBot.coe_ne_bot, MvPolynomial.eq_zero_iff]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
inst✝ : SemilatticeSup M
w : σ → M
p : MvPolynomial σ R
⊢ (∀ (s : σ →₀ ℕ), coeff s p ≠ 0 → False) ↔ ∀ (d : σ →₀ ℕ), coeff d p = 0
[PROOFSTEP]
exact forall_congr' fun _ => Classical.not_not
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
inst✝ : SemilatticeSup M
w : σ → M
⊢ weightedTotalDegree' w 0 = ⊥
[PROOFSTEP]
simp only [weightedTotalDegree', support_zero, Finset.sup_empty]
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : p ≠ 0
⊢ weightedTotalDegree' w p = ↑(weightedTotalDegree w p)
[PROOFSTEP]
rw [Ne.def, ← weightedTotalDegree'_eq_bot_iff w p, ← Ne.def, WithBot.ne_bot_iff_exists] at hp
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : ∃ a, ↑a = weightedTotalDegree' w p
⊢ weightedTotalDegree' w p = ↑(weightedTotalDegree w p)
[PROOFSTEP]
obtain ⟨m, hm⟩ := hp
[GOAL]
case intro
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
⊢ weightedTotalDegree' w p = ↑(weightedTotalDegree w p)
[PROOFSTEP]
apply le_antisymm
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
⊢ weightedTotalDegree' w p ≤ ↑(weightedTotalDegree w p)
[PROOFSTEP]
simp only [weightedTotalDegree, weightedTotalDegree', Finset.sup_le_iff, WithBot.coe_le_coe]
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
⊢ ∀ (b : σ →₀ ℕ), b ∈ support p → ↑(weightedDegree' w) b ≤ sup (support p) fun s => ↑(weightedDegree' w) s
[PROOFSTEP]
intro b
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
b : σ →₀ ℕ
⊢ b ∈ support p → ↑(weightedDegree' w) b ≤ sup (support p) fun s => ↑(weightedDegree' w) s
[PROOFSTEP]
exact Finset.le_sup
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
⊢ ↑(weightedTotalDegree w p) ≤ weightedTotalDegree' w p
[PROOFSTEP]
simp only [weightedTotalDegree]
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
⊢ ↑(sup (support p) fun s => ↑(weightedDegree' w) s) ≤ weightedTotalDegree' w p
[PROOFSTEP]
have hm' : weightedTotalDegree' w p ≤ m := le_of_eq hm.symm
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
hm' : weightedTotalDegree' w p ≤ ↑m
⊢ ↑(sup (support p) fun s => ↑(weightedDegree' w) s) ≤ weightedTotalDegree' w p
[PROOFSTEP]
rw [← hm]
[GOAL]
case intro.a
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
m : M
hm : ↑m = weightedTotalDegree' w p
hm' : weightedTotalDegree' w p ≤ ↑m
⊢ ↑(sup (support p) fun s => ↑(weightedDegree' w) s) ≤ ↑m
[PROOFSTEP]
simpa [weightedTotalDegree'] using hm'
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
⊢ weightedTotalDegree w 0 = ⊥
[PROOFSTEP]
simp only [weightedTotalDegree, support_zero, Finset.sup_empty]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c (a + b) ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
rw [coeff_add] at hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c a + coeff c b ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
obtain h | h : coeff c a ≠ 0 ∨ coeff c b ≠ 0 := by
contrapose! hc
simp only [hc, add_zero]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c a + coeff c b ≠ 0
⊢ coeff c a ≠ 0 ∨ coeff c b ≠ 0
[PROOFSTEP]
contrapose! hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c a = 0 ∧ coeff c b = 0
⊢ coeff c a + coeff c b = 0
[PROOFSTEP]
simp only [hc, add_zero]
[GOAL]
case inl
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c a + coeff c b ≠ 0
h : coeff c a ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
exact ha h
[GOAL]
case inr
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
a b : MvPolynomial σ R
ha : a ∈ {x | IsWeightedHomogeneous w x m}
hb : b ∈ {x | IsWeightedHomogeneous w x m}
c : σ →₀ ℕ
hc : coeff c a + coeff c b ≠ 0
h : coeff c b ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
exact hb h
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
r : R
a : MvPolynomial σ R
ha :
a ∈
{
toAddSubsemigroup :=
{ carrier := {x | IsWeightedHomogeneous w x m},
add_mem' :=
(_ :
∀ {a b : MvPolynomial σ R},
a ∈ {x | IsWeightedHomogeneous w x m} →
b ∈ {x | IsWeightedHomogeneous w x m} →
∀ (c : σ →₀ ℕ), coeff c (a + b) ≠ 0 → ↑(weightedDegree' w) c = m) },
zero_mem' := (_ : ∀ (d : σ →₀ ℕ), coeff d 0 ≠ 0 → ↑(weightedDegree' w) d = m) }.toAddSubsemigroup.carrier
c : σ →₀ ℕ
hc : coeff c (r • a) ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
rw [coeff_smul] at hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
r : R
a : MvPolynomial σ R
ha :
a ∈
{
toAddSubsemigroup :=
{ carrier := {x | IsWeightedHomogeneous w x m},
add_mem' :=
(_ :
∀ {a b : MvPolynomial σ R},
a ∈ {x | IsWeightedHomogeneous w x m} →
b ∈ {x | IsWeightedHomogeneous w x m} →
∀ (c : σ →₀ ℕ), coeff c (a + b) ≠ 0 → ↑(weightedDegree' w) c = m) },
zero_mem' := (_ : ∀ (d : σ →₀ ℕ), coeff d 0 ≠ 0 → ↑(weightedDegree' w) d = m) }.toAddSubsemigroup.carrier
c : σ →₀ ℕ
hc : r • coeff c a ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
exact ha (right_ne_zero_of_mul hc)
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
⊢ weightedHomogeneousSubmodule R w m = supported R R {d | ↑(weightedDegree' w) d = m}
[PROOFSTEP]
ext x
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
x : MvPolynomial σ R
⊢ x ∈ weightedHomogeneousSubmodule R w m ↔ x ∈ supported R R {d | ↑(weightedDegree' w) d = m}
[PROOFSTEP]
rw [mem_supported, Set.subset_def]
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
x : MvPolynomial σ R
⊢ x ∈ weightedHomogeneousSubmodule R w m ↔ ∀ (x_1 : σ →₀ ℕ), x_1 ∈ ↑x.support → x_1 ∈ {d | ↑(weightedDegree' w) d = m}
[PROOFSTEP]
simp only [Finsupp.mem_support_iff, mem_coe]
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m : M
x : MvPolynomial σ R
⊢ x ∈ weightedHomogeneousSubmodule R w m ↔ ∀ (x_1 : σ →₀ ℕ), ↑x x_1 ≠ 0 → x_1 ∈ {d | ↑(weightedDegree' w) d = m}
[PROOFSTEP]
rfl
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
⊢ weightedHomogeneousSubmodule R w m * weightedHomogeneousSubmodule R w n ≤ weightedHomogeneousSubmodule R w (m + n)
[PROOFSTEP]
classical
rw [Submodule.mul_le]
intro φ hφ ψ hψ c hc
rw [coeff_mul] at hc
obtain ⟨⟨d, e⟩, hde, H⟩ := Finset.exists_ne_zero_of_sum_ne_zero hc
have aux : coeff d φ ≠ 0 ∧ coeff e ψ ≠ 0 := by
contrapose! H
by_cases h : coeff d φ = 0 <;> simp_all only [Ne.def, not_false_iff, zero_mul, mul_zero]
rw [← Finsupp.mem_antidiagonal.mp hde, ← hφ aux.1, ← hψ aux.2, map_add]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
⊢ weightedHomogeneousSubmodule R w m * weightedHomogeneousSubmodule R w n ≤ weightedHomogeneousSubmodule R w (m + n)
[PROOFSTEP]
rw [Submodule.mul_le]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
⊢ ∀ (m_1 : MvPolynomial σ R),
m_1 ∈ weightedHomogeneousSubmodule R w m →
∀ (n_1 : MvPolynomial σ R),
n_1 ∈ weightedHomogeneousSubmodule R w n → m_1 * n_1 ∈ weightedHomogeneousSubmodule R w (m + n)
[PROOFSTEP]
intro φ hφ ψ hψ c hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc : coeff c (φ * ψ) ≠ 0
⊢ ↑(weightedDegree' w) c = m + n
[PROOFSTEP]
rw [coeff_mul] at hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
⊢ ↑(weightedDegree' w) c = m + n
[PROOFSTEP]
obtain ⟨⟨d, e⟩, hde, H⟩ := Finset.exists_ne_zero_of_sum_ne_zero hc
[GOAL]
case intro.mk.intro
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff (d, e).fst φ * coeff (d, e).snd ψ ≠ 0
⊢ ↑(weightedDegree' w) c = m + n
[PROOFSTEP]
have aux : coeff d φ ≠ 0 ∧ coeff e ψ ≠ 0 := by
contrapose! H
by_cases h : coeff d φ = 0 <;> simp_all only [Ne.def, not_false_iff, zero_mul, mul_zero]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff (d, e).fst φ * coeff (d, e).snd ψ ≠ 0
⊢ coeff d φ ≠ 0 ∧ coeff e ψ ≠ 0
[PROOFSTEP]
contrapose! H
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff d φ ≠ 0 → coeff e ψ = 0
⊢ coeff d φ * coeff e ψ = 0
[PROOFSTEP]
by_cases h : coeff d φ = 0
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff d φ ≠ 0 → coeff e ψ = 0
h : coeff d φ = 0
⊢ coeff d φ * coeff e ψ = 0
[PROOFSTEP]
simp_all only [Ne.def, not_false_iff, zero_mul, mul_zero]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff d φ ≠ 0 → coeff e ψ = 0
h : ¬coeff d φ = 0
⊢ coeff d φ * coeff e ψ = 0
[PROOFSTEP]
simp_all only [Ne.def, not_false_iff, zero_mul, mul_zero]
[GOAL]
case intro.mk.intro
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
m n : M
φ : MvPolynomial σ R
hφ : φ ∈ weightedHomogeneousSubmodule R w m
ψ : MvPolynomial σ R
hψ : ψ ∈ weightedHomogeneousSubmodule R w n
c : σ →₀ ℕ
hc✝ : coeff c (φ * ψ) ≠ 0
hc : ∑ x in antidiagonal c, coeff x.fst φ * coeff x.snd ψ ≠ 0
d e : σ →₀ ℕ
hde : (d, e) ∈ antidiagonal c
H : coeff (d, e).fst φ * coeff (d, e).snd ψ ≠ 0
aux : coeff d φ ≠ 0 ∧ coeff e ψ ≠ 0
⊢ ↑(weightedDegree' w) c = m + n
[PROOFSTEP]
rw [← Finsupp.mem_antidiagonal.mp hde, ← hφ aux.1, ← hψ aux.2, map_add]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
⊢ IsWeightedHomogeneous w (↑(monomial d) r) m
[PROOFSTEP]
classical
intro c hc
rw [coeff_monomial] at hc
split_ifs at hc with h
· subst c
exact hm
· contradiction
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
⊢ IsWeightedHomogeneous w (↑(monomial d) r) m
[PROOFSTEP]
intro c hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
c : σ →₀ ℕ
hc : coeff c (↑(monomial d) r) ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
rw [coeff_monomial] at hc
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
c : σ →₀ ℕ
hc✝ : coeff c (↑(monomial d) r) ≠ 0
hc : (if d = c then r else 0) ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
split_ifs at hc with h
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
c : σ →₀ ℕ
hc✝ : coeff c (↑(monomial d) r) ≠ 0
h : d = c
hc : r ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
subst c
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
hc✝ : r ≠ 0
hc : coeff d (↑(monomial d) r) ≠ 0
⊢ ↑(weightedDegree' w) d = m
[PROOFSTEP]
exact hm
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
d : σ →₀ ℕ
r : R
m : M
hm : ↑(weightedDegree' w) d = m
c : σ →₀ ℕ
hc✝ : coeff c (↑(monomial d) r) ≠ 0
h : ¬d = c
hc : 0 ≠ 0
⊢ ↑(weightedDegree' w) c = m
[PROOFSTEP]
contradiction
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : weightedTotalDegree w p = ⊥
⊢ IsWeightedHomogeneous w p ⊥
[PROOFSTEP]
intro d hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : weightedTotalDegree w p = ⊥
d : σ →₀ ℕ
hd : coeff d p ≠ 0
⊢ ↑(weightedDegree' w) d = ⊥
[PROOFSTEP]
have h := weightedTotalDegree_coe w p (MvPolynomial.ne_zero_iff.mpr ⟨d, hd⟩)
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : weightedTotalDegree w p = ⊥
d : σ →₀ ℕ
hd : coeff d p ≠ 0
h : weightedTotalDegree' w p = ↑(weightedTotalDegree w p)
⊢ ↑(weightedDegree' w) d = ⊥
[PROOFSTEP]
simp only [weightedTotalDegree', hp] at h
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : weightedTotalDegree w p = ⊥
d : σ →₀ ℕ
hd : coeff d p ≠ 0
h : (sup (support p) fun s => ↑(↑(weightedDegree' w) s)) = ↑⊥
⊢ ↑(weightedDegree' w) d = ⊥
[PROOFSTEP]
rw [eq_bot_iff, ← WithBot.coe_le_coe, ← h]
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
w : σ → M
p : MvPolynomial σ R
hp : weightedTotalDegree w p = ⊥
d : σ →₀ ℕ
hd : coeff d p ≠ 0
h : (sup (support p) fun s => ↑(↑(weightedDegree' w) s)) = ↑⊥
⊢ ↑(↑(weightedDegree' w) d) ≤ sup (support p) fun s => ↑(↑(weightedDegree' w) s)
[PROOFSTEP]
apply Finset.le_sup (mem_support_iff.mpr hd)
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
i : σ
⊢ IsWeightedHomogeneous w (X i) (w i)
[PROOFSTEP]
apply isWeightedHomogeneous_monomial
[GOAL]
case hm
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
i : σ
⊢ ↑(weightedDegree' w) (Finsupp.single i 1) = w i
[PROOFSTEP]
simp only [weightedDegree', LinearMap.toAddMonoidHom_coe, total_single, one_nsmul]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
d : σ →₀ ℕ
hd : ↑(weightedDegree' w) d ≠ n
⊢ coeff d φ = 0
[PROOFSTEP]
have aux := mt (@hφ d) hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
d : σ →₀ ℕ
hd : ↑(weightedDegree' w) d ≠ n
aux : ¬coeff d φ ≠ 0
⊢ coeff d φ = 0
[PROOFSTEP]
rwa [Classical.not_not] at aux
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
w : σ → M
hφ : φ ≠ 0
hm : IsWeightedHomogeneous w φ m
hn : IsWeightedHomogeneous w φ n
⊢ m = n
[PROOFSTEP]
obtain ⟨d, hd⟩ : ∃ d, coeff d φ ≠ 0 := exists_coeff_ne_zero hφ
[GOAL]
case intro
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
w : σ → M
hφ : φ ≠ 0
hm : IsWeightedHomogeneous w φ m
hn : IsWeightedHomogeneous w φ n
d : σ →₀ ℕ
hd : coeff d φ ≠ 0
⊢ m = n
[PROOFSTEP]
rw [← hm hd, ← hn hd]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
⊢ (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
[PROOFSTEP]
classical
refine Finset.induction_on s ?_ ?_
· intro
simp only [isWeightedHomogeneous_one, Finset.sum_empty, Finset.prod_empty]
· intro i s his IH h
simp only [his, Finset.prod_insert, Finset.sum_insert, not_false_iff]
apply (h i (Finset.mem_insert_self _ _)).mul (IH _)
intro j hjs
exact h j (Finset.mem_insert_of_mem hjs)
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
⊢ (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
[PROOFSTEP]
refine Finset.induction_on s ?_ ?_
[GOAL]
case refine_1
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
⊢ (∀ (i : ι), i ∈ ∅ → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in ∅, φ i) (∑ i in ∅, n i)
[PROOFSTEP]
intro
[GOAL]
case refine_1
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
a✝ : ∀ (i : ι), i ∈ ∅ → IsWeightedHomogeneous w (φ i) (n i)
⊢ IsWeightedHomogeneous w (∏ i in ∅, φ i) (∑ i in ∅, n i)
[PROOFSTEP]
simp only [isWeightedHomogeneous_one, Finset.sum_empty, Finset.prod_empty]
[GOAL]
case refine_2
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
⊢ ∀ ⦃a : ι⦄ {s : Finset ι},
¬a ∈ s →
((∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) →
IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)) →
(∀ (i : ι), i ∈ insert a s → IsWeightedHomogeneous w (φ i) (n i)) →
IsWeightedHomogeneous w (∏ i in insert a s, φ i) (∑ i in insert a s, n i)
[PROOFSTEP]
intro i s his IH h
[GOAL]
case refine_2
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s✝ : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
i : ι
s : Finset ι
his : ¬i ∈ s
IH : (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
h : ∀ (i_1 : ι), i_1 ∈ insert i s → IsWeightedHomogeneous w (φ i_1) (n i_1)
⊢ IsWeightedHomogeneous w (∏ i in insert i s, φ i) (∑ i in insert i s, n i)
[PROOFSTEP]
simp only [his, Finset.prod_insert, Finset.sum_insert, not_false_iff]
[GOAL]
case refine_2
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s✝ : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
i : ι
s : Finset ι
his : ¬i ∈ s
IH : (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
h : ∀ (i_1 : ι), i_1 ∈ insert i s → IsWeightedHomogeneous w (φ i_1) (n i_1)
⊢ IsWeightedHomogeneous w (φ i * ∏ i in s, φ i) (n i + ∑ i in s, n i)
[PROOFSTEP]
apply (h i (Finset.mem_insert_self _ _)).mul (IH _)
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s✝ : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
i : ι
s : Finset ι
his : ¬i ∈ s
IH : (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
h : ∀ (i_1 : ι), i_1 ∈ insert i s → IsWeightedHomogeneous w (φ i_1) (n i_1)
⊢ ∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)
[PROOFSTEP]
intro j hjs
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
φ✝ ψ : MvPolynomial σ R
m n✝ : M
ι : Type u_4
s✝ : Finset ι
φ : ι → MvPolynomial σ R
n : ι → M
w : σ → M
i : ι
s : Finset ι
his : ¬i ∈ s
IH : (∀ (i : ι), i ∈ s → IsWeightedHomogeneous w (φ i) (n i)) → IsWeightedHomogeneous w (∏ i in s, φ i) (∑ i in s, n i)
h : ∀ (i_1 : ι), i_1 ∈ insert i s → IsWeightedHomogeneous w (φ i_1) (n i_1)
j : ι
hjs : j ∈ s
⊢ IsWeightedHomogeneous w (φ j) (n j)
[PROOFSTEP]
exact h j (Finset.mem_insert_of_mem hjs)
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
⊢ weightedTotalDegree' w φ = ↑n
[PROOFSTEP]
simp only [weightedTotalDegree']
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
⊢ (sup (support φ) fun s => ↑(↑(weightedDegree' w) s)) = ↑n
[PROOFSTEP]
apply le_antisymm
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
⊢ (sup (support φ) fun s => ↑(↑(weightedDegree' w) s)) ≤ ↑n
[PROOFSTEP]
simp only [Finset.sup_le_iff, mem_support_iff, WithBot.coe_le_coe]
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
⊢ ∀ (b : σ →₀ ℕ), coeff b φ ≠ 0 → ↑(weightedDegree' w) b ≤ n
[PROOFSTEP]
exact fun d hd => le_of_eq (hφ hd)
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
⊢ ↑n ≤ sup (support φ) fun s => ↑(↑(weightedDegree' w) s)
[PROOFSTEP]
obtain ⟨d, hd⟩ : ∃ d, coeff d φ ≠ 0 := exists_coeff_ne_zero h
[GOAL]
case a.intro
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
d : σ →₀ ℕ
hd : coeff d φ ≠ 0
⊢ ↑n ≤ sup (support φ) fun s => ↑(↑(weightedDegree' w) s)
[PROOFSTEP]
simp only [← hφ hd, Finsupp.sum]
[GOAL]
case a.intro
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
d : σ →₀ ℕ
hd : coeff d φ ≠ 0
⊢ ↑(↑(weightedDegree' w) d) ≤ sup (support φ) fun s => ↑(↑(weightedDegree' w) s)
[PROOFSTEP]
replace hd := Finsupp.mem_support_iff.mpr hd
[GOAL]
case a.intro
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
φ ψ : MvPolynomial σ R
m n : M
inst✝ : SemilatticeSup M
w : σ → M
hφ : IsWeightedHomogeneous w φ n
h : φ ≠ 0
d : σ →₀ ℕ
hd : d ∈ φ.support
⊢ ↑(↑(weightedDegree' w) d) ≤ sup (support φ) fun s => ↑(↑(weightedDegree' w) s)
[PROOFSTEP]
apply Finset.le_sup hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ IsWeightedHomogeneous w (↑(weightedHomogeneousComponent w n) φ) n
[PROOFSTEP]
classical
intro d hd
contrapose! hd
rw [coeff_weightedHomogeneousComponent, if_neg hd]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ IsWeightedHomogeneous w (↑(weightedHomogeneousComponent w n) φ) n
[PROOFSTEP]
intro d hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hd : coeff d (↑(weightedHomogeneousComponent w n) φ) ≠ 0
⊢ ↑(weightedDegree' w) d = n
[PROOFSTEP]
contrapose! hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hd : ↑(weightedDegree' w) d ≠ n
⊢ coeff d (↑(weightedHomogeneousComponent w n) φ) = 0
[PROOFSTEP]
rw [coeff_weightedHomogeneousComponent, if_neg hd]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
n : M
r : R
⊢ ↑(weightedHomogeneousComponent w n) (↑C r * φ) = ↑C r * ↑(weightedHomogeneousComponent w n) φ
[PROOFSTEP]
simp only [C_mul', LinearMap.map_smul]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
⊢ ↑(weightedHomogeneousComponent w n) φ = 0
[PROOFSTEP]
classical
rw [weightedHomogeneousComponent_apply, sum_eq_zero]
intro d hd; rw [mem_filter] at hd
exfalso; exact h _ hd.1 hd.2
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
⊢ ↑(weightedHomogeneousComponent w n) φ = 0
[PROOFSTEP]
rw [weightedHomogeneousComponent_apply, sum_eq_zero]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
⊢ ∀ (x : σ →₀ ℕ), x ∈ Finset.filter (fun d => ↑(weightedDegree' w) d = n) (support φ) → ↑(monomial x) (coeff x φ) = 0
[PROOFSTEP]
intro d hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
d : σ →₀ ℕ
hd : d ∈ Finset.filter (fun d => ↑(weightedDegree' w) d = n) (support φ)
⊢ ↑(monomial d) (coeff d φ) = 0
[PROOFSTEP]
rw [mem_filter] at hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ ↑(monomial d) (coeff d φ) = 0
[PROOFSTEP]
exfalso
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
h : ∀ (d : σ →₀ ℕ), d ∈ support φ → ↑(weightedDegree' w) d ≠ n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ False
[PROOFSTEP]
exact h _ hd.1 hd.2
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
⊢ ↑(weightedHomogeneousComponent w n) φ = 0
[PROOFSTEP]
classical
rw [weightedHomogeneousComponent_apply, sum_eq_zero]
intro d hd
rw [Finset.mem_filter] at hd
exfalso
apply lt_irrefl n
nth_rw 1 [← hd.2]
exact lt_of_le_of_lt (le_weightedTotalDegree w hd.1) h
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
⊢ ↑(weightedHomogeneousComponent w n) φ = 0
[PROOFSTEP]
rw [weightedHomogeneousComponent_apply, sum_eq_zero]
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
⊢ ∀ (x : σ →₀ ℕ), x ∈ Finset.filter (fun d => ↑(weightedDegree' w) d = n) (support φ) → ↑(monomial x) (coeff x φ) = 0
[PROOFSTEP]
intro d hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
d : σ →₀ ℕ
hd : d ∈ Finset.filter (fun d => ↑(weightedDegree' w) d = n) (support φ)
⊢ ↑(monomial d) (coeff d φ) = 0
[PROOFSTEP]
rw [Finset.mem_filter] at hd
[GOAL]
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ ↑(monomial d) (coeff d φ) = 0
[PROOFSTEP]
exfalso
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ False
[PROOFSTEP]
apply lt_irrefl n
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ n < n
[PROOFSTEP]
nth_rw 1 [← hd.2]
[GOAL]
case h
R : Type u_1
M : Type u_2
inst✝³ : CommSemiring R
σ : Type u_3
inst✝² : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
inst✝¹ : SemilatticeSup M
inst✝ : OrderBot M
h : weightedTotalDegree w φ < n
d : σ →₀ ℕ
hd : d ∈ support φ ∧ ↑(weightedDegree' w) d = n
⊢ ↑(weightedDegree' w) d < n
[PROOFSTEP]
exact lt_of_le_of_lt (le_weightedTotalDegree w hd.1) h
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)
[PROOFSTEP]
suffices (Function.support fun m => weightedHomogeneousComponent w m φ) ⊆ (fun d => weightedDegree' w d) '' φ.support by
exact Finite.subset ((fun d : σ →₀ ℕ => (weightedDegree' w) d) '' ↑(support φ)).toFinite this
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
this :
(Function.support fun m => ↑(weightedHomogeneousComponent w m) φ) ⊆ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
⊢ Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)
[PROOFSTEP]
exact Finite.subset ((fun d : σ →₀ ℕ => (weightedDegree' w) d) '' ↑(support φ)).toFinite this
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ) ⊆ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
[PROOFSTEP]
intro m hm
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
m : M
hm : m ∈ Function.support fun m => ↑(weightedHomogeneousComponent w m) φ
⊢ m ∈ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
[PROOFSTEP]
by_contra hm'
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
m : M
hm : m ∈ Function.support fun m => ↑(weightedHomogeneousComponent w m) φ
hm' : ¬m ∈ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
⊢ False
[PROOFSTEP]
apply hm
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
m : M
hm : m ∈ Function.support fun m => ↑(weightedHomogeneousComponent w m) φ
hm' : ¬m ∈ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
⊢ (fun m => ↑(weightedHomogeneousComponent w m) φ) m = 0
[PROOFSTEP]
simp only [mem_support, Ne.def] at hm
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
m : M
hm' : ¬m ∈ (fun d => ↑(weightedDegree' w) d) '' ↑(support φ)
hm : ¬↑(weightedHomogeneousComponent w m) φ = 0
⊢ (fun m => ↑(weightedHomogeneousComponent w m) φ) m = 0
[PROOFSTEP]
simp only [Set.mem_image, not_exists, not_and] at hm'
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
m : M
hm : ¬↑(weightedHomogeneousComponent w m) φ = 0
hm' : ∀ (x : σ →₀ ℕ), x ∈ ↑(support φ) → ¬↑(weightedDegree' w) x = m
⊢ (fun m => ↑(weightedHomogeneousComponent w m) φ) m = 0
[PROOFSTEP]
exact weightedHomogeneousComponent_eq_zero' m φ hm'
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ ∑ᶠ (m : M), ↑(weightedHomogeneousComponent w m) φ = φ
[PROOFSTEP]
classical
rw [finsum_eq_sum _ (weightedHomogeneousComponent_finsupp φ)]
ext1 d
simp only [coeff_sum, coeff_weightedHomogeneousComponent]
rw [Finset.sum_eq_single (weightedDegree' w d)]
· rw [if_pos rfl]
· intro m _ hm'
rw [if_neg hm'.symm]
· intro hm
rw [if_pos rfl]
simp only [Finite.mem_toFinset, mem_support, Ne.def, Classical.not_not] at hm
have := coeff_weightedHomogeneousComponent (w := w) (weightedDegree' w d) φ d
rw [hm, if_pos rfl, coeff_zero] at this
exact this.symm
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ ∑ᶠ (m : M), ↑(weightedHomogeneousComponent w m) φ = φ
[PROOFSTEP]
rw [finsum_eq_sum _ (weightedHomogeneousComponent_finsupp φ)]
[GOAL]
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
⊢ ∑ i in Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)),
↑(weightedHomogeneousComponent w i) φ =
φ
[PROOFSTEP]
ext1 d
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
⊢ coeff d
(∑ i in Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)),
↑(weightedHomogeneousComponent w i) φ) =
coeff d φ
[PROOFSTEP]
simp only [coeff_sum, coeff_weightedHomogeneousComponent]
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
⊢ (∑ x in Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)),
if ↑(weightedDegree' w) d = x then coeff d φ else 0) =
coeff d φ
[PROOFSTEP]
rw [Finset.sum_eq_single (weightedDegree' w d)]
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
⊢ (if ↑(weightedDegree' w) d = ↑(weightedDegree' w) d then coeff d φ else 0) = coeff d φ
[PROOFSTEP]
rw [if_pos rfl]
[GOAL]
case a.h₀
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
⊢ ∀ (b : (fun x => M) d),
b ∈ Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)) →
b ≠ ↑(weightedDegree' w) d → (if ↑(weightedDegree' w) d = b then coeff d φ else 0) = 0
[PROOFSTEP]
intro m _ hm'
[GOAL]
case a.h₀
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
m : M
a✝ : m ∈ Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ))
hm' : m ≠ ↑(weightedDegree' w) d
⊢ (if ↑(weightedDegree' w) d = m then coeff d φ else 0) = 0
[PROOFSTEP]
rw [if_neg hm'.symm]
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
⊢ ¬↑(weightedDegree' w) d ∈
Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ)) →
(if ↑(weightedDegree' w) d = ↑(weightedDegree' w) d then coeff d φ else 0) = 0
[PROOFSTEP]
intro hm
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hm :
¬↑(weightedDegree' w) d ∈
Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ))
⊢ (if ↑(weightedDegree' w) d = ↑(weightedDegree' w) d then coeff d φ else 0) = 0
[PROOFSTEP]
rw [if_pos rfl]
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hm :
¬↑(weightedDegree' w) d ∈
Finite.toFinset (_ : Set.Finite (Function.support fun m => ↑(weightedHomogeneousComponent w m) φ))
⊢ coeff d φ = 0
[PROOFSTEP]
simp only [Finite.mem_toFinset, mem_support, Ne.def, Classical.not_not] at hm
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hm : ↑(weightedHomogeneousComponent w (↑(weightedDegree' w) d)) φ = 0
⊢ coeff d φ = 0
[PROOFSTEP]
have := coeff_weightedHomogeneousComponent (w := w) (weightedDegree' w d) φ d
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hm : ↑(weightedHomogeneousComponent w (↑(weightedDegree' w) d)) φ = 0
this :
coeff d (↑(weightedHomogeneousComponent w (↑(weightedDegree' w) d)) φ) =
if ↑(weightedDegree' w) d = ↑(weightedDegree' w) d then coeff d φ else 0
⊢ coeff d φ = 0
[PROOFSTEP]
rw [hm, if_pos rfl, coeff_zero] at this
[GOAL]
case a.h₁
R : Type u_1
M : Type u_2
inst✝¹ : CommSemiring R
σ : Type u_3
inst✝ : AddCommMonoid M
w : σ → M
n : M
φ ψ : MvPolynomial σ R
d : σ →₀ ℕ
hm : ↑(weightedHomogeneousComponent w (↑(weightedDegree' w) d)) φ = 0
this : 0 = coeff d φ
⊢ coeff d φ = 0
[PROOFSTEP]
exact this.symm
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : p ∈ weightedHomogeneousSubmodule R w n
⊢ ↑(weightedHomogeneousComponent w m) p = if m = n then p else 0
[PROOFSTEP]
simp only [mem_weightedHomogeneousSubmodule] at h
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
⊢ ↑(weightedHomogeneousComponent w m) p = if m = n then p else 0
[PROOFSTEP]
ext x
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
⊢ coeff x (↑(weightedHomogeneousComponent w m) p) = coeff x (if m = n then p else 0)
[PROOFSTEP]
rw [coeff_weightedHomogeneousComponent]
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
⊢ (if ↑(weightedDegree' w) x = m then coeff x p else 0) = coeff x (if m = n then p else 0)
[PROOFSTEP]
by_cases zero_coeff : coeff x p = 0
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
⊢ (if ↑(weightedDegree' w) x = m then coeff x p else 0) = coeff x (if m = n then p else 0)
[PROOFSTEP]
split_ifs
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ↑(weightedDegree' w) x = m
h✝ : m = n
⊢ coeff x p = coeff x p
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ↑(weightedDegree' w) x = m
h✝ : ¬m = n
⊢ coeff x p = coeff x 0
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ¬↑(weightedDegree' w) x = m
h✝ : m = n
⊢ 0 = coeff x p
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ¬↑(weightedDegree' w) x = m
h✝ : ¬m = n
⊢ 0 = coeff x 0
[PROOFSTEP]
all_goals simp only [zero_coeff, coeff_zero]
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ↑(weightedDegree' w) x = m
h✝ : m = n
⊢ coeff x p = coeff x p
[PROOFSTEP]
simp only [zero_coeff, coeff_zero]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ↑(weightedDegree' w) x = m
h✝ : ¬m = n
⊢ coeff x p = coeff x 0
[PROOFSTEP]
simp only [zero_coeff, coeff_zero]
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ¬↑(weightedDegree' w) x = m
h✝ : m = n
⊢ 0 = coeff x p
[PROOFSTEP]
simp only [zero_coeff, coeff_zero]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : coeff x p = 0
h✝¹ : ¬↑(weightedDegree' w) x = m
h✝ : ¬m = n
⊢ 0 = coeff x 0
[PROOFSTEP]
simp only [zero_coeff, coeff_zero]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : ¬coeff x p = 0
⊢ (if ↑(weightedDegree' w) x = m then coeff x p else 0) = coeff x (if m = n then p else 0)
[PROOFSTEP]
rw [h zero_coeff]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : ¬coeff x p = 0
⊢ (if n = m then coeff x p else 0) = coeff x (if m = n then p else 0)
[PROOFSTEP]
simp only [show n = m ↔ m = n from eq_comm]
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : ¬coeff x p = 0
⊢ (if m = n then coeff x p else 0) = coeff x (if m = n then p else 0)
[PROOFSTEP]
split_ifs with h1
[GOAL]
case pos
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : ¬coeff x p = 0
h1 : m = n
⊢ coeff x p = coeff x p
[PROOFSTEP]
rfl
[GOAL]
case neg
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : AddCommMonoid M
w : σ → M
n✝ : M
φ ψ : MvPolynomial σ R
inst✝ : DecidableEq M
m n : M
p : MvPolynomial σ R
h : IsWeightedHomogeneous w p n
x : σ →₀ ℕ
zero_coeff : ¬coeff x p = 0
h1 : ¬m = n
⊢ 0 = coeff x 0
[PROOFSTEP]
simp only [coeff_zero]
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
⊢ ↑(weightedHomogeneousComponent w 0) φ = ↑C (coeff 0 φ)
[PROOFSTEP]
classical
ext1 d
rcases Classical.em (d = 0) with (rfl | hd)
· simp only [coeff_weightedHomogeneousComponent, if_pos, map_zero, coeff_zero_C]
· rw [coeff_weightedHomogeneousComponent, if_neg, coeff_C, if_neg (Ne.symm hd)]
simp only [weightedDegree', LinearMap.toAddMonoidHom_coe, Finsupp.total_apply, Finsupp.sum, sum_eq_zero_iff,
Finsupp.mem_support_iff, Ne.def, smul_eq_zero, not_forall, not_or, and_self_left, exists_prop]
simp only [FunLike.ext_iff, Finsupp.coe_zero, Pi.zero_apply, not_forall] at hd
obtain ⟨i, hi⟩ := hd
exact ⟨i, hi, hw i⟩
[GOAL]
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
⊢ ↑(weightedHomogeneousComponent w 0) φ = ↑C (coeff 0 φ)
[PROOFSTEP]
ext1 d
[GOAL]
case a
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
⊢ coeff d (↑(weightedHomogeneousComponent w 0) φ) = coeff d (↑C (coeff 0 φ))
[PROOFSTEP]
rcases Classical.em (d = 0) with (rfl | hd)
[GOAL]
case a.inl
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
⊢ coeff 0 (↑(weightedHomogeneousComponent w 0) φ) = coeff 0 (↑C (coeff 0 φ))
[PROOFSTEP]
simp only [coeff_weightedHomogeneousComponent, if_pos, map_zero, coeff_zero_C]
[GOAL]
case a.inr
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
hd : ¬d = 0
⊢ coeff d (↑(weightedHomogeneousComponent w 0) φ) = coeff d (↑C (coeff 0 φ))
[PROOFSTEP]
rw [coeff_weightedHomogeneousComponent, if_neg, coeff_C, if_neg (Ne.symm hd)]
[GOAL]
case a.inr.hnc
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
hd : ¬d = 0
⊢ ¬↑(weightedDegree' w) d = 0
[PROOFSTEP]
simp only [weightedDegree', LinearMap.toAddMonoidHom_coe, Finsupp.total_apply, Finsupp.sum, sum_eq_zero_iff,
Finsupp.mem_support_iff, Ne.def, smul_eq_zero, not_forall, not_or, and_self_left, exists_prop]
[GOAL]
case a.inr.hnc
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
hd : ¬d = 0
⊢ ∃ x, ¬↑d x = 0 ∧ ¬w x = 0
[PROOFSTEP]
simp only [FunLike.ext_iff, Finsupp.coe_zero, Pi.zero_apply, not_forall] at hd
[GOAL]
case a.inr.hnc
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
hd : ∃ x, ¬↑d x = 0
⊢ ∃ x, ¬↑d x = 0 ∧ ¬w x = 0
[PROOFSTEP]
obtain ⟨i, hi⟩ := hd
[GOAL]
case a.inr.hnc.intro
R : Type u_1
M : Type u_2
inst✝² : CommSemiring R
σ : Type u_3
inst✝¹ : CanonicallyOrderedAddMonoid M
w : σ → M
φ : MvPolynomial σ R
inst✝ : NoZeroSMulDivisors ℕ M
hw : ∀ (i : σ), w i ≠ 0
d : σ →₀ ℕ
i : σ
hi : ¬↑d i = 0
⊢ ∃ x, ¬↑d x = 0 ∧ ¬w x = 0
[PROOFSTEP]
exact ⟨i, hi, hw i⟩
|
{"mathlib_filename": "Mathlib.RingTheory.MvPolynomial.WeightedHomogeneous", "llama_tokens": 26398}
|
using DualNumbers
using Random
using StaticArrays
using Test
# Fix the RNG seed so the randomized checks below are reproducible.
# NOTE(review): the order of the rand*() calls is significant — it fixes the
# RNG stream consumed by each testset, so statements must not be reordered.
Random.seed!(0)
# Dual numbers over a ring (Int): randomized checks of the ring axioms.
@testset "Ring" begin
    T = Int
    randint() = T(rand(-100:100))
    for iter in 1:100
        # Additive and multiplicative identity elements.
        n = Dual(T(0))
        e = Dual(T(1))
        x = Dual(randint(), randint())
        y = Dual(randint(), randint())
        z = Dual(randint(), randint())
        a = randint()
        b = randint()
        i = rand(1:10)
        # Constructor and field access: one-argument form zeroes the dual part.
        @test Dual(a).primal === a
        @test Dual(a).dual === 0
        @test Dual(a, b).primal === a
        @test Dual(a, b).dual === b
        # Additive group axioms (identity, negation, commutativity, associativity).
        @test +x === x
        @test -(-x) === x
        @test n + x === x
        @test x + n === x
        @test -x === n - x
        @test x - x === n
        @test x + y === y + x
        @test (x + y) + z === x + (y + z)
        # Multiplicative axioms and absorption by zero.
        @test n * x === n
        @test x * n === n
        @test e * x === x
        @test x * e === x
        @test x * y === y * x
        @test (x * y) * z === x * (y * z)
        # Integer powers reduce to repeated multiplication.
        @test x^0 == e
        @test x^1 == x
        @test x^i == x * x^(i - 1)
        # Mixed Dual/scalar arithmetic and comparisons.
        @test x + a === a + x
        @test Dual(a) == a
        @test (Dual(a) == b) == (a == b)
        @test (Dual(a) < b) == (a < b)
        @test (Dual(a) > b) == (a > b)
    end
end
# Re-seed so this testset's random draws are independent of the previous one.
Random.seed!(0)
# Dual numbers over a field (exact rationals): division and inverses.
@testset "Field" begin
    T = Rational{Int128}
    randint() = T(rand(-100:100))
    # Draw a random nonzero integer by rejection sampling.
    randintnz() =
        while true
            i = randint()
            i != 0 && return i
        end
    randrat() = randint() // randintnz()
    for iter in 1:100
        n = Dual(T(0))
        e = Dual(T(1))
        x = Dual(randrat(), randrat())
        y = Dual(randrat(), randrat())
        z = Dual(randrat(), randrat())
        a = randrat()
        b = randrat()
        # Inverses are only tested away from zero.
        if x != 0
            @test inv(x) === 1 / x
            @test inv(inv(x)) === x
            @test e / x === inv(x)
            @test x \ y === y / x
            if y != 0
                @test x / y === inv(y / x)
                @test x \ y === inv(y \ x)
            end
        end
    end
end
# Re-seed so this testset's random draws are independent of the previous ones.
Random.seed!(0)
# Real-valued duals: elementary functions must act on the primal part like
# their real counterparts (the dual part is exercised by the derivative tests).
@testset "Real" begin
    T = Float64
    randfloat() = rand(T) * 200 - 100
    for iter in 1:100
        n = Dual(T(0))
        e = Dual(T(1))
        x = Dual(randfloat(), randfloat())
        y = Dual(randfloat(), randfloat())
        z = Dual(randfloat(), randfloat())
        a = randfloat()
        b = randfloat()
        @test (abs(x)^a).primal ≈ abs(x).primal^a
        @test cos(x).primal ≈ cos(x.primal)
        @test sin(x).primal ≈ sin(x.primal)
        @test sqrt(abs(x)).primal ≈ sqrt(abs(x).primal)
    end
end
# Re-seed for reproducibility; statement order fixes the RNG stream.
Random.seed!(0)
# Derivatives of simple scalar functions against their analytic values.
@testset "Basic Derivatives" begin
    T = Float64
    randfloat() = rand(T)
    for iter in 1:100
        x = randfloat()
        a = randfloat()
        @test derivative(x -> x + a, x) ≈ 1
        @test derivative(x -> a * x, x) ≈ a
        @test derivative(x -> x^2, x) ≈ 2x
        @test derivative(cos, x) ≈ -sin(x)
        @test derivative(sin, x) ≈ cos(x)
        @test derivative(sqrt, x) ≈ 1 / 2sqrt(x)
    end
end
Random.seed!(0)
# Structural laws of differentiation: linearity, identity, and the chain rule,
# checked on random pairs drawn from a small pool of smooth functions.
@testset "Derivative laws" begin
    T = Float64
    randfloat() = rand(T)
    for iter in 1:100
        x = randfloat()
        c0 = randfloat()
        c1 = randfloat()
        # x ∈ (0, 1), so sqrt(x + 1) is well defined for every f in the pool.
        fs = [x -> x + c0, x -> c1 * x, x -> x^2, cos, sin, x -> sqrt(x + 1)]
        i = identity
        f = rand(fs)
        g = rand(fs)
        h = rand(fs)
        a = randfloat()
        # Sanity check on function composition itself.
        @test (g ∘ f)(x) ≈ g(f(x))
        # Linearity: d(f+g) = df + dg and d(a·f) = a·df.
        @test derivative(x -> f(x) + g(x), x) ≈ derivative(f, x) + derivative(g, x)
        @test derivative(x -> a * f(x), x) ≈ a * derivative(f, x)
        # Composing with the identity must not change the derivative.
        @test derivative(identity ∘ f, x) ≈ derivative(f, x)
        @test derivative(f ∘ identity, x) ≈ derivative(f, x)
        @test derivative(identity, x) ≈ 1
        # Chain rule: (g ∘ f)'(x) = g'(f(x)) · f'(x).
        f′(x) = derivative(f, x)
        g′(x) = derivative(g, x)
        @test derivative(g ∘ f, x) ≈ g′(f(x)) * f′(x)
    end
end
# Re-seed for reproducibility; statement order fixes the RNG stream.
Random.seed!(0)
# Partial derivatives of functions of several variables: the third positional
# argument of derivative() selects the component differentiated against.
@testset "Derivatives of multi-valued functions" begin
    T = Float64
    randfloat() = rand(T)
    for iter in 1:100
        xs = SVector(randfloat(), randfloat(), randfloat())
        c0 = randfloat()
        c1 = randfloat()
        fs = [x -> x + c0, x -> c1 * x, x -> x^2, cos, sin, x -> sqrt(x + 1)]
        f1 = rand(fs)
        f2 = rand(fs)
        f3 = rand(fs)
        # f and g combine one scalar function per component of xs.
        f(xs) = f1(xs[1]) + f2(xs[2]) * f3(xs[3])
        g(xs) = f1(xs[1]) * f2(xs[2]) * f3(xs[3])
        a = randfloat()
        # Each partial of f is the analytic partial of the matching term.
        @test derivative(f, xs, 1) ≈ derivative(f1, xs[1])
        @test derivative(f, xs, 2) ≈ derivative(f2, xs[2]) * f3(xs[3])
        @test derivative(f, xs, 3) ≈ f2(xs[2]) * derivative(f3, xs[3])
        # Linearity holds component-wise.
        for i in 1:3
            @test derivative(xs -> f(xs) + g(xs), xs, i) ≈ derivative(f, xs, i) + derivative(g, xs, i)
            @test derivative(xs -> a * f(xs), xs, i) ≈ a * derivative(f, xs, i)
        end
    end
end
Random.seed!(0)
# Complex arguments: component index 1/2 selects the real/imaginary part of
# the argument being differentiated against (Wirtinger-style split —
# NOTE(review): inferred from the assertions below; confirm in the package docs).
@testset "Derivatives of function with complex arguments" begin
    T = Float64
    randfloat() = rand(T)
    for iter in 1:100
        x = Complex(randfloat(), randfloat())
        X = SVector(Complex(randfloat(), randfloat()), Complex(randfloat(), randfloat()))
        c0 = randfloat()
        c1 = randfloat()
        fs = [x -> x + c0, x -> c1 * x, x -> x^2, cos, sin, x -> sqrt(x + 1)]
        f1 = rand(fs)
        f2 = rand(fs)
        f3 = rand(fs)
        f4 = rand(fs)
        g1 = rand(fs)
        g2 = rand(fs)
        g3 = rand(fs)
        g4 = rand(fs)
        # f depends additively, g multiplicatively, on re/im parts of x.
        f(x) = (f1(real(x)) + im * f2(real(x))) + (f3(imag(x)) + im * f4(imag(x)))
        g(x) = (g1(real(x)) + im * g2(real(x))) * (g3(imag(x)) + im * g4(imag(x)))
        # F and G lift f and g to two-component complex vectors.
        F(xs) = f(xs[1]) + g(xs[2])
        G(xs) = f(xs[2]) * g(xs[1])
        a = randfloat()
        # Partial w.r.t. the real (c=1) and imaginary (c=2) parts of x.
        @test derivative(f, x, 1) ≈ derivative(f1, real(x)) + im * derivative(f2, real(x))
        @test derivative(f, x, 2) ≈ derivative(f3, imag(x)) + im * derivative(f4, imag(x))
        # Linearity in each real/imaginary component.
        for c in 1:2
            @test derivative(x -> f(x) + g(x), x, c) ≈ derivative(f, x, c) + derivative(g, x, c)
            @test derivative(x -> a * f(x), x, c) ≈ a * derivative(f, x, c)
        end
        # Linearity per vector component i and per real/imag component c.
        for i in 1:2, c in 1:2
            @test derivative(X -> F(X) + G(X), X, i, c) ≈ derivative(F, X, i, c) + derivative(G, X, i, c)
            @test derivative(X -> a * F(X), X, i, c) ≈ a * derivative(F, X, i, c)
        end
    end
end
|
{"hexsha": "b8a2b7118ad37cba7e1e58cffb99ff3179462866", "size": 6300, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "eschnett/DualNumbers.jl", "max_stars_repo_head_hexsha": "e46e17c2c7c2321fb8b79e878fd13e2afcd71e7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "eschnett/DualNumbers.jl", "max_issues_repo_head_hexsha": "e46e17c2c7c2321fb8b79e878fd13e2afcd71e7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "eschnett/DualNumbers.jl", "max_forks_repo_head_hexsha": "e46e17c2c7c2321fb8b79e878fd13e2afcd71e7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5060728745, "max_line_length": 105, "alphanum_fraction": 0.4517460317, "num_tokens": 2113}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Climate Data Preprocessing Tool Project Contributors
# https://github.com/cgq-qgc/climate-data-preprocessing-tool
#
# This file is part of Climate Data Preprocessing Tool.
# Licensed under the terms of the GNU General Public License.
# -----------------------------------------------------------------------------
# ---- Standard library imports
import csv
import os
import os.path as osp
from time import strftime, process_time
from datetime import datetime
# ---- Third party imports
import numpy as np
import pandas as pd
from PyQt5.QtCore import pyqtSignal as QSignal
from PyQt5.QtCore import QObject
# import statsmodels.api as sm
# import statsmodels.regression as sm_reg
# from statsmodels.regression.linear_model import OLS
# from statsmodels.regression.quantile_regression import QuantReg
# ---- Local imports
from cdprep.utils.taskmanagers import WorkerBase, TaskManagerBase
from cdprep.config.gui import RED, LIGHTGRAY
from cdprep.gapfill_data.read_weather_data import read_weather_datafile
from cdprep import __namever__
# Weather variables handled by the gapfilling procedure: total precipitation
# and maximum/average/minimum air temperature.
VARNAMES = ['Ptot', 'Tmax', 'Tavg', 'Tmin']
class DataGapfillManager(TaskManagerBase):
    """
    Task manager that schedules weather data gapfilling operations on a
    background :class:`DataGapfillWorker` and relays its progress and
    status signals.
    """
    sig_task_progress = QSignal(int)
    sig_status_message = QSignal(str)

    def __init__(self):
        super().__init__()
        gapfill_worker = DataGapfillWorker()
        self.set_worker(gapfill_worker)
        # Forward the worker's progress and status signals through this
        # manager so the GUI only needs to connect to one object.
        gapfill_worker.sig_task_progress.connect(self.sig_task_progress.emit)
        gapfill_worker.sig_status_message.connect(self.sig_status_message.emit)

    def count(self):
        """
        Return the number of datasets that are currently loaded in the
        gapfill data worker.
        """
        return self.worker().wxdatasets.count()

    def get_station_names(self):
        """
        Return the list of station names for which data are loaded in memory.
        """
        return self.worker().wxdatasets.station_names

    def get_station_ids(self):
        """
        Return the list of station IDs for which data are loaded in memory.
        """
        return self.worker().wxdatasets.station_ids

    def set_workdir(self, workdir):
        """Set the directory of the input weather data files on the worker."""
        self.worker().inputDir = workdir

    def set_target_station(self, station_id, callback=None,
                           postpone_exec=False):
        """
        Set the target station to the station corresponding
        to the specified station id.

        Setting the target station also trigger the recalculation of the
        correlation coefficients with the neighboring stations.
        """
        self.add_task(
            'set_target_station', callback=callback, station_id=station_id)
        if not postpone_exec:
            self.run_tasks()

    def load_data(self, force_reload=False, callback=None,
                  postpone_exec=False):
        """Read the csv files in the input data directory folder."""
        self.add_task(
            'load_data', force_reload=force_reload, callback=callback)
        if not postpone_exec:
            self.run_tasks()

    def gapfill_data(self, time_start, time_end, max_neighbors,
                     hdist_limit, vdist_limit, regression_mode,
                     callback=None, postpone_exec=False):
        """Gapfill the data of the target station."""
        self.add_task(
            'gapfill_data',
            callback=callback,
            time_start=time_start,
            time_end=time_end,
            max_neighbors=max_neighbors,
            hdist_limit=hdist_limit,
            vdist_limit=vdist_limit,
            regression_mode=regression_mode)
        if not postpone_exec:
            self.run_tasks()
class DataGapfillWorker(WorkerBase):
    """
    This class manage all that is related to the gap-filling of weather data
    records, including reading the data file on the disk.

    Attributes (configured after construction, not constructor parameters)
    ----------
    NSTAmax : int
        Maximum number of neighboring stations used to fill the missing
        data of the target station (validated property, must be >= 1).
    limitDist : float
        Horizontal distance cutoff for neighboring stations
        (presumably in km — TODO confirm against alt_and_dist_calc).
    limitAlt : float
        Altitude difference cutoff for neighboring stations
        (presumably in m — TODO confirm).
    regression_mode : int
        1 for Ordinary Least Squares, 0 for Least Absolute Deviations.
    full_error_analysis : bool
        Whether a cross-validation error analysis is performed while
        filling missing data.
    """
    # Qt signals used to report progress/status/results to the GUI thread.
    sig_task_progress = QSignal(int)
    sig_status_message = QSignal(str)
    sig_console_message = QSignal(str)
    sig_gapfill_finished = QSignal(bool)
    def __init__(self):
        super().__init__()
        # Station id of the current target station (None until one is set).
        self.target = None
        # Altitude differences and horizontal distances between the target
        # station and its neighbors (computed in set_target_station).
        self.alt_and_dist = None
        # Correlation coefficients with the neighboring stations
        # (computed in set_target_station).
        self.corcoef = None
        # ---- Required Inputs
        self.time_start = None
        self.time_end = None
        # self.WEATHER is kept as a legacy alias of self.wxdatasets.
        self.WEATHER = self.wxdatasets = WeatherData()
        # Relay the dataset container's progress/status signals as our own.
        self.wxdatasets.sig_task_progress.connect(self.sig_task_progress.emit)
        self.wxdatasets.sig_status_message.connect(
            self.sig_status_message.emit)
        # Persist the datasets to the binary cache whenever new correlation
        # coefficients have been computed.
        self.wxdatasets.sig_corrcoeff_calculated.connect(
            lambda: self.wxdatasets.save_to_binary(self.inputDir))
        self.inputDir = None
        self.isParamsValid = False
        # ---- Define Parameters Default
        # Maximum number of neighboring stations that will be used to fill
        # the missing data in the target station
        self.NSTAmax = 4
        self.limitDist = 100
        self.limitAlt = 350
        # if *regression_mode* = 1: Ordinary Least Square
        # if *regression_mode* = 0: Least Absolute Deviations
        self.regression_mode = 1
        # Set whether a complete analysis of the estimation errors is
        # conducted with a cross-validation procedure while filling missing
        # data.
        self.full_error_analysis = False
@property
def outputdir(self):
if self.inputDir is None:
return None
else:
return osp.join(self.inputDir, 'GAPFILLED')
@property
def NSTAmax(self):
return self.__NSTAmax
@NSTAmax.setter
def NSTAmax(self, x):
if type(x) != int or x < 1:
raise ValueError('!WARNING! NSTAmax must be must be an integer'
' with a value greater than 0.')
self.__NSTAmax = x
def load_data(self, force_reload=False):
"""
Read the csv files in the input data directory folder.
The resulting formatted dataset is saved in a structured numpy array
in binary format, so that loading time is improved on subsequent runs.
Some checks are made to be sure the binary match with the current
data files in the folder.
"""
self.target = None
self.alt_and_dist = None
self.corcoef = None
if self.inputDir is None:
print('Please specify a valid input data file directory.')
return
if not osp.exists(self.inputDir):
print('Input data directory does not exists.')
return
if force_reload is True:
print('Force reloading data from csv file...')
return self._reload_data()
# Check if a cached binary file exists.
binfile = os.path.join(self.inputDir, '__cache__', 'fdata.npy')
if not osp.exists(binfile):
return self._reload_data()
# Try to load data from the cached binary file.
try:
self.wxdatasets.load_from_binary(self.inputDir)
except Exception as e:
print('Failed to load data from cache because '
'of the following error:')
print(e)
return self._reload_data()
else:
# Scan input folder for changes
# If one of the csv data file contained within the input data
# directory has changed since last time the binary file was
# created, the data will be reloaded from the csv files and a
# new binary file will be generated.
filenames = [osp.basename(f) for f in self.wxdatasets.filenames]
bmtime = osp.getmtime(binfile)
count = 0
for f in os.listdir(self.inputDir):
if f.endswith('.csv'):
fmtime = osp.getmtime(osp.join(self.inputDir, f))
if f in filenames and fmtime <= bmtime:
count += 1
if len(filenames) != count:
print('One or more input data files in the workind '
'directory changed since the last time the data '
'were cached.')
return self._reload_data()
else:
print('Data loaded from cache.')
def _reload_data(self):
"""
Read the csv files in the input data directory folder, format
the datasets and save the results in a binary file.
"""
filepaths = [
osp.join(self.inputDir, f) for
f in os.listdir(self.inputDir) if f.endswith('.csv')]
print('{:d} csv files were found in {}.'.format(
len(filepaths), self.inputDir))
message = 'Reading data from csv files...'
print(message)
self.sig_status_message.emit(message)
self.wxdatasets.load_and_format_data(filepaths)
self.wxdatasets.save_to_binary(self.inputDir)
print('Data loaded successfully.')
self.sig_status_message.emit('')
def get_target_station(self):
"""
Return the metadata related to the current target station.
"""
return self.wxdatasets.metadata.loc[self.target]
def set_target_station(self, station_id):
"""
Set the target station to the station corresponding to the specified
station id.
"""
if station_id not in self.wxdatasets.station_ids:
self.target = None
self.alt_and_dist = None
self.corcoef = None
raise ValueError("No data currently loaded for station '{}'."
.format(station_id))
else:
self.target = station_id
self.alt_and_dist = self.wxdatasets.alt_and_dist_calc(station_id)
self.corcoef = (
self.wxdatasets.compute_correlation_coeff(station_id))
def get_valid_neighboring_stations(self, hdist_limit, vdist_limit):
"""
Return the list of neighboring stations that are within the
horizontal and altitude range of the target station.
"""
# If cutoff limits for the horizontal distance and altitude are set
# to a negative number, all stations are kept regardless of their
# distance or altitude difference with the target station.
valid_stations = self.alt_and_dist.copy()
if hdist_limit > 0:
valid_stations = valid_stations[
valid_stations['hordist'] <= hdist_limit]
if vdist_limit > 0:
valid_stations = valid_stations[
valid_stations['altdiff'].abs() <= vdist_limit]
valid_stations = valid_stations.index.values.tolist()
valid_stations.remove(self.target)
return valid_stations
    def gapfill_data(self, time_start, time_end, max_neighbors,
                     hdist_limit, vdist_limit, regression_mode):
        """
        Gapfill the data of the target station.

        For each variable in VARNAMES, the missing daily values of the
        target station over [time_start, time_end] are estimated with a
        multiple linear regression built from up to max_neighbors
        neighboring stations located within hdist_limit (km) and
        vdist_limit (m) of the target (see build_mlr_model: OLS when
        regression_mode == 1, least absolute deviations otherwise).  The
        gapfilled dataset is saved as a csv file in self.outputdir and
        returned as a pandas DataFrame.
        """
        tstart_total = process_time()
        neighbors = self.get_valid_neighboring_stations(
            hdist_limit, vdist_limit)
        gapfill_date_range = pd.date_range(
            start=time_start, end=time_end, freq='D')
        # Accumulator for the estimated values (one column per variable).
        y2fill = pd.DataFrame(
            np.nan, index=gapfill_date_range, columns=VARNAMES)
        self.sig_task_progress.emit(0)
        for i, varname in enumerate(VARNAMES):
            # When a station does not have enough data for a given variable,
            # its correlation coefficient is set to nan. If all the stations
            # have a NeN value in the correlation table for a given variable,
            # it means there is not enough data available overall to estimate
            # and fill the missing data for that variable.
            var2fill = (self.corcoef.loc[neighbors]
                        .dropna(axis=1, how='all').columns.tolist())
            if varname not in var2fill:
                msg = ("Variable {} will not be filled because there "
                       "is not enough data.").format(varname)
                print(msg)
                self.sig_console_message.emit(
                    '<font color=red>%s</font>' % msg)
                continue
            tstart = process_time()
            message = 'Gapfilling data for variable {}...'.format(varname)
            print(message)
            self.sig_status_message.emit(message)
            reg_models = {}
            # Group the dates by identical pattern of data availability
            # across the neighboring stations: the same regression model
            # can then be reused for every date of a group.
            notnull = self.wxdatasets.data[varname].loc[
                gapfill_date_range, neighbors].notnull()
            notnull_groups = notnull.groupby(by=neighbors, axis=0)
            for j, group in enumerate(notnull_groups):
                # group[0] is the tuple of booleans (availability pattern)
                # and group[1] the sub-dataframe of dates in that group.
                group_dates = group[1].index
                group_neighbors = group[1].columns[list(group[0])]
                if len(group_neighbors) == 0:
                    # It is impossible to fill the data in this group
                    # because all neighboring stations are empty.
                    continue
                # Determines the neighboring stations to include in the
                # regression model.
                model_neighbors = list(
                    self.corcoef.loc[group_neighbors]
                    .sort_values(varname, axis=0, ascending=False)
                    .index
                    )[:max_neighbors]
                neighbors_combi = ', '.join(model_neighbors)
                if neighbors_combi in reg_models:
                    # Regression coefficients and RSME are recalled
                    # from the memory matrices.
                    A = reg_models[neighbors_combi]
                else:
                    # This is the first time this neighboring stations
                    # combination is encountered in the routine,
                    # regression coefficients need to be calculated.
                    # The data for the current variable are sorted by
                    # their stations in in descending correlation
                    # coefficient.
                    YX = self.wxdatasets.data[varname][
                        [self.target] + model_neighbors].copy()
                    # Remove all rows containing at least one nan value.
                    YX = YX.dropna()
                    # Rows for which precipitation of the target station
                    # and all the neighboring stations is 0 are removed.
                    # This is only applicable for precipitation, not air
                    # temperature.
                    if varname in ['Ptot']:
                        YX = YX.loc[(YX != 0).any(axis=1)]
                    # Dependant variable (target)
                    Y = YX[self.target].values
                    # Independant variables (neighbors)
                    X = YX[model_neighbors].values
                    # Add a unitary array to X for the intercept term if
                    # variable is a temperature type data.
                    # (though this was questionned by G. Flerchinger)
                    if varname in ['Tmax', 'Tavg', 'Tmin']:
                        X = np.hstack((np.ones((len(Y), 1)), X))
                    # Generate the MLR Model
                    A = self.build_mlr_model(X, Y, regression_mode)
                    # Calcul the RMSE.
                    # Calculate a RMSE between the estimated and
                    # measured values of the target station.
                    # RMSE with 0 value are not accounted for
                    # in the calcultation.
                    # NOTE(review): rmse is computed but never stored or
                    # returned -- confirm whether it should be kept.
                    Yp = np.dot(A, X.transpose())
                    rmse = (Y - Yp)**2  # MAE = np.abs(Y - Yp)
                    rmse = rmse[rmse != 0]  # MAE = MAE[MAE!=0]
                    rmse = np.mean(rmse)**0.5  # MAE = np.mean(MAE)
                    # print('Calcul RMSE', rmse)
                    # Store values in memory.
                    reg_models[neighbors_combi] = A
                # Calculate the missing values for the group.
                X = self.wxdatasets.data[varname].loc[
                    group_dates, model_neighbors].values
                if varname in ['Tmax', 'Tavg', 'Tmin']:
                    X = np.hstack((np.ones((len(X), 1)), X))
                Y = np.dot(A, X.transpose())
                # Limit precipitation to positive values.
                # This may happens when there is one or more negative
                # regression coefficients in A
                if varname in ['Ptot']:
                    Y[Y < 0] = 0
                # Store the results.
                y2fill.loc[group_dates, varname] = Y
                self.sig_task_progress.emit(int(
                    (j + 1) / len(notnull_groups) * 100 / len(VARNAMES) +
                    i / len(VARNAMES) * 100))
            self.sig_task_progress.emit(int((i + 1) / len(VARNAMES) * 100))
            print('Data gapfilled for {} in {:0.1f} sec.'.format(
                varname, process_time() - tstart))
        # Gapfill dataset for the target station.
        gapfilled_data = pd.DataFrame([], index=gapfill_date_range)
        for varname in VARNAMES:
            # Fetch the original target data for varname.
            gapfilled_data[varname] = self.wxdatasets.data[varname].loc[
                gapfill_date_range, self.target]
            # Fill the gaps.
            isnull = gapfilled_data.index[gapfilled_data[varname].isnull()]
            gapfilled_data.loc[isnull, varname] = y2fill.loc[
                isnull, varname]
        message = (
            'Data completion for station %s completed successfully '
            'in %0.2f sec.') % (self.target, (process_time() - tstart_total))
        print(message)
        self.sig_status_message.emit(message)
        self.sig_console_message.emit('<font color=black>%s</font>' % message)
        if gapfilled_data.isnull().values.any():
            message = ("WARNING: Some missing data were not filled because "
                       "all neighboring stations were empty for that period.")
            print(message)
            self.sig_console_message.emit(
                '<font color=red>%s</font>' % message)
        # Save the gapfilled data to a file.
        # Add Year, Month and Day to the dataset and rename some columns.
        gapfilled_data['Year'] = gapfilled_data.index.year.astype(str)
        gapfilled_data['Month'] = gapfilled_data.index.month.astype(str)
        gapfilled_data['Day'] = gapfilled_data.index.day.astype(str)
        for varname in VARNAMES:
            gapfilled_data[varname] = gapfilled_data[varname].round(1)
        # Replace nan values by an empty string.
        gapfilled_data = gapfilled_data.fillna(value='')
        # Make sure the columns are in the right order.
        gapfilled_data = gapfilled_data[
            ['Year', 'Month', 'Day', 'Tmax', 'Tmin', 'Tavg', 'Ptot']]
        target_metadata = self.wxdatasets.metadata.loc[self.target]
        data_headers = ['Year', 'Month', 'Day', 'Max Temp (°C)',
                        'Min Temp (°C)', 'Mean Temp (°C)',
                        'Total Precip (mm)']
        fcontent = [
            ['Station Name', target_metadata['Station Name']],
            ['Province', target_metadata['Location']],
            ['Latitude (dd)', target_metadata['Latitude']],
            ['Longitude (dd)', target_metadata['Longitude']],
            ['Elevation (m)', target_metadata['Elevation']],
            ['Climate Identifier', self.target],
            [],
            ['Created by', __namever__],
            ['Created on', strftime("%d/%m/%Y")],
            [],
            data_headers
            ] + gapfilled_data.values.tolist()
        # Save the data to csv.
        if not osp.exists(self.outputdir):
            os.makedirs(self.outputdir)
        # Sanitize the station name so it can be used in a filename.
        clean_target_name = (
            target_metadata['Station Name']
            .replace('\\', '_').replace('/', '_'))
        filename = '{} ({})_{}-{}.csv'.format(
            clean_target_name,
            self.target,
            str(min(gapfilled_data['Year'])),
            str(max(gapfilled_data['Year']))
            )
        filepath = osp.join(self.outputdir, filename)
        with open(filepath, 'w', encoding='utf-8') as f:
            writer = csv.writer(f, delimiter=',', lineterminator='\n')
            writer.writerows(fcontent)
        self.sig_gapfill_finished.emit(True)
        return gapfilled_data
def build_mlr_model(self, X, Y, regression_mode):
"""
Build a multiple linear model using the provided independent (X) and
dependent (y) variable data.
"""
if regression_mode == 1: # Ordinary Least Square regression
# http://statsmodels.sourceforge.net/devel/generated/
# statsmodels.regression.linear_model.OLS.html
# model = OLS(Y, X)
# results = model.fit()
# A = results.params
# Using Numpy function:
A = np.linalg.lstsq(X, Y, rcond=None)[0]
else: # Least Absolute Deviations regression
# http://statsmodels.sourceforge.net/devel/generated/
# statsmodels.regression.quantile_regression.QuantReg.html
# http://statsmodels.sourceforge.net/devel/examples/
# notebooks/generated/quantile_regression.html
# model = QuantReg(Y, X)
# results = model.fit(q=0.5)
# A = results.params
# Using Homemade function:
A = L1LinearRegression(X, Y)
return A
@staticmethod
def postprocess_fillinfo(staName, YX, tarStaIndx):
# Extracts info related to the target station from <YXmFull> and the
# info related to the neighboring stations. Xm is for the
# neighboring stations and Ym is for the target stations.
Yname = staName[tarStaIndx] # target station name
Xnames = np.delete(staName, tarStaIndx) # neighboring station names
Y = YX[:, tarStaIndx, :] # Target station data
X = np.delete(YX, tarStaIndx, axis=1) # Neighboring station data
# Counts how many times each neigboring station was used for
# estimating the data of the target stations.
Xcount_var = np.sum(~np.isnan(X), axis=0)
Xcount_tot = np.sum(Xcount_var, axis=1)
# Removes the neighboring stations that were not used.
indx = np.where(Xcount_tot > 0)[0]
Xnames = Xnames[indx]
X = X[:, indx]
Xcount_var = Xcount_var[indx, :]
Xcount_tot = Xcount_tot[indx]
# Sort the neighboring stations by importance.
indx = np.argsort(Xcount_tot * -1)
Xnames = Xnames[indx]
X = X[:, indx]
return Yname, Y, Xnames, X, Xcount_var, Xcount_tot
def generate_html_summary_table(self):
return self.wxdatasets.generate_html_summary_table()
    def generate_correlation_html_table(self, gapfill_parameters):
        """
        This function generate an HTML output to be displayed in the
        <Fill Data> tab display area after a target station has been
        selected by the user.

        Returns a tuple (table2, target_info) where target_info is a small
        key/value HTML table with the target station metadata, and table2
        is the concatenation of the missing-data table (table 1) and the
        correlation-coefficient table (table 2).
        """
        target_metadata = self.wxdatasets.metadata.loc[self.target]
        header_data = {
            'Latitude': target_metadata['Latitude'],
            'Longitude': target_metadata['Longitude'],
            'Altitude': target_metadata['Elevation'],
            'Data start': target_metadata['first_date'],
            'Data end': target_metadata['last_date']}
        # Build the target station metadata table (one "field = value"
        # row per entry of header_data).
        target_info = (
            '<table border="0" cellpadding="1" cellspacing="0" align="left">')
        for field, value in header_data.items():
            target_info += '<tr>'
            target_info += '<td align="left">%s</td>' % field
            target_info += '<td align="left"> = </td>'
            target_info += '<td align="left">%s</td>' % value
            target_info += '</tr>'
        target_info += '</table>'
        # Sort neighboring stations.
        # Stations best correlated with the target station are displayed toward
        # the top of the table while neighboring stations poorly correlated are
        # displayed toward the bottom.
        # Define a criteria for sorting the correlation quality
        # of the stations.
        # Generate the missing data table.
        fill_date_start = gapfill_parameters['date_start']
        fill_date_end = gapfill_parameters['date_end']
        table1 = '''
                 <p align=justify>
                   Table 1 : Number of days with missing data from
                   <b>%s</b> to <b>%s</b> for station <b>%s</b>:
                 </p>
                 ''' % (fill_date_start, fill_date_end,
                        target_metadata['Station Name'])
        table1 += '''
                  <table border="0" cellpadding="3" cellspacing="0"
                  align="center">
                    <tr>
                      <td colspan="5"><hr></td>
                    </tr>
                    <tr>
                      <td width=135 align="left">Weather Variable</td>
                      <td align="center">T<sub>max</sub></td>
                      <td align="center">T<sub>min</sub></sub></td>
                      <td align="center">T<sub>mean</sub></td>
                      <td align="center">P<sub>tot</sub></td>
                    </tr>
                    <tr>
                      <td colspan="5"><hr></td>
                    </tr>
                    <tr>
                      <td width=135 align="left">Days with<br>missing data</td>
                  '''
        # Dates are expected in 'dd/mm/yyyy' format (see strptime below).
        datetime_start = datetime.strptime(
            gapfill_parameters['date_start'], '%d/%m/%Y')
        datetime_end = datetime.strptime(
            gapfill_parameters['date_end'], '%d/%m/%Y')
        # Total number of days in the gapfill period (inclusive).
        total_nbr_data = (
            (datetime_end - datetime_start).total_seconds() / 3600 / 24 + 1)
        # One cell per variable: count of nan values within the period.
        for var in self.wxdatasets.data.keys():
            data = self.wxdatasets.data[var][self.target]
            nbr_nan = len(data[
                (data.index >= datetime_start) &
                (data.index <= datetime_end) &
                (data.isnull())])
            nan_percent = round(nbr_nan / total_nbr_data * 100, 1)
            table1 += '''
                      <td align="center">
                        %d<br>(%0.1f %%)
                      </td>
                      ''' % (nbr_nan, nan_percent)
        table1 += '''
                    </tr>
                    <tr>
                      <td colspan="5"><hr></td>
                    </tr>
                  </table>
                  <br><br>
                  '''
        # Generate the correlation coefficient table
        # NOTE: table2 deliberately starts from table1 so the returned
        # string contains both tables back to back.
        table2 = table1
        # NOTE(review): the literal below contains "<\font>", which should
        # probably be "</font>" -- candidate fix.
        table2 += '''
                  <p align="justify">
                    <font size="3">
                      Table 2 : Altitude difference, horizontal distance and
                      correlation coefficients for each meteorological
                      variables, calculated between station <b>%s</b> and its
                      neighboring stations :
                    <\font>
                  </p>
                  ''' % target_metadata['Station Name']
        # Generate the horizontal header of the table.
        # NOTE(review): this literal is not %-formatted, so width="100%%"
        # is emitted literally as "100%%" -- candidate fix.
        table2 += '''
                  <table border="0" cellpadding="3" cellspacing="0"
                   align="center" width="100%%">
                    <tr>
                      <td colspan="9"><hr></td>
                    </tr>
                    <tr>
                      <td align="center" valign="bottom" width=30 rowspan="3">
                        #
                      </td>
                      <td align="left" valign="bottom" width=200 rowspan="3">
                        Neighboring Stations
                      </td>
                      <td width=60 align="center" valign="bottom" rowspan="3">
                        &#916;Alt.<br>(m)
                      </td>
                      <td width=60 align="center" valign="bottom" rowspan="3">
                        Dist.<br>(km)
                      </td>
                      <td align="center" valign="middle" colspan="4">
                        Correlation Coefficients
                      </td>
                    </tr>
                    <tr>
                      <td colspan="4"><hr></td>
                    </tr>
                    <tr>
                      <td width=60 align="center" valign="middle">
                        T<sub>max</sub>
                      </td>
                      <td width=60 align="center" valign="middle">
                        T<sub>min</sub>
                      </td>
                      <td width=60 align="center" valign="middle">
                        T<sub>mean</sub>
                      </td>
                      <td width=60 align="center" valign="middle">
                        P<sub>tot</sub>
                      </td>
                    </tr>
                    <tr>
                      <td colspan="9"><hr></td>
                    </tr>
                  '''
        # Sort the stations by decreasing correlation on Ptot; the target
        # station itself is removed from the listing.
        corcoef = self.corcoef.sort_values('Ptot', axis=0, ascending=False)
        stations = corcoef.index.values.tolist()
        stations.remove(self.target)
        for i, station_id in enumerate(stations):
            # Alternate the row background color for readability.
            color = ['transparent', LIGHTGRAY][i % 2]
            metadata = self.wxdatasets.metadata.loc[station_id]
            # Neighboring station names.
            table2 += '''
                      <tr bgcolor="%s">
                        <td align="center" valign="top">%02d</td>
                        <td valign="top">
                          %s
                        </td>
                      ''' % (color, i + 1, metadata['Station Name'])
            # Check the condition for the altitude difference.
            limit_altdiff = gapfill_parameters['limitAlt']
            altdiff = self.alt_and_dist.loc[station_id]['altdiff']
            if abs(altdiff) >= limit_altdiff and limit_altdiff >= 0:
                fontcolor = RED
            else:
                fontcolor = ''
            table2 += '''
                      <td align="center" valign="top">
                        <font color="%s">%0.1f</font>
                      </td>
                      ''' % (fontcolor, altdiff)
            # Check the condition for the horizontal distance.
            limit_hordist = gapfill_parameters['limitDist']
            hordist = self.alt_and_dist.loc[station_id]['hordist']
            if hordist >= limit_hordist and limit_hordist >= 0:
                fontcolor = RED
            else:
                fontcolor = ''
            table2 += '''
                      <td align="center" valign="top">
                        <font color="%s">%0.1f</font>
                      </td>
                      ''' % (fontcolor, hordist)
            # Add the correlation coefficients to the table.
            for var in ['Tmax', 'Tmin', 'Tavg', 'Ptot']:
                value = self.corcoef.loc[station_id, var]
                # Coefficients below 0.7 are highlighted in red.
                fontcolor = RED if value < 0.7 else ''
                table2 += '''
                          <td align="center" valign="top">
                            <font color="%s">%0.3f</font>
                          </td>
                          ''' % (fontcolor, value)
            table2 += '</tr>'
        table2 += '''  <tr>
                         <td colspan="8"><hr></td>
                       </tr>
                       <tr>
                         <td align="justify" colspan="8">
                           <font size="2">
                             * Correlation coefficients are set to
                             <font color="#C83737">NaN</font> for a given
                             variable if there is less than
                             <font color="#C83737">%d</font> pairs of data
                             between the target and the neighboring station.
                           </font>
                         </td>
                       </tr>
                     </table>
                     ''' % (365 // 2)
        return table2, target_info
class WeatherData(QObject):
    """
    This class contains all the weather data and weather station info
    that are needed for the gapfilling algorithm that is defined in the
    *GapFillWeather* class.
    """
    sig_task_progress = QSignal(int)
    sig_status_message = QSignal(str)
    sig_corrcoeff_calculated = QSignal()

    def __init__(self):
        super().__init__()
        self.data = None        # dict of DataFrame, one entry per variable
        self.metadata = None    # DataFrame of station metadata
        self._corrcoef = None   # cached correlation matrices per variable

    @property
    def filenames(self):
        """
        Return the list of file paths from which data were loaded.
        """
        if self.metadata is None or self.metadata.empty:
            return []
        else:
            return self.metadata['filename'].tolist()

    @property
    def station_names(self):
        """
        Return the list of station names for which data are loaded in memory.
        """
        if self.metadata is None or self.metadata.empty:
            return []
        else:
            return self.metadata['Station Name'].tolist()

    @property
    def station_ids(self):
        """
        Return the list of station IDs for which data are loaded in memory.
        """
        if self.metadata is None or self.metadata.empty:
            return []
        else:
            return self.metadata.index.tolist()

    @property
    def datetimes(self):
        """Return the datetime index shared by the loaded datasets."""
        return (self.data['Ptot'].index.values if
                self.data is not None else [])

    def count(self):
        """
        Return the number of datasets that are currently loaded.
        """
        return len(self.station_ids)

    # ---- Load and format data.
    def load_and_format_data(self, paths):
        """
        Read, format and store the weather data from the given csv files.

        Parameters
        ----------
        paths: list
            A list of absolute paths containing daily weater data files
        """
        self.data = {var: pd.DataFrame([]) for var in VARNAMES}
        self.metadata = pd.DataFrame([])
        self._corrcoef = None
        if len(paths) == 0:
            return
        self.sig_status_message.emit('Reading data from csv files... 0%')
        for i, path in enumerate(paths):
            try:
                sta_metadata, sta_data = read_weather_datafile(path)
            except Exception:
                print("Unable to read data from '{}'"
                      .format(osp.basename(path)))
            else:
                # Add the first and last date of the dataset to the metadata.
                sta_metadata['first_date'] = min(sta_data.index).date()
                sta_metadata['last_date'] = max(sta_data.index).date()

                # Append the metadata of this station to that of the others.
                sta_id = sta_metadata['Station ID']
                # Test membership against the column *values*: 'in' on a
                # pandas Series tests the index, which silently defeated
                # the duplicate detection.
                if ('Station ID' in self.metadata.columns and
                        sta_id in self.metadata['Station ID'].values):
                    print(("A dataset for station '{}' already exists. "
                           "Skipping reading data from '{}'."
                           ).format(sta_id, osp.basename(path)))
                    continue
                # NOTE: DataFrame.append was removed in pandas 2.0; switch
                # to pd.concat if this codebase migrates to pandas >= 2.
                self.metadata = self.metadata.append(
                    sta_metadata, ignore_index=True)

                # Append the data of this station to that of the others.
                for name in VARNAMES:
                    self.data[name] = self.data[name].merge(
                        sta_data[[name]].rename(columns={name: sta_id}),
                        left_index=True,
                        right_index=True,
                        how='outer')
            percent_progress = int(i / len(paths) * 100)
            self.sig_task_progress.emit(percent_progress)
            self.sig_status_message.emit(
                'Reading data from csv files... {:d}%'.format(
                    percent_progress))

        # Make the daily time series continuous.
        for name in VARNAMES:
            self.data[name] = self.data[name].resample('1D').asfreq()

        # Set the index of the metadata.
        self.metadata = self.metadata.set_index('Station ID', drop=True)

    def load_from_binary(self, dirname):
        """Load the data and metadata from binary files."""
        A = np.load(
            osp.join(dirname, '__cache__', 'fdata.npy'),
            allow_pickle=True
            ).item()
        self.data = A['data']
        self.metadata = A['metadata']
        # Older cache files may not contain the correlation coefficients.
        self._corrcoef = A.get('corrcoef', None)

    def save_to_binary(self, dirname):
        """Save the data and metadata to binary files."""
        print('Caching data...')
        os.makedirs(osp.join(dirname, '__cache__'), exist_ok=True)
        A = {'data': self.data, 'metadata': self.metadata,
             'corrcoef': self._corrcoef}
        np.save(osp.join(dirname, '__cache__', 'fdata.npy'), A)
        print('Data cached succesfully.')

    # ---- Utilities
    def alt_and_dist_calc(self, target_station_id):
        """
        Compute the horizontal distances in km and the altitude differences
        in m between the target station and each neighboring station.
        """
        alt_and_dist = (
            self.metadata[['Latitude', 'Longitude', 'Elevation']].copy())
        # Calcul horizontal and vertical distances of neighboring stations
        # from target.
        alt_and_dist['hordist'] = calc_dist_from_coord(
            alt_and_dist.loc[target_station_id, 'Latitude'],
            alt_and_dist.loc[target_station_id, 'Longitude'],
            alt_and_dist['Latitude'].values,
            alt_and_dist['Longitude'].values)
        alt_and_dist['altdiff'] = (
            alt_and_dist['Elevation'].values -
            alt_and_dist.loc[target_station_id, 'Elevation'])
        return alt_and_dist

    def compute_correlation_coeff(self, target_station_id):
        """
        Compute the correlation coefficients between the target
        station and the neighboring stations for each meteorological
        variable.  Returns a DataFrame indexed by station id with one
        column per variable in VARNAMES.
        """
        if self._corrcoef is None:
            message = "Calculating correlation coefficients..."
            print(message)
            self.sig_status_message.emit(message)
            self._corrcoef = {}
            for var in VARNAMES:
                # Require at least half a year of overlapping data;
                # the coefficient is NaN otherwise.
                self._corrcoef[var] = (
                    self.data[var].corr(min_periods=365 // 2))
            print("Correlation coefficients calculated sucessfully.")
            self.sig_corrcoeff_calculated.emit()
            self.sig_status_message.emit('')
        # Assemble one column per variable for the target station.
        correl_target = None
        for var in VARNAMES:
            corr_var_sta = (
                self._corrcoef[var][[target_station_id]]
                .rename({target_station_id: var}, axis='columns'))
            if correl_target is None:
                correl_target = corr_var_sta
            else:
                correl_target = correl_target.join(corr_var_sta)
        return correl_target

    def generate_html_summary_table(self):
        """
        Generate a Html table showing a summary of available and missing
        weather data for all the stations for which data were loaded in the
        current session.
        """
        table = '''
                <table border="0" cellpadding="3" cellspacing="0"
                 align="center">
                  <tr>
                    <td colspan="10"><hr></td>
                  </tr>
                  <tr>
                    <td align="center" valign="bottom" width=30 rowspan="3">
                      #
                    </td>
                    <td align="left" valign="bottom" rowspan="3">
                      Station
                    </td>
                    <td align="center" valign="bottom" rowspan="3">
                      Climate<br>ID
                    </td>
                    <td align="center" valign="bottom" rowspan="3">
                      From<br>year
                    </td>
                    <td align="center" valign="bottom" rowspan="3">
                      To<br>year
                    </td>
                    <td align="center" valign="bottom" rowspan="3">
                      Nbr.<br>of<br>years
                    </td>
                    <td align="center" valign="middle" colspan="4">
                      % of missing data for
                    </td>
                  </tr>
                  <tr>
                    <td colspan="4"><hr></td>
                  </tr>
                  <tr>
                    <td align="center" valign="middle">
                      T<sub>max</sub>
                    </td>
                    <td align="center" valign="middle">
                      T<sub>min</sub>
                    </td>
                    <td align="center" valign="middle">
                      T<sub>mean</sub>
                    </td>
                    <td align="center" valign="middle">
                      P<sub>tot</sub>
                    </td>
                  </tr>
                  <tr>
                    <td colspan="10"><hr></td>
                  </tr>
                '''
        for i, station_id in enumerate(self.station_ids):
            station_metadata = self.metadata.loc[station_id]
            # Alternate the row background color for readability.
            color = ['transparent', LIGHTGRAY][i % 2]

            first_date = station_metadata['first_date']
            datetime_start = datetime(
                first_date.year, first_date.month, first_date.day)
            last_date = station_metadata['last_date']
            datetime_end = datetime(
                last_date.year, last_date.month, last_date.day)
            total_nbr_data = (
                (datetime_end - datetime_start)
                .total_seconds() / 3600 / 24 + 1)
            firstyear = datetime_start.year
            lastyear = datetime_end.year
            nyears = lastyear - firstyear + 1

            # Compute the percentage of missing data for each variable.
            # This fixes the original copy-paste bug where Tmin and Tavg
            # both read from the 'Tmax' dataset.
            nan_percents = {}
            for var in ['Tmax', 'Tmin', 'Tavg', 'Ptot']:
                var_data = self.data[var][station_id]
                nan_percents[var] = round(len(var_data[
                    (var_data.index >= datetime_start) &
                    (var_data.index <= datetime_end) &
                    (var_data.isnull())
                    ]) / total_nbr_data * 100, 1)

            table += '''
                     <tr bgcolor="%s">
                       <td align="center" valign="middle">
                         %02d
                       </td>
                       <td align="left" valign="middle">
                         <font size="3">%s</font>
                       </td>
                       <td align="center" valign="middle">
                         <font size="3">%s</font>
                       </td>
                       <td align="center" valign="middle">
                         <font size="3">%s</font>
                       </td>
                       <td align="center" valign="middle">
                         <font size="3">%s</font>
                       </td>
                       <td align="center" valign="middle">
                         <font size="3">%0.0f</font>
                       </td>
                       <td align="center" valign="middle">%0.0f</td>
                       <td align="center" valign="middle">%0.0f</td>
                       <td align="center" valign="middle">%0.0f</td>
                       <td align="center" valign="middle">%0.0f</td>
                     </tr>
                     ''' % (color, i+1, station_metadata['Station Name'],
                            station_id, firstyear, lastyear, nyears,
                            nan_percents['Tmax'], nan_percents['Tmin'],
                            nan_percents['Tavg'], nan_percents['Ptot'])
        table += """
                   <tr>
                     <td colspan="10"><hr></td>
                   </tr>
                 </table>
                 """
        return table
def calc_dist_from_coord(lat1, lon1, lat2, lon2):
    """
    Compute the horizontal distance in km between a location given in
    decimal degrees and a set of locations also given in decimal degrees.

    Uses the haversine formula on a spherical Earth of radius 6373 km.
    """
    earth_radius = 6373  # km
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    a = (np.sin(half_dlat)**2 +
         np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon)**2)
    return earth_radius * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
def L1LinearRegression(X, Y):
    """
    Calculate a L-1 multiple linear regression by IRLS
    (Iterative reweighted least squares).

    B = L1LinearRegression(Y,X)

    B = discovered linear coefficients
    X = independent variables
    Y = dependent variable

    Note 1: An intercept term is NOT assumed (need to append a unit column if
            needed).
    Note 2: a.k.a. LAD, LAE, LAR, LAV, least absolute, etc. regression

    SOURCE:
    This function is originally from a Matlab code written by Will Dwinnell
    www.matlabdatamining.blogspot.ca/2007/10/l-1-linear-regression.html
    Last accessed on 21/07/2014
    """
    n_obs, n_vars = np.shape(X)

    # Start from the ordinary least-squares solution.
    coeffs = np.linalg.lstsq(X, Y, rcond=None)[0]

    # Perturb the previous solution so the loop runs at least once.
    prev_coeffs = np.copy(coeffs)
    prev_coeffs[0] += 1e-5

    # Iterate until the coefficients stop changing.
    while np.max(np.abs(coeffs - prev_coeffs)) > 1e-6:
        prev_coeffs = np.copy(coeffs)
        # Reweight each observation by the inverse square root of its
        # absolute residual under the previous coefficients.
        residuals = np.abs(np.dot(coeffs, X.transpose()) - Y)
        residuals[residuals < 1e-6] = 1e-6  # to avoid division by zero
        weights = residuals**-0.5
        # Solve the weighted least-squares problem.
        weighted_X = np.tile(weights, (n_vars, 1)).transpose() * X
        weighted_Y = weights * Y
        coeffs = np.linalg.lstsq(weighted_X, weighted_Y, rcond=None)[0]
    return coeffs
if __name__ == '__main__':
    # Example/demo run of the gapfilling worker on a local dataset.
    gapfiller = DataGapfillWorker()

    # Set the input and output directory.
    # NOTE(review): only inputDir is set here, but gapfill_data() writes
    # its csv output to self.outputdir -- confirm a default is defined on
    # the class.
    gapfiller.inputDir = 'D:/choix_stations_telemetrie/weather_data'

    # Load weather the data files and set the target station.
    # NOTE(review): load_data() does not return station names (it returns
    # None or the result of _reload_data()), so this binding looks stale.
    station_names = gapfiller.load_data()
    gapfiller.set_target_station('7050240')

    # Set the gapfill parameters.
    gapfilled_data = gapfiller.gapfill_data(
        time_start=datetime.strptime('1980-01-01', '%Y-%m-%d'),
        time_end=datetime.strptime('2020-01-01', '%Y-%m-%d'),
        hdist_limit=350,
        vdist_limit=100,
        max_neighbors=3,
        regression_mode=0)
    # 0 -> Least Absolute Deviation (LAD)
    # 1 -> Ordinary Least-Square (OLS)
|
{"hexsha": "ba8407cbafb60165081aef538cfdf02e389c326e", "size": 47488, "ext": "py", "lang": "Python", "max_stars_repo_path": "cdprep/gapfill_data/gapfill_weather_algorithm.py", "max_stars_repo_name": "cgq-qgc/climate-data-preprocessing-tool", "max_stars_repo_head_hexsha": "59aaf214127f00f92c6cacc8f988c5f05de6c0f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-02T15:38:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-02T15:38:47.000Z", "max_issues_repo_path": "cdprep/gapfill_data/gapfill_weather_algorithm.py", "max_issues_repo_name": "cgq-qgc/climate-data-preprocessing-tool", "max_issues_repo_head_hexsha": "59aaf214127f00f92c6cacc8f988c5f05de6c0f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-03T15:11:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-11T16:23:17.000Z", "max_forks_repo_path": "cdprep/gapfill_data/gapfill_weather_algorithm.py", "max_forks_repo_name": "cgq-qgc/climate-data-preprocessing-tool", "max_forks_repo_head_hexsha": "59aaf214127f00f92c6cacc8f988c5f05de6c0f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7973856209, "max_line_length": 79, "alphanum_fraction": 0.5301128706, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 9978}
|
"""
Code based loosely on implementation:
https://github.com/openai/baselines/blob/master/baselines/common/models.py
Under MIT license.
"""
import numpy as np
import torch.nn as nn
import torch.nn.init as init
import vel.util.network as net_util
from vel.api.base import LinearBackboneModel, ModelFactory
class MLP(LinearBackboneModel):
    """ Simple Multi-Layer-Perceptron network """
    def __init__(self, input_length, layers=2, hidden_units=64, activation='tanh', layer_norm=False):
        super().__init__()

        self.input_length = input_length
        self.layers = layers
        self.hidden_units = hidden_units
        self.activation = activation
        self.layer_norm = layer_norm

        # Stack `layers` blocks of Linear (+ optional LayerNorm) + activation.
        modules = []
        in_features = self.input_length
        for _ in range(self.layers):
            modules.append(nn.Linear(in_features, hidden_units))
            if self.layer_norm:
                modules.append(nn.LayerNorm(hidden_units))
            modules.append(net_util.activation(activation)())
            in_features = hidden_units

        self.model = nn.Sequential(*modules)

    @property
    def output_dim(self):
        """ Final dimension of model output """
        return self.hidden_units

    def reset_weights(self):
        """ Re-initialize linear layers: orthogonal weights, zero biases. """
        for module in self.modules():
            if isinstance(module, nn.Linear):
                init.orthogonal_(module.weight, gain=np.sqrt(2))
                init.constant_(module.bias, 0.0)

    def forward(self, input_data):
        # Cast observations (e.g. double or int tensors) to float32 first.
        return self.model(input_data.float())
def create(input_length, layers=2, hidden_units=64, activation='tanh', layer_norm=True):
    """ Vel factory function for the MLP backbone. """
    def instantiate(**_):
        return MLP(
            input_length=input_length, layers=layers,
            hidden_units=hidden_units, activation=activation,
            layer_norm=layer_norm)

    return ModelFactory.generic(instantiate)


# Backwards-compatible alias for the factory function.
MLPFactory = create
|
{"hexsha": "131004f2ef02dca6e500566e082f0a883c157b62", "size": 2066, "ext": "py", "lang": "Python", "max_stars_repo_path": "vel/rl/models/backbone/mlp.py", "max_stars_repo_name": "cclauss/vel", "max_stars_repo_head_hexsha": "78a6a20af80ff613898d2983c83fdb223634aaad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vel/rl/models/backbone/mlp.py", "max_issues_repo_name": "cclauss/vel", "max_issues_repo_head_hexsha": "78a6a20af80ff613898d2983c83fdb223634aaad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vel/rl/models/backbone/mlp.py", "max_forks_repo_name": "cclauss/vel", "max_forks_repo_head_hexsha": "78a6a20af80ff613898d2983c83fdb223634aaad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9189189189, "max_line_length": 101, "alphanum_fraction": 0.6529525653, "include": true, "reason": "import numpy", "num_tokens": 431}
|
import multiprocessing
import pytest
import numpy as np
import scipy as sp
import scipy.stats as st
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from pyabc import (ABCSMC, RV, Distribution,
MedianEpsilon,
PercentileDistance, SimpleModel,
ConstantPopulationSize,
History)
from pyabc.sampler import (SingleCoreSampler,
MappingSampler,
MulticoreParticleParallelSampler,
DaskDistributedSampler,
ConcurrentFutureSampler,
MulticoreEvalParallelSampler,
RedisEvalParallelSamplerServerStarter)
from pyabc.population import Particle
import logging
logger = logging.getLogger(__name__)
def multi_proc_map(f, x):
    """Map ``f`` over ``x`` with a fresh multiprocessing pool and return a list."""
    with multiprocessing.Pool() as pool:
        mapped = pool.map(f, x)
    return mapped
class GenericFutureWithProcessPool(ConcurrentFutureSampler):
    """ConcurrentFutureSampler backed by a ProcessPoolExecutor with 8 workers."""

    def __init__(self, map_=None):
        # ``map_`` is accepted for fixture-signature compatibility and unused.
        super().__init__(ProcessPoolExecutor(max_workers=8), 8)
class GenericFutureWithProcessPoolBatch(ConcurrentFutureSampler):
    """Process-pool ConcurrentFutureSampler that submits work in batches of 15."""

    def __init__(self, map_=None):
        # ``map_`` is accepted for fixture-signature compatibility and unused.
        super().__init__(ProcessPoolExecutor(max_workers=8), 8,
                         batch_size=15)
class GenericFutureWithThreadPool(ConcurrentFutureSampler):
    """ConcurrentFutureSampler backed by a ThreadPoolExecutor with 8 workers."""

    def __init__(self, map_=None):
        # ``map_`` is accepted for fixture-signature compatibility and unused.
        super().__init__(ThreadPoolExecutor(max_workers=8), 8)
class MultiProcessingMappingSampler(MappingSampler):
    """MappingSampler wired to the multiprocessing-based ``multi_proc_map``."""

    def __init__(self, map_=None):
        # The ``map_`` argument is ignored; a process-pool map is always used.
        super().__init__(multi_proc_map)
class DaskDistributedSamplerBatch(DaskDistributedSampler):
    """DaskDistributedSampler configured to dispatch simulations in batches of 20."""

    def __init__(self, map_=None):
        # ``map_`` is accepted for fixture-signature compatibility and unused.
        super().__init__(batch_size=20)
class WrongOutputSampler(SingleCoreSampler):
    """Deliberately faulty sampler returning one accepted particle too many."""

    def sample_until_n_accepted(self, n, simulate_one, all_accepted=False):
        # Request n + 1 acceptances so consumers expecting exactly n must fail.
        return super().sample_until_n_accepted(
            n + 1, simulate_one, all_accepted=False)
def RedisEvalParallelSamplerServerStarterWrapper():
    """Zero-argument factory used by the fixture param list (batch_size fixed at 5)."""
    return RedisEvalParallelSamplerServerStarter(batch_size=5)
@pytest.fixture(params=[SingleCoreSampler,
                        RedisEvalParallelSamplerServerStarterWrapper,
                        MulticoreEvalParallelSampler,
                        MultiProcessingMappingSampler,
                        MulticoreParticleParallelSampler,
                        MappingSampler,
                        DaskDistributedSampler,
                        DaskDistributedSamplerBatch,
                        GenericFutureWithThreadPool,
                        GenericFutureWithProcessPool,
                        GenericFutureWithProcessPoolBatch
                        ])
def sampler(request):
    """Parametrized fixture yielding one instance of every sampler backend."""
    instance = request.param()
    yield instance
    # Not every backend implements cleanup(); tolerate its absence silently.
    try:
        instance.cleanup()
    except AttributeError:
        pass
@pytest.fixture
def redis_starter_sampler():
    """Yield a redis-backed sampler (batch_size=5) and clean it up on teardown."""
    starter = RedisEvalParallelSamplerServerStarter(batch_size=5)
    yield starter
    starter.cleanup()
def test_two_competing_gaussians_multiple_population(db_path, sampler):
    """Smoke-test the two-Gaussian inference with one simulation per parameter."""
    two_competing_gaussians_multiple_population(db_path, sampler, n_sim=1)
def test_two_competing_gaussians_multiple_population_2_evaluations(
        db_path, redis_starter_sampler):
    """Same inference, but with two evaluations per parameter on the redis sampler."""
    two_competing_gaussians_multiple_population(
        db_path, redis_starter_sampler, n_sim=2)
def two_competing_gaussians_multiple_population(db_path, sampler, n_sim):
    """Run ABC-SMC model selection between two Gaussian models and check it.

    Args:
        db_path: database URL where the inference history is stored.
        sampler: the pyabc sampler backend under test.
        n_sim: number of simulations per parameter.

    Fixes vs. the original: ``scipy.sqrt`` and ``scipy.inf`` were deprecated
    NumPy aliases removed from modern SciPy — replaced by ``np.sqrt`` /
    ``np.inf``; ``logger.warn`` is deprecated — replaced by ``logger.warning``.
    """
    # Define a gaussian model
    sigma = .5

    def model(args):
        return {"y": st.norm(args['x'], sigma).rvs()}

    # We define two models, but they are identical so far
    models = [model, model]
    models = list(map(SimpleModel, models))
    # However, our models' priors are not the same. Their mean differs.
    mu_x_1, mu_x_2 = 0, 1
    parameter_given_model_prior_distribution = [
        Distribution(x=RV("norm", mu_x_1, sigma)),
        Distribution(x=RV("norm", mu_x_2, sigma))
    ]
    # We plug all the ABC setup together
    nr_populations = 2
    pop_size = ConstantPopulationSize(23, nr_samples_per_parameter=n_sim)
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 PercentileDistance(measures_to_use=["y"]),
                 pop_size,
                 eps=MedianEpsilon(),
                 sampler=sampler)
    # Finally we add meta data such as model names and
    # define where to store the results
    # y_observed is the important piece here: our actual observation.
    y_observed = 1
    abc.new(db_path, {"y": y_observed})
    # We run the ABC with nr_populations populations max
    minimum_epsilon = .05
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    # Evaluate the model probabililties
    mp = history.get_model_probabilities(history.max_t)

    def p_y_given_model(mu_x_model):
        # Analytic evidence: convolution of prior and likelihood Gaussians.
        # np.sqrt replaces the removed scipy.sqrt alias.
        res = st.norm(mu_x_model, np.sqrt(sigma**2 + sigma**2)).pdf(y_observed)
        return res

    p1_expected_unnormalized = p_y_given_model(mu_x_1)
    p2_expected_unnormalized = p_y_given_model(mu_x_2)
    total_evidence = p1_expected_unnormalized + p2_expected_unnormalized
    p1_expected = p1_expected_unnormalized / total_evidence
    p2_expected = p2_expected_unnormalized / total_evidence
    assert history.max_t == nr_populations - 1
    # the next lines only test if we obtain correct numerical types
    try:
        mp0 = mp.p[0]
    except KeyError:
        mp0 = 0
    try:
        mp1 = mp.p[1]
    except KeyError:
        mp1 = 0
    # np.inf replaces the removed scipy.inf alias.
    assert abs(mp0 - p1_expected) + abs(mp1 - p2_expected) < np.inf
    # check that sampler only did nr_particles samples in first round
    pops = history.get_all_populations()
    # since we had calibration (of epsilon), check that was saved
    pre_evals = pops[pops['t'] == History.PRE_TIME]['samples'].values
    assert pre_evals >= pop_size.nr_particles
    # our samplers should not have overhead in calibration, except batching
    batch_size = sampler.batch_size if hasattr(sampler, 'batch_size') else 1
    max_expected = pop_size.nr_particles + batch_size - 1
    if pre_evals > max_expected:
        # Violations have been observed occasionally for the redis server
        # due to runtime conditions with the increase of the evaluations
        # counter. This could be overcome, but as it usually only happens
        # for low-runtime models, this should not be a problem. Thus, only
        # print a warning here.
        logger.warning(
            f"Had {pre_evals} simulations in the calibration iteration, "
            f"but a maximum of {max_expected} would have been sufficient for "
            f"the population size of {pop_size.nr_particles}.")
def test_in_memory(redis_starter_sampler):
    """The inference must also work against an in-memory sqlite database."""
    two_competing_gaussians_multiple_population(
        "sqlite://", redis_starter_sampler, 1)
def test_wrong_output_sampler():
    """A sampler returning more particles than requested must trip an assertion."""
    bad_sampler = WrongOutputSampler()

    def simulate_one():
        # Weightless, always-accepted dummy particle.
        return Particle(m=0, parameter={}, weight=0,
                        accepted_sum_stats=[], accepted_distances=[],
                        accepted=True)

    with pytest.raises(AssertionError):
        bad_sampler.sample_until_n_accepted(5, simulate_one)
def test_redis_multiprocess():
    """The redis sampler with one multiprocessing worker must accept 10 particles."""
    sampler = RedisEvalParallelSamplerServerStarter(
        batch_size=3, workers=1, processes_per_worker=1)

    def simulate_one():
        # Randomly accept/reject to exercise both code paths.
        accepted = np.random.randint(2)
        print(accepted)
        return Particle(0, {}, 0.1, [], [], accepted)

    sample = sampler.sample_until_n_accepted(10, simulate_one)
    assert len(sample.get_accepted_population()) == 10
    sampler.cleanup()
|
{"hexsha": "1288edd62b60a6111564c86fd8ba78dd7319bcc3", "size": 8043, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_samplers.py", "max_stars_repo_name": "Pat-Laub/pyABC", "max_stars_repo_head_hexsha": "f23f0ff8d430a8ce0a0c8253b45e19add9121992", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_samplers.py", "max_issues_repo_name": "Pat-Laub/pyABC", "max_issues_repo_head_hexsha": "f23f0ff8d430a8ce0a0c8253b45e19add9121992", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_samplers.py", "max_forks_repo_name": "Pat-Laub/pyABC", "max_forks_repo_head_hexsha": "f23f0ff8d430a8ce0a0c8253b45e19add9121992", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2763157895, "max_line_length": 79, "alphanum_fraction": 0.6631853786, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1736}
|
# This code is based off the DUET algorithm presented in:
# O. Yilmaz and S. Rickard, "Blind separation of speech mixtures via time-frequency masking."
# S. Rickard, "The DUET Blind Source Separation Algorithm"
#
# At this time, the algorithm is not working when returning to the time domain
# and, to be honest, I haven't yet figured out why. At a later time I'll try
# and add code implementing the below algorithms
#
# A. S. Master, "Bayesian two source modeling for separation of N sources from stereo signals."
# J. Woodruff and B. Pardo, "Active source estimation for improved signal estimation"
#
#
import duet as duet
import matplotlibwrapper as mplw
import numpy as np
import stft as stft
import soundfile as sf
import sounddevice as sd
#Input values are defined here
window_length= 1024 #In the DUET paper this was found to be the optimal window length for the STFT
delay_size = 512 #In the DUET paper this was chosen as the delay spacing
output_plots = True #Defines whether this will just be for use or analysis
output_sounds = True #Defines whether the separated sources are output
p = 0.5 #Both of these can be tuned depending on the MLE
q = 0 #Both of these can be tuned depending on the MLE
# Histogram limits and resolution for the attenuation (alpha) / delay (delta) plane.
alpha_cutoff = 1
number_alpha_bins = 50
delta_cutoff = 4
number_delta_bins = 50
number_of_sources = 3
data, fs = sf.read('../stereomixtures/a4bb4b4.aiff') #Read in the sample music
#Get the relevant left and right channel data
l_data = data[:,0]
r_data = data[:,1]
#Get l and r stft for each dataset
l_stft_data = stft.short_time_fourier_transform(l_data, window_length, delay_size)
r_stft_data = stft.short_time_fourier_transform(r_data, window_length, delay_size)
#Remove the dc component of the data since this causes problems for when w = 0 for 1/w
l_stft_data = np.delete(l_stft_data, 0, 1)
r_stft_data = np.delete(r_stft_data, 0, 1)
#Estimate mixing parameters (symmetric attenuation and relative delay per TF bin)
symmetric_attenuation_estimation, delay_estimation = duet.estimate_parameters(l_stft_data, r_stft_data, window_length)
weighted_data = duet.generate_weighted_data(l_stft_data, r_stft_data, window_length, p, q)
# Discard TF points outside the alpha/delta cutoffs before histogramming.
filtered_weighted_data, filtered_attenutation_data, filtered_delay_data = duet.filter_data(weighted_data, symmetric_attenuation_estimation, delay_estimation, alpha_cutoff, delta_cutoff)
histogram_data = duet.generate_histogram(filtered_weighted_data, filtered_attenutation_data, filtered_delay_data, number_alpha_bins, number_delta_bins)
smoothed_histogram_data = duet.smooth_histogram(histogram_data, 4)
if (output_plots):
    mplw.simple_surface_plot(histogram_data, number_alpha_bins, number_delta_bins, alpha_cutoff, delta_cutoff)
    mplw.simple_surface_plot(smoothed_histogram_data, number_alpha_bins, number_delta_bins, alpha_cutoff, delta_cutoff)
#Ideally, we'd have an effective peak-picking algorithm here
#Alas, for now, we do not
# NOTE(review): peak locations are hard-coded for this particular input file;
# rerunning on other mixtures requires re-reading them off the histogram plot.
peak_alphas = np.array([-0.478, -0.2, 0.123])
peak_deltas = np.array([-0.600, -1.18, 1.32])
stft_sources = duet.separate_sources(l_stft_data, r_stft_data, duet.compute_w(window_length), duet.compute_a(peak_alphas), peak_deltas, number_of_sources)
#Lastly, convert back to the time domain
td_sources = []
for i in range(len(stft_sources)):
    td_sources.append(stft.inverse_short_time_fourier_transform(stft_sources[i], len(l_data), window_length, delay_size))
#Finally, save the separated sources
if (output_sounds):
    for i in range(0, len(td_sources)):
        #mplw.simple_line_plot(td_sources[i])
        sf.write("Sourcenumber" + str(i) + ".aiff", td_sources[i], fs)
|
{"hexsha": "10bdb71ed6a0629f81dc4130589e00db4592648a", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "msspy/msspy.py", "max_stars_repo_name": "adambnoel/msspy", "max_stars_repo_head_hexsha": "8f3f3283e11b2ded141636abacdc7a3279ed45e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "msspy/msspy.py", "max_issues_repo_name": "adambnoel/msspy", "max_issues_repo_head_hexsha": "8f3f3283e11b2ded141636abacdc7a3279ed45e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "msspy/msspy.py", "max_forks_repo_name": "adambnoel/msspy", "max_forks_repo_head_hexsha": "8f3f3283e11b2ded141636abacdc7a3279ed45e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2151898734, "max_line_length": 185, "alphanum_fraction": 0.7953048955, "include": true, "reason": "import numpy", "num_tokens": 930}
|
import pathlib, sys
# Walk up from the current working directory to the project root.
# Fix: the original looped forever when 'membership_inference_attack' was not
# an ancestor of the CWD (at the filesystem root, .parent returns the root
# itself). Fail loudly instead.
home_path = pathlib.Path('.').resolve()
while home_path.name != 'membership_inference_attack':
    if home_path.parent == home_path:
        raise RuntimeError(
            "could not locate the 'membership_inference_attack' project root "
            "above the current working directory")
    home_path = home_path.parent
reports_path = home_path/'reports'
from sklearn.metrics import confusion_matrix, classification_report, balanced_accuracy_score, roc_auc_score, \
classification_report, roc_curve
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
import pdb
import os, pathlib, sys
import torch
import time
import pdb
from collections import defaultdict
import seaborn as sns
import pandas as pd
class Statistics:
    """class used to records statistical data for all experiments.

    Experiments are stored as nested dicts in ``self.exp``; each experiment
    holds a list of model-training records, MIA distribution stats and an
    optional aggregated mean accuracy (see new_experiment for the schema).
    """
    def __init__(self):
        # Predictions / ground-truth labels buffered since the last flush.
        self.y_pred = []
        self.y_true = []
        # One dict per experiment, appended by new_experiment().
        self.exp = []
        # Lazily-built textual report; None until _create_resume() runs.
        self.resume = None

    def new_experiment(self, name, parameters, label = None):
        """declares that a new experiment will be executed.

        Args:
            name (string): name of the experiment
            parameters (Dict): all parameters of the experiment
            label (Dict): contains infos to aggregate with similar experiments
        """
        self._call_stat("new_experiment")
        # Flush pending batches / timers into the *previous* experiment first.
        self._process_batchs()
        self._close_timer()
        experiment = { 'name': name, 'label': label, 'param': parameters, 'model_training': [],
                       'mia_stats' : { 'mia_train_in_distribution' : [],
                                       'mia_train_out_distribution': [],
                                       'mia_test_in_distribution'  : [],
                                       'mia_test_out_distribution' : [] },
                       'mean_accuracies': None }
        self.exp.append(experiment)

    def new_train(self, name = None, label = None):
        """declares that the training of a new model will be executed.

        Args:
            name (string): name of the model. If None the results of the model testing will not be printed individualy.
            label (string): label of the group to which belongs the model. Special processing are done with models of the same group (averaged accuracy for instance).
        """
        self._call_stat("new_train")
        self._process_batchs()
        self._close_timer()
        # 'time' holds [start] now and [start, end] once _close_timer runs.
        start = time.time()
        model = { 'name': name, 'label': label, 'loss': [], 'time': [start],
                  'measures': { 'balanced_accuracy': [],
                                'roc_area' : [],
                                'report' : '' } }
        self.exp[-1]['model_training'].append(model)

    def new_epoch(self):
        """declares that a new epoch of a training cycle will be executed.
        """
        self._call_stat("new_epoch")
        self._process_batchs()

    def new_batch(self, batch_pred, batch_true):
        """collects the results of a train epoch on the test dataset.

        Args:
            batch_pred (list(label)): list of the labels predicted by the ML for the current batch.
            batch_true (list(label)): list of the true labels for the current batch.
        """
        # self._call_stat("new_batch")
        self.y_pred.extend(batch_pred)
        self.y_true.extend(batch_true)

    def _process_batchs(self):
        """process the data for all saved batches.

        Computes balanced accuracy, ROC AUC and a classification report from
        the buffered predictions, stores them on the latest model record and
        clears the buffers.
        """
        if len(self.y_true) != 0:
            accuracy = balanced_accuracy_score(self.y_true, self.y_pred)
            self.exp[-1]['model_training'][-1]['measures']['balanced_accuracy'].append(accuracy)
            try:
                # if 'MIA model' in self.exp[-1]['model_training'][-1]['name']:
                #     pdb.set_trace()
                area = roc_auc_score(self.y_true, self.y_pred)
                self.exp[-1]['model_training'][-1]['measures']['roc_area'].append(area)
            except TypeError:
                # Multi-class labels: AUC is not computed for this model at all.
                self.exp[-1]['model_training'][-1]['measures']['roc_area'] = None
            except ValueError:
                # Only one class present in y_true: fall back to chance level.
                if self.exp[-1]['model_training'][-1]['measures']['roc_area'] is not None:
                    self.exp[-1]['model_training'][-1]['measures']['roc_area'].append(0.5) #not satisfying !!
            report = classification_report(self.y_true, self.y_pred)
            self.exp[-1]['model_training'][-1]['measures']['report'] = report
            self.y_true = []
            self.y_pred = []

    def _final_process(self):
        # Fill in 'mean_accuracies' for every labelled experiment group that
        # does not have it yet (mean of the final balanced accuracy of each
        # model whose name contains 'mia').
        for idx in range(len(self.exp)):
            if self.exp[idx]['mean_accuracies'] is None and self.exp[idx]['label'] is not None:
                groups_exp = defaultdict(list)
                for experiment_idx, experiment in enumerate(self.exp):
                    if experiment['label'] is not None:
                        groups_exp[experiment['label']].append((experiment_idx,experiment))
                for group_label, group in groups_exp.items():
                    mean_accuracies = [ sum(mia_model_accuracies) / len(mia_model_accuracies) \
                                        for mia_model_accuracies in \
                                        [ [ model['measures']['balanced_accuracy'][-1] for model in experiment['model_training'] if model['name'] is not None and 'mia' in model['name'].lower() ] ] \
                                        for (_,experiment) in group ]
                    # NOTE(review): 'average' is computed but never used here.
                    average = sum(mean_accuracies) / len(mean_accuracies)
                    for (experiment_idx,_) in group:
                        self.exp[experiment_idx]['mean_accuracies'] = mean_accuracies

    def save(self, dir):
        """save the results of all experiments in dir

        Writes the textual resume plus one plot per list-valued measure, a
        loss curve per model, and a mean-accuracy curve per labelled
        experiment.  ``dir`` is a pathlib.Path (name shadows the builtin).
        """
        print("\n\nRecording...", end = ' ')
        self._process_batchs()
        self._final_process()
        self._close_timer()
        basename_report = 'Statistics_report'
        # Number the report directory after the existing ones.
        actual_reports = [f for f in dir.iterdir() if basename_report in f.name]
        file = f"{basename_report}_{len(actual_reports)}"
        path = dir/file
        print(f"in {file}...", end = ' ')
        resume_path = path/'resume'
        os.makedirs(os.path.dirname(str(resume_path)), exist_ok=True)
        with open(resume_path, 'w') as resume_file:
            self._create_resume()
            resume_file.write(self.resume)
        resume_file.closed
        for experiment in self.exp:
            for model in experiment['model_training']:
                if model['name'] is not None:
                    # One plot per list-valued measure (e.g. balanced_accuracy).
                    for measure_name, measure_values in model['measures'].items():
                        if measure_values is None:
                            continue
                        if type(measure_values) == list:
                            plot_path = path/experiment['name']/model['name']/measure_name
                            os.makedirs(os.path.dirname(str(plot_path)), exist_ok=True)
                            plt.plot(measure_values)
                            plt.title(measure_name)
                            plt.savefig(plot_path)
                            plt.clf()
                    loss_path = path/experiment['name']/model['name']/'loss_curve'
                    os.makedirs(os.path.dirname(str(loss_path)), exist_ok=True)
                    plt.plot(model['loss'])
                    plt.title('loss evolution during training')
                    plt.savefig(loss_path)
                    plt.clf()
            if experiment['label'] is not None:
                mean_path = path/f"Mean_accuracies_curve of experiment '{experiment['label']}'"
                os.makedirs(os.path.dirname(str(mean_path)), exist_ok=True)
                # assumes experiment['label'] is a mapping containing
                # 'interest_parameter_range' — TODO confirm with callers.
                plt.plot(experiment['label']['interest_parameter_range'], experiment['mean_accuracies'])
                plt.title('Mean attack model accuracy variation')
                plt.xlabel(f"Different {experiment['label']} values")
                plt.ylabel('Mean mia accuracy')
                plt.savefig(mean_path)
                plt.clf()
        print("Done.")

    def _create_resume(self):
        """create the results resume of all experiments as a string and save it, if it's not already did
        """
        if self.resume is None:
            lines = []
            groups_exp = defaultdict(list)
            for experiment_idx, experiment in enumerate(self.exp):
                lines.append(f"  Experiment {experiment['name']} :")
                lines.append("Parameters :")
                lines.append(str(experiment['param']))
                if experiment['label'] is not None:
                    groups_exp[experiment['label']].append((experiment_idx,experiment))
                groups_mod = defaultdict(list)
                for model in experiment['model_training']:
                    if model['name'] is not None:
                        lines.append(f"\n Model {model['name']} :\n")
                        for measure_name in model['measures']:
                            if model['measures'][measure_name] is None:
                                continue
                            lines.append(f"\n{measure_name}")
                            lines.append(str(model['measures'][measure_name]))
                        lines.append(f"\nTraining time: {model['time'][1] - model['time'][0]:3.5f}s")
                    if model['label'] is not None:
                        groups_mod[model['label']].append(model)
                mia = experiment['mia_stats']
                if len(mia['mia_train_in_distribution']):
                    # Per-class mean membership distributions, one line per table.
                    class_number = len(mia['mia_train_in_distribution'])
                    lines.append('\nMembership mean distributions:')
                    for i in range(class_number):
                        lines.append(f"  class {i}")
                        for label, tab in mia.items():
                            lines.append(f"    {label}: {tab[i]}")
                for group_label, group in groups_mod.items():
                    lines.append(f"\n\nAverage statistics of the group of model '{group_label}':")
                    for measure_name in group[0]['measures']: # every models with same label must have same measures
                        values = [model['measures'][measure_name] for model in group]
                        if None in values:
                            continue
                        average = None
                        if type(values[0]) == float:
                            average = sum(values)/len(values)
                        if type(values[0]) == list:
                            # Average the *last* recorded value of each model.
                            final_values = []
                            for l in values:
                                if len(l):
                                    final_values.append(l[-1])
                            if len(final_values):
                                average = sum(final_values) / len(final_values)
                        if average is not None:
                            lines.append(f"\n * {measure_name}: {average}")
                    durations = [model['time'][1] - model['time'][0] for model in group]
                    lines.append(f"\n\nAverage training time for the group {group_label}: {sum(durations) / len(durations):3.5f}s")
            for group_label, group in groups_exp.items():
                lines.append(f"\n\nAverage performances of the group of experiments '{group_label}':")
                mean_accuracies = [ sum(mia_model_accuracies) / len(mia_model_accuracies) \
                                    for mia_model_accuracies in \
                                    [ [ model['measures']['balanced_accuracy'][-1] for model in experiment['model_training'] if model['name'] is not None and 'mia' in model['name'].lower() ] ] \
                                    for (_,experiment) in group ]
                average = sum(mean_accuracies) / len(mean_accuracies)
                lines.append(f"Average mean accuracy of the group of experiments '{group_label}': {average}")
                for (experiment_idx,_) in group:
                    self.exp[experiment_idx]['mean_accuracies'] = mean_accuracies
            self.resume = '\n'.join(lines)

    def print_results(self):
        """print the results of all experiments
        """
        self._process_batchs()
        self._close_timer()
        self._create_resume()
        print(self.resume)

    def _process_mia_dataset(self, dataset, klass, save_path):
        """process the mean distribution of input samples from a MIA dataset

        Args:
            dataset (torch Dataset): MIA train or test dataset
            klass (int): class index this shadow model is responsible for
            save_path (pathlib.Path): prefix for the saved violin plots
        """
        # Split samples into member (s_in) / non-member (s_out), separately
        # for samples whose argmax matches klass and those that miss it.
        s_in = []
        s_out = []
        s_in_miss = []
        s_out_miss = []
        for s_input, s_output in dataset:
            if torch.argmax(s_input).item() == klass:
                if s_output == 1:
                    s_in.append(s_input)
                else:
                    s_out.append(s_input)
            else:
                if s_output == 1:
                    s_in_miss.append(s_input)
                else:
                    s_out_miss.append(s_input)
        # Inputs are presumably log-probabilities; exp() recovers confidences
        # — TODO confirm against the dataset builder.
        s_in = torch.exp(torch.stack(s_in))
        s_out = torch.exp(torch.stack(s_out))
        if len(s_in_miss) != 0 and len(s_out_miss) != 0:
            s_in_miss = torch.exp(torch.stack(s_in_miss))
            s_out_miss = torch.exp(torch.stack(s_out_miss))
        else:
            s_in_miss = None
            s_out_miss = None
        df_in = pd.DataFrame(s_in.numpy())
        df_out = pd.DataFrame(s_out.numpy())
        df_in_miss = None
        df_out_miss = None
        if s_in_miss is not None:
            df_in_miss = pd.DataFrame(s_in_miss.numpy())
            df_out_miss = pd.DataFrame(s_out_miss.numpy())
        df_in['training set'] = ['in' for i in range(len(df_in[0]))]
        df_out['training set'] = ['out' for i in range(len(df_out[0]))]
        if s_in_miss is not None:
            df_in_miss['training set'] = ['in' for i in range(len(df_in_miss[0]))]
            df_out_miss['training set'] = ['out' for i in range(len(df_out_miss[0]))]
        in_count = len(df_in)
        out_count = len(df_out)
        type_str_in = f"true positive\nin: {in_count}"
        type_str_out = f"true positive\nout: {out_count}"
        df_in['type'] = [type_str_in for i in range(len(df_in[0]))]
        df_out['type'] = [type_str_out for i in range(len(df_out[0]))]
        if s_in_miss is not None:
            in_count = len(df_in_miss)
            out_count = len(df_out_miss)
            type_str_in = f"false negative\nin: {in_count}"
            type_str_out = f"false negative\nout: {out_count}"
            df_in_miss['type'] = [type_str_in for i in range(len(df_in_miss[0]))]
            df_out_miss['type'] = [type_str_out for i in range(len(df_out_miss[0]))]
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this code
        # requires pandas < 2 (pd.concat would be the modern equivalent).
        df = df_in.append(df_out, ignore_index = True)
        target_column = f"confidence for class {klass}"
        df_plot = df.rename(columns = { klass : target_column })
        sns.violinplot(x = 'type', y = target_column,
                       data = df_plot[[target_column, 'training set', 'type']],
                       scale = 'count', bw = 0.1, cut = 0)
        plt.savefig(save_path.as_posix() + "_tp.png")
        plt.clf()
        if s_in_miss is not None:
            df = df_in_miss.append(df_out_miss, ignore_index = True)
            target_column = f"confidence for class {klass}"
            df_plot = df.rename(columns = { klass : target_column })
            sns.violinplot(x = 'type', y = target_column,
                           data = df_plot[[target_column, 'training set', 'type']],
                           scale = 'count', bw = 0.1, cut = 0)
            plt.savefig(save_path.as_posix() + "_fn.png")
            plt.clf()
            # adding all samples under the label "all" to view the whole distribution
            in_count = len(df_in) + len(df_in_miss)
            out_count = len(df_out) + len(df_out_miss)
            type_str_in = f"all\nin: {in_count}"
            type_str_out = f"all\nout: {out_count}"
            df_in['type'] = [type_str_in for i in range(len(df_in[0]))]
            df_out['type'] = [type_str_out for i in range(len(df_out[0]))]
            df_in_miss['type'] = [type_str_in for i in range(len(df_in_miss[0]))]
            df_out_miss['type'] = [type_str_out for i in range(len(df_out_miss[0]))]
            df = df_in.append(df_out, ignore_index = True)
            df = df.append(df_in_miss, ignore_index = True)
            df = df.append(df_out_miss, ignore_index = True)
            target_column = f"confidence for class {klass}"
            df_plot = df.rename(columns = { klass : target_column })
            sns.violinplot(x = 'type', y = target_column,
                           data = df_plot[[target_column, 'training set', 'type']],
                           scale = 'count', bw = 0.1, cut = 0)
            plt.savefig(save_path.as_posix() + "_all.png")
            plt.clf()

    def new_report_dir(self):
        # Create reports/statistics_<N> with N = number of existing entries.
        dirs = [x for x in reports_path.iterdir() if reports_path.is_dir()]
        index = len(dirs)
        self.report_dir = reports_path/f"statistics_{index}"
        self.report_dir.mkdir()

    def membership_distributions(self, train_datasets, test_datasets):
        """process the mean distribution of all train and test MIA datasets
        """
        dirs = [x for x in self.report_dir.iterdir() if self.report_dir.is_dir()]
        index = len(dirs)
        self.exp_dir = self.report_dir /f"experiment_{index}"
        self.exp_dir.mkdir()
        # One violin-plot set per class/shadow-model, for train then test.
        klass = 0
        for dataset in train_datasets:
            self._process_mia_dataset(dataset, klass, self.exp_dir/f"mia_train_samples_model_{klass}")
            klass += 1
        klass = 0
        for dataset in test_datasets:
            self._process_mia_dataset(dataset, klass, self.exp_dir/f"mia_test_samples_model_{klass}")
            klass += 1

    def _close_timer(self):
        """
        save the end time of the last model
        """
        end = time.time()
        last_model = None
        if self.exp != []:
            if self.exp[-1]['model_training'] == []:
                # Current experiment has no model yet: close the previous one.
                if len(self.exp) > 1:
                    if self.exp[-2]['model_training'] != []:
                        last_model = self.exp[-2]['model_training'][-1]
            else:
                last_model = self.exp[-1]['model_training'][-1]
        # Only close a timer once ('time' grows from [start] to [start, end]).
        if last_model is not None and len(last_model['time']) == 1:
            last_model['time'].append(end)

    def add_loss(self, loss):
        """
        collect the loss on the last batch during the training on the training set
        """
        self._call_stat("add_loss")
        self.exp[-1]['model_training'][-1]['loss'].append(loss)

    def _call_stat(self, method_name):
        # Debugging hook, intentionally disabled.
        pass
        # try:
        #     if 'number 6' in self.exp[-1]['name']:
        #         print(method_name, [ (model['name'],model['label']) for model in self.exp[-1]['model_training'] ])
        #         pdb.set_trace()
        # except:
        #     pass
|
{"hexsha": "acdbd6b0a16544a38d89329eab681028cef1371e", "size": 17176, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/statistics.py", "max_stars_repo_name": "DisaitekAI/membership_inference_attack", "max_stars_repo_head_hexsha": "3cd77529e7584b3195a532d848bbea5f5cb6304d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-13T10:52:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-13T10:52:53.000Z", "max_issues_repo_path": "src/utils/statistics.py", "max_issues_repo_name": "DisaitekAI/membership_inference_attack", "max_issues_repo_head_hexsha": "3cd77529e7584b3195a532d848bbea5f5cb6304d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/statistics.py", "max_forks_repo_name": "DisaitekAI/membership_inference_attack", "max_forks_repo_head_hexsha": "3cd77529e7584b3195a532d848bbea5f5cb6304d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0172413793, "max_line_length": 172, "alphanum_fraction": 0.6036329762, "include": true, "reason": "import numpy", "num_tokens": 4099}
|
import numpy as np
import operator as op
from functools import reduce
def ncr(n, r):
    """Return the binomial coefficient "n choose r" as an exact integer.

    Fixes vs. the original: float true-division has been replaced by exact
    integer division (the denominator always divides the numerator), and
    out-of-range ``r`` now returns 0 per the combinatorial convention
    instead of a spurious 1.
    """
    if r < 0 or r > n:
        return 0
    r = min(r, n - r)  # exploit symmetry: C(n, r) == C(n, n - r)
    numer = reduce(op.mul, range(n, n - r, -1), 1)
    denom = reduce(op.mul, range(1, r + 1), 1)
    return numer // denom  # exact: denom always divides numer
def bernstein_poly(i, n, t):
    """
    The Bernstein polynomial of n, i as a function of t.

    Note the exponent convention used throughout this module:
    C(n, i) * t**(n-i) * (1-t)**i.
    """
    coefficient = ncr(n, i)
    return coefficient * t ** (n - i) * (1 - t) ** i
def compute_bezier_points(points, n_times=25):
    """
    Sample ``n_times`` (x, y) pairs along the Bezier curve defined by
    the control ``points``.
    """
    n_points = len(points)
    xs = np.array([pt[0] for pt in points])
    ys = np.array([pt[1] for pt in points])
    t = np.linspace(0.0, 1.0, n_times)
    # One row per control point: its Bernstein basis evaluated over t.
    basis = np.array([bernstein_poly(i, n_points - 1, t) for i in range(n_points)])
    return list(zip(np.dot(xs, basis), np.dot(ys, basis)))
def colour_linear_interpolation(col_a, col_b, t):
    """
    Linearly interpolates between two colours, channel by channel.
    """
    return tuple(start + (end - start) * t
                 for start, end in zip(col_a, col_b))
def map_from_to(x, a, b, c, d):
    """
    Maps a value x from the range [a, b] onto the range [c, d].
    """
    proportion = (x - a) / (b - a)
    return c + proportion * (d - c)
|
{"hexsha": "e17a1819d59e78cc88beda39e7be6c4c4fc04b30", "size": 1325, "ext": "py", "lang": "Python", "max_stars_repo_path": "sim_assets/ext.py", "max_stars_repo_name": "AvanaPY/SimSims", "max_stars_repo_head_hexsha": "6f74ed93f642a4238f98969a3f34ea8bccd83a87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sim_assets/ext.py", "max_issues_repo_name": "AvanaPY/SimSims", "max_issues_repo_head_hexsha": "6f74ed93f642a4238f98969a3f34ea8bccd83a87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sim_assets/ext.py", "max_forks_repo_name": "AvanaPY/SimSims", "max_forks_repo_head_hexsha": "6f74ed93f642a4238f98969a3f34ea8bccd83a87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4444444444, "max_line_length": 95, "alphanum_fraction": 0.5909433962, "include": true, "reason": "import numpy", "num_tokens": 413}
|
import os, vtk
import numpy as np
from pymicro.view.scene3d import Scene3D
from pymicro.view.vtk_utils import *
from vtk.util.colors import white, grey, black, lamp_black
'''
Create a 3d scene with a tomographic view of a polymer foam.
The shape is displayed using a simple contour filter. Bounding box
and axes are also added to the scene.
'''
# Create the 3D scene
base_name = os.path.splitext(__file__)[0]
s3d = Scene3D(display=False, ren_size=(800, 800), name=base_name, background=black)
data_dir = '../data'
scan = 'mousse_250x250x250_uint8.raw'
im_file = os.path.join(data_dir, scan)
# The raw file name encodes the volume dimensions and dtype as
# "<name>_<X>x<Y>x<Z>_<dtype>.raw"; parse both from the name.
s_size = scan[:-4].split('_')[-2].split('x')
s_type = scan[:-4].split('_')[-1]
size = [int(s_size[0]), int(s_size[1]), int(s_size[2])]
data = read_image_data(im_file, size, data_type=s_type, verbose=True)
print('adding bounding box')
outline = data_outline(data)
outline.GetProperty().SetColor(white)
s3d.add(outline)
print('isolating the foam with vtkContourFilter')
# Iso-surface at grey level 80 separates the foam from the background.
foam = contourFilter(data, 80, color=grey, diffuseColor=white)
foam.GetProperty().SetSpecular(.4)
foam.GetProperty().SetSpecularPower(10)
s3d.add(foam)
print('adding XYZ axes')
axes = axes_actor(length=100, fontSize=60)
axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(grey)
s3d.add(axes);
print('setting up camera and rendering')
cam = setup_camera(size=size)
cam.SetClippingRange(1, 2000)
s3d.set_camera(cam)
s3d.render()
# thumbnail for the image gallery
from matplotlib import image
image_name = base_name + '.png'
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
|
{"hexsha": "a3273e563068f38e0e64996d536218d06f8cdf7c", "size": 1559, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/3d_visualisation/mousse_3d.py", "max_stars_repo_name": "heprom/pymicro", "max_stars_repo_head_hexsha": "176bf3a829dbf67796a3d4471f18868a3da229a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2017-03-02T14:43:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T13:22:22.000Z", "max_issues_repo_path": "examples/3d_visualisation/mousse_3d.py", "max_issues_repo_name": "heprom/pymicro", "max_issues_repo_head_hexsha": "176bf3a829dbf67796a3d4471f18868a3da229a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-12-29T12:41:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T21:13:20.000Z", "max_forks_repo_path": "examples/3d_visualisation/mousse_3d.py", "max_forks_repo_name": "heprom/pymicro", "max_forks_repo_head_hexsha": "176bf3a829dbf67796a3d4471f18868a3da229a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2017-03-21T12:43:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T14:30:06.000Z", "avg_line_length": 29.4150943396, "max_line_length": 83, "alphanum_fraction": 0.7543296985, "include": true, "reason": "import numpy", "num_tokens": 440}
|
import tensorflow as tf
import numpy as np
from train import model_to_tflite
from gdrive import saveModel, saveTFLiteModel
from mates import rmse
class DetectorInterface:
    """Abstract interface for detectors; subclasses override the hooks below.

    All three hooks are no-op stubs that return None.
    """

    def train(self, trainData):
        """Train the detector across many sessions (no-op stub)."""

    def trainSession(self, session):
        """Update model parameters from a single session (no-op stub)."""

    def evaluate(self, session):
        """Evaluate the detector on one session (no-op stub)."""
class ModelInterface:
    """Minimal model wrapper: holds a name and an underlying model object.

    Subclasses implement predict()/save(); evaluate() scores predictions
    against ground truth with the project's rmse metric.
    """

    def __init__(self, name, model=None):
        self.name = name
        self.model = model

    def predict(self, data):
        """Return predictions for `data` (stub; overridden by subclasses)."""

    def evaluate(self, x, y):
        """Score this model on (x, y) using root-mean-squared error."""
        predictions = self.predict(x)
        return rmse(y, predictions)

    def getModel(self):
        """Expose the wrapped model object."""
        return self.model

    def save(self):
        """Persist the model (stub; overridden by subclasses)."""
class BasicModel(ModelInterface):
    """Keras-backed model wrapper that lazily loads/creates its model.

    When no model is supplied, the module-level helper cargar_o_crear_modelo
    is used to load or build one named "<prefix>-<name>".
    """
    # Saved-model name prefix; subclasses override (e.g. "TRAIN", "LITE").
    prefix = ''

    def __init__(self, name, model=None, history=None):
        if model is None:
            # NOTE(review): cargar_o_crear_modelo is not defined in this file;
            # presumably injected by the enclosing notebook -- confirm.
            model, trained, history = cargar_o_crear_modelo(f"{self.prefix}-{name}")
        super(BasicModel, self).__init__(name, model)
        self.history = history
        # A model is considered trained iff a training history accompanies it.
        self.trained = history is not None

    def predict(self, data):
        """Predict a single sample by wrapping it in a batch dimension of 1."""
        return self.model.predict(np.expand_dims(data, axis=0), batch_size=1)

    def save(self, format=None):
        """Persist the model (and history) to Drive.

        Bug fix: the original hard-coded format='h5' whenever *any* format was
        requested, ignoring the caller's value; the requested format is now
        forwarded as-is.
        """
        if format is None:
            saveModel(self.model, f"{self.prefix}-{self.name}", self.history)
        else:
            saveModel(self.model, f"{self.prefix}-{self.name}", self.history, format=format)
# Filename suffixes for saved TFLite models, indexed by the `optim` level.
# NOTE(review): by name these look like no-opt / float16 / dynamic-range /
# full-integer quantization -- confirm against model_to_tflite's optim codes.
OPTIM_NAMES = ('', 'FLOAT16', 'DYNAMIC', 'INT')
class LiteModel(ModelInterface):
    """TFLite model wrapper: converts or loads a flatbuffer and runs inference
    through a tf.lite.Interpreter."""
    prefix = 'LITE'

    def __init__(self, name, model=None, keras_model=None, optim=0):
        self.optim = optim
        # Convert a Keras model to a TFLite flatbuffer when one is supplied.
        if keras_model is not None:
            model = model_to_tflite(keras_model, optim=optim)
        if model is not None:
            self.interpreter = tf.lite.Interpreter(model_content=model)
        else:
            # Bug fix: `prefix` was referenced as a bare name here, which
            # raises NameError at runtime; qualify it with self.
            self.interpreter = tf.lite.Interpreter(
                model_path=f"{self.prefix}-{name}-{OPTIM_NAMES[optim]}")
        self.interpreter.allocate_tensors()
        super(LiteModel, self).__init__(name, model)

    def predict(self, data):
        """Run one sample through the interpreter; return the raw output tensor."""
        batch = tf.cast(np.expand_dims(data, axis=0), tf.float32)
        in_details = self.interpreter.get_input_details()
        out_details = self.interpreter.get_output_details()
        self.interpreter.set_tensor(in_details[0]['index'], batch)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(out_details[0]['index'])

    def save(self):
        """Persist the TFLite flatbuffer under 'LITE-<name>-<optim suffix>'."""
        saveTFLiteModel(self.model, f"LITE-{self.name}-{OPTIM_NAMES[self.optim]}")
class TrainModel(BasicModel):
    # Saved-model name prefix for this (trainable) model family.
    prefix = "TRAIN"

    def __init__(self, name, cell, nb_units, input_shape, output_dims, output_steps, nb_layers, bidirectional, stateful, lr=0.001, encoderLength=None):
        # Architecture hyper-parameters are stored so an inference copy can be
        # rebuilt from scratch in _getTrainedKerasModel.
        self.cell = cell
        self.nb_units = nb_units
        self.input_shape = input_shape
        self.output_dims = output_dims
        self.output_steps = output_steps
        self.nb_layers = nb_layers
        self.bidirectional = bidirectional
        self.stateful = stateful
        self.lr = lr
        self.encoderLength = encoderLength
        # NOTE(review): cargar_o_crear_modelo, info, readPartialModel,
        # batchTrain, LRCallback, RNN_LR and RNN_MINIMUM_LR are not defined in
        # this file -- presumably injected by the enclosing notebook; confirm.
        global cargar_o_crear_modelo
        _model, self.isTrained, _history = cargar_o_crear_modelo(f"{self.prefix}-{name}", nb_units, input_shape, output_dims, output_steps, nb_layers, stateful, bidirectional, cell, True, lr, encoderLength)
        info(f"Model is trained {self.isTrained}")
        if self.isTrained is False:
            # Fall back to partially-trained weights saved mid-run.
            weights, _history = readPartialModel(name, _model)
            info(f"Load PARTIAL model and history {_history}")
            if _history is not None:
                info(f"Trained for {_history['epoch']} epochs")
        super(TrainModel, self).__init__(name, _model, _history)

    def train(self, x, y, epochs=10, initialEpoch=None, validation_data=None, patience=2, statsEvery=20):
        # Resume the learning-rate schedule where the previous run left off.
        lr_step = 0
        if self.history is not None:
            lr_step = self.history['lr_steps']
        cb = [ LRCallback(RNN_LR, RNN_MINIMUM_LR, step = lr_step)]
        self.model, self.history, weights = batchTrain(self.model, x, y, self.name,
            epochs, validation_data=validation_data,
            history=self.history, initialEpoch=initialEpoch, callbacks=cb)
        self.isTrained = True
        self.save()
        # Return an inference-ready copy built from the trained weights.
        return BasicModel(self.name, self._getTrainedKerasModel(weights), self.history)

    def getTrainedModel(self):
        # Wrap the current weights in a plain BasicModel for inference/export.
        return BasicModel(self.name, self._getTrainedKerasModel(), self.history)

    def _getTrainedKerasModel(self, weights = None, history=None):
        # Rebuild a fresh model with the same architecture (training=False)
        # and copy the trained weights into it.
        if weights is None:
            weights = self.model.get_weights()
        if history is None:
            history = self.history
        _model,_t,_h = cargar_o_crear_modelo("FAKE", self.nb_units, self.input_shape,
            self.output_dims, self.output_steps, self.nb_layers, self.stateful,
            self.bidirectional, self.cell, False, self.lr, self.encoderLength,
            forceNew=True)
        _model.set_weights(weights)
        return _model
|
{"hexsha": "1dc74fe9645bb32806f855c8ddaeac60ddcd9832", "size": 4945, "ext": "py", "lang": "Python", "max_stars_repo_path": "classes.py", "max_stars_repo_name": "aberaza/tfm2020Scripts", "max_stars_repo_head_hexsha": "5bd996c8cf3e1637f08669fee8b052ffefd09d64", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classes.py", "max_issues_repo_name": "aberaza/tfm2020Scripts", "max_issues_repo_head_hexsha": "5bd996c8cf3e1637f08669fee8b052ffefd09d64", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classes.py", "max_forks_repo_name": "aberaza/tfm2020Scripts", "max_forks_repo_head_hexsha": "5bd996c8cf3e1637f08669fee8b052ffefd09d64", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6394557823, "max_line_length": 202, "alphanum_fraction": 0.698281092, "include": true, "reason": "import numpy", "num_tokens": 1244}
|
import argparse
import statistics
import scipy.stats as stats
''' //////////// CLASSI ////////////'''
class Caller():
    '''Base record for metrics extracted from one variant caller's VCF line.
    All fields default to the empty string until filled by get_info_*.'''
    GT=''          # genotype code: 1 het, 2 hom-alt, 0 missing
    AO=''          # alt-allele observation count
    RO=''          # ref-allele observation count
    AO_f=''        # alt observations, forward strand
    AO_r=''        # alt observations, reverse strand
    DP_f=''        # total depth, forward strand
    DP_r=''        # total depth, reverse strand
    DP=''          # total read depth
    QB=''          # base/variant quality
    Call=''        # 1 when this caller reported the variant
    AF=''          # allele fraction (AO/DP)
    StrandBias=''  # 1 - Fisher exact p-value on strand counts
class Freebayes(Caller):
    '''Freebayes record; adds strand-split reference counts (INFO SRF/SRR).'''
    RO_f=''  # reference observations, forward strand
    RO_r=''  # reference observations, reverse strand
class Vardict(Caller):
    '''VarDict record; adds strand-split ref counts and mapping quality.'''
    RO_f=''      # reference depth, forward strand (FORMAT RD)
    RO_r=''      # reference depth, reverse strand (FORMAT RD)
    ODDRATIO=''  # declared but never populated by get_info_vardict
    SSF=''       # declared but never populated by get_info_vardict
    MQ=''        # mapping quality (INFO MQ=)
class Platypus(Caller):
    '''Platypus record; ref strand counts are derived as TCF/TCR - NF/NR.'''
    RO_f=''  # reference observations, forward strand (derived)
    RO_r=''  # reference observations, reverse strand (derived)
    pass
class Gatk(Caller):
    '''GATK record; adds mapping-quality and strand-bias diagnostics.'''
    MQ0=''        # reads with mapping quality zero (INFO MQ0=)
    MQRankSum=''  # mapping-quality rank-sum test (INFO MQRankSum=)
    BQRankSum=''  # base-quality rank-sum test (INFO BaseQRankSum=)
    PhredFS=''    # Phred-scaled Fisher strand bias (INFO FS=)
    StrBiasFS=''  # FS converted to a probability: 1 - 10^(-FS/10)
    pass
class Features():
    '''One flattened output row per variant: per-caller features plus
    cross-caller aggregates, pre-initialised to the dataset placeholders
    ('.' = missing, '0' = not called). Populated by set_features_snp.'''
    # Genotype per caller (0 missing, 1 het, 2 hom-alt).
    GT_Freebayes='0'
    GT_Vardict='0'
    GT_Platypus='0'
    GT_Gatk='0'
    # Per-caller base/variant quality.
    QB_Freebayes='.'
    QB_Vardict='.'
    QB_Platypus='.'
    QB_Gatk='.'
    # Per-caller allele fraction.
    AF_Freebayes='.'
    AF_Vardict='.'
    AF_Platypus='.'
    AF_Gatk='.'
    # Per-caller depth.
    DP_Freebayes='.'
    DP_Vardict='.'
    DP_Platypus='.'
    DP_Gatk='.'
    # 1 when the caller reported this variant, else 0.
    CallFreebayes='0'
    CallVardict='0'
    CallPlatypus='0'
    CallGatk='0'
    # Strand-bias statistics (Fisher based; GATK value derived from FS).
    STRBIAS_Freebayes='.'
    STRBIAS_Vardict='.'
    STRBIAS_Platypus='.'
    StrBiasFS_Gatk='.'
    BIAS_Vardict='.'
    ODDRATIO_Vardict='.'
    SBF_Vardict='.'
    # GATK mapping-quality diagnostics.
    MQ0_Gatk='.'
    MQ0F_Gatk='.'
    MQRankSum_Gatk='.'
    BQRankSum_Gatk='.'
    # Cross-caller aggregates (mean / median / normalised by expected depth).
    MQ0F_median='.'
    MQ0_median='.'
    MQ0_norm_median='.'
    MQ_Vardict='.'
    DP=float(0)
    DP_median='.'
    DP_norm_median='.'
    BQ_media='.'
    BQ_median='.'
    AF_media='.'
    AF_median='.'
    STRBIAS_media='.'
    STRBIAS_median= '.'
''' //////////// FUNZIONI ////////////'''
def get_info_freebayes(chrom, pos, ref, alt, info, format, sample, freebayes):
    """Extract Freebayes call metrics from one VCF record into `freebayes`.

    format/sample are the split FORMAT and sample columns; info is the split
    INFO column. Mutates `freebayes` in place.
    """
    # Genotype: 1 = het, 2 = hom-alt, 0 = missing.
    if sample[format.index('GT')] == '1/0' or sample[format.index('GT')] == '0/1':
        freebayes.GT = 1
    elif sample[format.index('GT')] == '1/1':
        freebayes.GT = 2
    elif '.' in sample[format.index('GT')]:
        freebayes.GT = 0
    # Fix: equality instead of 'is not' identity tests on string literals
    # (identity only worked by accident of CPython interning).
    if sample != 'null' and sample[format.index('DP')] != '0':
        try:
            freebayes.AO = float(sample[format.index('AO')])
        except Exception:
            freebayes.AO = float(0)
        try:
            freebayes.DP = float(sample[format.index('DP')])
        except Exception:
            freebayes.DP = 0
        try:
            freebayes.RO = float(sample[format.index('RO')])
        except Exception:
            freebayes.RO = float(0)
        try:
            freebayes.AF = freebayes.AO / freebayes.DP
        except Exception:
            freebayes.AF = '.'
    else:
        freebayes.DP = 0
        freebayes.AF = '.'
        freebayes.AO = '.'
        freebayes.RO = '.'
    # Strand-split counts from INFO: SAF/SAR alt, SRF/SRR ref.
    for ind in info:
        if ind.startswith("SAF="):
            freebayes.AO_f = float(ind.split('=')[1])
        if ind.startswith("SAR="):
            freebayes.AO_r = float(ind.split('=')[1])
        if ind.startswith("SRF="):
            freebayes.RO_f = float(ind.split('=')[1])
        if ind.startswith("SRR="):
            freebayes.RO_r = float(ind.split('=')[1])
    freebayes.DP_f = float(freebayes.AO_f) + float(freebayes.RO_f)
    freebayes.DP_r = float(freebayes.AO_r) + float(freebayes.RO_r)
    # Mean alt base quality: total alt quality (QA) over alt observations.
    try:
        freebayes.QB = float(sample[format.index('QA')]) / freebayes.AO
    except Exception:
        freebayes.QB = float(0)
    freebayes.Call = 1
    # Robustness fix: guard the denominator (was ZeroDivisionError when both
    # strand depths were zero) and report '.' like the other missing cases.
    denom = freebayes.DP_r + freebayes.DP_f
    if denom > 0 and min(freebayes.DP_r, freebayes.DP_f) / denom > 0:
        try:
            freebayes.StrandBias = 1 - stats.fisher_exact([[freebayes.RO_f, freebayes.RO_r], [freebayes.AO_f, freebayes.AO_r]])[1]
        except Exception:
            freebayes.StrandBias = '.'
    else:
        freebayes.StrandBias = '.'
def get_info_vardict(chrom, pos, ref, alt, info, format, sample, vardict):
    """Extract VarDict call metrics from one VCF record into `vardict`."""
    # Genotype: 1 = het, 2 = hom-alt, 0 = missing.
    if sample[format.index('GT')] == '1/0' or sample[format.index('GT')] == '0/1':
        vardict.GT = 1
    elif sample[format.index('GT')] == '1/1':
        vardict.GT = 2
    elif '.' in sample[format.index('GT')]:
        vardict.GT = 0
    # Fix: equality instead of 'is not' identity tests on string literals.
    if sample != 'null' and sample[format.index('DP')] != '0':
        vardict.AO = float(sample[format.index('AD')].split(',')[1])
        vardict.RO = float(sample[format.index('AD')].split(',')[0])
        vardict.DP = float(sample[format.index('DP')])
        try:
            vardict.AF = float(vardict.AO / (vardict.DP))
        except Exception:
            vardict.AF = float(0)
    else:
        vardict.AO = '.'
        vardict.RO = '.'
        vardict.DP = 0
        vardict.AF = '.'
    # Strand-split counts: ALD = alt fwd,rev ; RD = ref fwd,rev.
    vardict.AO_f = float(sample[format.index('ALD')].split(',')[0])
    vardict.AO_r = float(sample[format.index('ALD')].split(',')[1])
    vardict.RO_f = float(sample[format.index('RD')].split(',')[0])
    vardict.RO_r = float(sample[format.index('RD')].split(',')[1])
    vardict.DP_f = vardict.AO_f + vardict.RO_f
    vardict.DP_r = vardict.AO_r + vardict.RO_r
    vardict.Call = 1
    # Robustness fix: guard the denominator (was ZeroDivisionError when both
    # strand depths were zero).
    denom = vardict.DP_r + vardict.DP_f
    if denom > 0 and min(vardict.DP_r, vardict.DP_f) / denom > 0:
        try:
            vardict.StrandBias = 1 - stats.fisher_exact([[vardict.RO_f, vardict.RO_r], [vardict.AO_f, vardict.AO_r]])[1]
        except Exception:
            vardict.StrandBias = '.'
    else:
        vardict.StrandBias = '.'
    for ind in info:
        if ind.startswith("QUAL="):
            vardict.QB = float(ind.split('=')[1])
        if ind.startswith("MQ="):
            vardict.MQ = float(ind.split('=')[1])
def get_info_platypus(chrom, pos, ref, alt, info, format, sample, platypus):
    """Extract Platypus call metrics from one VCF record into `platypus`.

    Depth comes from INFO (TC/TCF/TCR); alt support from FORMAT NV and INFO
    NF/NR; reference counts are derived by subtraction.
    """
    # Genotype: 1 = het, 2 = hom-alt, 0 = missing.
    if sample[format.index('GT')] == '1/0' or sample[format.index('GT')] == '0/1':
        platypus.GT = 1
    elif sample[format.index('GT')] == '1/1':
        platypus.GT = 2
    elif '.' in sample[format.index('GT')]:
        platypus.GT = 0
    # Fix: equality instead of 'is not' identity test on a string literal.
    if sample != 'null':
        for ind in info:
            if ind.startswith("TC="):
                platypus.DP = float(ind.split('=')[1])
            if ind.startswith("TCF="):
                platypus.DP_f = float(ind.split('=')[1])
            if ind.startswith("TCR="):
                platypus.DP_r = float(ind.split('=')[1])
        platypus.AO = float(sample[format.index('NV')])
        platypus.RO = platypus.DP - platypus.AO
        # NOTE(review): DP is a float here, so comparing with the *string* '0'
        # is always True (exactly as in the original); 'platypus.DP != 0' was
        # probably intended -- kept as-is to preserve behavior, confirm.
        if platypus.DP != '0':
            try:
                platypus.AF = float(platypus.AO / (platypus.DP))
            except Exception:
                platypus.AF = float(0)
        else:
            platypus.AO = '.'
            platypus.RO = '.'
            platypus.DP = 0
            platypus.AF = '.'
        for ind in info:
            if ind.startswith("NF="):
                platypus.AO_f = float(ind.split('=')[1])
            if ind.startswith("NR="):
                platypus.AO_r = float(ind.split('=')[1])
        platypus.RO_f = platypus.DP_f - platypus.AO_f
        platypus.RO_r = platypus.DP_r - platypus.AO_r
    platypus.Call = 1
    platypus.QB = '.'
    # Fix: '!=' instead of 'is not' on ints (small-int identity is fragile).
    if int(platypus.DP) != 0:
        # Robustness fix: guard the denominator against a zero strand depth.
        denom = platypus.DP_r + platypus.DP_f
        if denom > 0 and min(platypus.DP_r, platypus.DP_f) / denom > 0:
            try:
                platypus.StrandBias = 1 - stats.fisher_exact([[platypus.RO_f, platypus.RO_r], [platypus.AO_f, platypus.AO_r]])[1]
            except Exception:
                platypus.StrandBias = '.'
        else:
            platypus.StrandBias = '.'
def get_info_gatk(chrom, pos, ref, alt, info, format, sample, gatk):
    """Extract GATK call metrics from one VCF record into `gatk`."""
    # Genotype: 1 = het, 2 = hom-alt, 0 = missing.
    if sample[format.index('GT')] == '1/0' or sample[format.index('GT')] == '0/1':
        gatk.GT = 1
    elif sample[format.index('GT')] == '1/1':
        gatk.GT = 2
    elif '.' in sample[format.index('GT')]:
        gatk.GT = 0
    # Fix: equality instead of 'is'/'is not' identity tests on string literals.
    if sample != 'null' and sample[format.index('DP')] != '0':
        gatk.DP = float(sample[format.index('DP')])
        gatk.AO = float(sample[format.index('AD')].split(',')[1])
        gatk.RO = float(sample[format.index('AD')].split(',')[0])
        try:
            gatk.AF = float(gatk.AO / (gatk.DP))
        except Exception:
            gatk.AF = float(0)
    else:
        gatk.AO = '.'
        gatk.RO = '.'
        gatk.DP = 0
        gatk.AF = '.'
    gatk.Call = 1
    gatk.QB = '.'
    # Rank-sum tests default to 0 when absent from INFO.
    gatk.BQRankSum = 0
    gatk.MQRankSum = 0
    for ind in info:
        if ind.startswith("BaseQRankSum="):
            gatk.BQRankSum = float(ind.split('=')[1])
        if ind.startswith("MQRankSum="):
            gatk.MQRankSum = float(ind.split('=')[1])
        if ind.startswith("MQ0="):
            gatk.MQ0 = float(ind.split('=')[1])
        if ind.startswith("FS="):
            gatk.PhredFS = float(ind.split('=')[1])
    # Convert the Phred-scaled Fisher strand bias back to a probability.
    try:
        gatk.StrBiasFS = 1 - pow(10, -gatk.PhredFS / 10)
    except Exception:
        gatk.StrBiasFS = '.'
    # Fraction of MQ0 reads; the string '0' placeholder mirrors the original
    # convention when MQ0 or DP is unavailable.
    try:
        gatk.MQ0F = gatk.MQ0 / gatk.DP
    except Exception:
        gatk.MQ0F = '0'
def set_features_snp(dictionary):
    """Build a Features row per variant from its (up to) four caller records.

    For each variant ID, copies per-caller fields into a Features object,
    computes cross-caller mean/median aggregates, and appends the Features
    object to the variant's caller vector (slot 4). Reads the module-level
    `opts` for expectedMeanDP normalisation.

    Fixes vs. original: string comparisons use equality instead of 'is'/'is
    not' identity; MQ0F_media now averages MQ0F_med (the original divided
    MQ0_med by mistake); bare excepts narrowed to Exception.
    """
    for variante in dictionary.keys():
        features = Features()
        varc_array = dictionary.get(variante)
        vett_MBQ = []
        vett_DP = []
        vett_MQ0 = []
        index = 0
        for varcall in varc_array:
            # Empty string means this caller did not report the variant.
            if varcall != "":
                vett_MBQ = vett_MBQ + [varcall.QB]
                vett_DP = vett_DP + [varcall.DP]
                if index == 0:
                    features.GT_Freebayes = varc_array[0].GT
                    features.QB_Freebayes = varc_array[0].QB
                    features.AF_Freebayes = varc_array[0].AF
                    features.CallFreebayes = varc_array[0].Call
                    features.STRBIAS_Freebayes = varc_array[0].StrandBias
                elif index == 1:
                    features.GT_Vardict = varc_array[1].GT
                    features.QB_Vardict = varc_array[1].QB
                    features.MQ_Vardict = varc_array[1].MQ
                    features.AF_Vardict = varc_array[1].AF
                    features.CallVardict = varc_array[1].Call
                    features.STRBIAS_Vardict = varc_array[1].StrandBias
                elif index == 2:
                    features.GT_Platypus = varc_array[2].GT
                    features.AF_Platypus = varc_array[2].AF
                    features.CallPlatypus = varc_array[2].Call
                    features.STRBIAS_Platypus = varc_array[2].StrandBias
                elif index == 3:
                    features.GT_Gatk = varc_array[3].GT
                    features.AF_Gatk = varc_array[3].AF
                    features.CallGatk = varc_array[3].Call
                    features.MQ0_Gatk = varc_array[3].MQ0
                    features.MQ0F_Gatk = varc_array[3].MQ0F
                    features.MQRankSum_Gatk = varc_array[3].MQRankSum
                    features.BQRankSum_Gatk = varc_array[3].BQRankSum
                    features.StrBiasFS_Gatk = varc_array[3].StrBiasFS
            index = index + 1
        vett_MQ0 = [features.MQ0_Gatk]
        vett_MQ0F = [features.MQ0F_Gatk]
        vett_AF_media = [features.AF_Freebayes, features.AF_Vardict, features.AF_Platypus, features.AF_Gatk]
        vett_STRB_media = [features.STRBIAS_Freebayes, features.STRBIAS_Vardict, features.STRBIAS_Platypus, features.StrBiasFS_Gatk]
        AF_med = 0
        MQ0_med = 0
        MQ0F_med = 0
        SB_media = 0
        nDP = 0
        nMBQ = 0
        # Depth: mean, median and median normalised by the expected coverage.
        i = 0
        v = []
        for dp in vett_DP:
            if dp and dp != '':
                nDP = float(nDP) + float(dp)
                v = v + [float(dp)]
                i = i + 1
        try:
            features.DP = nDP / i
            features.DP_median = statistics.median(v)
        except Exception:
            features.DP = '.'
        try:
            features.DP_norm_median = features.DP_median / float(opts.expectedMeanDP)
        except Exception:
            features.DP_norm_median = '.'
        # Base quality aggregates.
        i = 0
        v = []
        for bq in vett_MBQ:
            if bq != '.':
                nMBQ = float(nMBQ) + float(bq)
                v = v + [float(bq)]
                i = i + 1
        try:
            features.BQ_media = nMBQ / i
            features.BQ_median = statistics.median(v)
        except Exception:
            features.BQ_media = '.'
        # Strand-bias aggregates.
        i = 0
        v = []
        for strb in vett_STRB_media:
            if strb != '.':
                SB_media = float(SB_media) + float(strb)
                v = v + [float(strb)]
                i = i + 1
        try:
            features.STRBIAS_media = SB_media / i
            features.STRBIAS_median = statistics.median(v)
        except Exception:
            features.STRBIAS_media = '.'
        # Allele-fraction aggregates.
        i = 0
        v = []
        for af in vett_AF_media:
            if af != '.' and af != '0':
                AF_med = float(AF_med) + float(af)
                v = v + [float(af)]
                i = i + 1
        try:
            features.AF_media = AF_med / i
            features.AF_median = statistics.median(v)
        except Exception:
            features.AF_media = '.'
        # MQ0 aggregates (GATK only, so the vectors have a single entry).
        i = 0
        v = []
        for mq in vett_MQ0:
            if mq != '.':
                MQ0_med = float(MQ0_med) + float(mq)
                v = v + [float(mq)]
                i = i + 1
        try:
            features.MQ0_media = MQ0_med / i
            features.MQ0_median = statistics.median(v)
        except Exception:
            features.MQ0_media = '.'
        try:
            features.MQ0_norm_median = features.MQ0_median / float(opts.expectedMeanDP)
        except Exception:
            features.MQ0_norm_median = '.'
        i = 0
        v = []
        for mqf in vett_MQ0F:
            if mqf != '.':
                MQ0F_med = float(MQ0F_med) + float(mqf)
                v = v + [float(mqf)]
                i = i + 1
        try:
            # Bug fix: the original divided MQ0_med here (copy-paste error).
            features.MQ0F_media = MQ0F_med / i
            features.MQ0F_median = statistics.median(v)
        except Exception:
            features.MQ0F_media = '.'
        dictionary[variante] = varc_array + [features]
def switch_indel(dictionary, ID, index, chrom, pos, ref, alt, info, format, sample):
    """Run the caller-specific extractor selected by `index` and store the
    result in the variant's 4-slot vector (0 Freebayes, 1 VarDict, 2 Platypus,
    3 GATK). Calls with AF == 0.0 are discarded."""
    # Py2/3 compat fix: dict.has_key() was removed; 'in' works on both.
    if ID in dictionary:
        vettore = dictionary[ID]
    else:
        vettore = ['', '', '', '']
    if index == 0:
        freebayes = Freebayes()
        get_info_freebayes(chrom, pos, ref, alt, info, format, sample, freebayes)
        if freebayes.AF != 0.0:
            vettore[0] = freebayes
    elif index == 1:
        vardict = Vardict()
        get_info_vardict(chrom, pos, ref, alt, info, format, sample, vardict)
        # VarDict calls are additionally filtered on mapping quality (opts.mq).
        if vardict.AF != 0.0 and float(vardict.MQ) >= float(opts.mq):
            vettore[1] = vardict
    elif index == 2:
        platypus = Platypus()
        get_info_platypus(chrom, pos, ref, alt, info, format, sample, platypus)
        if platypus.AF != 0.0:
            vettore[2] = platypus
    elif index == 3:
        gatk = Gatk()
        get_info_gatk(chrom, pos, ref, alt, info, format, sample, gatk)
        if gatk.AF != 0.0:
            vettore[3] = gatk
    dictionary[ID] = vettore
def read(iterable, index, dictionary):
    """Parse a VCF stream and collect indel records into `dictionary`.

    iterable   -- open VCF file (or any iterable of lines)
    index      -- caller index: 0 Freebayes, 1 VarDict, 2 Platypus, 3 GATK
    dictionary -- shared {variant-ID: caller vector} accumulator
    """
    for line in iterable:
        # Fix: rstrip() returns a new string; the original discarded the
        # result, leaving the newline attached to the last sample field.
        line = line.rstrip()
        if line.startswith('#'):
            continue
        parts = line.split("\t")
        chrom = parts[0]
        pos = parts[1]
        ref = parts[3]
        alt = parts[4]
        ID = '\t'.join([chrom, pos, ref, alt])
        info = parts[7].split(";")
        format = parts[8].split(":")
        SAMPLE = parts[9]
        sample = SAMPLE.split(":")
        # Skip SNVs (single-base ref AND alt), malformed samples, hom-ref calls.
        if len(ref) == 1 and len(alt) == 1 or len(sample) == 1 or SAMPLE.startswith('0/0:'):
            continue
        # VarDict: a variant depth (VD) of '0' means no real support.
        if "VD" in format:
            # Fix: '==' instead of 'is' (string identity) for the comparison.
            if sample[format.index('VD')] == '0':
                continue
        switch_indel(dictionary, ID, index, chrom, pos, ref, alt, info, format, sample)
def control(dictionary):
    '''Drop variants whose 4-slot caller vector is entirely empty, i.e. no
    caller actually reported them.'''
    # Fix: materialize the key list first -- deleting while iterating a live
    # dict view raises RuntimeError on Python 3 (py2's .keys() was a copy).
    for variante in list(dictionary.keys()):
        if dictionary[variante][:4] == ['', '', '', '']:
            del dictionary[variante]
def print_var_indel(dictionary):
    '''Print the indel feature dataset as TSV to stdout (Python 2 print
    statements): a fixed header line, then one row per variant. The column
    order in the join below must stay in sync with the header.'''
    print "SAMPLE_ID\tCONTIG\tPOS\tREF\tALT\tCallFreebayes\tCallVardict\tCallPlatypus\tCallGatk\tGT_Freebayes\tGT_Vardict\tGT_Platypus\tGT_Gatk\tBQ_Freebayes\tBQ_Vardict\tBQ_mean\tBQ_median\tDP_mean\tDP_median\tDP_norm_median\tAF_Freebayes\tAF_Vardict\tAF_Platypus\tAF_Gatk\tAF_mean\tAF_median\tSTRBIAS_Freebayes\tSTRBIAS_Vardict\tSTRBIAS_Platypus\tStrBiasFS_Gatk\tSTRBIAS_mean\tSTRBIAS_median\tMQRankSum_Gatk\tBQRankSum_Gatk\tMQ0F_Gatk\tMQ0_Gatk\tMQ0_GATK_norm\tMQ_Vardict"
    for variante in dictionary.keys():
        # Slot 4 holds the Features object appended by set_features_snp.
        features = dictionary.get(variante)[4]
        print '\t'.join([opts.sample,variante,str(features.CallFreebayes),str(features.CallVardict),str(features.CallPlatypus),str(features.CallGatk),
            str(features.GT_Freebayes),str(features.GT_Vardict),str(features.GT_Platypus),str(features.GT_Gatk),
            str(features.QB_Freebayes),str(features.QB_Vardict),str(features.BQ_media),str(features.BQ_median),str(features.DP),str(features.DP_median),str(features.DP_norm_median),
            str(features.AF_Freebayes),str(features.AF_Vardict),str(features.AF_Platypus),str(features.AF_Gatk),str(features.AF_media),str(features.AF_median),
            str(features.STRBIAS_Freebayes),str(features.STRBIAS_Vardict),str(features.STRBIAS_Platypus),str(features.StrBiasFS_Gatk),str(features.STRBIAS_media),str(features.STRBIAS_median),
            str(features.MQRankSum_Gatk),str(features.BQRankSum_Gatk),str(features.MQ0F_Gatk),str(features.MQ0_Gatk),str(features.MQ0_norm_median),str(features.MQ_Vardict)]).rstrip()
def main():
    """CLI entry point: parse the four callers' VCFs named on the command line
    and print the merged indel feature dataset to stdout."""
    parser = argparse.ArgumentParser('Parse VCF output from Variant callers to output a variant_dataset.txt. Output is to stdout.')
    parser.add_argument('-fb', '--freebayes', help="Freebayes vcf output file name")
    parser.add_argument('-vd', '--vardict', help="Vardict vcf output file name")
    parser.add_argument('-pl', '--platypus', help="Platypus vcf output file name")
    parser.add_argument('-gk', '--gatk', help="Gatk vcf output file name")
    parser.add_argument('-s', '--sample', help="Sample name")
    parser.add_argument('-mDP', '--expectedMeanDP', help="Expected Mean Coverage")
    parser.add_argument('-mqVDThreshold', '--mq', help="Threshold used in the variant calling phase to filter variants")
    # opts is read as a module-level global by several helpers.
    global opts
    opts = parser.parse_args()
    # Caller order fixes the index semantics (0 Freebayes ... 3 Gatk).
    callers = [opts.freebayes, opts.vardict, opts.platypus, opts.gatk]
    varianti = dict()
    for index, vcf in enumerate(callers):
        # Resource fix: close each VCF when done (the original leaked handles).
        with open(vcf) as in_file:
            read(in_file, index, varianti)
    set_features_snp(varianti)
    control(varianti)
    print_var_indel(varianti)
main()
|
{"hexsha": "d9ac79e69e7eb58f95c143337e4a894f2d852ad1", "size": 15598, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_extraction_indel.py", "max_stars_repo_name": "BonizzoniLab/SVD", "max_stars_repo_head_hexsha": "95ed967ae385ed0a339030763a07ea7acfa0c1d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "feature_extraction_indel.py", "max_issues_repo_name": "BonizzoniLab/SVD", "max_issues_repo_head_hexsha": "95ed967ae385ed0a339030763a07ea7acfa0c1d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_extraction_indel.py", "max_forks_repo_name": "BonizzoniLab/SVD", "max_forks_repo_head_hexsha": "95ed967ae385ed0a339030763a07ea7acfa0c1d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3925549915, "max_line_length": 471, "alphanum_fraction": 0.677907424, "include": true, "reason": "import scipy", "num_tokens": 5424}
|
#!/usr/bin/env python
import os
import re
import math
import hashlib
import argparse
import numpy as np
import pandas as pd
import firecloud.api as fapi
from google.cloud import bigquery
from google.cloud import storage
from google.api_core.exceptions import NotFound
from collections import OrderedDict
import xmltodict
import pprint
# Widen pandas display limits for interactive inspection of the sample tables.
# NOTE(review): these abbreviated option keys rely on pandas' pattern
# matching; newer pandas versions may require the full 'display.max_*'
# names -- confirm against the pinned pandas version.
pd.set_option('max_columns', 200)
pd.set_option('max_rows', 200)
pd.set_option("max_colwidth", None)
def load_table(namespace, workspace, table_name, store_membership=False):
    """Fetch a Terra entity table as a string DataFrame.

    Returns (table, membership): `table` is None when the entity table is
    empty; `membership` (only when store_membership=True) is a list of sets of
    sample entity names taken from the 'samples' column, which is then dropped.
    The 'entity:<table>_id' column is moved to the front.
    """
    entities = fapi.get_entities(namespace, workspace, table_name).json()
    table = None
    membership = None
    if len(entities) > 0:
        id_col = f"entity:{table_name}_id"
        table = pd.DataFrame([e['attributes'] for e in entities])
        table[id_col] = [e['name'] for e in entities]
        if store_membership:
            membership = [set(item['entityName'] for item in row['items'])
                          for row in table['samples']]
            del table['samples']
        remaining = list(table.columns)
        remaining.remove(id_col)
        table = table[[id_col] + remaining]
        table = table.astype(str)
    return table, membership
def load_new_sample_table(buckets, project):
    """Scan GCS buckets for PacBio run XMLs and build a fresh sample table.

    One row per flowcell/movie found by load_xmls; CCS statistics come from
    the per-run ccs_reports.txt via load_ccs_report. All values are strings.
    """
    ts = load_xmls(buckets, project)
    tbl_header = ["entity:sample_id", "flowcell_id", "instrument", "movie_name", "well_name", "created_at", "bio_sample",
                  "well_sample", "insert_size", "is_ccs", "is_isoseq", "is_corrected", "description", "application",
                  "experiment_type", "num_records", "total_length", "ccs_report", "ccs_zmws_input",
                  "ccs_zmws_pass_filters", "ccs_zmws_fail_filters", "ccs_zmws_shortcut_filters",
                  "ccs_zmws_pass_filters_pct", "ccs_zmws_fail_filters_pct", "ccs_zmws_shortcut_filters_pct",
                  "gcs_input_dir", "subreads_bam", "subreads_pbi", "ccs_bam", "ccs_pbi", "input_bam", "input_pbi"]
    tbl_rows = []
    for e in ts:
        r = load_ccs_report(project, e['Files']['ccs_reports.txt'], e)
        # Classify the run: CCS when flagged or a reads.bam exists; ISOSEQ
        # overrides CCS when the IsoSeq flag is set; otherwise CLR.
        experiment_type = "CLR"
        if ('IsCCS' in e['WellSample'][0] and e['WellSample'][0]['IsCCS'] == 'true') or e['Files']['reads.bam'] != "":
            experiment_type = "CCS"
        if 'IsoSeq' in e['WellSample'][0] and e['WellSample'][0]['IsoSeq'] == 'true':
            experiment_type = "ISOSEQ"
        # Prefer subreads; fall back to the CCS reads BAM.
        input_bam = e['Files']['subreads.bam'] if e['Files']['subreads.bam'] != "" else e['Files']['reads.bam']
        input_pbi = e['Files']['subreads.bam.pbi'] if e['Files']['subreads.bam.pbi'] != "" else e['Files'][
            'reads.bam.pbi']
        # Each field falls back to a sentinel when missing from the XML; the
        # '+ 1' in the percentage denominators avoids division by zero.
        tbl_rows.append([
            e['CollectionMetadata'][0]['UniqueId'] if 'Context' in e['CollectionMetadata'][0] else "",
            e['CellPac'][0]['Barcode'] if 'Barcode' in e['CellPac'][0] else "UnknownFlowcell",
            e['CollectionMetadata'][0]['InstrumentName'] if 'Context' in e['CollectionMetadata'][
                0] else "UnknownInstrument",
            e['CollectionMetadata'][0]['Context'] if 'Context' in e['CollectionMetadata'][0] else "UnknownMovie",
            e['WellSample'][0]['WellName'] if 'WellName' in e['WellSample'][0] else "Z00",
            e['WellSample'][0]['CreatedAt'] if 'CreatedAt' in e['WellSample'][0] else "0001-01-01T00:00:00",
            re.sub("[# ]", "", e['BioSample'][0]['Name']) if 'BioSample' in e else "UnknownBioSample",
            re.sub("[# ]", "", e['WellSample'][0]['Name']) if 'Name' in e['WellSample'][0] else "UnknownWellSample",
            e['WellSample'][0]['InsertSize'] if 'InsertSize' in e['WellSample'][0] else "0",
            e['WellSample'][0]['IsCCS'] if 'IsCCS' in e['WellSample'][0] else "unknown",
            e['WellSample'][0]['IsoSeq'] if 'IsoSeq' in e['WellSample'][0] else "unknown",
            "true" if 'ConsensusReadSet' in e else "false",
            e['WellSample'][0]['Description'] if 'Description' in e['WellSample'][0] else "unknown",
            e['WellSample'][0]['Application'] if 'Application' in e['WellSample'][0] else "unknown",
            experiment_type,
            e['DataSetMetadata'][0]['NumRecords'],
            e['DataSetMetadata'][0]['TotalLength'],
            e['Files']['ccs_reports.txt'],
            r['ZMWs input'],
            r['ZMWs pass filters'],
            r['ZMWs fail filters'],
            r['ZMWs shortcut filters'],
            "{:.2f}".format(100.0 * r['ZMWs pass filters'] / (
                    r['ZMWs pass filters'] + r['ZMWs fail filters'] + r['ZMWs shortcut filters'] + 1)),
            "{:.2f}".format(100.0 * r['ZMWs fail filters'] / (
                    r['ZMWs pass filters'] + r['ZMWs fail filters'] + r['ZMWs shortcut filters'] + 1)),
            "{:.2f}".format(100.0 * r['ZMWs shortcut filters'] / (
                    r['ZMWs pass filters'] + r['ZMWs fail filters'] + r['ZMWs shortcut filters'] + 1)),
            e['Files']['input_dir'],
            e['Files']['subreads.bam'],
            e['Files']['subreads.bam.pbi'],
            e['Files']['reads.bam'],
            e['Files']['reads.bam.pbi'],
            input_bam,
            input_pbi
        ])
    tbl_new = pd.DataFrame(tbl_rows, columns=tbl_header)
    tbl_new = tbl_new.astype(str)
    return tbl_new
def merge_tables(tbl_old, tbl_new):
    """Outer-merge the existing sample table with freshly scanned rows.

    Per sample id, conflicting rows are resolved column-by-column by taking
    the first value that is not the string 'None'/'nan' (old rows win because
    the '_merge' indicator sorts 'left_only' first).

    Bug fix: when tbl_old is None there is no '_merge' column, so the original
    sort_values('_merge') raised KeyError on the first run; sorting is now
    conditional on the column being present.
    """
    if tbl_old is not None:
        outer_tbl = pd.merge(tbl_old, tbl_new, how='outer', sort=True, indicator=True)
    else:
        outer_tbl = tbl_new
    hs = []
    for l in list(outer_tbl['entity:sample_id'].unique()):
        g = outer_tbl.loc[outer_tbl['entity:sample_id'] == l]
        if '_merge' in g:
            g = g.sort_values('_merge')
        if len(g) == 1:
            hs.append(g.iloc[0].to_dict())
        else:
            # Conflict: keep the first non-missing value per column.
            h = {}
            for col_name in list(outer_tbl.columns):
                q = g[col_name]
                v = q.where((q != 'None') & (q != 'nan')).dropna()
                h[col_name] = v.iloc[0] if len(v) > 0 else ''
            hs.append(h)
    joined_tbl = pd.DataFrame(hs)
    if '_merge' in joined_tbl:
        del joined_tbl['_merge']
    # Keep the entity id as the leading column (Terra upload requirement).
    c = list(joined_tbl.columns)
    c.remove("entity:sample_id")
    c = ["entity:sample_id"] + c
    joined_tbl = joined_tbl[c]
    # Collapse whitespace runs in free-text columns.
    joined_tbl['description'] = joined_tbl['description'].str.replace(r'\s+', ' ', regex=True).astype('str')
    joined_tbl['bio_sample'] = joined_tbl['bio_sample'].str.replace(r'\s+', ' ', regex=True).astype('str')
    joined_tbl['well_sample'] = joined_tbl['well_sample'].str.replace(r'\s+', ' ', regex=True).astype('str')
    return joined_tbl
def traverse_xml(key, xml):
    """Flatten a parsed-XML dict into a list of {tag: attributes} tables.

    Recurses into nested dicts (and lists of dicts), stripping xmltodict's
    '@'/'#' markers and the known PacBio namespace prefixes from tag names;
    namespace declaration keys ('xmlns'/'xsi') are skipped entirely. The
    current node's own scalar attributes are appended last under `key`.
    """
    collected = []
    own = {}
    for raw_key, value in xml.items():
        if 'xmlns' in raw_key or 'xsi' in raw_key:
            continue
        tag = re.sub('^@|^#|^pbds:|^pbbase:|^pbmeta:|^pbsample:', '', raw_key)
        if isinstance(value, list):
            children = value
        elif isinstance(value, (str, dict)):
            children = [value]
        else:
            children = []
        for child in children:
            if isinstance(child, str):
                # Scalar attribute: record the (whole) original value.
                own[tag] = value
            elif isinstance(child, dict):
                collected.extend(traverse_xml(tag, child))
    if own:
        collected.append({key: own})
    return collected
def combine(tables):
    """Group a list of single-key tables into {tag: [table, table, ...]},
    preserving encounter order within each tag."""
    merged = {}
    for entry in tables:
        for tag in entry:
            merged.setdefault(tag, []).append(entry[tag])
    return merged
def load_xmls(gcs_buckets, project):
    """Scan GCS buckets for PacBio dataset XMLs and parse each into a dict.

    Each subreadset.xml / consensusreadset.xml blob is downloaded, parsed with
    xmltodict, flattened via traverse_xml/combine, and annotated with a
    'Files' map of the sibling BAM/report paths (empty strings for the paths
    that do not apply to that dataset type).
    """
    storage_client = storage.Client(project=project)
    parsed = []
    for gcs_bucket in gcs_buckets:
        bucket_name = re.sub("^gs://", "", gcs_bucket)
        for blob in storage_client.list_blobs(bucket_name):
            path = gcs_bucket + "/" + blob.name
            if 'subreadset.xml' in blob.name:
                files = {
                    'input_dir': os.path.dirname(path),
                    'subreadset.xml': path,
                    'subreads.bam': gcs_bucket + "/" + re.sub("et.xml", ".bam", blob.name),
                    'subreads.bam.pbi': gcs_bucket + "/" + re.sub("et.xml", ".bam.pbi", blob.name),
                    'consensusreadset.xml': "",
                    'ccs_reports.txt': "",
                    'reads.bam': "",
                    'reads.bam.pbi': ""
                }
            elif 'consensusreadset.xml' in blob.name:
                files = {
                    'input_dir': os.path.dirname(path),
                    'subreadset.xml': "",
                    'subreads.bam': "",
                    'subreads.bam.pbi': "",
                    'consensusreadset.xml': path,
                    'ccs_reports.txt': gcs_bucket + "/" + re.sub(".consensusreadset.xml", ".ccs_reports.txt",
                                                                 blob.name),
                    'reads.bam': gcs_bucket + "/" + re.sub(".consensusreadset.xml", ".reads.bam", blob.name),
                    'reads.bam.pbi': gcs_bucket + "/" + re.sub(".consensusreadset.xml", ".reads.bam.pbi", blob.name)
                }
            else:
                continue
            doc = xmltodict.parse(blob.download_as_string())
            entry = combine(traverse_xml('root', doc))
            entry['Files'] = files
            parsed.append(entry)
    return parsed
def load_ccs_report(project, ccs_report_path, e):
    """Download and tally a PacBio ccs_reports.txt from GCS.

    Returns a dict of ZMW filter counters. When `ccs_report_path` is empty the
    zeroed defaults below are returned unchanged (and `e` is not touched);
    otherwise the first matching blob is parsed, replacing the defaults.
    """
    d = {
        'ZMWs input': 0,
        'ZMWs pass filters': 0,
        'ZMWs fail filters': 0,
        'ZMWs shortcut filters': 0,
        'ZMWs with tandem repeats': 0,
        'Below SNR threshold': 0,
        'Median length filter': 0,
        'Lacking full passes': 0,
        'Heteroduplex insertions': 0,
        'Coverage drops': 0,
        'Insufficient draft cov': 0,
        'Draft too different': 0,
        'Draft generation error': 0,
        'Draft above --max-length': 0,
        'Draft below --min-length': 0,
        'Reads failed polishing': 0,
        'Empty coverage windows': 0,
        'CCS did not converge': 0,
        'CCS below minimum RQ': 0,
        'Unknown error': 0
    }
    if ccs_report_path != "":
        storage_client = storage.Client(project=project)
        ccs_report = re.sub("^gs://", "", e['Files']['ccs_reports.txt']).split("/")
        blobs = storage_client.list_blobs(ccs_report[0], prefix="/".join(ccs_report[1:]))
        for blob in blobs:
            blob.download_to_filename("ccs_report.txt")
            # NOTE(review): parsing replaces the defaults wholesale, so
            # counters missing from the report disappear from the result --
            # confirm downstream always finds the keys it indexes.
            d = {}
            # Resource fix: close the report file (the original leaked the
            # handle and shadowed the `file` builtin).
            with open("ccs_report.txt", "r") as report:
                for line in report:
                    if len(line) > 1 and 'Exclusive counts for ZMWs' not in line:
                        a = line.rstrip().split(":")
                        k = a[0].rstrip()
                        # Strip the trailing " (xx%)" annotation and spaces.
                        v = float(re.sub(" ", "", re.sub(r" \(.*$", "", a[1])))
                        d[k] = d.get(k, 0.0) + v
            break
    return d
def update_sample_table(namespace, workspace, buckets, project):
    """Merge freshly scanned flowcell metadata into the existing Terra
    'sample' table and return the combined DataFrame."""
    existing, _ = load_table(namespace, workspace, 'sample')
    fresh = load_new_sample_table(buckets, project)
    merged = merge_tables(existing, fresh)
    # Normalize stray 'nan' strings left over from astype(str) conversions.
    return merged.replace('^nan$', '', regex=True)
def update_sample_set_table(namespace, workspace, joined_tbl):
    """Recompute the sample_set table and its membership diff.

    Returns (ss, nms): `ss` is the merged sample_set entity table (one set per
    distinct bio_sample, union'd with the existing sets); `nms` is the outer
    membership join restricted to rows present on only one side ('left_only'
    = to add, 'right_only' = stale).

    Bug fix: the original dereferenced ss_old before its None check, crashing
    on the first run when no sample_set table exists yet.
    """
    ss_old, membership = load_table(namespace, workspace, 'sample_set', store_membership=True)
    # Old membership, exploded to one (set_id, sample) row per member.
    if ss_old is not None:
        oms = pd \
            .DataFrame({'entity:sample_set_id': list(ss_old['entity:sample_set_id']), 'sample': membership}) \
            .explode('sample', ignore_index=True)
        oms.columns = ['membership:sample_set_id', 'sample']
    else:
        oms = pd.DataFrame(columns=['membership:sample_set_id', 'sample'])
    # New sample-set entities: one per distinct bio_sample.
    ss = joined_tbl.filter(['bio_sample'], axis=1).drop_duplicates()
    ss.columns = ['entity:sample_set_id']
    if ss_old is not None:
        ss = pd.merge(ss_old, ss, how='outer', sort=True)
        ss = ss.replace('^nan$', '', regex=True)
    # New membership set.
    ms = joined_tbl.filter(['bio_sample', 'entity:sample_id'], axis=1).drop_duplicates()
    ms.columns = ['membership:sample_set_id', 'sample']
    # Full membership join; the indicator marks which side each row came from.
    fms = pd.merge(ms, oms, how='outer', indicator=True)
    # Keep only new or stale membership rows.
    nms = fms[fms['_merge'] != 'both']
    return ss, nms
def upload_table(namespace, workspace, table, label):
    """Push `table` to Terra via the flexible entity-upload API and print a
    success message (or the error payload) for this `label`."""
    tsv = table.to_csv(index=False, sep="\t")
    response = fapi.upload_entities(namespace, workspace, entity_data=tsv, model='flexible')
    if response.status_code != 200:
        print(response.json())
    else:
        print(f'Uploaded {len(table)} {label} rows successfully.')
def upload_tables(namespace, workspace, s, ss, nms):
    """Delete stale sample sets, then upload samples, sample sets and the new
    membership rows derived from the `nms` diff."""
    stale_ids = nms[nms['_merge'] == 'right_only']['membership:sample_set_id']
    for ssname in list(stale_ids):
        response = fapi.delete_sample_set(namespace, workspace, ssname)
        if response.status_code == 204:
            print(f'Removed out-of-date sample set {ssname} successfully.')
        else:
            print(response.json())
    additions = nms[nms['_merge'] == 'left_only'][['membership:sample_set_id', 'sample']]
    upload_table(namespace, workspace, s, 'sample')
    upload_table(namespace, workspace, ss, 'sample_set')
    upload_table(namespace, workspace, additions, 'sample_set membership')
def main():
    """Parse CLI options, rebuild the Terra tables, and upload when --run is set."""
    arg_parser = argparse.ArgumentParser(description='Update Terra workspace sample table', prog='update_pacbio_tables')
    arg_parser.add_argument('-p', '--project', type=str, help="GCP project")
    arg_parser.add_argument('-n', '--namespace', type=str, help="Terra namespace")
    arg_parser.add_argument('-w', '--workspace', type=str, help="Terra workspace")
    arg_parser.add_argument('-r', '--run', action='store_true', help="Turn off the default dry-run mode")
    arg_parser.add_argument('buckets', metavar='B', type=str, nargs='+', help='GCS buckets to scan')
    opts = arg_parser.parse_args()
    # Build the joined sample table, then derive the sample sets and the
    # membership diff against what is currently in the workspace.
    sample_tbl = update_sample_table(opts.namespace, opts.workspace, opts.buckets, opts.project)
    sample_sets, membership_diff = update_sample_set_table(opts.namespace, opts.workspace, sample_tbl)
    # Dry-run by default: nothing is written to Terra unless --run is given.
    if opts.run:
        upload_tables(opts.namespace, opts.workspace, sample_tbl, sample_sets, membership_diff)


if __name__ == "__main__":
    main()
|
{"hexsha": "ef2df10d38e73e9d6489ebcc89bbef94680139da", "size": 14244, "ext": "py", "lang": "Python", "max_stars_repo_path": "terra/scripts/update_pacbio_tables.py", "max_stars_repo_name": "CloudyTrees/long-read-pipelines", "max_stars_repo_head_hexsha": "c6fdfa8f4f7ba06a92598e6febb7a6b81d78e946", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 64, "max_stars_repo_stars_event_min_datetime": "2019-09-08T20:12:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T20:02:43.000Z", "max_issues_repo_path": "terra/scripts/update_pacbio_tables.py", "max_issues_repo_name": "CloudyTrees/long-read-pipelines", "max_issues_repo_head_hexsha": "c6fdfa8f4f7ba06a92598e6febb7a6b81d78e946", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 165, "max_issues_repo_issues_event_min_datetime": "2019-09-04T18:31:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:07:04.000Z", "max_forks_repo_path": "terra/scripts/update_pacbio_tables.py", "max_forks_repo_name": "CloudyTrees/long-read-pipelines", "max_forks_repo_head_hexsha": "c6fdfa8f4f7ba06a92598e6febb7a6b81d78e946", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-11-07T15:51:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T17:20:26.000Z", "avg_line_length": 36.7113402062, "max_line_length": 121, "alphanum_fraction": 0.5713984836, "include": true, "reason": "import numpy", "num_tokens": 3635}
|
# Capture loop for an Intel RealSense camera: continuously grabs aligned
# depth + color frames, saves them to numbered PGM/PPM files, and shows a
# blended live preview until interrupted with Ctrl-C.
import pyrealsense2 as rs
import numpy as np
import cv2
import os
#import keyboard
import time

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
# Align depth frames to the color stream so the two images correspond pixel-to-pixel.
align = rs.align(rs.stream.color)
#path = os.path.join(os.path.dirname(os.getcwd()), 'data')
#path = os.path.join(path, 'realsense/images')
path = '/home/guillermo/Escritorio/images'#/media/guillermo/60F9-DB6E/external/images'
# Continue numbering after any images already saved in the rgb folder.
a = len(os.listdir(os.path.join(path, 'rgb')))
i = a
try:
    while True:
        i = i + 1
        time.sleep(0.05) # remove this when recording test data
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        # NOTE(review): color is taken from the raw frameset, not the aligned
        # one — presumably equivalent since alignment targets color; confirm.
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Save raw 16-bit depth (PGM) and color (PPM) with matching indices.
        cv2.imwrite(os.path.join(os.path.join(path, 'd'), str(i) + '.pgm'), depth_image)
        cv2.imwrite(os.path.join(os.path.join(path, 'rgb'), str(i) + '.ppm'), color_image)
        colorizer = rs.colorizer()
        depth_image_colorized = np.asanyarray(colorizer.colorize(depth_frame).get_data())
        # Show images: blend color and colorized depth 50/50 for preview
        combined = cv2.addWeighted(color_image, 0.5, depth_image_colorized, 0.5, 0)
        cv2.namedWindow('RGB-D', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RGB-D', combined)
        cv2.waitKey(1)
except KeyboardInterrupt:
    # Stop streaming
    pipeline.stop()
|
{"hexsha": "29b75bf35a9bd3fc7b656c8bf20b1ffb297aa2e9", "size": 1821, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/capture.py", "max_stars_repo_name": "GRobled0/CenterNet", "max_stars_repo_head_hexsha": "740ecf06a96897b3545249bbb239264394283565", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/capture.py", "max_issues_repo_name": "GRobled0/CenterNet", "max_issues_repo_head_hexsha": "740ecf06a96897b3545249bbb239264394283565", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/capture.py", "max_forks_repo_name": "GRobled0/CenterNet", "max_forks_repo_head_hexsha": "740ecf06a96897b3545249bbb239264394283565", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9047619048, "max_line_length": 90, "alphanum_fraction": 0.6754530478, "include": true, "reason": "import numpy", "num_tokens": 469}
|
"""Abstract supertype for container modules — modules that specify an
execution structure for a given group of modules."""
abstract type KnetContainer <: KnetModule end
"""
    Sequential <: KnetModule
# Constructor
    Sequential(ls...) adds layers in ls.
# Fields
    layers::Array{Union{KnetModule, Function}, 1}
# Usage
    s = Sequential(Linear(...), ReLU(), Linear(...),...)
    @mc s(x)
    @run r(x)
Layers stored in the layers field are executed in order, where layer
n+1 takes layer n's output as its input. Layer inputs and outputs
should be consistent.
See the `add!` function to add additional layers after construction.
# Note on storing functions in layers
Currently, JLD throws an error if you add functions in layers,
but it is still kept for convenience, considering future
support. Currently, using functional modules is a better practice.
See FnModule types.
# NOTE(review): `type` below is pre-1.0 Julia syntax (now `mutable struct`);
# this file targets Julia <= 0.6.
"""
type Sequential <: KnetContainer
    layers::Array{Union{KnetModule, Function}, 1}
    #layers::Array{KnetModule, 1}
end
# Outer constructor: collect the given layers into a Sequential and warn
# when plain functions are present, since JLD serialization cannot handle
# them (see the note on the Sequential type).
function Sequential(ls...)
    seq = Sequential([l for l in ls])
    if any(l -> isa(l, Function), seq.layers)
        warn("Serialization functions like save_module and load_module",
             " may not work when you use functions in Sequential")
    end
    return seq
end
"""
    add!(s::Sequential, m...)

Append the modules `m...` to the end of `s.layers`.
Identical to `push!(s.layers, m...)`.
"""
function add!(s::Sequential, m...)
    return push!(s.layers, m...)
end
# Forward execution: feed `x` through each layer in order. Plain functions
# and FnModules are called without the context; every other KnetModule
# receives the context as its first argument.
function (s::Sequential)(ctx, x)
    for layer in s.layers
        ctx_free = isa(layer, Function) || isa(layer, FnModule)
        x = ctx_free ? layer(x) : layer(ctx, x)
    end
    return x
end
|
{"hexsha": "48846e685279b7ac277dade6afcfef84491d56a1", "size": 1647, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/container.jl", "max_stars_repo_name": "cangumeli/KnetModules.jl", "max_stars_repo_head_hexsha": "ecf9d1dce785a9fafd2e2cd7cb354db10b1a032d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/container.jl", "max_issues_repo_name": "cangumeli/KnetModules.jl", "max_issues_repo_head_hexsha": "ecf9d1dce785a9fafd2e2cd7cb354db10b1a032d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/container.jl", "max_forks_repo_name": "cangumeli/KnetModules.jl", "max_forks_repo_head_hexsha": "ecf9d1dce785a9fafd2e2cd7cb354db10b1a032d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.45, "max_line_length": 72, "alphanum_fraction": 0.6630236794, "num_tokens": 407}
|
'''
Demo to show use of the engineering Formatter.
'''
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import EngFormatter

fig, ax = plt.subplots()
ax.set_xscale('log')
# Render x tick labels in engineering notation with a Hz unit and one
# decimal place, e.g. "1.0 kHz".
formatter = EngFormatter(unit='Hz', places=1)
ax.xaxis.set_major_formatter(formatter)
# Noisy curve (quadratic in log10 x) spanning 10 Hz .. 1 GHz.
xs = np.logspace(1, 9, 100)
ys = (0.8 + 0.4*np.random.uniform(size=100))*np.log10(xs)**2
ax.plot(xs, ys)
plt.show()
|
{"hexsha": "a2c5d3003b16b04404a7c188818da46755b1d995", "size": 402, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/api/engineering_formatter.py", "max_stars_repo_name": "jbbrokaw/matplotlib", "max_stars_repo_head_hexsha": "86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-04-11T08:55:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T04:31:26.000Z", "max_issues_repo_path": "examples/api/engineering_formatter.py", "max_issues_repo_name": "jbbrokaw/matplotlib", "max_issues_repo_head_hexsha": "86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-10T17:57:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-26T16:23:09.000Z", "max_forks_repo_path": "examples/api/engineering_formatter.py", "max_forks_repo_name": "jbbrokaw/matplotlib", "max_forks_repo_head_hexsha": "86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-10-05T04:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-11T18:06:02.000Z", "avg_line_length": 20.1, "max_line_length": 60, "alphanum_fraction": 0.7213930348, "include": true, "reason": "import numpy", "num_tokens": 111}
|
import os
import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import preprocess
# Return a list of all image names with a given extension in a given folder
def listImages(dir, extension):
    """Return the names of all entries in *dir* ending with *extension*.

    Args:
        dir: directory to scan (non-recursive).
        extension: filename suffix to match, e.g. ".png".
    Returns:
        List of matching file names, in os.listdir order.
    """
    # Comprehension replaces the manual append loop (same semantics).
    return [img for img in os.listdir(dir) if img.endswith(extension)]
# Return a list of all image names in subfolders with a given extension in a given folder
def listImagesSub(dir, extension):
    """Return "subdir/name" paths for files with *extension* one level below *dir*.

    Bug fix: the original listed ``dir + subdir`` with no path separator,
    which only worked when *dir* ended with a slash; listing now uses
    ``os.path.join``. The returned relative paths keep the original
    "subdir/name" format so callers are unaffected.
    """
    res = []
    for subdir in os.listdir(dir):
        for img in os.listdir(os.path.join(dir, subdir)):
            if img.endswith(extension):
                res.append(subdir + "/" + img)
    return res
# Compute the slope of a line segment [x1, y1, x2, y2].
# Vertical segments (zero run) get a sentinel slope equal to the image
# height instead of infinity.
def getSlope(line, height):
    run = line[2] - line[0]
    if run == 0:
        # Vertical: use the image height as a stand-in for an infinite slope.
        return height
    return (line[3] - line[1]) / run
# Decide whether two slopes are (close to) perpendicular.
# slope2 counts as perpendicular when it lies within 1 of the negative
# reciprocal of slope1; if slope1 is zero the roles are swapped, and two
# zero slopes are never perpendicular.
def isPerpendicular(slope1, slope2):
    if slope1 != 0:
        return abs((-1 * (1 / slope1)) - slope2) < 1
    if slope2 != 0:
        return abs((-1 * (1 / slope2)) - slope1) < 1
    return False
# Calculate the intersection between two line segments, or None if they
# do not cross within both segments.
# Adapted from https://rosettacode.org/wiki/Find_the_intersection_of_two_lines#Python
def line_intersect(Ax1, Ay1, Ax2, Ay2, Bx1, By1, Bx2, By2):
    """ returns a (x, y) tuple or None if there is no intersection """
    denom = (By2 - By1) * (Ax2 - Ax1) - (Bx2 - Bx1) * (Ay2 - Ay1)
    if not denom:
        # Parallel or degenerate segments never intersect.
        return None
    tA = ((Bx2 - Bx1) * (Ay1 - By1) - (By2 - By1) * (Ax1 - Bx1)) / denom
    tB = ((Ax2 - Ax1) * (Ay1 - By1) - (Ay2 - Ay1) * (Ax1 - Bx1)) / denom
    if not (0 <= tA <= 1 and 0 <= tB <= 1):
        # The infinite lines cross, but outside one of the segments.
        return None
    return (Ax1 + tA * (Ax2 - Ax1), Ay1 + tA * (Ay2 - Ay1))
# Smallest Euclidean distance from a point to any of the four endpoints of
# two line segments. Used to pick the radius of a rotational symmetry:
# minimum distance from the intersection to any endpoint = radius.
def minDistance(intersect, line1, line2):
    cx, cy = intersect[0], intersect[1]
    endpoints = [(line1[0], line1[1]), (line1[2], line1[3]),
                 (line2[0], line2[1]), (line2[2], line2[3])]
    return min(np.sqrt((px - cx) ** 2 + (py - cy) ** 2) for px, py in endpoints)
# Flatten the nested symmetry structure for ease of processing, from:
#   [[[[[line], slope, score, normScore, depth], ...], depth] ... ]
# to:
#   [[[line], slope, score, normScore, depth], ...]
def placeInOrder(symmetries):
    flat = []
    for entry in symmetries:
        # entry[0] is the list of symmetry records for one recursion level.
        flat.extend(entry[0])
    return flat
# Only required when cuts are made before knowing symThreshold (Ipynb kernel)
# Will also reorder the symmetries as placeInOrder does
# Will remove all smaller parts of an image where the cut was made on a reflection
# symmetry line with a low score. Will also remove all parts based on that recursive loop.
def removeBadCuts(symmetries, symThreshold):
    """Flatten *symmetries* while dropping groups derived from low-score cuts.

    Each entry is [records, depth]; records[0] is the group's main symmetry
    and record[2] its score. Groups whose main score is below *symThreshold*
    are dropped together with deeper groups spawned from them.
    """
    newSymmetries = []
    deleteDepth = 99999  # sentinel: no pending deletion
    # NOTE(review): this loop removes items from `symmetries` while
    # iterating it, which makes Python skip the element after each removal
    # — presumably tolerated here, but confirm before relying on it.
    for syms in symmetries:
        if syms[1] == 0:
            # Depth 0 (whole image) is always kept.
            newSymmetries += syms[0]
            continue
        if syms[1] >= deleteDepth:
            # Part of a recursive loop whose parent cut was rejected.
            symmetries.remove(syms)
            continue
        else:
            deleteDepth = 99999
        mainSym = syms[0][0]
        if mainSym[2] < symThreshold:
            # Bad cut: drop this group and mark deeper groups for removal.
            deleteDepth = syms[1] + 1
            symmetries.remove(syms)
            continue
        else:
            newSymmetries += syms[0]
    return newSymmetries
# Remove symmetries if they have a normalized score under normThreshold
# or if they have a normalized score of 1.0 and a score under symThreshold, i.e. are the main symmetry in their recursive loop (sub image)
# If a main symmetry is removed, all other symmetries in that recursive loop are also removed,
# by removing next symmetries untill they have a different depth, meaning they belong to a different loop
def removeBadSymmetries(symmetries, symThreshold, normThreshold):
    """Filter the flat symmetry list by score thresholds.

    Each record is [line, slope, score, normScore, depth]. Works on a copy
    so the index-based scan over the original list stays stable.
    """
    copySym = symmetries[:]
    # Start from one, always keep first symmetry
    for i in range(1, len(symmetries)):
        if symmetries[i] not in copySym:
            # Already removed as part of an earlier loop's purge.
            continue
        if symmetries[i][3] < normThreshold:
            copySym.remove(symmetries[i])
        elif symmetries[i][3] == 1.0:
            # normScore 1.0 marks the main symmetry of a recursive loop.
            if symmetries[i][2] < symThreshold:
                copySym.remove(symmetries[i])
                j = i + 1
                if j >= len(symmetries):
                    break
                # Purge the rest of this loop: entries share the same depth.
                while (symmetries[i][4] == symmetries[j][4]):
                    if symmetries[j] not in copySym:
                        j = j + 1
                        if j >= len(symmetries):
                            break
                        continue
                    copySym.remove(symmetries[j])
                    j = j + 1
                    if j >= len(symmetries):
                        break
    return copySym
# Loop over each line and compare to other lines
# If slope is similar and the distance between endpoints is small enough, remove line with lower symmetry score
# maxDistX and maxDistY set based on width and height of image
# Both dictate the maximum distance between endpoints of lines
# If line1-endpoint1 is within maxDist to line2-endpoint1
# line1-endpoint2 only has to lie within (maxDist*0,66) line2-endpoint2 to be flagged as similar
def removeSimilarLines(symmetries, image, lineSimilarity):
    """Deduplicate near-identical reflection lines in place.

    Two lines count as similar when their slopes are close (or both are
    near-vertical) and their midpoints lie within maxDist of each other;
    the one with the lower score is removed (the first symmetry always
    survives). Mutates and returns *symmetries*.
    """
    height, width, _ = image.shape
    maxDistX = width / lineSimilarity
    maxDistY = height / lineSimilarity
    maxDist = (maxDistX + maxDistY) / 2
    maxSlopeDiff = maxDistY
    copySym = symmetries[:]
    def lowerScore(sym1, sym2):
        # Return whichever record has the smaller symmetry score.
        if sym1[2] < sym2[2]:
            return sym1
        return sym2
    for i in range(0, len(copySym)):
        for j in range(i + 1, len(copySym)):
            if copySym[i] not in symmetries:
                # i was removed in an earlier comparison; its row is done.
                break
            if copySym[j] not in symmetries:
                continue
            # Similar slopes, or both lines near-vertical (sentinel slopes).
            if abs(copySym[i][1] - copySym[j][1]) < maxSlopeDiff or (abs(copySym[i][1]) > height / 3 and abs(copySym[j][1]) > height / 3):
                center1 = (((copySym[i][0][0] + copySym[i][0][2]) / 2), ((copySym[i][0][1] + copySym[i][0][3]) / 2))
                center2 = (((copySym[j][0][0] + copySym[j][0][2]) / 2), ((copySym[j][0][1] + copySym[j][0][3]) / 2))
                dist = np.sqrt( (center1[0] - center2[0])**2 + (center1[1] - center2[1])**2 )
                if dist < maxDist:
                    if i == 0:
                        # The first symmetry is always kept.
                        symmetries.remove(copySym[j])
                    else:
                        symmetries.remove(lowerScore(copySym[i], copySym[j]))
    return symmetries
# Remove similar rotational symmetries
# Remove if centerpoint is within maxDistX and maxDistY and the radius is within max(maxDistX, maxDistY)
# Rotation symmetry which has the highest avarage depth is removed
# Average depth is calculated based on the depth of the two reflection lines that form the rotational symmetry
def removeSimilarRotational(rotations, image, rotationSimilarity):
    """Deduplicate rotational symmetries in place (no return value).

    Each rotation is [(x, y) center, radius, meanScore]. Of two similar
    rotations, the one chosen by removeRot (smaller radius) is removed.
    """
    height, width, _ = image.shape
    maxDistX = width / rotationSimilarity
    maxDistY = height / rotationSimilarity
    copyRot = rotations[:]
    def removeRot(rot1, rot2):
        # i=1 -> Remove rotaional with smaller radius
        # i=2 -> Remove rotational with lower average scores of reflection symmetries that made the rotation:
        i = 1
        if rot1[i] < rot2[i]:
            return rot1
        return rot2
    for i in range(0, len(copyRot)):
        for j in range(i + 1, len(copyRot)):
            if copyRot[i] not in rotations:
                # i already removed; nothing more to compare it against.
                break
            if copyRot[j] not in rotations:
                continue
            # Similar when centers are close on both axes and radii are close.
            if abs(copyRot[i][0][0] - copyRot[j][0][0]) < maxDistX:
                if abs(copyRot[i][0][1] - copyRot[j][0][1]) < maxDistY:
                    if abs(copyRot[i][1] - copyRot[j][1]) < max(maxDistX, maxDistY):
                        rotations.remove(removeRot(copyRot[i], copyRot[j]))
# Checks whether the four endpoint-to-intersection distances are all within
# distDifference of one another. Used to detect rotational symmetries with
# the non-ML approach.
def checkDistance(intersect, line1, line2, distDifference):
    cx, cy = intersect[0], intersect[1]
    dists = []
    for seg in (line1, line2):
        for px, py in ((seg[0], seg[1]), (seg[2], seg[3])):
            dists.append(np.sqrt((px - cx) ** 2 + (py - cy) ** 2))
    # Every pairwise difference must stay within the tolerance.
    for a in range(len(dists)):
        for b in range(a + 1, len(dists)):
            if abs(dists[a] - dists[b]) > distDifference:
                return False
    return True
# Find rotaional symmetries with a given machine learning model
def rotationalSymmetriesML(symmetries, model, data):
    """Find rotational symmetries by classifying pairs of reflection lines.

    Loops over each pair of reflection symmetries; intersecting pairs are
    collected into a feature table, run through preprocess.preproccesData,
    and classified by *model*. Each positive prediction yields a rotational
    symmetry at the intersection point with radius from minDistance().

    Args:
        symmetries: list of [[x1, y1, x2, y2], slope, score, normScore, depth]
        model: fitted classifier exposing predict()
        data: image array; only its height and width are used
    Returns:
        list of [(x, y) center, radius, meanScore] rotational symmetries
    """
    h, w, _ = data.shape
    rotations = []
    # Gather one feature row per intersecting pair, then build the frame in
    # one shot: DataFrame.append() was removed in pandas 2.0 and was
    # quadratic anyway.
    rows = []
    for i in range(0, len(symmetries)):
        for j in range(i + 1, len(symmetries)):
            intersect = line_intersect(symmetries[i][0][0], symmetries[i][0][1], symmetries[i][0][2], symmetries[i][0][3],
                                       symmetries[j][0][0], symmetries[j][0][1], symmetries[j][0][2], symmetries[j][0][3])
            if intersect is None:
                continue
            rows.append({
                "line1x1": symmetries[i][0][0],
                "line1y1": symmetries[i][0][1],
                "line1x2": symmetries[i][0][2],
                "line1y2": symmetries[i][0][3],
                "line1Score": symmetries[i][2],
                "line2x1": symmetries[j][0][0],
                "line2y1": symmetries[j][0][1],
                "line2x2": symmetries[j][0][2],
                "line2y2": symmetries[j][0][3],
                "line2Score": symmetries[j][2],
                "height": h,
                "width": w
            })
    if len(rows) > 0:
        # NOTE(review): the original frame carried a repeated "rotation"
        # index; this one uses a RangeIndex — presumably irrelevant to
        # preprocess.preproccesData, but confirm.
        features = pd.DataFrame(rows)
        cpyData = features.copy()
        features = preprocess.preproccesData(features)
        pred = model.predict(features)
        for i in range(0, len(features)):
            if pred[i] == True:
                row = cpyData.iloc[i]  # positional lookup; index-safe
                intersect = line_intersect(row["line1x1"], row["line1y1"], row["line1x2"], row["line1y2"],
                                           row["line2x1"], row["line2y1"], row["line2x2"], row["line2y2"])
                rad = minDistance(intersect,
                                  [row["line1x1"], row["line1y1"], row["line1x2"], row["line1y2"]],
                                  [row["line2x1"], row["line2y1"], row["line2x2"], row["line2y2"]])
                meanScore = (row["line1Score"] + row["line2Score"]) / 2
                rotations.append([intersect, rad, meanScore])
    return rotations
# Find rotaional symmetries given reflection symmetries and a threshold
# Will loop over each reflection symmetry in a double loop and check if any pairs:
# - have similar symmetry score, their relative score must be inside the circleSymThreshold
# - have intersections,
# - are (close to) perpendicular
# - have distances from their endpoints to the intersection not too different from one another
# Positive results will create a rotational symmetry in their centerpoint
# The radius is determined by the minDistance function
# Reflection symmetries which create a rotational symmetrie are removed afterwards
# Will not be executed in 'slow' mode
def rotationalSymmetries(symmetries, image, circleSymThreshold):
    """Rule-based rotational symmetry detection (non-ML counterpart of
    rotationalSymmetriesML). Returns [(x, y) center, radius, meanScore] lists.
    """
    rotations = []
    tmp = []  # NOTE(review): unused in this function
    copySym = symmetries[:]
    height, width, _ = image.shape
    distDifference = min(height / 5, width / 5)
    # NOTE(review): iterating symmetries against its own copy visits each
    # unordered pair twice (and each line with itself); self-pairs are
    # parallel, so line_intersect returns None for them.
    for sym in symmetries:
        for subsym in copySym:
            # First check if lines have similar symmetry scores
            if max(sym[2], subsym[2]) * circleSymThreshold > min(sym[2], subsym[2]):
                continue
            # Check if lines are perpendicular
            if isPerpendicular(sym[1], subsym[1]) == False:
                continue
            intersect = line_intersect(sym[0][0], sym[0][1], sym[0][2], sym[0][3], subsym[0][0], subsym[0][1], subsym[0][2], subsym[0][3])
            if intersect != None:
                # Endpoint distances must be balanced around the center.
                if checkDistance(intersect, sym[0], subsym[0], distDifference) == False:
                    continue
                rad = minDistance(intersect, sym[0], subsym[0])
                meanScore = (sym[2] + subsym[2]) / 2
                rot = [intersect, rad, meanScore]
                rotations.append(rot)
    return rotations
# Plot all given reflection symmetry lines
def plotLines(symmetries):
    """Draw each reflection line on the current pyplot axes, coloured by
    its recursion depth (record index 4)."""
    # Largest depth present; sets how many distinct colours are sampled.
    n = 0
    for sym in symmetries:
        if sym[4] > n:
            n = sym[4]
    linewidth = 3
    # Colors dicated by colormap (default: viridis)
    # NOTE(review): the code actually samples the 'jet' colormap here.
    colors = plt.cm.jet(np.linspace(0,1,n + 1))
    for i, sym in enumerate(symmetries):
        color = colors[sym[4]]
        x = [sym[0][0], sym[0][2]]
        y = [sym[0][1], sym[0][3]]
        plt.plot(x, y, color=color, linewidth=linewidth)
# Plot all given rotational symmetries
def plotRotations(rotations):
    """Draw each rotational symmetry as a yellow circle on the current axes."""
    for rot in rotations:
        # rot is [(x, y) center, radius, meanScore]
        circleSym = plt.Circle(rot[0], linewidth=2.5, radius=rot[1], color="yellow", fill=False)
        fig = plt.gcf()
        axs = fig.gca()
        axs.add_patch(circleSym)
# Shrink an image by the given fraction (e.g. fraction=2 halves each side),
# using OpenCV area interpolation (well-suited to downscaling).
def resize_image(image, fraction):
    h, w, _ = image.shape
    target = (int(w / fraction), int(h / fraction))  # cv2 wants (width, height)
    return cv2.resize(image, target, interpolation=cv2.INTER_AREA)
|
{"hexsha": "12fdeef5d26bbb94352e8d3eab0790bd1137b9c1", "size": 14709, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "Koen-Git/ColorSymDetect", "max_stars_repo_head_hexsha": "5d6bb6734063f4a09c9a153527a446ce5c02a5b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-07T20:08:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T20:08:32.000Z", "max_issues_repo_path": "util.py", "max_issues_repo_name": "Koen-Git/ColorSymDetect", "max_issues_repo_head_hexsha": "5d6bb6734063f4a09c9a153527a446ce5c02a5b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "Koen-Git/ColorSymDetect", "max_forks_repo_head_hexsha": "5d6bb6734063f4a09c9a153527a446ce5c02a5b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0257142857, "max_line_length": 232, "alphanum_fraction": 0.6074512203, "include": true, "reason": "import numpy", "num_tokens": 4060}
|
function mrAnatSetNiftiXform(niftiFile, outFile);
%
% mrAnatSetNiftiXform([niftiFile=uigetfile],[outFile=uiputfile])
%
% Allows you to set the qto xform in a nifti file.
%
% The user marks three landmarks (AC, PC, mid-sagittal point) in a viewer;
% these define an AC-PC aligned coordinate frame whose affine transform is
% written back into the NIFTI header.
%
% REQUIRES:
%  * Stanford anatomy tools (eg. /usr/local/matlab/toolbox/mri/Anatomy)
%
% HISTORY:
% 2006.10.25 RFD (bob@white.stanford.edu) wrote it.

% Prompt for an input file when none was provided.
if (~exist('niftiFile','var') || isempty(niftiFile))
  [f,p] = uigetfile({'*.nii.gz','NIFTI';'*.*', 'All Files (*.*)'}, 'Select NIFTI file...');
  if(isnumeric(f)) disp('User canceled.'); return; end
  niftiFile = fullfile(p,f);
end

% Load the volume, reorient to the canonical axis order, and clip the
% intensity histogram for display contrast.
ni = niftiRead(niftiFile);
ni = niftiApplyCannonicalXform(ni);
img = mrAnatHistogramClip(double(ni.data), 0.4, 0.98);

% Build a minimal nii struct for view_nii (double data, centered originator).
nii.img = img;
nii.hdr.dime.pixdim = [1 ni.pixdim 1 1 1 1];
nii.hdr.dime.datatype = 64;
nii.hdr.dime.dim = [3 size(nii.img) 1 1 1 1];
%nii.hdr.hist.originator = [round(ni.qto_ijk(1:3,:)*[0 0 0 1]')'+1 128 0];
nii.hdr.hist.originator = [round(size(nii.img)./2) 128 0];

h = figure('unit','normal','pos', [0.18 0.08 0.5 0.85],'name','Set AC-PC landmarks');
opt.setarea = [0.05 0.15 0.9 0.8];
opt.usecolorbar = 0;
opt.usestretch = 0;
opt.command = 'init';
view_nii(h, nii, opt);
%d = getappdata(h);

% Each button callback stashes the currently clicked voxel into the figure
% appdata under 'ac'/'pc'/'ms' and echoes it on the button label; FINISH
% sets the 'done' flag polled below.
hstr = num2str(h);
cb = ['d=getappdata(' hstr ');p=d.nii_view.imgXYZ.vox;setappdata(' hstr ',''ac'',p);set(gcbo,''String'',[''AC=['' num2str(p) '']'']);'];
b1 = uicontrol(h, 'Style','pushbutton','Visible','on','String','Set AC','Position',[20 30 150 30],'Callback',cb);
cb = ['d=getappdata(' hstr ');p=d.nii_view.imgXYZ.vox;setappdata(' hstr ',''pc'',p);set(gcbo,''String'',[''PC=['' num2str(p) '']'']);'];
b2 = uicontrol(h, 'Style','pushbutton','Visible','on','String','Set PC','Position',[190 30 150 30],'Callback',cb);
cb = ['d=getappdata(' hstr ');p=d.nii_view.imgXYZ.vox;setappdata(' hstr ',''ms'',p);set(gcbo,''String'',[''MidSag=['' num2str(p) '']'']);'];
b3 = uicontrol(h, 'Style','pushbutton','Visible','on','String','Set MidSag','Position',[360 30 150 30],'Callback',cb);
cb = ['setappdata(' hstr ',''done'',1);'];
b4 = uicontrol(h, 'Style','pushbutton','Visible','on','String','FINISH','Position',[530 30 80 30],'Callback',cb);

% Poll until all three landmarks are set and FINISH was pressed.
done = false;
while(~done)
    d = getappdata(h);
    if(isfield(d,'ac')&&isfield(d,'pc')&&isfield(d,'ms')&&isfield(d,'done')&&d.done==1)
        done = true;
        %convert matlab 1-based indices to zero-indexed indices
        %alignLandmarks = [d.ac; d.pc; d.ms]-1;
        alignLandmarks = [d.ac; d.pc; d.ms];
    end
    pause(.1);
end
close(h);
%disp(alignLandmarks);

% The AC becomes the coordinate origin.
origin = alignLandmarks(1,:);
% Define the current image axes by re-centering on the origin (the AC)
imY = alignLandmarks(2,:)-origin; imY = imY./norm(imY);
imZ = alignLandmarks(3,:)-origin; imZ = imZ./norm(imZ);
imX = cross(imZ,imY);
% Make sure the vectors point right, superior, anterior
if(imX(1)<0) imX = -imX; end
if(imY(2)<0) imY = -imY; end
if(imZ(3)<0) imZ = -imZ; end
% Project the current image axes to the cannonical AC-PC axes. These
% are defined as X=[1,0,0], Y=[0,1,0], Z=[0,0,1], with the origin
% (0,0,0) at the AC. Note that the following are the projections
x = [0 1 imY(3)]; x = x./norm(x);
y = [1 0 imX(3)]; y = y./norm(y);
%z = [0 imX(2) 1]; z = z./norm(z);
z = [0 -imY(1) 1]; z = z./norm(z);
% Define the 3 rotations using the projections. We have to set the sign
% of the rotation, depending on which side of the plane we came from.
rot(1) = sign(x(3))*acos(dot(x,[0 1 0])); % rot about x-axis (pitch)
rot(2) = sign(y(3))*acos(dot(y,[1 0 0])); % rot about y-axis (roll)
rot(3) = sign(z(2))*acos(dot(z,[0 0 1])); % rot about z-axis (yaw)
scale = ni.pixdim;
% Affine build assumes that we need to translate before rotating. But,
% our rotations have been computed about the origin, so we'll pass a
% zero translation and set it ourselves (below).
im2tal = affineBuild([0 0 0], rot, scale, [0 0 0]);
tal2im = inv(im2tal);
% Insert the translation.
%tal2im(1:3,4) = [origin+scale/2]';
tal2im(1:3,4) = [origin]';
im2tal = inv(tal2im);

% Confirm before writing the transform back out.
if (~exist('outFile','var') || isempty(outFile))
  outFile = niftiFile;
end
resp = questdlg(['Save new transform matrix in ' outFile '?'],'Save confirmation','Ok','Cancel','Ok');
if(strcmpi(resp,'cancel'))
  disp('user canceled- transform NOT saved.');
  return;
end
% Reuse the existing output file's header when it exists, otherwise save
% the in-memory volume under the new name.
if(exist(outFile,'file'))
  clear ni;
  ni = niftiRead(outFile);
  ni = niftiApplyCannonicalXform(ni);
else
  ni.fname = outFile;
end
ni = niftiSetQto(ni,im2tal,true);
% NOTE: our data are always left-handed (ie. 'neurological;
% unflipped; left-is-left). So, we force the qfac to reflect
% that.
ni.qfac = 1;
writeFileNifti(ni);
disp('transform saved.');
return;
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/mrAnatomy/VolumeUtilities/mrAnatSetNiftiXform.m"}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # #
# @Author: ZhuangYuZhou
# @E-mail: 605540375@qq.com
# @Time: 22-4-20
# @Desc:
# # # # # # # # # # # # # # # # # # # # # # # #
import torch
import numpy as np
import math
import torch.nn.functional as F
def get_order_value_list(start_idx, input_volume_axis, crop_shape_axi, extraction_step_axi):
    """Sliding-window start positions along one axis.

    Steps from *start_idx* by *extraction_step_axi* until the crop would run
    past the axis end; the final position is clamped so the last crop ends
    exactly at the axis boundary.
    """
    last_valid = input_volume_axis - crop_shape_axi
    positions = [start_idx]
    cursor = start_idx
    while cursor < last_valid:
        cursor += extraction_step_axi
        if cursor > last_valid:
            # Clamp the final window to touch the end of the axis.
            positions.append(last_valid)
            break
        positions.append(cursor)
    return positions
def get_order_crop_list(volume_shape, crop_shape, extraction_step):
    """Enumerate (z, y, x) crop start positions covering the whole volume.

    :param volume_shape: e.g. (155, 240, 240)
    :param crop_shape: e.g. (128, 128, 128)
    :param extraction_step: e.g. (128, 128, 128)
    :return: list of (z, y, x) start tuples, last window per axis clamped
        to the volume boundary
    """
    for axis in range(3):
        assert volume_shape[axis] >= crop_shape[axis], "crop size is too big"
    # Start positions per axis, then the Cartesian product in z/y/x order.
    per_axis = [
        get_order_value_list(start_idx=0,
                             input_volume_axis=volume_shape[axis],
                             crop_shape_axi=crop_shape[axis],
                             extraction_step_axi=extraction_step[axis])
        for axis in range(3)
    ]
    return [(z, y, x) for z in per_axis[0] for y in per_axis[1] for x in per_axis[2]]
|
{"hexsha": "ffe17a1ce5c6d65390e87c7d576cc718aef5de25", "size": 1953, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/dataloader/medical_loader_utils.py", "max_stars_repo_name": "Healingl/3DAPRNet", "max_stars_repo_head_hexsha": "7c5e0028ae844df4e1f26327e8b438532ca0745f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/dataloader/medical_loader_utils.py", "max_issues_repo_name": "Healingl/3DAPRNet", "max_issues_repo_head_hexsha": "7c5e0028ae844df4e1f26327e8b438532ca0745f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/dataloader/medical_loader_utils.py", "max_forks_repo_name": "Healingl/3DAPRNet", "max_forks_repo_head_hexsha": "7c5e0028ae844df4e1f26327e8b438532ca0745f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8571428571, "max_line_length": 153, "alphanum_fraction": 0.7004608295, "include": true, "reason": "import numpy", "num_tokens": 545}
|
import re

import numpy as np
from sqlalchemy import create_engine, inspect
def check_input_data(data):
    """
    Check whether the input payload contains all required fields.
    Args:
        data: input data mapping
    Returns: True when 'uri', 'type', 'part' and 'index' are all present,
        otherwise False
    """
    # `key in data` replaces the chained `in data.keys()` membership tests.
    required_keys = ('uri', 'type', 'part', 'index')
    return all(key in data for key in required_keys)
def rdbms_check_if_uri_is_valid(input_uri, part):
    """
    Test whether the provided connection URI is valid and contains *part*.
    Args:
        input_uri: SQLAlchemy connection URI
        part: table name expected to exist in the database
    Returns: True when the URI connects and the table exists; False on a
        missing table or on any connection/driver error.
    """
    try:
        engine = create_engine(input_uri)
        # Engine.table_names() was deprecated in SQLAlchemy 1.4 and removed
        # in 2.0; the Inspector API works across supported versions.
        return part in inspect(engine).get_table_names()
    except Exception:
        # Any failure to connect or introspect means the URI is not usable.
        return False
def map_dtype_to_elk_type(df_type):
    """Map a pandas/NumPy dtype to an Elasticsearch field-type mapping.

    Args:
        df_type: a NumPy dtype (as produced by ``DataFrame.dtypes``)

    Returns:
        dict: e.g. ``{'type': 'integer'}``; unknown dtypes fall back to
        ``{'type': 'text'}`` (the old code raised UnboundLocalError there).
    """
    # np.object / np.bool / np.datetime were removed NumPy aliases
    # (np.datetime never even existed); use the canonical scalar types.
    if df_type == np.int64:
        return {'type': 'integer'}
    if df_type == np.float64:
        return {'type': 'float'}
    if df_type == np.bool_:
        return {'type': 'boolean'}
    # datetime64 dtypes carry a unit (e.g. 'datetime64[ns]'), so an equality
    # test against the generic type fails; issubdtype handles all units.
    if np.issubdtype(df_type, np.datetime64):
        return {'type': 'date'}
    # object columns and anything unrecognized are indexed as text
    return {'type': 'text'}
def df_lookup(data_frame):
    """Build an Elasticsearch type mapping for every column of *data_frame*.

    Args:
        data_frame: input DataFrame

    Returns:
        dict: column name -> Elasticsearch type dict (via map_dtype_to_elk_type)
    """
    return {
        column: map_dtype_to_elk_type(dtype)
        for column, dtype in data_frame.dtypes.items()
    }
def replace_nan_in_files(data_frame):
    """Replace every NaN in *data_frame* with an empty string.

    Args:
        data_frame: input DataFrame

    Returns:
        a new DataFrame with NaN values replaced by ``''``
    """
    return data_frame.replace(np.nan, '', regex=True)
def split_camel_case(input_string):
    """Split a camel-case identifier into space-separated words.

    Args:
        input_string: camel-case string, e.g. ``"CamelCase"``

    Returns:
        str: words separated by single spaces, e.g. ``"Camel Case"``
    """
    # first break before runs of capitals, then before Capitalized words;
    # split()/join collapses the duplicate spaces this produces
    spaced = re.sub('([A-Z]+)', r' \1', input_string)
    spaced = re.sub('([A-Z][a-z]+)', r' \1', spaced)
    return " ".join(spaced.split())
|
{"hexsha": "7e84eea5626ed06368227c1c981da9d3227fa9cc", "size": 2530, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "QualiChain/analyzer", "max_stars_repo_head_hexsha": "e854479af374bc15823e7e930564b3ec88b83096", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "QualiChain/analyzer", "max_issues_repo_head_hexsha": "e854479af374bc15823e7e930564b3ec88b83096", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "QualiChain/analyzer", "max_forks_repo_head_hexsha": "e854479af374bc15823e7e930564b3ec88b83096", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-11T20:40:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T20:40:26.000Z", "avg_line_length": 22.7927927928, "max_line_length": 110, "alphanum_fraction": 0.6339920949, "include": true, "reason": "import numpy", "num_tokens": 595}
|
from sympy.parsing.sympy_parser import parse_expr as parse
from sympy.parsing.sympy_parser import standard_transformations,implicit_multiplication_application
from sympy import latex
from strg import hasq
from sympy import sympify
def gotec(q, mode="equation"):
    """Convert the expression string *q* into a LaTeX string.

    Inline flags embedded in *q* (detected and stripped by ``hasq``):
      * ``h`` / ``hide``: use the unnumbered ``equation*`` environment.
      * ``empty``: render with no math environment ("plain" mode).

    Args:
        q: expression string, possibly containing the flags above
        mode: LaTeX rendering mode passed to sympy.latex

    Returns:
        str: the LaTeX representation (multiplication shown as ``\\cdot``).
    """
    shallhide, q = hasq(q, "h")
    shallhide2, q = hasq(q, "hide")
    modus, q = hasq(q, "empty")
    if modus:
        mode = "plain"
    # hide flags take precedence over "empty"
    if shallhide or shallhide2:
        mode = "equation*"
    # parse unevaluated so the written structure of q is preserved
    p = sympify(q, evaluate=False)
    return latex(p, mul_symbol="dot", mode=mode)
def gostec(q):
    """Render *q* as inline LaTeX by delegating to gotec with mode="inline"."""
    return gotec(q,mode="inline")
|
{"hexsha": "9ad4caed603b28d79e3d58a2d66f60ce68583213", "size": 822, "ext": "py", "lang": "Python", "max_stars_repo_path": "formula.py", "max_stars_repo_name": "psorus/xtech", "max_stars_repo_head_hexsha": "da856b3254e6885353636b1e3b163d5562c502b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "formula.py", "max_issues_repo_name": "psorus/xtech", "max_issues_repo_head_hexsha": "da856b3254e6885353636b1e3b163d5562c502b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "formula.py", "max_forks_repo_name": "psorus/xtech", "max_forks_repo_head_hexsha": "da856b3254e6885353636b1e3b163d5562c502b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6875, "max_line_length": 99, "alphanum_fraction": 0.7408759124, "include": true, "reason": "from sympy", "num_tokens": 250}
|
# coding=utf-8
# main codes, call functions at stokes_flow.py
# Assuming u=u1+u2, u1 is velocity filed due to a stokeslet. u2=-u1 at boundary of a pip.
# Thus, u==0, no-slip boundary condition at the pip.
# Zhang Ji, 20170320
import sys
from typing import Any, Union
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from petsc4py import PETSc
from src.geo import *
from time import time
from scipy.io import savemat
import pickle
def save_vtk(problem: sf.StokesFlowProblem):
    """Export VTK files for *problem*: the object geometry, the velocity
    field, and a finer tunnel geometry used to check the no-slip condition.

    Returns:
        bool: always True; timing is printed via PETSc.Sys.Print.
    """
    t_start = time()
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()  # NOTE(review): unused here; kept from the original call sequence
    kwargs = problem.get_kwargs()
    fileHandle = kwargs['fileHandle']
    tunnel_radius = kwargs['tunnel_radius']
    length = kwargs['length']
    n_tunnel_check = kwargs['n_tunnel_check']
    # main outputs
    problem.vtk_obj(fileHandle)
    problem.vtk_velocity('%s_Velocity' % fileHandle)
    # build a dense, rigid (zero-velocity) check geometry over a third of the tunnel
    check_geo = tunnel_geo()  # pf, force geo
    check_geo.create_n(n_tunnel_check, length / 3, tunnel_radius)
    check_geo.set_rigid_velocity(np.array((0, 0, 0, 0, 0, 0)))
    check_obj = sf.StokesFlowObj()
    check_obj.set_data(check_geo, check_geo)
    problem.vtk_check(fileHandle + '_Check_tunnel', check_obj)
    t_end = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t_end - t_start)))
    return True
def get_problem_kwargs(**main_kwargs):
    """Collect all problem parameters from the PETSc options database.

    Command-line options (e.g. ``-l``, ``-d``, ``-sm``) are read with
    defaults; entries in ``main_kwargs`` override the assembled dict last.

    Returns:
        dict: the full problem-parameter dictionary.
    """
    OptDB = PETSc.Options()
    # geometry and discretization options
    length = OptDB.getReal('l', 3)
    n_tunnel_parts = OptDB.getInt('tunnel_parts', 1)
    tunnel_radius = OptDB.getReal('tunnel_radius', 1)
    deltaLength = OptDB.getReal('d', 0.3)
    epsilon = OptDB.getReal('e', 1)
    # stokeslet strength (fx, fy, fz) and its radial position b
    fx = OptDB.getReal('fx', 1)
    fy = OptDB.getReal('fy', 0)
    fz = OptDB.getReal('fz', 0)
    stokeslets_f = np.array((fx, fy, fz))
    stokeslets_b = OptDB.getReal('stokeslets_b', 0)
    fileHandle = OptDB.getString('f', 'stokeletInPipe')
    solve_method = OptDB.getString('s', 'gmres')
    precondition_method = OptDB.getString('g', 'none')
    plot = OptDB.getBool('plot', False)
    debug_mode = OptDB.getBool('debug', False)
    matrix_method = OptDB.getString('sm', 'rs_stokeslets')
    restart = OptDB.getBool('restart', False)
    grid_para = OptDB.getInt('ngrid', 20)
    twoPara_n = OptDB.getInt('tp_n', 1)
    legendre_m = OptDB.getInt('legendre_m', 3)
    legendre_k = OptDB.getInt('legendre_k', 2)
    n_tunnel_check = OptDB.getInt('n_tunnel_check', 30000)
    n_node_threshold = OptDB.getInt('n_threshold', 10000)
    getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
    pickProblem = OptDB.getBool('pickProblem', False)
    xRange1 = OptDB.getReal('xRange1', 0)
    xRange2 = OptDB.getReal('xRange2', 1)
    xfactor = OptDB.getReal('xfactor', 1)
    # stokeslet position vector (0, b, 0)
    stokeslets_post = np.hstack((0, stokeslets_b, 0))
    # field_range: describe a sector area.
    region_type = 'sector'
    theta = np.pi * 2
    field_range = np.array([[xRange1, 0, -theta / 2 + np.pi / 2], [xRange2, tunnel_radius, theta / 2 + np.pi / 2]])
    # field_range = np.array([[0, 0, -theta / 2 + np.pi / 2], [length / 20, tunnel_radius, theta / 2 + np.pi / 2]])
    # number of grid points per axis, scaled by grid_para and per-axis factors
    temp = np.abs(field_range[0] - field_range[1]) * (xfactor, 1, tunnel_radius / 2) * grid_para
    n_grid = np.array(temp, dtype='int')
    problem_kwargs = {
        'name':                  'stokeletInPipePrb',
        'matrix_method':         matrix_method,
        'tunnel_radius':         tunnel_radius,
        'length':                length,
        'deltaLength':           deltaLength,
        'epsilon':               epsilon,
        'delta':                 deltaLength * epsilon,  # for rs method
        'd_radia':               deltaLength / 2,  # for sf method
        'solve_method':          solve_method,
        'precondition_method':   precondition_method,
        'field_range':           field_range,
        'n_grid':                n_grid,
        'plot':                  plot,
        'debug_mode':            debug_mode,
        'fileHandle':            fileHandle,
        'region_type':           region_type,
        'twoPara_n':             twoPara_n,
        'legendre_m':            legendre_m,
        'legendre_k':            legendre_k,
        'stokeslets_f':          stokeslets_f,
        'stokeslets_b':          stokeslets_b,
        'stokeslets_post':       stokeslets_post,
        'n_tunnel_parts':        n_tunnel_parts,
        'restart':               restart,
        'n_tunnel_check':        n_tunnel_check,
        'n_node_threshold':      n_node_threshold,
        'getConvergenceHistory': getConvergenceHistory,
        'pickProblem':           pickProblem
    }
    # explicit caller overrides win over the options database
    for key in main_kwargs:
        problem_kwargs[key] = main_kwargs[key]
    return problem_kwargs
def print_case_info(**problem_kwargs):
    """Print a summary of the case parameters and validate the matrix method.

    Keyword Args:
        the problem dictionary produced by :func:`get_problem_kwargs`.

    Raises:
        AssertionError: if 'matrix_method' is not an accepted type.
        Exception: if an accepted method has no print branch below.
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    size = comm.Get_size()
    fileHandle = problem_kwargs['fileHandle']
    tunnel_radius = problem_kwargs['tunnel_radius']
    deltaLength = problem_kwargs['deltaLength']
    matrix_method = problem_kwargs['matrix_method']
    stokeslets_f = problem_kwargs['stokeslets_f']
    length = problem_kwargs['length']
    stokeslets_b = problem_kwargs['stokeslets_b']
    PETSc.Sys.Print('tunnel length: %f, tunnel radius: %f, delta length: %f, velocity: %f'
                    % (length, tunnel_radius, deltaLength, 0))
    PETSc.Sys.Print('stokeslets: %s, stokeslets position: %f' % (str(stokeslets_f), stokeslets_b))
    acceptType = ('rs_stokeslets', 'tp_rs_stokeslets', 'lg_rs_stokeslets', 'rs_stokeslets_precondition',
                  'tp_rs_stokeslets_precondition', 'lg_rs_stokeslets_precondition',
                  'pf')
    # the message is derived from acceptType so the two can never drift apart
    # (the old text wrongly listed 'ps_stokeslets' and omitted *_precondition).
    err_msg = 'matrix_method must be one of %s for this main code. ' % str(acceptType)
    assert matrix_method in acceptType, err_msg
    epsilon = problem_kwargs['epsilon']
    if matrix_method in ('rs_stokeslets', 'rs_stokeslets_precondition'):
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f'
                        % (matrix_method, epsilon))
    elif matrix_method in ('tp_rs_stokeslets', 'tp_rs_stokeslets_precondition'):
        twoPara_n = problem_kwargs['twoPara_n']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, order: %d'
                        % (matrix_method, epsilon, twoPara_n))
    elif matrix_method in ('lg_rs_stokeslets', 'lg_rs_stokeslets_precondition'):
        legendre_m = problem_kwargs['legendre_m']
        legendre_k = problem_kwargs['legendre_k']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, m: %d, k: %d, p: %d'
                        % (matrix_method, epsilon, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
    elif matrix_method == 'pf':
        # was `matrix_method in 'pf'` — a substring test that also matched 'p', 'f', ''
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f' % (matrix_method, epsilon))
    else:
        raise Exception('set how to print matrix method please. ')
    solve_method = problem_kwargs['solve_method']
    precondition_method = problem_kwargs['precondition_method']
    PETSc.Sys.Print('solve method: %s, precondition method: %s'
                    % (solve_method, precondition_method))
    PETSc.Sys.Print('output file headle: ' + fileHandle)
    PETSc.Sys.Print('MPI size: %d' % size)
# @profile
def main_fun(**main_kwargs):
    """Build and solve the stokeslet-in-pipe Stokes-flow problem.

    With ``restart`` false, the tunnel geometry is created, the linear system
    assembled and solved, and VTK output written; with ``restart`` true, a
    previously pickled problem is reloaded instead.

    Returns:
        tuple: (problem, residualNorm)
    """
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    fileHandle = problem_kwargs['fileHandle']
    stokeslets_post = problem_kwargs['stokeslets_post']
    stokeslets_f = problem_kwargs['stokeslets_f']
    tunnel_radius = problem_kwargs['tunnel_radius']
    if not problem_kwargs['restart']:
        deltaLength = problem_kwargs['deltaLength']
        matrix_method = problem_kwargs['matrix_method']
        length = problem_kwargs['length']
        n_tunnel_parts = problem_kwargs['n_tunnel_parts']
        print_case_info(**problem_kwargs)
        problem = problem_dic[matrix_method](**problem_kwargs)
        if problem_kwargs['pickProblem']:
            # do NOT save anything really, just check if the path is correct, to avoid this error after long time calculation.
            problem.pickmyself(fileHandle, ifcheck=True)
        # The tunnel is divided into n objects having a similar length.
        tunnel_geo_u = stokeslets_tunnel_geo()
        part_length = length / n_tunnel_parts - (1 - 1 / n_tunnel_parts) * deltaLength
        err_msg = 'length of each part of object >= deltaLength.'
        assert part_length > deltaLength, err_msg
        tunnel_geo_u.create_deltaz(deltaLength, part_length, tunnel_radius)
        # BUG FIX: was `tunnel_geo.node_rotation(...)` — an unbound call on the
        # class imported from src.geo (TypeError: missing self); rotate the
        # instance that was just created.
        tunnel_geo_u.node_rotation(norm=np.array((0, 1, 0)), theta=np.pi / 2)
        move_dist = np.array([-(length - part_length) / 2, 0, 0])
        tunnel_geo_u.move(move_dist)
        tunnel_geo_u.stokeslets_velocity(problem)
        tunnel_geo_f = tunnel_geo_u.copy()
        if matrix_method in sf.two_geo_method_list:
            epsilon = problem_kwargs['epsilon']
            tunnel_geo_f.node_zoom_radius((tunnel_radius + deltaLength * epsilon) / tunnel_radius)
        obj_tunnel = obj_dic[matrix_method]()
        obj_tunnel_kwargs = {'name':            'tunnelObj_0',
                             'stokeslets_post': stokeslets_post,
                             'stokeslets_f':    stokeslets_f}
        obj_tunnel.set_data(tunnel_geo_f, tunnel_geo_u, **obj_tunnel_kwargs)
        problem.add_obj(obj_tunnel)
        # remaining tunnel parts are shifted copies of the first object
        for i in np.arange(1, n_tunnel_parts):
            move_dist = np.array([part_length + deltaLength, 0, 0]) * i
            obj2 = obj_tunnel.copy()
            obj2.move(move_dist)
            tunnel_geo_u = obj2.get_u_geo()
            tunnel_geo_f = obj2.get_f_geo()
            tunnel_geo_u.stokeslets_velocity(problem)
            obj2_kwargs = {'name': 'tunnelObj_%d' % (i)}
            obj2.set_data(tunnel_geo_f, tunnel_geo_u, **obj2_kwargs)
            problem.add_obj(obj2)
        problem.print_info()
        problem.create_matrix()
        residualNorm = problem.solve()
        save_vtk(problem)
        if problem_kwargs['pickProblem']:
            problem.pickmyself(fileHandle)
    else:
        # restart: reload a previously pickled problem instead of rebuilding.
        # renamed the context variable — `input` shadowed the builtin.
        with open(fileHandle + '_pick.bin', 'rb') as pick_file:
            unpick = pickle.Unpickler(pick_file)
            problem = unpick.load()
            problem.unpick_myself()
            residualNorm = problem.get_residualNorm()
            PETSc.Sys.Print('---->>>unpick the problem from file %s.pickle' % (fileHandle))
            problem_kwargs = get_problem_kwargs(**main_kwargs)
            problem.set_kwargs(**problem_kwargs)
            save_vtk(problem)
    return problem, residualNorm
# @profile
def view_matrix(m, **kwargs):
    """Display matrix *m* with matplotlib's matshow.

    Args:
        m: 2-D array-like to display
        **kwargs: optional overrides for 'vmin', 'vmax', 'title', 'cmap';
            unknown keyword arguments are silently ignored.
    """
    args = {
        'vmin':  None,
        'vmax':  None,
        'title': ' ',
        'cmap':  None
    }
    # iterate keys only — the old `.items()` loop never used the values
    for key in args:
        if key in kwargs:
            args[key] = kwargs[key]
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cax = ax.matshow(m,
                     origin='lower',
                     vmin=args['vmin'],
                     vmax=args['vmax'],
                     cmap=plt.get_cmap(args['cmap']))
    fig.colorbar(cax)
    plt.title(args['title'])
    plt.show()
def casebank():
    """Print one mpirun shell command per pre-defined case.

    The case set is the Cartesian product of stokeslet configurations,
    tunnel lengths and matrix-method/epsilon/delta combinations; each
    output file handle is assembled from the three corresponding tags.
    """
    # common solver settings shared by every case
    ksp_rtol = 1e-6
    n_tunnel_check = 30000
    pickProblem = 'True'
    xRange1 = -0.5
    xRange2 = 0.5
    ngrid = 20
    xfactor = 10
    ksp_max_it = 1e4
    # (b, fx, fy, fz): stokeslet radial position and force components
    stokeslets_list = ((0.2, 1, 0, 0),
                       (0.2, 0, 1, 0),
                       (0.2, 0, 0, 1),
                       (0.9, 1, 0, 0),
                       (0.9, 0, 1, 0),
                       (0.9, 0, 0, 1),)
    fileHeadle1_list = ('b02_100',
                        'b02_010',
                        'b02_001',
                        'b09_100',
                        'b09_010',
                        'b09_001',)
    length_list = (70,)
    fileHeadle2_list = ['_l%d' % l for l in length_list]
    # (matrix method, epsilon, delta length) per case
    sm_e_d_tube = (('lg_rs_stokeslets', 6, 0.15),
                   ('pf', 2, 0.15))
    fileHeadle3_list = ('_lg_e6',
                        '_pf_e2',)
    # zip pairs each tag list with its parameter list in lockstep
    for fileHeadle1, (b, fx, fy, fz) in zip(fileHeadle1_list, stokeslets_list):
        for fileHeadle2, l in zip(fileHeadle2_list, length_list):
            for fileHeadle3, (sm, e, d) in zip(fileHeadle3_list, sm_e_d_tube):
                fileHandle = fileHeadle1 + fileHeadle2 + fileHeadle3
                kwargs = '-l %f -d %f -e %f -fx %f -fy %f -fz %f -stokeslets_b %f -sm %s -f %s ' \
                         '-n_tunnel_check %d -pickProblem %s -ksp_rtol %f -ksp_max_it %d ' \
                         '-xRange1 %f -xRange2 %f -ngrid %d -xfactor %d ' % \
                         (l, d, e, fx, fy, fz, b, sm, fileHandle,
                          n_tunnel_check, pickProblem, ksp_rtol, ksp_max_it,
                          xRange1, xRange2, ngrid, xfactor)
                PETSc.Sys.Print('echo \'-------------------------------------------->>>>>>%s\'; '
                                'mpirun -n 24 python ../../StokesletInPipe.py %s > %s.txt' %
                                (fileHandle, kwargs, fileHandle))
if __name__ == '__main__':
    # casebank()  # uncomment to print the batch-run command lines instead of solving
    main_fun()
|
{"hexsha": "cc1951cb375402c392ac7b9ff3be76a5a40e84b2", "size": 15768, "ext": "py", "lang": "Python", "max_stars_repo_path": "sphereInPipe/StokesletInPipe.py", "max_stars_repo_name": "pcmagic/stokes_flow", "max_stars_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-11T05:00:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-11T05:00:53.000Z", "max_issues_repo_path": "sphereInPipe/StokesletInPipe.py", "max_issues_repo_name": "pcmagic/stokes_flow", "max_issues_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sphereInPipe/StokesletInPipe.py", "max_forks_repo_name": "pcmagic/stokes_flow", "max_forks_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.048, "max_line_length": 137, "alphanum_fraction": 0.5610096398, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4225}
|
import mapchete
from mapchete.errors import MapcheteConfigError
from mapchete.formats import available_output_formats
import numpy as np
import pytest
import xarray as xr
import dateutil
import json
from mapchete.testing import get_process_mp
def test_format_available():
    """The "xarray" driver is registered among the available output formats."""
    formats = available_output_formats()
    assert "xarray" in formats
def test_write_read_output(example_config):
    """Write one process tile and read it back as a DataArray; empty and
    all-zero writes must leave the tile reported as non-existing."""
    with mapchete.open(example_config.dict) as mp:
        data_tile = next(mp.get_process_tiles(5))
        # basic functions
        empty_xarr = mp.config.output.empty(data_tile)
        assert isinstance(empty_xarr, xr.DataArray)
        assert mp.config.output.get_path(data_tile)
        # check if tile exists
        assert not mp.config.output.tiles_exist(data_tile)
        # write
        mp.batch_process(tile=data_tile.id)
        # check if tile exists
        assert mp.config.output.tiles_exist(data_tile)
        # check if output_tile exists
        assert mp.config.output.tiles_exist(output_tile=data_tile)
        # read again, this time with data
        xarr = mp.config.output.read(data_tile)
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("time", "bands", "x", "y")).difference(set(xarr.dims))
        # handle empty data
        process_tile = next(mp.get_process_tiles(6))
        mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
        # write nodata array
        process_tile = next(mp.get_process_tiles(7))
        mp.config.output.write(process_tile, xr.DataArray(np.zeros(process_tile.shape)))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_read_from_tile_directory(xarray_tiledir_input_mapchete, written_output):
    """Read written xarray output through a tile-directory input; a process
    metatiling larger than the output's must raise MapcheteConfigError."""
    # read from xarray tile directory output
    with mapchete.open(
        dict(
            xarray_tiledir_input_mapchete.dict,
            input=dict(xarray_output=written_output.dict["output"]["path"]),
        )
    ) as mp:
        data_tile = mp.config.process_pyramid.tile(5, 0, 0)
        tile = mp.config.process_pyramid.tile(5, 0, 0)
        user_process = mapchete.MapcheteProcess(
            tile=tile,
            params=mp.config.params_at_zoom(tile.zoom),
            input=mp.config.get_inputs_for_tile(tile),
        )
        xarr_tile = user_process.open("xarray_output")
        assert not xarr_tile.is_empty()
        xarr = xarr_tile.read()
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert ("time", "bands", "x", "y") == xarr.dims
        assert xarr.data.shape[-2:] == data_tile.shape
    # raise error if process metatiling is bigger than output metatiling
    with mapchete.open(
        dict(
            xarray_tiledir_input_mapchete.dict,
            input=dict(xarray_output=written_output.dict["output"]["path"]),
            pyramid=dict(xarray_tiledir_input_mapchete.dict["pyramid"], metatiling=4),
        )
    ) as mp:
        with pytest.raises(MapcheteConfigError):
            tile = mp.config.process_pyramid.tile(5, 0, 0)
            user_process = (
                mapchete.MapcheteProcess(
                    tile=tile,
                    params=mp.config.params_at_zoom(tile.zoom),
                    input=mp.config.get_inputs_for_tile(tile),
                )
                .open("xarray_output")
                .read()
            )
def test_tile_directory_grid_error(xarray_tiledir_input_mapchete, written_output):
    """Reading with a mismatching pyramid grid must raise MapcheteConfigError."""
    # raise error if tile pyramid grid differs
    with mapchete.open(
        dict(
            xarray_tiledir_input_mapchete.dict,
            input=dict(xarray_output=written_output.dict["output"]["path"]),
            pyramid=dict(grid="mercator"),
        )
    ) as mp:
        with pytest.raises(MapcheteConfigError):
            tile = mp.config.process_pyramid.tile(5, 0, 0)
            mapchete.MapcheteProcess(
                tile=tile,
                params=mp.config.params_at_zoom(tile.zoom),
                input=mp.config.get_inputs_for_tile(tile),
            ).open("xarray_output").read()
def test_read_from_mapchete_output(xarray_mapchete_input_mapchete, written_output):
    """Read written xarray output through a mapchete-file input; a larger
    process metatiling must raise MapcheteConfigError."""
    # read from xarray tile directory output
    with mapchete.open(
        dict(
            xarray_mapchete_input_mapchete.dict,
            input=dict(xarray_output=written_output.dict["output"]["path"]),
        )
    ) as mp:
        tile = mp.config.process_pyramid.tile(5, 0, 0)
        user_process = mapchete.MapcheteProcess(
            tile=tile,
            params=mp.config.params_at_zoom(tile.zoom),
            input=mp.config.get_inputs_for_tile(tile),
        )
        xarr_tile = user_process.open("xarray_output")
        assert not xarr_tile.is_empty()
        xarr = xarr_tile.read()
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert ("time", "bands", "x", "y") == xarr.dims
        assert xarr.data.shape[-2:] == tile.shape
    # raise error if process metatiling is bigger than output metatiling
    with mapchete.open(
        dict(
            xarray_mapchete_input_mapchete.dict,
            input=dict(xarray_output=written_output.dict["output"]["path"]),
            pyramid=dict(xarray_mapchete_input_mapchete.dict["pyramid"], metatiling=4),
        )
    ) as mp:
        with pytest.raises(MapcheteConfigError):
            tile = mp.config.process_pyramid.tile(5, 0, 0)
            user_process = (
                mapchete.MapcheteProcess(
                    tile=tile,
                    params=mp.config.params_at_zoom(tile.zoom),
                    input=mp.config.get_inputs_for_tile(tile),
                )
                .open("xarray_output")
                .read()
            )
def test_write_read_remote_netcdf_output(example_config, mp_s3_tmpdir):
    """Same write/read round trip as the local NetCDF test, but against an
    S3 output path (mp_s3_tmpdir)."""
    with mapchete.open(
        dict(
            example_config.dict,
            output=dict(example_config.dict["output"], path=mp_s3_tmpdir),
        )
    ) as mp:
        data_tile = next(mp.get_process_tiles(5))
        # basic functions
        empty_xarr = mp.config.output.empty(data_tile)
        assert isinstance(empty_xarr, xr.DataArray)
        assert mp.config.output.get_path(data_tile).endswith(".nc")
        # check if tile exists
        assert not mp.config.output.tiles_exist(data_tile)
        # write
        mp.batch_process(tile=data_tile.id)
        # check if tile exists
        assert mp.config.output.tiles_exist(data_tile)
        # read again, this time with data
        xarr = mp.config.output.read(data_tile)
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("time", "bands", "x", "y")).difference(set(xarr.dims))
        # handle empty data
        process_tile = next(mp.get_process_tiles(6))
        mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
        # write nodata array
        process_tile = next(mp.get_process_tiles(7))
        mp.config.output.write(process_tile, xr.DataArray(np.zeros(process_tile.shape)))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_write_read_zarr_output(zarr_mapchete):
    """Write/read round trip for the zarr output driver, including empty and
    all-zero (nodata) tiles."""
    with mapchete.open(zarr_mapchete.dict) as mp:
        data_tile = next(mp.get_process_tiles(5))
        # basic functions
        empty_xarr = mp.config.output.empty(data_tile)
        assert isinstance(empty_xarr, xr.DataArray)
        assert mp.config.output.get_path(data_tile).endswith(".zarr")
        # check if process_tile exists
        assert not mp.config.output.tiles_exist(data_tile)
        # check if output_tile exists
        assert not mp.config.output.tiles_exist(output_tile=data_tile)
        # write
        mp.batch_process(tile=data_tile.id)
        # check if tile exists
        assert mp.config.output.tiles_exist(data_tile)
        # read again, this time with data
        xarr = mp.config.output.read(data_tile)
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("time", "bands", "x", "y")).difference(set(xarr.dims))
        # handle empty data
        process_tile = next(mp.get_process_tiles(6))
        mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
        # write nodata array
        process_tile = next(mp.get_process_tiles(7))
        mp.config.output.write(process_tile, xr.DataArray(np.zeros(process_tile.shape)))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_write_read_remote_zarr_output(zarr_mapchete, mp_s3_tmpdir):
    """Same zarr write/read round trip, but against an S3 output path."""
    with mapchete.open(
        dict(
            zarr_mapchete.dict,
            output=dict(zarr_mapchete.dict["output"], path=mp_s3_tmpdir),
        )
    ) as mp:
        data_tile = next(mp.get_process_tiles(5))
        # basic functions
        empty_xarr = mp.config.output.empty(data_tile)
        assert isinstance(empty_xarr, xr.DataArray)
        assert mp.config.output.get_path(data_tile).endswith(".zarr")
        # check if tile exists
        assert not mp.config.output.tiles_exist(data_tile)
        # write
        mp.batch_process(tile=data_tile.id)
        # check if tile exists
        assert mp.config.output.tiles_exist(data_tile)
        # read again, this time with data
        xarr = mp.config.output.read(data_tile)
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("time", "bands", "x", "y")).difference(set(xarr.dims))
        # handle empty data
        process_tile = next(mp.get_process_tiles(6))
        mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
        # write nodata array
        process_tile = next(mp.get_process_tiles(7))
        mp.config.output.write(process_tile, xr.DataArray(np.zeros(process_tile.shape)))
        # check if tile exists
        assert not mp.config.output.tiles_exist(process_tile)
        xarr = mp.config.output.read(process_tile)
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_errors(zarr_mapchete):
    """Invalid argument combinations raise ValueError: passing both
    process_tile and output_tile, and an unknown zarr storage backend."""
    with mapchete.open(zarr_mapchete.dict) as mp:
        data_tile = next(mp.get_process_tiles(5))
        with pytest.raises(ValueError):
            mp.config.output.tiles_exist(process_tile=data_tile, output_tile=data_tile)
    with pytest.raises(ValueError):
        mapchete.open(
            dict(
                zarr_mapchete.dict,
                output=dict(zarr_mapchete.dict["output"], storage="invalid"),
            )
        )
def test_input_data(written_output):
    """Open written xarray output as a process input; it reads as an empty
    DataArray at metatiling 2."""
    output_path = written_output.dict["output"]["path"]
    process_mp = get_process_mp(
        input={"xarray": output_path},
        tile=written_output.first_process_tile(),
        metatiling=2,
    )
    xarr_input = process_mp.open("xarray")
    assert xarr_input.is_empty()
    data = xarr_input.read()
    assert isinstance(data, xr.DataArray)
def test_single_zarr(zarr_single_mapchete):
    """Single-zarr output: write a tile, read per-variable DataArrays back,
    and verify consolidated .zmetadata attributes per variable."""
    mp = zarr_single_mapchete.mp()
    data_tile = zarr_single_mapchete.first_process_tile()
    # basic functions
    for empty_xarr in mp.config.output.empty(data_tile):
        assert isinstance(empty_xarr, xr.DataArray)
    assert mp.config.output.get_path(data_tile)
    # check if tile exists
    assert not mp.config.output.tiles_exist(data_tile)
    # write
    mp.batch_process(tile=data_tile.id)
    # check if tile exists
    assert mp.config.output.tiles_exist(data_tile)
    # check if output_tile exists
    assert mp.config.output.tiles_exist(output_tile=data_tile)
    with open(mp.config.output.path + "/.zmetadata") as f:
        zmetadata = json.load(f)
    # read again, this time with data
    for xarr in mp.config.output.read(data_tile):
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("X", "Y")).difference(set(xarr.dims))
        zattrs = zmetadata["metadata"].get(f"{xarr.name}/.zattrs")
        for attr in ["AREA_OR_POINT", "_ARRAY_DIMENSIONS", "_CRS"]:
            assert attr in zattrs
def test_single_zarr_empty(zarr_single_mapchete):
    """Writing empty or all-zero data to a single zarr leaves the tile
    reported as non-existing and reading returns empty DataArrays."""
    mp = zarr_single_mapchete.mp()
    # handle empty data
    process_tile = zarr_single_mapchete.first_process_tile()
    mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
    # check if tile exists
    assert not mp.config.output.tiles_exist(process_tile)
    for xarr in mp.config.output.read(process_tile):
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
    # write empty DataArray
    mp.config.output.write(
        process_tile, xr.DataArray(np.zeros((3, *process_tile.shape)))
    )
    # check if tile exists
    assert not mp.config.output.tiles_exist(process_tile)
    for xarr in mp.config.output.read(process_tile):
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_single_zarr_smaller_metatiling(zarr_single_mapchete):
    """Placeholder: disabled test for writing with a smaller process
    metatiling than the one the zarr was created with (body kept below)."""
    pass
    # zarr_single_mapchete.dict["pyramid"]["metatiling"] = 2
    # zarr_single_mapchete._mp = None
    #
    # mp = zarr_single_mapchete.mp()
    # data_tile = zarr_single_mapchete.first_process_tile()
    # mp.batch_process(tile=data_tile.id)
    #
    # zarr_single_mapchete.dict["pyramid"]["metatiling"] = 1
    # zarr_single_mapchete._mp = None
    #
    # mp = zarr_single_mapchete.mp()
    # data_tile_id = (data_tile.id[0], data_tile.id[1] + 3, data_tile.id[2] + 3)
    #
    # # write
    # with pytest.raises(ValueError):
    #     mp.batch_process(tile=data_tile_id)
def test_single_zarr_s3(zarr_single_s3_mapchete):
    """Single-zarr write/read round trip against an S3-backed fixture."""
    mp = zarr_single_s3_mapchete.mp()
    data_tile = zarr_single_s3_mapchete.first_process_tile()
    # basic functions
    for empty_xarr in mp.config.output.empty(data_tile):
        assert isinstance(empty_xarr, xr.DataArray)
    assert mp.config.output.get_path(data_tile)
    # check if tile exists
    assert not mp.config.output.tiles_exist(data_tile)
    # write
    mp.batch_process(tile=data_tile.id)
    # check if tile exists
    assert mp.config.output.tiles_exist(data_tile)
    # check if output_tile exists
    assert mp.config.output.tiles_exist(output_tile=data_tile)
    # read again, this time with data
    for xarr in mp.config.output.read(data_tile):
        assert isinstance(xarr, xr.DataArray)
        assert xarr.data.all()
        assert not set(("X", "Y")).difference(set(xarr.dims))
def test_single_zarr_empty_s3(zarr_single_s3_mapchete):
    """Empty/all-zero writes to an S3-backed single zarr leave the tile
    reported as non-existing."""
    mp = zarr_single_s3_mapchete.mp()
    # handle empty data
    process_tile = zarr_single_s3_mapchete.first_process_tile()
    mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
    # check if tile exists
    assert not mp.config.output.tiles_exist(process_tile)
    for xarr in mp.config.output.read(process_tile):
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
    # write empty DataArray
    mp.config.output.write(
        process_tile, xr.DataArray(np.zeros((3, *process_tile.shape)))
    )
    # check if tile exists
    assert not mp.config.output.tiles_exist(process_tile)
    for xarr in mp.config.output.read(process_tile):
        assert isinstance(xarr, xr.DataArray)
        assert not xarr.data.any()
def test_single_zarr_time(zarr_single_time_mapchete):
    """Write one process tile to a time-enabled zarr and read it back."""
    mp = zarr_single_time_mapchete.mp()
    data_tile = zarr_single_time_mapchete.first_process_tile()
    # empty() must yield DataArrays and the output path must be resolvable
    for arr in mp.config.output.empty(data_tile):
        assert isinstance(arr, xr.DataArray)
    assert mp.config.output.get_path(data_tile)
    # nothing has been written yet
    assert not mp.config.output.tiles_exist(data_tile)
    # write the tile
    mp.batch_process(tile=data_tile.id)
    # now it exists, both addressed as process tile and as output tile
    assert mp.config.output.tiles_exist(data_tile)
    assert mp.config.output.tiles_exist(output_tile=data_tile)
    # read back: arrays with data and a time axis on top of the spatial dims
    for arr in mp.config.output.read(data_tile):
        assert isinstance(arr, xr.DataArray)
        assert arr.data.any()
        assert {"time", "X", "Y"}.issubset(set(arr.dims))
def test_single_zarr_time_empty(zarr_single_time_mapchete):
    """Writing empty output to a time-enabled zarr must not create the tile."""
    mp = zarr_single_time_mapchete.mp()
    process_tile = zarr_single_time_mapchete.first_process_tile()
    # writing the canonical empty output is a no-op on storage
    mp.config.output.write(process_tile, mp.config.output.empty(process_tile))
    assert not mp.config.output.tiles_exist(process_tile)
    for arr in mp.config.output.read(process_tile):
        assert isinstance(arr, xr.DataArray)
        assert not arr.data.any()
    # write an all-zeros DataArray with explicit time coordinates
    # NOTE: this probably will fail because DataArray has no time coords
    timestamps = [
        dateutil.parser.parse(day)
        for day in ["2022-03-01", "2022-03-04", "2022-03-07", "2022-03-09"]
    ]
    zeros = xr.DataArray(
        data=np.zeros((3, len(timestamps), *process_tile.shape)),
        dims=["band", "time", "Y", "X"],
        coords={"time": timestamps},
    )
    mp.config.output.write(process_tile, zeros)
    assert not mp.config.output.tiles_exist(process_tile)
    for arr in mp.config.output.read(process_tile):
        assert isinstance(arr, xr.DataArray)
        assert not arr.data.any()
|
{"hexsha": "e9beec547509b32b7bbbfcecca5122473282d6d5", "size": 18590, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_format.py", "max_stars_repo_name": "ungarj/mapchete_xarray", "max_stars_repo_head_hexsha": "e42b2f8f38871c415ba12de0299d30f5618227c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_format.py", "max_issues_repo_name": "ungarj/mapchete_xarray", "max_issues_repo_head_hexsha": "e42b2f8f38871c415ba12de0299d30f5618227c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-05-08T08:12:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T12:39:02.000Z", "max_forks_repo_path": "tests/test_format.py", "max_forks_repo_name": "ungarj/mapchete_xarray", "max_forks_repo_head_hexsha": "e42b2f8f38871c415ba12de0299d30f5618227c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T13:50:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T13:50:31.000Z", "avg_line_length": 35.5449330784, "max_line_length": 88, "alphanum_fraction": 0.6592791824, "include": true, "reason": "import numpy", "num_tokens": 4266}
|
import hashlib
import math
import os
import random
from datetime import datetime, timedelta
from functools import reduce
import numpy as np
import pandas as pd
from faker import Faker
class GenerateData:
    """Generate a synthetic credit-card transaction data set with injected
    fraudulent "chain attack" transactions.

    Regular transactions are spread over ``[start_date, end_date]`` with
    amounts drawn from ``self.distribution`` (share of transactions ->
    amount range).  Afterwards roughly ``fraud_ratio`` of the transactions
    are rewritten in place as fraud chains: bursts of transactions on a
    single card, 30-120 seconds apart.
    """

    def __init__(self, num_users, num_txn, fraud_ratio, start_date, end_date):
        """Store generation parameters and seed all RNGs for reproducibility.

        :param num_users: number of distinct credit cards to create
        :param num_txn: total number of transactions to generate
        :param fraud_ratio: fraction of transactions turned into fraud
        :param start_date: inclusive range start, "%Y-%m-%d %H:%M:%S"
        :param end_date: inclusive range end, "%Y-%m-%d %H:%M:%S"
        """
        self.num_users = num_users
        self.num_txn = num_txn
        self.faker = Faker()
        self.seed = 123
        # seed every RNG involved (random, numpy, faker) so runs repeat
        random.seed(self.seed)
        np.random.seed(self.seed)
        self.faker.seed_locale("en_US", 0)
        self.faker.seed_instance(self.seed)
        # timestamp format used for every "ts" field
        self.srtf = "%Y-%m-%d %H:%M:%S"
        self.start_date = start_date
        self.end_date = end_date
        # share of transactions -> (min, max) amount for that bucket
        self.distribution = {
            0.05: (0.01, 1.01),
            0.075: (1, 11.01),
            0.525: (10, 100.01),
            0.25: (100, 1000.01),
            0.10: (1000, 10000.01),
        }
        self.fraud_ratio = fraud_ratio
        # candidate fraud-chain lengths (a sorted sample of all of 3..10)
        self.chain_attack = sorted(random.sample(range(3, 11), 8))
        self.data = []

    def generate_cc_num(self):
        """Generate Visa card numbers, nominally one per user.

        NOTE(review): numbers are deduplicated through a set, so faker
        collisions would silently yield fewer than ``num_users`` cards —
        confirm this is acceptable.
        """
        cc_num = set()
        for _ in range(self.num_users):
            cc_id = self.faker.credit_card_number(card_type="visa")
            cc_num.add(cc_id)
        return list(cc_num)

    def generate_timestamps(self):
        """Return ``num_txn`` random timestamps within the configured range."""
        start = datetime.strptime(self.start_date, self.srtf)
        end = datetime.strptime(self.end_date, self.srtf)
        timestamps = [
            self.faker.date_time_between(
                start_date=start, end_date=end, tzinfo=None
            ).strftime(self.srtf)
            for _ in range(self.num_txn)
        ]
        return timestamps

    def generate_transaction_amount(self):
        """Draw whole-unit amounts following ``self.distribution``.

        Bucket sizes are truncated with int(), so slightly fewer than
        ``num_txn`` amounts may be produced; the caller zips against the
        timestamps, which truncates to the shorter list.
        """
        amt = []
        for share, (low, high) in self.distribution.items():
            n = int(self.num_txn * share)
            for _ in range(n):
                amt.append(round(np.random.uniform(low, high)))
        random.shuffle(amt)
        return amt

    def generate_transaction_id(self, timestamp, cc_num, amt):
        """Deterministic transaction id: md5 over timestamp+card+amount."""
        hashing = f"{timestamp}{cc_num}{amt}"
        return hashlib.md5(hashing.encode("utf-8")).hexdigest()

    def generate_transactions(self):
        """Build the full data set (regular + injected fraud) as a DataFrame."""
        chain = self.create_fraud_chain_attack()
        cc_num = self.generate_cc_num()
        timestamps = self.generate_timestamps()
        amts = self.generate_transaction_amount()
        for timestamp, amt in zip(timestamps, amts):
            cc = random.choice(cc_num)
            txn = self.generate_transaction_id(timestamp, cc, amt)
            self.data.append(
                {"txn_id": txn, "cc_num": cc, "ts": timestamp, "amt": amt, "label": 0}
            )
        self.inject_fraud_txn(chain)
        return pd.DataFrame(self.data)

    def chain_length(self, chain):
        """Total number of transaction indices across all chains."""
        return reduce(lambda count, c: count + len(c), chain, 0)

    def create_fraud_chain_attack(self):
        """Pick runs of consecutive transaction indices to become fraud chains.

        Returns a list of index lists; every index appears at most once
        across all chains and is bounded by ``num_txn``.
        """
        num_attacks = int(self.fraud_ratio * self.num_txn)
        chains = []
        check_duplicates = set()
        while self.chain_length(chains) < num_attacks:
            idx = random.choice(range(self.num_txn))
            chain_length = random.choice(self.chain_attack)
            if idx not in check_duplicates:
                freq = [idx]
                check_duplicates.add(idx)
                for i in range(1, chain_length):
                    # BUG FIX: also bound follow-up indices by num_txn —
                    # previously idx + i could run past the data and crash
                    # inject_fraud_txn with an IndexError
                    if idx + i < self.num_txn and idx + i not in check_duplicates:
                        if self.chain_length(chains) == num_attacks:
                            break
                        freq.append(idx + i)
                        check_duplicates.add(idx + i)
                chains.append(freq)
        return chains

    def generate_timestamps_for_fraud_attacks(self, timestamp, chain_length):
        """Return ``chain_length`` timestamps, each 30-120 s after the last."""
        timestamps = []
        timestamp = datetime.strptime(timestamp, self.srtf)
        for _ in range(chain_length):
            delta = random.randint(30, 120)
            current = timestamp + timedelta(seconds=delta)
            timestamps.append(current.strftime(self.srtf))
            timestamp = current
        return timestamps

    def generate_amounts_for_fraud_attacks(self, chain_length):
        """Return ``chain_length`` fraud amounts following the distribution."""
        amounts = []
        for percentage, span in self.distribution.items():
            n = math.ceil(chain_length * percentage)
            start, end = span
            for _ in range(n):
                amounts.append(round(np.random.uniform(start, end + 1), 2))
        return amounts[:chain_length]

    def inject_fraud_txn(self, chain):
        """Rewrite the scheduled chain indices in place as fraud transactions.

        The first index of each chain keeps its own values and is only
        flagged; the remaining ones inherit its card, get timestamps a few
        minutes apart, fresh amounts, and a matching transaction id.
        """
        for idx in chain:
            start_chain_id = idx.pop(0)
            if start_chain_id >= len(self.data):
                # amounts/timestamps zip may have produced fewer rows than
                # num_txn; skip chains whose head fell past the data
                continue
            txn = self.data[start_chain_id]
            ts = txn["ts"]
            cc_num = txn["cc_num"]
            txn["label"] = 1
            # drop follow-up indices that fall outside the data as well
            idx = [j for j in idx if j < len(self.data)]
            inject_ts = self.generate_timestamps_for_fraud_attacks(ts, len(idx))
            inject_amt = self.generate_amounts_for_fraud_attacks(len(idx))
            random.shuffle(inject_amt)
            for i, j in enumerate(idx):
                victim = self.data[j]
                victim["ts"] = inject_ts[i]
                victim["label"] = 1
                victim["cc_num"] = cc_num
                victim["amt"] = inject_amt[i]
                # BUG FIX: hash the injected amount that is actually stored
                # on this transaction — previously the chain head's amount
                # was hashed, so txn_id no longer matched the row's fields
                victim["txn_id"] = self.generate_transaction_id(
                    inject_ts[i], cc_num, inject_amt[i]
                )
if __name__ == "__main__":
    # ~6M transactions across 10k cards over six months with 0.25% fraud
    generator = GenerateData(
        10000, 6000000, 0.0025, "2021-08-01 00:00:00", "2022-02-01 00:01:00"
    )
    transactions = generator.generate_transactions()
    output_dir = os.path.join(os.getcwd(), "data")
    os.makedirs(output_dir, exist_ok=True)
    transactions.to_csv(f"{output_dir}/transactions.csv", index=False)
|
{"hexsha": "f8a87ca50c0abde19f35fa2ba5259feb015862d2", "size": 5883, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_transactions.py", "max_stars_repo_name": "Priya4607/real_time_fraudulent_transaction_detection", "max_stars_repo_head_hexsha": "ef2251295b8ff8a93bd39fcc568c9c7a3cb72153", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_transactions.py", "max_issues_repo_name": "Priya4607/real_time_fraudulent_transaction_detection", "max_issues_repo_head_hexsha": "ef2251295b8ff8a93bd39fcc568c9c7a3cb72153", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_transactions.py", "max_forks_repo_name": "Priya4607/real_time_fraudulent_transaction_detection", "max_forks_repo_head_hexsha": "ef2251295b8ff8a93bd39fcc568c9c7a3cb72153", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5403726708, "max_line_length": 87, "alphanum_fraction": 0.5826959035, "include": true, "reason": "import numpy", "num_tokens": 1313}
|
module Meshing

using Delaunay
using DifferentialForms
# using MiniQhull
using SparseArrays
using StaticArrays

using ..Algorithms
using ..SparseOps
using ..ZeroOrOne

################################################################################

export delaunay_mesh
"""
Find the Delaunay triangulation for a set of points
"""
function delaunay_mesh(coords::IDVector{0,SVector{C,S}}) where {C,S}
    nvertices = length(coords)
    if C == 0
        # 0-dimensional space: the only possible mesh is a single vertex
        # in a single (degenerate) simplex
        @assert nvertices == 1
        simplices = SparseOp{0,C}(sparse([1], [1], [One()], 1, 1))
        return simplices
    end
    if true
        # Use Delaunay.jl
        # Triangulate
        # println("[Calling Delaunay with $nvertices points...]")
        t0 = time_ns()
        # Delaunay.jl expects an nvertices × C coordinate matrix
        mesh = delaunay(S[coords[i][c] for i in axes(coords, 1), c in 1:C])
        # [:Qbb, :Qc, :Qz, :Q12, :QJ]
        t1 = time_ns()
        tdelaunay = round((t1 - t0) / 1.0e9; sigdigits=3)
        # Convert to sparse matrix: entry (i, j) is One() when vertex i
        # belongs to simplex j
        nsimplices = size(mesh.simplices, 1)
        # println("[Delaunay found $nsimplices simplices in $tdelaunay s]")
        @assert size(mesh.simplices, 2) == C + 1
        simplices = MakeSparse{One}(nvertices, nsimplices)
        for j in 1:nsimplices
            for i in @view mesh.simplices[j, :]
                simplices[i, j] = One()
            end
        end
        simplices = SparseOp{0,C}(sparse(simplices))
    else
        # Use MiniQhull.jl (alternative backend, currently disabled via the
        # `if true` above; note it expects coordinates transposed: C × nvertices)
        # Triangulate
        mesh = delaunay(S[coords[i][c] for c in 1:C, i in axes(coords, 1)],
                        "qhull d Qt Qbb Qc Qz")
        # Convert to sparse matrix
        nsimplices = size(mesh, 2)
        @assert size(mesh, 1) == C + 1
        simplices = MakeSparse{One}(nvertices, nsimplices)
        for j in 1:nsimplices
            for i in @view mesh[:, j]
                simplices[i, j] = One()
            end
        end
        # @assert all(i -> 1 ≤ i ≤ nvertices, I)
        # @assert all(j -> 1 ≤ j ≤ nsimplices, J)
        simplices = SparseOp{0,C}(sparse(simplices))
    end
    return simplices
end

export check_delaunay
"""
Check Delaunay condition: No vertex must lie ∈ the circumcentre of a
simplex
"""
function check_delaunay(simplices::SparseOp{0,D,One},
                        lookup::SparseOp{D1,D,One}, lookup1::SparseOp{D,D1,One},
                        coords::Vector{SVector{C,S}},
                        dualcoords::Vector{SVector{C,S}}) where {D,D1,C,S}
    D::Int
    D1::Int
    @assert 0 ≤ D1 ≤ D
    @assert D1 == D - 1
    C::Int
    # This is currently broken because the location of the dual
    # coordinates are not the circumcentres any more. We need to
    # recalculate the circumcentres.
    # NOTE: the early return below deliberately disables the check; the
    # loop that follows is unreachable until the circumcentres are fixed.
    return
    for i in 1:size(simplices, 2)
        si = sparse_column_rows(simplices, i)
        @assert length(si) == D + 1
        # circumradius² of simplex i, measured from one of its vertices
        x1i = Form{C,1}(coords[first(si)])
        cci = Form{C,1}(dualcoords[i])
        cri2 = norm2(x1i - cci)
        # Loop over all faces
        for j in sparse_column_rows(lookup, i)
            # Loop over all simplices (except i)
            for k in sparse_column_rows(lookup1, j)
                if k != i
                    # Loop over all vertices
                    for l in sparse_column_rows(simplices, k)
                        # Ignore vertices of simplex i
                        if l ∉ si
                            # no foreign vertex may lie strictly inside the
                            # circumsphere of simplex i
                            xl = Form{C,1}(coords[l])
                            d2 = norm2(xl - cci)
                            @assert d2 ≥ cri2 || d2 ≈ cri2
                        end
                    end
                end
            end
        end
    end
end

################################################################################

export refine_coords
"""
Refine a mesh
"""
function refine_coords(oldedges::SparseOp{0,1,One},
                       oldcoords::IDVector{0,SVector{C,S}}) where {C,S}
    noldvertices, noldedges = size(oldedges)
    @assert length(oldcoords) == noldvertices
    # one new vertex (the edge midpoint) per old edge
    nvertices = noldvertices + noldedges
    coords = copy(oldcoords)
    # Loop over all old edges
    for i in axes(oldedges, 2)
        si = sparse_column_rows(oldedges, i)
        @assert length(si) == 2
        # midpoint of the edge's two endpoints
        x = sum(coords[j] for j in si) / length(si)
        # x1 = coords[si[1]]
        # x2 = coords[si[2]]
        # # x = (x0+x1)/2
        # q = S(0.375) + S(0.25) * rand(S)
        # x = q * x1 + (q - 1) * x2
        push!(coords, x)
    end
    return coords
end

################################################################################

export improve_mesh
"""
Improve the mesh by moving the circumcentres towards the barycentres
"""
# NOTE(review): the static parameter D in `where {D,R,C,S}` does not occur
# in the signature and is never used in the body — confirm it can be dropped.
function improve_mesh(simplices::SparseOp{0,R,One},
                      coords::IDVector{0,SVector{C,S}},
                      weights::IDVector{0,S}) where {D,R,C,S}
    C::Int
    @assert 0 ≤ C
    # accumulated per-vertex position and weight corrections
    shift_coords = IDVector{0}(zeros(SVector{C,S}, length(coords)))
    shift_weights = IDVector{0}(zeros(S, length(weights)))
    count = 0
    for j in axes(simplices, 2)
        si = sparse_column_rows(simplices, j)
        si = SVector{R + 1}(i for i in si)
        xs = SVector{R + 1}(Form{C,1}(coords[i]) for i in si)
        ws = SVector{R + 1}(Form{C,0}((weights[i],)) for i in si)
        bc = barycentre(xs)
        cc = circumcentre(xs, ws)
        for n in 1:(R + 1)
            i = si[n]
            # projection of (circumcentre - barycentre) onto the direction
            # from the barycentre to vertex n
            β = ((xs[n] - bc) ⋅ (cc - bc))[]
            shift_coords[i] -= convert(SVector,
                                       β * (xs[n] - bc) / norm2(xs[n] - bc))
            shift_weights[i] += β
        end
        count += 1
    end
    # normalize by the average number of simplex contributions per vertex
    α = length(shift_weights) / (S(R + 1) * count)
    for i in axes(shift_coords, 1)
        shift_coords[i] *= α
    end
    for i in axes(shift_weights, 1)
        shift_weights[i] *= α
    end
    return shift_coords, shift_weights
end

end
|
{"hexsha": "badfb8500ec8273f5c16d81d8c111e37a1bec361", "size": 5789, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Meshing.jl", "max_stars_repo_name": "eschnett/DDF.jl", "max_stars_repo_head_hexsha": "e5a93eaef99e2143619ce81ec0e9e222f049f25b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-20T23:06:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T19:08:15.000Z", "max_issues_repo_path": "src/Meshing.jl", "max_issues_repo_name": "eschnett/DDF.jl", "max_issues_repo_head_hexsha": "e5a93eaef99e2143619ce81ec0e9e222f049f25b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-17T15:55:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-26T20:15:00.000Z", "max_forks_repo_path": "src/Meshing.jl", "max_forks_repo_name": "eschnett/DDF.jl", "max_forks_repo_head_hexsha": "e5a93eaef99e2143619ce81ec0e9e222f049f25b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:23:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:23:43.000Z", "avg_line_length": 29.9948186528, "max_line_length": 80, "alphanum_fraction": 0.5144239074, "num_tokens": 1645}
|
import random
import time
import numpy as np
from Team import Team
from Match import Match
class Tournament:
    """Simulates a tournament: builds a random match schedule for a set of
    teams (four teams per match, two alliances of two), runs the matches,
    and ranks the teams."""

    # class-level defaults; __init__ shadows the first two per instance,
    # run_tournament accumulates ceiling_hits
    matches_per_team = 0
    number_of_matches = 0
    ceiling_hits = 0

    def __init__(self, te, ma):
        """te: an int (generate that many synthetic teams) or a prebuilt
        list of Team objects; ma: number of matches each team should play."""
        # set up the array of teams, which will also keep track of rankings
        self.teams = []
        if isinstance(te, int):
            self.generate_n_teams(te)
        else:
            self.teams = te
        self.rank()
        self.matches_per_team = ma
        #TODO: check that with the number of teams given, the requested amount of
        # matches per team can be played without conflicts
        self.number_of_matches = len(self.teams) * self.matches_per_team // 4
        self.matches = []

    def create_match_schedule(self):
        """Randomly draw red1/red2 vs blue1/blue2 for every match, avoiding
        repeat partners and repeat opponents.

        Returns True on success; after too many failed draws (3x the team
        count) the whole schedule is reset and False is returned so the
        caller can retry from scratch.
        """
        for n in range(1, self.number_of_matches + 1):
            red1 = random.choice(self.teams)
            failures = 0
            # redraw until red1 still has matches left to play
            while (red1.matches_scheduled >= self.matches_per_team):
                #print('red1: ' + red1.name + ' has already been scheduled for ' + str(red1.matches_scheduled) + ' matches')
                failures += 1
                #if failures > len(self.teams):
                if failures > 3 * len(self.teams):
                    self.reset()
                    return False
                red1 = random.choice(self.teams)
            #print('red1 FINALIZED:', red1.name)
            assert red1.matches_scheduled < self.matches_per_team
            red1.matches_scheduled += 1
            red2 = random.choice(self.teams)
            failures = 0
            # check that there is such a team before entering the while loop
            # redraw until red2 has matches left and has never partnered red1
            while (red2.matches_scheduled >= self.matches_per_team or red2.number in red1.past_partners):
                '''if (red2.matches_scheduled >= self.matches_per_team):
                    print('red2: ' + red2.name + ' has already been scheduled for ' + str(red2.matches_scheduled) + ' matches')
                else:
                    print('red2: ' + red2.name + ' has already partnered with ' + red1.name)'''
                failures += 1
                #if failures > len(self.teams):
                if failures > 3 * len(self.teams):
                    self.reset()
                    return False
                red2 = random.choice(self.teams)
            #print('red2 FINALIZED:', red2.name)
            assert red2.matches_scheduled < self.matches_per_team
            assert red1.number != red2.number
            red2.matches_scheduled += 1
            red1.past_partners.append(red2.number)
            red2.past_partners.append(red1.number)
            blue1 = random.choice(self.teams)
            failures = 0
            # check that there is such a team before entering the while loop
            # redraw until blue1 has matches left and has never faced red1/red2
            while (blue1.matches_scheduled >= self.matches_per_team or blue1.number in red1.past_opponents or blue1.number in red2.past_opponents):
                '''if (blue1.matches_scheduled >= self.matches_per_team):
                    print('blue1: ' + blue1.name + ' has already been scheduled for ' + str(blue1.matches_scheduled) + ' matches')
                elif (blue1.number in red1.past_opponents):
                    print('blue1: ' + blue1.name + ' has already played against ' + red1.name)
                else:
                    print('blue1: ' + blue1.name + ' has already played against ' + red2.name)'''
                failures += 1
                #if failures > len(self.teams):
                if failures > 3 * len(self.teams):
                    self.reset()
                    return False
                blue1 = random.choice(self.teams)
            #print('blue1 FINALIZED:', blue1.name)
            assert blue1.matches_scheduled < self.matches_per_team
            assert red1.number != blue1.number
            assert red2.number != blue1.number
            blue1.matches_scheduled += 1
            blue2 = random.choice(self.teams)
            failures = 0
            # check that there is such a team before entering the while loop
            # redraw until blue2 has matches left, never faced red1/red2, and
            # never partnered blue1
            while (blue2.matches_scheduled >= self.matches_per_team or blue2.number in red1.past_opponents or blue2.number in red2.past_opponents or blue2.number in blue1.past_partners):
                '''if (blue2.matches_scheduled >= self.matches_per_team):
                    print('blue2: ' + blue2.name + ' has already been scheduled for ' + str(blue2.matches_scheduled) + ' matches')
                elif (blue2.number in red1.past_opponents):
                    print('blue2: ' + blue2.name + ' has already played against ' + red1.name)
                elif (blue2.number in red2.past_opponents):
                    print('blue2: ' + blue2.name + ' has already played against ' + red2.name)
                else:
                    print('blue2: ' + blue2.name + ' has already partnered with ' + blue1.name)'''
                failures += 1
                #if failures > len(self.teams):
                if failures > 3 * len(self.teams):
                    self.reset()
                    return False
                blue2 = random.choice(self.teams)
            #print('blue2 FINALIZED:', blue2.name)
            assert blue2.matches_scheduled < self.matches_per_team
            assert red1.number != blue2.number
            assert red2.number != blue2.number
            assert blue1.number != blue2.number
            blue2.matches_scheduled += 1
            # record partner/opponent history so later draws avoid repeats
            blue1.past_partners.append(blue2.number)
            blue2.past_partners.append(blue1.number)
            red1.past_opponents.append(blue1.number)
            red1.past_opponents.append(blue2.number)
            red2.past_opponents.append(blue1.number)
            red2.past_opponents.append(blue2.number)
            blue1.past_opponents.append(red1.number)
            blue1.past_opponents.append(red2.number)
            blue2.past_opponents.append(red1.number)
            blue2.past_opponents.append(red2.number)
            m = Match([red1, red2], [blue1, blue2], n)
            self.matches.append(m)
        return True

    def generate_n_teams(self, n):
        """Create n synthetic teams with OPRs drawn from a normal
        distribution whose mean depends on the tournament size."""
        oprs = []
        for i in range(1, n + 1):
            t = Team(i, -1)
            self.teams.append(t)
        # if more than 32 teams, use worlds-like distribution
        if (n > 32):
            oprs = np.random.normal(150, 55, n)
        # if 32 > n > 24, use state-like distribution
        elif (n > 24):
            oprs = np.random.normal(125, 55, n)
        # else for smaller tournaments, use qual-like distribution
        else:
            oprs = np.random.normal(100, 55, n)
        oprs.sort()
        # assign sorted OPRs so earlier-numbered teams get the higher values
        o = 0
        for t in reversed(self.teams):
            temp = int(oprs[o])
            # min opr is 10, max is 450
            if temp < 10: temp = 10
            if temp > 450: temp = 450
            t.opr = temp
            o += 1

    def run_tournament(self):
        """Play every scheduled match, counting how often the score
        ceiling was hit."""
        for m in self.matches:
            # run_match returns True if the "ceiling" was hit. Keep track of that
            if(m.run_match()): self.ceiling_hits += 1

    def rank(self):
        """Sort the team list in ranking order (Team defines comparison)."""
        self.teams.sort()
        # TODO: add rank in front of team name

    def rankings(self):
        """Print the current ranking table (RP/TP/MP/OPR per team)."""
        self.rank()
        print('{:^20}'.format('Team Name') + '|' + '{:^6}'.format('RP') + '|' + '{:^6}'.format('TP') + '|' + '{:^4}'.format('MP') + '|' + '{:^5}'.format('OPR'))
        print('{:->20}'.format('') + '|' + '{:->6}'.format('') + '|' + '{:->6}'.format('') + '|' + '{:->4}'.format('') + '|' + '{:->5}'.format(''))
        for t in self.teams:
            print('{:20}'.format(t.name) + '|' + '{:>6}'.format(t.rp) + '|' + '{:>6}'.format(t.get_tp()) + '|' + '{:>4}'.format(t.matches_played) + '|' + '{:>5}'.format(t.opr))
        print()

    def stats(self):
        """Print per-match statistics for every scheduled match."""
        for m in self.matches:
            m.stats()

    def reset(self):
        """Drop the schedule and clear per-team scheduling state."""
        self.matches = []
        for t in self.teams:
            t.reset()
|
{"hexsha": "b8358c8c9ebdad73433336d5e190ab6854c54502", "size": 6633, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tournament.py", "max_stars_repo_name": "ftc9899/tournament-sim", "max_stars_repo_head_hexsha": "72b67488affe7cd4f649d1fc84f74c4f2fea155a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-06-10T19:29:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T08:34:45.000Z", "max_issues_repo_path": "Tournament.py", "max_issues_repo_name": "ftc9899/tournament-sim", "max_issues_repo_head_hexsha": "72b67488affe7cd4f649d1fc84f74c4f2fea155a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-23T04:49:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-24T03:04:14.000Z", "max_forks_repo_path": "Tournament.py", "max_forks_repo_name": "ftc9899/tournament-sim", "max_forks_repo_head_hexsha": "72b67488affe7cd4f649d1fc84f74c4f2fea155a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.356097561, "max_line_length": 177, "alphanum_fraction": 0.6500829187, "include": true, "reason": "import numpy", "num_tokens": 1951}
|
#########################
# 1. Importing Packages #
#########################
import numpy as np
##################################
# 2. Helper Conversion Functions #
##################################
def dms2dec(degrees, arcminutes, arcseconds):
    """Convert sexagesimal degrees/arcminutes/arcseconds to decimal degrees.

    The sign of *degrees* determines the sign of the result; arcminutes
    and arcseconds are treated as magnitudes.
    """
    angle = abs(degrees) + arcminutes/60 + arcseconds/(60*60)
    # BUG FIX: use >= so an input of exactly 0 degrees keeps the positive
    # sign (previously dms2dec(0, 30, 0) returned -0.5)
    return angle if degrees >= 0 else -angle
def hms2dec(hours, minutes, seconds):
    """Convert right ascension in hours/minutes/seconds to decimal degrees
    (1 hour of RA = 15 degrees)."""
    decimal_hours = hours + minutes/60 + seconds/3600
    return 15 * decimal_hours
#########################
# 3. Importing Datasets #
#########################
def import_bss(file='data/BSS.dat'):
    """Load the BSS catalogue as (id, ra_deg, dec_deg) tuples.

    Columns 1-6 of the file hold RA as h/m/s and declination as d/m/s;
    ids are assigned sequentially starting at 1.
    """
    data = np.loadtxt(file, usecols=range(1, 7))
    catalogue = []
    for obj_id, row in enumerate(data, 1):
        ra = hms2dec(row[0], row[1], row[2])
        dec = dms2dec(row[3], row[4], row[5])
        catalogue.append((obj_id, ra, dec))
    return catalogue
def import_super(file='data/SuperCOSMOS.csv'):
    """Load the SuperCOSMOS catalogue as (id, ra_deg, dec_deg) tuples.

    RA and declination are already in decimal degrees in the first two
    CSV columns; ids are assigned sequentially starting at 1.
    """
    data = np.loadtxt(file, delimiter=',', skiprows=1,
                      usecols=[0, 1])
    return [(obj_id, row[0], row[1]) for obj_id, row in enumerate(data, 1)]
###################################
# 4. Calculating Angular Distance #
###################################
def angular_dist(r1, d1, r2, d2):
    """Great-circle distance in degrees between (r1, d1) and (r2, d2),
    both given in decimal degrees, via the haversine formula."""
    r1, d1, r2, d2 = map(np.radians, (r1, d1, r2, d2))
    hav_dec = np.sin(np.abs(d1 - d2) / 2) ** 2
    hav_ra = np.cos(d1) * np.cos(d2) * np.sin(np.abs(r1 - r2) / 2) ** 2
    return np.degrees(2 * np.arcsin(np.sqrt(hav_dec + hav_ra)))
#########################################
# 5. Helper functions for crossmatching #
#########################################
def find_closest(catalogue, ascension, declination):
    """Return (id, distance_deg) of the catalogue object nearest to the
    given coordinates; (None, inf) for an empty catalogue."""
    best_id, best_dist = None, np.inf
    for obj_id, ra, dec in catalogue:
        dist = angular_dist(ascension, declination, ra, dec)
        if dist < best_dist:
            best_id, best_dist = obj_id, dist
    return (best_id, best_dist)
def crossmatch(catalogue1, catalogue2, max_dist):
    """Match each object in catalogue1 to its nearest neighbour in
    catalogue2.

    Returns (matches, no_matches): matches is a list of
    (id1, id2, distance_deg) for pairs within max_dist; no_matches lists
    the ids from catalogue1 whose nearest neighbour is farther away.
    """
    matches, no_matches = [], []
    for obj_id, ra, dec in catalogue1:
        nearest_id, dist = find_closest(catalogue2, ra, dec)
        if dist > max_dist:
            no_matches.append(obj_id)
        else:
            matches.append((obj_id, nearest_id, dist))
    return matches, no_matches
|
{"hexsha": "d3b11ded9d055833ee58c2cea1d504c3dc6375fb", "size": 2283, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "SauravMaheshkar/Cross-Matching-Methods-for-Astronomical-Catalogs", "max_stars_repo_head_hexsha": "c10f43ad9ea3ce5ced5deadd7b6aaf99e1b9afb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-12T07:15:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T16:59:53.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "SauravMaheshkar/Cross-Matching-Methods-for-Astronomical-Catalogs", "max_issues_repo_head_hexsha": "c10f43ad9ea3ce5ced5deadd7b6aaf99e1b9afb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "SauravMaheshkar/Cross-Matching-Methods-for-Astronomical-Catalogs", "max_forks_repo_head_hexsha": "c10f43ad9ea3ce5ced5deadd7b6aaf99e1b9afb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0869565217, "max_line_length": 73, "alphanum_fraction": 0.5308804205, "include": true, "reason": "import numpy", "num_tokens": 608}
|
"""
Create a subset with more frequent labels
> python notebooks/subset-dataset.py TEMP/train-from-kaggle.csv 1500
"""
import itertools
import os.path
import sys
import numpy as np
import pandas as pd
COUNT_THR = 1000
CSV_NAME = "train-from-kaggle.csv"
COL_LABELS = 'attribute_ids'
def main(path_csv: str = CSV_NAME, count_thr: int = COUNT_THR):
    """Filter a multi-label training CSV down to frequent labels.

    Reads `path_csv`, drops every label occurring `count_thr` times or
    fewer, removes rows left with no labels, and writes the result next
    to the input as `<name>_min-lb-sample-<count_thr>.csv`.
    """
    print(f"Loading: {path_csv}")  # BUG FIX: message said "Loafing"
    df_train = pd.read_csv(path_csv)
    print(f"Samples: {len(df_train)}")
    # flatten all space-separated label lists into one list of ints
    labels_all = list(itertools.chain(*[[int(lb) for lb in lbs.split(" ")] for lbs in df_train[COL_LABELS]]))
    lb_hist = dict(zip(range(max(labels_all) + 1), np.bincount(labels_all)))
    print(f"Filter: {count_thr}")
    # keep only labels seen strictly more than count_thr times
    df_hist = pd.DataFrame([dict(lb=lb, count=count) for lb, count in lb_hist.items()
                            if count > count_thr]).set_index("lb")
    print(f"Reductions: {len(lb_hist)} >> {len(df_hist)}")
    allowed_lbs = set(df_hist.index)
    df_train[COL_LABELS] = [
        " ".join([lb for lb in lbs.split() if int(lb) in allowed_lbs]) for lbs in df_train[COL_LABELS]
    ]
    # rows whose labels were all filtered out become '' -> drop them;
    # assign the replaced column back instead of mutating a view so this
    # keeps working under pandas Copy-on-Write
    df_train[COL_LABELS] = df_train[COL_LABELS].replace('', np.nan)
    df_train.dropna(subset=[COL_LABELS], inplace=True)
    print(f"Samples: {len(df_train)}")
    name_csv, _ = os.path.splitext(os.path.basename(path_csv))
    path_csv = os.path.join(os.path.dirname(path_csv), f"{name_csv}_min-lb-sample-{count_thr}.csv")
    df_train.to_csv(path_csv)
    # sanity check: recount the distinct labels that survived filtering
    labels_all = list(itertools.chain(*[[int(lb) for lb in lbs.split(" ")] for lbs in df_train[COL_LABELS]]))
    print(f"sanity check - nb labels: {len(set(labels_all))}")
if __name__ == '__main__':
    # usage: python subset-dataset.py <csv_path> <count_threshold>
    csv_path = sys.argv[1]
    threshold = int(sys.argv[2])
    main(csv_path, threshold)
|
{"hexsha": "d38db9cc854e99753cdd369557906997ba86ce39", "size": 1665, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/subset-dataset.py", "max_stars_repo_name": "Borda/kaggle_iMet-collection", "max_stars_repo_head_hexsha": "ab3f55e85029768d5cdb8bc7ee895c195010cf80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-06-11T19:26:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T22:06:08.000Z", "max_issues_repo_path": "notebooks/subset-dataset.py", "max_issues_repo_name": "Borda/kaggle_iMet-collection", "max_issues_repo_head_hexsha": "ab3f55e85029768d5cdb8bc7ee895c195010cf80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-07T07:26:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-30T22:06:43.000Z", "max_forks_repo_path": "notebooks/subset-dataset.py", "max_forks_repo_name": "Borda/kaggle_iMet-collection", "max_forks_repo_head_hexsha": "ab3f55e85029768d5cdb8bc7ee895c195010cf80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1956521739, "max_line_length": 109, "alphanum_fraction": 0.6756756757, "include": true, "reason": "import numpy", "num_tokens": 472}
|
import itertools
import numpy as np
# Short aliases for the element-wise NumPy reducers used by the CSG
# combinators below.
_min = np.minimum
_max = np.maximum
def union(a, *bs, k=None):
    """Boolean union of SDF `a` with each SDF in `bs`.

    Plain element-wise min when no smoothing radius is available,
    otherwise the polynomial smooth-min with radius K (taken from `k` or
    the operand's `_k` attribute; a falsy `k` falls through to `_k`).
    """
    def f(p):
        d = a(p)
        for other in bs:
            d_b = other(p)
            K = k or getattr(other, '_k', None)
            if K is None:
                d = np.minimum(d, d_b)
            else:
                h = np.clip(0.5 + 0.5 * (d_b - d) / K, 0, 1)
                mixed = d_b + (d - d_b) * h
                d = mixed - K * h * (1 - h)
        return d
    return f
def difference(a, *bs, k=None):
    """Subtract each SDF in `bs` from `a` (element-wise max(a, -b)).

    Uses the polynomial smooth variant with radius K when `k` or the
    operand's `_k` attribute provides one (a falsy `k` falls through to
    `_k`).
    """
    def f(p):
        d = a(p)
        for other in bs:
            d_b = other(p)
            K = k or getattr(other, '_k', None)
            if K is None:
                d = np.maximum(d, -d_b)
            else:
                h = np.clip(0.5 - 0.5 * (d_b + d) / K, 0, 1)
                mixed = d + (-d_b - d) * h
                d = mixed + K * h * (1 - h)
        return d
    return f
def intersection(a, *bs, k=None):
    """Boolean intersection of SDF `a` with each SDF in `bs`.

    Plain element-wise max when no smoothing radius is available,
    otherwise the polynomial smooth-max with radius K (taken from `k` or
    the operand's `_k` attribute; a falsy `k` falls through to `_k`).
    """
    def f(p):
        d = a(p)
        for other in bs:
            d_b = other(p)
            K = k or getattr(other, '_k', None)
            if K is None:
                d = np.maximum(d, d_b)
            else:
                h = np.clip(0.5 - 0.5 * (d_b - d) / K, 0, 1)
                mixed = d_b + (d - d_b) * h
                d = mixed + K * h * (1 - h)
        return d
    return f
def blend(a, *bs, k=0.5):
    """Linear interpolation between SDFs: result = K*b + (1-K)*a for each
    b in `bs` (K from `k`, falling back to the operand's `_k` attribute
    when `k` is falsy)."""
    def f(p):
        d = a(p)
        for other in bs:
            d_b = other(p)
            K = k or getattr(other, '_k', None)
            d = K * d_b + (1 - K) * d
        return d
    return f
def negate(other):
    """Return an SDF with inside and outside swapped."""
    def inverted(p):
        return -other(p)
    return inverted
def dilate(other, r):
    """Grow the shape outward by radius r (offset surface)."""
    def dilated(p):
        return other(p) - r
    return dilated
def erode(other, r):
    """Shrink the shape inward by radius r (inverse of dilate)."""
    def eroded(p):
        return other(p) + r
    return eroded
def shell(other, thickness):
    """Hollow the shape into a shell of the given total wall thickness,
    centred on the original surface."""
    def shelled(p):
        return np.abs(other(p)) - thickness / 2
    return shelled
def repeat(other, spacing, count=None, padding=0):
    """Tile an SDF on a regular grid.

    `spacing` gives the per-axis cell size (0 disables repetition on that
    axis); `count`, when given, limits the number of repetitions per axis
    in each direction; `padding` is how many neighbouring cells per axis
    to also evaluate, so that shapes larger than one cell still combine
    correctly.
    """
    count = np.array(count) if count is not None else None
    spacing = np.array(spacing)

    def neighbors(dim, padding, spacing):
        # Build the list of integer cell offsets to examine around the
        # nearest cell: the cartesian product of [-p, p] per axis.
        try:
            padding = [padding[i] for i in range(dim)]
        except Exception:
            # scalar padding: same value on every axis
            padding = [padding] * dim
        try:
            spacing = [spacing[i] for i in range(dim)]
        except Exception:
            spacing = [spacing] * dim
        for i, s in enumerate(spacing):
            if s == 0:
                # no repetition on this axis -> no neighbours to visit
                padding[i] = 0
        axes = [list(range(-p, p + 1)) for p in padding]
        return list(itertools.product(*axes))

    def f(p):
        # index of the nearest cell; axes with spacing 0 stay at index 0
        q = np.divide(p, spacing, out=np.zeros_like(p), where=spacing != 0)
        if count is None:
            index = np.round(q)
        else:
            index = np.clip(np.round(q), -count, count)
        # evaluate the SDF in the nearest cell and all padded neighbours,
        # then take the union (element-wise min)
        indexes = [index + n for n in neighbors(p.shape[-1], padding, spacing)]
        A = [other(p - spacing * i) for i in indexes]
        a = A[0]
        for b in A[1:]:
            a = _min(a, b)
        return a
    return f
|
{"hexsha": "9c245e8286118b21daf04b17e120c113f70f1b7c", "size": 2925, "ext": "py", "lang": "Python", "max_stars_repo_path": "sdf/dn.py", "max_stars_repo_name": "yihong0618/sdf", "max_stars_repo_head_hexsha": "0949ae7d4468e03270290dca5ebd36847fd60192", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 926, "max_stars_repo_stars_event_min_datetime": "2021-01-19T17:59:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:06:06.000Z", "max_issues_repo_path": "sdf/dn.py", "max_issues_repo_name": "jeonghopark/sdf", "max_issues_repo_head_hexsha": "699cf72545349798dd707977974e29fa3099edc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2021-01-23T14:07:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-28T08:25:56.000Z", "max_forks_repo_path": "sdf/dn.py", "max_forks_repo_name": "jeonghopark/sdf", "max_forks_repo_head_hexsha": "699cf72545349798dd707977974e29fa3099edc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 74, "max_forks_repo_forks_event_min_datetime": "2021-02-02T09:27:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:08:35.000Z", "avg_line_length": 25.4347826087, "max_line_length": 79, "alphanum_fraction": 0.4348717949, "include": true, "reason": "import numpy", "num_tokens": 893}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Estimate pi by Monte Carlo sampling on Spark (three variants)."""
__author__ = "wangzhefeng"
from pyspark import SparkContext
import operator
import random
import numpy as np
# BUG FIX: the original did `from pyspark import SparkContext as sc`, which
# binds the *class* to `sc`; calling sc.parallelize(...) on the class fails
# because it is an unbound method. A real context instance is required.
sc = SparkContext(appName="monte_carlo_pi")
# **************************************************************************
# version 1
# **************************************************************************
# ------------------------------------------
# Load the data set
# ------------------------------------------
### number of sample points to draw
total = int(100 * 10000)
local_collection = np.arange(1, total)
### parallelize a data set into the cluster
rdd = sc.parallelize(local_collection) \
    .setName("parallelized_data") \
    .cache()
# ------------------------------------------
# Process the data
# ------------------------------------------
### randomly generate points
def map_func(element):
    """Map an arbitrary RDD element to a uniform random point in [0, 1)^2.

    The element's value is ignored; each record simply triggers one
    (x, y) sample draw.
    """
    return (random.random(), random.random())
def map_func_2(element):
    """Return 1 if the (x, y) point lies strictly inside the unit circle, else 0."""
    px, py = element
    return int(px * px + py * py < 1)
# BUG FIX: `operator` was used below without ever being imported.
import operator

rdd2 = rdd.map(map_func) \
    .setName("random_point") \
    .cache()
### count how many points fall inside vs. outside the circle
rdd3 = rdd2.map(map_func_2) \
    .setName("points_in_out_circle") \
    .cache()
# ------------------------------------------
# Show the result
# ------------------------------------------
### how many points are in the circle
in_circle = rdd3.reduce(operator.add)
pi = 4.0 * in_circle / total
print("iterate {} times".format(total))
print("estimated pi: {}".format(pi))
# **************************************************************************
# version 2
# **************************************************************************
total = int(100 * 10000)
# BUG FIX: the original left this as a bare expression statement, so the
# estimate was computed and immediately discarded; bind and report it.
pi_v2 = sc.parallelize(range(total)) \
    .map(lambda x: (random.random(), random.random())) \
    .map(lambda x: 1 if x[0] ** 2 + x[1] ** 2 < 1 else 0) \
    .reduce(lambda x, y: x + y) \
    / float(total) * 4
print("estimated pi (v2): {}".format(pi_v2))
# **************************************************************************
# version 3
# **************************************************************************
total = int(100 * 10000)
# BUG FIXES: `.reduct` was a typo for `.reduce`, and the condition was
# missing the `< 1` comparison — `1 if sum(...) else 0` treats any nonzero
# sum as truthy, so every point counted as inside and pi came out as 4.
pi_v3 = sc.parallelize(range(total)) \
    .map(lambda x: 1 if sum(np.random.random(2) ** 2) < 1 else 0) \
    .reduce(lambda x, y: x + y) \
    / float(total) * 4
print("estimated pi (v3): {}".format(pi_v3))
|
{"hexsha": "1b5487c202a5953c33b70d8d4c4598323adaa3b0", "size": 2121, "ext": "py", "lang": "Python", "max_stars_repo_path": "project_scala/MC_pyspark.py", "max_stars_repo_name": "wangzhefeng/spark_spr", "max_stars_repo_head_hexsha": "a8f92b2563c70c7e072668a1e82cfa50164dd30d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project_scala/MC_pyspark.py", "max_issues_repo_name": "wangzhefeng/spark_spr", "max_issues_repo_head_hexsha": "a8f92b2563c70c7e072668a1e82cfa50164dd30d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project_scala/MC_pyspark.py", "max_forks_repo_name": "wangzhefeng/spark_spr", "max_forks_repo_head_hexsha": "a8f92b2563c70c7e072668a1e82cfa50164dd30d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8658536585, "max_line_length": 76, "alphanum_fraction": 0.4285714286, "include": true, "reason": "import numpy", "num_tokens": 482}
|
c Subroutine find single vector of stations
c AJ_Kettle, 22Apr2021
c
c Purpose: amalgamate the per-year (2019/2020/2021) station-name lists
c into one vector (s_vec_stnlist_amal) with a per-year presence flag
c matrix (i_mat_stnlist_flag).
c NOTE(review): stub implementation -- only prints entry/exit messages;
c no amalgamation is performed yet.
      SUBROUTINE find_single_bigvector(l_rgh_stn,
     + l_stn2019,s_vec_stnlist2019,l_stn2020,s_vec_stnlist2020,
     + l_stn2021,s_vec_stnlist2021,
     + s_vec_stnlist_amal,i_mat_stnlist_flag)
      IMPLICIT NONE
c************************************************************************
c Variables passed into subroutine
c l_rgh_stn           : declared length of the station-list vectors
c l_stn2019/2020/2021 : per-year station counts (presumably the number
c                       of valid entries in each list -- confirm)
c s_vec_stnlist*      : per-year station-name lists; *_amal is the output
c i_mat_stnlist_flag  : (l_rgh_stn,3) flag matrix, one column per year
      INTEGER :: l_rgh_stn
      INTEGER :: l_stn2019
      INTEGER :: l_stn2020
      INTEGER :: l_stn2021
      CHARACTER(LEN=32) :: s_vec_stnlist2019(l_rgh_stn)
      CHARACTER(LEN=32) :: s_vec_stnlist2020(l_rgh_stn)
      CHARACTER(LEN=32) :: s_vec_stnlist2021(l_rgh_stn)
      CHARACTER(LEN=32) :: s_vec_stnlist_amal(l_rgh_stn)
      INTEGER :: i_mat_stnlist_flag(l_rgh_stn,3)
c*****
c Variables used within subroutine
      INTEGER :: i,j,k,ii,jj,kk
      INTEGER :: io
c************************************************************************
      print*,'just entered find_single_bigvector'
      print*,'just leaving find_single_bigvector'
      RETURN
      END
|
{"hexsha": "812318f59dbf58e2579083980e0c44df780ec59d", "size": 1185, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "r202106_usaf_update2/Y_amalgamate/Subroutine/find_single_bigvector.f", "max_stars_repo_name": "glamod/glamod-nuim", "max_stars_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "r202106_usaf_update2/Y_amalgamate/Subroutine/find_single_bigvector.f", "max_issues_repo_name": "glamod/glamod-nuim", "max_issues_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2022-01-28T13:57:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T09:34:41.000Z", "max_forks_repo_path": "r202106_usaf_update2/Y_amalgamate/Subroutine/find_single_bigvector.f", "max_forks_repo_name": "glamod/glamod-nuim", "max_forks_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-24T12:06:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-24T12:06:06.000Z", "avg_line_length": 32.027027027, "max_line_length": 73, "alphanum_fraction": 0.5510548523, "num_tokens": 318}
|
[STATEMENT]
lemma flag_read_reg_write:
shows "flag_read (\<sigma> with ((r :=\<^sub>r w)#updates)) f = flag_read (\<sigma> with updates) f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. flag_read (\<sigma> with ((r :=\<^sub>r w) # updates)) f = flag_read (\<sigma> with updates) f
[PROOF STEP]
by (induct updates arbitrary: \<sigma>, auto simp add: flag_read_def)
|
{"llama_tokens": 139, "file": "X86_Semantics_State", "length": 1}
|
from pbcore.io import (CmpH5Reader,
AlignmentSet)
import numpy as np
import os
class SequencingYield:
    """
    Class for characterizing the yield of a
    sequencing run.

    Accepts either a cmp.h5 alignment file or an AlignmentSet XML.
    """
    def __init__(self, aset_path):
        """Open the alignment set at ``aset_path``.

        :param aset_path: path to a ``.h5`` (CmpH5) or ``.xml``
            (AlignmentSet) file.
        """
        (self.aset,
         self.is_cmph5) = self._openAset(aset_path)

    def calculate_yield_by_time(self):
        """
        Return yield vs. time. Calculation will be different
        depending on whether data are from cmp.h5 vs bam.

        :returns: (time, base_yield) arrays.
        """
        if self.is_cmph5:
            time, base_yield = self.ybt_cmph5()
        else:
            time, base_yield = self.ybt_bam()
        # BUG FIX: the original computed these values and silently
        # discarded them (implicitly returning None).
        return time, base_yield

    def ybt_bam(self):
        """Yield vs. time for BAM-backed AlignmentSets (not implemented)."""
        # NOTE(review): this method was referenced by calculate_yield_by_time
        # but never defined in the original, which raised AttributeError at
        # runtime; fail explicitly until it is implemented.
        raise NotImplementedError(
            'yield-by-time is not implemented for BAM alignment sets')

    def ybt_cmph5(self):
        """
        Return yield vs. time for cmph5 datasets.

        :returns: (time, cumulative_yield) arrays, truncated where the
            cumulative yield stops increasing.
        """
        min_time, max_time, time_interval = 0, 1800, 5
        time = np.arange(min_time, max_time, time_interval)
        yield_per_time = np.zeros(time.shape, dtype=int)
        for alignment in self.aset:
            # Per-base advance time in frames; 65534 appears to be a sentinel
            # for missing values -- replace with the mean of valid entries.
            advance_time = alignment.IPD() + alignment.PulseWidth()
            advance_time[advance_time == 65534] = np.round(
                np.mean(
                    advance_time[
                        advance_time != 65534]))
            # Convert frames to time units (presumably minutes:
            # frames / (FrameRate * 60) -- TODO confirm).
            advance_time = np.divide(advance_time,
                                     self.aset.readGroupTable['FrameRate'] * 60,
                                     dtype=float)
            start_frame = np.cumsum(advance_time)
            counts, bin_edges = np.histogram(start_frame, time)
            # np.histogram returns len(time) - 1 counts, hence the slice.
            yield_per_time[0:-1] += counts
        cumulative_yield = np.cumsum(yield_per_time)
        time = time + 0.5 * time_interval  # shift to bin centers
        max_index = np.argmax(cumulative_yield)
        return time[0:max_index+1], cumulative_yield[0:max_index+1]

    def _openAset(self, aset_path):
        """Dispatch on file extension; return (reader, is_cmph5).

        :raises IOError: if the extension is neither .h5 nor .xml.
        """
        ext = os.path.splitext(aset_path)[-1]
        if ext == '.h5':
            return self._openCmpH5(aset_path)
        elif ext == '.xml':
            return self._openAlignmentSet(aset_path)
        else:
            raise IOError('Did not recognize filename extension')

    def _openCmpH5(self, aset_path):
        # BUG FIX: `print aset_path` was Python-2-only print-statement syntax
        # and made the module a SyntaxError under Python 3.
        print(aset_path)
        return CmpH5Reader(aset_path), True

    def _openAlignmentSet(self, aset_path):
        return AlignmentSet(aset_path), False
|
{"hexsha": "6772bdd86d3e450b51e3332532a1a983525fb3d1", "size": 2236, "ext": "py", "lang": "Python", "max_stars_repo_path": "biotk/libs/YieldAccumulation.py", "max_stars_repo_name": "knyquist/biotk", "max_stars_repo_head_hexsha": "d86bb3d8c72c8b820160ba8698c973208a72c2ca", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-24T21:46:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-24T21:46:44.000Z", "max_issues_repo_path": "biotk/libs/YieldAccumulation.py", "max_issues_repo_name": "knyquist/biotk", "max_issues_repo_head_hexsha": "d86bb3d8c72c8b820160ba8698c973208a72c2ca", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "biotk/libs/YieldAccumulation.py", "max_forks_repo_name": "knyquist/biotk", "max_forks_repo_head_hexsha": "d86bb3d8c72c8b820160ba8698c973208a72c2ca", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4, "max_line_length": 80, "alphanum_fraction": 0.5836314848, "include": true, "reason": "import numpy", "num_tokens": 532}
|
import sys
import numpy as np
def preprocess(text):
    """Tokenize *text* and build word/ID lookup tables.

    :param text: raw sentence; it is lowercased and '.' is split off
        as its own token.
    :return: (corpus, word_to_id, id_to_word) where corpus is a numpy
        array of word IDs in sentence order.
    """
    tokens = text.lower().replace('.', ' .').split(' ')
    word_to_id = {}
    id_to_word = {}
    for token in tokens:
        if token not in word_to_id:
            idx = len(word_to_id)
            word_to_id[token] = idx
            id_to_word[idx] = token
    corpus = np.array([word_to_id[t] for t in tokens])
    return corpus, word_to_id, id_to_word
def create_contexts_target(corpus, window_size=1):
    """Build (contexts, target) training pairs for CBOW-style models.

    :param corpus: 1-D sequence of word IDs.
    :param window_size: number of words on each side forming the context.
    :return: (contexts, target) numpy arrays; contexts has one row of
        2 * window_size neighbors per target word.
    """
    target = corpus[window_size:-window_size]
    contexts = [
        [corpus[pos + offset]
         for offset in range(-window_size, window_size + 1)
         if offset != 0]
        for pos in range(window_size, len(corpus) - window_size)
    ]
    return np.array(contexts), np.array(target)
def convert_one_hot(corpus, vocab_size):
    '''Convert word IDs to a one-hot representation.

    :param corpus: word IDs (1-D or 2-D NumPy array)
    :param vocab_size: vocabulary size
    :return: one-hot representation (2-D or 3-D NumPy array of int32)
    '''
    N = corpus.shape[0]
    if corpus.ndim == 1:
        one_hot = np.zeros((N, vocab_size), dtype=np.int32)
        # Advanced indexing sets one entry per row in a single pass.
        one_hot[np.arange(N), corpus] = 1
    elif corpus.ndim == 2:
        C = corpus.shape[1]
        one_hot = np.zeros((N, C, vocab_size), dtype=np.int32)
        rows = np.arange(N)[:, None]
        cols = np.arange(C)[None, :]
        one_hot[rows, cols, corpus] = 1
    return one_hot
def clip_grads(grads, max_norm):
    """Scale *grads* in place so their global L2 norm is at most *max_norm*."""
    total_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
    scale = max_norm / (total_norm + 1e-6)
    if scale < 1:
        for g in grads:
            g *= scale
def cos_similarity(x, y, eps=1e-8):
    '''Cosine similarity between two vectors.

    :param x: vector
    :param y: vector
    :param eps: small constant guarding against division by zero
    :return: cosine similarity (dot product of the normalized vectors)
    '''
    return np.dot(x / (np.sqrt(np.sum(x ** 2)) + eps),
                  y / (np.sqrt(np.sum(y ** 2)) + eps))
def most_similar(query, word_to_id, id_to_word, word_matrix, top=5):
    '''Print the *top* words most similar (by cosine) to *query*.

    :param query: query word (text)
    :param word_to_id: dict mapping words to word IDs
    :param id_to_word: dict mapping word IDs to words
    :param word_matrix: matrix whose rows are word vectors, indexed by ID
    :param top: how many results to print
    '''
    if query not in word_to_id:
        print('%s is not found' % query)
        return

    print('\n[query] ' + query)
    query_vec = word_matrix[word_to_id[query]]

    vocab_size = len(id_to_word)
    similarity = np.array([cos_similarity(word_matrix[i], query_vec)
                           for i in range(vocab_size)])

    # Walk IDs from most to least similar, skipping the query itself.
    shown = 0
    for idx in (-1 * similarity).argsort():
        if id_to_word[idx] == query:
            continue
        print(' %s: %s' % (id_to_word[idx], similarity[idx]))
        shown += 1
        if shown >= top:
            return
def eval_perplexity(model, corpus, batch_size=10, time_size=35):
    """Compute the perplexity of *model* over *corpus*.

    :param model: network exposing ``forward(xs, ts[, train_flg])`` -> loss
    :param corpus: 1-D sequence of word IDs
    :param batch_size: number of sequences evaluated in parallel
    :param time_size: truncation length per iteration
    :return: perplexity, exp(mean loss over all iterations)
    """
    print('evaluating perplexity ...')
    corpus_size = len(corpus)
    max_iters = (corpus_size - 1) // (batch_size * time_size)
    jump = (corpus_size - 1) // batch_size

    total_loss = 0
    for it in range(max_iters):
        xs = np.zeros((batch_size, time_size), dtype=np.int32)
        ts = np.zeros((batch_size, time_size), dtype=np.int32)
        base = it * time_size
        # Each batch row reads from its own stretch of the corpus,
        # offset by `jump`; targets are the inputs shifted by one.
        starts = [base + row * jump for row in range(batch_size)]
        for t in range(time_size):
            for row, start in enumerate(starts):
                xs[row, t] = corpus[(start + t) % corpus_size]
                ts[row, t] = corpus[(start + t + 1) % corpus_size]

        # Some models take a train_flg keyword; fall back for those that don't.
        try:
            loss = model.forward(xs, ts, train_flg=False)
        except TypeError:
            loss = model.forward(xs, ts)
        total_loss += loss

        sys.stdout.write('\r%d / %d' % (it, max_iters))
        sys.stdout.flush()

    print('')
    return np.exp(total_loss / max_iters)
|
{"hexsha": "82bfadc3fb6ecadc242d98b5ff421c6a52d7f0c5", "size": 4071, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/common/util.py", "max_stars_repo_name": "kanta-nakamura/jarujaru-tower-generator", "max_stars_repo_head_hexsha": "9f3cc4cc9e699b968bc573cd8198f4938a302894", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-25T01:22:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T08:47:02.000Z", "max_issues_repo_path": "src/common/util.py", "max_issues_repo_name": "kanta-nakamura/jarujaru-tower-generator", "max_issues_repo_head_hexsha": "9f3cc4cc9e699b968bc573cd8198f4938a302894", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/common/util.py", "max_forks_repo_name": "kanta-nakamura/jarujaru-tower-generator", "max_forks_repo_head_hexsha": "9f3cc4cc9e699b968bc573cd8198f4938a302894", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6690140845, "max_line_length": 71, "alphanum_fraction": 0.5878162614, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
# Certification of Robustness using Zonotopes with DeepZ
In this notebook we will demonstrate the usage of certification using zonotopes within ART. With deterministic certification methods such as DeepZ we can have a guarantee if a datapoint could have its class changed under a given bound. This method was originally proposed in: https://papers.nips.cc/paper/2018/file/f2f446980d8e971ef3da97af089481c3-Paper.pdf
The zonotopes abstraction used here is defined by:
\begin{equation}
\hat{x} = \eta_0 + \sum_{i=1}^{i=N} \eta_i \epsilon_i
\end{equation}
where $\eta_0$ is the central vector, $\epsilon_i$ are noise symbols, $\eta_i$ are coefficients representing deviations around $\eta_0$.
We can illustrate a 2D toy example of this below in which the initial datapoint has two features, with a central vector of [0.25, 0.25] and these features both have noise terms of [0.25, 0.25]. We push this zonotope through the neural network and show its intermediate shapes:
We can see that the zonotope changes shape as it is passed through the neural network. When passing through a ReLU it gains another term (going from 2 sets of parallel lines to 3). We can then check if the final zonotope crosses any decision boundaries and say if a point is certified.
Let's see how to use this method in ART!
```python
import torch
import torch.optim as optim
import numpy as np
from torch import nn
from sklearn.utils import shuffle
from art.estimators.certification import deep_z
from art.utils import load_mnist, preprocess, to_categorical
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
```python
# We make an example pytorch classifier
class MNISTModel(nn.Module):
    """Small CNN for MNIST used to demonstrate DeepZ certification."""
    def __init__(self):
        super(MNISTModel, self).__init__()
        # Two strided 4x4 convolutions (stride 2) downsample without pooling.
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=32,
                               kernel_size=(4, 4),
                               stride=(2, 2),
                               dilation=(1, 1),
                               padding=(0, 0))
        self.conv2 = nn.Conv2d(in_channels=32,
                               out_channels=32,
                               kernel_size=(4, 4),
                               stride=(2, 2),
                               dilation=(1, 1),
                               padding=(0, 0))
        # 28x28 input -> 13x13 after conv1 -> 5x5 after conv2,
        # so 32 * 5 * 5 = 800 flattened features feed the classifier head.
        self.fc1 = nn.Linear(in_features=800,
                             out_features=10)
        self.relu = nn.ReLU()
    def forward(self, x):
        """Return class logits for a batch of (1, 28, 28) images."""
        # Accept raw numpy input as well as tensors.
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x).float().to(device)
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        return x
```
```python
model = MNISTModel()
opt = optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.CrossEntropyLoss()
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
x_test = np.squeeze(x_test)
x_test = np.expand_dims(x_test, axis=1)
y_test = np.argmax(y_test, axis=1)
x_train = np.squeeze(x_train)
x_train = np.expand_dims(x_train, axis=1)
y_train = np.argmax(y_train, axis=1)
```
```python
# train the model normally
def standard_train(model, opt, criterion, x, y, bsize=32, epochs=5):
    """Plain minibatch training loop; prints per-epoch mean loss and
    returns the trained model.

    :param model: network to train (callable on a batch tensor)
    :param opt: optimizer over model.parameters()
    :param criterion: loss function taking (outputs, targets)
    :param x: training inputs as a numpy array
    :param y: integer class labels as a numpy array
    :param bsize: minibatch size
    :param epochs: number of passes over the data
    """
    # NOTE(review): a trailing partial batch (< bsize samples) is dropped.
    num_of_batches = int(len(x) / bsize)
    for epoch in range(epochs):
        # Reshuffle each epoch so batch composition differs between epochs.
        x, y = shuffle(x, y)
        loss_list = []
        for bnum in range(num_of_batches):
            x_batch = np.copy(x[bnum * bsize:(bnum + 1) * bsize])
            y_batch = np.copy(y[bnum * bsize:(bnum + 1) * bsize])
            x_batch = torch.from_numpy(x_batch).float().to(device)
            y_batch = torch.from_numpy(y_batch).type(torch.LongTensor).to(device)
            # zero the parameter gradients
            opt.zero_grad()
            outputs = model(x_batch)
            loss = criterion(outputs, y_batch)
            loss_list.append(loss.data)
            loss.backward()
            opt.step()
        print('End of epoch {} loss {}'.format(epoch, np.mean(loss_list)))
    return model
model = standard_train(model=model,
opt=opt,
criterion=criterion,
x=x_train,
y=y_train)
```
End of epoch 0 loss 0.5815373063087463
End of epoch 1 loss 0.2648811340332031
End of epoch 2 loss 0.18593080341815948
End of epoch 3 loss 0.1360677033662796
End of epoch 4 loss 0.10795646160840988
```python
# Let's now get the predictions for the MNIST test set and see how well our model is doing.
with torch.no_grad():
test_preds = model(torch.from_numpy(x_test).float().to(device))
test_preds = np.argmax(test_preds.cpu().detach().numpy(), axis=1)
print('Test acc: ', np.mean(test_preds == y_test) * 100)
```
Test acc: 97.46000000000001
```python
# But how robust are these predictions?
# We can now examine this neural network's certified robustness.
# We pass it into PytorchDeepZ. We will get a print out showing which
# neural network layers have been registered. There will also be a
# warning to tell us that PytorchDeepZ currently infers a reshape when
# a neural network goes from using convolutional to dense layers.
# This will cover the majority of use cases, however, if not then the
# certification layers in art.estimators.certification.deepz.deep_z.py
# can be used to directly build a certified model structure.
zonotope_model = deep_z.PytorchDeepZ(model=model,
clip_values=(0, 1),
loss=nn.CrossEntropyLoss(),
input_shape=(1, 28, 28),
nb_classes=10)
```
registered <class 'torch.nn.modules.conv.Conv2d'>
registered <class 'torch.nn.modules.activation.ReLU'>
registered <class 'torch.nn.modules.conv.Conv2d'>
registered <class 'torch.nn.modules.activation.ReLU'>
registered <class 'torch.nn.modules.linear.Linear'>
Inferred reshape on op num 4
/home/giulio/Documents/Projects/AI2_for_ART/adversarial-robustness-toolbox/art/estimators/certification/deep_z/pytorch.py:90: UserWarning:
This estimator does not support networks which have dense layers before convolutional. We currently infer a reshape when a neural network goes from convolutional layers to dense layers. If your use case does not fall into this pattern then consider directly building a certifier network with the custom layers found in art.estimators.certification.deepz.deep_z.py
"\nThis estimator does not support networks which have dense layers before convolutional. "
```python
# Lets now see how robust our model is!
# First we need to define what bound we need to check.
# Here let's check for L infinity robustness with small bound of 0.05
bound = 0.05
num_certified = 0
num_correct = 0
# lets now loop over the data to check its certified robustness:
# we need to consider a single sample at a time as due to memory and compute footprints batching is not supported.
# In this demo we will look at the first 50 samples of the MNIST test data.
original_x = np.copy(x_test)
for i, (sample, pred, label) in enumerate(zip(x_test[:50], test_preds[:50], y_test[:50])):
# we make the matrix representing the allowable perturbations.
# we have 28*28 features and each one can be manipulated independently requiring a different row.
# hence a 784*784 matrix.
eps_bound = np.eye(784) * bound
# we then need to adjust the raw data with the eps bounds to take into account
# the allowable range of 0 - 1 for pixel data.
# We provide a simple function to do this preprocessing for image data.
# However if your use case is not supported then a custom pre-processor function will need to be written.
sample, eps_bound = zonotope_model.pre_process(cent=sample,
eps=eps_bound)
sample = np.expand_dims(sample, axis=0)
# We pass the data sample and the eps bound to the certifier along with the prediction that was made
# for the datapoint.
# A boolean is returned signifying if it can have its class changed under the given bound.
is_certified = zonotope_model.certify(cent=sample,
eps=eps_bound,
prediction=pred)
if pred == label:
num_correct +=1
if is_certified:
num_certified +=1
print('Classified Correct {}/{} and also certified {}/{}'.format(num_correct, i+1, num_certified, i+1))
```
Classified Correct 1/1 and also certified 1/1
Classified Correct 2/2 and also certified 2/2
Classified Correct 3/3 and also certified 2/3
Classified Correct 4/4 and also certified 3/4
Classified Correct 5/5 and also certified 4/5
Classified Correct 6/6 and also certified 4/6
Classified Correct 7/7 and also certified 4/7
Classified Correct 8/8 and also certified 4/8
Classified Correct 9/9 and also certified 4/9
Classified Correct 10/10 and also certified 4/10
Classified Correct 11/11 and also certified 5/11
Classified Correct 12/12 and also certified 6/12
Classified Correct 13/13 and also certified 7/13
Classified Correct 14/14 and also certified 8/14
Classified Correct 15/15 and also certified 9/15
Classified Correct 16/16 and also certified 10/16
Classified Correct 17/17 and also certified 11/17
Classified Correct 18/18 and also certified 12/18
Classified Correct 19/19 and also certified 12/19
Classified Correct 20/20 and also certified 13/20
Classified Correct 21/21 and also certified 14/21
Classified Correct 22/22 and also certified 14/22
Classified Correct 23/23 and also certified 15/23
Classified Correct 24/24 and also certified 16/24
Classified Correct 25/25 and also certified 16/25
Classified Correct 26/26 and also certified 17/26
Classified Correct 27/27 and also certified 18/27
Classified Correct 28/28 and also certified 19/28
Classified Correct 29/29 and also certified 20/29
Classified Correct 30/30 and also certified 20/30
Classified Correct 31/31 and also certified 21/31
Classified Correct 32/32 and also certified 21/32
Classified Correct 33/33 and also certified 22/33
Classified Correct 34/34 and also certified 22/34
Classified Correct 35/35 and also certified 23/35
Classified Correct 36/36 and also certified 24/36
Classified Correct 37/37 and also certified 25/37
Classified Correct 38/38 and also certified 25/38
Classified Correct 39/39 and also certified 26/39
Classified Correct 40/40 and also certified 26/40
Classified Correct 41/41 and also certified 26/41
Classified Correct 42/42 and also certified 26/42
Classified Correct 43/43 and also certified 27/43
Classified Correct 44/44 and also certified 27/44
Classified Correct 45/45 and also certified 28/45
Classified Correct 46/46 and also certified 28/46
Classified Correct 47/47 and also certified 28/47
Classified Correct 48/48 and also certified 29/48
Classified Correct 49/49 and also certified 30/49
Classified Correct 50/50 and also certified 31/50
```python
# we can then compare this to the empirical PGD performance
from art.estimators.classification import PyTorchClassifier
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent
classifier = PyTorchClassifier(
model=model,
clip_values=(0.0, 1.0),
loss=criterion,
optimizer=opt,
input_shape=(1, 28, 28),
nb_classes=10,
)
attack = ProjectedGradientDescent(classifier, eps=0.05, eps_step=0.01, verbose=False)
x_train_adv = attack.generate(x_test[:50].astype('float32'))
y_adv_pred = classifier.predict(torch.from_numpy(x_train_adv).float().to(device))
y_adv_pred = np.argmax(y_adv_pred, axis=1)
print('Test acc: ', np.mean(y_adv_pred == y_test[:50]) * 100)
```
Test acc: 92.0
We can see that the empirical test accuracy is much higher than the certifiable performance. This is because with certifiable techniques we are providing a lower bound on the performance: there may well be datapoints that the certifier says are unsafe, but that in fact cannot have their class changed.
|
{"hexsha": "54a00e1ceb8b5218c681e3480af4e47712f3dc1a", "size": 63901, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/certification_deepz.ipynb", "max_stars_repo_name": "david-shmailov/adversarial-robustness-toolbox", "max_stars_repo_head_hexsha": "ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-31T15:17:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T15:17:20.000Z", "max_issues_repo_path": "notebooks/certification_deepz.ipynb", "max_issues_repo_name": "david-shmailov/adversarial-robustness-toolbox", "max_issues_repo_head_hexsha": "ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-18T00:41:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T00:41:02.000Z", "max_forks_repo_path": "notebooks/certification_deepz.ipynb", "max_forks_repo_name": "david-shmailov/adversarial-robustness-toolbox", "max_forks_repo_head_hexsha": "ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-22T05:30:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T05:30:31.000Z", "avg_line_length": 151.0661938534, "max_line_length": 46984, "alphanum_fraction": 0.8614732164, "converted": true, "num_tokens": 3072}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.