# -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 16:02:21 2018
@author: hecc
"""
import numpy as np
from itertools import combinations, permutations
def get_distance_matrix(pos):
    n = np.shape(pos)[0]
    d = np.zeros((n, n))
    for ii in range(n):
        for jj in range(ii + 1, n):
            d[ii, jj] = np.linalg.norm(pos[ii] - pos[jj])
    d = d + d.T
    return d
def get_three_permutation(d, atoms, pres=1e-3):
    n = np.shape(d)[0]
    all_comb = combinations(range(n), 3)
    all_tri = []
    corre_comb = []
    for comb in all_comb:
        temp = [d[comb[0], comb[1]], d[comb[0], comb[2]], d[comb[1], comb[2]]]
        all_tri.append(sorted(temp))
        corre_comb.append(comb)
    all_tri = np.array(all_tri).reshape(-1, 3)
    corre_comb = np.array(corre_comb).reshape(-1, 3)
    n_all_tri = np.shape(all_tri)[0]
    temp_d = np.linalg.norm(
        all_tri - np.tile(all_tri[0], (n_all_tri, 1)), axis=1)
    ind = np.where(temp_d < pres)
    poss_com = corre_comb[ind]
    # At this point we know that atoms `1 2 3` can transform into the atoms in
    # `ind`, but we do not yet know precisely which atom corresponds to which.
    all_perms = []
    for sub_com in poss_com:
        temp = list(permutations(sub_com))
        all_perms.append(temp)
    all_perms = np.array(all_perms).reshape(-1, 3)
    # check each candidate permutation against the 3x3 sub distance matrix
    sub_d_matrix = d[0:3, 0:3]
    tri_symm = []
    for perm in all_perms:
        dd = (sub_d_matrix[0, 1] - d[perm[0], perm[1]])**2 + \
             (sub_d_matrix[0, 2] - d[perm[0], perm[2]])**2 + \
             (sub_d_matrix[1, 2] - d[perm[1], perm[2]])**2
        if dd < pres and atoms[:3] == [atoms[perm[0]],
                                       atoms[perm[1]], atoms[perm[2]]]:
            tri_symm.append(perm)
    tri_symm = np.array(tri_symm).reshape(-1, 3)
    return tri_symm
def get_new_symm(d, origin_d, tri_symm, atoms, pres=1e-3):
    new_tri_symm = []
    n = np.shape(d)[0]
    for each_symm in tri_symm:
        left_index = np.setdiff1d(range(n), each_symm)
        for index in left_index:
            nn = np.shape(origin_d)[0]
            new_d = np.zeros((nn + 1, nn + 1))
            new_d[0:nn, 0:nn] = origin_d
            new_d[nn, 0:nn] = d[index, each_symm]
            new_d[0:nn, nn] = d[each_symm, index]
            temp_dis = np.linalg.norm(new_d - d[0:nn + 1, 0:nn + 1])
            if temp_dis < pres and atoms[nn] == atoms[index]:
                new_tri_symm.append(np.hstack((each_symm, index)))
                break
    return new_tri_symm
def get_permutations(pos, atoms, pres=1e-3):
    '''
    This algorithm was mainly derived by Chaobin Qiu.
    '''
    d = get_distance_matrix(pos)
    tri_symm = get_three_permutation(d, atoms, pres=pres)
    n = np.shape(d)[0]
    for ii in range(n - 3):
        origin_d = d[0:ii + 3, 0:ii + 3]
        tri_symm = get_new_symm(d, origin_d, tri_symm, atoms, pres=pres)
    return np.array(tri_symm)
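# Usage sketch (illustrative, not part of the original module): find the
# permutation symmetries of an equilateral triangle of identical atoms.
# All pairwise distances are equal, so all 6 permutations of S_3 survive.
# pos = np.array([[0., 0., 0.], [1., 0., 0.], [0.5, np.sqrt(3) / 2, 0.]])
# atoms = ['H', 'H', 'H']
# perms = get_permutations(pos, atoms)   # shape (6, 3)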
|
{"hexsha": "6a47d5156a40b4de6d411aadbf840fd14e3ee74c", "size": 2950, "ext": "py", "lang": "Python", "max_stars_repo_path": "sagar/molecule/symmetry.py", "max_stars_repo_name": "unkcpz/sagar", "max_stars_repo_head_hexsha": "097a9e77200d79e40c45c2741c9c1e61a1013b22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-05T10:40:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-18T01:09:20.000Z", "max_issues_repo_path": "sagar/molecule/symmetry.py", "max_issues_repo_name": "unkcpz/sagar", "max_issues_repo_head_hexsha": "097a9e77200d79e40c45c2741c9c1e61a1013b22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-10-17T07:48:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-04T13:39:07.000Z", "max_forks_repo_path": "sagar/molecule/symmetry.py", "max_forks_repo_name": "unkcpz/pyyabc", "max_forks_repo_head_hexsha": "097a9e77200d79e40c45c2741c9c1e61a1013b22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-03T08:15:42.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-28T05:45:33.000Z", "avg_line_length": 33.908045977, "max_line_length": 78, "alphanum_fraction": 0.5776271186, "include": true, "reason": "import numpy", "num_tokens": 926}
|
"""
.. module:: instrument
:platform: Unix
:synopsis: functions describing behaviour of instrument spectra.
.. moduleauthor: Ben Thorne <ben.thorne@physics.ox.ac.uk>
"""
import numpy as np
from .foreground import fg_res_sys, dust_cl, synch_cl
def N_ell(ell, beam, sens):
    """Gaussian white-noise spectrum for a given beam and sensitivity.

    Parameters
    ----------
    ell : array_like(int, ndim=1)
        Multipole range to consider.
    beam : float
        Beam FWHM in arcminutes.
    sens : float
        Sensitivity in X per arcmin, where X is the unit of temperature.

    Returns
    -------
    array_like
        Noise spectrum, deconvolved with the beam.
    """
    # convert beam FWHM from arcminutes to radians.
    b_rad = beam / 60. * np.pi / 180.
    beam = np.exp(ell * (ell + 1.) * b_rad ** 2 / (8. * np.log(2)))
    noise_level = sens ** 2 * (1. / 60. * np.pi / 180.) ** 2
    return beam * noise_level
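# Example (hedged; the numbers are hypothetical): white-noise spectrum for a
# 30 arcmin beam at 5 uK-arcmin sensitivity.
# ell = np.arange(2, 2001)
# nl = N_ell(ell, beam=30., sens=5.)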
def instrument(nus, p_sens, beams, map_res, lmin=0, lmax=2500):
    """Function to calculate the noise spectrum of a given set of instrument
    specifications.

    This function rolls the instrumental noise and the foreground residual
    contributions into a single effective noise spectrum for the CMB channels.

    Parameters
    ----------
    nus : array_like(float, ndim=1)
        Observing frequencies in GHz.
    p_sens : array_like(float, ndim=1)
        Sensitivities in uK_amin.
    beams : array_like(float, ndim=1)
        Beam FWHM in arcminutes.
    map_res : float
        Level of foreground residuals in map space.
    lmin : int, optional (default=0)
        Minimum multipole to consider.
    lmax : int, optional (default=2500)
        Maximum multipole to consider.

    Returns
    -------
    array_like(float, ndim=1)
        Instrument and foreground polarization noise spectrum.

    Notes
    -----
    This is an implementation of the model described in [1].

    References
    ----------
    .. [1] Thorne, B., Fujita, T., Hazumi, M., Katayama, N., Komatsu, E.,
        Shiraishi, M., arXiv e-prints (2017): 1707.03240.
    """
    specs = list(zip(nus, p_sens, beams))
    # Filter lists to include only CMB channels. The cmb_filter contains
    # the definitions of lower and upper channels used for CMB analysis. The
    # other channels are assumed to go into foreground removal.
    cmb_filter = lambda spec: (spec[0] > 90.) & (spec[0] < 220.)
    fg_filter = lambda spec: not cmb_filter(spec)
    cmb_nu, cmb_p_sens, cmb_beams = zip(*filter(cmb_filter, specs))
    fg_nu, fg_p_sens, fg_beams = zip(*filter(fg_filter, specs))
    N_chan = len(fg_nu)
    nu_S_ref = np.min(fg_nu)
    nu_D_ref = np.max(fg_nu)
    # Compute the original noise in the foreground channels.
    ell = np.arange(lmin, lmax + 1)
    fg_N_ell_p = [N_ell(ell, beam, p_sens)
                  for (beam, p_sens) in zip(fg_beams, fg_p_sens)]
    cmb_N_ell_p = [N_ell(ell, beam, p_sens)
                   for (beam, p_sens) in zip(cmb_beams, cmb_p_sens)]
    # Compute the foreground residual spectra and template noise
    # scaled to the CMB channels. Foreground parameters used in calculating
    # the residual fg noise:
    A_S = 6.3e-18
    alpha_S = -3.
    beta_S = -2.6
    nu_S_0 = 30.
    ell_S_0 = 350.
    A_D = 1.3e-13
    alpha_D = 2.2
    beta_D = -2.5
    nu_D_0 = 94.
    ell_D_0 = 10.
    T = 18.
    p_d = 0.15
    cmb_channel_fgnd_res = lambda nu: \
        (dust_cl(nu, ell, p_d, T, A_D, alpha_D, beta_D, nu_D_0, ell_D_0) +
         synch_cl(nu, ell, A_S, alpha_S, beta_S, nu_S_0, ell_S_0)) * map_res ** 2 + \
        fg_res_sys(nu, nu_S_ref, alpha_S, nu_D_ref, alpha_D, N_chan, fg_N_ell_p)
    # Now compute the combined foreground residual contribution and systematic
    # residual contribution of foreground channels to the cmb channels.
    fg_cmb_noise_spec = [cmb_channel_fgnd_res(nu) for nu in cmb_nu]
    # Now sum the foreground and cmb channel noise contributions to the CMB
    # channel noise.
    total_channel_noise_spec_p = [c_p + fg for c_p, fg in
                                  zip(cmb_N_ell_p, fg_cmb_noise_spec)]
    # We can now compute the final noise spectrum over the CMB channels,
    # treating each ell as an independent Gaussian variate and combining the
    # channels by inverse-variance weighting.
    N_ell_p = 1. / sum([1. / n_l for n_l in total_channel_noise_spec_p])
    return N_ell_p
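# Usage sketch (illustrative; frequencies, sensitivities and beams below are
# hypothetical, not an actual instrument specification):
# nus = np.array([30., 95., 150., 220., 280.])
# p_sens = np.array([10., 5., 5., 8., 12.])
# beams = np.array([60., 30., 25., 20., 15.])
# nl = instrument(nus, p_sens, beams, map_res=0.01)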
|
{"hexsha": "5697ed0bb1b7a96c03acb7c887b01ab78928b304", "size": 4384, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyranha/instrument.py", "max_stars_repo_name": "bthorne93/pyranha", "max_stars_repo_head_hexsha": "3803b2e87129906c018e20876ba25c0e097d6c25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-23T10:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-23T10:39:44.000Z", "max_issues_repo_path": "pyranha/instrument.py", "max_issues_repo_name": "bthorne93/pyranha", "max_issues_repo_head_hexsha": "3803b2e87129906c018e20876ba25c0e097d6c25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyranha/instrument.py", "max_forks_repo_name": "bthorne93/pyranha", "max_forks_repo_head_hexsha": "3803b2e87129906c018e20876ba25c0e097d6c25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6422764228, "max_line_length": 93, "alphanum_fraction": 0.6293339416, "include": true, "reason": "import numpy", "num_tokens": 1267}
|
import numpy as np
import pytest
from dnnv.nn import OperationGraph, operations
from dnnv.nn.transformers.slicers import DropPrefix, Slicer
@pytest.fixture
def op_graph():
    input_op = operations.Input(np.array([1, 5]), np.dtype(np.float32))
    mul_op = operations.Mul(input_op, np.float32(1))
    div_op = operations.Div(input_op, np.float32(2))
    add_op = operations.Add(mul_op, div_op)
    relu_op = operations.Relu(add_op)
    sub_op = operations.Sub(np.zeros((1, 5), dtype=np.float32), relu_op)
    op_graph = OperationGraph([sub_op])
    return op_graph
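# The fixture builds a diamond-shaped graph (read off the constructor calls
# above):
#
#   Input --> Mul ---+
#     |              +--> Add --> Relu --> Sub
#     +----> Div ----+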
def test_slicer_stop_0(op_graph):
    output_ops = op_graph.walk(Slicer(None, 0))[0]
    assert len(output_ops) == 0


def test_slicer_all(op_graph):
    output_ops = op_graph.walk(Slicer(None, None))[0]
    assert len(output_ops) == 1
    output_op = output_ops[0]
    assert isinstance(output_op, operations.Sub)
    assert isinstance(output_op.b, operations.Relu)
    assert isinstance(output_op.b.x, operations.Add)
    assert isinstance(output_op.b.x.a, operations.Mul)
    assert isinstance(output_op.b.x.a.a, operations.Input)
    assert isinstance(output_op.b.x.b, operations.Div)
    assert isinstance(output_op.b.x.b.a, operations.Input)
    assert output_op.b.x.a.a is output_op.b.x.b.a


def test_slicer_end(op_graph):
    output_ops = op_graph.walk(Slicer(-1, None))[0]
    assert len(output_ops) == 1
    output_op = output_ops[0]
    assert isinstance(output_op, operations.Sub)
    assert isinstance(output_op.b, operations.Input)


def test_slicer_cycle():
    input_op = operations.Input(np.array([1, 5]), np.dtype(np.float32))
    mul_op = operations.Mul(input_op, np.float32(1))
    div_op = operations.Div(input_op, np.float32(2))
    add_op = operations.Add(mul_op, div_op)
    relu_op = operations.Relu(add_op)
    mul_op.b = relu_op
    op_graph = OperationGraph([relu_op])
    with pytest.raises(ValueError, match="Slicing cyclic graphs is not supported."):
        output_ops = op_graph.walk(Slicer(None, None))[0]


def test_drop_prefix(op_graph):
    prefix = op_graph[:3]
    output_ops = op_graph.walk(DropPrefix(prefix))
    assert len(output_ops) == 1
    output_op = output_ops[0]
    assert isinstance(output_op, operations.Sub)
    assert isinstance(output_op.b, operations.Relu)
    assert isinstance(output_op.b.x, operations.Input)
{"hexsha": "eee621b8bea122d16150087c883a0c7482c89e98", "size": 2345, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit_tests/test_nn/test_transformers/test_slicers.py", "max_stars_repo_name": "samysweb/dnnv", "max_stars_repo_head_hexsha": "58fb95b7300914d9da28eed86c39eca473b1aaef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-28T20:30:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T09:26:52.000Z", "max_issues_repo_path": "tests/unit_tests/test_nn/test_transformers/test_slicers.py", "max_issues_repo_name": "samysweb/dnnv", "max_issues_repo_head_hexsha": "58fb95b7300914d9da28eed86c39eca473b1aaef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2022-01-27T03:50:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T18:42:17.000Z", "max_forks_repo_path": "tests/unit_tests/test_nn/test_transformers/test_slicers.py", "max_forks_repo_name": "samysweb/dnnv", "max_forks_repo_head_hexsha": "58fb95b7300914d9da28eed86c39eca473b1aaef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-03T17:32:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T16:38:49.000Z", "avg_line_length": 32.5694444444, "max_line_length": 84, "alphanum_fraction": 0.7181236674, "include": true, "reason": "import numpy", "num_tokens": 599}
|
import numpy as np
import conect
import os
import sys
import operator
from time import time
"""
Este programilla se encarga de analizar el accuracy, precision, recall y F1 Score sobre los resultados generados
en el entrenamiento con corte 1 año previo a la BBDD actual.
Consideramos el estado de un predicción como:
TP - True Positive: El usuario ha hecho a posteriori uno de los problemas que el recomendador ha recomendado
FP - False Positive: El usuario no ha hecho a posteriori uno de los problemas que el recomendador ha recomendado
FN - False Negative: El usuario ha hecho a posteriori uno de los problemas que el recomendador no ha recomendado
TN - True Negative: El usuario no ha hecho a posteriori uno de los problemas que el recomendador no ha recomendado (No lo vamos a considerar para nuestras métricas).
**Para cada caso habría que descartar aquellos usuarios que NO han realizado entregas
Calculo sobre diferentes métricas:
- accuracy: todo. comentar. (DE MOMENTO ESTA NO LA CALCULAMOS)
- precision: TP / TP + FP
- recall: TP / TP + FN
- f1 Score: 2*(Recall*Precision)/(Recall + Precision)
#Contar conjunto de usuarios validos para calcular la precisión
#Calcular con TOP - 1, 3, 5, 10... N
#calcular con filtrar N mas similares de todos, todos/2 todos/3...
ESTO ES SUPER RANDOM, O SEA, SI CAMBIAMOS EL CORTE A OTRAS FECHAS, ETC, ETC, ETC, LAS METRICAS VARIAN MAZO...:
Añadir en el tfg esto, y por lo tanto determinar que sirva solo para comparar entre recomendadores.
"""
TopN = 1
# TODO: decide how to store these data in memory
fguardo = "resultados_TOP1-12_optimized_pedro_500.txt"
guardo = open(fguardo, "w")

# Connect to the database
db = conect.JuezDB()

while TopN <= 12:
    guardo.write("======TOP " + str(TopN) + " ============\n")
    # Global variables
    # ==================
    TP = 0
    FP = 0
    FN = 0
    TN = 0  # Not considered for the moment
    totalUsers = 0
    descartados = 0  # discarded users
    aceptados = 0    # accepted users
    hits = 0
    # =================
    fichero = "resultadosV3_entrenamiento500.txt"
    # =================
    leo = open(fichero, "r")

    # while we have not reached the end of the file
    usuarioString = leo.readline()
    while usuarioString:
        usuarioId = int(usuarioString[3:len(usuarioString)])
        hastaTop = 1
        listaProblemas = []
        linea = leo.readline()
        while len(linea) > 1 and linea[0] != "[":
            # while we have not found the next user...
            tamIdProb = linea.find("=")
            idProb = int(linea[0:tamIdProb])  # id of the recommended problem
            pesoProb = float(linea[tamIdProb + 1:len(linea)])  # weight of the problem
            if hastaTop <= TopN:
                listaProblemas.append(idProb)
            hastaTop = hastaTop + 1
            linea = leo.readline()
        # Read the next user and hand it to usuarioString at the end of the iteration
        linea = leo.readline()
        # Get the list of problems solved afterwards
        problemasPosteriori = db._obtenerEntregasValidasDeUserPostTraining(usuarioId)
        # Discard the user if they have not solved a minimum of TopN problems.
        # Otherwise, check whether each value of listaProblemas exists in
        # problemasPosteriori.
        if problemasPosteriori.size >= TopN:
            posRecomendados = 0
            while posRecomendados < len(listaProblemas):
                enc = False
                posPosterioriList = 0
                while posPosterioriList < problemasPosteriori.size:
                    if listaProblemas[posRecomendados] == problemasPosteriori[posPosterioriList]:
                        # Increment true positives (definition at the top of the file)
                        TP = TP + 1
                        enc = True
                    if posPosterioriList == problemasPosteriori.size - 1 and enc == False:
                        # Increment false positives (definition at the top of the file)
                        FP = FP + 1
                    posPosterioriList = posPosterioriList + 1
                posRecomendados = posRecomendados + 1
            # Compute the false negative cases...
            posPosterioriList = 0
            while posPosterioriList < problemasPosteriori.size:
                enc = False
                posRecomendados = 0
                while posRecomendados < len(listaProblemas):
                    if listaProblemas[posRecomendados] == problemasPosteriori[posPosterioriList]:
                        enc = True
                    if posRecomendados == len(listaProblemas) - 1 and enc == False:
                        # Increment false negatives
                        FN = FN + 1
                    posRecomendados = posRecomendados + 1
                posPosterioriList = posPosterioriList + 1
            aceptados = aceptados + 1
        else:
            descartados = descartados + 1
        totalUsers = totalUsers + 1
        cteUsers = 6533
        print("TOP " + str(TopN) + ": " + str((totalUsers / cteUsers) * 100) + "%")
        # Iterate again
        usuarioString = linea
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    f1Score = 2 * (recall * precision) / (recall + precision)
    guardo.write("Total Users: " + str(totalUsers) + "\n")
    guardo.write("Aceptados: " + str(aceptados) + "\n")
    guardo.write("Descartados: " + str(descartados) + "\n")
    guardo.write("====================\n")
    guardo.write("Precision: " + str(precision) + "\n")
    guardo.write("Recall: " + str(recall) + "\n")
    guardo.write("f1 score: " + str(f1Score) + "\n")
    guardo.write("====================\n")
    guardo.write("True positive: " + str(TP) + "\n")
    guardo.write("False positive: " + str(FP) + "\n")
    guardo.write("False negative: " + str(FN) + "\n")
    leo.close()
    TopN = TopN + 1
guardo.close()
|
{"hexsha": "2c51b714c803fb6b1ac7a2bf2a1bea152681e5a8", "size": 6083, "ext": "py", "lang": "Python", "max_stars_repo_path": "recomendador1v2_training/otrosEjecutables/analizador.py", "max_stars_repo_name": "alfonsoelmas/recomendadores", "max_stars_repo_head_hexsha": "b7bf79be99253a8ffe34c98e661a5e0e64cd0795", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "recomendador1v2_training/otrosEjecutables/analizador.py", "max_issues_repo_name": "alfonsoelmas/recomendadores", "max_issues_repo_head_hexsha": "b7bf79be99253a8ffe34c98e661a5e0e64cd0795", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recomendador1v2_training/otrosEjecutables/analizador.py", "max_forks_repo_name": "alfonsoelmas/recomendadores", "max_forks_repo_head_hexsha": "b7bf79be99253a8ffe34c98e661a5e0e64cd0795", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5, "max_line_length": 173, "alphanum_fraction": 0.5854019398, "include": true, "reason": "import numpy", "num_tokens": 1577}
|
#! /usr/bin/env python
import cv2
import numpy as np
import scipy.spatial as spatial
import logging


## 3D Transform

def bilinear_interpolate(img, coords):
    """ Interpolates over every image channel
    http://en.wikipedia.org/wiki/Bilinear_interpolation

    :param img: max 3 channel image
    :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
    :returns: array of interpolated pixels with same shape as coords
    """
    int_coords = np.int32(coords)
    x0, y0 = int_coords
    dx, dy = coords - int_coords

    # 4 neighbouring pixels
    q11 = img[y0, x0]
    q21 = img[y0, x0 + 1]
    q12 = img[y0 + 1, x0]
    q22 = img[y0 + 1, x0 + 1]

    btm = q21.T * dx + q11.T * (1 - dx)
    top = q22.T * dx + q12.T * (1 - dx)
    inter_pixel = top * dy + btm * (1 - dy)

    return inter_pixel.T
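# Worked example (hedged, not part of the original file): on the 2x2
# single-channel image below, the point (x, y) = (0.5, 0.5) interpolates to
# the average of all four pixels:
# img = np.array([[0., 10.], [20., 30.]])
# coords = np.array([[0.5], [0.5]])
# bilinear_interpolate(img, coords)  # -> array([15.])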
def grid_coordinates(points):
    """ x,y grid coordinates within the ROI of supplied points

    :param points: points to generate grid coordinates
    :returns: array of (x, y) coordinates
    """
    xmin = np.min(points[:, 0])
    xmax = np.max(points[:, 0]) + 1
    ymin = np.min(points[:, 1])
    ymax = np.max(points[:, 1]) + 1

    return np.asarray([(x, y) for y in range(ymin, ymax)
                       for x in range(xmin, xmax)], np.uint32)


def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
    """
    Warp each triangle from the src_image only within the
    ROI of the destination image (points in dst_points).
    """
    roi_coords = grid_coordinates(dst_points)
    # indices to vertices. -1 if pixel is not in any triangle
    roi_tri_indices = delaunay.find_simplex(roi_coords)

    for simplex_index in range(len(delaunay.simplices)):
        coords = roi_coords[roi_tri_indices == simplex_index]
        num_coords = len(coords)
        out_coords = np.dot(tri_affines[simplex_index],
                            np.vstack((coords.T, np.ones(num_coords))))
        x, y = coords.T
        result_img[y, x] = bilinear_interpolate(src_img, out_coords)

    return None


def triangular_affine_matrices(vertices, src_points, dst_points):
    """
    Calculate the affine transformation matrix for each
    triangle (x,y) vertex from dst_points to src_points

    :param vertices: array of triplet indices to corners of triangle
    :param src_points: array of [x, y] points to landmarks for source image
    :param dst_points: array of [x, y] points to landmarks for destination image
    :returns: 2 x 3 affine matrix transformation for a triangle
    """
    ones = [1, 1, 1]
    for tri_indices in vertices:
        src_tri = np.vstack((src_points[tri_indices, :].T, ones))
        dst_tri = np.vstack((dst_points[tri_indices, :].T, ones))
        mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :]
        yield mat


def warp_image_3d(src_img, src_points, dst_points, dst_shape, dtype=np.uint8):
    rows, cols = dst_shape[:2]
    result_img = np.zeros((rows, cols, 3), dtype=dtype)

    delaunay = spatial.Delaunay(dst_points)
    tri_affines = np.asarray(list(triangular_affine_matrices(
        delaunay.simplices, src_points, dst_points)))

    process_warp(src_img, result_img, tri_affines, dst_points, delaunay)

    return result_img
## 2D Transform
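# transformation_from_points below computes the least-squares similarity
# transform (scale, rotation, translation) mapping the first point set onto
# the second via orthogonal Procrustes / SVD, returned as a 3x3 homogeneous
# matrix.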
def transformation_from_points(points1, points2):
    points1 = points1.astype(np.float64)
    points2 = points2.astype(np.float64)

    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2

    U, S, Vt = np.linalg.svd(np.dot(points1.T, points2))
    R = (np.dot(U, Vt)).T

    return np.vstack([np.hstack([s2 / s1 * R,
                                 (c2.T - np.dot(s2 / s1 * R, c1.T))[:, np.newaxis]]),
                      np.array([[0., 0., 1.]])])


def warp_image_2d(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)

    return output_im


## Generate Mask
def mask_from_points(size, points, erode_flag=1):
    radius = 10  # kernel size
    kernel = np.ones((radius, radius), np.uint8)

    mask = np.zeros(size, np.uint8)
    cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
    if erode_flag:
        mask = cv2.erode(mask, kernel, iterations=1)

    return mask


## Color Correction
def correct_colours(im1, im2, landmarks1):
    COLOUR_CORRECT_BLUR_FRAC = 0.75
    LEFT_EYE_POINTS = list(range(42, 48))
    RIGHT_EYE_POINTS = list(range(36, 42))

    blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(
        np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur = im2_blur.astype(int)
    im2_blur += 128 * (im2_blur <= 1)

    result = im2.astype(np.float64) * im1_blur.astype(np.float64) / im2_blur.astype(np.float64)
    result = np.clip(result, 0, 255).astype(np.uint8)

    return result


## Copy-and-paste
def apply_mask(img, mask):
    """ Apply mask to supplied image

    :param img: max 3 channel image
    :param mask: [0-255] values in mask
    :returns: new image with mask applied
    """
    masked_img = cv2.bitwise_and(img, img, mask=mask)

    return masked_img


## Alpha blending
def alpha_feathering(src_img, dest_img, img_mask, blur_radius=15):
    mask = cv2.blur(img_mask, (blur_radius, blur_radius))
    mask = mask / 255.0

    result_img = np.empty(src_img.shape, np.uint8)
    for i in range(3):
        result_img[..., i] = src_img[..., i] * mask + dest_img[..., i] * (1 - mask)

    return result_img


def check_points(img, points):
    # Todo: I only consider one situation here.
    if points[8, 1] > img.shape[0]:
        logging.error("Jaw part out of image")
    else:
        return True
    return False
|
{"hexsha": "0d008f51f4c62166761bca36fb15829c69aba748", "size": 6220, "ext": "py", "lang": "Python", "max_stars_repo_path": "2DwithLandmarkFaceSwap/face_swap.py", "max_stars_repo_name": "ForrestPi/faceSwapProjects", "max_stars_repo_head_hexsha": "daf2649a2791a25aa541c4d6d3b7e1d6552be5d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-22T14:21:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-25T11:54:35.000Z", "max_issues_repo_path": "2DwithLandmarkFaceSwap/face_swap.py", "max_issues_repo_name": "ForrestPi/faceSwapProjects", "max_issues_repo_head_hexsha": "daf2649a2791a25aa541c4d6d3b7e1d6552be5d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-06-14T17:36:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T03:43:44.000Z", "max_forks_repo_path": "FaceSwap/face_swap.py", "max_forks_repo_name": "dannyqnguyen/eyewash", "max_forks_repo_head_hexsha": "b9fccc5f7ab8eccfc36d46e95da7083b307ecaff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-14T09:27:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-06T06:41:19.000Z", "avg_line_length": 31.1, "max_line_length": 95, "alphanum_fraction": 0.6371382637, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1771}
|
'''
Code from GroudSeg.py implementation found at the github repository:
https://github.com/mitkina/EnvironmentPrediction
For the implementation of Random Markov Field ground segmentation described in:
G. Postica, A. Romanoni, and M. Matteucci. Robust moving objects detection in LiDAR
data exploiting visual cues. In IEEE/RSJ International Conference on Intelligent Robots and
Systems (IROS), pages 1093-1098, 2016.
'''
import numpy as np
import math
import itertools
import copy
def ground_seg(point_cloud, height_lidar, res=1./3., s=0.09):
    num_points = point_cloud.shape[0]

    # generate 2-D grid of the LiDAR cloud
    max_index = math.sqrt(2.)*(128/3./2.+1.)

    # a 2D array that contains lists of 3D points in point_cloud that map to
    # a particular grid cell (according to the place of the 3D point in point_cloud)
    filler = np.frompyfunc(lambda x: list(), 1, 1)
    grid = np.empty((int(2 * math.ceil(max_index/res) + 1),
                     int(2 * math.ceil(max_index/res) + 1)), dtype=object)
    filler(grid, grid)

    # determine the center coordinate of the 2D grid
    center_x = int(math.ceil(max_index/res))
    center_y = int(math.ceil(max_index/res))

    for i in range(num_points):
        point = point_cloud[i, :]
        x = point[0]
        y = point[1]
        z = point[2]
        if (math.fabs(x) <= max_index) and (math.fabs(y) <= max_index) and (z <= 3.5):
            grid[int(center_x + round(x/res)), int(center_y + round(y/res))].append(i)

    h_G = np.nan*np.empty((grid.shape))

    # iterate radially outwards to compute if a point belongs to the ground (1) on mask grid
    grid_seg = np.zeros(grid.shape)

    # initialize the center coordinate of the 2D grid to ground
    points_z = np.ndarray.tolist(point_cloud[grid[center_x, center_y], 2])
    H = max(points_z or [np.nan])
    if not math.isnan(H):
        h_G[center_x, center_y] = H
    else:
        # initialize to the z-height of the LiDAR according to the KITTI set-up
        # h_G[center_x, center_y] = -2.184 - 0.05
        h_G[center_x, center_y] = -height_lidar

    # initialize the coordinates of the inner circle
    circle_inner = [[center_x, center_y]]

    # identify all the points that were labeled as not ground
    point_cloud_seg = np.empty((0, 3))

    for i in range(1, int(math.ceil(max_index/res))+1):
        # generate indices at the ith inner circle level
        circle_curr = generate_circle(i, center_x, center_y)
        for indices in circle_curr:
            x = indices[0]
            y = indices[1]
            # compute h_hat_G: find max h_G of neighbors
            neigh_indeces = np.array(get_neighbors(x, y, circle_inner))
            # compute the min and max z coordinates of each grid cell
            points_z = np.ndarray.tolist(point_cloud[grid[x, y], 2])
            H = max(points_z or [np.nan])
            h = min(points_z or [np.nan])
            h_hat_G = np.nanmax(h_G[neigh_indeces])
            if ((not np.isnan(H)) and (not np.isnan(h)) and
                    (H - h < s) and (H - h_hat_G < s)):
                grid_seg[x, y] = 1
                h_G[x, y] = copy.deepcopy(H)
            else:
                h_G[x, y] = copy.deepcopy(h_hat_G)
                # add to not ground points
                point_locations = grid[x, y]
                if point_locations != []:
                    point_cloud_seg = np.vstack((point_cloud_seg,
                                                 point_cloud[point_locations, :]))
        # update the inner circle indices
        circle_inner = copy.deepcopy(circle_curr)

    return point_cloud_seg
# return the indices of a circle at level i from the center of the grid
def generate_circle(i, center_x, center_y):
    circle_range = range(-1*i, i+1)
    circle = [list(x) for x in itertools.product(circle_range, circle_range)]
    circle = [[item[0]+center_x, item[1]+center_y] for item in circle
              if ((abs(item[0]) == i) or (abs(item[1]) == i))]
    return circle
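# For example, generate_circle(1, 0, 0) returns the 8 cells of the unit ring
# around the origin:
# [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]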
# get the inner circle neighbors of a point
def get_neighbors(x, y, circle_inner):
    neigh_indices = []
    for indices in circle_inner:
        if (abs(x - indices[0]) < 2) and (abs(y - indices[1]) < 2):
            neigh_indices.append(indices)
    return neigh_indices
|
{"hexsha": "1b56fb3e9fb65b1f5a10009604ee98712a48aa19", "size": 3843, "ext": "py", "lang": "Python", "max_stars_repo_path": "CODES_data_generation/ground_segmentation.py", "max_stars_repo_name": "sisl/Double-Prong-Occupancy", "max_stars_repo_head_hexsha": "8698b1c732c240fccaac7971b06de241af000229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-12-05T05:16:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T08:00:52.000Z", "max_issues_repo_path": "CODES_data_generation/ground_segmentation.py", "max_issues_repo_name": "sisl/Double-Prong-Occupancy", "max_issues_repo_head_hexsha": "8698b1c732c240fccaac7971b06de241af000229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-26T12:49:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-12T22:23:21.000Z", "max_forks_repo_path": "CODES_data_generation/ground_segmentation.py", "max_forks_repo_name": "sisl/Double-Prong-Occupancy", "max_forks_repo_head_hexsha": "8698b1c732c240fccaac7971b06de241af000229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-31T11:56:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T08:34:37.000Z", "avg_line_length": 32.025, "max_line_length": 117, "alphanum_fraction": 0.6937288577, "include": true, "reason": "import numpy", "num_tokens": 1108}
|
module PSDMatrices

import Base: \, /, size, inv, copy, copy!, ==, show, similar, Matrix
using LinearAlgebra
import LinearAlgebra: det, logabsdet, diag

struct PSDMatrix{T,FactorType} <: AbstractMatrix{T}
    R::FactorType
    PSDMatrix(R::AbstractMatrix{T}) where {T} = new{T,typeof(R)}(R)
end

# Base overloads
Matrix(M::PSDMatrix) = M.R' * M.R
size(M::PSDMatrix) = (size(M.R, 2), size(M.R, 2))
inv(M::PSDMatrix) = PSDMatrix(inv(M.R'))
\(A::PSDMatrix, B::AbstractVecOrMat) = A.R \ (A.R' \ B)
/(B::AbstractVecOrMat, A::PSDMatrix) = B / A.R / A.R'
copy(M::PSDMatrix) = PSDMatrix(copy(M.R))
similar(M::PSDMatrix, element_type::Type=eltype(M)) = PSDMatrix(similar(M.R, element_type))
copy!(dst::PSDMatrix, src::PSDMatrix) = (copy!(dst.R, src.R); dst)
==(M1::PSDMatrix, M2::PSDMatrix) = M1.R == M2.R  # todo: same as isequal()?!

function show(io::IO, M::PSDMatrix)
    print(io, "$(size(M,1))x$(size(M,2)) $(typeof(M)); R=")
    show(io, M.R)
end
function show(io::IO, m::MIME"text/plain", M::PSDMatrix)
    println(io, "$(size(M,1))x$(size(M,2)) $(typeof(M)) ")
    print(io, " Right square root: R=")
    show(io, m, M.R)
end

# LinearAlgebra overloads
function det(M::PSDMatrix)
    confirm_factor_is_square(M)
    return det(M.R)^2
end
function logabsdet(M::PSDMatrix)
    confirm_factor_is_square(M)
    _logabsdet, _sign = logabsdet(M.R)
    return 2 * _logabsdet, _sign^2
end
function diag(M::PSDMatrix)
    out = similar(M.R, size(M.R, 2))
    sum!(abs2, out', M.R)
    return out
end

function confirm_factor_is_square(M::PSDMatrix)
    if size(M.R, 1) != size(M.R, 2)
        msg = (
            "The requested operation is not available for a PSDMatrix with a non-square factor. " *
            "The factor of the received PSDMatrix has dimensions ($(size(M.R,1)), $(size(M.R,2))). " *
            "Try turning the PSDMatrix into a dense matrix first."
        )
        throw(MethodError(msg))
    end
end

# Custom functions
X_A_Xt(; A::PSDMatrix, X::AbstractMatrix) = PSDMatrix(A.R * X')
X_A_Xt(A::PSDMatrix, X::AbstractMatrix) = X_A_Xt(A=A, X=X)
function X_A_Xt!(out::PSDMatrix; A::PSDMatrix, X::AbstractMatrix)
    mul!(out.R, A.R, X')
    return out
end
X_A_Xt!(out::PSDMatrix, A::PSDMatrix, X::AbstractMatrix) = X_A_Xt!(out, A=A, X=X)

function add_cholesky(A::PSDMatrix, B::PSDMatrix)
    sum_dense = Matrix(A) + Matrix(B)
    factor = cholesky(sum_dense).U
    return PSDMatrix(factor)
end
function add_qr(A::PSDMatrix, B::PSDMatrix)
    stack = vcat(A.R, B.R)
    matrix = PSDMatrix(stack)
    return triangularize_factor(matrix)
end
function triangularize_factor(M::PSDMatrix)
    R = qr(M.R).R
    return PSDMatrix(UpperTriangular(R))
end

export PSDMatrix
export add_cholesky
export add_qr
export triangularize_factor
export X_A_Xt
export X_A_Xt!
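# Usage sketch (illustrative, not part of the package API):
#   R = [1.0 2.0; 0.0 3.0]
#   M = PSDMatrix(R)   # represents R' * R == [1.0 2.0; 2.0 13.0]
#   diag(M)            # [1.0, 13.0]
#   inv(M)             # PSDMatrix whose dense form is inv(R' * R)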
end
|
{"hexsha": "9c68c4056fe9bcec8647b2a2ebc51debde0bd2a6", "size": 2779, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PSDMatrices.jl", "max_stars_repo_name": "nathanaelbosch/PSDMats.jl", "max_stars_repo_head_hexsha": "d12423020f0f06fb0263f07430bace01a864063c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PSDMatrices.jl", "max_issues_repo_name": "nathanaelbosch/PSDMats.jl", "max_issues_repo_head_hexsha": "d12423020f0f06fb0263f07430bace01a864063c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PSDMatrices.jl", "max_forks_repo_name": "nathanaelbosch/PSDMats.jl", "max_forks_repo_head_hexsha": "d12423020f0f06fb0263f07430bace01a864063c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3571428571, "max_line_length": 102, "alphanum_fraction": 0.6628283555, "num_tokens": 916}
|
      PROGRAM FITHIST
C
      REAL HIST(14,4,100),HIST1(100),XAX(101)
      CHARACTER*120 JUNK
      INTEGER*4 NFHIST(14)
      REAL*8 SCETSTS,SCETEND
C
      OPEN(UNIT=89,FILE='FOR089.DAT',STATUS='OLD',READONLY)
      read (89,*) SCETSTS,SCETEND
      READ (89,*) NFHIST,NHISTST
C
  189 FORMAT(A)
      irxlim = 4
      irxlim = 2
      do irx = 1,irxlim
        do ifr = 1,14
          READ(89,189) JUNK
          PRINT*,JUNK
C         range is +10 to -90 dB for irxlim = 4 (TDSS)
C         and -10 to -110 for irxlim = 2 (TDSF)
C         no, now range is nhistst to nhistst + 99
          READ(89,*) (hist(ifr,irx,nhist),NHIST=1,100)
C         READ(89,1089) (hist(ifr,irx,nhist),NHIST=1,100)
 1089     format(10F8.0)
        enddo
      enddo
      CLOSE(UNIT=89)
      do nhist=1,101
        xax(nhist) = nhist + nhistst - 1
      enddo
C
      do irx = 1,4
        do ifr = 1,14
          DO N = 1,100
            HIST1(N) = HIST(IFR,IRX,N)
          ENDDO
c         CALL FITLOGNORM(IRX,IFR,HIST1)
        enddo
      enddo
      ITERM = 3
c     ITERM = -1
      CALL PLOTHIST(ITERM,NFHIST,XAX,HIST)
C
      STOP
      END
      SUBROUTINE FITLOGNORM(IRX,IFR,HIST)
C
      REAL HIST(100),MAX
C
      SUM = 0.
      MAX = 0.
      COUNT = 1.E-9
      STD = 0.
      DO N = 1,100
        SUM = SUM + N*HIST(N)
        STD = STD + (N**2)*HIST(N)
        COUNT = COUNT + HIST(N)
      ENDDO
      AVR = SUM/COUNT
      STD = SQRT(STD/COUNT - AVR**2)
      SIGMA = .7071*STD
      AMP = 2.*SUM/SIGMA**2
C
C     DETERMINE TOP 12 PERCENTILE
C
      PCTILE = 0.
      GOAL = .12*COUNT
      DO N = 100,1,-1
        PCTSV = PCTILE
        PCTILE = PCTILE + HIST(N)
        IF(PCTILE.GT.GOAL) THEN
          EXCESS = PCTILE - GOAL
          CUT = N + 1 - EXCESS/HIST(N)
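C         DEV is the 12th-percentile cut expressed in standard
C         deviations above the mean, i.e. (CUT - AVR)/STD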
          DEV = -(AVR - CUT)/STD
          GO TO 100
        ENDIF
      ENDDO
  100 PRINT 69,IFR,IRX,COUNT,AVR,STD,AMP,DEV
   69 FORMAT(2I5,F8.0,E12.3,F7.1,2E12.3)
      RETURN
      END
      SUBROUTINE PLOTHIST(ITERM,NFHIST,XAX,HIST)
      REAL HIST(14,4,100),HIST1(100),XAX(101)
      INTEGER*4 NFHIST(14)
      CHARACTER*120 STR
      CHARACTER*1 DISPOSE
C
      COMMON /MONGOPAR/
     1  X1,X2,Y1,Y2,GX1,GX2,GY1,GY2,LX1,LX2,LY1,LY2,
     1  GX,GY,CX,CY,
     1  EXPAND,ANGLE,LTYPE,LWEIGHT,
     1  CHEIGHT,CWIDTH,CXDEF,CYDEF,PSDEF,PYDEF,COFF,
     1  TERMOUT,XYSWAPPED,NUMDEV,
     1  PI,USERVAR(10),AUTODOT
      INTEGER*4 LX1,LX2,LY1,LY2,LTYPE,LWEIGHT,NUMDEV,DDS
C
      DATA TWOPI /6.2831853/
C     DATA ITERM /3/
C
      DO IRX = 1,4
        CALL MGOINIT
        CALL MGOSETUP(ITERM)
        CALL MGOERASE
C
C       HALF WIDTH, 2/3 HEIGHT, LOOKED TERRIBLE
C       CALL MGOSETLOC(100.,100.,600.,600.)
C
        DO IFR = 1,12
          CALL MGOWINDOW(3,4,IFR)
          HMAX = 0
          DO IH = 1,100
            HIST1(IH) = HIST(IFR,IRX,IH)
            HMAX = AMAX1(HMAX,HIST1(IH))
          ENDDO
          CALL MGOSETLIM(XAX(1), 0.,XAX(100), 1.1*HMAX)
          CALL MGOHISTOGRAM(XAX,HIST1,100)
          CALL MGOBOX(1,2)
          EXSAVE = EXPAND
          CALL MGOSETEXPAND(.8)
          WRITE(STR,102) 120000.*NFHIST(IFR)/2048.
  102     FORMAT(F5.0,' Hz')
          CALL MGORELOCATE(-100.,.9*HMAX)
          CALL MGOLABEL(9,STR)
          CALL MGOSETEXPAND(EXSAVE)
        ENDDO
C       PRINT*,'IRX',IRX
        WRITE(STR,101) IRX
  101   FORMAT(I2)
        CALL MGOSETEXPAND(2.)
        CALL MGOPLOTID('[.WIND]FITHIST',STR)
        CALL MGOSETEXPAND(1.)
        IF(ITERM.LT.0) THEN
          CALL MGOPRNTPLOT(NVEC)
          PRINT*,' NO. VECTORS PLOTTED',NVEC
        ELSE
          READ(5,1234) DISPOSE
 1234     FORMAT(A)
          CALL MGOTCLOSE
        ENDIF
      ENDDO                     ! END OF IRX = 1,4 LOOP
C
      RETURN
      END
|
{"hexsha": "7697a0474783e56b009a2abffdbc656adad92e95", "size": 3164, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/fithist_47.for", "max_stars_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_stars_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/fithist_47.for", "max_issues_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_issues_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "WAVES_VMS_Fortran/PJK_Fortran/wind_dir/fithist_47.for", "max_forks_repo_name": "lynnbwilsoniii/Wind_Decom_Code", "max_forks_repo_head_hexsha": "ef596644fe0ed3df5ff3b462602e7550a04323e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6712328767, "max_line_length": 60, "alphanum_fraction": 0.6238938053, "num_tokens": 1474}
|
[STATEMENT]
lemma cycle_root_end_empty_var:
assumes "terminating_path_root_end r x e"
and "x \<noteq> 0"
shows "\<not> many_strongly_connected x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> many_strongly_connected x
[PROOF STEP]
using assms cycle_root_end_empty
[PROOF STATE]
proof (prove)
using this:
terminating_path_root_end r x e
x \<noteq> 0
\<lbrakk>terminating_path_root_end ?r ?x ?e; many_strongly_connected ?x\<rbrakk> \<Longrightarrow> ?x = 0
goal (1 subgoal):
1. \<not> many_strongly_connected x
[PROOF STEP]
by blast
|
{"llama_tokens": 224, "file": "Relational_Paths_Rooted_Paths", "length": 2}
|
/-
Copyright (c) 2022 Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémi Bottinelli
-/
import category_theory.category.basic
import category_theory.functor.basic
import category_theory.groupoid
import tactic.nth_rewrite
import category_theory.path_category
import category_theory.quotient
import combinatorics.quiver.symmetric
/-!
# Free groupoid on a quiver
This file defines the free groupoid on a quiver, the lifting of a prefunctor to its unique
extension as a functor from the free groupoid, and proves uniqueness of this extension.
## Main results
Given the type `V` and a quiver instance on `V`:
- `free_groupoid V`: a type synonym for `V`.
- `free_groupoid_groupoid`: the `groupoid` instance on `free_groupoid V`.
- `lift`: the lifting of a prefunctor from `V` to `V'` where `V'` is a groupoid, to a functor.
`free_groupoid V ⥤ V'`.
- `lift_spec` and `lift_unique`: the proofs that, respectively, `lift` indeed is a lifting
and is the unique one.
## Implementation notes
The free groupoid is first defined by symmetrifying the quiver, taking the induced path category
and finally quotienting by the reducibility relation.
-/
open set classical function
local attribute [instance] prop_decidable
namespace category_theory
namespace groupoid
namespace free
universes u v u' v' u'' v''
variables {V : Type u} [quiver.{v+1} V]
/-- Shorthand for the "forward" arrow corresponding to `f` in `paths $ symmetrify V` -/
abbreviation quiver.hom.to_pos_path {X Y : V} (f : X ⟶ Y) :
  ((category_theory.paths.category_paths $ quiver.symmetrify V).hom X Y) := f.to_pos.to_path

/-- Shorthand for the "backward" arrow corresponding to `f` in `paths $ symmetrify V` -/
abbreviation quiver.hom.to_neg_path {X Y : V} (f : X ⟶ Y) :
  ((category_theory.paths.category_paths $ quiver.symmetrify V).hom Y X) := f.to_neg.to_path

/-- The "reduction" relation -/
inductive red_step : hom_rel (paths (quiver.symmetrify V))
| step (X Z : quiver.symmetrify V) (f : X ⟶ Z) :
  red_step (𝟙 X) (f.to_path ≫ (quiver.reverse f).to_path)
/-- The underlying vertices of the free groupoid -/
def _root_.category_theory.free_groupoid (V) [Q : quiver V] := quotient (@red_step V Q)
instance {V} [Q : quiver V] [h : nonempty V] : nonempty (free_groupoid V) := ⟨⟨h.some⟩⟩
lemma congr_reverse {X Y : paths $ quiver.symmetrify V} (p q : X ⟶ Y) :
  quotient.comp_closure red_step p q →
  quotient.comp_closure red_step (p.reverse) (q.reverse) :=
begin
  rintro ⟨XW, pp, qq, WY, _, Z, f⟩,
  have : quotient.comp_closure red_step (WY.reverse ≫ 𝟙 _ ≫ XW.reverse)
    (WY.reverse ≫ (f.to_path ≫ (quiver.reverse f).to_path) ≫ XW.reverse),
  { apply quotient.comp_closure.intro,
    apply red_step.step, },
  simpa only [category_struct.comp, category_struct.id, quiver.path.reverse, quiver.path.nil_comp,
    quiver.path.reverse_comp, quiver.reverse_reverse, quiver.path.reverse_to_path,
    quiver.path.comp_assoc] using this,
end
lemma congr_comp_reverse {X Y : paths $ quiver.symmetrify V} (p : X ⟶ Y) :
  quot.mk (@quotient.comp_closure _ _ red_step _ _) (p ≫ p.reverse) =
  quot.mk (@quotient.comp_closure _ _ red_step _ _) (𝟙 X) :=
begin
  apply quot.eqv_gen_sound,
  induction p with _ _ q f ih,
  { apply eqv_gen.refl, },
  { simp only [quiver.path.reverse],
    fapply eqv_gen.trans,
    { exact q ≫ q.reverse, },
    { apply eqv_gen.symm, apply eqv_gen.rel,
      have : quotient.comp_closure
        red_step (q ≫ (𝟙 _) ≫ q.reverse)
        (q ≫ (f.to_path ≫ (quiver.reverse f).to_path) ≫ q.reverse), by
      { apply quotient.comp_closure.intro, apply red_step.step, },
      have that : q.cons f = q.comp f.to_path, by refl, rw that,
      simp only [category.assoc, category.id_comp] at this ⊢,
      simp only [category_struct.comp, quiver.path.comp_assoc] at this ⊢,
      exact this, },
    { exact ih }, },
end
lemma congr_reverse_comp {X Y : paths $ quiver.symmetrify V} (p : X ⟶ Y) :
  quot.mk (@quotient.comp_closure _ _ red_step _ _) (p.reverse ≫ p) =
  quot.mk (@quotient.comp_closure _ _ red_step _ _) (𝟙 Y) :=
begin
  nth_rewrite 1 ←quiver.path.reverse_reverse p,
  apply congr_comp_reverse,
end
instance : category (free_groupoid V) := quotient.category red_step
/-- The inverse of an arrow in the free groupoid -/
def quot_inv {X Y : free_groupoid V} (f : X ⟶ Y) : Y ⟶ X :=
quot.lift_on f
  (λ pp, quot.mk _ $ pp.reverse)
  (λ pp qq con, quot.sound $ congr_reverse pp qq con)

instance : groupoid (free_groupoid V) :=
{ inv := λ X Y f, quot_inv f,
  inv_comp' := λ X Y p, quot.induction_on p $ λ pp, congr_reverse_comp pp,
  comp_inv' := λ X Y p, quot.induction_on p $ λ pp, congr_comp_reverse pp }
/-- The inclusion of the quiver on `V` to the underlying quiver on `free_groupoid V`-/
def of (V) [quiver V] : V ⥤q (free_groupoid V) :=
{ obj := λ X, ⟨X⟩,
  map := λ X Y f, quot.mk _ f.to_pos_path }

lemma of_eq : of V =
  (quiver.symmetrify.of ⋙q paths.of).comp (quotient.functor $ @red_step V _).to_prefunctor :=
begin
  apply prefunctor.ext, rotate,
  { rintro X, refl, },
  { rintro X Y f, refl, }
end
section universal_property
variables {V' : Type u'} [groupoid V'] (φ : V ⥤q V')
/-- The lift of a prefunctor to a groupoid, to a functor from `free_groupoid V` -/
def lift (φ : V ⥤q V') : free_groupoid V ⥤ V' :=
quotient.lift _
  (paths.lift $ quiver.symmetrify.lift φ)
  (by { rintros _ _ _ _ ⟨X,Y,f⟩,
        simp only [quiver.symmetrify.lift_reverse, paths.lift_nil, quiver.path.comp_nil,
          paths.lift_cons, paths.lift_to_path],
        symmetry,
        apply groupoid.comp_inv, })

lemma lift_spec (φ : V ⥤q V') : of V ⋙q (lift φ).to_prefunctor = φ :=
begin
  rw [of_eq, prefunctor.comp_assoc, prefunctor.comp_assoc, functor.to_prefunctor_comp],
  dsimp [lift],
  rw [quotient.lift_spec, paths.lift_spec, quiver.symmetrify.lift_spec],
end
lemma lift_unique (φ : V ⥤q V') (Φ : free_groupoid V ⥤ V')
  (hΦ : of V ⋙q Φ.to_prefunctor = φ) : Φ = lift φ :=
begin
  apply quotient.lift_unique,
  apply paths.lift_unique,
  fapply @quiver.symmetrify.lift_unique _ _ _ _ _ _ _ _ _,
  { rw ←functor.to_prefunctor_comp, exact hΦ, },
  { constructor, rintros X Y f,
    simp only [←functor.to_prefunctor_comp, prefunctor.comp_map, paths.of_map, inv_eq_inv],
    change Φ.map (inv ((quotient.functor red_step).to_prefunctor.map f.to_path)) =
      inv (Φ.map ((quotient.functor red_step).to_prefunctor.map f.to_path)),
    have := functor.map_inv Φ ((quotient.functor red_step).to_prefunctor.map f.to_path),
    convert this; simp only [inv_eq_inv], },
end
end universal_property
section functoriality
variables {V' : Type u'} [quiver.{v'+1} V'] {V'' : Type u''} [quiver.{v''+1} V'']
/-- The functor of free groupoid induced by a prefunctor of quivers -/
def _root_.category_theory.free_groupoid_functor (φ : V ⥤q V') :
  free_groupoid V ⥤ free_groupoid V' := lift (φ ⋙q of V')

lemma free_groupoid_functor_comp
  (φ : V ⥤q V') (φ' : V' ⥤q V'') :
  free_groupoid_functor (φ ⋙q φ') = free_groupoid_functor φ ⋙ free_groupoid_functor φ' :=
begin
  dsimp only [free_groupoid_functor], symmetry,
  apply lift_unique, refl,
end
end functoriality
end free
end groupoid
end category_theory
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/category_theory/groupoid/free_groupoid.lean"}
|
[STATEMENT]
lemma imply_append: \<open>ps @ qs \<leadsto> r = ps \<leadsto> qs \<leadsto> r\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ps @ qs \<leadsto> r = ps \<leadsto> qs \<leadsto> r
[PROOF STEP]
by (induct ps) simp_all
|
{"llama_tokens": 108, "file": "Implicational_Logic_Implicational_Logic_Appendix", "length": 1}
|
#!/usr/bin/env python
# plot the geometric factor K vs. the dipole separation of a dipole-dipole
# configuration
import crtomo.configManager as CRc
import numpy as np
# from crtomo.mpl_setup import *
import crtomo.mpl
plt, mpl = crtomo.mpl.setup()
from reda.utils.geometric_factors import compute_K_analytical
config = CRc.ConfigManager(nr_of_electrodes=40)
# generate configs, by hand
a = 1
b = 2
quads = []
for m in range(3, 40):
    quads.append((a, b, m, m + 1))
config.add_to_configs(quads)
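# quads now holds the dipole-dipole skip-0 configurations
# [(1, 2, 3, 4), (1, 2, 4, 5), ..., (1, 2, 39, 40)]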
# config.compute_K_factors(spacing=1)
fig, ax = plt.subplots(figsize=(15 / 2.54, 10 / 2.54))
for spacing in np.arange(0.5, 4, 0.5):
    dipole_separation = (config.configs[:, 2] - config.configs[:, 1]) * spacing
    print(spacing, dipole_separation)
    K = compute_K_analytical(config.configs, spacing=spacing)
    ax.plot(dipole_separation, np.abs(K), '.-', label='spacing {0}m'.format(
        spacing
    ))
ax.set_xlabel('dipole separation [m]')
ax.set_ylabel('K [m]')
ax.set_title(
    'geometric factor for different electrode distances ' +
    '(dipole-dipole skip-0)',
    fontsize=10.0,
)
ax.axhline(y=5000, color='k', linestyle='dashed')
ax.annotate(
    'K = 5000',
    xy=(0, 6000),
)
ax.legend(loc='best')
fig.tight_layout()
fig.savefig('K_vs_dippol_sep.png')
ax.set_xlim(None, 40)
ax.set_ylim(-5000, 1e5)
fig.savefig('K_vs_dippol_sep_zoom.png')
|
{"hexsha": "edbaf1d8d3983c07ec0e33959b86c502e251b06b", "size": 1364, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/pyplots/plot_K_vs_dipol_sep.py", "max_stars_repo_name": "niklasj-h/crtomo_tools", "max_stars_repo_head_hexsha": "57a577ff2925c137fcc387ad49e3c9fe30025831", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-05T14:30:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T05:31:07.000Z", "max_issues_repo_path": "doc/pyplots/plot_K_vs_dipol_sep.py", "max_issues_repo_name": "niklasj-h/crtomo_tools", "max_issues_repo_head_hexsha": "57a577ff2925c137fcc387ad49e3c9fe30025831", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-06T12:22:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-06T12:22:26.000Z", "max_forks_repo_path": "doc/pyplots/plot_K_vs_dipol_sep.py", "max_forks_repo_name": "niklasj-h/crtomo_tools", "max_forks_repo_head_hexsha": "57a577ff2925c137fcc387ad49e3c9fe30025831", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-02-22T12:17:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T01:47:55.000Z", "avg_line_length": 25.7358490566, "max_line_length": 79, "alphanum_fraction": 0.7001466276, "include": true, "reason": "import numpy", "num_tokens": 429}
|
import unittest
import os
import numpy as np
import tensorflow as tf
import nnutil as nn
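# The tests below exercise nn.image.rasterize and nn.image.pick. As the
# assertions suggest, rasterize scatters per-point values onto a fixed-size
# grid (a constant 1 where no values are given), while pick samples grid
# values back at the given normalized coordinates.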
class Image_Rasterize(unittest.TestCase):
    def test_image_rasterize_1(self):
        tf.set_random_seed(42)
        with tf.Session() as sess:
            coord = tf.constant([[0.1, 0.1], [0.8, 0.1]], dtype=tf.float32)
            raster = nn.image.rasterize(coord, None, (2, 2))
            data = sess.run(raster)
            self.assertEqual((2, 2), data.shape)
            np.testing.assert_array_almost_equal(
                data,
                np.array([[1, 0], [1, 0]]),
                decimal=4)

    def test_image_rasterize_2(self):
        tf.set_random_seed(42)
        with tf.Session() as sess:
            coord = tf.constant([[0.1, 0.1], [0.8, 0.1]], dtype=tf.float32)
            value = tf.constant([0.5, 0.3], dtype=tf.float32)
            raster = nn.image.rasterize(coord, value, (2, 2))
            data = sess.run(raster)
            self.assertEqual((2, 2), data.shape)
            np.testing.assert_array_almost_equal(
                data,
                np.array([[0.5, 0], [0.3, 0]]),
                decimal=4)

    def test_image_rasterize_3(self):
        tf.set_random_seed(42)
        with tf.Session() as sess:
            coord = tf.constant([[0.1, 0.1], [0.8, 0.1]], dtype=tf.float32)
            value = tf.constant([[0.5, 0.6], [0.2, 0.3]], dtype=tf.float32)
            raster = nn.image.rasterize(coord, value, (2, 2))
            data = sess.run(raster)
            self.assertEqual((2, 2, 2), data.shape)
            np.testing.assert_array_almost_equal(
                data,
                np.array([[[0.5, 0.6], [0, 0]], [[0.2, 0.3], [0, 0]]]),
                decimal=4)

    def test_image_pick_1(self):
        tf.set_random_seed(42)
        with tf.Session() as sess:
            coord = tf.constant([[0.1, 0.1], [0.8, 0.1], [0.9, 0.9]], dtype=tf.float32)
            raster = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
            value = nn.image.pick(coord, raster)
            data = sess.run(value)
            self.assertEqual((3,), data.shape)
            np.testing.assert_array_almost_equal(
                data,
                np.array([1, 1, 0]),
                decimal=4)

    def test_image_pick_2(self):
        tf.set_random_seed(42)
        with tf.Session() as sess:
            coord = tf.constant([[0.1, 0.1], [0.8, 0.1], [0.9, 0.9]], dtype=tf.float32)
            raster = tf.constant([[[1], [0]], [[1], [0]]], dtype=tf.float32)
            value = nn.image.pick(coord, raster)
            data = sess.run(value)
            self.assertEqual((3, 1), data.shape)
            np.testing.assert_array_almost_equal(
                data,
                np.array([[1], [1], [0]]),
                decimal=4)
{"hexsha": "4afc8543931de1355636b167bb003d0c7767b865", "size": 2691, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/image_rasterize.py", "max_stars_repo_name": "aroig/nnutil", "max_stars_repo_head_hexsha": "88df41ee89f592a28c1661ee8837dd8e8ca42cf3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/image_rasterize.py", "max_issues_repo_name": "aroig/nnutil", "max_issues_repo_head_hexsha": "88df41ee89f592a28c1661ee8837dd8e8ca42cf3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/image_rasterize.py", "max_forks_repo_name": "aroig/nnutil", "max_forks_repo_head_hexsha": "88df41ee89f592a28c1661ee8837dd8e8ca42cf3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8170731707, "max_line_length": 87, "alphanum_fraction": 0.5299145299, "include": true, "reason": "import numpy", "num_tokens": 777}
|
import torch
import numpy as np


def gd_step(f, x, alpha):
    y = f(x)
    y.backward()
    g = x.grad
    with torch.no_grad():
        return x - alpha * g, y
def nesterov_step(f, x, alpha, beta):
    val = f(x)
    val.backward()
    g = x.grad
    with torch.no_grad():
        x1 = x - alpha * g
        return x1, (1. + beta) * x1 - beta * x, val
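# Update rules implemented above (g = grad f, exactly as coded):
#   gd_step(f, x):        returns x - alpha * g(x)
#   nesterov_step(f, x):  x1 = x - alpha * g(x)
#                         returns (x1, (1 + beta) * x1 - beta * x)
# i.e. a gradient step followed by an extrapolation step, in the style of
# Nesterov's accelerated gradient method.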
class LogisticRegression:
    def __init__(self, X, y, tau, theta0=None, device="cpu"):
        self.X = X
        self.targets = y
        self.tau = tau
        if theta0 is None:
            self.theta = torch.zeros(X.shape[1], device=device, dtype=X.dtype)
        else:
            self.theta = torch.tensor(theta0, device=device, dtype=X.dtype)
        self.value_log = []
        self.device = device

    def _obj(self, theta):
        t = -self.targets * (self.X @ theta)
        loss = torch.logaddexp(t, torch.zeros_like(t))
        reg = 0.5 * self.tau * torch.sum(theta ** 2)
        return torch.sum(loss) + reg

    @property
    def obj(self):
        return lambda theta: self._obj(theta)

    def step(self):
        raise NotImplementedError

    def run_steps(self, k):
        for _ in range(k):
            self.step()
        self.value_log.append(self.obj(self.theta).item())

    def fit(self, eps, max_iter=10000):
        old_theta = None
        iter_ = 0
        while old_theta is None or torch.max(torch.abs(self.theta - old_theta)) > eps:
            y, old_theta = self.step()[:2]
            iter_ += 1
            if iter_ >= max_iter:
                break
        self.value_log.append(self.obj(self.theta).item())

    def predict(self, X):
        with torch.no_grad():
            scores = 1. / (1. + torch.exp(-X @ self.theta))
            return torch.where(scores > 0.5, 1, -1)

    def to(self, device):
        self.device = device
        self.X = self.X.to(device=device)
        self.targets = self.targets.to(device=device)

    @property
    def x(self):
        raise NotImplementedError

    @property
    def y(self):
        raise NotImplementedError
class LogisticRegressionGD(LogisticRegression):
    def __init__(self, X, y, tau, theta0=None, device="cpu", log_grad=True):
        super().__init__(X, y, tau, theta0, device)
        L = torch.linalg.norm(X.cpu(), 2) ** 2 / 4. + tau
        self.alpha = 2 / (L + tau)
        self.log = [self.theta.cpu().detach()]
        self.grad_log = []
        self.log_grad = log_grad

    def step(self):
        old_theta = self.theta
        old_theta.requires_grad_(True)
        res, y = gd_step(self.obj, old_theta, self.alpha)
        self.theta = res.detach()
        self.log.append(self.theta.cpu())
        self.value_log.append(y.item())
        if self.log_grad:
            self.grad_log.append(old_theta.grad.detach().cpu())
        return y, old_theta

    def clear_logs(self):
        self.log = [self.theta.cpu().detach()]
        self.value_log = []

    @property
    def x_log(self):
        return self.log
class LogisticRegressionNesterov(LogisticRegression):
    def __init__(self, X, y, tau, theta0=None, device="cpu", log_x=True, log_grad=True):
        super().__init__(X, y, tau, theta0, device)
        L = torch.sum(X ** 2).item() / 4. + tau
        self.alpha = 1. / L
        self.beta = (np.sqrt(L) - np.sqrt(tau)) / (np.sqrt(L) + np.sqrt(tau))
        self.x_log = [self.theta.cpu().detach()]
        self.y_log = [self.theta.cpu().detach()]
        self.grad_log = []
        self.log_x = log_x
        self.log_grad = log_grad

    def step(self):
        old_theta = self.theta
        old_theta.requires_grad_(True)
        x, y, val = nesterov_step(self.obj, old_theta, self.alpha, self.beta)
        self.theta = y.detach()
        self.y_log.append(self.theta.cpu())
        if self.log_x:
            self.x_log.append(x.detach().cpu())
        self.value_log.append(val.item())
        if self.log_grad:
            self.grad_log.append(old_theta.grad.detach().cpu())
        return val, old_theta, x.detach()

    def clear_logs(self):
        self.x_log = [self.theta.cpu().detach()]
        self.y_log = [self.theta.cpu().detach()]
        self.value_log = []
{"hexsha": "548d9f19cecb3cd801b1aa6f76ca6bdc6f4d6b17", "size": 4167, "ext": "py", "lang": "Python", "max_stars_repo_path": "logistic_regression.py", "max_stars_repo_name": "JaworWr/MLAcceleration", "max_stars_repo_head_hexsha": "ef0e0661389782b0caeec9137b3d4ddd84643d2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "logistic_regression.py", "max_issues_repo_name": "JaworWr/MLAcceleration", "max_issues_repo_head_hexsha": "ef0e0661389782b0caeec9137b3d4ddd84643d2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "logistic_regression.py", "max_forks_repo_name": "JaworWr/MLAcceleration", "max_forks_repo_head_hexsha": "ef0e0661389782b0caeec9137b3d4ddd84643d2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9784172662, "max_line_length": 88, "alphanum_fraction": 0.5677945764, "include": true, "reason": "import numpy", "num_tokens": 1067}
|
#' isSymmetric
#'
#' Test if a float matrix is symmetric.
#'
#' @param object
#' A float vector/matrix.
#' @param ...
#' Ignored.
#'
#' @return
#' A logical value.
#'
#' @examples
#' library(float)
#'
#' s = flrunif(10, 3)
#' isSymmetric(s)
#'
#' cp = crossprod(s)
#' isSymmetric(cp)
#'
#' @useDynLib float R_isSymmetric_spm
#' @name isSymmetric
#' @rdname isSymmetric
NULL
isSymmetric_float32 = function(object, ...)
{
.Call(R_isSymmetric_spm, DATA(object))
}
#' @rdname isSymmetric
#' @export
setMethod("isSymmetric", signature(object="float32"), isSymmetric_float32)
|
{"hexsha": "60c214a6e302f178c8b770535afac7312834e2c1", "size": 584, "ext": "r", "lang": "R", "max_stars_repo_path": "R/isSymmetric.r", "max_stars_repo_name": "david-cortes/float", "max_stars_repo_head_hexsha": "df58b4040a352f006c299233c2c920e11b0dcae3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2017-11-08T11:29:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-20T20:17:08.000Z", "max_issues_repo_path": "R/isSymmetric.r", "max_issues_repo_name": "david-cortes/float", "max_issues_repo_head_hexsha": "df58b4040a352f006c299233c2c920e11b0dcae3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2017-09-02T11:14:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-19T15:11:19.000Z", "max_forks_repo_path": "R/isSymmetric.r", "max_forks_repo_name": "david-cortes/float", "max_forks_repo_head_hexsha": "df58b4040a352f006c299233c2c920e11b0dcae3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-11-18T18:05:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T01:23:23.000Z", "avg_line_length": 14.9743589744, "max_line_length": 74, "alphanum_fraction": 0.654109589, "num_tokens": 182}
|
# -*- coding: utf-8 -*-
"""
Deep Human Pose Estimation
Project by Walid Benbihi
MSc Individual Project
Imperial College
Created on Wed Jul 12 15:53:44 2017
@author: Walid Benbihi
@mail : w.benbihi(at)gmail.com
@github : https://github.com/wbenbihi/hourglasstensorlfow/
Abstract:
This python code creates a Stacked Hourglass Model
(Credits : A.Newell et al.)
(Paper : https://arxiv.org/abs/1603.06937)
Code translated from 'anewell' github
Torch7(LUA) --> TensorFlow(PYTHON)
(Code : https://github.com/anewell/pose-hg-train)
    Modifications are made and explained in the report
Goal : Achieve Real Time detection (Webcam)
----- Modifications made to obtain faster results (trade off speed/accuracy)
This work is free of use, please cite the author if you use it!
"""
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
import random
import time
from skimage import transform
import scipy.misc as scm
class DataGenerator():
""" DataGenerator Class : To generate Train, Validatidation and Test sets
for the Deep Human Pose Estimation Model
Formalized DATA:
Inputs:
            Inputs have a shape of (Number of Images) X (Height: 256) X (Width: 256) X (Channels: 3)
        Outputs:
            Outputs have a shape of (Number of Images) X (Number of Stacks) X (Height: 64) X (Width: 64) X (OutputDimension: 16)
Joints:
We use the MPII convention on joints numbering
List of joints:
00 - Right Ankle
01 - Right Knee
02 - Right Hip
03 - Left Hip
04 - Left Knee
05 - Left Ankle
06 - Pelvis (Not present in other dataset ex : LSP)
07 - Thorax (Not present in other dataset ex : LSP)
08 - Neck
09 - Top Head
10 - Right Wrist
11 - Right Elbow
12 - Right Shoulder
13 - Left Shoulder
14 - Left Elbow
15 - Left Wrist
# TODO : Modify selection of joints for Training
How to generate Dataset:
Create a TEXT file with the following structure:
            image_name.jpg[LETTER] box_xmin box_ymin box_xmax box_ymax joints
[LETTER]:
One image can contain multiple person. To use the same image
finish the image with a CAPITAL letter [A,B,C...] for
first/second/third... person in the image
joints :
Sequence of x_p y_p (p being the p-joint)
/!\ In case of missing values use -1
        The Generator will read the TEXT file to create a dictionary
Then 2 options are available for training:
Store image/heatmap arrays (numpy file stored in a folder: need disk space but faster reading)
Generate image/heatmap arrays when needed (Generate arrays while training, increase training time - Need to compute arrays at every iteration)
"""
def __init__(self, joints_name = None, img_dir=None, train_data_file = None, remove_joints = None):
""" Initializer
Args:
            joints_name : List of joints considered
img_dir : Directory containing every images
train_data_file : Text file with training set data
remove_joints : Joints List to keep (See documentation)
"""
        if joints_name is None:
self.joints_list = ['r_anckle', 'r_knee', 'r_hip', 'l_hip', 'l_knee', 'l_anckle', 'pelvis', 'thorax', 'neck', 'head', 'r_wrist', 'r_elbow', 'r_shoulder', 'l_shoulder', 'l_elbow', 'l_wrist']
else:
self.joints_list = joints_name
self.toReduce = False
if remove_joints is not None:
self.toReduce = True
self.weightJ = remove_joints
self.letter = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N']
self.img_dir = img_dir
self.train_data_file = train_data_file
self.images = os.listdir(img_dir)
# --------------------Generator Initialization Methods ---------------------
def _reduce_joints(self, joints):
""" Select Joints of interest from self.weightJ
"""
j = []
for i in range(len(self.weightJ)):
if self.weightJ[i] == 1:
j.append(joints[2*i])
j.append(joints[2*i + 1])
return j
def _create_train_table(self):
""" Create Table of samples from TEXT file
"""
self.train_table = []
self.no_intel = []
self.data_dict = {}
input_file = open(self.train_data_file, 'r')
print('READING TRAIN DATA')
for line in input_file:
line = line.strip()
line = line.split(' ')
name = line[0]
box = list(map(int,line[1:5]))
joints = list(map(int,line[5:]))
if self.toReduce:
joints = self._reduce_joints(joints)
if joints == [-1] * len(joints):
self.no_intel.append(name)
else:
joints = np.reshape(joints, (-1,2))
w = [1] * joints.shape[0]
for i in range(joints.shape[0]):
if np.array_equal(joints[i], [-1,-1]):
w[i] = 0
self.data_dict[name] = {'box' : box, 'joints' : joints, 'weights' : w}
self.train_table.append(name)
input_file.close()
def _randomize(self):
""" Randomize the set
"""
random.shuffle(self.train_table)
def _complete_sample(self, name):
""" Check if a sample has no missing value
Args:
name : Name of the sample
"""
for i in range(self.data_dict[name]['joints'].shape[0]):
if np.array_equal(self.data_dict[name]['joints'][i],[-1,-1]):
return False
return True
def _give_batch_name(self, batch_size = 16, set = 'train'):
""" Returns a List of Samples
Args:
batch_size : Number of sample wanted
set : Set to use (valid/train)
"""
list_file = []
for i in range(batch_size):
if set == 'train':
list_file.append(random.choice(self.train_set))
elif set == 'valid':
list_file.append(random.choice(self.valid_set))
else:
print('Set must be : train/valid')
break
return list_file
def _create_sets(self, validation_rate = 0.05):
""" Select Elements to feed training and validation set
Args:
            validation_rate : Fraction of the data used for validation (in (0,1); 0.1 is a sensible value)
"""
sample = len(self.train_table)
valid_sample = int(sample * validation_rate)
self.train_set = self.train_table[:sample - valid_sample]
self.valid_set = []
preset = self.train_table[sample - valid_sample:]
print('START SET CREATION')
for elem in preset:
if self._complete_sample(elem):
self.valid_set.append(elem)
else:
self.train_set.append(elem)
print('SET CREATED')
np.save('Dataset-Validation-Set', self.valid_set)
np.save('Dataset-Training-Set', self.train_set)
print('--Training set :', len(self.train_set), ' samples.')
print('--Validation set :', len(self.valid_set), ' samples.')
def generateSet(self, rand = False):
""" Generate the training and validation set
Args:
rand : (bool) True to shuffle the set
"""
self._create_train_table()
if rand:
self._randomize()
self._create_sets()
# ---------------------------- Generating Methods --------------------------
def _makeGaussian(self, height, width, sigma = 3, center=None):
""" Make a square gaussian kernel.
size is the length of a side of the square
sigma is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, width, 1, float)
y = np.arange(0, height, 1, float)[:, np.newaxis]
if center is None:
x0 = width // 2
y0 = height // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / sigma**2)
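    # A quick, standalone illustration of the kernel above:
    # _makeGaussian(64, 64, sigma=3, center=(32, 32)) peaks at 1.0 at (32, 32) and,
    # because sigma acts as the full width at half maximum, falls to 0.5 at radius
    # sigma / 2 from the centre.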
def _generate_hm(self, height, width ,joints, maxlenght, weight):
""" Generate a full Heap Map for every joints in an array
Args:
height : Wanted Height for the Heat Map
width : Wanted Width for the Heat Map
joints : Array of Joints
            maxlenght : Length of the Bounding Box
"""
num_joints = joints.shape[0]
hm = np.zeros((height, width, num_joints), dtype = np.float32)
for i in range(num_joints):
if not(np.array_equal(joints[i], [-1,-1])) and weight[i] == 1:
s = int(np.sqrt(maxlenght) * maxlenght * 10 / 4096) + 2
hm[:,:,i] = self._makeGaussian(height, width, sigma= s, center= (joints[i,0], joints[i,1]))
else:
hm[:,:,i] = np.zeros((height,width))
return hm
def _crop_data(self, height, width, box, joints, boxp = 0.05):
""" Automatically returns a padding vector and a bounding box given
the size of the image and a list of joints.
Args:
height : Original Height
width : Original Width
box : Bounding Box
joints : Array of joints
boxp : Box percentage (Use 20% to get a good bounding box)
"""
padding = [[0,0],[0,0],[0,0]]
j = np.copy(joints)
if box[0:2] == [-1,-1]:
j[joints == -1] = 1e5
box[0], box[1] = min(j[:,0]), min(j[:,1])
crop_box = [box[0] - int(boxp * (box[2]-box[0])), box[1] - int(boxp * (box[3]-box[1])), box[2] + int(boxp * (box[2]-box[0])), box[3] + int(boxp * (box[3]-box[1]))]
if crop_box[0] < 0: crop_box[0] = 0
if crop_box[1] < 0: crop_box[1] = 0
if crop_box[2] > width -1: crop_box[2] = width -1
if crop_box[3] > height -1: crop_box[3] = height -1
new_h = int(crop_box[3] - crop_box[1])
new_w = int(crop_box[2] - crop_box[0])
crop_box = [crop_box[0] + new_w //2, crop_box[1] + new_h //2, new_w, new_h]
if new_h > new_w:
bounds = (crop_box[0] - new_h //2, crop_box[0] + new_h //2)
if bounds[0] < 0:
padding[1][0] = abs(bounds[0])
if bounds[1] > width - 1:
padding[1][1] = abs(width - bounds[1])
elif new_h < new_w:
bounds = (crop_box[1] - new_w //2, crop_box[1] + new_w //2)
if bounds[0] < 0:
padding[0][0] = abs(bounds[0])
            if bounds[1] > height - 1:
                padding[0][1] = abs(height - bounds[1])
crop_box[0] += padding[1][0]
crop_box[1] += padding[0][0]
return padding, crop_box
def _crop_img(self, img, padding, crop_box):
""" Given a bounding box and padding values return cropped image
Args:
img : Source Image
padding : Padding
crop_box : Bounding Box
"""
img = np.pad(img, padding, mode = 'constant')
max_lenght = max(crop_box[2], crop_box[3])
img = img[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght //2, crop_box[0] - max_lenght // 2:crop_box[0] + max_lenght //2]
return img
def _crop(self, img, hm, padding, crop_box):
""" Given a bounding box and padding values return cropped image and heatmap
Args:
img : Source Image
hm : Source Heat Map
padding : Padding
crop_box : Bounding Box
"""
img = np.pad(img, padding, mode = 'constant')
hm = np.pad(hm, padding, mode = 'constant')
max_lenght = max(crop_box[2], crop_box[3])
img = img[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght //2, crop_box[0] - max_lenght // 2:crop_box[0] + max_lenght //2]
hm = hm[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght//2, crop_box[0] - max_lenght // 2:crop_box[0] + max_lenght // 2]
return img, hm
def _relative_joints(self, box, padding, joints, to_size = 64):
""" Convert Absolute joint coordinates to crop box relative joint coordinates
(Used to compute Heat Maps)
Args:
box : Bounding Box
padding : Padding Added to the original Image
to_size : Heat Map wanted Size
"""
new_j = np.copy(joints)
max_l = max(box[2], box[3])
new_j = new_j + [padding[1][0], padding[0][0]]
new_j = new_j - [box[0] - max_l //2,box[1] - max_l //2]
new_j = new_j * to_size / (max_l + 0.0000001)
return new_j.astype(np.int32)
def _augment(self,img, hm, max_rotation = 30):
""" # TODO : IMPLEMENT DATA AUGMENTATION
"""
if random.choice([0,1]):
r_angle = np.random.randint(-1*max_rotation, max_rotation)
img = transform.rotate(img, r_angle, preserve_range = True)
hm = transform.rotate(hm, r_angle)
return img, hm
# ----------------------- Batch Generator ----------------------------------
def _generator(self, batch_size = 16, stacks = 4, set = 'train', stored = False, normalize = True, debug = False):
""" Create Generator for Training
Args:
batch_size : Number of images per batch
stacks : Number of stacks/module in the network
set : Training/Testing/Validation set # TODO: Not implemented yet
stored : Use stored Value # TODO: Not implemented yet
normalize : True to return Image Value between 0 and 1
_debug : Boolean to test the computation time (/!\ Keep False)
# Done : Optimize Computation time
16 Images --> 1.3 sec (on i7 6700hq)
"""
while True:
if debug:
t = time.time()
train_img = np.zeros((batch_size, 256,256,3), dtype = np.float32)
train_gtmap = np.zeros((batch_size, stacks, 64, 64, len(self.joints_list)), np.float32)
files = self._give_batch_name(batch_size= batch_size, set = set)
for i, name in enumerate(files):
if name[:-1] in self.images:
try :
img = self.open_img(name)
joints = self.data_dict[name]['joints']
box = self.data_dict[name]['box']
weight = self.data_dict[name]['weights']
if debug:
print(box)
padd, cbox = self._crop_data(img.shape[0], img.shape[1], box, joints, boxp = 0.2)
if debug:
print(cbox)
print('maxl :', max(cbox[2], cbox[3]))
new_j = self._relative_joints(cbox,padd, joints, to_size=64)
hm = self._generate_hm(64, 64, new_j, 64, weight)
img = self._crop_img(img, padd, cbox)
img = img.astype(np.uint8)
# On 16 image per batch
# Avg Time -OpenCV : 1.0 s -skimage: 1.25 s -scipy.misc.imresize: 1.05s
img = scm.imresize(img, (256,256))
                        # Less efficient than OpenCV's resize method
#img = transform.resize(img, (256,256), preserve_range = True, mode = 'constant')
# May Cause trouble, bug in OpenCV imgwrap.cpp:3229
# error: (-215) ssize.area() > 0 in function cv::resize
#img = cv2.resize(img, (256,256), interpolation = cv2.INTER_CUBIC)
img, hm = self._augment(img, hm)
hm = np.expand_dims(hm, axis = 0)
hm = np.repeat(hm, stacks, axis = 0)
if normalize:
train_img[i] = img.astype(np.float32) / 255
else :
train_img[i] = img.astype(np.float32)
train_gtmap[i] = hm
                    except:
                        # note: reassigning the loop variable of a for loop has no
                        # effect, so a failed sample leaves a zero-filled slot in the
                        # batch (_aux_generator below retries until the batch is full)
                        i = i - 1
                else:
                    i = i - 1
if debug:
print('Batch : ',time.time() - t, ' sec.')
yield train_img, train_gtmap
def _aux_generator(self, batch_size = 16, stacks = 4, normalize = True, sample_set = 'train'):
""" Auxiliary Generator
Args:
See Args section in self._generator
"""
while True:
train_img = np.zeros((batch_size, 256,256,3), dtype = np.float32)
train_gtmap = np.zeros((batch_size, stacks, 64, 64, len(self.joints_list)), np.float32)
train_weights = np.zeros((batch_size, len(self.joints_list)), np.float32)
i = 0
while i < batch_size:
try:
if sample_set == 'train':
name = random.choice(self.train_set)
elif sample_set == 'valid':
name = random.choice(self.valid_set)
joints = self.data_dict[name]['joints']
box = self.data_dict[name]['box']
weight = np.asarray(self.data_dict[name]['weights'])
train_weights[i] = weight
img = self.open_img(name)
padd, cbox = self._crop_data(img.shape[0], img.shape[1], box, joints, boxp = 0.2)
new_j = self._relative_joints(cbox,padd, joints, to_size=64)
hm = self._generate_hm(64, 64, new_j, 64, weight)
img = self._crop_img(img, padd, cbox)
img = img.astype(np.uint8)
img = scm.imresize(img, (256,256))
img, hm = self._augment(img, hm)
hm = np.expand_dims(hm, axis = 0)
hm = np.repeat(hm, stacks, axis = 0)
if normalize:
train_img[i] = img.astype(np.float32) / 255
else :
train_img[i] = img.astype(np.float32)
train_gtmap[i] = hm
i = i + 1
except :
print('error file: ', name)
yield train_img, train_gtmap, train_weights
def generator(self, batchSize = 16, stacks = 4, norm = True, sample = 'train'):
""" Create a Sample Generator
Args:
batchSize : Number of image per batch
stacks : Stacks in HG model
norm : (bool) True to normalize the batch
sample : 'train'/'valid' Default: 'train'
"""
return self._aux_generator(batch_size=batchSize, stacks=stacks, normalize=norm, sample_set=sample)
# ---------------------------- Image Reader --------------------------------
def open_img(self, name, color = 'RGB'):
""" Open an image
Args:
name : Name of the sample
color : Color Mode (RGB/BGR/GRAY)
"""
if name[-1] in self.letter:
name = name[:-1]
img = cv2.imread(os.path.join(self.img_dir, name))
if color == 'RGB':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
elif color == 'BGR':
return img
elif color == 'GRAY':
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
print('Color mode supported: RGB/BGR. If you need another mode do it yourself :p')
def plot_img(self, name, plot = 'cv2'):
""" Plot an image
Args:
name : Name of the Sample
plot : Library to use (cv2: OpenCV, plt: matplotlib)
"""
if plot == 'cv2':
img = self.open_img(name, color = 'BGR')
cv2.imshow('Image', img)
elif plot == 'plt':
img = self.open_img(name, color = 'RGB')
plt.imshow(img)
plt.show()
def test(self, toWait = 0.2):
""" TESTING METHOD
You can run it to see if the preprocessing is well done.
        Wait a few seconds for loading, then a slideshow appears with images and highlighted joints
/!\ Use Esc to quit
Args:
toWait : In sec, time between pictures
"""
self._create_train_table()
self._create_sets()
for i in range(len(self.train_set)):
img = self.open_img(self.train_set[i])
w = self.data_dict[self.train_set[i]]['weights']
padd, box = self._crop_data(img.shape[0], img.shape[1], self.data_dict[self.train_set[i]]['box'], self.data_dict[self.train_set[i]]['joints'], boxp= 0.0)
new_j = self._relative_joints(box,padd, self.data_dict[self.train_set[i]]['joints'], to_size=256)
rhm = self._generate_hm(256, 256, new_j,256, w)
rimg = self._crop_img(img, padd, box)
# See Error in self._generator
#rimg = cv2.resize(rimg, (256,256))
rimg = scm.imresize(rimg, (256,256))
#rhm = np.zeros((256,256,16))
#for i in range(16):
# rhm[:,:,i] = cv2.resize(rHM[:,:,i], (256,256))
grimg = cv2.cvtColor(rimg, cv2.COLOR_RGB2GRAY)
cv2.imshow('image', grimg / 255 + np.sum(rhm,axis = 2))
# Wait
time.sleep(toWait)
if cv2.waitKey(1) == 27:
print('Ended')
cv2.destroyAllWindows()
break
# ------------------------------- PCK METHODS-------------------------------
def pck_ready(self, idlh = 3, idrs = 12, testSet = None):
""" Creates a list with all PCK ready samples
(PCK: Percentage of Correct Keypoints)
"""
id_lhip = idlh
id_rsho = idrs
self.total_joints = 0
self.pck_samples = []
for s in self.data_dict.keys():
if testSet == None:
if self.data_dict[s]['weights'][id_lhip] == 1 and self.data_dict[s]['weights'][id_rsho] == 1:
self.pck_samples.append(s)
wIntel = np.unique(self.data_dict[s]['weights'], return_counts = True)
self.total_joints += dict(zip(wIntel[0], wIntel[1]))[1]
else:
if self.data_dict[s]['weights'][id_lhip] == 1 and self.data_dict[s]['weights'][id_rsho] == 1 and s in testSet:
self.pck_samples.append(s)
wIntel = np.unique(self.data_dict[s]['weights'], return_counts = True)
self.total_joints += dict(zip(wIntel[0], wIntel[1]))[1]
print('PCK PREPROCESS DONE: \n --Samples:', len(self.pck_samples), '\n --Num.Joints', self.total_joints)
def getSample(self, sample = None):
""" Returns information of a sample
Args:
sample : (str) Name of the sample
Returns:
img: RGB Image
new_j: Resized Joints
w: Weights of Joints
joint_full: Raw Joints
max_l: Maximum Size of Input Image
"""
if sample != None:
try:
joints = self.data_dict[sample]['joints']
box = self.data_dict[sample]['box']
w = self.data_dict[sample]['weights']
img = self.open_img(sample)
padd, cbox = self._crop_data(img.shape[0], img.shape[1], box, joints, boxp = 0.2)
new_j = self._relative_joints(cbox,padd, joints, to_size=256)
joint_full = np.copy(joints)
max_l = max(cbox[2], cbox[3])
joint_full = joint_full + [padd[1][0], padd[0][0]]
joint_full = joint_full - [cbox[0] - max_l //2,cbox[1] - max_l //2]
img = self._crop_img(img, padd, cbox)
img = img.astype(np.uint8)
img = scm.imresize(img, (256,256))
return img, new_j, w, joint_full, max_l
except:
return False
else:
print('Specify a sample name')
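# --- Illustrative usage sketch (not part of the original file; the image directory
# and the dataset file are hypothetical and must follow the format described in the
# class docstring) ---
if __name__ == '__main__':
    gen = DataGenerator(img_dir='images/', train_data_file='dataset.txt')
    gen.generateSet(rand=True)
    batches = gen.generator(batchSize=8, stacks=4, norm=True, sample='train')
    imgs, gtmaps, weights = next(batches)
    # imgs: (8, 256, 256, 3), gtmaps: (8, 4, 64, 64, n_joints), weights: (8, n_joints)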
|
{"hexsha": "b98c85838b799d94002303314ba5fa4bdfe5861e", "size": 20190, "ext": "py", "lang": "Python", "max_stars_repo_path": "datagen.py", "max_stars_repo_name": "mohaEs/Train-Predict-Landmarks-by-MCAM", "max_stars_repo_head_hexsha": "e06179fc91b33a7bc73e44df47a4cf53f36b0a2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datagen.py", "max_issues_repo_name": "mohaEs/Train-Predict-Landmarks-by-MCAM", "max_issues_repo_head_hexsha": "e06179fc91b33a7bc73e44df47a4cf53f36b0a2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datagen.py", "max_forks_repo_name": "mohaEs/Train-Predict-Landmarks-by-MCAM", "max_forks_repo_head_hexsha": "e06179fc91b33a7bc73e44df47a4cf53f36b0a2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1742160279, "max_line_length": 192, "alphanum_fraction": 0.638038633, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6282}
|
import random
import pandas as pd
import numpy as np
from fuzzywuzzy.process import extractOne
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import csr_matrix
from py_stringmatching.similarity_measure.monge_elkan import MongeElkan
from py_stringmatching.similarity_measure.jaro_winkler import JaroWinkler
from py_stringmatching.similarity_measure.tfidf import TfIdf
from py_stringmatching.similarity_measure.soft_tfidf import SoftTfIdf
from py_stringmatching.tokenizer.alphanumeric_tokenizer import AlphanumericTokenizer
def stringMatching(data, label_options, print_output = False):
processed_rows, correct_rows = 0, 0
total_rows = len(data.index)
predictions, scores, correct_matches = [], [], []
for row_index, row in data.iterrows():
prediction, score = extractOne(row['text'], label_options)
predictions.append(prediction)
scores.append(score)
if prediction == row['label']:
correct_rows += 1
correct_matches.append(True)
else:
correct_matches.append(False)
if print_output:
print('\rProgress: Completed: {:.2%} --- Accuracy: {:.2%}'.format((row_index+1)/total_rows, correct_rows/(row_index+1)), end='')
results = data
results['predicted-label'] = predictions
results['label-score'] = scores
results['correct_match'] = correct_matches
accuracy = correct_rows/total_rows
if print_output:
print('\nFinal Accuracy: {:.2%}'.format(accuracy))
return accuracy, results
def logisticMulticlass(train_data, test_data, print_output=False, dummy_labels = False):
train_text = train_data['text']
train_label = train_data['label']
    # One-hot encode the raw text values. Note: this assumes train and test share the
    # same set of values; otherwise the dummy columns will not align
    # (logisticMulticlassBOW below avoids this by fitting a shared CountVectorizer).
    train_text_dummie = pd.get_dummies(train_text)
test_text = test_data['text']
test_label = test_data['label']
test_text_dummie = pd.get_dummies(test_text)
if dummy_labels:
train_label_dummie = pd.get_dummies(train_label)
test_label_dummie = pd.get_dummies(test_label)
else:
train_label_dummie = train_label
test_label_dummie = test_label
classifier = OneVsRestClassifier(LogisticRegression(solver = 'lbfgs', random_state = 0))
classifier.fit(train_text_dummie, train_label_dummie)
test_label_predictions = classifier.predict(test_text_dummie)
classification_report_result = classification_report(test_label_dummie, test_label_predictions)
if print_output:
print('Classification Report for Multiclass Logistic Regression:')
print(classification_report_result)
return classifier, classification_report_result
def data2BOW_match(data, label_options, bow_vec):
vec_data = []
match_data = []
for row_index, row in data.iterrows():
vec_row = bow_vec.transform([row['text']+' '+str(row['label'])])
vec_data.append(vec_row.toarray()[0])
match_data.append(csr_matrix([1], dtype='int64').toarray()[0])
while True:
new_label = random.choice(label_options)
if new_label != row['label']:
break
row['label'] = new_label
vec_row = bow_vec.transform([row['text']+' '+row['label']])
vec_data.append(vec_row.toarray()[0])
match_data.append(csr_matrix([0], dtype='int64').toarray()[0])
return np.array(vec_data), np.array(match_data).ravel()
def logisticBinaryMatch(train_data, test_data, label_options, print_output=False):
corpus = list(train_data['text']) + list(test_data['text']) + [str(x) for x in train_data['label']] + [str(x) for x in test_data['label']]
bow_vec = CountVectorizer()
bow_vec.fit(corpus)
vec_train_data, match_train_data = data2BOW_match(train_data, label_options, bow_vec)
vec_test_data, match_test_data = data2BOW_match(test_data, label_options, bow_vec)
classifier = LogisticRegression(solver='lbfgs', random_state=0, max_iter=10000)
classifier.fit(vec_train_data, match_train_data)
match_test_predictions = classifier.predict(vec_test_data)
confusion_matrix_result = confusion_matrix(match_test_data, match_test_predictions)
classification_report_result = classification_report(match_test_data, match_test_predictions)
if print_output:
print('Confusion Matrix:')
print(confusion_matrix_result)
print('\nClassification Report:')
print(classification_report_result)
return classifier, bow_vec, confusion_matrix_result, classification_report_result
def data2BOW_multiclass(data, bow_vec, column):
vec_data = []
for row_index, row in data.iterrows():
vec_row = bow_vec.transform([row[column]])
vec_data.append(vec_row.toarray()[0])
return np.array(vec_data)
def logisticMulticlassBOW(train_data, test_data, print_output=False, dummy_labels = False):
text_corpus = list(train_data['text']) + list(test_data['text'])
bow_vec_text = CountVectorizer()
bow_vec_text.fit(text_corpus)
train_label = train_data['label']
test_label = test_data['label']
vec_train_data = data2BOW_multiclass(train_data, bow_vec_text, 'text')
vec_test_data = data2BOW_multiclass(test_data, bow_vec_text, 'text')
if dummy_labels:
label_corpus = list(train_data['label']) + list(test_data['label'])
bow_vec_label = CountVectorizer()
bow_vec_label.fit(label_corpus)
train_label_dummie = data2BOW_multiclass(train_data, bow_vec_label, 'label')
test_label_dummie = data2BOW_multiclass(test_data, bow_vec_label, 'label')
else:
train_label_dummie = train_label
test_label_dummie = test_label
classifier = OneVsRestClassifier(LogisticRegression(solver = 'lbfgs', random_state = 0))
classifier.fit(vec_train_data, train_label_dummie)
test_label_predictions = classifier.predict(vec_test_data)
classification_report_result = classification_report(test_label_dummie, test_label_predictions)
if print_output:
print('Classification Report for Multiclass Logistic Regression:')
print(classification_report_result)
return classifier, classification_report_result
def calculate_features(str1, str2):
me = MongeElkan()
jw = JaroWinkler()
tfidf = TfIdf(dampen = False)
stdidf = SoftTfIdf()
tokenizer = AlphanumericTokenizer()
str1 = str1.casefold()
str2 = str2.casefold()
bag1 = tokenizer.tokenize(str1)
    bag2 = tokenizer.tokenize(str2)
monge_elkan = me.get_raw_score(bag1, bag2)
jaro_winkler = jw.get_sim_score(str1, str2)
tf_idf = tfidf.get_raw_score(bag1, bag2)
soft_tfidf = stdidf.get_raw_score(bag1, bag2)
return monge_elkan, jaro_winkler, tf_idf, soft_tfidf
def data2features(data, label_options):
features_data = []
match_data = []
for row_index, row in data.iterrows():
features_row = calculate_features(row['text'], str(row['label']))
features_data.append(features_row)
match_data.append(1)
while True:
new_label = random.choice(label_options)
if new_label != row['label']:
break
row['label'] = new_label
features_row = calculate_features(row['text'], str(row['label']))
features_data.append(features_row)
match_data.append(0)
return features_data, match_data
def logisticStrComparisson(train_data, test_data, label_options, print_output=False):
features_train_data, match_train_data = data2features(train_data, label_options)
features_test_data, match_test_data = data2features(test_data, label_options)
classifier = LogisticRegression(solver='lbfgs', random_state=0)
classifier.fit(features_train_data, match_train_data)
match_test_predictions = classifier.predict(features_test_data)
confusion_matrix_result = confusion_matrix(match_test_data, match_test_predictions)
classification_report_result = classification_report(match_test_data, match_test_predictions)
if print_output:
print('Confusion Matrix:')
print(confusion_matrix_result)
print('\nClassification Report:')
print(classification_report_result)
return classifier, confusion_matrix_result, classification_report_result
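# --- Illustrative usage sketch (not part of the original file; toy data) ---
if __name__ == '__main__':
    toy = pd.DataFrame({'text': ['ran 5 km this morning', 'swam twenty laps'],
                        'label': ['running', 'swimming']})
    accuracy, results = stringMatching(toy, ['running', 'swimming', 'cycling'],
                                       print_output=True)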
|
{"hexsha": "9fceae1eb8ff74278222645684b8729acbaa389d", "size": 8449, "ext": "py", "lang": "Python", "max_stars_repo_path": "tokens2labels/NLPAlgorithms.py", "max_stars_repo_name": "mayaepps/Exercise-Logs", "max_stars_repo_head_hexsha": "59e37d351b97c3e34fe677e001e70abe16bb5133", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tokens2labels/NLPAlgorithms.py", "max_issues_repo_name": "mayaepps/Exercise-Logs", "max_issues_repo_head_hexsha": "59e37d351b97c3e34fe677e001e70abe16bb5133", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tokens2labels/NLPAlgorithms.py", "max_forks_repo_name": "mayaepps/Exercise-Logs", "max_forks_repo_head_hexsha": "59e37d351b97c3e34fe677e001e70abe16bb5133", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1068376068, "max_line_length": 142, "alphanum_fraction": 0.7230441472, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1903}
|
"""
Data access functions
---------------------
"""
from __future__ import absolute_import
from os.path import join as pjoin, basename, dirname
import subprocess
import tempfile
import logging
import numpy as np
import h5py
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from wagl.geobox import GriddedGeoBox
from wagl.tiling import generate_tiles
def get_pixel(filename, lonlat, band=1):
"""Return a pixel from `filename` at the longitude and latitude given
by the tuple `lonlat`. Optionally, the `band` can be specified."""
with rasterio.open(filename) as src:
x, y = [int(v) for v in ~src.transform * lonlat]
if isinstance(band, list):
data = src.read(band, window=((y, y + 1), (x, x + 1))).ravel()
else:
data = src.read(band, window=((y, y + 1), (x, x + 1))).flat[0]
return data
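# Illustrative call (the file name is hypothetical; the lon/lat pair must be given in
# the raster's own coordinate reference system):
#   value = get_pixel('scene.tif', (148.5, -35.3), band=1)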
def select_acquisitions(acqs_list, fn=(lambda acq: True)):
"""
Given a list of acquisitions, apply the supplied fn to select the
desired acquisitions.
"""
acqs = [acq for acq in acqs_list if fn(acq)]
return acqs
def stack_data(acqs_list, fn=(lambda acq: True), window=None, masked=False):
"""
Given a list of acquisitions, return the data from each acquisition
collected in a 3D numpy array (first index is the acquisition number).
If window is defined, then the subset contained within the window is
returned along with a GriddedGeoBox instance detailing the
spatial information associated with that subset.
:param acqs_list:
The list of acquisitions from which to generate a stack of data.
:param window:
Defines a subset ((ystart, yend), (xstart, xend)) in array
co-ordinates. Default is None.
:param masked:
Indicates whether or not to return a masked array. Default is False.
:return:
A 2-tuple containing:
* 1. A 3D numpy array (or None) containing the corresponding
acquisition data. (None if no data).
* 2. A GriddedGeoBox instance specifying the spatial context
of the 3D numpy array. Note: All Acquisitions share the
same GriddedGeoBox.
"""
# determine data type and dimensions by reading the first band
acqs = acqs_list
a, geo_box = acqs[0].data_and_box(window=window, masked=masked)
# create the result array, setting datatype based on source type
stack_shape = (len(acqs), a.shape[0], a.shape[1])
stack = np.empty(stack_shape, a.dtype)
stack[0] = a
del a
    # read the remaining acquisitions into it
    for i in range(1, stack_shape[0]):
        # we read without resampling on purpose: an exception should be
        # thrown if the user tries to stack irregular acquisitions
        stack[i] = acqs[i].data(window=window, masked=masked)
return stack, geo_box
def write_img(array, filename, driver='GTiff', geobox=None, nodata=None,
tags=None, options=None, cogtif=False, levels=None,
resampling=Resampling.nearest):
"""
Writes a 2D/3D image to disk using rasterio.
:param array:
A 2D/3D NumPy array.
:param filename:
A string containing the output file name.
:param driver:
A string containing a GDAL compliant image driver. Default is
'GTiff'.
:param geobox:
An instance of a GriddedGeoBox object.
:param nodata:
A value representing the no data value for the array.
:param tags:
A dictionary of dataset-level metadata.
:param options:
A dictionary containing other dataset creation options.
See creation options for the respective GDAL formats.
:param cogtif:
If set to True, override the `driver` keyword with `GTiff`
and create a Cloud Optimised GeoTiff. Default is False.
See:
https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF
:param levels:
If cogtif is set to True, build overviews/pyramids
according to levels. Default levels are [2, 4, 8, 16, 32].
:param resampling:
If cogtif is set to True, build overviews/pyramids using
a resampling method from `rasterio.enums.Resampling`.
Default is `Resampling.nearest`.
:notes:
If array is an instance of a `h5py.Dataset`, then the output
file will include blocksizes based on the `h5py.Dataset's`
chunks. To override the blocksizes, specify them using the
`options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
If `cogtif` is set to True, the default blocksizes will be
256x256. To override this behaviour, specify them using the
`options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
"""
# Get the datatype of the array
dtype = array.dtype.name
# Check for excluded datatypes
excluded_dtypes = ['int64', 'int8', 'uint64']
if dtype in excluded_dtypes:
msg = "Datatype not supported: {dt}".format(dt=dtype)
raise TypeError(msg)
    # convert any bools to uint8
if dtype == 'bool':
array = np.uint8(array)
dtype = 'uint8'
ndims = array.ndim
dims = array.shape
# Get the (z, y, x) dimensions (assuming BSQ interleave)
if ndims == 2:
samples = dims[1]
lines = dims[0]
bands = 1
elif ndims == 3:
samples = dims[2]
lines = dims[1]
bands = dims[0]
else:
logging.error('Input array is not of 2 or 3 dimensions!!!')
err = 'Array dimensions: {dims}'.format(dims=ndims)
raise IndexError(err)
# If we have a geobox, then retrieve the geotransform and projection
if geobox is not None:
transform = geobox.transform
projection = geobox.crs.ExportToWkt()
else:
transform = None
projection = None
# override the driver if we are creating a cogtif
if cogtif:
driver = 'GTiff'
# compression predictor choices
predictor = {'int8': 2,
'uint8': 2,
'int16': 2,
'uint16': 2,
'int32': 2,
'uint32': 2,
'int64': 2,
'uint64': 2,
'float32': 3,
'float64': 3}
kwargs = {'count': bands,
'width': samples,
'height': lines,
'crs': projection,
'transform': transform,
'dtype': dtype,
'driver': driver,
'nodata': nodata,
'predictor': predictor[dtype]}
if isinstance(array, h5py.Dataset):
# TODO: if array is 3D get x & y chunks
if array.chunks[1] == array.shape[1]:
# GDAL doesn't like tiled or blocksize options to be set
# the same length as the columns (probably true for rows as well)
array = array[:]
else:
y_tile, x_tile = array.chunks
tiles = generate_tiles(samples, lines, x_tile, y_tile)
# add blocksizes to the creation keywords
kwargs['tiled'] = 'yes'
kwargs['blockxsize'] = x_tile
kwargs['blockysize'] = y_tile
# the user can override any derived blocksizes by supplying `options`
if options is not None:
for key in options:
kwargs[key] = options[key]
with tempfile.TemporaryDirectory() as tmpdir:
out_fname = pjoin(tmpdir, basename(filename)) if cogtif else filename
with rasterio.open(out_fname, 'w', **kwargs) as outds:
if bands == 1:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
outds.write(array[idx], 1, window=tile)
else:
outds.write(array, 1)
else:
if isinstance(array, h5py.Dataset):
for tile in tiles:
idx = (slice(tile[0][0], tile[0][1]),
slice(tile[1][0], tile[1][1]))
subs = array[:, idx[0], idx[1]]
for i in range(bands):
outds.write(subs[i], i + 1, window=tile)
else:
for i in range(bands):
outds.write(array[i], i + 1)
if tags is not None:
outds.update_tags(**tags)
# overviews/pyramids
if cogtif:
if levels is None:
levels = [2, 4, 8, 16, 32]
outds.build_overviews(levels, resampling)
if cogtif:
cmd = ['gdal_translate',
'-co',
'TILED=YES',
'-co',
'COPY_SRC_OVERVIEWS=YES',
'-co',
'{}={}'.format('PREDICTOR', predictor[dtype])]
        if options is not None:
            for key, value in options.items():
                cmd.extend(['-co', '{}={}'.format(key, value)])
cmd.extend([out_fname, filename])
subprocess.check_call(cmd, cwd=dirname(filename))
def read_subset(fname, ul_xy, ur_xy, lr_xy, ll_xy, bands=1):
"""
Return a 2D or 3D NumPy array subsetted to the given bounding
extents.
:param fname:
A string containing the full file pathname to an image on
disk.
:param ul_xy:
A tuple containing the Upper Left (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param ur_xy:
A tuple containing the Upper Right (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param lr_xy:
A tuple containing the Lower Right (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param ll_xy:
A tuple containing the Lower Left (x,y) co-ordinate pair
in real world (map) co-ordinates. Co-ordinate pairs can be
(longitude, latitude) or (eastings, northings), but they must
be of the same reference as the image of interest.
:param bands:
Can be an integer of list of integers representing the band(s)
to be read from disk. If bands is a list, then the returned
subset will be 3D, otherwise the subset will be strictly 2D.
:return:
A tuple of 3 elements:
* 1. 2D or 3D NumPy array containing the image subset.
* 2. A list of length 6 containing the GDAL geotransform.
* 3. A WKT formatted string representing the co-ordinate
reference system (projection).
:additional notes:
The ending array co-ordinates are increased by +1,
i.e. xend = 270 + 1
to account for Python's [inclusive, exclusive) index notation.
"""
if isinstance(fname, h5py.Dataset):
geobox = GriddedGeoBox.from_dataset(fname)
prj = fname.attrs['crs_wkt']
else:
# Open the file
with rasterio.open(fname) as src:
# Get the inverse transform of the affine co-ordinate reference
geobox = GriddedGeoBox.from_dataset(src)
prj = src.crs.wkt # rasterio returns a unicode
inv = ~geobox.transform
rows, cols = geobox.shape
# Convert each map co-ordinate to image/array co-ordinates
img_ul_x, img_ul_y = [int(v) for v in inv * ul_xy]
img_ur_x, img_ur_y = [int(v) for v in inv * ur_xy]
img_lr_x, img_lr_y = [int(v) for v in inv * lr_xy]
img_ll_x, img_ll_y = [int(v) for v in inv * ll_xy]
# Calculate the min and max array extents
# The ending array extents have +1 to account for Python's
# [inclusive, exclusive) index notation.
xstart = min(img_ul_x, img_ll_x)
ystart = min(img_ul_y, img_ur_y)
xend = max(img_ur_x, img_lr_x) + 1
yend = max(img_ll_y, img_lr_y) + 1
# Check for out of bounds
    if (((xstart < 0) or (ystart < 0)) or
            ((xend - 1 > cols) or (yend - 1 > rows))):
        msg = ("Error! Attempt to read a subset that is outside of the "
               "image domain. Index: ({ys}, {ye}), ({xs}, {xe})")
        msg = msg.format(ys=ystart, ye=yend, xs=xstart, xe=xend)
        raise IndexError(msg)
if isinstance(fname, h5py.Dataset):
subs = fname[ystart:yend, xstart:xend]
else:
with rasterio.open(fname) as src:
subs = src.read(bands, window=((ystart, yend), (xstart, xend)))
# Get the new UL co-ordinates of the array
ul_x, ul_y = geobox.transform * (xstart, ystart)
geobox_subs = GriddedGeoBox(shape=subs.shape, origin=(ul_x, ul_y),
pixelsize=geobox.pixelsize, crs=prj)
return (subs, geobox_subs)
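# Illustrative call of read_subset (the file name and corner coordinates are
# hypothetical and must all be expressed in the image's own CRS):
#   subs, subs_geobox = read_subset('scene.tif', (148., -35.), (149., -35.),
#                                   (149., -36.), (148., -36.))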
def reproject_file_to_array(src_filename, src_band=1, dst_geobox=None,
resampling=Resampling.nearest):
"""
Given an image on file, reproject to the desired coordinate
reference system.
:param src_filename:
A string containing the full file path name to the source
image on disk.
:param src_band:
An integer representing the band number to be reprojected.
Default is 1, the 1st band.
:param dst_geobox:
An instance of a GriddedGeoBox object containing the
destination parameters such as origin, affine, projection,
and array dimensions.
    :param resampling:
        The resampling method to be used; see rasterio.enums.Resampling
        for the available options.
        Default is Resampling.nearest, i.e. nearest neighbour resampling.
:return:
A NumPy array containing the reprojected result.
"""
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
with rasterio.open(src_filename) as src:
# Define a rasterio band
rio_band = rasterio.band(src, src_band)
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src.dtypes[0])
# Get the rasterio proj4 styled dict
prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
reproject(rio_band, dst_arr, dst_transform=dst_geobox.transform,
dst_crs=prj, resampling=resampling)
return dst_arr
def reproject_img_to_img(src_img, src_geobox, dst_geobox,
resampling=Resampling.nearest):
"""
Reprojects an image/array to the desired co-ordinate reference system.
:param src_img:
A NumPy array containing the source image.
:param src_geobox:
An instance of a GriddedGeoBox object containing the
source parameters such as origin, affine, projection.
:param dst_geobox:
An instance of a GriddedGeoBox object containing the
destination parameters such as origin, affine, projection,
and array dimensions.
    :param resampling:
        The resampling method to be used; see rasterio.enums.Resampling
        for the available options.
        Default is Resampling.nearest, i.e. nearest neighbour resampling.
:return:
A NumPy array containing the reprojected result.
"""
if not isinstance(dst_geobox, GriddedGeoBox):
msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(dst_geobox))
raise TypeError(msg)
if not isinstance(src_geobox, GriddedGeoBox):
msg = 'src_geobox must be an instance of a GriddedGeoBox! Type: {}'
msg = msg.format(type(src_geobox))
raise TypeError(msg)
# Get the source and destination projections in Proj4 styled dicts
src_prj = CRS.from_string(src_geobox.crs.ExportToProj4())
dst_prj = CRS.from_string(dst_geobox.crs.ExportToProj4())
# Get the source and destination transforms
src_trans = src_geobox.transform
dst_trans = dst_geobox.transform
# Define the output NumPy array
dst_arr = np.zeros(dst_geobox.shape, dtype=src_img.dtype)
reproject(src_img, dst_arr, src_transform=src_trans,
src_crs=src_prj, dst_transform=dst_trans, dst_crs=dst_prj,
resampling=resampling)
return dst_arr
def as_array(array, dtype, transpose=False):
"""
Given an array and dtype, array will be converted to dtype if
and only if array.dtype != dtype. If transpose is set to True
then array will be transposed before returning.
:param array:
A NumPy array.
:param dtype:
The type to return the array as.
:type dtype:
A NumPy data type (e.g. ``numpy.float32``).
:param transpose:
If set then array will be transposed before returning.
        Useful for passing arrays into Fortran routines. Default is
False.
:type transpose:
Bool.
:return:
        A :py:class:`numpy.ndarray` of type ``dtype`` with the same
dimensions as array.
"""
if array.dtype != dtype:
if transpose:
return array.astype(dtype).transpose()
return array.astype(dtype)
if transpose:
return array.transpose()
return array
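# --- Illustrative, self-contained sketch of as_array (not part of the original file) ---
if __name__ == '__main__':
    demo = np.arange(6, dtype='int16').reshape(2, 3)
    out = as_array(demo, np.float32, transpose=True)
    print(out.dtype, out.shape)  # float32 (3, 2)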
|
{"hexsha": "1c2e869b0603b84c2ae7af7ba218f65b9f92ec56", "size": 17754, "ext": "py", "lang": "Python", "max_stars_repo_path": "wagl/data.py", "max_stars_repo_name": "ASVincent/wagl", "max_stars_repo_head_hexsha": "cf3a72e53e53f3a7b2f2b5308068069b1b714f2a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wagl/data.py", "max_issues_repo_name": "ASVincent/wagl", "max_issues_repo_head_hexsha": "cf3a72e53e53f3a7b2f2b5308068069b1b714f2a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wagl/data.py", "max_forks_repo_name": "ASVincent/wagl", "max_forks_repo_head_hexsha": "cf3a72e53e53f3a7b2f2b5308068069b1b714f2a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-23T00:51:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-23T00:51:56.000Z", "avg_line_length": 34.3404255319, "max_line_length": 77, "alphanum_fraction": 0.6112988622, "include": true, "reason": "import numpy", "num_tokens": 4391}
|
#coding=utf-8
"""
"""
import sys
import math
import numpy as np
import numpy.linalg as la
import file
import collections
def clamping_acos(cos):
"""
Calculate arccos with its argument clamped to [-1, 1]
"""
if cos > 1:
return 0
    if cos < -1:
        return math.pi
return math.acos(cos)
def get_bonds_with_constant_delta(atoms, delta):
"""
    Two atoms are connected if their distance is less than or equal to a given delta.
"""
bond_target_index_arrays = []
for index, position in enumerate(atoms.positions):
distance_vector_array = atoms.positions[index+1:] - position
distance_squared_array = np.sum(np.square(distance_vector_array), axis=1)
delta_squared = delta**2
bond_target_indices = (distance_squared_array <= delta_squared).nonzero()[0] + index + 1
bond_target_index_arrays.append(bond_target_indices)
return bond_target_index_arrays
def get_bonds_with_radii(atoms, radii_sum_factor):
"""
    Two atoms are connected if their distance is less than or equal to the sum of
their covalent radii times a radii_sum_factor (e.g. 1.15).
"""
bond_target_index_arrays = []
for index, position in enumerate(atoms.positions):
distance_vector_array = atoms.positions[index+1:] - position
distance_squared_array = np.sum(np.square(distance_vector_array), axis=1)
delta_squared = np.square((atoms.covalence_radii[index+1:] + atoms.covalence_radii[index]) * radii_sum_factor)
bond_target_indices = (distance_squared_array <= delta_squared).nonzero()[0] + index + 1
bond_target_index_arrays.append(bond_target_indices)
return bond_target_index_arrays
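# Illustrative check of the radii criterion (a hypothetical minimal stand-in for the
# `atoms` object, which only needs `positions` and `covalence_radii` attributes):
#   Atoms = collections.namedtuple('Atoms', ['positions', 'covalence_radii'])
#   atoms = Atoms(positions=np.array([[0., 0., 0.], [0., 0., 1.5], [5., 5., 5.]]),
#                 covalence_radii=np.array([0.7, 0.7, 0.7]))
#   get_bonds_with_radii(atoms, 1.15)  # atom 0 bonds to atom 1: 1.5 <= (0.7 + 0.7) * 1.15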
def get_bonds_symetric_indicies(bond_target_index_arrays):
"""
inserts all symetric bonds into the of bonds for each atom
:param bond_target_index_arrays: list with all symetric bond indices
:return:
"""
bond_target_index_arrays = [bond_target_index_array.tolist() for bond_target_index_array in bond_target_index_arrays]
for index, bond_target_index_array in enumerate(bond_target_index_arrays):
for bond_target_index in bond_target_index_array:
if bond_target_index > index:
bond_target_index_arrays[bond_target_index].append(index)
bond_target_index_arrays = np.array(bond_target_index_arrays)
return bond_target_index_arrays
def calculate_bond_angles(atoms, bond_target_index_arrays):
# Build dict: atom index -> bonded atom indices
atom_bonds = collections.defaultdict(list)
for source_index, target_indices in enumerate(bond_target_index_arrays):
for target_index in target_indices:
atom_bonds[source_index].append(target_index)
atom_bonds[target_index].append(source_index)
# Calculate angles between bonds sharing an atom
bond_angles = {}
for shared_atom_index, target_indices in atom_bonds.items():
shared_atom_position = atoms.positions[shared_atom_index]
for i, target_index1 in enumerate(target_indices):
vec1 = atoms.positions[target_index1]-shared_atom_position
nvec1 = vec1/np.linalg.norm(vec1)
for target_index2 in target_indices[i+1:]:
vec2 = atoms.positions[target_index2]-shared_atom_position
nvec2 = vec2/np.linalg.norm(vec2)
bond_angle = clamping_acos(np.dot(nvec1, nvec2))
bond_angles[((target_index1, shared_atom_index), (shared_atom_index, target_index2))] = bond_angle
bond_angles[((target_index2, shared_atom_index), (shared_atom_index, target_index1))] = bond_angle
# Calculate bond chains of length 3
bond_chains = []
for source_index in atom_bonds.keys():
bond_chains += find_bond_chains(atoms, source_index, atom_bonds)
bond_chains_without_duplicates = set()
for bond_chain in bond_chains:
if tuple(reversed(bond_chain)) not in bond_chains_without_duplicates:
bond_chains_without_duplicates.add(tuple(bond_chain))
bond_chains = bond_chains_without_duplicates
# Calculate angle in these chains around the axis of the connecting bond
# according to http://en.wikipedia.org/wiki/Dihedral_angle
bond_chain_angles = {}
for bond_chain in bond_chains:
index1, index2, index3, index4 = bond_chain
axis = normalized(atoms.positions[index3] - atoms.positions[index2])
vec1 = atoms.positions[index1] - atoms.positions[index2]
vec1 -= np.dot(vec1, axis)*axis
nvec1 = normalized(vec1)
vec2 = atoms.positions[index4] - atoms.positions[index3]
vec2 -= np.dot(vec2, axis)*axis
nvec2 = normalized(vec2)
angle = clamping_acos(np.dot(nvec1, nvec2))
if np.dot(nvec2, np.cross(nvec1, axis)) < 0:
angle = -angle
bond_chain_angles[tuple(bond_chain)] = angle
return bond_angles, bond_chain_angles
def normalized(vec):
""" Return the normalized version of a numpy array """
return vec/la.norm(vec)
def find_bond_chains(atoms, source_index, atom_bonds, length=3, previous_bond_chain=None):
if previous_bond_chain is None:
previous_bond_chain = [source_index]
length -= 1
bond_chains = []
for target_index in atom_bonds[source_index]:
if target_index not in previous_bond_chain:
new_bond_chain = previous_bond_chain + [target_index]
new_source_index = target_index
if length > 0:
new_bond_chains = find_bond_chains(atoms, new_source_index, atom_bonds, length, new_bond_chain)
if new_bond_chains:
bond_chains += new_bond_chains
else:
bond_chains.append(new_bond_chain)
return bond_chains
def export_bonds(filename, atoms):
bond_target_index_arrays = get_bonds_with_constant_delta(atoms, 2.8)
with open(filename, "w") as outfile:
for source_index, target_indices in enumerate(bond_target_index_arrays):
for target_index in target_indices:
outfile.write("{} {}\n".format(source_index, target_index))
def export_bond_angles(filename, atoms):
bond_target_index_arrays = get_bonds_with_constant_delta(atoms, 2.8)
bond_angles, bond_chain_angles = calculate_bond_angles(atoms, bond_target_index_arrays)
with open(filename, "w") as outfile:
for bond1, bond2 in bond_angles.keys():
if bond1[0] > bond2[1]:
outfile.write("{} {} {} {}\n".format(bond1[0], bond1[1], bond2[1], bond_angles[bond1, bond2]))
def export_bond_dihedral_angles(filename, atoms):
bond_target_index_arrays = get_bonds_with_constant_delta(atoms, 2.8)
bond_angles, bond_chain_angles = calculate_bond_angles(atoms, bond_target_index_arrays)
with open(filename, "w") as outfile:
for bond_chain, angle in bond_chain_angles.items():
outfile.write("{} {} {} {}".format(*bond_chain))
outfile.write(" {}\n".format(angle))
def main():
file_name = sys.argv[1]
frame = int(sys.argv[2])
f = file.File.open(file_name)
atoms = f.getatoms(frame)
    # the constant-delta criterion is available as an alternative:
    # bond_target_index_arrays = get_bonds_with_constant_delta(atoms, 2.8)
    bond_target_index_arrays = get_bonds_with_radii(atoms, 1.15)
    bond_angles, bond_chain_angles = calculate_bond_angles(atoms, bond_target_index_arrays)
    # the export_* helpers above recompute the bonds; the files are written
    # directly below from the arrays already computed
with open("bonds.txt", 'w') as outfile:
for source_index, target_indices in enumerate(bond_target_index_arrays):
for target_index in target_indices:
outfile.write("{} {}\n".format(source_index, target_index))
with open("bond_angles.txt", 'w') as outfile:
for bond1, bond2 in bond_angles.keys():
if bond1[0] > bond2[1]:
outfile.write("{} {} {} {}\n".format(bond1[0], bond1[1], bond2[1], bond_angles[bond1, bond2]))
with open("bond_dihedral_angles.txt", 'w') as outfile:
for bond_chain, angle in bond_chain_angles.items():
outfile.write("{} {} {} {}".format(*bond_chain))
outfile.write(" {}\n".format(angle))
if __name__ == '__main__':
main()
|
{"hexsha": "74145d1673940b9e9dc23420a7a28f47b57183a2", "size": 8436, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/core/bonds.py", "max_stars_repo_name": "sciapp/pyMolDyn", "max_stars_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-10-25T09:48:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-30T18:59:50.000Z", "max_issues_repo_path": "src/core/bonds.py", "max_issues_repo_name": "sciapp/pyMolDyn", "max_issues_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-09-19T06:03:36.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-28T11:29:23.000Z", "max_forks_repo_path": "src/core/bonds.py", "max_forks_repo_name": "sciapp/pyMolDyn", "max_forks_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6060606061, "max_line_length": 121, "alphanum_fraction": 0.6954717876, "include": true, "reason": "import numpy", "num_tokens": 1991}
|
{"mathlib_filename": "Mathlib.Topology.Instances.RealVectorSpace", "llama_tokens": 0}
|
|
from .match_base import Matcher
import numpy as np
from .tester import Tester
from gensim.models import Word2Vec
class Word2Vec_Matcher(Matcher, Tester):
"""
    Create a classifier based on the word2vec natural language model.
In order to make this work we treat every incoming column as a corpus.
The columns are already split and prepared by the feature class.
For training/predicting we calculate the mean score of the corpus in a column and use this
as a feature vector for the classifier.
"""
def __init__(self, featuremap):
Matcher.__init__(self, featuremap)
self.train()
def train(self):
corpus = self.featuremap['corpus']
corpus, targetpoints = corpus.get_features_targets()
self.train_manual(corpus, targetpoints)
def train_manual(self, corpus, targetpoints):
"""
Train the classifier by first computing vectors from the w2vec model.
Per column a word2vec model is created, and the datapoints are the mean scores of the corpus in the word2vec model.
A classifier is then trained upon these mean datapoints
"""
self.model = Word2Vec(corpus, min_count=1, size=200)
self.w2v = dict(zip(self.model.wv.index2word, self.model.wv.syn0))
self.dim = len(list(self.w2v.values())[0])
datapoints = self.mean_scores(corpus)
self.create_oneclass_dict(datapoints, targetpoints)
self.clf.fit(datapoints, targetpoints)
def classify_instance(self, entry):
corpus = self.featuremap['corpus']
col_tokenized = corpus.extract_features_column([entry])
datapoints = self.prediction_score(col_tokenized)
return self.clf.predict(datapoints.reshape(1, -1))
def classify_instance_proba(self, entry):
corpus = self.featuremap['corpus']
col_tokenized = corpus.extract_features_column([entry])
datapoints = self.prediction_score(col_tokenized)
return self.clf.predict_proba(datapoints.reshape(1, -1))
def classify_column(self, column, detect_outlier=False):
corpus = self.featuremap['corpus']
col_tokenized = corpus.extract_features_column(column)
datapoints = self.prediction_score(col_tokenized)
prediction = self.clf.predict(datapoints.reshape(1, -1))
if detect_outlier:
outlier = self.outlier_detector_dict[prediction[0]].predict(datapoints.reshape(1, -1))
return self.clf.predict(datapoints.reshape(1, -1)), outlier
return prediction
def classify_column_proba(self, column, detect_outlier=False):
corpus = self.featuremap['corpus']
col_tokenized = corpus.extract_features_column(column)
datapoints = self.prediction_score(col_tokenized)
prediction = self.clf.predict(datapoints.reshape(1, -1))
if detect_outlier:
outlier = self.outlier_detector_dict[prediction[0]].predict_proba(datapoints.reshape(1, -1))
return self.clf.predict_proba(datapoints.reshape(1, -1)), outlier
return self.clf.predict_proba(datapoints.reshape(1, -1))
def prediction_score(self, column_tokens):
"""
Get the scoring vector of the column tokens based on the model.
Use the average if the total number of entries is bigger than 0.
"""
score = np.zeros(self.dim)
num_entries = 0
for token in column_tokens:
if token in self.w2v:
num_entries += 1
score += self.w2v[token]
        if num_entries > 0:  # average over the tokens found in the model
            score = score / float(num_entries)
return score
def mean_scores(self, corpus):
"""
We use the model to get the mean score of the entire corpus/column,
and use this to train the classifier.
"""
scores = []
for column in corpus:
total = np.zeros(self.dim)
for word in column:
total += self.w2v[word]
scores.append(total / float(len(column)))
return np.array(scores)
def classify_prepared_instance(self, entry):
target_score = self.prediction_score(entry)
return self.clf.predict(target_score.reshape(1, -1))[0]
def execute_test(self, num_tests=5, learnset_ratio=0.7):
corpus = self.featuremap['corpus']
corpus, targetpoints = corpus.get_features_targets()
train_arguments = [corpus, targetpoints, learnset_ratio]
return self.k_fold_test_classifier(self.create_datasets, train_arguments,
self.train_manual, self.classify_prepared_instance, num_tests)
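# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal standalone illustration of the mean word-vector idea above, using
# the same (older) gensim API as this module. Paste into a separate script;
# this module's relative imports prevent running it directly. The toy corpus
# and tokens are illustrative.
#
#   import numpy as np
#   from gensim.models import Word2Vec
#
#   toy_corpus = [['red', 'green', 'blue'], ['car', 'bus', 'train']]
#   model = Word2Vec(toy_corpus, min_count=1, size=16)
#   w2v = dict(zip(model.wv.index2word, model.wv.syn0))
#   column = ['red', 'blue', 'hovercraft']  # last token is out-of-vocabulary
#   score = np.zeros(16)
#   hits = sum(token in w2v for token in column)
#   for token in column:
#       if token in w2v:
#           score += w2v[token]
#   if hits:
#       score /= float(hits)
#   print(score.shape)  # (16,) -- the column's mean feature vector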
|
{"hexsha": "6c8a504cf2afc249aceed7c9e21dd29f70232619", "size": 4102, "ext": "py", "lang": "Python", "max_stars_repo_path": "schema_matching/column_classifiers/match_word2vec.py", "max_stars_repo_name": "JordyBottelier/arpsas", "max_stars_repo_head_hexsha": "1d10f18d082b71ad2931852b8d88ad963add8fbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-23T01:11:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T13:29:42.000Z", "max_issues_repo_path": "schema_matching/column_classifiers/match_word2vec.py", "max_issues_repo_name": "JordyBottelier/arpsas", "max_issues_repo_head_hexsha": "1d10f18d082b71ad2931852b8d88ad963add8fbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-18T20:36:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T00:47:32.000Z", "max_forks_repo_path": "schema_matching/column_classifiers/match_word2vec.py", "max_forks_repo_name": "JordyBottelier/arpsas", "max_forks_repo_head_hexsha": "1d10f18d082b71ad2931852b8d88ad963add8fbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-23T01:12:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-27T17:48:30.000Z", "avg_line_length": 37.9814814815, "max_line_length": 119, "alphanum_fraction": 0.7537786446, "include": true, "reason": "import numpy", "num_tokens": 1055}
|
"""Test tile classificaiton speed.
Use tensorflow to divide tiles, construct graph, and run inference.
Served as the onboard scripts to run on Jetson.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import numpy as np
import time
import os
import tensorflow as tf
from PIL import Image
tf.logging.set_verbosity(tf.logging.INFO)
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    # Reconstructed body (the original was empty): build a placeholder-based
    # preprocessing graph matching the names this function returns.
    input_image = tf.placeholder(tf.uint8, shape=[None, None, None, 3])
    resized = tf.image.resize_images(
        tf.cast(input_image, tf.float32), [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    return input_image, normalized
def _divide_to_tiles(im, grid_w, grid_h, tile_w, tile_h, allocated_tiles):
    # Row majored. If tiles are divided into 2x2,
    # then the sequence is (0,0), (0,1), (1,0), (1,1),
    # in which the 1st index is on the x-axis, the 2nd index on the y-axis.
    tiles = allocated_tiles  # fill the preallocated tile buffer in place
    for h_idx in range(0, grid_w):
        for v_idx in range(0, grid_h):
            tile_x = int(h_idx * tile_w)
            tile_y = int(v_idx * tile_h)
            current_tile = im[tile_y:tile_y + tile_h, tile_x:tile_x + tile_w]
            tiles[h_idx * grid_h + v_idx] = current_tile
    return tiles
def _tf_divide_to_tiles(images_tensor, tile_w, tile_h, grid_w, grid_h):
tiles = []
for h_idx in range(0, grid_w):
for v_idx in range(0, grid_h):
tile_x = h_idx * tile_w
tile_y = v_idx * tile_h
current_tile = tf.image.crop_to_bounding_box(
images_tensor, tile_y, tile_x, tile_h, tile_w)
tiles.append(current_tile)
tiles = tf.transpose(tiles, perm=[1, 0, 2, 3, 4])
tf.logging.info('tile shape before concat: {}'.format(tiles))
tiles = tf.reshape(tiles, [-1, tile_h, tile_w, 3])
tf.logging.info('tile shape after reshape: {}'.format(tiles))
return tiles
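# Hedged shape note (added): with a [batch, H, W, C] input where
# H == grid_h * tile_h and W == grid_w * tile_w, the transpose + reshape
# above yields [batch * grid_w * grid_h, tile_h, tile_w, 3], i.e. all tiles
# of each image laid out contiguously, ready to batch through the network.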
if __name__ == "__main__":
model_file = None
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "MobilenetV1/Predictions/Reshape_1"
parser = argparse.ArgumentParser()
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
parser.add_argument("--image_dir", help="Test image directory")
parser.add_argument("--grid_w", help="# of tile horizontally")
parser.add_argument("--grid_h", help="# of tiles vertically")
parser.add_argument(
"--batch_size", help="max # of tiles to feed in at once")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
assert args.image_dir
image_dir = args.image_dir
assert args.grid_w
grid_w = int(args.grid_w)
assert args.grid_h
grid_h = int(args.grid_h)
assert args.batch_size
batch_size = int(args.batch_size)
graph = load_graph(model_file)
with graph.as_default():
input_image = tf.placeholder(tf.uint8, shape=[None, None, None, 3])
resized = tf.image.resize_images(
input_image, [input_height * grid_h, input_width * grid_w],
method=tf.image.ResizeMethod.BILINEAR)
        float_caster = tf.cast(resized, tf.float32)
        normalized = tf.divide(
            tf.subtract(float_caster, [input_mean]), [input_std])
tiles = _tf_divide_to_tiles(normalized, input_width, input_height,
grid_w, grid_h)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
test_images_path = glob.glob(os.path.join(image_dir, '*'))
tf.logging.info('model_file: {}'.format(model_file))
latencies = []
tile_latencies = []
tile_normalization_latencies = []
with tf.Session(graph=graph) as sess:
# warm up
image_np = (np.random.rand(1, input_height * grid_h,
input_width * grid_w, 3) * 255).astype(
np.uint8)
tiles_output = sess.run(tiles, {input_image: image_np})
results = sess.run(output_operation.outputs[0], {
input_operation.outputs[0]: tiles_output
})
results = np.squeeze(results)
# actual measurement
for _ in range(3):
for image_path in test_images_path:
tf.logging.info(image_path)
image = Image.open(image_path)
image_np = np.asarray(image)
st = time.time()
tiles_output = sess.run(tiles, {input_image: [image_np]})
tile_normalization_latencies.append(time.time() - st)
processed_tiles = 0
while processed_tiles < grid_h * grid_w:
# used to prevent OOM errors
batch_tiles = tiles_output[processed_tiles:
processed_tiles + batch_size]
results = sess.run(output_operation.outputs[0], {
input_operation.outputs[0]: batch_tiles
})
results = np.squeeze(results)
processed_tiles += batch_size
latencies.append(time.time() - st)
tf.logging.info('average latency: {:.1f}ms, std: {:.1f}ms'.format(
np.mean(latencies) * 1000,
np.std(latencies) * 1000))
tf.logging.info(
'tile + normalization latency: {:.1f}ms, std: {:.1f}ms'.format(
np.mean(tile_normalization_latencies) * 1000,
np.std(tile_normalization_latencies) * 1000))
tf.logging.info('latencies: {}'.format(latencies))
|
{"hexsha": "a5496aee376d4f338339077a9a0158e59ee0aa66", "size": 6663, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/tile_inference_speed/test_classification_speed.py", "max_stars_repo_name": "cmusatyalab/dronesearch", "max_stars_repo_head_hexsha": "9849637555185efa0a484f49bef43ad734964e8a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-11-19T10:57:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-08T23:19:12.000Z", "max_issues_repo_path": "experiments/tile_inference_speed/test_classification_speed.py", "max_issues_repo_name": "cmusatyalab/dronesearch", "max_issues_repo_head_hexsha": "9849637555185efa0a484f49bef43ad734964e8a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:18:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T01:36:22.000Z", "max_forks_repo_path": "experiments/tile_inference_speed/test_classification_speed.py", "max_forks_repo_name": "cmusatyalab/dronesearch", "max_forks_repo_head_hexsha": "9849637555185efa0a484f49bef43ad734964e8a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4098360656, "max_line_length": 79, "alphanum_fraction": 0.6217919856, "include": true, "reason": "import numpy", "num_tokens": 1553}
|
import sys
import typing
import numba as nb
import numpy as np
@nb.njit(
(nb.i8, ),
cache=True,
)
def fw_build(n: int) -> np.ndarray:
return np.full(n + 1, 0, np.int64)
@nb.njit(
(nb.i8[:], ),
cache=True,
)
def fw_build_from_array(
a: np.ndarray,
) -> np.ndarray:
fw = a.copy()
assert a[0] == 0
n = fw.size
for i in range(n):
j = i + (i & -i)
if j < n: fw[j] ^= fw[i]
return fw
@nb.njit(
(nb.i8[:], nb.i8, nb.i8),
cache=True,
)
def fw_set(
fw: np.ndarray,
i: int,
x: int,
) -> typing.NoReturn:
while i < len(fw):
fw[i] ^= x
i += i & -i
@nb.njit(
(nb.i8[:], nb.i8),
cache=True,
)
def fw_get(
fw: np.ndarray,
i: int,
) -> int:
v = 0
while i > 0:
v ^= fw[i]
i -= i & -i
return v
@nb.njit(
(nb.i8[:], nb.i8, nb.i8),
cache=True,
)
def fw_get_range(
fw: np.ndarray,
l: int,
r: int,
) -> int:
return fw_get(fw, l - 1) ^ fw_get(fw, r)
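# Hedged note (added): the tree stores XOR aggregates, so fw_get(fw, i) is
# a[1] ^ ... ^ a[i], and because XOR is its own inverse,
# fw_get_range(fw, l, r) = fw_get(fw, l - 1) ^ fw_get(fw, r) recovers
# a[l] ^ ... ^ a[r]. Tiny illustration for a = [0, 1, 2, 3] (1-indexed):
#   fw = fw_build_from_array(np.array([0, 1, 2, 3], np.int64))
#   fw_get_range(fw, 2, 3)  # -> 2 ^ 3 == 1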
@nb.njit(
(nb.i8[:], nb.i8[:, :]),
cache=True,
)
def solve(
a: np.ndarray,
txy: np.ndarray,
) -> typing.NoReturn:
n, m = len(a), len(txy)
fw = np.zeros(n + 1, np.int64)
fw[1:] = a
fw = fw_build_from_array(fw)
for j in range(m):
t, x, y = txy[j]
if t == 1:
fw_set(fw, x, y)
else:
print(fw_get_range(fw, x, y))
def main() -> typing.NoReturn:
n, q = map(int, input().split())
a = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
txy = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(q, 3)
solve(a, txy)
main()
|
{"hexsha": "7fd30ea6b7403d4d78c5ac573e9d4e004fc1049a", "size": 1642, "ext": "py", "lang": "Python", "max_stars_repo_path": "jp.atcoder/abc185/abc185_f/25707777.py", "max_stars_repo_name": "kagemeka/atcoder-submissions", "max_stars_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-09T03:06:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T03:06:25.000Z", "max_issues_repo_path": "jp.atcoder/abc185/abc185_f/25707777.py", "max_issues_repo_name": "kagemeka/atcoder-submissions", "max_issues_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-05T22:53:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T01:29:30.000Z", "max_forks_repo_path": "jp.atcoder/abc185/abc185_f/25707777.py", "max_forks_repo_name": "kagemeka/atcoder-submissions", "max_forks_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.7927927928, "max_line_length": 43, "alphanum_fraction": 0.4902557856, "include": true, "reason": "import numpy,import numba", "num_tokens": 589}
|
# --------------------------------------------------------
# Ke Yan,
# Imaging Biomarkers and Computer-Aided Diagnosis Laboratory (CADLab)
# National Institutes of Health Clinical Center,
# Apr 2019.
# This file contains some default configuration values,
# which will be overwritten by values in config.yml and default.yml
# --------------------------------------------------------
import numpy as np
from easydict import EasyDict as edict
import yaml
config = edict()
# algorithm related params
config.PIXEL_MEANS = np.array([50])
config.MAX_IM_SIZE = 512
config.SCALE = 512
config.NORM_SPACING = -1
config.SLICE_INTV = 2
config.WINDOWING = [-1024, 3071]
config.IMG_DO_CLIP = False # clip the black borders of ct images
config.TRAIN = edict()
config.SAMPLES_PER_BATCH = 256
config.TRAIN.USE_PRETRAINED_MODEL = True
config.TEST = edict()
config.ROI_METHOD = 'FIXED_CONTEXT'
config.BOX_PAD = 60
config.PAD_BORDER = True
# default settings
default = edict()
# default network
default.network = 'vgg'
default.base_lr = 0.001
default.dataset = 'DeepLesion'
default.image_set = 'train'
# default training
default.frequent = 20
default.model_path = 'checkpoints/'
default.res_path = 'results/'
default.epoch = 10
default.lr = default.base_lr
default.lr_step = '7'
default.prefetch_thread_num = 4 # 0: no prefetch
default.world_size = 1 # number of distributed processes
default.dist_url = 'tcp://224.66.41.62:23456' # url used to set up distributed training
default.dist_backend = 'gloo' # distributed backend
default.seed = None # seed for initializing training
default.gpus = '0'
default.val_gpu = default.gpus
default.val_image_set = 'val'
default.val_vis = False
default.val_shuffle = False
default.val_max_box = 5
default.val_thresh = 0
default.weight_decay = .0005
default.groundtruth_file = 'DL_info.csv'
default.image_path = ''
default.validate_at_begin = True
default.testing = False
default.flip = False
default.shuffle = True
default.begin_epoch = 0
default.show_avg_loss = 100 # 1: show exact loss of each batch. >1: smooth the shown loss
def merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b
    """
    if type(a) is not edict:
        return
    for k, v in a.items():  # .items() works in both Python 2 and 3 (iteritems is Py2-only)
        # recursively merge dicts
        if type(v) is edict:
            merge_a_into_b(a[k], b[k])
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    with open(filename, 'r') as f:  # not valid grammar in Python 2.5
        yaml_cfg = edict(yaml.load(f))
    merge_a_into_b(yaml_cfg, config)  # merge, as the docstring describes
    return yaml_cfg
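# Hedged usage sketch (the file name is illustrative):
#   cfg_from_file('config.yml')  # overrides matching keys in `config` above
#   print(config.SCALE)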
|
{"hexsha": "84d91e59f9e808b22b6db95fd45e74204cb35af1", "size": 2703, "ext": "py", "lang": "Python", "max_stars_repo_path": "config.py", "max_stars_repo_name": "haehn/dicompute", "max_stars_repo_head_hexsha": "ee4364aaa1258a370bd62bbaf6e577936bf463b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "config.py", "max_issues_repo_name": "haehn/dicompute", "max_issues_repo_head_hexsha": "ee4364aaa1258a370bd62bbaf6e577936bf463b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config.py", "max_forks_repo_name": "haehn/dicompute", "max_forks_repo_head_hexsha": "ee4364aaa1258a370bd62bbaf6e577936bf463b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.03, "max_line_length": 91, "alphanum_fraction": 0.6681465039, "include": true, "reason": "import numpy", "num_tokens": 669}
|
import argparse
import os
import numpy as np
import scanpy as sc
from scipy import sparse
import trvae
if not os.getcwd().endswith("tests"):
os.chdir("./tests")
DATASETS = {
"CelebA": {"name": 'celeba', "gender": "Male", 'attribute': "Smiling",
"width": 64, 'height': 64, "n_channels": 3},
# "Horse2Zebra": {"name": "h2z", "source_key": "horse", "target_key": "zebra", "size": 256, "n_channels": 3,
# "resize": 64},
# "Apple2Orange": {"name": "a2o", "source_key": "apple", "target_key": "orange", "size": 256, "n_channels": 3,
# "resize": 64}
}
def train_network(data_dict=None,
n_epochs=500,
batch_size=512,
dropout_rate=0.2,
preprocess=True,
learning_rate=0.001,
gpus=1,
max_size=50000,
early_stopping_limit=50,
):
data_name = data_dict['name']
img_width = data_dict.get("width", None)
img_height = data_dict.get("height", None)
n_channels = data_dict.get("n_channels", None)
attribute = data_dict.get('attribute', None)
if data_name == "celeba":
gender = data_dict.get('gender', None)
data = trvae.prepare_and_load_celeba(file_path="../data/celeba/img_align_celeba.zip",
attr_path="../data/celeba/list_attr_celeba.txt",
landmark_path="../data/celeba/list_landmarks_align_celeba.txt",
gender=gender,
attribute=attribute,
max_n_images=max_size,
img_width=img_width,
img_height=img_height,
restore=True,
save=True)
if sparse.issparse(data.X):
data.X = data.X.A
data.obs.loc[(data.obs['labels'] == -1) & (data.obs['condition'] == -1), 'label'] = 0
data.obs.loc[(data.obs['labels'] == -1) & (data.obs['condition'] == 1), 'label'] = 1
data.obs.loc[(data.obs['labels'] == 1) & (data.obs['condition'] == -1), 'label'] = 2
data.obs.loc[(data.obs['labels'] == 1) & (data.obs['condition'] == 1), 'label'] = 3
if preprocess:
data.X /= 255.0
else:
data = sc.read(f"../data/{data_name}/{data_name}.h5ad")
if preprocess:
data.X /= 255.0
train_size = int(data.shape[0] * 0.85)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
train_idx = indices[:train_size]
test_idx = indices[train_size:]
train_data = data[train_idx, :]
valid_data = data[test_idx, :]
network = trvae.FaceNet(x_dimension=(img_width, img_height, n_channels),
learning_rate=learning_rate,
model_path=f"../models/",
gpus=gpus,
dropout_rate=dropout_rate)
network.train(train_data,
use_validation=True,
valid_adata=valid_data,
n_epochs=n_epochs,
batch_size=batch_size,
verbose=2,
early_stop_limit=early_stopping_limit,
shuffle=True,
save=True)
print("Model has been trained")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sample a trained autoencoder.')
arguments_group = parser.add_argument_group("Parameters")
arguments_group.add_argument('-d', '--data', type=str, required=True,
help='name of dataset you want to train')
arguments_group.add_argument('-n', '--n_epochs', type=int, default=5000, required=False,
help='Maximum Number of epochs for training')
arguments_group.add_argument('-c', '--batch_size', type=int, default=512, required=False,
help='Batch Size')
arguments_group.add_argument('-r', '--dropout_rate', type=float, default=0.4, required=False,
help='Dropout ratio')
arguments_group.add_argument('-w', '--width', type=int, default=0, required=False,
help='Image Width to be resize')
arguments_group.add_argument('-e', '--height', type=int, default=0, required=False,
help='Image Height to be resize')
    arguments_group.add_argument('-p', '--preprocess', type=int, default=1, required=False,
                                 help='whether to scale images to [0, 1] (0 disables)')
arguments_group.add_argument('-l', '--learning_rate', type=float, default=0.001, required=False,
help='Learning Rate for Optimizer')
    arguments_group.add_argument('-g', '--gpus', type=int, default=1, required=False,
                                 help='Number of GPUs to use')
arguments_group.add_argument('-x', '--max_size', type=int, default=50000, required=False,
help='Max Size for CelebA')
arguments_group.add_argument('-t', '--do_train', type=int, default=1, required=False,
help='do train the network')
    arguments_group.add_argument('-y', '--early_stopping_limit', type=int, default=50, required=False,
                                 help='Early stopping patience (epochs)')
args = vars(parser.parse_args())
data_dict = DATASETS[args['data']]
if args['width'] > 0 and args['height'] > 0:
data_dict['width'] = args['width']
data_dict['height'] = args['height']
if args['preprocess'] == 0:
args['preprocess'] = False
else:
args['preprocess'] = True
if args['max_size'] == 0:
args['max_size'] = None
del args['data']
del args['width']
del args['height']
if args['do_train'] > 0:
del args['do_train']
train_network(data_dict=data_dict, **args)
print(f"Model for {data_dict['name']} has been trained and sample results are ready!")
|
{"hexsha": "fd308e21b50381d5ad12ddd55fb03b57fe19ec16", "size": 6248, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_facenet.py", "max_stars_repo_name": "gokceneraslan/trVAE", "max_stars_repo_head_hexsha": "596127b02f4a86ed6a91d5a3f666d6b5d97aff0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2019-10-07T21:46:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T15:30:50.000Z", "max_issues_repo_path": "tests/test_facenet.py", "max_issues_repo_name": "gokceneraslan/trVAE", "max_issues_repo_head_hexsha": "596127b02f4a86ed6a91d5a3f666d6b5d97aff0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-05-15T16:59:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-14T11:35:29.000Z", "max_forks_repo_path": "tests/test_facenet.py", "max_forks_repo_name": "gokceneraslan/trVAE", "max_forks_repo_head_hexsha": "596127b02f4a86ed6a91d5a3f666d6b5d97aff0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-04T11:47:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-05T17:48:47.000Z", "avg_line_length": 42.5034013605, "max_line_length": 114, "alphanum_fraction": 0.5329705506, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1381}
|
from cartographer.filterers import KernelDensityFilterer
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.testing import assert_true, assert_raises
import numpy as np
def test_kde_one_dimension():
X, true_labels = make_blobs(n_samples=1000, n_features=1)
kde_filterer = KernelDensityFilterer()
kde_prob = kde_filterer.fit_transform(X)
assert_true(kde_prob.shape[0] == X.shape[0])
assert_true(kde_prob.shape[1] == 1)
def test_kde_two_dimension():
X, true_labels = make_blobs(n_samples=1000, n_features=2)
kde_filterer = KernelDensityFilterer()
kde_prob = kde_filterer.fit_transform(X)
assert_true(kde_prob.shape[0] == X.shape[0])
assert_true(kde_prob.shape[1] == 1)
|
{"hexsha": "8a03c9fdf7b01109b56530ad11a3beb6d493a777", "size": 738, "ext": "py", "lang": "Python", "max_stars_repo_path": "cartographer/tests/test_filterers.py", "max_stars_repo_name": "pablodecm/cartographer", "max_stars_repo_head_hexsha": "50c56af9962cc896697ba8f88885d9da7eb50148", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-12-14T23:44:35.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-04T01:07:51.000Z", "max_issues_repo_path": "cartographer/tests/test_filterers.py", "max_issues_repo_name": "pablodecm/cartographer", "max_issues_repo_head_hexsha": "50c56af9962cc896697ba8f88885d9da7eb50148", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2016-11-09T11:06:32.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-03T14:02:08.000Z", "max_forks_repo_path": "cartographer/tests/test_filterers.py", "max_forks_repo_name": "pablodecm/cartographer", "max_forks_repo_head_hexsha": "50c56af9962cc896697ba8f88885d9da7eb50148", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-07-19T16:15:03.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-26T08:15:45.000Z", "avg_line_length": 33.5454545455, "max_line_length": 61, "alphanum_fraction": 0.7642276423, "include": true, "reason": "import numpy", "num_tokens": 199}
|
(*
* Copyright 2019, NTU
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* Author: Albert Rizaldi, NTU Singapore
*)
theory Signed_Mult_Typed
imports VHDL_Hoare_Typed Bits_Int_Aux
begin
datatype sig = A | B | C
definition mult :: "sig conc_stmt" where
"mult \<equiv> process {A, B} : Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1"
lemma potential_tyenv:
assumes "seq_wt \<Gamma> (Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
shows "\<exists>len1>0. \<exists>len2>0. \<Gamma> A = Lty Uns len1 \<and> \<Gamma> B = Lty Uns len2 \<and> \<Gamma> C = Lty Uns (len1 + len2)
\<or> \<Gamma> A = Lty Sig len1 \<and> \<Gamma> B = Lty Sig len2 \<and> \<Gamma> C = Lty Sig (len1 + len2)"
proof (rule seq_wt_cases(4)[OF assms])
assume "bexp_wt \<Gamma> (Bmult (Bsig A) (Bsig B)) (\<Gamma> C)"
obtain len1 len2 where " \<Gamma> A = Lty Uns len1 \<and> \<Gamma> B = Lty Uns len2 \<and> \<Gamma> C = Lty Uns (len1 + len2)
\<or> \<Gamma> A = Lty Sig len1 \<and> \<Gamma> B = Lty Sig len2 \<and> \<Gamma> C = Lty Sig (len1 + len2)"
and "0 < len1" and "0 < len2"
apply (rule bexp_wt_cases_slice(5)[OF \<open>bexp_wt \<Gamma> (Bmult (Bsig A) (Bsig B)) (\<Gamma> C)\<close>])
by (metis bexp_wt_cases_slice(2))+
thus ?thesis
by auto
qed
locale signed_multiplication =
fixes \<Gamma> :: "sig tyenv"
fixes len len1 len2 :: nat
assumes len_def: "len = len1 + len2"
assumes atype: "\<Gamma> A = Lty Sig len1" and btype: "\<Gamma> B = Lty Sig len2" and ctype: "\<Gamma> C = Lty Sig len"
assumes len1: "0 < len1" and len2: "0 < len2"
begin
lemma well_typed:
"seq_wt \<Gamma> (Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
apply (rule seq_wt.intros(4))
unfolding ctype len_def apply (rule bexp_wt.intros(18))
apply (rule bexp_wt.intros(3))
apply (rule atype[symmetric])
apply (rule bexp_wt.intros)
apply (rule btype[symmetric])
using len1 len2 by auto
abbreviation "lof_wline tw sig n \<equiv> lval_of (wline_of tw sig n)"
definition inv :: "sig assn2" where
"inv tw \<equiv> (lof_wline tw C (fst tw) =
bin_to_bl len (sbl_to_bin (lof_wline tw A (fst tw - 1)) * sbl_to_bin (lof_wline tw B (fst tw - 1))))"
definition inv2 :: "sig assn2" where
"inv2 tw \<equiv> (disjnt {A, B} (event_of tw) \<longrightarrow> (\<forall>i > fst tw. lof_wline tw C i = lof_wline tw C (fst tw)))"
lemma inv_next_time:
fixes tw
defines "v \<equiv> eval_world_raw2 tw (Bmult (Bsig A) (Bsig B))"
defines "tw' \<equiv> tw[C, 1 :=\<^sub>2 v]"
assumes "wityping \<Gamma> (snd tw)"
shows "inv (fst tw' + 1, snd tw')"
proof -
have bexpA: "bexp_wt \<Gamma> (Bsig A) (Lty Sig len1)" and bexpB: "bexp_wt \<Gamma> (Bsig B) (Lty Sig len2)"
using signed_multiplication_axioms unfolding signed_multiplication_def by (metis bexp_wt.intros(3))+
  obtain bsA bsB where evalA: "eval_world_raw (fst tw) (snd tw) (Bsig A) = Lv Sig bsA" and "length bsA = len1" and
    evalB: "eval_world_raw (fst tw) (snd tw) (Bsig B) = Lv Sig bsB" and "length bsB = len2"
using eval_world_raw_lv[OF bexpA `wityping \<Gamma> (snd tw)`] eval_world_raw_lv[OF bexpB `wityping \<Gamma> (snd tw)`] by blast
have "lof_wline tw' C (fst tw + 1) = lval_of v"
unfolding tw'_def worldline_upd2_def worldline_upd_def by auto
also have "... = bin_to_bl len (sbl_to_bin (lof_wline tw A (fst tw)) * sbl_to_bin (lof_wline tw B (fst tw)))"
using evalA evalB `length bsA = len1` `length bsB = len2`
unfolding v_def eval_world_raw.simps eval_arith.simps len_def Let_def by auto
finally show ?thesis
unfolding inv_def tw'_def worldline_upd2_def worldline_upd_def by auto
qed
lemma inv2_next_time:
fixes tw v
defines "tw' \<equiv> tw[C, 1 :=\<^sub>2 v]"
shows "inv2 (fst tw' + 1, snd tw')"
unfolding inv2_def tw'_def worldline_upd2_def worldline_upd_def by auto
lemma mult_conc_hoare:
"\<And>tw. inv tw \<and> inv2 tw \<and> disjnt {A, B} (event_of tw) \<Longrightarrow> inv (fst tw + 1, snd tw)"
proof -
fix tw
assume "inv tw \<and> inv2 tw \<and> disjnt {A, B} (event_of tw)"
hence "inv tw" and "inv2 tw" and "disjnt {A, B} (event_of tw)"
by auto
have "lof_wline tw C (fst tw + 1) = lof_wline tw C (fst tw)"
using `inv2 tw` `disjnt {A, B} (event_of tw)` unfolding inv2_def by auto
also have "... = bin_to_bl len (sbl_to_bin (lval_of (wline_of tw A (get_time tw - 1))) * sbl_to_bin (lval_of (wline_of tw B (get_time tw - 1))))"
using `inv tw` unfolding inv_def by auto
also have "... = bin_to_bl len (sbl_to_bin (lval_of (wline_of tw A (fst tw))) * sbl_to_bin (lval_of (wline_of tw B (fst tw))))"
using `disjnt {A, B} (event_of tw)` unfolding event_of_alt_def
by (smt diff_0_eq_0 disjnt_insert1 mem_Collect_eq)
finally show "inv (fst tw + 1, snd tw)"
unfolding inv_def by auto
qed
lemma mult_conc_hoare2:
"\<And>tw. inv2 tw \<and> disjnt {A, B} (event_of tw) \<Longrightarrow> inv2 (fst tw + 1, snd tw)"
unfolding inv2_def by auto
lemma conc_stmt_wf_mult:
"conc_stmt_wf mult"
unfolding mult_def conc_stmt_wf_def by auto
lemma nonneg_delay_conc_mult:
"nonneg_delay_conc mult"
unfolding mult_def by auto
lemma nonneg_delay_conc_mult':
"nonneg_delay_conc ( process {A, B} : Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
using nonneg_delay_conc_mult unfolding mult_def by auto
lemma conc_wt_mult:
"conc_wt \<Gamma> mult"
unfolding mult_def by (meson conc_wt.intros(1) well_typed)
lemma conc_wt_mult':
"conc_wt \<Gamma> ( process {A, B} : Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
using conc_wt_mult unfolding mult_def by auto
lemma mult_conc_sim2':
"\<Gamma> \<turnstile>\<^sub>s \<lbrace>\<lambda>tw. inv tw \<and> inv2 tw\<rbrace> mult \<lbrace>\<lambda>tw. inv tw \<and> inv2 tw\<rbrace>"
apply (rule While_Suc)
apply (rule Conseq'[where P="wp3_conc \<Gamma> mult (\<lambda>tw. inv (fst tw + 1, snd tw) \<and>
inv2 (fst tw + 1, snd tw))", rotated])
apply (rule wp3_conc_is_pre, rule conc_stmt_wf_mult, rule nonneg_delay_conc_mult, rule conc_wt_mult, simp)
unfolding mult_def wp3_conc_single'[OF conc_wt_mult' nonneg_delay_conc_mult'] wp3_fun.simps
using inv_next_time inv2_next_time mult_conc_hoare mult_conc_hoare2 by presburger
text \<open>Initialisation preserves the invariant\<close>
lemma nonneg_delay_mult:
" nonneg_delay (Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
using nonneg_delay_conc_mult' by auto
lemma seq_wt:
"seq_wt \<Gamma> (Bassign_trans C (Bmult (Bsig A) (Bsig B)) 1)"
using well_typed by blast
lemma init_sat_nand_inv_comb:
"init_sim2_hoare_wt \<Gamma> (\<lambda>tw. fst tw = 0) mult (\<lambda>tw. inv tw \<and> inv2 tw)"
unfolding mult_def
apply (rule AssignI_suc, rule SingleI)
apply (rule Conseq3[where Q="\<lambda>tw. inv (fst tw + 1, snd tw) \<and> inv2 (fst tw + 1, snd tw)", rotated])
apply (rule wp3_fun_is_pre[OF well_typed nonneg_delay_mult], simp)
unfolding wp3_fun.simps using inv_next_time inv2_next_time by blast
lemma correctness:
assumes "sim_fin2 w (i + 1) mult tw'" and "wityping \<Gamma> w"
shows "lof_wline tw' C (i + 1) = bin_to_bl len (sbl_to_bin (lof_wline tw' A i) * sbl_to_bin (lof_wline tw' B i))"
using grand_correctness[OF assms conc_stmt_wf_mult conc_wt_mult nonneg_delay_conc_mult mult_conc_sim2' init_sat_nand_inv_comb]
unfolding mult_def inv_def by (metis (no_types, lifting) add_diff_cancel_right' assms(1)
sim_fin2.cases world_maxtime_lt_fst_tres)
end
end
|
{"author": "rizaldialbert", "repo": "vhdl-semantics", "sha": "352f89c9ccdfe830c054757dfd86caeadbd67159", "save_path": "github-repos/isabelle/rizaldialbert-vhdl-semantics", "path": "github-repos/isabelle/rizaldialbert-vhdl-semantics/vhdl-semantics-352f89c9ccdfe830c054757dfd86caeadbd67159/Signed_Mult_Typed.thy"}
|
import numpy as np
file = np.loadtxt("num.csv", delimiter=',')
print(file)
file = np.loadtxt("num.csv", delimiter=',', skiprows=1)
print(file)
file = np.loadtxt("num.csv", delimiter=',', usecols=[2, 4])
print(file)
file = np.loadtxt("num.csv", delimiter=',', usecols=[2, 4], dtype=str)
print(file)
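# Hedged notes (num.csv itself is not included in this snippet): skiprows=1
# drops a header row; usecols=[2, 4] keeps only the 3rd and 5th columns
# (0-indexed); dtype=str returns the values as strings instead of floats.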
|
{"hexsha": "81f0fce1fc24b1c6e6aa2bb5c4c3943924a92c17", "size": 298, "ext": "py", "lang": "Python", "max_stars_repo_path": "Slides/10_numpy_loadcsv.py", "max_stars_repo_name": "sobil-dalal/Database-Analytical-Programming", "max_stars_repo_head_hexsha": "b9231e4fec11fd59955935639f308ca0417e6caa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Slides/10_numpy_loadcsv.py", "max_issues_repo_name": "sobil-dalal/Database-Analytical-Programming", "max_issues_repo_head_hexsha": "b9231e4fec11fd59955935639f308ca0417e6caa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Slides/10_numpy_loadcsv.py", "max_forks_repo_name": "sobil-dalal/Database-Analytical-Programming", "max_forks_repo_head_hexsha": "b9231e4fec11fd59955935639f308ca0417e6caa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-30T13:13:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-30T13:16:11.000Z", "avg_line_length": 24.8333333333, "max_line_length": 69, "alphanum_fraction": 0.6644295302, "include": true, "reason": "import numpy", "num_tokens": 84}
|
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\documentclass[10pt]{beamer}
\mode<presentation>
{
\usetheme{Hannover}
\usecolortheme{rose}
\usefonttheme{structurebold}
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{footline}{}
\setbeamertemplate{frametitle}{\centering\normalsize\bfseries\itshape\insertframetitle\par\vskip-7pt\hrulefill}
\setbeamercolor{alerted text}{fg=blue}
}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\usepackage{microtype}
\usepackage{comment}
\usepackage{parskip}
\usepackage{graphicx}
\usepackage{siunitx}[=v2]
\usepackage{booktabs}
\usepackage{upgreek}
\usepackage{nicefrac}
\def\half{\nicefrac{1}{2}}
\def\third{\nicefrac{1}{3}}
\def\quarter{\nicefrac{1}{4}}
\def\threequarter{\nicefrac{3}{4}}
\def\twothird{\nicefrac{2}{3}}
\usepackage[version=4]{mhchem}
\usepackage{sansmathfonts} % use greek cmss instead of cm
\usepackage{helvet} % use helvet as normal text
\usepackage[helvet]{sfmath} % use helvet math
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\title{General Chemistry}
\def\mylabel{Chapter}
\def\mydeckno{1}
\def\myterm{SP2020}
\def\myclass{CHEM120}
\def\mydeck{\mylabel\space\mydeckno}
\author{\mydeck}
\institute{}
\date{}
\begin{document}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\begin{frame}
\vspace*{-1cm}\titlepage
\vspace*{-2cm}{\centering \includegraphics[scale=0.1]{figures/zvulogo.jpg}\\[3ex]
\scriptsize John Terhorst, Ph.D. \\ \myterm/\myclass \\ }
\end{frame}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\begin{frame}[plain]{}
% \bigskip \tiny \centering \copyright\ \the\year\ by John Terhorst \\[1ex]
\bigskip \scriptsize \centering Do not distribute without permission. \\[1ex]
Unless otherwise noted, all figures are from \\
McMurry \& Fay \textit{General Chemistry: Atoms First}, 2e \\
Copyright \copyright\ 2014 by Pearson Education, Inc., Saddle River, NJ. \\[1ex]
Adoption of the above textbook provides the instructor with legal permission \\
for use of content assets found within the Instructor Support Materials.
\end{frame}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\section{Introduction}
\subsection{Basics}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\begin{frame}[t]{Title of the slide}
text
\begin{equation*}
G^\prime_{\textnormal{pol},i} =
\frac{-166.0}{R_{\textnormal{vdW},i}+\phi+P_1} +
\sum^{1,2}\frac{P_2V_j}{r^4_{ij}} +
\sum^{1,3}\frac{P_3V_j}{r^4_{ij}} +
\sum^{1,\ge 4}\frac{P_4V_jCCF}{r^4_{ij}}
\end{equation*}
\begin{equation*}
\alpha\beta\lambda\nu\gamma~\upalpha\upbeta\uplambda~12324
\end{equation*}
See the \alert{definition}
\begin{itemize}
\item hi
\end{itemize}
\end{frame}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\section*{Summary}
\begin{frame}[shrink=10]{\mydeck\ Summary}
\tableofcontents[subsectionstyle=hide/hide/hide]
\end{frame}
\begin{comment}
\begin{frame}{End of \mydeck}
\end{frame}
\end{comment}
\begin{frame}{Notes}
\end{frame}
% ==========================================================================================
% ==========================================================================================
% ==========================================================================================
\end{document}
|
{"hexsha": "c856f7daf4f1c2f243772e01676dde8fbb0a38ba", "size": 5447, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "slides.tex", "max_stars_repo_name": "terhorstj/beamer", "max_stars_repo_head_hexsha": "8b715d460bea505f82df731395452ddf743a8c26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slides.tex", "max_issues_repo_name": "terhorstj/beamer", "max_issues_repo_head_hexsha": "8b715d460bea505f82df731395452ddf743a8c26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides.tex", "max_forks_repo_name": "terhorstj/beamer", "max_forks_repo_head_hexsha": "8b715d460bea505f82df731395452ddf743a8c26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6942675159, "max_line_length": 114, "alphanum_fraction": 0.3434918304, "num_tokens": 1025}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 10:36:17 2020
@author: created by Sowmya Myneni and updated by Dijiang Huang
"""
import numpy as np
import pandas as pd
from keras.utils import np_utils
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
def get_processed_data(datasetFile, categoryMappingsPath, classType='binary'):
inputFile = pd.read_csv(datasetFile, header=None)
X = inputFile.iloc[:, 0:-2].values
label_column = inputFile.iloc[:, -2].values
category_1 = np.array(pd.read_csv(categoryMappingsPath + "1.csv", header=None).iloc[:, 0].values)
category_2 = np.array(pd.read_csv(categoryMappingsPath + "2.csv", header=None).iloc[:, 0].values)
category_3 = np.array(pd.read_csv(categoryMappingsPath + "3.csv", header=None).iloc[:, 0].values)
#category_label = np.array(pd.read_csv(categoryMappingsPath + "41.csv", header=None).iloc[:, 0].values)
ct = ColumnTransformer(
[('X_one_hot_encoder', OneHotEncoder(categories=[category_1, category_2, category_3], handle_unknown='ignore'), [1,2,3])], # The column numbers to be transformed ([1, 2, 3] represents three columns to be transferred)
remainder='passthrough'# Leave the rest of the columns untouched
)
X = np.array(ct.fit_transform(X), dtype=np.float)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
    X = sc.fit_transform(np.array(X))  # Standardize to zero mean and unit variance (not [0, 1])
if classType == 'binary':
y = []
for i in range(len(label_column)):
if label_column[i] == 'normal' or str(label_column[i]) == '0':
y.append(0)
else:
y.append(1)
        # Convert list to array
y = np.array(y)
else:
#Converting to integers from the mappings file
label_map = pd.read_csv(categoryMappingsPath + "41.csv", header=None)
label_category = label_map.iloc[:, 0].values
label_value = label_map.iloc[:, 1].values
y = []
for i in range(len(label_column)):
y.append(label_value[label_category.tolist().index(label_column[i])])
# Encoding the Dependent Variable
y = np_utils.to_categorical(y)
return X, y
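# Hedged usage sketch (paths and file names are illustrative placeholders):
#   X, y = get_processed_data('KDDTrain.csv', 'category_mappings/',
#                             classType='binary')
#   X, y = get_processed_data('KDDTrain.csv', 'category_mappings/',
#                             classType='multiclass')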
|
{"hexsha": "ce5bd73bc2b392fd0b9453e60a9b7bd0be30f989", "size": 2385, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab_4/lab-cs-ml-00301/data_preprocessor.py", "max_stars_repo_name": "ChristopherBilg/cse-548-adv-comp-net-sec", "max_stars_repo_head_hexsha": "8d6256ace822e58cc662ef2fee476d1b1a3e60a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab_4/lab-cs-ml-00301/data_preprocessor.py", "max_issues_repo_name": "ChristopherBilg/cse-548-adv-comp-net-sec", "max_issues_repo_head_hexsha": "8d6256ace822e58cc662ef2fee476d1b1a3e60a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab_4/lab-cs-ml-00301/data_preprocessor.py", "max_forks_repo_name": "ChristopherBilg/cse-548-adv-comp-net-sec", "max_forks_repo_head_hexsha": "8d6256ace822e58cc662ef2fee476d1b1a3e60a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1666666667, "max_line_length": 236, "alphanum_fraction": 0.6264150943, "include": true, "reason": "import numpy", "num_tokens": 560}
|
# Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""EpisodicComponent class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import sys
import logging
import os
from os.path import dirname, abspath
import numpy as np
import tensorflow as tf
from pagi.utils import image_utils, generic_utils
from pagi.utils.dual import DualData
from pagi.utils.hparam_multi import HParamMulti
from pagi.utils.layer_utils import type_activation_fn
from pagi.utils.np_utils import np_uniform
from pagi.utils.tf_utils import tf_build_interpolate_distributions
from pagi.components.summarize_levels import SummarizeLevels
from pagi.components.composite_component import CompositeComponent
from pagi.components.visual_cortex_component import VisualCortexComponent
from pagi.components.sparse_autoencoder_component import SparseAutoencoderComponent
from pagi.components.sparse_conv_maxpool import SparseConvAutoencoderMaxPoolComponent
from aha.components.dg_sae import DGSAE
from aha.components.dg_scae import DGSCAE
from aha.components.dg_stub import DGStubComponent
from aha.components.label_learner_fc import LabelLearnerFC
from aha.components.hopfieldlike_component import HopfieldlikeComponent
from aha.components.deep_autoencoder_component import DeepAutoencoderComponent
from aha.components.diff_plasticity_component import DifferentiablePlasticityComponent
from aha.utils.interest_filter import InterestFilter
from aha.utils.generic_utils import normalize_minmax
class PCMode(enum.Enum):
PCOnly = 1 # Use only PC
Exclude = 2 # Use all sub-components excluding PC i.e. everything up to PC
Combined = 3 # Use all sub-components including PC
HVC_ENABLED = True # Hierarchical VC
class EpisodicComponent(CompositeComponent):
"""
A component to implement episodic memory, inspired by the Medial Temporal Lobe.
Currently, it consists of a Visual Cortex (Sparse Autoencoder, SAE) and
a Pattern Completer similar to DG/CA3 (Differentiable Plasticity or SAE).
"""
@staticmethod
def default_hparams():
"""Builds an HParam object with default hyperparameters."""
# create component level hparams (this will be a multi hparam, with hparams from sub components)
batch_size = 40
max_outputs = 3
hparam = tf.contrib.training.HParams(
batch_size=batch_size,
output_features='pc', # the output of this subcomponent is used as the component's features
pc_type='sae', # none, hl = hopfield like, sae = sparse autoencoder, dp = differentiable-plasticity
dg_type='fc', # 'none', 'fc', or 'conv' Dentate Gyrus
ll_vc_type='none', # vc label learner: 'none', 'fc'
ll_pc_type='none', # pc label learner: 'none', 'fc'
use_cue_to_pc=False, # use a secondary input as a cue to pc (EC perforant path to CA3)
use_pm=False, # pattern mapping (reconstruct inputs from PC output
use_interest_filter=False, # this replaces VC (attentional system zones in on interesting features)
summarize_level=SummarizeLevels.ALL.value, # for the top summaries (leave individual comps to decide on own)
vc_norm_per_filter=False,
vc_norm_per_sample=False,
max_pool_vc_final_size=2,
max_pool_vc_final_stride=1,
max_outputs=max_outputs
)
# create all possible sub component hparams (must create one for every possible sub component)
if HVC_ENABLED:
vc = VisualCortexComponent.default_hparams()
else:
vc = SparseConvAutoencoderMaxPoolComponent.default_hparams()
dg_fc = DGSAE.default_hparams()
dg_conv = DGSCAE.default_hparams()
dg_stub = DGStubComponent.default_hparams()
pc_sae = SparseAutoencoderComponent.default_hparams()
pc_dae = DeepAutoencoderComponent.default_hparams()
pc_dp = DifferentiablePlasticityComponent.default_hparams()
pc_hl = HopfieldlikeComponent.default_hparams()
ifi = InterestFilter.default_hparams()
ll_vc = LabelLearnerFC.default_hparams()
ll_pc = LabelLearnerFC.default_hparams()
subcomponents = [vc, dg_fc, dg_conv, dg_stub, pc_sae, pc_dae, pc_dp, pc_hl, ll_vc, ll_pc] # all possible subcomponents
# default overrides of sub-component hparam defaults
if not HVC_ENABLED:
vc.set_hparam('learning_rate', 0.001)
vc.set_hparam('sparsity', 25)
vc.set_hparam('sparsity_output_factor', 1.5)
vc.set_hparam('filters', 64)
vc.set_hparam('filters_field_width', 6)
vc.set_hparam('filters_field_height', 6)
vc.set_hparam('filters_field_stride', 3)
vc.set_hparam('pool_size', 2)
vc.set_hparam('pool_strides', 2)
# Note that DG will get the pooled->unpooled encoding
vc.set_hparam('use_max_pool', 'none') # none, encoding, training
dg_fc.set_hparam('learning_rate', 0.001)
dg_fc.set_hparam('sparsity', 20)
dg_fc.set_hparam('sparsity_output_factor', 1.0)
dg_fc.set_hparam('filters', 784)
pc_hl.set_hparam('learning_rate', 0.0001)
pc_hl.set_hparam('optimizer', 'adam')
pc_hl.set_hparam('momentum', 0.9)
pc_hl.set_hparam('momentum_nesterov', False)
pc_hl.set_hparam('use_feedback', True)
pc_hl.set_hparam('memorise_method', 'pinv')
pc_hl.set_hparam('nonlinearity', 'none')
pc_hl.set_hparam('update_n_neurons', -1)
    # default hparams in individual components should be consistent with component-level hparams
HParamMulti.set_hparam_in_subcomponents(subcomponents, 'batch_size', batch_size)
# add sub components to the composite hparams
HParamMulti.add(source=vc, multi=hparam, component='vc')
HParamMulti.add(source=dg_fc, multi=hparam, component='dg_fc')
HParamMulti.add(source=dg_conv, multi=hparam, component='dg_conv')
HParamMulti.add(source=dg_stub, multi=hparam, component='dg_stub')
HParamMulti.add(source=pc_dp, multi=hparam, component='pc_dp')
HParamMulti.add(source=pc_sae, multi=hparam, component='pc_sae')
HParamMulti.add(source=pc_dae, multi=hparam, component='pc_dae')
HParamMulti.add(source=pc_hl, multi=hparam, component='pc_hl')
HParamMulti.add(source=ifi, multi=hparam, component='ifi')
HParamMulti.add(source=ll_vc, multi=hparam, component='ll_vc')
HParamMulti.add(source=ll_pc, multi=hparam, component='ll_pc')
return hparam
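  # Hedged usage sketch (added): hyperparameters can be overridden with the
  # standard tf.contrib.training.HParams API, e.g.
  #   hparams = EpisodicComponent.default_hparams()
  #   hparams.set_hparam('batch_size', 20)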
def __init__(self):
super(EpisodicComponent, self).__init__()
self._name = None
self._hparams = None
self._summary_op = None
self._summary_result = None
self._dual = None
self._input_shape = None
self._input_values = None
self._summary_values = None
self._sub_components = {} # map {name, component}
self._pc_mode = PCMode.Combined
self._pc_input = None
self._pc_input_vis_shape = None
self._degrade_type = 'random' # if degrading is used, then a degrade type: vertical, horizontal, random
self._signals = {} # signals at each stage: convenient container for significant signals
self._show_episodic_level_summary = True
self._interest_filter = None
def batch_size(self):
return self._hparams.batch_size
def get_vc_encoding(self):
return self._dual.get_values('vc_encoding')
def is_build_dg(self):
return self._hparams.dg_type != 'none'
def is_build_ll_vc(self):
return self._hparams.ll_vc_type != 'none'
def is_build_ll_pc(self):
return self._hparams.ll_pc_type != 'none'
def is_build_ll_ensemble(self):
build_ll_ensemble = True
return self.is_build_ll_vc() and self.is_build_ll_pc() and build_ll_ensemble
def is_build_pc(self):
return self._hparams.pc_type != 'none'
def is_pc_hopfield(self):
return isinstance(self.get_pc(), HopfieldlikeComponent)
@staticmethod
def is_vc_hierarchical():
# return isinstance(self.get_vc(), VisualCortexComponent)
return HVC_ENABLED # need to use this before _component is instantiated
def pc_combined(self):
self._pc_mode = PCMode.Combined
def pc_exclude(self):
self._pc_mode = PCMode.Exclude
def pc_only(self):
self._pc_mode = PCMode.PCOnly
@property
def name(self):
return self._name
def get_interest_filter_masked_encodings(self):
return self._dual.get_values('masked_encodings')
def get_interest_filter_positional_encodings(self):
return self._dual.get_values('positional_encodings')
def set_signal(self, key, val, val_shape):
"""Set as a significant signal, that should be summarised"""
self._signals.update({key: (val, val_shape)})
def get_signal(self, key):
val, val_shape = self._signals[key]
return val, val_shape
def get_loss(self):
"""Define loss as the loss of the subcomponent selected for output features: using _hparam.output_features"""
if self._hparams.output_features == 'vc':
comp = self.get_vc()
elif self._hparams.output_features == 'dg':
comp = self.get_dg()
else: # assumes output_features == 'pc'
comp = self.get_pc()
return comp.get_loss()
@staticmethod
def degrader(degrade_step_pl, degrade_type, random_value_pl, input_values, degrade_step, name=None):
return tf.cond(tf.equal(degrade_step_pl, degrade_step),
lambda: image_utils.degrade_image(input_values,
degrade_type=degrade_type,
random_value=random_value_pl),
lambda: input_values,
name=name)
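  # Hedged note (added): `degrader` wraps image_utils.degrade_image in a
  # tf.cond so the input is degraded only on the step where degrade_step_pl
  # equals the given degrade_step; on every other step it passes through
  # unchanged. `degrade_type` is 'vertical', 'horizontal' or 'random'
  # (see self._degrade_type above).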
def _build_vc(self, input_values, input_shape):
if HVC_ENABLED:
vc = VisualCortexComponent()
hparams_vc = VisualCortexComponent.default_hparams()
else:
vc = SparseConvAutoencoderMaxPoolComponent()
hparams_vc = SparseConvAutoencoderMaxPoolComponent.default_hparams()
hparams_vc = HParamMulti.override(multi=self._hparams, target=hparams_vc, component='vc')
vc.build(input_values, input_shape, hparams_vc, 'vc')
self._add_sub_component(vc, 'vc')
# Update 'next' value/shape for DG
if HVC_ENABLED:
# Since pooling/unpooling is applied within the VC component,
# use the get_output() method to get the final layer of VC with the
# appropriate pooling/unpooling setting.
input_values_next = vc.get_output_op()
else:
# Otherwise, get the encoding or unpooled encoding as appropriate
input_values_next = vc.get_encoding_op()
if hparams_vc.use_max_pool == 'encoding':
input_values_next = vc.get_encoding_unpooled_op()
print('vc', 'output', input_values_next)
# Optionally norm VC per filter (this should probably be only first layer, but only one layer for now anyway)
for _, layer in vc.get_sub_components().items():
layer.set_norm_filters(self._hparams.vc_norm_per_filter)
# Add InterestFilter to mask in and blur position of interesting visual filters (and block out the rest)
if self._hparams.use_interest_filter:
self._interest_filter = InterestFilter()
image_tensor, image_shape = self.get_signal('input')
vc_tensor = input_values_next
vc_shape = vc_tensor.get_shape().as_list()
assert image_shape[:-1] == vc_shape[:-1], "The VC encoding must be the same height and width as the image " \
"i.e. conv stride 1"
hparams_ifi = InterestFilter.default_hparams()
hparams_ifi = HParamMulti.override(multi=self._hparams, target=hparams_ifi, component='ifi')
_, input_values_next = self._interest_filter.build(image_tensor, vc_tensor, hparams_ifi)
self._dual.set_op('masked_encodings', self._interest_filter.get_image('masked_encodings'))
self._dual.set_op('positional_encodings', self._interest_filter.get_image('positional_encodings'))
# Optionally pool the final output of the VC (simply to reduce dimensionality)
pool_size = self._hparams.max_pool_vc_final_size
pool_stride = self._hparams.max_pool_vc_final_stride
if pool_size > 1:
input_values_next = tf.layers.max_pooling2d(input_values_next, pool_size, pool_stride, 'SAME')
print('vc final pooled', input_values_next)
# Optionally norm the output samples so that they are comparable to the next stage
def normalize_min_max_4d(x):
sample_mins = tf.reduce_min(x, axis=[1, 2, 3], keepdims=True)
sample_maxs = tf.reduce_max(x, axis=[1, 2, 3], keepdims=True)
return (x - sample_mins) / (sample_maxs - sample_mins)
if self._hparams.vc_norm_per_sample:
frobenius_norm = tf.sqrt(tf.reduce_sum(tf.square(input_values_next), axis=[1, 2, 3], keepdims=True))
input_values_next = input_values_next / frobenius_norm
#input_values_next = normalize_min_max_4d(input_values_next)
# Unpack the conv cells shape
input_volume = np.prod(input_values_next.get_shape().as_list()[1:])
input_next_vis_shape, _ = image_utils.square_image_shape_from_1d(input_volume)
return input_values_next, input_next_vis_shape
def _build_ll_vc(self, target_output, train_input, test_input, name='ll_vc'):
"""Build the label learning component for LTM."""
ll_vc = None
    # Normalize inputs to [0, 1] before label learning
    train_input = normalize_minmax(train_input)
    test_input = normalize_minmax(test_input)
if self._hparams.ll_vc_type == 'fc':
ll_vc = LabelLearnerFC()
self._add_sub_component(ll_vc, name)
hparams_ll_vc = LabelLearnerFC.default_hparams()
hparams_ll_vc = HParamMulti.override(multi=self._hparams, target=hparams_ll_vc, component='ll_vc')
ll_vc.build(target_output, train_input, test_input, hparams_ll_vc, name)
return ll_vc
def _build_ll_pc(self, target_output, train_input, test_input, name='ll_pc'):
"""Build the label learning component for PC."""
ll_pc = None
train_input = normalize_minmax(train_input)
test_input = normalize_minmax(test_input)
if self._hparams.ll_pc_type == 'fc':
ll_pc = LabelLearnerFC()
self._add_sub_component(ll_pc, name)
hparams_ll_pc = LabelLearnerFC.default_hparams()
hparams_ll_pc = HParamMulti.override(multi=self._hparams, target=hparams_ll_pc, component='ll_pc')
ll_pc.build(target_output, train_input, test_input, hparams_ll_pc, name)
return ll_pc
def _build_dg(self, input_next, input_next_vis_shape):
"""Builds the pattern separation component."""
dg_type = self._hparams.dg_type
if dg_type == 'stub':
# create fc, so that we can use the encodings etc. without breaking other stuff
dg = DGStubComponent()
self._add_sub_component(dg, 'dg')
hparams_dg = DGStubComponent.default_hparams()
hparams_dg = HParamMulti.override(multi=self._hparams, target=hparams_dg, component='dg_stub')
dg.build(hparams_dg, 'dg')
# Update 'next' value/shape for PC
input_next = dg.get_encoding_op()
input_next_vis_shape, _ = image_utils.square_image_shape_from_1d(hparams_dg.filters)
dg_sparsity = hparams_dg.sparsity
elif dg_type == 'fc':
dg = DGSAE()
self._add_sub_component(dg, 'dg')
hparams_dg = DGSAE.default_hparams()
hparams_dg = HParamMulti.override(multi=self._hparams, target=hparams_dg, component='dg_fc')
dg.build(input_next, input_next_vis_shape, hparams_dg, 'dg')
# Update 'next' value/shape for PC
input_next = dg.get_encoding_op()
input_next_vis_shape, _ = image_utils.square_image_shape_from_1d(hparams_dg.filters)
dg_sparsity = hparams_dg.sparsity
elif dg_type == 'conv':
input_next_vis_shape = [-1] + input_next.get_shape().as_list()[1:]
print('dg', 'input', input_next)
dg = DGSCAE()
self._add_sub_component(dg, 'dg')
hparams_dg = DGSCAE.default_hparams()
hparams_dg = HParamMulti.override(multi=self._hparams, target=hparams_dg, component='dg_conv')
dg.build(input_next, input_next_vis_shape, hparams_dg, 'dg')
# Update 'next' value/shape for PC
input_next = dg.get_encoding_op()
print('dg', 'output', input_next)
# Unpack the conv cells shape
input_volume = np.prod(input_next.get_shape().as_list()[1:])
input_next_vis_shape, _ = image_utils.square_image_shape_from_1d(input_volume)
input_next = tf.reshape(input_next, [-1, input_volume])
dg_sparsity = hparams_dg.sparsity
else:
raise NotImplementedError('Dentate Gyrus not implemented: ' + dg_type)
return input_next, input_next_vis_shape, dg_sparsity
def _build_pc(self, input_next, input_next_vis_shape, dg_sparsity):
pc_type = self._hparams.pc_type
use_cue_to_pc = self._hparams.use_cue_to_pc
use_pm = self._hparams.use_pm
if use_cue_to_pc:
cue = self._signals['vc'][0]
else:
cue = None
cue_raw = None
if use_pm:
cue_raw = self._signals['vc_input'][0]
if pc_type == 'sae':
pc = SparseAutoencoderComponent()
self._add_sub_component(pc, 'pc')
hparams_sae = SparseAutoencoderComponent.default_hparams()
hparams_sae = HParamMulti.override(multi=self._hparams, target=hparams_sae, component='pc_sae')
pc.build(input_next, input_next_vis_shape, hparams_sae, 'pc')
elif pc_type == 'dae':
pc = DeepAutoencoderComponent()
self._add_sub_component(pc, 'pc')
hparams_dae = DeepAutoencoderComponent.default_hparams()
hparams_dae = HParamMulti.override(multi=self._hparams, target=hparams_dae, component='pc_dae')
input_next_shape = input_next.get_shape().as_list()
pc.build(input_next, input_next_shape, hparams_dae, 'pc', input_cue_raw=cue_raw)
elif pc_type == 'dp':
# DP works with batches differently, so not prescriptive for input shape (used for summaries only)
input_next_vis_shape[0] = -1
# ensure DP receives binary values (all k winners will be 1)
input_next = tf.greater(input_next, 0)
input_next = tf.to_float(input_next)
pc = DifferentiablePlasticityComponent()
self._add_sub_component(pc, 'pc')
hparams_pc_dp = DifferentiablePlasticityComponent.default_hparams()
hparams_pc_dp = HParamMulti.override(multi=self._hparams, target=hparams_pc_dp, component='pc_dp')
pc.build(input_next, input_next_vis_shape, hparams_pc_dp, 'pc')
elif pc_type == 'hl':
pc = HopfieldlikeComponent()
self._add_sub_component(pc, 'pc')
hparams_hl = HopfieldlikeComponent.default_hparams()
hparams_hl = HParamMulti.override(multi=self._hparams, target=hparams_hl, component='pc_hl')
if dg_sparsity == 0:
raise RuntimeError("Could not establish dg per sample sparsity to pass to Hopfield.")
hparams_hl.cue_nn_label_sparsity = dg_sparsity
pc.build(input_next, input_next_vis_shape, hparams_hl, 'pc', input_cue=cue, input_cue_raw=cue_raw)
else:
raise NotImplementedError('Pattern completer not implemented: ' + pc_type)
pc_output = pc.get_decoding_op()
if pc_type == 'dae':
input_volume = np.prod(pc_output.get_shape().as_list()[1:])
pc_output_shape, _ = image_utils.square_image_shape_from_1d(input_volume)
else:
pc_output_shape = input_next_vis_shape # output is same shape and size as input
return pc_output, pc_output_shape
def _build_ll_ensemble(self):
"""Builds ensemble of VC and PC classifiers."""
distributions = []
distribution_mass = []
num_classes = self._label_values.get_shape().as_list()[-1]
aha_mass = 0.495
ltm_mass = 0.495
uniform_mass = 0.01
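    # The ensemble prediction is a convex combination of the component
    # distributions, so the three masses above are chosen to sum to 1.0.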
if aha_mass > 0.0:
aha_prediction = self.get_ll_pc().get_op('preds')
distributions.append(aha_prediction)
distribution_mass.append(aha_mass)
if ltm_mass > 0.0:
ltm_prediction = self.get_ll_vc().get_op('preds')
distributions.append(ltm_prediction)
distribution_mass.append(ltm_mass)
if uniform_mass > 0.0:
uniform = np_uniform(num_classes)
distributions.append(uniform)
distribution_mass.append(uniform_mass)
unseen_sum = 1
unseen_idxs = (0, unseen_sum)
# Build the final distribution, calculate loss
ensemble_preds = tf_build_interpolate_distributions(distributions, distribution_mass, num_classes)
ensemble_correct_preds = tf.equal(tf.argmax(ensemble_preds, 1), tf.argmax(self._label_values, 1))
ensemble_correct_preds = tf.cast(ensemble_correct_preds, tf.float32)
ensemble_accuracy = tf.reduce_mean(ensemble_correct_preds)
ensemble_accuracy_unseen = tf.reduce_mean(ensemble_correct_preds[unseen_idxs[0]:unseen_idxs[1]])
self._dual.set_op('ensemble_preds', ensemble_preds)
self._dual.set_op('ensemble_accuracy', ensemble_accuracy)
self._dual.set_op('ensemble_accuracy_unseen', ensemble_accuracy_unseen)
def build(self, input_values, input_shape, hparams, label_values=None, name='episodic'):
"""Initializes the model parameters.
Args:
hparams: The hyperparameters for the model as tf.contrib.training.HParams.
:param input_values:
:param input_shape:
:param hparams:
:param name:
"""
self._name = name
self._hparams = hparams
self._summary_op = None
self._summary_result = None
self._dual = DualData(self._name)
self._input_values = input_values
self._input_shape = input_shape
self._label_values = label_values
input_area = np.prod(input_shape[1:])
logging.debug('Input Shape: %s', input_shape)
logging.debug('Input Area: %s', input_area)
with tf.variable_scope(self._name, reuse=tf.AUTO_REUSE):
# Replay mode
# ------------------------------------------------------------------------
replay_mode = 'pixel' # pixel or encoding
replay = self._dual.add('replay', shape=[], default_value=False).add_pl(
default=True, dtype=tf.bool)
# Replace labels during replay
replay_labels = self._dual.add('replay_labels', shape=label_values.shape, default_value=0.0).add_pl(
default=True, dtype=label_values.dtype)
self._label_values = tf.cond(tf.equal(replay, True), lambda: replay_labels, lambda: self._label_values)
# Replay pixel inputs during replay, if using 'pixel' replay mode
if replay_mode == 'pixel':
replay_inputs = self._dual.add('replay_inputs', shape=input_values.shape, default_value=0.0).add_pl(
default=True, dtype=input_values.dtype)
self._input_values = tf.cond(tf.equal(replay, True), lambda: replay_inputs, lambda: self._input_values)
self.set_signal('input', self._input_values, self._input_shape)
# Build the LTM
# ------------------------------------------------------------------------
# Optionally degrade input to VC
degrade_step_pl = self._dual.add('degrade_step', shape=[], # e.g. hidden, input, none
default_value='none').add_pl(default=True, dtype=tf.string)
degrade_random_pl = self._dual.add('degrade_random', shape=[],
default_value=0.0).add_pl(default=True, dtype=tf.float32)
input_values = self.degrader(degrade_step_pl, self._degrade_type, degrade_random_pl, self._input_values,
degrade_step='input', name='vc_input_values')
print('vc', 'input', input_values)
self.set_signal('vc_input', input_values, input_shape)
# Build the VC
input_next, input_next_vis_shape = self._build_vc(input_values, input_shape)
vc_encoding = input_next
# Replace the encoding during replay, if using 'encoding' replay mode
if replay_mode == 'encoding':
replay_inputs = self._dual.add('replay_inputs', shape=vc_encoding.shape, default_value=0.0).add_pl(
default=True, dtype=vc_encoding.dtype)
vc_encoding = tf.cond(tf.equal(replay, True), lambda: replay_inputs, lambda: vc_encoding)
self.set_signal('vc', vc_encoding, input_next_vis_shape)
self._dual.set_op('vc_encoding', vc_encoding)
# Build the softmax classifier
if self.is_build_ll_vc() and self._label_values is not None:
self._build_ll_vc(self._label_values, vc_encoding, vc_encoding)
# Build AHA
# ------------------------------------------------------------------------
# Build the DG
dg_sparsity = 0
if self.is_build_dg():
input_next, input_next_vis_shape, dg_sparsity = self._build_dg(input_next, input_next_vis_shape)
dg_encoding = input_next
self.set_signal('dg', dg_encoding, input_next_vis_shape)
# Build the PC
if self.is_build_pc():
# Optionally degrade input to PC
# not all degrade types are supported for embedding in graph (but may still be used directly on test set)
if self._degrade_type != 'rect' and self._degrade_type != 'circle':
input_next = self.degrader(degrade_step_pl, self._degrade_type, degrade_random_pl, input_next,
degrade_step='hidden', name='pc_input_values')
print('pc_input', input_next)
self.set_signal('pc_input', input_next, input_next_vis_shape)
pc_output, pc_output_shape = self._build_pc(input_next, input_next_vis_shape, dg_sparsity)
self.set_signal('pc', pc_output, pc_output_shape)
if self._hparams.use_pm:
ec_out_raw = self.get_pc().get_ec_out_raw_op()
self.set_signal('ec_out_raw', ec_out_raw, input_shape)
if self.is_build_ll_pc() and self.is_build_dg() and self._label_values is not None:
self._build_ll_pc(self._label_values, dg_encoding, pc_output)
if self.is_build_ll_ensemble():
self._build_ll_ensemble()
self.reset()
def get_vc(self):
vc = self.get_sub_component('vc')
return vc
def get_pc(self):
pc = self.get_sub_component('pc')
return pc
def get_dg(self):
dg = self.get_sub_component('dg')
return dg
def get_decoding(self):
return self.get_pc().get_decoding()
def get_ll_vc(self):
return self.get_sub_component('ll_vc')
def get_ll_pc(self):
return self.get_sub_component('ll_pc')
def get_batch_type(self, name=None):
"""
    Return a dict of batch types for each component (keyed by component name).
    If a component does not have a persistent batch type, it is not included in
    the dictionary; the assumption is that, in that case, it has no effect.
"""
if name is None:
batch_types = dict.fromkeys(self._sub_components.keys(), None)
for c in self._sub_components:
if hasattr(self._sub_components[c], 'get_batch_type'):
batch_types[c] = self._sub_components[c].get_batch_type()
else:
batch_types.pop(c)
return batch_types
return self._sub_components[name].get_batch_type()
def get_features(self, batch_type='training'):
"""
The output of the component is taken from one of the subcomponents, depending on hparams.
    If the hparam is neither 'vc' nor 'dg', the fallback is to take the output from the PC.
"""
del batch_type
if self._hparams.output_features == 'vc':
features = self.get_vc().get_features()
elif self._hparams.output_features == 'dg':
features = self.get_dg().get_features()
else: # self._hparams.output_features == 'pc':
features = self.get_pc().get_features()
return features
def _is_skip_for_pc(self, name):
if self._pc_mode == PCMode.PCOnly: # only pc
if name != 'pc':
return True
elif self._pc_mode == PCMode.Exclude: # skip pc
if name == 'pc':
return True
return False
def update_feed_dict_input_gain_pl(self, feed_dict, gain):
"""
    This is relevant for the PC, and is only called when it is being run recursively.
"""
if self.get_pc() is not None:
self.get_pc().update_feed_dict_input_gain_pl(feed_dict, gain)
def update_feed_dict(self, feed_dict, batch_type='training'):
for name, comp in self._sub_components.items():
if self._is_skip_for_pc(name):
continue
comp.update_feed_dict(feed_dict, self._select_batch_type(batch_type, name))
def add_fetches(self, fetches, batch_type='training'):
# each component adds its own
for name, comp in self._sub_components.items():
if self._is_skip_for_pc(name):
continue
comp.add_fetches(fetches, self._select_batch_type(batch_type, name))
# Episodic Component specific
# ------------------------------
# Interest Filter and other
names = []
if self._hparams.use_interest_filter:
names.extend(['masked_encodings', 'positional_encodings'])
if self.is_build_ll_ensemble():
names.extend(['ensemble_preds', 'ensemble_accuracy', 'ensemble_accuracy_unseen'])
# Other
names.extend(['vc_encoding'])
if len(names) > 0:
self._dual.add_fetches(fetches, names)
# Episodic Component specific - summaries
bt = self._select_batch_type(batch_type, self._name)
summary_op = self._dual.get_op(generic_utils.summary_name(bt))
if summary_op is not None:
fetches[self._name]['summaries'] = summary_op
def set_fetches(self, fetched, batch_type='training'):
# each component adds its own
for name, comp in self._sub_components.items():
if self._is_skip_for_pc(name):
continue
comp.set_fetches(fetched, self._select_batch_type(batch_type, name))
# Episodic Component specific
# ----------------------------
# Interest Filter
names = []
if self._hparams.use_interest_filter:
names.extend(['masked_encodings', 'positional_encodings'])
if self.is_build_ll_ensemble():
names.extend(['ensemble_preds', 'ensemble_accuracy', 'ensemble_accuracy_unseen'])
# other
names.extend(['vc_encoding'])
if len(names) > 0:
self._dual.set_fetches(fetched, names)
# Episodic Component specific - summaries
bt = self._select_batch_type(batch_type, self._name)
summary_op = self._dual.get_op(generic_utils.summary_name(bt))
if summary_op is not None:
self._summary_values = fetched[self._name]['summaries']
def build_summaries(self, batch_types=None):
if batch_types is None:
batch_types = []
components = self._sub_components.copy()
consolidate_graph_view = False
if self._show_episodic_level_summary:
components.update({self._name: self})
for name, comp in components.items():
scope = name + '-summaries' # this is best for visualising images in summaries
if consolidate_graph_view:
scope = self._name + '/' + name + '/summaries/'
bt = self._select_batch_type(batch_types, name, as_list=True)
if name == self._name:
comp.build_summaries_episodic(bt, scope=scope)
else:
comp.build_summaries(bt, scope=scope)
def write_summaries(self, step, writer, batch_type='training'):
# the episodic component itself
if self._summary_values is not None:
writer.add_summary(self._summary_values, step) # Write the summaries fetched into _summary_values
writer.flush()
super().write_summaries(step, writer, batch_type)
def write_recursive_summaries(self, step, writer, batch_type=None):
for name, comp in self._sub_components.items():
if hasattr(comp, 'write_recursive_summaries'):
comp.write_recursive_summaries(step, writer, batch_type)
def build_summaries_episodic(self, batch_types=None, scope=None):
"""Builds all summaries."""
if not scope:
scope = self._name + '/summaries/'
with tf.name_scope(scope):
for batch_type in batch_types:
# build 'batch_type' summary subgraph
with tf.name_scope(batch_type):
summaries = self._build_summaries(batch_type)
if len(summaries) > 0:
self._dual.set_op(generic_utils.summary_name(batch_type), tf.summary.merge(summaries))
def _build_summaries(self, batch_type='training'):
"""Assumes appropriate name scope has been set."""
max_outputs = self._hparams.max_outputs
summaries = []
if self._hparams.summarize_level != SummarizeLevels.OFF.value:
for key, pair in self._signals.items():
val = pair[0]
val_shape = pair[1]
summary_shape = image_utils.get_image_summary_shape(val_shape)
reshaped = tf.reshape(val, summary_shape)
summaries.append(tf.summary.image(key, reshaped, max_outputs=max_outputs))
if self._hparams.use_interest_filter and self._interest_filter.summarize_level() != SummarizeLevels.OFF.value:
with tf.name_scope('interest_filter'):
self._interest_filter.add_summaries(summaries)
return summaries
|
{"hexsha": "070d0152223c9070d46ba4152f076f7c8deea87e", "size": 33182, "ext": "py", "lang": "Python", "max_stars_repo_path": "aha/components/episodic_component.py", "max_stars_repo_name": "ProjectAGI/aha", "max_stars_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-08T13:35:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-19T18:33:36.000Z", "max_issues_repo_path": "aha/components/episodic_component.py", "max_issues_repo_name": "ProjectAGI/aha", "max_issues_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aha/components/episodic_component.py", "max_forks_repo_name": "ProjectAGI/aha", "max_forks_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-02T08:28:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-02T08:28:06.000Z", "avg_line_length": 38.6736596737, "max_line_length": 124, "alphanum_fraction": 0.703574227, "include": true, "reason": "import numpy", "num_tokens": 8132}
|
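The per-sample normalization in the episodic component above has two variants: division by the Frobenius norm (active) and min-max rescaling (commented out). A minimal NumPy sketch of both operations, outside the TensorFlow graph; the function names are illustrative only, not part of the component's API:
import numpy as np
def normalize_frobenius(x):
    # Divide each sample by its Frobenius norm over the non-batch axes.
    norm = np.sqrt(np.sum(np.square(x), axis=(1, 2, 3), keepdims=True))
    return x / norm
def normalize_min_max(x):
    # Rescale each sample independently to the [0, 1] range.
    mins = np.min(x, axis=(1, 2, 3), keepdims=True)
    maxs = np.max(x, axis=(1, 2, 3), keepdims=True)
    return (x - mins) / (maxs - mins)
x = np.random.rand(2, 4, 4, 3).astype(np.float32)
assert np.allclose(np.linalg.norm(normalize_frobenius(x).reshape(2, -1), axis=1), 1.0)
y = normalize_min_max(x)
assert y.min() >= 0.0 and y.max() <= 1.0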
# --------------
import pandas as pd
import os
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# path_train : location of test file
# Code starts here
##Loading the CSV data onto a Pandas dataframe
df = pd.read_csv(path_train)
##Defining a function to check every row for a category match, and applying the same
col_list = df.columns[1:].tolist()
def label_race(row):
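    ##Returning the name of the first category column whose value is "T"; the i + 1 offset skips the first (message) column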
for i in range(0, len(col_list)):
if row[i + 1] == "T":
return col_list[i]
df["category"] = df.apply(lambda row: label_race(row), axis = 1)
##Dropping the unnecessary columns
df.drop(col_list, inplace = True, axis = 1)
# --------------
from sklearn.feature_extraction.text import TfidfVectorizer
# Code starts here
# Sampling only 1000 samples of each category
##Sampling the data as instructed
df = df.groupby("category").apply(lambda x: x.sample(n = 1000, random_state = 0))
##Saving the text data as a variable, and lower-casing it
all_text = df["message"].str.lower().tolist()
##Instantiating a TfidfVectorizer object with stopwords, and applying the same to yield the X dataset
tfidf = TfidfVectorizer(stop_words = "english")
##fit_transform learns the vocabulary and vectorizes in one step, so a separate fit call is redundant
vector_tfidf = tfidf.fit_transform(all_text)
X = vector_tfidf.toarray()
##Instantiating a LabelEncoder object, and applying the same to yield the y dataset
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(df["category"])
# --------------
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
# Code starts here
##Applying train-test split as instructed
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.3, random_state = 42)
##Instantiating a LogisticRegression model, fitting the same, and saving its accuracy in a variable
log_reg = LogisticRegression(random_state = 0)
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_val)
log_accuracy = accuracy_score(y_val, y_pred)
##Instantiating a MultinomialNB model, fitting the same, and saving its accuracy in a variable
nb = MultinomialNB()
nb.fit(X_train, y_train)
y_pred = nb.predict(X_val)
nb_accuracy = accuracy_score(y_val, y_pred)
##Instantiating a LinearSVC model, fitting the same, and saving its accuracy in a variable
lsvm = LinearSVC(random_state = 0)
lsvm.fit(X_train, y_train)
y_pred = lsvm.predict(X_val)
lsvm_accuracy = accuracy_score(y_val, y_pred)
# --------------
# path_test : Location of test data
# Code starts here
##Loading the test CSV data onto a Pandas dataframe
df_test = pd.read_csv(path_test)
##Creating the "category" column as in the train data, and dropping the unnecessary columns
df_test["category"] = df_test.apply(lambda row: label_race(row), axis = 1)
df_test.drop(col_list, inplace = True, axis = 1)
##Creating the X_test and y_test dataframes as with train data
all_text = df_test["message"].str.lower().tolist()
vector_tfidf = tfidf.transform(all_text)
X_test = vector_tfidf.toarray()
y_test = le.transform(df_test["category"])
##Checking the accuracy of the earlier trained models on the test data
y_pred = log_reg.predict(X_test)
log_accuracy_2 = accuracy_score(y_test, y_pred)
y_pred = nb.predict(X_test)
nb_accuracy_2 = accuracy_score(y_test, y_pred)
y_pred = lsvm.predict(X_test)
lsvm_accuracy_2 = accuracy_score(y_test, y_pred)
# --------------
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string
import gensim
from gensim.models.lsimodel import LsiModel
from gensim import corpora
from pprint import pprint
# import nltk
# nltk.download('wordnet')
# Creating a stopwords list
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# Function to lemmatize and remove the stopwords
def clean(doc):
stop_free = " ".join([i for i in doc.lower().split() if i not in stop])
punc_free = "".join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
return normalized
# Creating a list of documents from the complaints column
list_of_docs = df["message"].tolist()
# Implementing the function for all the complaints of list_of_docs
doc_clean = [clean(doc).split() for doc in list_of_docs]
# Code starts here
##Creating the id2word dictionary
dictionary = corpora.Dictionary(doc_clean)
##Creating a word corpus
doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
##Instantiating an LSI model, and printing the topics
lsimodel = LsiModel(corpus = doc_term_matrix, num_topics = 5, id2word = dictionary)
pprint(lsimodel.print_topics())
# --------------
from gensim.models import LdaModel
from gensim.models import CoherenceModel
# doc_term_matrix - Word matrix created in the last task
# dictionary - Dictionary created in the last task
# Function to calculate coherence values
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
    limit : Max num of topics
    start, step : Range over the number of topics, as range(start, limit, step)
Returns:
-------
    topic_list : List of topic counts evaluated
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
topic_list = []
for num_topics in range(start, limit, step):
        model = gensim.models.ldamodel.LdaModel(corpus, random_state = 0, num_topics=num_topics, id2word = dictionary, iterations=10)
topic_list.append(num_topics)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return topic_list, coherence_values
# Code starts here
##Calling the function as instructed
topic_list, coherence_value_list = compute_coherence_values(dictionary = dictionary, corpus = doc_term_matrix, texts = doc_clean, start = 1, limit = 41, step = 5)
##Outputting the optimal topic count
opt_topic = topic_list[int(np.argmax(coherence_value_list))]
##Instantiating the LDA model, and printing the top 5 topics
lda_model = LdaModel(corpus = doc_term_matrix, num_topics = opt_topic, id2word = dictionary, iterations = 10, passes = 30, random_state = 2)
pprint(lda_model.print_topics(5))
|
{"hexsha": "346cb63a597e4816f3ba5401150039387861e463", "size": 6845, "ext": "py", "lang": "Python", "max_stars_repo_path": "code.py", "max_stars_repo_name": "Vishal-Bhatia/domain-classification-text", "max_stars_repo_head_hexsha": "de016540c1c5a3eb94a8f9a88145f490dd8f2c10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code.py", "max_issues_repo_name": "Vishal-Bhatia/domain-classification-text", "max_issues_repo_head_hexsha": "de016540c1c5a3eb94a8f9a88145f490dd8f2c10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code.py", "max_forks_repo_name": "Vishal-Bhatia/domain-classification-text", "max_forks_repo_head_hexsha": "de016540c1c5a3eb94a8f9a88145f490dd8f2c10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0676328502, "max_line_length": 163, "alphanum_fraction": 0.7304601899, "include": true, "reason": "import numpy", "num_tokens": 1644}
|
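The clean() helper in the record above chains three standard preprocessing steps. A self-contained sketch of the same pipeline on a toy sentence, assuming NLTK's stopwords and wordnet corpora have been downloaded:
import string
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
stop = set(stopwords.words("english"))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
def clean(doc):
    # 1) drop stopwords, 2) strip punctuation, 3) lemmatize what remains
    stop_free = " ".join(w for w in doc.lower().split() if w not in stop)
    punc_free = "".join(ch for ch in stop_free if ch not in exclude)
    return " ".join(lemma.lemmatize(word) for word in punc_free.split())
print(clean("The cats are chasing the mice!"))  # expected: "cat chasing mouse"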
import pickle
from bisect import bisect_right
from collections import defaultdict
from copy import deepcopy
from functools import partial
from itertools import chain
from operator import eq
def identity(obj):
"""Returns directly the argument *obj*.
"""
return obj
class History(object):
"""The :class:`History` class helps to build a genealogy of all the
individuals produced in the evolution. It contains two attributes,
the :attr:`genealogy_tree` that is a dictionary of lists indexed by
individual, the list contain the indices of the parents. The second
attribute :attr:`genealogy_history` contains every individual indexed
by their individual number as in the genealogy tree.
The produced genealogy tree is compatible with `NetworkX
<http://networkx.lanl.gov/index.html>`_, here is how to plot the genealogy
tree ::
history = History()
# Decorate the variation operators
toolbox.decorate("mate", history.decorator)
toolbox.decorate("mutate", history.decorator)
# Create the population and populate the history
population = toolbox.population(n=POPSIZE)
history.update(population)
# Do the evolution, the decorators will take care of updating the
# history
# [...]
import matplotlib.pyplot as plt
import networkx
graph = networkx.DiGraph(history.genealogy_tree)
        graph = graph.reverse()     # Make the graph top-down
colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
networkx.draw(graph, node_color=colors)
plt.show()
Using NetworkX in combination with `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ (dot layout) this amazing
genealogy tree can be obtained from the OneMax example with a population
    size of 20 and 5 generations, where the color of the nodes indicates their
    fitness, blue is low and red is high.
.. image:: /_images/genealogy.png
:width: 67%
.. note::
The genealogy tree might get very big if your population and/or the
number of generation is large.
"""
def __init__(self):
self.genealogy_index = 0
self.genealogy_history = dict()
self.genealogy_tree = dict()
def update(self, individuals):
"""Update the history with the new *individuals*. The index present in
their :attr:`history_index` attribute will be used to locate their
parents, it is then modified to a unique one to keep track of those
new individuals. This method should be called on the individuals after
each variation.
:param individuals: The list of modified individuals that shall be
inserted in the history.
If the *individuals* do not have a :attr:`history_index` attribute,
the attribute is added and this individual is considered as having no
parent. This method should be called with the initial population to
initialize the history.
Modifying the internal :attr:`genealogy_index` of the history or the
:attr:`history_index` of an individual may lead to unpredictable
results and corruption of the history.
"""
try:
parent_indices = tuple(ind.history_index for ind in individuals)
except AttributeError:
parent_indices = tuple()
for ind in individuals:
self.genealogy_index += 1
ind.history_index = self.genealogy_index
self.genealogy_history[self.genealogy_index] = deepcopy(ind)
self.genealogy_tree[self.genealogy_index] = parent_indices
@property
def decorator(self):
"""Property that returns an appropriate decorator to enhance the
operators of the toolbox. The returned decorator assumes that the
individuals are returned by the operator. First the decorator calls
the underlying operation and then calls the :func:`update` function
with what has been returned by the operator. Finally, it returns the
individuals with their history parameters modified according to the
update function.
"""
def decFunc(func):
def wrapFunc(*args, **kargs):
individuals = func(*args, **kargs)
self.update(individuals)
return individuals
return wrapFunc
return decFunc
def getGenealogy(self, individual, max_depth=float("inf")):
"""Provide the genealogy tree of an *individual*. The individual must
have an attribute :attr:`history_index` as defined by
:func:`~deap.tools.History.update` in order to retrieve its associated
genealogy tree. The returned graph contains the parents up to
        *max_depth* variations before this individual. If not provided,
        the maximum depth extends to the beginning of the evolution.
:param individual: The individual at the root of the genealogy tree.
:param max_depth: The approximate maximum distance between the root
(individual) and the leaves (parents), optional.
:returns: A dictionary where each key is an individual index and the
values are a tuple corresponding to the index of the parents.
"""
gtree = {}
        visited = set()     # Remember visited nodes during the depth-first traversal
def genealogy(index, depth):
if index not in self.genealogy_tree:
return
depth += 1
if depth > max_depth:
return
parent_indices = self.genealogy_tree[index]
gtree[index] = parent_indices
for ind in parent_indices:
if ind not in visited:
genealogy(ind, depth)
visited.add(ind)
genealogy(individual.history_index, 0)
return gtree
class Statistics(object):
"""Object that compiles statistics on a list of arbitrary objects.
When created the statistics object receives a *key* argument that
is used to get the values on which the function will be computed.
If not provided the *key* argument defaults to the identity function.
The value returned by the key may be a multi-dimensional object, i.e.:
a tuple or a list, as long as the statistical function registered
support it. So for example, statistics can be computed directly on
multi-objective fitnesses when using numpy statistical function.
:param key: A function to access the values on which to compute the
statistics, optional.
::
>>> s = Statistics()
>>> s.register("mean", numpy.mean)
>>> s.register("max", max)
>>> s.compile([1, 2, 3, 4])
{'max': 4, 'mean': 2.5}
>>> s.compile([5, 6, 7, 8])
{'max': 8, 'mean': 6.5}
"""
def __init__(self, key=identity):
self.key = key
self.functions = dict()
self.fields = []
def register(self, name, function, *args, **kargs):
"""Register a *function* that will be applied on the sequence each
time :meth:`record` is called.
:param name: The name of the statistics function as it would appear
                     in the dictionary of the statistics object.
:param function: A function that will compute the desired statistics
on the data as preprocessed by the key.
:param argument: One or more argument (and keyword argument) to pass
automatically to the registered function when called,
optional.
"""
self.functions[name] = partial(function, *args, **kargs)
self.fields.append(name)
def compile(self, data):
"""Apply to the input sequence *data* each registered function
        and return the results as a dictionary.
:param data: Sequence of objects on which the statistics are computed.
"""
values = tuple(self.key(elem) for elem in data)
entry = dict()
for key, func in self.functions.items():
entry[key] = func(values)
return entry
class MultiStatistics(dict):
"""Dictionary of :class:`Statistics` object allowing to compute
statistics on multiple keys using a single call to :meth:`compile`. It
takes a set of key-value pairs associating a statistics object to a
unique name. This name can then be used to retrieve the statistics object.
The following code computes statistics simultaneously on the length and
the first value of the provided objects.
::
>>> len_stats = Statistics(key=len)
>>> itm0_stats = Statistics(key=itemgetter(0))
>>> mstats = MultiStatistics(length=len_stats, item=itm0_stats)
>>> mstats.register("mean", numpy.mean, axis=0)
>>> mstats.register("max", numpy.max, axis=0)
>>> mstats.compile([[0.0, 1.0, 1.0, 5.0], [2.0, 5.0]])
{'length': {'max': 4, 'mean': 3.0}, 'item': {'max': 2.0, 'mean': 1.0}}
"""
def compile(self, data):
"""Calls :meth:`Statistics.compile` with *data* of each
:class:`Statistics` object.
:param data: Sequence of objects on which the statistics are computed.
"""
record = {}
for name, stats in list(self.items()):
record[name] = stats.compile(data)
return record
@property
def fields(self):
return sorted(self.keys())
def register(self, name, function, *args, **kargs):
"""Register a *function* in each :class:`Statistics` object.
:param name: The name of the statistics function as it would appear
                     in the dictionary of the statistics object.
:param function: A function that will compute the desired statistics
on the data as preprocessed by the key.
:param argument: One or more argument (and keyword argument) to pass
automatically to the registered function when called,
optional.
"""
for stats in list(self.values()):
stats.register(name, function, *args, **kargs)
class Logbook(list):
"""Evolution records as a chronological list of dictionaries.
Data can be retrieved via the :meth:`select` method given the appropriate
names.
    The :class:`Logbook` class may also contain other logbooks referred to
as chapters. Chapters are used to store information associated to a
specific part of the evolution. For example when computing statistics
on different components of individuals (namely :class:`MultiStatistics`),
chapters can be used to distinguish the average fitness and the average
size.
"""
def __init__(self):
self.buffindex = 0
self.chapters = defaultdict(Logbook)
"""Dictionary containing the sub-sections of the logbook which are also
:class:`Logbook`. Chapters are automatically created when the right hand
        side of a keyword argument, provided to the *record* function, is a
        dictionary. The keyword determines the chapter's name. For example, the
following line adds a new chapter "size" that will contain the fields
"max" and "mean". ::
logbook.record(gen=0, size={'max' : 10.0, 'mean' : 7.5})
To access a specific chapter, use the name of the chapter as a
        dictionary key. For example, to access the size chapter and select
the mean use ::
logbook.chapters["size"].select("mean")
Compiling a :class:`MultiStatistics` object returns a dictionary
        containing dictionaries; therefore, when recording such an object in a
logbook using the keyword argument unpacking operator (**), chapters
will be automatically added to the logbook.
::
>>> fit_stats = Statistics(key=attrgetter("fitness.values"))
>>> size_stats = Statistics(key=len)
>>> mstats = MultiStatistics(fitness=fit_stats, size=size_stats)
>>> # [...]
>>> record = mstats.compile(population)
>>> logbook.record(**record)
>>> print(logbook)
fitness length
------------ ------------
max mean max mean
2 1 4 3
"""
self.columns_len = None
self.header = None
"""Order of the columns to print when using the :data:`stream` and
:meth:`__str__` methods. The syntax is a single iterable containing
string elements. For example, with the previously
defined statistics class, one can print the generation and the
fitness average, and maximum with
::
logbook.header = ("gen", "mean", "max")
        If not set, the header is built from all fields, in arbitrary order,
        on insertion of the first data. The header can be removed by setting
it to :data:`None`.
"""
self.log_header = True
"""Tells the log book to output or not the header when streaming the
first line or getting its entire string representation. This defaults
:data:`True`.
"""
def record(self, **infos):
"""Enter a record of event in the logbook as a list of key-value pairs.
The informations are appended chronogically to a list as a dictionnary.
When the value part of a pair is a dictionnary, the informations contained
in the dictionnary are recorded in a chapter entitled as the name of the
key part of the pair. Chapters are also Logbook.
"""
for key, value in list(infos.items()):
if isinstance(value, dict):
self.chapters[key].record(**value)
del infos[key]
self.append(infos)
def select(self, *names):
"""Return a list of values associated to the *names* provided
in argument in each dictionary of the Statistics object list.
One list per name is returned in order.
::
>>> log = Logbook()
>>> log.record(gen = 0, mean = 5.4, max = 10.0)
>>> log.record(gen = 1, mean = 9.4, max = 15.0)
>>> log.select("mean")
[5.4, 9.4]
>>> log.select("gen", "max")
([0, 1], [10.0, 15.0])
With a :class:`MultiStatistics` object, the statistics for each
measurement can be retrieved using the :data:`chapters` member :
::
>>> log = Logbook()
>>> log.record(**{'gen' : 0, 'fit' : {'mean' : 0.8, 'max' : 1.5},
... 'size' : {'mean' : 25.4, 'max' : 67}})
>>> log.record(**{'gen' : 1, 'fit' : {'mean' : 0.95, 'max' : 1.7},
... 'size' : {'mean' : 28.1, 'max' : 71}})
>>> log.chapters['size'].select("mean")
[25.4, 28.1]
>>> log.chapters['fit'].select("gen", "max")
([0, 1], [1.5, 1.7])
"""
if len(names) == 1:
return [entry.get(names[0], None) for entry in self]
return tuple([entry.get(name, None) for entry in self] for name in names)
@property
def stream(self):
"""Retrieve the formatted not streamed yet entries of the database
including the headers.
::
>>> log = Logbook()
>>> log.append({'gen' : 0})
>>> print(log.stream)
gen
0
>>> log.append({'gen' : 1})
>>> print(log.stream)
1
"""
startindex, self.buffindex = self.buffindex, len(self)
return self.__str__(startindex)
def __delitem__(self, key):
if isinstance(key, slice):
            # Iterate in reverse so that earlier pops do not shift the indices
            # of the items still to be deleted.
            for i in reversed(range(*key.indices(len(self)))):
                self.pop(i)
                for chapter in list(self.chapters.values()):
                    chapter.pop(i)
else:
self.pop(key)
for chapter in list(self.chapters.values()):
chapter.pop(key)
def pop(self, index=0):
"""Retrieve and delete element *index*. The header and stream will be
adjusted to follow the modification.
        :param index: The index of the element to remove, optional. It defaults
to the first element.
You can also use the following syntax to delete elements.
::
del log[0]
del log[1::5]
"""
if index < self.buffindex:
self.buffindex -= 1
return super(self.__class__, self).pop(index)
def __txt__(self, startindex):
columns = self.header
if not columns:
columns = sorted(self[0].keys()) + sorted(self.chapters.keys())
if not self.columns_len or len(self.columns_len) != len(columns):
self.columns_len = list(map(len, columns))
chapters_txt = {}
offsets = defaultdict(int)
for name, chapter in list(self.chapters.items()):
chapters_txt[name] = chapter.__txt__(startindex)
if startindex == 0:
offsets[name] = len(chapters_txt[name]) - len(self)
str_matrix = []
for i, line in enumerate(self[startindex:]):
str_line = []
for j, name in enumerate(columns):
if name in chapters_txt:
column = chapters_txt[name][i+offsets[name]]
else:
value = line.get(name, "")
string = "{0:n}" if isinstance(value, float) else "{0}"
column = string.format(value)
self.columns_len[j] = max(self.columns_len[j], len(column))
str_line.append(column)
str_matrix.append(str_line)
if startindex == 0 and self.log_header:
header = []
nlines = 1
if len(self.chapters) > 0:
nlines += max(list(map(len, list(chapters_txt.values())))) - len(self) + 1
header = [[] for i in range(nlines)]
for j, name in enumerate(columns):
if name in chapters_txt:
length = max(len(line.expandtabs()) for line in chapters_txt[name])
blanks = nlines - 2 - offsets[name]
for i in range(blanks):
header[i].append(" " * length)
header[blanks].append(name.center(length))
header[blanks+1].append("-" * length)
for i in range(offsets[name]):
header[blanks+2+i].append(chapters_txt[name][i])
else:
length = max(len(line[j].expandtabs()) for line in str_matrix)
for line in header[:-1]:
line.append(" " * length)
header[-1].append(name)
str_matrix = chain(header, str_matrix)
template = "\t".join("{%i:<%i}" % (i, l) for i, l in enumerate(self.columns_len))
text = [template.format(*line) for line in str_matrix]
return text
def __str__(self, startindex=0):
text = self.__txt__(startindex)
return "\n".join(text)
class HallOfFame(object):
"""The hall of fame contains the best individual that ever lived in the
    population during the evolution. It is lexicographically sorted at all
    times so that the first element of the hall of fame is the individual that
    has the best first fitness value ever seen, according to the weights
    provided to the fitness at creation time.
    The insertion is made so that old individuals have priority over new
    individuals. A single copy of each individual is kept at all times; the
    equivalence between two individuals is determined by the operator passed
    to the *similar* argument.
    :param maxsize: The maximum number of individuals to keep in the hall of
fame.
:param similar: An equivalence operator between two individuals, optional.
It defaults to operator :func:`operator.eq`.
The class :class:`HallOfFame` provides an interface similar to a list
(without being one completely). It is possible to retrieve its length, to
iterate on it forward and backward and to get an item or a slice from it.
"""
def __init__(self, maxsize, similar=eq):
self.maxsize = maxsize
self.keys = list()
self.items = list()
self.similar = similar
def update(self, population):
"""Update the hall of fame with the *population* by replacing the
worst individuals in it by the best individuals present in
*population* (if they are better). The size of the hall of fame is
kept constant.
        :param population: A list of individuals with a fitness attribute to
update the hall of fame with.
"""
        if len(self) == 0 and self.maxsize != 0:
# Working on an empty hall of fame is problematic for the
# "for else"
self.insert(population[0])
for ind in population:
if ind.fitness > self[-1].fitness or len(self) < self.maxsize:
for hofer in self:
# Loop through the hall of fame to check for any
# similar individual
if self.similar(ind, hofer):
break
else:
# The individual is unique and strictly better than
# the worst
if len(self) >= self.maxsize:
self.remove(-1)
self.insert(ind)
def insert(self, item):
"""Insert a new individual in the hall of fame using the
:func:`~bisect.bisect_right` function. The inserted individual is
inserted on the right side of an equal individual. Inserting a new
        individual in the hall of fame also preserves the hall of fame's order.
        This method **does not** check the size of the hall of fame, so
        inserting a new individual into a full hall of fame will not
        remove the worst individual to maintain a constant size.
:param item: The individual with a fitness attribute to insert in the
hall of fame.
"""
item = deepcopy(item)
i = bisect_right(self.keys, item.fitness)
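        # self.keys stores fitnesses in ascending order while self.items keeps
        # the mirrored, best-first order, hence the len(self) - i position.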
self.items.insert(len(self) - i, item)
self.keys.insert(i, item.fitness)
def remove(self, index):
"""Remove the specified *index* from the hall of fame.
:param index: An integer giving which item to remove.
"""
del self.keys[len(self) - (index % len(self) + 1)]
del self.items[index]
def clear(self):
"""Clear the hall of fame."""
del self.items[:]
del self.keys[:]
def __len__(self):
return len(self.items)
def __getitem__(self, i):
return self.items[i]
def __iter__(self):
return iter(self.items)
def __reversed__(self):
return reversed(self.items)
def __str__(self):
return str(self.items)
class ParetoFront(HallOfFame):
"""The Pareto front hall of fame contains all the non-dominated individuals
that ever lived in the population. That means that the Pareto front hall of
    fame can contain an unbounded number of different individuals.
    :param similar: A function that tells the Pareto front whether or not two
individuals are similar, optional.
The size of the front may become very large if it is used for example on
a continuous function with a continuous domain. In order to limit the number
of individuals, it is possible to specify a similarity function that will
return :data:`True` if the genotype of two individuals are similar. In that
case only one of the two individuals will be added to the hall of fame. By
default the similarity function is :func:`operator.eq`.
    Since the Pareto front hall of fame inherits from the :class:`HallOfFame`,
it is sorted lexicographically at every moment.
"""
def __init__(self, similar=eq):
HallOfFame.__init__(self, None, similar)
def update(self, population):
"""Update the Pareto front hall of fame with the *population* by adding
the individuals from the population that are not dominated by the hall
of fame. If any individual in the hall of fame is dominated it is
removed.
        :param population: A list of individuals with a fitness attribute to
update the hall of fame with.
"""
for ind in population:
is_dominated = False
dominates_one = False
has_twin = False
to_remove = []
for i, hofer in enumerate(self): # hofer = hall of famer
if not dominates_one and hofer.fitness.dominates(ind.fitness):
is_dominated = True
break
elif ind.fitness.dominates(hofer.fitness):
dominates_one = True
to_remove.append(i)
elif ind.fitness == hofer.fitness and self.similar(ind, hofer):
has_twin = True
break
for i in reversed(to_remove): # Remove the dominated hofer
self.remove(i)
if not is_dominated and not has_twin:
self.insert(ind)
__all__ = ['HallOfFame', 'ParetoFront', 'History', 'Statistics', 'MultiStatistics', 'Logbook']
if __name__ == "__main__":
import doctest
from operator import itemgetter
import numpy
doctest.run_docstring_examples(Statistics, globals())
doctest.run_docstring_examples(Statistics.register, globals())
doctest.run_docstring_examples(Statistics.compile, globals())
doctest.run_docstring_examples(MultiStatistics, globals())
doctest.run_docstring_examples(MultiStatistics.register, globals())
doctest.run_docstring_examples(MultiStatistics.compile, globals())
|
{"hexsha": "53a7bce0061dd6c27ef0f311089bf8e07727da23", "size": 26585, "ext": "py", "lang": "Python", "max_stars_repo_path": "env/Lib/site-packages/deap/tools/support.py", "max_stars_repo_name": "richooms/healthcare_automl", "max_stars_repo_head_hexsha": "73fc27ee8f57c717dc82a7841680ba64d6b4c34b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-29T13:47:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-08T06:25:47.000Z", "max_issues_repo_path": "env/Lib/site-packages/deap/tools/support.py", "max_issues_repo_name": "richooms/healthcare_automl", "max_issues_repo_head_hexsha": "73fc27ee8f57c717dc82a7841680ba64d6b4c34b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:12:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T21:09:52.000Z", "max_forks_repo_path": "env/Lib/site-packages/deap/tools/support.py", "max_forks_repo_name": "richooms/healthcare_automl", "max_forks_repo_head_hexsha": "73fc27ee8f57c717dc82a7841680ba64d6b4c34b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-23T09:01:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-23T09:01:13.000Z", "avg_line_length": 40.8371735791, "max_line_length": 94, "alphanum_fraction": 0.5989091593, "include": true, "reason": "import numpy,import networkx", "num_tokens": 5774}
|
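A minimal end-to-end sketch of the Statistics and Logbook containers defined above, run on a toy population of plain dictionaries; it assumes the module is importable as deap.tools:
import numpy
from deap.tools import Statistics, Logbook
stats = Statistics(key=lambda ind: ind["fitness"])
stats.register("mean", numpy.mean)
stats.register("max", numpy.max)
logbook = Logbook()
logbook.header = ("gen", "mean", "max")
for gen in range(3):
    # A toy "population": dictionaries carrying a single fitness value.
    population = [{"fitness": gen + i} for i in range(4)]
    logbook.record(gen=gen, **stats.compile(population))
print(logbook)                 # tabular gen/mean/max view
print(logbook.select("mean"))  # [1.5, 2.5, 3.5]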
"""Variational Auto Encoders as intrinsic rewards
"""
from abc import ABC, abstractmethod
import copy
import enum
from typing import Callable, List, NamedTuple, Optional, Sequence, Tuple
import numpy as np
import torch
from torch.distributions import Normal
from torch.nn import functional as F
from torch import Tensor, nn
from rainy import Config, net
from rainy.utils import Device
from rainy.utils.rms import RunningMeanStdTorch
from .prelude import Normalizer, PreProcessor
from .unsupervised import (
UnsupervisedBlock,
UnsupervisedIRewGen,
normalize_r_default,
preprocess_default,
)
class DecoderKind(enum.Enum):
BERNOULLI = 1
GAUSSIAN = 2
CATEGORICAL = 3
def wrap(self, net: nn.Module, init: Optional[net.Initializer] = None) -> nn.Module:
if self == DecoderKind.BERNOULLI:
return BernoulliHead(net, init)
elif self == DecoderKind.GAUSSIAN:
return GaussianHead(net, init)
elif self == DecoderKind.CATEGORICAL:
return CategoricalHead(net, init)
else:
raise NotImplementedError()
DECORDERS = {
"bernoulli": DecoderKind.BERNOULLI,
"gaussian": DecoderKind.GAUSSIAN,
"categorical": DecoderKind.CATEGORICAL,
}
class DecoderDist(ABC):
@abstractmethod
def loss(self, x: Tensor) -> Tensor:
pass
@abstractmethod
def sample(self) -> Tensor:
pass
class BernoulliDist(DecoderDist):
def __init__(self, logits: Tensor) -> None:
self.logits = logits
def loss(self, x: Tensor) -> Tensor:
return F.binary_cross_entropy_with_logits(self.logits, x, reduction="none")
    def sample(self) -> Tensor:
        # Sample elementwise from a Bernoulli parameterized by the logits,
        # matching the binary cross-entropy loss above.
        return torch.distributions.Bernoulli(logits=self.logits).sample()
class CategoricalDist(DecoderDist):
def __init__(self, logits: Tensor) -> None:
self.logits = logits
def loss(self, x: Tensor) -> Tensor:
t = x.argmax(dim=1)
return F.cross_entropy(self.logits, t, reduction="none")
def sample(self) -> Tensor:
if self.logits.dim() > 1:
shape = self.logits.shape
logits = self.logits.view(shape[0], shape[1], -1).transpose(1, 2)
sample = torch.distributions.Categorical(logits=logits).sample()
return sample.view(shape[0], *shape[2:])
        else:
            return torch.distributions.Categorical(logits=self.logits).sample()
class GaussianDist(DecoderDist):
"""
    Negative log-likelihood loss adapted from the Chainer implementation:
https://github.com/chainer/chainer/blob/v7.1.0/chainer/functions/loss/vae.py#L123
"""
def __init__(self, mu: Tensor, logvar: Tensor) -> None:
self.mu = mu
self.logvar = logvar
def loss(self, x: Tensor) -> Tensor:
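        # Elementwise Gaussian negative log-likelihood:
        #   -log N(x | mu, exp(logvar))
        #   = 0.5 * (logvar + log(2*pi)) + 0.5 * (x - mu)^2 * exp(-logvar)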
x_prec = torch.exp(-self.logvar)
x_diff = x - self.mu
x_power = x_diff.pow(2).mul_(x_prec).mul_(-0.5)
return 0.5 * (self.logvar + np.log(2.0 * np.pi)) - x_power
def sample(self) -> Tensor:
return Normal(self.mu, torch.exp(0.5 * self.logvar)).sample()
class BernoulliHead(nn.Module):
def __init__(self, net: nn.Module, init: net.Initializer) -> None:
super().__init__()
self.net = init(net)
def forward(self, x: Tensor) -> BernoulliDist:
return BernoulliDist(logits=self.net(x))
class CategoricalHead(BernoulliHead):
def forward(self, x: Tensor) -> CategoricalDist:
return CategoricalDist(logits=self.net(x))
class GaussianHead(nn.Module):
def __init__(self, net: nn.Module, init: net.Initializer) -> None:
super().__init__()
self.mu = init(net)
self.logvar = init(copy.deepcopy(net))
def forward(self, x: Tensor) -> GaussianDist:
mu = self.mu(x)
logvar = self.logvar(x)
return GaussianDist(mu, logvar)
class VaeOutPut(NamedTuple):
decoder: DecoderDist
mu: Tensor
logvar: Tensor
class Vae(nn.Module, ABC):
input_dim: Sequence[int]
encoder: nn.Module
decoder: nn.Module
mu: nn.Module
logvar: nn.Module
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
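        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # which keeps the sampling step differentiable w.r.t. mu and logvar.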
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def forward(self, x: Tensor) -> VaeOutPut:
x = self.encoder(x)
mu, logvar = self.mu(x), self.logvar(x)
z = self.reparameterize(mu, logvar)
return VaeOutPut(self.decoder(z), mu, logvar)
class FcVae(Vae):
def __init__(
self,
input_dim: tuple,
fc_dims: Sequence[int] = [64, 64],
z_dim: int = 32,
device: Device = Device(),
decoder_kind: DecoderKind = DecoderKind.GAUSSIAN,
init: net.Initializer = net.Initializer(),
) -> None:
super().__init__()
self.input_dim = input_dim
x_dim = np.prod(input_dim)
dims = [x_dim] + list(fc_dims)
encoders = []
for i in range(len(fc_dims)):
encoders.append(init(nn.Linear(dims[i], dims[i + 1])))
encoders.append(nn.ReLU(inplace=True))
decoders = [init(nn.Linear(z_dim, dims[-1])), nn.ReLU(inplace=True)]
for i in reversed(range(1, len(fc_dims))):
decoders.append(init(nn.Linear(dims[i + 1], dims[i])))
decoders.append(decoder_kind.wrap(nn.Linear(dims[1], x_dim), init))
self.encoder = nn.Sequential(*encoders)
self.decoder = nn.Sequential(*decoders)
self.mu = nn.Linear(dims[-1], z_dim)
self.logvar = nn.Linear(dims[-1], z_dim)
class Flatten(nn.Module):
def forward(self, x: Tensor) -> Tensor:
return x.view(x.size(0), -1)
class Unflatten(nn.Module):
    def __init__(self, shape: tuple) -> None:
super().__init__()
self.shape = shape
def forward(self, x: Tensor) -> Tensor:
return x.view(x.size(0), *self.shape)
class CNNVae(Vae):
def __init__(
self,
input_dim: tuple,
fc_dim: int = 256,
z_dim: int = 64,
conv_channels: List[int] = [32, 64, 64],
encoder_args: List[tuple] = [(8, 4), (4, 2), (3, 1)],
decoder_args: List[tuple] = [(3, 1), (4, 2), (8, 4)],
device: Device = Device(),
decoder_kind: DecoderKind = DecoderKind.GAUSSIAN,
cnn_init: net.Initializer = net.Initializer(
weight_init=net.init.orthogonal(nonlinearity="relu"),
),
init: net.Initializer = net.Initializer(),
) -> None:
super().__init__()
assert len(input_dim) == 3, "CNNVae assumes that len(input_dim) == 3"
self.input_dim = input_dim
in_channel, height, width = input_dim
channels = [in_channel] + conv_channels
conved_h, conved_w = net.calc_cnn_hidden(encoder_args, height, width)
hidden = conved_h * conved_w * channels[-1]
def _make_encoder() -> nn.Sequential:
encoders = []
for i in range(len(conv_channels)):
encoders.append(
cnn_init(nn.Conv2d(channels[i], channels[i + 1], *encoder_args[i]))
)
encoders.append(nn.ReLU(inplace=True))
return nn.Sequential(
*encoders,
Flatten(),
init(nn.Linear(hidden, fc_dim)),
nn.ReLU(inplace=True),
)
self.encoder = _make_encoder()
self.mu = nn.Linear(fc_dim, z_dim)
self.logvar = nn.Linear(fc_dim, z_dim)
channels.reverse()
def _make_decoder() -> nn.Sequential:
decoders = [
init(nn.Linear(z_dim, fc_dim)),
nn.ReLU(inplace=True),
init(nn.Linear(fc_dim, hidden)),
Unflatten((channels[0], conved_w, conved_h)),
nn.ReLU(inplace=True),
]
for i in range(len(conv_channels) - 1):
params = channels[i], channels[i + 1], *decoder_args[i]
decoders.append(cnn_init(nn.ConvTranspose2d(*params)))
decoders.append(nn.ReLU(inplace=True))
decoders.append(
decoder_kind.wrap(
nn.ConvTranspose2d(channels[-2], channels[-1], *decoder_args[-1]),
cnn_init,
)
)
return nn.Sequential(*decoders)
self.decoder = _make_decoder()
class VaeUnsupervisedBlock(UnsupervisedBlock):
def __init__(self, vae: Vae) -> None:
super().__init__()
self.vae = vae
def rewards(self, states: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
batch_size = states.size(0)
decoder, *_ = self.vae(states)
recons_loss = decoder.loss(states).div_(batch_size)
return recons_loss, None
def loss(self, states: Tensor, target: Optional[Tensor]) -> Tensor:
decoder, mu, logvar = self.vae(states)
recons_loss = decoder.loss(states).sum(dim=1)
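        # Closed-form KL(N(mu, diag(sigma^2)) || N(0, I)) for a diagonal Gaussian:
        #   kl = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))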
kl_loss = -0.5 * (1.0 + logvar - mu.pow(2.0) - logvar.exp()).sum(dim=1)
return recons_loss + kl_loss
@property
def input_dim(self) -> Sequence[int]:
return self.vae.input_dim
def normalize_vae(t: Tensor, rms: RunningMeanStdTorch) -> Tensor:
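    # Standardize with running mean/std, clip to +-5 standard deviations,
    # then rescale the clipped range to [0, 1].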
t = t.reshape(-1, 1, *t.shape[-2:])
t.sub_(rms.mean.float()).div_(rms.std().float())
return t.clamp_(-5.0, 5.0).add_(5.0).div(10.0)
def irew_gen_cnn_vae(
preprocess: PreProcessor = preprocess_default,
state_normalizer: Normalizer = normalize_vae,
reward_normalizer: Normalizer = normalize_r_default,
**kwargs,
) -> Callable[[Config, Device], UnsupervisedIRewGen]:
def _make_irew_gen(cfg: Config, device: Device) -> UnsupervisedIRewGen:
input_dim = 1, *cfg.state_dim[1:]
vae = CNNVae(input_dim, **kwargs)
return UnsupervisedIRewGen(
VaeUnsupervisedBlock(vae),
cfg.int_discount_factor,
cfg.nworkers,
device,
preprocess=preprocess,
state_normalizer=state_normalizer,
reward_normalizer=reward_normalizer,
)
return _make_irew_gen
def irew_gen_fc_vae(
preprocess: PreProcessor = lambda x, _: x,
state_normalizer: Normalizer = lambda x, _: x,
reward_normalizer: Normalizer = normalize_r_default,
**kwargs,
) -> Callable[[Config, Device], UnsupervisedIRewGen]:
def _make_irew_gen(cfg: Config, device: Device) -> UnsupervisedIRewGen:
vae = FcVae(cfg.state_dim, **kwargs)
return UnsupervisedIRewGen(
VaeUnsupervisedBlock(vae),
cfg.int_discount_factor,
cfg.nworkers,
device,
preprocess=preprocess,
state_normalizer=state_normalizer,
reward_normalizer=reward_normalizer,
ob_rms_shape=cfg.state_dim,
)
return _make_irew_gen
|
{"hexsha": "7a15857efc7b2201f8a9c3d2d5b69627f7ad04cc", "size": 10805, "ext": "py", "lang": "Python", "max_stars_repo_path": "int_rew/vae.py", "max_stars_repo_name": "kngwyu/intrinsic-rewards", "max_stars_repo_head_hexsha": "c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-09-22T12:13:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T11:52:13.000Z", "max_issues_repo_path": "int_rew/vae.py", "max_issues_repo_name": "kngwyu/intrinsic-rewards", "max_issues_repo_head_hexsha": "c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-29T08:57:28.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-29T10:35:35.000Z", "max_forks_repo_path": "int_rew/vae.py", "max_forks_repo_name": "kngwyu/intrinsic-rewards", "max_forks_repo_head_hexsha": "c2a8f98c0fd9292dc90f8857fa5ddb763ba8b994", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-24T01:38:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-24T01:38:27.000Z", "avg_line_length": 31.6862170088, "max_line_length": 88, "alphanum_fraction": 0.6107357705, "include": true, "reason": "import numpy", "num_tokens": 2732}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
_NUMPY_RANDOM_SEED = 42
class MapAndBatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
def benchmarkMapAndBatchDense(self):
"""Measures the performance of parallelized batching."""
shapes = [(), (10,), (10, 10), (10, 10, 10), (224, 224, 3)]
batch_size_values = [1, 32, 64, 128, 1024]
shape_placeholder = array_ops.placeholder(dtypes.int64, shape=[None])
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(1000000000)
dense_value = random_ops.random_normal(shape=shape_placeholder)
dataset = dataset.apply(batching.map_and_batch(
lambda _: dense_value, batch_size_placeholder))
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
for shape in shapes:
for batch_size in batch_size_values:
with session.Session() as sess:
sess.run(iterator.initializer, feed_dict={
shape_placeholder: shape, batch_size_placeholder: batch_size})
# Use a C++ callable to minimize the Python overhead in the benchmark.
callable_opts = config_pb2.CallableOptions()
callable_opts.target.append(next_element.op.name)
op_callable = sess._make_callable_from_options(callable_opts) # pylint: disable=protected-access
# Run five steps to warm up the session caches before taking the
# first measurement.
for _ in range(5):
op_callable()
deltas = []
overall_start = time.time()
# Run at least five repetitions and for at least five seconds.
while len(deltas) < 5 or time.time() - overall_start < 5.0:
start = time.time()
for _ in range(100):
op_callable()
end = time.time()
deltas.append(end - start)
del op_callable
median_wall_time = np.median(deltas) / 100.0
iters = len(deltas) * 100
print("Map and batch dense dataset shape: %r batch_size: %d "
"wall time: %f (%d iters)"
% (shape, batch_size, median_wall_time, iters))
self.report_benchmark(
iters=iters, wall_time=median_wall_time,
name="benchmark_batch_dense_dataset_nnz_%d_batch_size_%d" % (
np.prod(shape), batch_size))
def benchmarkMapAndBatchChainingVersusFusing(self):
"""Compares the performance of chaining and fusing map and batch.
NOTE: It is recommended to build the benchmark with
`-c opt --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-gmlt`
and execute it on a machine with at least 32 CPU cores.
"""
# Sequential pipeline configurations.
seq_elem_size_series = itertools.product([1], [1], [1, 2, 4, 8], [16])
seq_batch_size_series = itertools.product([1], [1], [1], [8, 16, 32, 64])
# Parallel pipeline configuration.
par_elem_size_series = itertools.product([32], [32], [1, 2, 4, 8], [256])
par_batch_size_series = itertools.product([32], [32], [1],
[128, 256, 512, 1024])
par_num_calls_series = itertools.product([8, 16, 32, 64], [32], [1], [512])
par_inter_op_series = itertools.product([32], [8, 16, 32, 64], [1], [512])
def name(method, label, num_calls, inter_op, element_size, batch_size):
return ("%s_id_%s_num_calls_%d_inter_op_%d_elem_size_%d_batch_size_%d" % (
method,
        hashlib.sha1(label.encode("utf-8")).hexdigest()[:8],
num_calls,
inter_op,
element_size,
batch_size,
))
def benchmark(label, series):
"""Runs benchmark the given series."""
print("%s:" % label)
def make_base_dataset(element_size):
k = 1024 * 1024
x = constant_op.constant(np.random.rand(element_size, 4 * k))
y = constant_op.constant(np.random.rand(4 * k, 1))
return dataset_ops.Dataset.range(1000000000000).map(lambda _: (x, y))
for num_calls, inter_op, element_size, batch_size in series:
num_iters = 1024 // (
(element_size * batch_size) // min(num_calls, inter_op))
dataset = make_base_dataset(element_size)
chained_dataset = dataset.map(
math_ops.matmul,
num_parallel_calls=num_calls).batch(batch_size=batch_size)
chained_iterator = dataset_ops.make_one_shot_iterator(chained_dataset)
chained_get_next = chained_iterator.get_next()
chained_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(chained_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(chained_get_next.op)
end = time.time()
chained_deltas.append(end - start)
fused_dataset = dataset.apply(
batching.map_and_batch(
math_ops.matmul,
num_parallel_calls=num_calls,
batch_size=batch_size))
fused_iterator = dataset_ops.make_one_shot_iterator(fused_dataset)
fused_get_next = fused_iterator.get_next()
fused_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(fused_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(fused_get_next.op)
end = time.time()
fused_deltas.append(end - start)
print(
"batch size: %d, num parallel calls: %d, inter-op parallelism: %d, "
"element size: %d, num iters: %d\nchained wall time: %f (median), "
"%f (mean), %f (stddev), %f (min), %f (max)\n fused wall time: "
"%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n "
"chained/fused: %.2fx (median), %.2fx (mean)" %
(batch_size, num_calls, inter_op, element_size, num_iters,
np.median(chained_deltas), np.mean(chained_deltas),
np.std(chained_deltas), np.min(chained_deltas),
np.max(chained_deltas), np.median(fused_deltas),
np.mean(fused_deltas), np.std(fused_deltas), np.min(fused_deltas),
np.max(fused_deltas),
np.median(chained_deltas) / np.median(fused_deltas),
np.mean(chained_deltas) / np.mean(fused_deltas)))
self.report_benchmark(
iters=num_iters,
wall_time=np.median(chained_deltas),
name=name("chained", label, num_calls, inter_op, element_size,
batch_size))
self.report_benchmark(
iters=num_iters,
wall_time=np.median(fused_deltas),
name=name("fused", label, num_calls, inter_op, element_size,
batch_size))
print()
np.random.seed(_NUMPY_RANDOM_SEED)
benchmark("Sequential element size evaluation", seq_elem_size_series)
benchmark("Sequential batch size evaluation", seq_batch_size_series)
benchmark("Parallel element size evaluation", par_elem_size_series)
benchmark("Parallel batch size evaluation", par_batch_size_series)
benchmark("Transformation parallelism evaluation", par_num_calls_series)
benchmark("Threadpool size evaluation", par_inter_op_series)
if __name__ == "__main__":
test.main()
|
{"hexsha": "fbd06a5a78eab5a8c30df80f7130461b68f9643c", "size": 9036, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py", "max_stars_repo_name": "aeverall/tensorflow", "max_stars_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-12T23:33:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-26T07:20:22.000Z", "max_issues_repo_path": "tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py", "max_issues_repo_name": "aeverall/tensorflow", "max_issues_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py", "max_forks_repo_name": "aeverall/tensorflow", "max_forks_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3392857143, "max_line_length": 107, "alphanum_fraction": 0.6454183267, "include": true, "reason": "import numpy", "num_tokens": 2058}
|
C#######################################################################
C NAME OF ROUTINE
C GENTHIS
C
C PURPOSE
C THIS IS THE STANDARD MAIN PROGRAM USED FOR TAE/VICAR PROGRAMS.
C THIS MODULE CALLS SUBROUTINE MAIN44 TO ENTER INTO THE BODY OF THE
C PROGRAM.
C GENTHIS generates small exactly-defined test files.
C PREPARED FOR USE ON MIPL SYSTEM BY
C STEVE POHORSKY INFORMATICS GENERAL CORPORATION APRIL 1986
C FOR
C MIPL SOFTWARE DEVELOPMENT
C
C DERIVED FROM CODE FOR VICAR PROGRAM GEN
C
C
C REVISION HISTORY
C Converted to UNIX/VICAR May 2, 1991 --- Ron Alley
C SUBROUTINES CALLED
C MAIN44
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
INCLUDE 'VICMAIN_FOR'
SUBROUTINE MAIN44
C GENTHIS A NL NS PARAMS
C GENTHIS DOES NOT DISTINGUISH BETWEEN INTEGER AND REAL
C NUMBER DN VALUES. ALL VALUES ARE NOW PROCESSED AS REAL NUMBERS.
CALL XVMESSAGE(' GENTHIS Version 1.1',' ')
CALL GENTHISLAB
CALL GENTHIS1
RETURN
END
C*****************************************************************************
SUBROUTINE GENTHISLAB
IMPLICIT NONE
COMMON /C1XX/ NL,NS,PIXSIZ,NCODE,BUF
INTEGER*4 NL,NS,PIXSIZ,NCODE
LOGICAL*1 BUF(80000)
COMMON /FORMATS/ DATA
COMMON /UNIT/ OUTUNIT
C
INTEGER DEF,CNT,STATUS,OUTUNIT, I
REAL PAR(1000)
INTEGER*4 KAR(1000)
EQUIVALENCE ( PAR,KAR )
CHARACTER*5 FORMAT, DATA
C Label processor
CALL XVUNIT(OUTUNIT,'OUT',1,STATUS,' ')
C--- Determine data type and pixel size and add to label
CALL XVP('FORMAT',DATA,CNT)
IF (DATA .EQ. 'HALF ') THEN
FORMAT(1:4) = 'HALF'
NCODE = -6
PIXSIZ = 2
ELSE IF (DATA .EQ. 'FULL ') THEN
FORMAT(1:4) = 'FULL'
NCODE = 4
PIXSIZ = 4
ELSE IF (DATA .EQ. 'REAL4' .OR. DATA.EQ.'REAL') THEN
FORMAT(1:4) = 'REAL'
NCODE = 4
PIXSIZ = 4
ELSE IF (DATA .EQ. 'REAL8') THEN
FORMAT(1:4) = 'DOUB'
NCODE = 9
PIXSIZ = 8
ELSE
FORMAT(1:4) = 'BYTE'
NCODE = -5
PIXSIZ = 1
END IF
C--- Open output file with specified values
CALL XVPARM('NL',NL,CNT,DEF,0)
CALL XVPARM('NS',NS,CNT,DEF,0)
CALL XVOPEN(OUTUNIT,STATUS,'U_FORMAT',FORMAT,'O_FORMAT',
+ FORMAT,'OP','WRITE','U_NL',NL,'U_NS',NS,
+ 'IO_ACT','SA','OPEN_ACT','SA',' ')
C--- Get DN VALUES.
CALL XVP('DN', PAR, CNT)
IF (CNT .NE. NL*NS) THEN
CALL XVMESSAGE(
+ ' **PARAMETER ERR...DN COUNT DOES NOT MATCH SIZE',' ')
CALL XVMESSAGE(' **GENTHIS TASK CANCELLED',' ')
CALL ABEND
END IF
C--- CONVERT DN VALUES TO SPECIFIED FORMAT
IF (DATA(:1).EQ.'B' .OR. DATA(:1).EQ.'H' .OR.
+ DATA(:1).EQ.'F') THEN
DO I = 1, CNT
KAR(I) = PAR(I) ! CONVERT TO INTEGER.
END DO
END IF
CALL MVE (NCODE, CNT, PAR, BUF, 1, 1 )
RETURN
END
C************************************************************************
SUBROUTINE GENTHIS1
IMPLICIT NONE
COMMON /C1XX/ NL,NS,PIXSIZ,NCODE,BUF
INTEGER*4 NL,NS,PIXSIZ,NCODE
LOGICAL*1 BUF(80000)
COMMON /FORMATS/ DATA
CHARACTER*5 DATA
COMMON /UNIT/ OUTUNIT
INTEGER OUTUNIT,STATUS
INTEGER I, N, NBYTES
NBYTES = NS* PIXSIZ
N = 1
DO I = 1, NL
CALL XVWRIT(OUTUNIT,BUF(N),STATUS,' ')
N = N + NBYTES
END DO
C--- Close output file
CALL XVCLOSE(OUTUNIT,STATUS,' ')
CALL XVMESSAGE(' GENTHIS TASK COMPLETED',' ')
RETURN
END
|
{"hexsha": "932aaf2edd8e0c5fb646b53e5ef54395c46b997a", "size": 3696, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p3/prog/genthis/genthis.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p3/prog/genthis/genthis.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p3/prog/genthis/genthis.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 26.2127659574, "max_line_length": 78, "alphanum_fraction": 0.5478896104, "num_tokens": 1148}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import flashalgorithm as fc
import numpy as np
import pickle
import pdb
comp_list = ('water', 'methane', 'ethane', 'propane')
phase_list = ('aqueous', 'vapor', 'lhc', 's1', 's2')
P = 80 # bar
T = 273.15 + 12 # Kelvin
flash_full = fc.FlashController(components=comp_list,
phases=phase_list)
water_fracs = [0.4, 0.6, 0.92, 0.96]
hc_fracs = np.linspace(0, 1, 30)
c1_frac, c2_frac, c3_frac = np.meshgrid(hc_fracs, hc_fracs, hc_fracs)
z_all = list()
for water in water_fracs:
for hcs in zip(c1_frac.flatten(), c2_frac.flatten(), c3_frac.flatten()):
if sum(hcs) > 0.0:
mod_hcs = [x / sum(hcs) * (1.0 - water) for x in hcs]
z = np.asarray([water] + mod_hcs)
z_all.append(z / np.sum(z))
z_use = np.unique(np.asarray(z_all), axis=0)
z_use = z_use[:, [0, 3, 1, 2]]
z_use = z_use[::-1, :]
c1_frac_mod = z_use[:, 1] / (1 - z_use[:, 0])
z_use = z_use[c1_frac_mod > 0.5, :]
def emptycomp_hash(z):
hashed = sum([2**ii for ii, x in enumerate(z) if x == 0.0])
return hashed
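# Added illustration (not in the original script): each zero mole fraction
# sets the bit at its component index, so compositions sharing a zero
# pattern reuse one FlashController; e.g. only bit 1 is set here:
assert emptycomp_hash(np.asarray([0.5, 0.0, 0.3, 0.2])) == 2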
np.take(z_use,np.random.permutation(z_use.shape[0]),axis=0,out=z_use)
flash_dict = {0: flash_full}
all_output = list()
out_file = 'c1toc3_flashtable_70bar6C_pt8.pkl'
K_use = []
for ii, z in enumerate(z_use):
comp_hash = emptycomp_hash(z)
new_comps, new_z = zip(*[
(comp, z_) for comp, z_ in zip(comp_list, z)
if z_ != 0.0
])
if comp_hash not in flash_dict.keys():
flash_dict.update({comp_hash:
fc.FlashController(
components=new_comps,
phases=phase_list)})
flash_use = flash_dict[comp_hash]
new_z = np.asarray(new_z)
try:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=False)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=True)
    except Exception:
try:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=False)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=True)
        except Exception:
output = []
all_output.append([ii, z, new_comps, new_z, output])
if np.mod(ii, 10) == 0:
#pdb.set_trace()
print('{0:3.3f} % complete!'.format(float(ii) * 100 / len(z_use)))
with open(out_file, 'wb') as f:
pickle.dump(all_output, f)
with open(out_file, 'wb') as f:
pickle.dump(all_output, f)
|
{"hexsha": "40d21805678c1bbc73f1c812751e70c826875fca", "size": 3418, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_test.py", "max_stars_repo_name": "kdarnell/injection-sim-python", "max_stars_repo_head_hexsha": "fa018de562989a207590c2628443b878bd0ed753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_test.py", "max_issues_repo_name": "kdarnell/injection-sim-python", "max_issues_repo_head_hexsha": "fa018de562989a207590c2628443b878bd0ed753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_test.py", "max_forks_repo_name": "kdarnell/injection-sim-python", "max_forks_repo_head_hexsha": "fa018de562989a207590c2628443b878bd0ed753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7927927928, "max_line_length": 76, "alphanum_fraction": 0.5067290813, "include": true, "reason": "import numpy", "num_tokens": 903}
|
from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.utils import DictList, ParallelEnv
import numpy as np
import random
from copy import deepcopy
class MultiQAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, model, device=None, num_frames_per_proc=None, discount=0.99, lr=0.001,
recurrence=4,
adam_eps=1e-8,
buffer_size=10000,
preprocess_obss=None, reshape_reward=None):
"""
Initializes a `BaseAlgo` instance.
Parameters:
----------
envs : list
a list of environments that will be run in parallel
acmodel : torch.Module
the model
num_frames_per_proc : int
the number of frames collected by every process for an update
discount : float
the discount for future rewards
lr : float
the learning rate for optimizers
gae_lambda : float
the lambda coefficient in the GAE formula
([Schulman et al., 2015](https://arxiv.org/abs/1506.02438))
entropy_coef : float
the weight of the entropy cost in the final objective
value_loss_coef : float
the weight of the value loss in the final objective
max_grad_norm : float
gradient will be clipped to be at most this value
recurrence : int
the number of steps the gradient is propagated back in time
preprocess_obss : function
a function that takes observations returned by the environment
and converts them into the format that the model can handle
reshape_reward : function
a function that shapes the reward, takes an
(observation, action, reward, done) tuple as an input
"""
# Store parameters
num_frames_per_proc = num_frames_per_proc or 128 # is 128 correct here?
self.env = ParallelEnv(envs)
self.model = model
self.eval_model = deepcopy(model)
self.device = device
self.num_frames_per_proc = num_frames_per_proc
self.discount = discount
self.lr = lr
self.recurrence = recurrence
self.preprocess_obss = preprocess_obss or default_preprocess_obss
self.reshape_reward = reshape_reward
self.reward_size = self.model.reward_size
# Control parameters
assert self.model.recurrent or self.recurrence == 1
assert self.num_frames_per_proc % self.recurrence == 0
# Configure acmodel
self.model.to(self.device)
self.model.train()
# Store helpers values
self.num_procs = len(envs)
self.num_frames = self.num_frames_per_proc * self.num_procs
# Initialize experience values
shape = (self.num_frames_per_proc, self.num_procs)
self.obs = self.env.reset()
self.obss = [None]*(shape[0])
if self.model.recurrent:
self.memory = torch.zeros(shape[1], self.model.memory_size, device=self.device)
self.memories = torch.zeros(*shape, self.model.memory_size, device=self.device)
self.mask = torch.ones(shape[1], device=self.device)
self.masks = torch.zeros(*shape, device=self.device)
# self.masks = torch.zeros(*shape, self.reward_size, device=self.device)
self.actions = torch.zeros(*shape, device=self.device, dtype=torch.int)
self.values = torch.zeros(*shape, self.env.action_space.n, self.reward_size, device=self.device)
self.expected_values = torch.zeros(*shape, self.reward_size, device=self.device)
self.rewards = torch.zeros(*shape, self.reward_size, device=self.device)
self.log_probs = torch.zeros(*shape, device=self.device)
# initialize the pareto weights
self.weights = torch.ones(shape[1], self.reward_size, device=self.device)/self.reward_size
# Initialize log values
self.log_episode_return = torch.zeros(self.num_procs, self.reward_size, device=self.device)
self.log_episode_reshaped_return = torch.zeros(self.num_procs, self.reward_size, device=self.device)
self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device)
self.log_done_counter = 0
self.log_return = [0] * self.num_procs
self.log_reshaped_return = [0] * self.num_procs
self.log_num_frames = [0] * self.num_procs
self.buffer = ReplayBuffer(capacity=buffer_size)
self.eps = 0.05
#self.optimizer = torch.optim.Adam(self.model.parameters(), lr, eps=adam_eps)
self.optimizer = torch.optim.RMSprop(params=self.model.parameters(),
lr=self.lr)
def collect_experiences(self):
for n in range(self.num_frames_per_proc):
# calculate the prediction based on current state/obs
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
if self.model.recurrent:
q_value, memory = self.model(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
else:
q_value = self.model(preprocessed_obs)
# select an action based on the q-values
action = self.pareto_action(q_value, self.weights)
# overwrite the action based on epsilon
eps_mask = torch.rand(action.shape)<self.eps
action[eps_mask] = torch.randint(0,self.env.action_space.n,(sum(eps_mask),))
# step the environment based on the predicted action
next_obs, reward, done, _ = self.env.step(action.cpu().numpy())
next_preprocessed_obs = self.preprocess_obss(next_obs, device=self.device)
            with torch.no_grad():
                if self.model.recurrent:
                    # evaluate the next observation with the updated memory
                    next_q, next_memory = self.model(next_preprocessed_obs, memory * self.mask.unsqueeze(1))
                else:
                    next_q = self.model(next_preprocessed_obs)
        # Aggregating the collected transitions into `exps`/`logs` is not
        # implemented in this method yet (the original returned undefined
        # names); see collect_experiences_old below for the full bookkeeping.
        raise NotImplementedError
    def update_parameters(self, exps):
        self.buffer.push(exps)
        # No learning step is implemented in this method yet (the original
        # returned an undefined `logs`); see update_parameters_old below.
        # Returning an empty dict keeps the return type consistent.
        return {}
def collect_experiences_old(self):
"""Collects rollouts and computes advantages.
Runs several environments concurrently. The next actions are computed
in a batch mode for all environments at the same time. The rollouts
and advantages from all environments are concatenated together.
Returns
-------
exps : DictList
Contains actions, rewards, advantages etc as attributes.
Each attribute, e.g. `exps.reward` has a shape
(self.num_frames_per_proc * num_envs, ...). k-th block
of consecutive `self.num_frames_per_proc` frames contains
data obtained from the k-th environment. Be careful not to mix
data from different environments!
logs : dict
Useful stats about the training process, including the average
reward, policy loss, value loss, etc.
"""
for i in range(self.num_frames_per_proc):
# Do one agent-environment interaction
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
if self.model.recurrent:
value, memory = self.model(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
else:
value = self.model(preprocessed_obs)
action = self.pareto_action(value, self.weights)
eps_mask = torch.rand(action.shape)<self.eps
action[eps_mask] = torch.randint(0,self.env.action_space.n,(sum(eps_mask),))
obs, reward, done, _ = self.env.step(action.cpu().numpy())
# Update experiences values
self.obss[i] = self.obs
self.obs = obs
if self.model.recurrent:
self.memories[i] = self.memory
self.memory = memory
self.masks[i] = self.mask
self.mask = 1 - torch.tensor(done, device=self.device, dtype=torch.float)
self.actions[i] = action
self.values[i] = value
if self.reshape_reward is not None:
self.rewards[i] = torch.tensor([
self.reshape_reward(obs_, action_, reward_, done_)
for obs_, action_, reward_, done_ in zip(obs, action, reward, done)
], device=self.device)
else:
self.rewards[i] = torch.tensor(reward, device=self.device)
# Update log values
self.log_episode_return += torch.tensor(reward, device=self.device, dtype=torch.float)
self.log_episode_reshaped_return += self.rewards[i]
self.log_episode_num_frames += torch.ones(self.num_procs, device=self.device)
for i, done_ in enumerate(done):
if done_:
self.log_done_counter += 1
self.log_return.append(self.log_episode_return[i])#.item())
self.log_reshaped_return.append(self.log_episode_reshaped_return[i])#.item())
self.log_num_frames.append(self.log_episode_num_frames[i].item())
# reroll the weights for that episode
if self.reward_size == 1:
self.weights[i,0] = 1
elif self.reward_size == 2:
self.weights[i,0] = torch.rand(1)
self.weights[i,1] = 1-self.weights[i,0]
else:
raise NotImplementedError
self.log_episode_return = (self.log_episode_return.T*self.mask).T
self.log_episode_reshaped_return = (self.log_episode_reshaped_return.T * self.mask).T
self.log_episode_num_frames *= self.mask
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
if self.model.recurrent:
next_value, _ = self.eval_model(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
else:
next_value = self.eval_model(preprocessed_obs)
next_value_clipped = torch.clip(next_value, *self.env.envs[0].reward_range)
for i in reversed(range(self.num_frames_per_proc)):
next_mask = self.masks[i+1] if i < self.num_frames_per_proc - 1 else self.mask
next_mask = torch.vstack([next_mask] * self.reward_size).T
next_value = self.values[i+1] if i < self.num_frames_per_proc - 1 else next_value
self.expected_values[i] = self.rewards[i] + (self.pareto_rewards(next_value_clipped,self.weights) * (self.discount * next_mask))
# self.advantages[i] = delta + (next_advantage.T * (self.discount * self.gae_lambda * next_mask)).T
# Define experiences:
# the whole experience is the concatenation of the experience
# of each process.
# In comments below:
# - T is self.num_frames_per_proc,
# - P is self.num_procs,
# - D is the dimensionality.
exps = DictList()
exps.obs = [self.obss[i][j]
for j in range(self.num_procs)
for i in range(self.num_frames_per_proc)]
if self.model.recurrent:
# T x P x D -> P x T x D -> (P * T) x D
exps.memory = self.memories.transpose(0, 1).reshape(-1, *self.memories.shape[2:])
# T x P -> P x T -> (P * T) x 1
exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)
# for all tensors below, T x P -> P x T -> P * T
exps.action = self.actions.transpose(0, 1).reshape(-1)
exps.value = self.values.transpose(0, 1).reshape(-1,self.reward_size)
exps.reward = self.rewards.transpose(0, 1).reshape(-1,self.reward_size)
exps.exp_value = self.expected_values.transpose(0, 1).reshape(-1,self.reward_size)
exps.log_prob = self.log_probs.transpose(0, 1).reshape(-1)
# Preprocess experiences
exps.obs = self.preprocess_obss(exps.obs, device=self.device)
# Log some values
keep = max(self.log_done_counter, self.num_procs)
logs = {
"return_per_episode": self.log_return[-keep:],
"reshaped_return_per_episode": self.log_reshaped_return[-keep:],
"num_frames_per_episode": self.log_num_frames[-keep:],
"num_frames": self.num_frames
}
self.log_done_counter = 0
self.log_return = self.log_return[-self.num_procs:]
self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]
self.log_num_frames = self.log_num_frames[-self.num_procs:]
return exps, logs
def update_parameters_old(self, exps):
# Compute starting indexes
inds = self._get_starting_indexes()
# Initialize update values
update_entropy = 0
update_value = 0
update_policy_loss = 0
update_value_loss = 0
update_loss = 0
# Initialize memory
if self.model.recurrent:
memory = exps.memory[inds]
for i in range(self.recurrence):
# Create a sub-batch of experience
sb = exps[inds + i]
# Compute loss
if self.model.recurrent:
value, memory = self.model(sb.obs, memory * sb.mask)
else:
value = self.model(sb.obs)
# entropy = dist.entropy().mean()
# policy_loss = -(dist.log_prob(sb.action) * sb.advantage).mean()
loss = (value - sb.exp_value.unsqueeze(1)).pow(2).mean()
# Update batch values
update_loss += loss
# Update update values
update_value /= self.recurrence
update_loss /= self.recurrence
# Update actor-critic
self.optimizer.zero_grad()
update_loss.backward()
update_grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.model.parameters()) ** 0.5
# torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.optimizer.step()
# Log some values
logs = {
"entropy": update_entropy,
"value": update_value,
"policy_loss": update_policy_loss,
"value_loss": update_value_loss,
"grad_norm": update_grad_norm
}
return logs
def _get_starting_indexes(self):
"""Gives the indexes of the observations given to the model and the
experiences used to compute the loss at first.
The indexes are the integers from 0 to `self.num_frames` with a step of
`self.recurrence`. If the model is not recurrent, they are all the
integers from 0 to `self.num_frames`.
Returns
-------
starting_indexes : list of int
the indexes of the experiences to be used at first
"""
starting_indexes = np.arange(0, self.num_frames, self.recurrence)
return starting_indexes
def pareto_action(self, values, weights):
#col = torch.randint(0,self.reward_size,(1,))
#return torch.max(values[:,:,col], dim=1).indices.squeeze()
#print(torch.tensor([torch.argmax(torch.matmul(values[i,:,:],weights[i,:])) for i in range(values.shape[0])]))
return torch.tensor(
[torch.argmax(torch.matmul(values[i,:,:],weights[i,:]))
for i in range(values.shape[0])])
def pareto_rewards(self, values, weights):
#col = torch.randint(0,self.reward_size,(1,))
#inds = torch.max(values[:,:,col], dim=1).indices
#return values.gather(inds)
actions = self.pareto_action(values, weights)
return torch.vstack([values[i,actions[i],:] for i in range(values.shape[0])])
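# Added illustrative sketch (not used by the class): an equivalent vectorized
# form of MultiQAlgo.pareto_action that weights each action's Q-vector and
# takes the per-batch argmax in a single einsum call.
def _pareto_action_vectorized(values, weights):
    # values: (batch, n_actions, reward_size); weights: (batch, reward_size)
    return torch.argmax(torch.einsum("baq,bq->ba", values, weights), dim=1)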
class ReplayBuffer(object):
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.position = 0
    def push(self, *args):
        """Saves a transition (stored here as a plain tuple; the original
        code referenced an undefined `Transition` namedtuple)."""
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = args
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        return random.sample(self.buffer, batch_size)
def __len__(self):
return len(self.buffer)
|
{"hexsha": "489ddcedd7591da587384f844c0655e8b834604a", "size": 16527, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_ac/algos/multiQ.py", "max_stars_repo_name": "mcavolowsky/torch-ac", "max_stars_repo_head_hexsha": "4c69d0260c0776554c8a4e5c9623b03181273504", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "torch_ac/algos/multiQ.py", "max_issues_repo_name": "mcavolowsky/torch-ac", "max_issues_repo_head_hexsha": "4c69d0260c0776554c8a4e5c9623b03181273504", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torch_ac/algos/multiQ.py", "max_forks_repo_name": "mcavolowsky/torch-ac", "max_forks_repo_head_hexsha": "4c69d0260c0776554c8a4e5c9623b03181273504", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.163507109, "max_line_length": 141, "alphanum_fraction": 0.6105766322, "include": true, "reason": "import numpy", "num_tokens": 3584}
|
This was originally a collaborative project with Isky on an implementation of the Hartree-Fock method, a way of approximating the wave function, and thus the bond energies, of a quantum molecular system. However, despite a week of research, the complexity of the problem forced us to abandon that plan and instead resort to a much simpler task: plotting the probability density of hydrogen-like orbitals.
# Plotting the Atomic Orbital - Johnny Tse
This is a project which draws out the hull of each atomic orbital of a single electron in a hydrogen atom. This is done by obtaining the wave function of the electron, evaluating the (complex) probability amplitude at points around the atom, and then squaring the amplitude to derive the probability density. A contour of the probability density is then drawn to illustrate the region where the electron is most likely to be found.
This is by no means an easy process. Deriving the exhaustive list of orbital wave functions that obey the Schrödinger equation in this situation is a difficult task, and there is no easy way of finding the wave functions by simulation. Once derived, however, the orbitals can be plotted out easily.
It did, however, allow for a deeper understanding of what the wave function is. Each electron around a nucleus can be characterized by four variables: the principal quantum number (n), the azimuthal quantum number (l), the magnetic quantum number (m) and the spin quantum number (s). The principal quantum number relates to the average distance of the electron from the nucleus and its energy level, the azimuthal quantum number determines the orbital's shape and angular momentum, the magnetic quantum number gives the orbital's orientation (the angular momentum's vector component), and the spin quantum number is the intrinsic angular momentum. Each orbital is described by the variables n, l, m, and due to the Pauli exclusion principle, no two electrons in the system can share the same values of all four variables, hence each orbital can hold up to two electrons.
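For reference (standard quantum mechanics, added here for clarity): the allowed values are $n = 1, 2, 3, \ldots$; $l = 0, 1, \ldots, n-1$; $m = -l, \ldots, l$; and $s = \pm\tfrac{1}{2}$.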
The wave function is usually expressed in polar (spherical) coordinates, because it can be separated into a product of functions, each taking in only one coordinate. The radial component is a function with exponential decay, and the angular components are formed from spherical harmonics, a set of functions whose combinations can generate arbitrary functions defined on a sphere, much like a Fourier series. Spherical harmonics play a role in the construction of the wave functions due to their link to standing waves on the surface of a sphere, which applies in this situation as well.
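Concretely, this separation takes the standard textbook form $\psi_{nlm}(r, \theta, \phi) = R_{nl}(r)\,Y_l^m(\theta, \phi)$, with $R_{nl}$ the radial part and $Y_l^m$ a spherical harmonic (added here for clarity).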
Notice that the wave function can only be expressed explicitly for single-electron (hydrogen-like) atoms. If there are multiple electrons, the repulsion between the electrons entangles their wave functions, making it impossible to write an explicit wave function for the system. We can, however, attempt to approximate it, using various methods such as the Hartree-Fock method.
In terms of programming, this project opened me to the world of symbolic computation. In conventional programming, the closest thing to a symbol is a variable, which must hold a value. A symbol, on the other hand, is just a placeholder, allowing different kinds of manipulation (such as evaluating equality). Another feature this code uses is NumPy's capability for vectorization. It is essentially batch processing an array of data with a single function call, which is many times faster than calling the function iteratively on every single data point.
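As a toy illustration of the vectorization idea (this snippet is illustrative only and not part of the project code):
```python
import numpy as np
xs = np.linspace(0.0, 1.0, 1_000_000)
ys = np.sin(xs) ** 2  # one vectorized call over the whole array
# equivalent but far slower, invoking the function once per element:
# ys = np.array([np.sin(x) ** 2 for x in xs])
```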
## Note #1
Although the 2D graphs below might allude to the idea that the electron is mostly found near the nucleus, due to the bright colour there, this is in fact not the case. Although the outer region of the atom is much dimmer, it covers a greater volume. The region near the nucleus has a higher probability density but a smaller volume, so it is actually quite unlikely to find the electron in that region.
To illustrate this point, here is the probability of finding an electron at a certain distance from the nucleus for the first three s-orbitals, extracted from "General Chemistry: Principles, Patterns, and Applications - 6.5 Atomic Orbitals and Their Energies":
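In formula form (a standard result, added for clarity): for a normalized angular part, the radial probability distribution is $P(r) = r^2\,|R_{nl}(r)|^2$, and it is the $r^2$ shell-volume factor that pushes the most likely radius away from the nucleus.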
## Note #2
A hydrogen wave function also comes in two flavours: complex atomic orbitals and real atomic orbitals. When describing the atomic orbitals of an electron with its four values (n, l, m, s), one obtains the complex atomic orbitals, which are shown below, extracted from the Wikipedia page on the magnetic quantum number.
As you can see, many of their shapes are the same except for a phase difference (look at how the colour evolves going around the loop). For better visualization, complex atomic orbitals are combined to form superpositions whose wave functions lie in the real domain. These are the real orbitals, which will be used in the following code.
This is in fact equivalent to the complex type, and the reason why is quite complicated. It is hinted at by the fact that electrons can be in superposition between orbitals, and using the complex atomic orbitals or the real atomic orbitals is simply a matter of picking which [basis](https://en.wikipedia.org/wiki/Basis_(linear_algebra)) to use.
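One common convention for these superpositions (sign conventions differ between sources; sympy's `Znm` follows this pattern up to such conventions) is $Z_l^m = \frac{1}{\sqrt{2}}\,(Y_l^m + \overline{Y_l^m})$ for $m > 0$, $Z_l^m = \frac{1}{i\sqrt{2}}\,(Y_l^m - \overline{Y_l^m})$ for $m < 0$, and $Z_l^0 = Y_l^0$.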
### Either way, this is very important because it sheds light on what the orbitals we are familiar with actually are. They are useful in chemistry for different electron models, but they are just one possible selection of basis which makes visualization easier. The complex atomic orbitals, though they look quite different, will still yield correct answers when used in calculations.
## Please run every block of code below
```python
# The code's dependencies are declared here
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from sympy.physics.hydrogen import Psi_nlm, R_nl #might be unnecessary
from sympy.functions.special.spherical_harmonics import Znm
from sympy import Symbol
from sympy.utilities.lambdify import lambdify
from mpmath import atan2
import ipyvolume as ipv
```
## Constructing the wave function
In this part, the real wave function is constructed. Adjust the atomic orbital being plotted by typing in the principal quantum number (n), the azimuthal quantum number (l) and the magnetic quantum number (m) of the orbital. This generates the wave function of the orbital in spherical coordinates, using the sympy library.
```python
r = Symbol("r", real=True, positive=True) #radius
phi = Symbol("phi", real=True) #azimuthal angle
theta = Symbol("theta", real=True) #polar angle
Z = Symbol("Z", positive=True, integer=True, nonzero=True) #atomic number
#Variables - n,1,m
n,l,m = 5,2,2
#Construction of complex wave function, hard to visualize
#psi = Psi_nlm(n,l,m,r,phi,theta,Z)
#Construction of real wave function, used in the following code
psi = R_nl(n, l, r, Z)*Znm(l,m,theta,phi).expand(func=True)
print("Psi: ", psi)
```
## Plotting the probability density in 2D
A vertical (parallel to the z-axis) 2D slice of the probability density plot is rendered via the code below, producing a graph in which a lighter colour means a higher probability density in that region of space.
This is handled using NumPy vectorization, which utilizes a large tensor (multi-dimensional array) to store the x and y coordinates of every point of the system at which we wish to record the probability density. The Cartesian coordinates are then transformed to polar coordinates, the tensor of coordinates is substituted into the wave function to obtain the tensor of probability amplitudes and then the tensor of probability densities, which is fed directly into the plotting library to produce the plot.
Utilizing this approach over a native Python for-loop going through each pixel enables orders-of-magnitude speedups, which allows a detailed plot to be rendered in reasonable time.
The variables that can be adjusted are maxi, the maximum distance to render from the nucleus, and resolution, the number of points sampled along each dimension.
```python
#NUMPY VECTORIZATION, FAST
#Variables to adjust
maxi = 40
resolution = 180
#Setting up equally spaced points around the nucleus
base = np.linspace(-maxi, maxi, resolution)[:,np.newaxis]
x2 = np.tile(base, (1,resolution))
y2 = np.swapaxes(x2,0,1)
total = np.concatenate((x2[np.newaxis,:],y2[np.newaxis,:]), axis=0)
#Converting cartesian coordinates into polar coordinates
r2 = np.linalg.norm(total, axis=0)
theta2 = np.arctan2(total[1],total[0])
#Substitution of values
psi2 = psi.subs({Z: 1, phi: 0})
lam_psi = lambdify([r,theta], psi2, 'numpy')
image1 = lam_psi(r2,theta2)
#Conversion of probability amplitude to probability density
image1 = np.real(image1*np.conj(image1))
#Plotting
plt.imshow(image1, cmap="gist_gray")
plt.colorbar()
```
```python
#NATIVE APPROACH, VERY SLOW
maxi = 40
resolution = 20
image = np.empty([resolution,resolution])
for x in range(resolution):
for y in range(resolution):
x1 = 2*maxi*x/resolution-maxi
y1 = 2*maxi*y/resolution-maxi
res = psi.subs({Z: 1, r: (x1**2+y1**2)**0.5, phi: 0, theta: atan2(y1,x1)})
image[x,y] = res * res.conjugate()
plt.imshow(image, cmap="gist_gray")
plt.colorbar()
```
## Plotting the probability density in 3D
Two modes are available to visualize the probability density in 3D. The first method is to plot the regions of space with different colours and opacities based on how high the probability density is. The second method is to draw a 3D contour around the regions of high probability density, creating what is called an isosurface. The volume bounded by the isosurface is where the electron is mainly "located" (it is not located at a single point).
This code is similarly written using NumPy vectorization, in order to speed up the execution.
The first part of the code sets up the tensor which stores the probability density at points around the nucleus. Run the second part of the code to plot the entire probability density with colours and opacities. To draw out the isosurface, run the third part of the code. Adjust the slider's position beneath the plot to change the threshold for a point to reside within the isosurface.
```python
#Variables to adjust
maxi = 60
resolution = 160
base = np.linspace(-maxi, maxi, resolution)[:,np.newaxis,np.newaxis]
x2 = np.tile(base, (1,resolution,resolution))
y2 = np.swapaxes(x2,0,1)
z2 = np.swapaxes(x2,0,2)
total = np.concatenate((x2[np.newaxis,:],y2[np.newaxis,:],z2[np.newaxis,:]), axis=0)
r2 = np.linalg.norm(total, axis=0)
#Alternative theta calculation
#theta3 = np.abs(np.arctan2(np.linalg.norm(total[:2], axis=0),-total[2]))
np.seterr(all='ignore')
theta2 = np.arctan(np.divide(total[2],np.linalg.norm(total[:2], axis=0))) + np.pi/2
phi2 = np.arctan2(total[1],total[0])
psi2 = psi.subs({Z: 1})
lam_psi = lambdify([r,phi,theta], psi2, 'numpy')
image2 = np.square(lam_psi(r2,phi2,theta2)).real
#Scaling the probability density up
image2 *=1000000000
```
```python
#Volume with opacities
ipv.figure()
ipv.volshow(image2)
ipv.show()
```
```python
#Isosurface
ipv.figure()
ipv.plot_isosurface(image2)
ipv.show()
```
|
{"hexsha": "ba65e5d80240a5d5fda5e292e68dce96511f96bf", "size": 14145, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Orbital - Hydrogen-like.ipynb", "max_stars_repo_name": "dylux/Chemistry-Project", "max_stars_repo_head_hexsha": "76d714858909dc7e8c44074fc93108dcb193641f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Orbital - Hydrogen-like.ipynb", "max_issues_repo_name": "dylux/Chemistry-Project", "max_issues_repo_head_hexsha": "76d714858909dc7e8c44074fc93108dcb193641f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Orbital - Hydrogen-like.ipynb", "max_forks_repo_name": "dylux/Chemistry-Project", "max_forks_repo_head_hexsha": "76d714858909dc7e8c44074fc93108dcb193641f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.1954022989, "max_line_length": 830, "alphanum_fraction": 0.6821491693, "converted": true, "num_tokens": 2582}
|
% +-======-+
% Copyright (c) 2003-2007 United States Government as represented by
% the Admistrator of the National Aeronautics and Space Administration.
% All Rights Reserved.
%
% THIS OPEN SOURCE AGREEMENT ("AGREEMENT") DEFINES THE RIGHTS OF USE,
% REPRODUCTION, DISTRIBUTION, MODIFICATION AND REDISTRIBUTION OF CERTAIN
% COMPUTER SOFTWARE ORIGINALLY RELEASED BY THE UNITED STATES GOVERNMENT AS
% REPRESENTED BY THE GOVERNMENT AGENCY LISTED BELOW ("GOVERNMENT AGENCY").
% THE UNITED STATES GOVERNMENT, AS REPRESENTED BY GOVERNMENT AGENCY, IS AN
% INTENDED THIRD-PARTY BENEFICIARY OF ALL SUBSEQUENT DISTRIBUTIONS OR
% REDISTRIBUTIONS OF THE SUBJECT SOFTWARE. ANYONE WHO USES, REPRODUCES,
% DISTRIBUTES, MODIFIES OR REDISTRIBUTES THE SUBJECT SOFTWARE, AS DEFINED
% HEREIN, OR ANY PART THEREOF, IS, BY THAT ACTION, ACCEPTING IN FULL THE
% RESPONSIBILITIES AND OBLIGATIONS CONTAINED IN THIS AGREEMENT.
%
% Government Agency: National Aeronautics and Space Administration
% Government Agency Original Software Designation: GSC-15354-1
% Government Agency Original Software Title: GEOS-5 GCM Modeling Software
% User Registration Requested. Please Visit http://opensource.gsfc.nasa.gov
% Government Agency Point of Contact for Original Software:
% Dale Hithon, SRA Assistant, (301) 286-2691
%
% +-======-+
\section{Package Overview }
%
\pagenumbering{arabic}
%
\setcounter{secnumdepth}{5}
\setlength{\parskip}{0.5em}
%
The Message Passing Environment Utility ({\tt mpeu}) is a library that
was designed and developed at the Data Assimilation Office (DAO) to
support its data assimilation system.
It sits on top of vendor utilities such as MPI, BLAS, shared-memory
primitives, etc.
%
{\tt mpeu} is written in Fortran 90 and is portable to many platforms.
It provides the following services:
%
\begin{itemize}
\item Management of resources
\item Manipulation of strings
\item F90 module-style access to MPI
\item Portable/flexible definition of types
\item Support for multiprocessor stdout/stderr
\item Error handling/shutdown
\item Timing/load balance monitoring tools
\item Sorting tools
\end{itemize}
%
The {\tt mpeu} library can be easily included in any application (using
MPI) and can be ported to any platform with little (or no) modification.
This report is a user's documentation that describes the above {\tt mpeu}
services.
Each of the services is contained in a module.
In the next sections, we present the functions of each module and provide
enough examples to show how the library can be efficiently employed in a
given application.
|
{"hexsha": "780ecd6acf154fe8528f305b5b67da1cfb4e30d1", "size": 2616, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ESMF/src/addon/MAPL/GMAO_mpeu/doc/PackageOverview.tex", "max_stars_repo_name": "joeylamcy/gchp", "max_stars_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_stars_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-05T16:48:58.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-05T16:48:58.000Z", "max_issues_repo_path": "ESMF/src/addon/MAPL/GMAO_mpeu/doc/PackageOverview.tex", "max_issues_repo_name": "joeylamcy/gchp", "max_issues_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_issues_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-04T16:12:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T16:12:02.000Z", "max_forks_repo_path": "ESMF/src/addon/MAPL/GMAO_mpeu/doc/PackageOverview.tex", "max_forks_repo_name": "joeylamcy/gchp", "max_forks_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3", "max_forks_repo_licenses": ["NCSA", "Apache-2.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1935483871, "max_line_length": 77, "alphanum_fraction": 0.7679663609, "num_tokens": 693}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""A generator of graphs written in Python and LaTeX.
https://github.com/jariazavalverde/graphs
"""
import numpy as np
# FORMATS
def format_c(adjacency):
"""Returns a string representation of the given graph in C format."""
size = adjacency.shape[0]
string = "int adjacency[%d][%d] = {\n" % (size, size)
    for i in range(size):
string += "\t{" + ", ".join(map(str, np.array(adjacency[i], dtype=int).tolist())) + "}"
if i+1 < size:
string += ","
string += "\n"
string += "};"
return string
def format_dimacs(adjacency):
"""Returns a string representation of the given graph in DIMACS format."""
string = ""
size = adjacency.shape[0]
edges = 0
    for i in range(size):
        for j in range(i, size):
if adjacency[i][j]:
string += "\ne %d %d" % ((i+1), (j+1))
edges += 1
string = ("p edge %d %d" % (size, edges)) + string
string = "c https://github.com/jariazavalverde/graphs\n" + string
return string
def format_list(adjacency):
"""Returns a string representation of the given graph in Python format."""
return str(np.array(adjacency, dtype=int).tolist())
def format_numpy(adjacency):
"""Returns a string representation of the given graph in numpy format."""
return str(np.array(adjacency, dtype=int))
# HANDLING
format_functions = {
"c": format_c,
"dimacs": format_dimacs,
"haskell": format_list,
"javascript": format_list,
"prolog": format_list,
"python": format_list,
"numpy": format_numpy
}
# AUTHORSHIP INFORMATION
__author__ = "José Antonio Riaza Valverde"
__copyright__ = "Copyright 2019, José Antonio Riaza Valverde"
__credits__ = ["José Antonio Riaza Valverde"]
__license__ = "BSD 3-Clause"
__version__ = "1.0.0"
__maintainer__ = "José Antonio Riaza Valverde"
__email__ = "riaza.valverde@gmail.com"
__status__ = "Development"
|
{"hexsha": "42fe14a4324a1bd734decb10e5721dbb699b9a1a", "size": 1837, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/formats.py", "max_stars_repo_name": "jariazavalverde/graph-families", "max_stars_repo_head_hexsha": "845b8bf6e1990964d0a8322f4a91e85bcb6da512", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-12T18:26:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-12T18:26:17.000Z", "max_issues_repo_path": "src/formats.py", "max_issues_repo_name": "jariazavalverde/graph-families", "max_issues_repo_head_hexsha": "845b8bf6e1990964d0a8322f4a91e85bcb6da512", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/formats.py", "max_forks_repo_name": "jariazavalverde/graph-families", "max_forks_repo_head_hexsha": "845b8bf6e1990964d0a8322f4a91e85bcb6da512", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8243243243, "max_line_length": 89, "alphanum_fraction": 0.6815459989, "include": true, "reason": "import numpy", "num_tokens": 522}
|
import numpy as np
Fs = 32e3
Ts = 1.0 / Fs
frequencies = (1 + np.arange(8)) * 1e3
carrier_index = 0
Fc = frequencies[carrier_index]
Tc = 1.0 / Fc
symbols = np.array([complex(x, y)
for x in np.linspace(-1, 1, 8)
for y in np.linspace(-1, 1, 8)]) / np.sqrt(2)
Tsym = 1e-3
Nsym = int(Tsym / Ts)
baud = int(1/Tsym)
|
{"hexsha": "1391b76951c6e6b106509f081e810d670288ecd5", "size": 352, "ext": "py", "lang": "Python", "max_stars_repo_path": "config.py", "max_stars_repo_name": "RagnarDanneskjold/amodem", "max_stars_repo_head_hexsha": "5d2bcd5004035fcd34369927a243e611ce7e2700", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "config.py", "max_issues_repo_name": "RagnarDanneskjold/amodem", "max_issues_repo_head_hexsha": "5d2bcd5004035fcd34369927a243e611ce7e2700", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config.py", "max_forks_repo_name": "RagnarDanneskjold/amodem", "max_forks_repo_head_hexsha": "5d2bcd5004035fcd34369927a243e611ce7e2700", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T17:10:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T17:10:57.000Z", "avg_line_length": 19.5555555556, "max_line_length": 64, "alphanum_fraction": 0.5596590909, "include": true, "reason": "import numpy", "num_tokens": 135}
|
export intmat2binmat
"""
function intmat2binmat(M;p=maximum(M))
Converts a general integer matrix {1,2,...,p}^{m x n} to a binary matrix of
size (m*(p-1),n).
Ones are converted to false vectors of length p-1. Two is converted to
[true;false;...;false] and p is converted to [false;...;false;true].
Input:
M - Integer matrix of size (m,n) with integers 1,...,p
Keyword argument:
p - number of categories (if larger than the largest number already in M)
Output:
Mbin - BitArray of size (m*(p-1),n)
"""
function intmat2binmat(M::Array{Int,2};p::Int=maximum(M))
if minimum(M)<=0
error("the categories in the integer matrix must be positive integers")
end
m = size(M,1)
n = size(M,2)
p = maximum((p,maximum(M)))
# initialize output
Mbin = falses(m*(p-1),n)
for k = 1:m
fullblock = falses(p,n)
# values in the k'th row of the original matrix are now row indices for
# the next block in the binary matrix
fullblock[sub2ind(size(fullblock),M[k,:],1:n)] = true
# discard the first row because it is redundant
block = fullblock[2:p,:]
# assign the new block to the output matrix
Mbin[(k-1)*(p-1)+1 : k*(p-1), :] = block
end
return Mbin
end
|
{"hexsha": "c97ab443db7d3ee477d57558d9c81dcfd38f00b6", "size": 1268, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/intmat2binmat.jl", "max_stars_repo_name": "lruthotto/StrainRecon.jl", "max_stars_repo_head_hexsha": "ba1c5392994e80bb0f7e6b94f90b404f25c40958", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-01T00:47:02.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-01T00:47:02.000Z", "max_issues_repo_path": "src/intmat2binmat.jl", "max_issues_repo_name": "lruthotto/StrainRecon.jl", "max_issues_repo_head_hexsha": "ba1c5392994e80bb0f7e6b94f90b404f25c40958", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/intmat2binmat.jl", "max_forks_repo_name": "lruthotto/StrainRecon.jl", "max_forks_repo_head_hexsha": "ba1c5392994e80bb0f7e6b94f90b404f25c40958", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.36, "max_line_length": 79, "alphanum_fraction": 0.6253943218, "num_tokens": 379}
|
import numpy as np
def test_base_transform(image, mean):
x = image.astype(np.float32)
x -= mean
x = x.astype(np.float32)
return x
class TestBaseTransform:
def __init__(self, mean):
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image):
return test_base_transform(image, self.mean)
widerface_640 = {
'num_classes': 2,
'feature_maps': [160, 80, 40, 20, 10, 5],
'min_dim': 640,
'steps': [4, 8, 16, 32, 64, 128], # stride
'variance': [0.1, 0.2],
'clip': True, # make default box in [0,1]
'name': 'WIDERFace',
'l2norm_scale': [10, 8, 5],
'base': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512],
'extras': [256, 'S', 512, 128, 'S', 256],
'mbox': [1, 1, 1, 1, 1, 1],
'min_sizes': [16, 32, 64, 128, 256, 512],
'max_sizes': [],
'aspect_ratios': [[1.5], [1.5], [1.5], [1.5], [1.5], [1.5]], # [1,2] default 1
'backbone': 'resnet152',
'feature_pyramid_network': True,
'bottom_up_path': False,
'feature_enhance_module': True,
'max_in_out': True,
'focal_loss': False,
'progressive_anchor': True,
'refinedet': False,
'max_out': False,
'anchor_compensation': False,
'data_anchor_sampling': False,
'overlap_thresh': [0.4],
'negpos_ratio': 3,
# test
'nms_thresh': 0.3,
'conf_thresh': 0.01,
'num_thresh': 5000,
}
|
{"hexsha": "f050417d7688c5661a215038738a65529b13db18", "size": 1428, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/config.py", "max_stars_repo_name": "juanmed/FaceDetection-DSFD", "max_stars_repo_head_hexsha": "23650ca492444f9f052ca9b8db8b068a9be5bc68", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 98, "max_stars_repo_stars_event_min_datetime": "2020-06-08T20:00:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:16:25.000Z", "max_issues_repo_path": "data/config.py", "max_issues_repo_name": "juanmed/FaceDetection-DSFD", "max_issues_repo_head_hexsha": "23650ca492444f9f052ca9b8db8b068a9be5bc68", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2021-03-11T18:44:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T02:47:53.000Z", "max_forks_repo_path": "data/config.py", "max_forks_repo_name": "juanmed/FaceDetection-DSFD", "max_forks_repo_head_hexsha": "23650ca492444f9f052ca9b8db8b068a9be5bc68", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-06-12T19:07:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T02:44:29.000Z", "avg_line_length": 24.6206896552, "max_line_length": 96, "alphanum_fraction": 0.5651260504, "include": true, "reason": "import numpy", "num_tokens": 538}
|
"""
Anne Urai, CSHL, 2020-05-17
"""
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
from patsy import dmatrices
from datetime import datetime
import statsmodels.api as sm
# layout
sns.set(style="ticks", context="paper")
sns.despine(trim=True)
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
# ====================================== #
# read data from Ian Stevenson
# ====================================== #
# original spreadsheet from Ian Stevenson: https://stevenson.lab.uconn.edu/scaling/
# df = pd.read_csv('https://docs.google.com/spreadsheet/pub?hl=en_US&hl=en_US&key=0Ai7vcDJIlD6AdF9vQWlNRDh2S1dub09jMWRvTFRpemc&single=true&gid=0&output=csv')
# instead, use my own sheet with additional datapoints for imaging
df = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vQdv2uGPz4zSZmfpiIUrHvpB90Cz6cs8rgObbAqNQmsaLb5moGg8sYlIvfSZvXhoh1R1id8lZFyASkC/pub?gid=1390826946&single=true&output=csv')
print(df.describe())
# add some things - like the date for the x-axis
df['date'] = pd.to_datetime(df['Year'].astype(str) + '-' + df['Month'].astype(str) + '-' + '01')
df['years'] = (df['date'] - datetime(1950, 1, 1)).dt.days / 365
df['date_num'] = df['Year'] + (df['Month']-1)/12
df['neurons_log'] = np.log(df['Neurons']) # take log
# ====================================== #
# refit the curve from Stevenson et al. 2011
# ====================================== #
# separate out data for fit to original papers
fit_data = df[(df['Source'] == 'S&K')].copy()
# from https://github.com/ihstevenson/scaling/blob/master/scaling.py:
# only the first M papers to record >=N neurons are kept; sanity-check that
# no neuron count appears in more than 10 unique papers
tmp = fit_data.groupby(['Neurons'])['DOI'].nunique().reset_index()
assert all(tmp['DOI'] <= 10)
# use patsy
y, X = dmatrices('neurons_log ~ date_num', data=fit_data, return_type='dataframe')
mod = sm.OLS(y, X) # Describe model
res = mod.fit() # Fit model
print(res.summary()) # Summarize model
# what's the doubling time from this model? log(2) / a
doubling_time = np.log(2) / res.params['date_num']
print('Doubling time S&K: %f years'%doubling_time)
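# quick check of the formula: for log(N) = a*t + b, N doubles when a*dt = log(2),
# so exp(slope * doubling_time) must equal 2
assert np.isclose(np.exp(res.params['date_num'] * doubling_time), 2.0)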
# extrapolate to whenever
xvec1 = np.linspace(2000, 2500, 100)
yvec1 = res.predict(sm.add_constant(xvec1))
# ====================================== #
# also fit a separate curve on the imaging data only
# ====================================== #
fit_data2 = df[(df['Method'] == 'Imaging')].copy()
# use patsy
y2, X2 = dmatrices('neurons_log ~ date_num', data=fit_data2, return_type='dataframe')
mod2 = sm.OLS(y2, X2) # Describe model
res2 = mod2.fit() # Fit model
print(res2.summary()) # Summarize model
# what's the doubling time from this model? log(2) / a
doubling_time2 = np.log(2) / res2.params['date_num']
print('Doubling time imaging: %f years'%doubling_time2)
# extrapolate to whenever
yvec2 = res2.predict(sm.add_constant(xvec1))
# ====================================== #
# SHOW SOME TARGET NUMBERS FOR NEURONS IN DIFFERENT SPECIES
# ====================================== #
# Herculano-Houzel et al. 2015, 10.1159/000437413
nneurons = [{'species':'Caenorhabditis elegans', 'name':'C. elegans',
'nneurons_low':302, 'nneurons_high':302},
{'species': 'Danio rerio (larvae)', 'name': 'Zebrafish (larva)', # https://elifesciences.org/articles/28158
'nneurons_low': 100000, 'nneurons_high': 100000},
# {'species':'Drosophila melanogaster', 'name':'Drosophila', # https://doi.org/10.1016/j.cub.2010.11.056
# 'nneurons_low':135000, 'nneurons_high':135000},
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3597383/
{'species':'Mus musculus', 'name':'Mouse', # Vincent et al 2010: 7.5x10^7
'nneurons_low':67873741-10406194, 'nneurons_high':67873741+10406194},
# {'species':'Rattus norvegicus', 'name':'Rat',
# 'nneurons_low':188867832-12622383, 'nneurons_high':188867832+12622383},
{'species': 'Macaca mulatta', 'name': 'Macaque',
'nneurons_low': 6376160000, 'nneurons_high': 6376160000},
{'species': 'Homo sapiens', 'name': 'Human',
'nneurons_low': 86060000000-8120000000, 'nneurons_high': 86060000000+8120000000},
]
# ====================================== #
# for each species, when will we record
# from all of its neurons?
# ====================================== #
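# invert each fit: find the extrapolated year whose predicted log neuron count
# is closest to the (log) average neuron count for the species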
for sp in nneurons:
avg_log = np.log((sp['nneurons_low'] + sp['nneurons_high']) / 2)
max_year = xvec1[np.abs(yvec1-avg_log).argmin()]
min_year = xvec1[np.abs(yvec2-avg_log).argmin()]
print('%s: expected %d - %d'%(sp['name'], min_year, max_year))
# ====================================== #
# make the plot
# ====================================== #
fig, ax = plt.subplots(1, 1, figsize=[5, 3.5])
sns.scatterplot(data=df, x='date_num', y='neurons_log', style='Source',
hue='Method', zorder=0, s=10, linewidths=0.5, alpha=0.5,
palette=sns.color_palette(["firebrick", "midnightblue"]),
hue_order=['Imaging', 'Ephys'],
markers={'S&K':'s', 'Stevenson':'o', 'Urai':'o',
'Rupprecht':'o', 'Charles':'o', 'Meijer':'o',
'Svoboda':'o'}, legend=False)
# write labels in plot, instead of legend
ax.text(2004, np.log(2), 'Electrophysiology',
{'color':"midnightblue", 'fontsize':9, 'fontstyle':'italic'})
ax.text(1985, np.log(1000), 'Optical\nimaging',
{'color':"firebrick", 'fontsize':9, 'fontstyle':'italic'})
# plot Stevenson curve on top
ax.plot(X['date_num'], res.predict(), color='k')
# then show extrapolation beyond 2011; to now
xvec = df[df['date_num'] > 1960]['date_num']
yvec = res.predict(sm.add_constant(xvec))
ax.plot(xvec, yvec, color='k', linestyle='--')
# and finally, all the way out into the future
xvec = np.linspace(2020, 2025, 100)
yvec = res.predict(sm.add_constant(xvec))
ax.plot(xvec, yvec, color='k', linestyle=':')
# show, for each species, the range
for a in nneurons:
    # x-position (in years) at which to place the species label
    year = 1958
n_neurons = np.log((a['nneurons_low'] + a['nneurons_high'])/2)
ax.axhline(y=n_neurons, color='grey', linestyle=':', zorder=-100, xmax=0.92)
ax.text(year, n_neurons, a['name'],
verticalalignment='bottom', fontsize=7,
color='grey')
# layout
yticks = np.logspace(0, 11, 12)
ax.set(ylabel='Simultaneously recorded neurons', xlabel='',
yticks=np.log(yticks))
yticklabs = ['1', '10', '100', '1k', '10k', '100k', '1m', '10m', '100m', '1b', '10b', '100b']
# powers-of-ten labels, kept for reference:
# ax.set_yticklabels([r'$\mathregular{10^{%i}}$' % np.log10(y) for y in yticks])
ax.set_yticklabels(yticklabs)
sns.despine(trim=True)
plt.show()
fig.savefig('scaling_figure.pdf')
fig.savefig('scaling_figure.png', dpi=600)
|
{"hexsha": "6265ffdf63640712ebd8d8386652937c8820323e", "size": 6850, "ext": "py", "lang": "Python", "max_stars_repo_path": "largescale_recordings.py", "max_stars_repo_name": "anne-urai/largescale_recordings", "max_stars_repo_head_hexsha": "987f803d211c19e6ff217f16785dad768467ba97", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-26T11:25:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T07:15:12.000Z", "max_issues_repo_path": "largescale_recordings.py", "max_issues_repo_name": "anne-urai/largescale_recordings", "max_issues_repo_head_hexsha": "987f803d211c19e6ff217f16785dad768467ba97", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "largescale_recordings.py", "max_forks_repo_name": "anne-urai/largescale_recordings", "max_forks_repo_head_hexsha": "987f803d211c19e6ff217f16785dad768467ba97", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-06T20:31:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T20:31:06.000Z", "avg_line_length": 40.5325443787, "max_line_length": 189, "alphanum_fraction": 0.6131386861, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 2091}
|
module Language.LSP.BrowseNamespace
import Core.Context
import Core.Core
import Core.Env
import Core.Metadata
import Core.Name
import Data.List
import Idris.Doc.String
import Idris.REPL.Opts
import Idris.Resugar
import Idris.Syntax
import Language.LSP.Definition
import Language.LSP.Message
import Libraries.Data.NameMap
import Parser.Source
import Parser.Rule.Source
import Server.Configuration
import Server.Log
import Server.Utils
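||| True when the name resolves in the context and its visibility is not Private.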
visible : Defs -> Name -> Core Bool
visible defs n = do
Just def <- lookupCtxtExact n (gamma defs)
| Nothing => pure False
pure $ visibility def /= Private
inNS : Namespace -> Name -> Bool
inNS ns (NS xns (UN _)) = ns `isParentOf` xns
inNS _ _ = False
getNames : Ref ROpts REPLOpts
=> Ref Ctxt Defs
=> Ref Syn SyntaxInfo
=> Namespace -> Core (List Name)
getNames ns = do
defs <- get Ctxt
names <- allNames defs.gamma
let allNames = filter (inNS ns) names
allNames <- filterM (visible defs) allNames
pure $ sort allNames
buildDocumentSymbol : Ref Ctxt Defs
=> Ref LSPConf LSPConfiguration
=> Ref Syn SyntaxInfo
=> Name -> Core (Maybe SymbolInformation)
buildDocumentSymbol n = do
defs <- get Ctxt
Just def <- lookupCtxtExact n defs.gamma
| _ => pure Nothing
Just loc <- mkLocation def.location
| _ => pure Nothing
let isDeprecated = Deprecate `elem` def.flags
let kind = case def.definition of
(PMDef {}) => Function
(ExternDef {}) => Function
(ForeignDef {}) => Function
(Builtin {}) => Function
(DCon {}) => EnumMember
(TCon {}) => Constructor
(Hole {}) => Variable
_ => Null
ty <- resugar [] =<< normaliseHoles defs [] def.type
pure $ Just $ MkSymbolInformation
{ name = "\{show $ dropNS n} : \{show ty}"
, kind = kind
, tags = if isDeprecated then Just [Deprecated] else Nothing
, deprecated = Nothing
, location = loc
, containerName = Nothing
}
||| Returns the list of functions visible in the given namespace.
||| The response uses the same format as a textDocument/documentSymbol response.
export
browseNamespaceCmd : Ref Ctxt Defs
=> Ref Syn SyntaxInfo
=> Ref ROpts REPLOpts
=> Ref LSPConf LSPConfiguration
=> String -> Core (List SymbolInformation)
browseNamespaceCmd str = do
let Right (_, _, ns) = runParser (Virtual Interactive) Nothing str namespaceId
| _ => pure []
logI Browse "Browsing namespace \{show ns}"
names <- getNames ns
logI Browse "Names in \{show ns} fetched, found \{show $ length names}"
catMaybes <$> traverse buildDocumentSymbol names
|
{"hexsha": "2d9563dbcd54aaab809dcdbd51ad503487bff526", "size": 2783, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/Language/LSP/BrowseNamespace.idr", "max_stars_repo_name": "Z-snails/idris2-lsp", "max_stars_repo_head_hexsha": "3a949818ef0180baabc5a88f3533c3154f49c3ce", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Language/LSP/BrowseNamespace.idr", "max_issues_repo_name": "Z-snails/idris2-lsp", "max_issues_repo_head_hexsha": "3a949818ef0180baabc5a88f3533c3154f49c3ce", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Language/LSP/BrowseNamespace.idr", "max_forks_repo_name": "Z-snails/idris2-lsp", "max_forks_repo_head_hexsha": "3a949818ef0180baabc5a88f3533c3154f49c3ce", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.625, "max_line_length": 80, "alphanum_fraction": 0.6374416098, "num_tokens": 689}
|
"""
"""
import argparse
import numpy as np
import time
from typing import List, Optional
from . import cli
from . import logging
from . import nn
from . import properties
from . import utils
from .verifiers.common import VerifierError, VerifierTranslatorError, SAT
def main(args: argparse.Namespace, extra_args: Optional[List[str]] = None):
logger = logging.initialize(__package__, args)
utils.set_random_seed(args.seed)
logger.debug("Reading property %s", args.property)
phi = properties.parse(args.property, format=args.prop_format, args=extra_args)
print("Verifying property:")
print(phi)
print()
if extra_args is not None and len(extra_args) > 0:
logger.error("Unused arguments: %r", extra_args)
unknown_args = " ".join(extra_args)
print(f"ERROR: Unknown arguments: {unknown_args}")
return 1
if args.networks:
print("Verifying Networks:")
networks = {}
for name, network in args.networks.items():
print(f"{name}:")
logger.debug("Parsing network (%s)", network)
dnn = nn.parse(network).simplify()
dnn.pprint()
networks[name] = dnn
print()
phi.concretize(**networks)
if len(args.verifiers) > 1:
verifier_names = [v.__module__ for v in args.verifiers]
logger.error("More than 1 verifier specified: %r", verifier_names)
print(f"ERROR: More than 1 verifier specified: {verifier_names}")
return 1
elif len(args.verifiers) == 0:
return 0
verifier = args.verifiers[0]
verifier_name = verifier.__name__.lower()
    start_t = time.time()
    cex = None  # ensure `cex` is defined even if the verifier raises
    try:
params = args.verifier_parameters.get(verifier_name, {})
result, cex = verifier.verify(phi, **params)
except VerifierTranslatorError as e:
result = f"{type(e).__name__}({e})"
logger.debug("Translation Error traceback:", exc_info=True)
except VerifierError as e:
result = f"{type(e).__name__}({e})"
logger.debug("Verifier Error traceback:", exc_info=True)
except SystemExit:
if verifier.__module__ != "dnnv.verifiers.convert":
logger.error(f"Verifier {verifier_name} called exit()")
raise
return 0
end_t = time.time()
if result == SAT and args.save_violation is not None and cex is not None:
np.save(args.save_violation, cex)
print(f"{verifier.__module__}")
print(f" result: {result}")
print(f" time: {(end_t - start_t):.4f}")
return 0
def _main():
return exit(main(*cli.parse_args()))
if __name__ == "__main__":
exit(_main())
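# Flow summary: parse the property, report any unused CLI arguments, parse and
# simplify each named network, require exactly one verifier, run it while timing
# the call (translator/verifier errors become the result string), and save the
# counterexample when the result is SAT and `args.save_violation` is set.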
|
{"hexsha": "6511427db43af28d646ff090348985ad3ee31670", "size": 2650, "ext": "py", "lang": "Python", "max_stars_repo_path": "dnnv/__main__.py", "max_stars_repo_name": "nathzi1505/DNNV", "max_stars_repo_head_hexsha": "16c6e6ecb681ce66196f9274d4a43eede8686319", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-12-13T18:54:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T06:29:29.000Z", "max_issues_repo_path": "dnnv/__main__.py", "max_issues_repo_name": "nathzi1505/DNNV", "max_issues_repo_head_hexsha": "16c6e6ecb681ce66196f9274d4a43eede8686319", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-01-30T14:06:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T01:07:37.000Z", "max_forks_repo_path": "dnnv/__main__.py", "max_forks_repo_name": "nathzi1505/DNNV", "max_forks_repo_head_hexsha": "16c6e6ecb681ce66196f9274d4a43eede8686319", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-04-08T01:57:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T09:35:02.000Z", "avg_line_length": 31.1764705882, "max_line_length": 83, "alphanum_fraction": 0.6396226415, "include": true, "reason": "import numpy", "num_tokens": 640}
|
// Copyright (c) 2020 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_TEST_IPC_WAIT_TEST_HELPERS_HPP_INCLUDED_
#define BOOST_ATOMIC_TEST_IPC_WAIT_TEST_HELPERS_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/ipc_atomic_flag.hpp>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <algorithm>
#include <boost/config.hpp>
#include <boost/chrono/chrono.hpp>
#include <boost/bind/bind.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/barrier.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/type_traits/integral_constant.hpp>
#include "atomic_wrapper.hpp"
#include "lightweight_test_stream.hpp"
#include "test_clock.hpp"
//! Since some of the tests below are allowed to fail, we retry up to this many times to pass the test
BOOST_CONSTEXPR_OR_CONST unsigned int test_retry_count = 5u;
//! The test verifies that the wait operation returns immediately if the passed value does not match the atomic value
template< template< typename > class Wrapper, typename T >
inline void test_wait_value_mismatch(T value1, T value2)
{
Wrapper< T > m_wrapper(value1);
T received_value = m_wrapper.a.wait(value2);
BOOST_TEST(received_value == value1);
}
/*!
* The test verifies that notify_one releases one blocked thread and that the released thread receives the modified atomic value.
*
* Technically, this test is allowed to fail since wait() is allowed to return spuriously. However, normally this should not happen.
*/
template< template< typename > class Wrapper, typename T >
class notify_one_test
{
private:
struct thread_state
{
T m_received_value;
test_clock::time_point m_wakeup_time;
explicit thread_state(T value) : m_received_value(value)
{
}
};
private:
Wrapper< T > m_wrapper;
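    // Padding between the atomic wrapper and the rest of the test state, so the
    // bookkeeping below does not share a cache line with the atomic being waited on.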
char m_padding[1024];
T m_value1, m_value2, m_value3;
boost::barrier m_barrier;
thread_state m_thread1_state;
thread_state m_thread2_state;
public:
explicit notify_one_test(T value1, T value2, T value3) :
m_wrapper(value1),
m_value1(value1),
m_value2(value2),
m_value3(value3),
m_barrier(3),
m_thread1_state(value1),
m_thread2_state(value1)
{
}
bool run()
{
boost::thread thread1(¬ify_one_test::thread_func, this, &m_thread1_state);
boost::thread thread2(¬ify_one_test::thread_func, this, &m_thread2_state);
m_barrier.wait();
test_clock::time_point start_time = test_clock::now();
boost::this_thread::sleep_for(chrono::milliseconds(200));
m_wrapper.a.store(m_value2, boost::memory_order_release);
m_wrapper.a.notify_one();
boost::this_thread::sleep_for(chrono::milliseconds(200));
m_wrapper.a.store(m_value3, boost::memory_order_release);
m_wrapper.a.notify_one();
if (!thread1.try_join_for(chrono::seconds(3)))
{
BOOST_ERROR("Thread 1 failed to join");
std::abort();
}
if (!thread2.try_join_for(chrono::seconds(3)))
{
BOOST_ERROR("Thread 2 failed to join");
std::abort();
}
thread_state* first_state = &m_thread1_state;
thread_state* second_state = &m_thread2_state;
if (second_state->m_wakeup_time < first_state->m_wakeup_time)
std::swap(first_state, second_state);
if (m_wrapper.a.has_native_wait_notify())
{
if ((first_state->m_wakeup_time - start_time) < chrono::milliseconds(200))
{
std::cout << "notify_one_test: first thread woke up too soon: " << chrono::duration_cast< chrono::milliseconds >(first_state->m_wakeup_time - start_time).count() << " ms" << std::endl;
return false;
}
if ((first_state->m_wakeup_time - start_time) >= chrono::milliseconds(400))
{
std::cout << "notify_one_test: first thread woke up too late: " << chrono::duration_cast< chrono::milliseconds >(first_state->m_wakeup_time - start_time).count() << " ms" << std::endl;
return false;
}
if ((second_state->m_wakeup_time - start_time) < chrono::milliseconds(400))
{
std::cout << "notify_one_test: second thread woke up too soon: " << chrono::duration_cast< chrono::milliseconds >(second_state->m_wakeup_time - start_time).count() << " ms" << std::endl;
return false;
}
BOOST_TEST_EQ(first_state->m_received_value, m_value2);
BOOST_TEST_EQ(second_state->m_received_value, m_value3);
}
else
{
// With the emulated wait/notify the threads are most likely to return prior to notify
BOOST_TEST(first_state->m_received_value == m_value2 || first_state->m_received_value == m_value3);
BOOST_TEST(second_state->m_received_value == m_value2 || second_state->m_received_value == m_value3);
}
return true;
}
private:
void thread_func(thread_state* state)
{
m_barrier.wait();
state->m_received_value = m_wrapper.a.wait(m_value1);
state->m_wakeup_time = test_clock::now();
}
};
template< template< typename > class Wrapper, typename T >
inline void test_notify_one(T value1, T value2, T value3)
{
for (unsigned int i = 0u; i < test_retry_count; ++i)
{
notify_one_test< Wrapper, T > test(value1, value2, value3);
if (test.run())
return;
}
BOOST_ERROR("notify_one_test could not complete because blocked thread wake up too soon");
}
/*!
* The test verifies that notify_all releases all blocked threads and that the released threads receive the modified atomic value.
*
* Technically, this test is allowed to fail since wait() is allowed to return spuriously. However, normally this should not happen.
*/
template< template< typename > class Wrapper, typename T >
class notify_all_test
{
private:
struct thread_state
{
T m_received_value;
test_clock::time_point m_wakeup_time;
explicit thread_state(T value) : m_received_value(value)
{
}
};
private:
Wrapper< T > m_wrapper;
char m_padding[1024];
T m_value1, m_value2;
boost::barrier m_barrier;
thread_state m_thread1_state;
thread_state m_thread2_state;
public:
explicit notify_all_test(T value1, T value2) :
m_wrapper(value1),
m_value1(value1),
m_value2(value2),
m_barrier(3),
m_thread1_state(value1),
m_thread2_state(value1)
{
}
bool run()
{
boost::thread thread1(¬ify_all_test::thread_func, this, &m_thread1_state);
boost::thread thread2(¬ify_all_test::thread_func, this, &m_thread2_state);
m_barrier.wait();
test_clock::time_point start_time = test_clock::now();
boost::this_thread::sleep_for(chrono::milliseconds(200));
m_wrapper.a.store(m_value2, boost::memory_order_release);
m_wrapper.a.notify_all();
if (!thread1.try_join_for(chrono::seconds(3)))
{
BOOST_ERROR("Thread 1 failed to join");
std::abort();
}
if (!thread2.try_join_for(chrono::seconds(3)))
{
BOOST_ERROR("Thread 2 failed to join");
std::abort();
}
if (m_wrapper.a.has_native_wait_notify())
{
if ((m_thread1_state.m_wakeup_time - start_time) < chrono::milliseconds(200))
{
std::cout << "notify_all_test: first thread woke up too soon: " << chrono::duration_cast< chrono::milliseconds >(m_thread1_state.m_wakeup_time - start_time).count() << " ms" << std::endl;
return false;
}
if ((m_thread2_state.m_wakeup_time - start_time) < chrono::milliseconds(200))
{
std::cout << "notify_all_test: second thread woke up too soon: " << chrono::duration_cast< chrono::milliseconds >(m_thread2_state.m_wakeup_time - start_time).count() << " ms" << std::endl;
return false;
}
}
BOOST_TEST_EQ(m_thread1_state.m_received_value, m_value2);
BOOST_TEST_EQ(m_thread2_state.m_received_value, m_value2);
return true;
}
private:
void thread_func(thread_state* state)
{
m_barrier.wait();
state->m_received_value = m_wrapper.a.wait(m_value1);
state->m_wakeup_time = test_clock::now();
}
};
template< template< typename > class Wrapper, typename T >
inline void test_notify_all(T value1, T value2)
{
for (unsigned int i = 0u; i < test_retry_count; ++i)
{
notify_all_test< Wrapper, T > test(value1, value2);
if (test.run())
return;
}
BOOST_ERROR("notify_all_test could not complete because blocked thread wake up too soon");
}
//! Invokes all wait/notify tests
template< template< typename > class Wrapper, typename T >
void test_wait_notify_api(T value1, T value2, T value3, boost::true_type)
{
test_wait_value_mismatch< Wrapper >(value1, value2);
test_notify_one< Wrapper >(value1, value2, value3);
test_notify_all< Wrapper >(value1, value2);
}
template< template< typename > class Wrapper, typename T >
inline void test_wait_notify_api(T value1, T value2, T value3, boost::false_type)
{
}
//! Invokes all wait/notify tests, if the atomic type is lock-free
template< template< typename > class Wrapper, typename T >
inline void test_wait_notify_api(T value1, T value2, T value3)
{
test_wait_notify_api< Wrapper >(value1, value2, value3, boost::integral_constant< bool, Wrapper< T >::atomic_type::is_always_lock_free >());
}
inline void test_flag_wait_notify_api()
{
#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#ifndef BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
boost::ipc_atomic_flag f = BOOST_ATOMIC_FLAG_INIT;
#else
boost::ipc_atomic_flag f;
#endif
bool received_value = f.wait(true);
BOOST_TEST(!received_value);
f.notify_one();
f.notify_all();
#endif // BOOST_ATOMIC_FLAG_LOCK_FREE == 2
}
#endif // BOOST_ATOMIC_TEST_IPC_WAIT_TEST_HELPERS_HPP_INCLUDED_
|
{"hexsha": "2970061e4ad4f4dc42fa0196749dbea47df21e1a", "size": 10398, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/atomic/test/ipc_wait_test_helpers.hpp", "max_stars_repo_name": "anarthal/boost-unix-mirror", "max_stars_repo_head_hexsha": "8c34eb2fe471d6c3113c680c1fbef29e7a8063a0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 106.0, "max_stars_repo_stars_event_min_datetime": "2015-08-07T04:23:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T18:25:15.000Z", "max_issues_repo_path": "libs/atomic/test/ipc_wait_test_helpers.hpp", "max_issues_repo_name": "anarthal/boost-unix-mirror", "max_issues_repo_head_hexsha": "8c34eb2fe471d6c3113c680c1fbef29e7a8063a0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 130.0, "max_issues_repo_issues_event_min_datetime": "2016-06-22T22:11:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-29T20:24:09.000Z", "max_forks_repo_path": "Libs/boost_1_76_0/libs/atomic/test/ipc_wait_test_helpers.hpp", "max_forks_repo_name": "Antd23rus/S2DE", "max_forks_repo_head_hexsha": "47cc7151c2934cd8f0399a9856c1e54894571553", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41.0, "max_forks_repo_forks_event_min_datetime": "2015-07-08T19:18:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T16:39:56.000Z", "avg_line_length": 31.8957055215, "max_line_length": 204, "alphanum_fraction": 0.6633006347, "num_tokens": 2438}
|
# Purpose: This script develops a list of suspect parcels to investigate and exclude, based on their having
#          null values for key indicators. This usually arises for study region edge cases with poor
#          connectivity to the road network; the implication is that these are not adequate representations of
#          residential parcels (we cannot reliably link them to the road network), so they are best excluded.
# Author: Carl Higgs
import arcpy
import os
import sys
import time
import psycopg2
import numpy as np
from progressor import progressor
from script_running_log import script_running_log
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(os.path.join(sys.path[0],'config.ini'))
# simple timer for log file
start = time.time()
script = os.path.basename(sys.argv[0])
task = "Create list of exlcuded parcels, based on null values for indicators"
# INPUT PARAMETERS
## specify locations
points = parser.get('parcels','parcel_dwellings')
pointsID = parser.get('parcels', 'parcel_id')
# SQL Settings - storing passwords in plain text is obviously not ideal
sqlDBName = parser.get('postgresql', 'database')
sqlUserName = parser.get('postgresql', 'user')
sqlPWD = parser.get('postgresql', 'password')
# output tables
# In this table detail_pid is not unique --- the idea is that jointly with indicator, detail_pid will be unique; such that we can see which if any parcels are missing multiple indicator values, and we can use this list to determine how many null values each indicator contains (ie. the number of detail_pids for that indicator)
# The number of excluded parcels can be determined through selection of COUNT(DISTINCT(detail_pid))
createTable_exclusions = '''
DROP TABLE IF EXISTS excluded_parcels;
CREATE TABLE excluded_parcels
({0} varchar NOT NULL,
indicator varchar NOT NULL,
PRIMARY KEY({0},indicator));
'''.format(pointsID.lower())
qA = "INSERT INTO excluded_parcels SELECT a.detail_pid, '"
qB = "\nFROM parcelmb AS a LEFT JOIN "
qC = " AS b \n ON a.detail_pid = b.detail_pid \n WHERE "
qD = " IS NULL ON CONFLICT (detail_pid,indicator) DO NOTHING "
# exclude on null indicator, and on null distance
query = '''
{0} walkability' {1} ind_walkability_hard {2} walkability {3};
{0} si_mix' {1} ind_si_mix_hard {2} si_mix {3};
{0} dest_pt' {1} ind_dest_pt_hard {2} dest_pt {3};
{0} walkability' {1} ind_walkability_soft {2} walkability {3};
{0} si_mix' {1} ind_si_mix_soft {2} si_mix {3};
{0} dest_pt' {1} ind_dest_pt_soft {2} dest_pt {3};
{0} pos_greq15000m2_in_400m_soft' {1} ind_pos {2} pos_greq15000m2_in_400m_soft {3};
{0} dest_distance' {1} dest_distance {2} NOT (b IS NOT NULL);
{0} sa1_7dig11' {1} abs_linkage ON a.mb_code11 = abs_linkage.mb_code11
WHERE abs_linkage.sa1_7dig11 NOT IN (SELECT sa1_7dig11 FROM abs_2011_irsd)
ON CONFLICT (detail_pid,indicator) DO NOTHING;
'''.format(qA,qB,qC,qD)
# OUTPUT PROCESS
conn = psycopg2.connect(database=sqlDBName, user=sqlUserName, password=sqlPWD)
curs = conn.cursor()
curs.execute(createTable_exclusions)
conn.commit()
curs.execute(query)
conn.commit()
# output to completion log
script_running_log(script, task, start)
# clean up
conn.close()
|
{"hexsha": "f6de0543f78452d2cff3b6f7102cce09515a8ecf", "size": 3538, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/33_exclude_parcels.py", "max_stars_repo_name": "carlhiggs/urban_liveability_index", "max_stars_repo_head_hexsha": "61decb632e0b0db28c181fe62c548f2b338cc47c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/33_exclude_parcels.py", "max_issues_repo_name": "carlhiggs/urban_liveability_index", "max_issues_repo_head_hexsha": "61decb632e0b0db28c181fe62c548f2b338cc47c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/33_exclude_parcels.py", "max_forks_repo_name": "carlhiggs/urban_liveability_index", "max_forks_repo_head_hexsha": "61decb632e0b0db28c181fe62c548f2b338cc47c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7528089888, "max_line_length": 327, "alphanum_fraction": 0.6738270209, "include": true, "reason": "import numpy", "num_tokens": 870}
|
"""
Quick script to filter all bulk data to just save job ids which we used in our skill data sample.
"""
from tqdm import tqdm
import json
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import boto3
from skills_taxonomy_v2.getters.s3_data import load_s3_data, save_to_s3
from skills_taxonomy_v2 import BUCKET_NAME
s3 = boto3.resource("s3")
if __name__ == "__main__":
# All 5 million job adverts in the original sample
original_sample = load_s3_data(s3, BUCKET_NAME, "outputs/tk_sample_data/sample_file_locations.json")
replacements_sample = load_s3_data(s3, BUCKET_NAME, "outputs/tk_sample_data/sample_file_locations_expired_replacements.json")
skill_job_ads = set([v for s in original_sample.values() for v in s] + [v for s in replacements_sample.values() for v in s])
# Takes up a fair amount of memory, so do separately
job_dates = defaultdict(list)
for file_name in tqdm(range(0, 14)):
# Job dates
date_dict = load_s3_data(
s3, BUCKET_NAME, f"outputs/tk_data_analysis_new_method/metadata_date/{file_name}.json"
)
for job_id, date_list in date_dict.items():
if job_id in skill_job_ads:
job_dates[job_id].append(date_list)
print(len(job_dates))
save_to_s3(
s3,
BUCKET_NAME,
job_dates,
"outputs/tk_data_analysis_new_method/metadata_date/with_replacements/sample_filtered_2021.11.05_from_metadata.json",
)
job_locations = defaultdict(list)
for file_name in tqdm(range(0, 14)):
# Job regions
region_dict = load_s3_data(
s3, BUCKET_NAME, f"outputs/tk_data_analysis_new_method/metadata_location/{file_name}.json"
)
for job_id, region_list in region_dict.items():
if job_id in skill_job_ads:
job_locations[job_id].append(region_list)
print(len(job_locations))
save_to_s3(
s3,
BUCKET_NAME,
job_locations,
"outputs/tk_data_analysis_new_method/metadata_location/with_replacements/sample_filtered_2021.11.05_from_metadata.json",
)
job_titles = defaultdict(list)
for file_name in tqdm(range(0, 14)):
# Job titles
titles_dict = load_s3_data(
s3, BUCKET_NAME, f"outputs/tk_data_analysis_new_method/metadata_job/{file_name}.json"
)
for job_id, titles_list in titles_dict.items():
if job_id in skill_job_ads:
job_titles[job_id].append(titles_list)
print(len(job_titles))
save_to_s3(
s3,
BUCKET_NAME,
job_titles,
"outputs/tk_data_analysis_new_method/metadata_job/with_replacements/sample_filtered_2021.11.05_from_metadata.json",
)
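    # The three passes above differ only in the metadata folder ("date",
    # "location", "job") and the output path; a generalised helper like this
    # sketch could replace them (defined for clarity, not called here):
    def filter_metadata(kind, n_files=14):
        filtered = defaultdict(list)
        for file_name in range(n_files):
            d = load_s3_data(
                s3,
                BUCKET_NAME,
                f"outputs/tk_data_analysis_new_method/metadata_{kind}/{file_name}.json",
            )
            for job_id, values in d.items():
                if job_id in skill_job_ads:
                    filtered[job_id].append(values)
        return filtered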
|
{"hexsha": "9abbfc33d60d38fd5c210a922e204852f11a9154", "size": 2784, "ext": "py", "lang": "Python", "max_stars_repo_path": "skills_taxonomy_v2/pipeline/tk_data_analysis/filter_bulk_data.py", "max_stars_repo_name": "nestauk/skills-taxonomy-v2", "max_stars_repo_head_hexsha": "ce0f9943a038c4539f04a9a58022fc7eb1909376", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-21T17:21:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T21:19:57.000Z", "max_issues_repo_path": "skills_taxonomy_v2/pipeline/tk_data_analysis/filter_bulk_data.py", "max_issues_repo_name": "nestauk/skills-taxonomy-v2", "max_issues_repo_head_hexsha": "ce0f9943a038c4539f04a9a58022fc7eb1909376", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-10-06T11:20:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T11:44:28.000Z", "max_forks_repo_path": "skills_taxonomy_v2/pipeline/tk_data_analysis/filter_bulk_data.py", "max_forks_repo_name": "nestauk/skills-taxonomy-v2", "max_forks_repo_head_hexsha": "ce0f9943a038c4539f04a9a58022fc7eb1909376", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-04T12:27:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-04T12:27:20.000Z", "avg_line_length": 34.8, "max_line_length": 129, "alphanum_fraction": 0.6903735632, "include": true, "reason": "import numpy", "num_tokens": 674}
|
from typing import Any, Dict, Hashable, Optional, Sequence, Tuple, Union
import dask.array as da
import numpy as np
import xarray as xr
from dask.array import Array
from numpy import ndarray
from xarray import Dataset
from ..typing import ArrayLike
from ..utils import split_array_chunks
from .utils import (
assert_array_shape,
assert_block_shape,
assert_chunk_shape,
concat_2d,
r2_score,
)
def index_array_blocks(
x: Union[ArrayLike, Sequence[int]], size: int
) -> Tuple[ndarray, ndarray]:
"""Generate indexes for blocks that partition an array within groups.
Given an array with monotonic increasing group assignments (as integers),
this function will generate the indexes of blocks within those groups that
are of at most `size` elements.
Parameters
----------
x : Union[ArrayLike, Sequence[int]]
Vector of group assignments, must be monotonic increasing.
Resulting blocks will never cross these group assignments
and the resulting `index` and `sizes` values constitute
covering slices for any array of the same size as `x`.
size : int
Maximum block size.
Examples
--------
>>> from sgkit.stats.regenie import index_array_blocks
>>> index_array_blocks([0, 0, 0], 2)
(array([0, 2]), array([2, 1]))
>>> index_array_blocks([0, 0, 1, 1, 1], 2)
(array([0, 2, 4]), array([2, 2, 1]))
Returns
-------
index : ndarray
Array of indexes for each block start
sizes : ndarray
Size of block such that `x[index[0]:(index[0] + sizes[0])]` contains
every element in block 0
Raises
------
ValueError
If `x` is not 1D.
ValueError
If `size` is <= 0.
ValueError
If `x` does not contain integers.
ValueError
If `x` is not monotonic increasing.
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(0, dtype=int), np.empty(0, dtype=int)
if x.ndim != 1:
raise ValueError(f"Array shape {x.shape} is not 1D")
if size <= 0:
raise ValueError(f"Block size {size} must be > 0")
if not np.issubdtype(x.dtype, np.integer):
raise ValueError("Array to partition must contain integers")
if np.any(np.diff(x) < 0):
raise ValueError("Array to partition must be monotonic increasing")
breaks = np.argwhere(np.diff(x, prepend=x[0]))[:, 0]
breaks = np.concatenate(([0], breaks, [x.size]))
index = np.concatenate(
[np.arange(breaks[i], breaks[i + 1], size) for i in range(breaks.size - 1)]
)
sizes = np.diff(index, append=x.size)
assert index.size == sizes.size
return index, sizes
def index_block_sizes(
sizes: Union[ArrayLike, Sequence[int]]
) -> Tuple[ndarray, ndarray]:
"""Generate indexes for blocks of specific sizes.
Parameters
----------
sizes : Union[ArrayLike, Sequence[int]]
Block sizes to generate indexes for.
Examples
--------
>>> from sgkit.stats.regenie import index_block_sizes
>>> index_block_sizes([3, 4, 5])
(array([0, 3, 7]), array([3, 4, 5]))
Returns
-------
index : ndarray
Array of indexes for each block start.
sizes : ndarray
Size of block such that `x[index[0]:(index[0] + sizes[0])]` contains
every element in block 0.
Raises
------
ValueError
If any value in `sizes` is <= 0.
ValueError
If `sizes` does not contain integers.
"""
sizes = np.asarray(sizes)
if np.any(sizes <= 0):
raise ValueError("All block sizes must be >= 0")
if not np.issubdtype(sizes.dtype, np.integer):
raise ValueError("Block sizes must be integers")
chunks = np.concatenate([np.array([0]), sizes])
index = np.cumsum(chunks)[:-1]
assert index.size == sizes.size
return index, sizes
def ridge_regression(
XtX: ArrayLike,
XtY: ArrayLike,
alphas: Union[ArrayLike, Sequence[float]],
n_zero_reg: Optional[int] = None,
dtype: Any = None,
) -> ArrayLike:
"""Multi-outcome, multi-parameter ridge regression from CV intermediates."""
if XtX.shape[0] != XtX.shape[1]:
raise ValueError(f"First argument must be symmetric (shape = {XtX.shape})")
if XtX.shape[0] != XtY.shape[0]:
raise ValueError("Array arguments must have same size in first dimension")
diags = []
n_alpha, n_obs, n_outcome = len(alphas), XtX.shape[0], XtY.shape[1]
for i in range(n_alpha):
diag = np.ones(XtX.shape[1]) * alphas[i]
if n_zero_reg:
# Optionally fix regularization for leading covariates
# TODO: This should probably be zero for consistency
# with orthogonalization, see:
# https://github.com/projectglow/glow/issues/266
diag[:n_zero_reg] = 1
diags.append(np.diag(diag))
diags = np.stack(diags)
B = np.linalg.inv(XtX + diags) @ XtY
B = B.astype(dtype or XtX.dtype)
assert_array_shape(B, n_alpha, n_obs, n_outcome)
return B
def get_alphas(
n_cols: int, heritability: Sequence[float] = [0.99, 0.75, 0.50, 0.25, 0.01]
) -> ndarray:
# https://github.com/projectglow/glow/blob/f3edf5bb8fe9c2d2e1a374d4402032ba5ce08e29/python/glow/wgr/linear_model/ridge_model.py#L80
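    # Heuristic: one ridge penalty per assumed heritability h, alpha = n_cols / h,
    # so a lower assumed heritability implies stronger regularization.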
return np.array([n_cols / h for h in heritability])
def stack(x: Array) -> Array:
"""Stack blocks as new leading array axis"""
return da.stack([x.blocks[i] for i in range(x.numblocks[0])])
def unstack(x: Array) -> Array:
"""Unstack leading array axis into blocks"""
return da.concatenate([x.blocks[i][0] for i in range(x.numblocks[0])])
def _ridge_regression_cv(
X: Array, Y: Array, alphas: ndarray, n_zero_reg: Optional[int] = None
) -> Tuple[Array, Array, Array, Array]:
assert alphas.ndim == 1
assert X.ndim == 2
assert Y.ndim == 2
assert X.numblocks[1] == 1
assert Y.numblocks[1] == 1
assert X.chunks[0] == Y.chunks[0]
n_block, n_obs, n_covar, n_outcome, n_alpha = (
X.numblocks[0],
X.shape[0],
X.shape[1],
Y.shape[1],
alphas.shape[0],
)
obs_chunks = X.chunks[0]
# Project samples and outcomes noting that resulting chunks are
# of fixed size even if the chunks along the observation dim
# are not uniform (i.e. |X.chunks[0]| != 1)
XtX = stack(da.map_blocks(lambda x: x.T @ x, X, chunks=(X.shape[1],) * 2))
assert_block_shape(XtX, n_block, 1, 1)
assert_chunk_shape(XtX, 1, n_covar, n_covar)
XtY = stack(da.map_blocks(lambda x, y: x.T @ y, X, Y, chunks=(n_covar, n_outcome)))
assert_block_shape(XtY, n_block, 1, 1)
assert_chunk_shape(XtY, 1, n_covar, n_outcome)
# Invert the projections in each block so that each
# contains data from all other blocks *except* itself
XtX = unstack(XtX.sum(axis=0) - XtX)
assert_block_shape(XtX, n_block, 1)
assert_chunk_shape(XtX, n_covar, n_covar)
XtY = unstack(XtY.sum(axis=0) - XtY)
assert_block_shape(XtY, n_block, 1)
assert_chunk_shape(XtY, n_covar, n_outcome)
assert XtX.numblocks == XtY.numblocks
# Regress for all outcomes/alphas and add new axis for ridge parameters
B = da.map_blocks(
ridge_regression,
XtX,
XtY,
chunks=(n_alpha, n_covar, n_outcome),
new_axis=[0],
alphas=alphas,
n_zero_reg=n_zero_reg,
)
assert_block_shape(B, 1, n_block, 1)
assert_chunk_shape(B, n_alpha, n_covar, n_outcome)
assert_array_shape(B, n_alpha, n_block * n_covar, n_outcome)
# Generate predictions for all outcomes/alphas
assert B.numblocks == (1,) + X.numblocks
YP = da.map_blocks(
lambda x, b: x @ b, X, B, chunks=(alphas.size, obs_chunks, n_outcome)
)
assert_block_shape(YP, 1, n_block, 1)
assert_chunk_shape(YP, n_alpha, obs_chunks[0], n_outcome)
assert_array_shape(YP, n_alpha, n_obs, n_outcome)
return XtX, XtY, B, YP
def _stage_1(G: Array, X: Array, Y: Array, alphas: Optional[ndarray] = None) -> Array:
"""Stage 1 - WGR Base Regression
This stage will predict outcomes separately for each alpha parameter and variant
block. This "compresses" the variant dimension into a smaller space that is
much more amenable to efficient blockwise regressions in stage 2. Another
interpretation for this operation is that all sample blocks are treated
as folds in a K-fold CV fit within one single variant block. Predictions for
any one combination of variant and sample block then correspond to a
    regression model fit across all sample blocks for that range of variants,
    excluding that sample block. In other words, the predictions are
out of sample which enables training of a stage 2 regressor based on
these predictions, a technique commonly referred to as stacking.
For more details, see the level 0 regression model described in step 1
of [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert G.ndim == 2
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert G.shape[0] == X.shape[0] == Y.shape[0]
assert G.numblocks[0] == X.numblocks[0] == Y.numblocks[0]
assert G.chunks[0] == X.chunks[0] == Y.chunks[0]
assert X.numblocks[1] == Y.numblocks[1] == 1
if alphas is None:
alphas = get_alphas(G.shape[1])
# Extract shape statistics
n_sample = G.shape[0]
n_outcome = Y.shape[1]
n_alpha = alphas.size
n_sample_block = G.numblocks[0]
n_variant_block = G.numblocks[1]
sample_chunks = Y.chunks[0]
YP = []
for i in range(n_variant_block):
# Extract all sample blocks for one variant block
GB = G.blocks[:, i]
# Prepend covariates and chunk along first dim only
XGB = da.concatenate((X, GB), axis=1)
XGB = XGB.rechunk(chunks=(None, -1))
# Fit and predict folds for each parameter and outcome
YPB = _ridge_regression_cv(XGB, Y, alphas, n_zero_reg=X.shape[1])[-1]
assert_block_shape(YPB, 1, n_sample_block, 1)
assert_chunk_shape(YPB, n_alpha, sample_chunks[0], n_outcome)
assert_array_shape(YPB, n_alpha, n_sample, n_outcome)
YP.append(YPB)
# Stack as (n_variant_block, n_alpha, n_sample, n_outcome)
YP = da.stack(YP, axis=0)
assert_block_shape(YP, n_variant_block, 1, n_sample_block, 1)
assert_chunk_shape(YP, 1, n_alpha, sample_chunks[0], n_outcome)
assert_array_shape(YP, n_variant_block, n_alpha, n_sample, n_outcome)
return YP
def _stage_2(
YP: Array,
X: Array,
Y: Array,
alphas: Optional[ndarray] = None,
normalize: bool = True,
_glow_adj_alpha: bool = False,
_glow_adj_scaling: bool = False,
) -> Tuple[Array, Array]:
"""Stage 2 - WGR Meta Regression
This stage will train separate ridge regression models for each outcome
using the predictions from stage 1 for that same outcome as features. These
predictions are then evaluated based on R2 score to determine an optimal
"meta" estimator (see `_stage_1` for the "base" estimator description). Results
then include only predictions and coefficients from this optimal model.
For more details, see the level 1 regression model described in step 1
of [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert YP.ndim == 4
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert YP.numblocks[2] == X.numblocks[0] == Y.numblocks[0]
assert YP.chunks[2] == X.chunks[0] == Y.chunks[0]
# Assert single chunks for covariates and outcomes
assert X.numblocks[1] == Y.numblocks[1] == 1
# Extract shape statistics
n_variant_block, n_alpha_1 = YP.shape[:2]
n_sample_block = Y.numblocks[0]
n_sample, n_outcome = Y.shape
n_covar = X.shape[1]
n_indvar = n_covar + n_variant_block * n_alpha_1
sample_chunks = Y.chunks[0]
if normalize:
assert_block_shape(YP, n_variant_block, 1, n_sample_block, 1)
assert_chunk_shape(YP, 1, n_alpha_1, sample_chunks[0], n_outcome)
# See: https://github.com/projectglow/glow/issues/260
if _glow_adj_scaling:
YP = da.map_blocks(
lambda x: (x - x.mean(axis=2, keepdims=True))
/ x.std(axis=2, keepdims=True),
YP,
)
else:
YP = (YP - YP.mean(axis=2, keepdims=True)) / YP.std(axis=2, keepdims=True)
    # Transpose for refit on level 1 predictions
YP = YP.transpose((3, 2, 0, 1))
assert_array_shape(YP, n_outcome, n_sample, n_variant_block, n_alpha_1)
if alphas is None:
# See: https://github.com/projectglow/glow/issues/255
if _glow_adj_alpha:
alphas = get_alphas(n_variant_block * n_alpha_1 * n_outcome)
else:
alphas = get_alphas(n_variant_block * n_alpha_1)
n_alpha_2 = alphas.size
YR = []
BR = []
for i in range(n_outcome):
# Slice and reshape to new 2D covariate matrix;
# The order of raveling in trailing dimensions is important
# and later reshapes will assume variants, alphas order
XPB = YP[i].reshape((n_sample, n_variant_block * n_alpha_1))
# Prepend covariates and chunk along first dim only
XPB = da.concatenate((X, XPB), axis=1)
XPB = XPB.rechunk(chunks=(None, -1))
assert_array_shape(XPB, n_sample, n_indvar)
assert XPB.numblocks == (n_sample_block, 1)
# Extract outcome vector
YB = Y[:, [i]]
assert XPB.ndim == YB.ndim == 2
# Fit and predict folds for each parameter
BB, YPB = _ridge_regression_cv(XPB, YB, alphas, n_zero_reg=n_covar)[-2:]
assert_array_shape(BB, n_alpha_2, n_sample_block * n_indvar, 1)
assert_array_shape(YPB, n_alpha_2, n_sample, 1)
BR.append(BB)
YR.append(YPB)
# Concatenate predictions along outcome dimension
YR = da.concatenate(YR, axis=2)
assert_block_shape(YR, 1, n_sample_block, n_outcome)
assert_chunk_shape(YR, n_alpha_2, sample_chunks[0], 1)
assert_array_shape(YR, n_alpha_2, n_sample, n_outcome)
# Move samples to last dim so all others are batch
# dims for R2 calculations
YR = da.transpose(YR, (0, 2, 1))
assert_array_shape(YR, n_alpha_2, n_outcome, n_sample)
YR = YR.rechunk((-1, -1, None))
assert_block_shape(YR, 1, 1, n_sample_block)
assert YR.shape[1:] == Y.T.shape
# Concatenate betas along outcome dimension
BR = da.concatenate(BR, axis=2)
assert_block_shape(BR, 1, n_sample_block, n_outcome)
assert_chunk_shape(BR, n_alpha_2, n_indvar, 1)
assert_array_shape(BR, n_alpha_2, n_sample_block * n_indvar, n_outcome)
# Compute R2 scores within each sample block for each outcome + alpha
R2 = da.stack(
[
r2_score(YR.blocks[..., i], Y.T.blocks[..., i])
# Avoid warnings on R2 calculations for blocks with single rows
if YR.chunks[-1][i] > 1 else da.full(YR.shape[:-1], np.nan)
for i in range(n_sample_block)
]
)
assert_array_shape(R2, n_sample_block, n_alpha_2, n_outcome)
# Coerce to finite or nan before nan-aware mean
R2 = da.where(da.isfinite(R2), R2, np.nan)
# Find highest mean alpha score for each outcome across blocks
R2M = da.nanmean(R2, axis=0)
assert_array_shape(R2M, n_alpha_2, n_outcome)
# Identify index for the alpha value with the highest mean score
R2I = da.argmax(R2M, axis=0)
assert_array_shape(R2I, n_outcome)
# Choose the predictions corresponding to the model with best score
YRM = da.stack([YR[R2I[i], i, :] for i in range(n_outcome)], axis=-1)
YRM = YRM.rechunk((None, -1))
assert_block_shape(YRM, n_sample_block, 1)
assert_chunk_shape(YRM, sample_chunks[0], n_outcome)
assert_array_shape(YRM, n_sample, n_outcome)
# Choose the betas corresponding to the model with the best score
BRM = da.stack([BR[R2I[i], :, i] for i in range(n_outcome)], axis=-1)
BRM = BRM.rechunk((None, -1))
assert_block_shape(BRM, n_sample_block, 1)
assert_chunk_shape(BRM, n_indvar, n_outcome)
assert_array_shape(BRM, n_sample_block * n_indvar, n_outcome)
return BRM, YRM
def _stage_3(
B: Array,
YP: Array,
X: Array,
Y: Array,
contigs: Array,
variant_chunk_start: ndarray,
) -> Optional[Array]:
"""Stage 3 - Leave-one-chromosome-out (LOCO) Estimation
This stage will use the coefficients for the optimal model in
stage 2 to re-estimate predictions in a LOCO scheme. This scheme
involves omitting coefficients that correspond to all variant
blocks for a single chromosome in the stage 2 model and then
recomputing predictions without those coefficients.
For more details, see the "LOCO predictions" section of the Supplementary Methods
in [Mbatchou et al. 2020](https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2).
"""
assert B.ndim == 2
assert YP.ndim == 4
assert X.ndim == 2
assert Y.ndim == 2
# Check that chunking across samples is the same for all arrays
assert B.numblocks[0] == YP.numblocks[2] == X.numblocks[0] == Y.numblocks[0]
assert YP.chunks[2] == X.chunks[0] == Y.chunks[0]
# Extract shape statistics
sample_chunks = Y.chunks[0]
n_covar = X.shape[1]
n_variant_block, n_alpha_1 = YP.shape[:2]
n_indvar = n_covar + n_variant_block * n_alpha_1
n_sample_block = Y.numblocks[0]
n_sample, n_outcome = Y.shape
# Determine unique contigs to create LOCO estimates for
contigs = np.asarray(contigs)
unique_contigs = np.unique(contigs)
n_contig = len(unique_contigs)
if n_contig <= 1:
# Return nothing w/o at least 2 contigs
return None
assert n_variant_block == len(variant_chunk_start)
# Create vector of size `n_variant_block` where value
# at index i corresponds to contig for variant block i
variant_block_contigs = contigs[variant_chunk_start]
# Transform coefficients (B) such that trailing dimensions
# contain right half of matrix product for prediction:
# (n_sample_block * n_indvar, n_outcome) ->
# (n_outcome, n_sample_block, n_indvar)
B = da.stack([B.blocks[i] for i in range(n_sample_block)], axis=0)
assert_block_shape(B, n_sample_block, 1, 1)
assert_chunk_shape(B, 1, n_indvar, n_outcome)
assert_array_shape(B, n_sample_block, n_indvar, n_outcome)
B = da.transpose(B, (2, 0, 1))
assert_block_shape(B, 1, n_sample_block, 1)
assert_chunk_shape(B, n_outcome, 1, n_indvar)
assert_array_shape(B, n_outcome, n_sample_block, n_indvar)
# Decompose coefficients (B) so that variant blocks can be sliced:
# BX -> (n_outcome, n_sample_block, n_covar)
# BYP -> (n_outcome, n_sample_block, n_variant_block, n_alpha_1)
BX = B[..., :n_covar]
assert_array_shape(BX, n_outcome, n_sample_block, n_covar)
BYP = B[..., n_covar:]
assert_array_shape(BYP, n_outcome, n_sample_block, n_variant_block * n_alpha_1)
BYP = BYP.reshape((n_outcome, n_sample_block, n_variant_block, n_alpha_1))
assert_block_shape(BYP, 1, n_sample_block, 1, 1)
assert_chunk_shape(BYP, n_outcome, 1, n_variant_block, n_alpha_1)
assert_array_shape(BYP, n_outcome, n_sample_block, n_variant_block, n_alpha_1)
# Transform base predictions (YP) such that trailing dimensions
# contain left half of matrix product for prediction as well
# as variant blocks to slice on:
# (n_variant_block, n_alpha_1, n_sample, n_outcome) ->
# (n_outcome, n_sample, n_variant_block, n_alpha_1)
YP = da.transpose(YP, (3, 2, 0, 1))
assert_block_shape(YP, 1, n_sample_block, n_variant_block, 1)
assert_chunk_shape(YP, n_outcome, sample_chunks[0], 1, n_alpha_1)
assert_array_shape(YP, n_outcome, n_sample, n_variant_block, n_alpha_1)
def apply(X: Array, YP: Array, BX: Array, BYP: Array) -> Array:
# Collapse selected variant blocks and alphas into single
# new covariate dimension
assert YP.shape[2] == BYP.shape[2]
n_group_covar = n_covar + BYP.shape[2] * n_alpha_1
BYP = BYP.reshape((n_outcome, n_sample_block, -1))
BG = da.concatenate((BX, BYP), axis=-1)
BG = BG.rechunk((-1, None, -1))
assert_block_shape(BG, 1, n_sample_block, 1)
assert_chunk_shape(BG, n_outcome, 1, n_group_covar)
assert_array_shape(BG, n_outcome, n_sample_block, n_group_covar)
YP = YP.reshape((n_outcome, n_sample, -1))
XYP = da.broadcast_to(X, (n_outcome, n_sample, n_covar))
XG = da.concatenate((XYP, YP), axis=-1)
XG = XG.rechunk((-1, None, -1))
assert_block_shape(XG, 1, n_sample_block, 1)
assert_chunk_shape(XG, n_outcome, sample_chunks[0], n_group_covar)
assert_array_shape(XG, n_outcome, n_sample, n_group_covar)
YG = da.map_blocks(
# Block chunks:
# (n_outcome, sample_chunks[0], n_group_covar) @
# (n_outcome, n_group_covar, 1) [after transpose]
lambda x, b: x @ b.transpose((0, 2, 1)),
XG,
BG,
chunks=(n_outcome, sample_chunks, 1),
)
assert_block_shape(YG, 1, n_sample_block, 1)
assert_chunk_shape(YG, n_outcome, sample_chunks[0], 1)
assert_array_shape(YG, n_outcome, n_sample, 1)
YG = da.squeeze(YG, axis=-1).T
assert_block_shape(YG, n_sample_block, 1)
assert_chunk_shape(YG, sample_chunks[0], n_outcome)
assert_array_shape(YG, n_sample, n_outcome)
return YG
# For each contig, generate predictions for all sample+outcome
# combinations using only betas from stage 2 results that
# correspond to *other* contigs (i.e. LOCO)
YC = []
for contig in unique_contigs:
# Define a variant block mask of size `n_variant_block`
# determining which blocks correspond to this contig
variant_block_mask = variant_block_contigs == contig
BYPC = BYP[:, :, ~variant_block_mask, :]
YPC = YP[:, :, ~variant_block_mask, :]
YGC = apply(X, YPC, BX, BYPC)
YC.append(YGC)
YC = da.stack(YC, axis=0)
assert_array_shape(YC, n_contig, n_sample, n_outcome)
return YC
def _variant_block_indexes(
variant_block_size: Union[int, Tuple[int, ...]], contigs: ArrayLike
) -> Tuple[ndarray, ndarray]:
if isinstance(variant_block_size, tuple):
return index_block_sizes(variant_block_size)
elif isinstance(variant_block_size, int):
return index_array_blocks(contigs, variant_block_size)
else:
raise ValueError(
f"Variant block size type {type(variant_block_size)} "
"must be tuple or int"
)
DESC_BASE_PRED = """Predictions from base ridge regressors for every variant block, alpha, sample and outcome"""
DESC_META_PRED = (
"""Predictions from best meta ridge model selected through CV over sample blocks"""
)
DESC_LOCO_PRED = """Predictions from best meta ridge model omitting coefficients for variant blocks within individual contigs (LOCO approximation)"""
def regenie_transform(
G: ArrayLike,
X: ArrayLike,
Y: ArrayLike,
contigs: ArrayLike,
*,
variant_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
sample_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
alphas: Optional[Sequence[float]] = None,
add_intercept: bool = True,
orthogonalize: bool = False,
normalize: bool = False,
_glow_adj_dof: bool = False,
_glow_adj_alpha: bool = False,
_glow_adj_scaling: bool = False,
) -> Dataset:
"""Regenie trait transformation.
Parameters
----------
G : (M, N) ArrayLike
Genotype data array, `M` samples by `N` variants.
X : (M, C) ArrayLike
Covariate array, `M` samples by `C` covariates.
Y : (M, O) ArrayLike
Outcome array, `M` samples by `O` outcomes.
contigs : (N,) ArrayLike
        Variant contigs as a monotonic increasing integer contig index.
See the `regenie` function for documentation on remaining fields.
Returns
-------
Dataset
A dataset containing the following variables:
- `base_prediction` (blocks, alphas, samples, outcomes): Stage 1
          predictions from the ridge regression reduction.
- `meta_prediction` (samples, outcomes): Stage 2 predictions from
the best meta estimator trained on the out-of-sample Stage 1
predictions.
- `loco_prediction` (contigs, samples, outcomes): LOCO predictions
resulting from Stage 2 predictions ignoring effects for variant
blocks on held out contigs. This will be absent if the
data provided does not contain at least 2 contigs.
Raises
------
ValueError
If `G`, `X`, and `Y` do not have the same size along
the first (samples) dimension.
"""
if not G.shape[0] == X.shape[0] == Y.shape[0]:
raise ValueError(
"All data arrays must have same size along first (samples) dimension "
f"(shapes provided: G={G.shape}, X={X.shape}, Y={Y.shape})"
)
n_sample = Y.shape[0]
n_variant = G.shape[1]
if alphas is not None:
alphas = np.asarray(alphas)
G, X, Y = da.asarray(G), da.asarray(X), da.asarray(Y)
contigs = da.asarray(contigs)
# Set default block sizes if not provided
if variant_block_size is None:
# Block in groups of 1000, unless dataset is small
# enough to default to 2 blocks (typically for tests)
variant_block_size = min(1000, n_variant // 2)
if sample_block_size is None:
# Break into 10 chunks of approximately equal size
sample_block_size = tuple(split_array_chunks(n_sample, min(10, n_sample)))
assert sum(sample_block_size) == n_sample
if normalize:
# See: https://github.com/projectglow/glow/issues/255
dof = 1 if _glow_adj_dof else 0
G = (G - G.mean(axis=0)) / G.std(axis=0, ddof=dof)
Y = (Y - Y.mean(axis=0)) / Y.std(axis=0)
X = (X - X.mean(axis=0)) / X.std(axis=0)
if add_intercept:
X = da.concatenate([da.ones((X.shape[0], 1), dtype=X.dtype), X], axis=1)
# TODO: Test this after finding out whether or not there was a good reason
# it was precluded in glow by unit covariate regularization:
# https://github.com/projectglow/glow/issues/266
if orthogonalize: # pragma: no cover
G = G - X @ da.linalg.lstsq(X, G)[0]
Y = Y - X @ da.linalg.lstsq(X, Y)[0]
G = G / G.std(axis=0)
Y = Y / Y.std(axis=0)
X = da.zeros(shape=(n_sample, 0), dtype=G.dtype)
variant_chunk_start, variant_chunk_size = _variant_block_indexes(
variant_block_size, contigs
)
G = G.rechunk(chunks=(sample_block_size, tuple(variant_chunk_size)))
X = X.rechunk(chunks=(sample_block_size, -1))
Y = Y.rechunk(chunks=(sample_block_size, -1))
YP1 = _stage_1(G, X, Y, alphas=alphas)
B2, YP2 = _stage_2(
YP1,
X,
Y,
alphas=alphas,
_glow_adj_alpha=_glow_adj_alpha,
_glow_adj_scaling=_glow_adj_scaling,
)
YP3 = _stage_3(B2, YP1, X, Y, contigs, variant_chunk_start)
data_vars: Dict[Hashable, Any] = {}
data_vars["base_prediction"] = xr.DataArray(
YP1,
dims=("blocks", "alphas", "samples", "outcomes"),
attrs={"description": DESC_BASE_PRED},
)
data_vars["meta_prediction"] = xr.DataArray(
YP2, dims=("samples", "outcomes"), attrs={"description": DESC_META_PRED}
)
if YP3 is not None:
data_vars["loco_prediction"] = xr.DataArray(
YP3,
dims=("contigs", "samples", "outcomes"),
attrs={"description": DESC_LOCO_PRED},
)
return xr.Dataset(data_vars)
def regenie(
ds: Dataset,
*,
dosage: str,
covariates: Union[str, Sequence[str]],
traits: Union[str, Sequence[str]],
variant_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
sample_block_size: Optional[Union[int, Tuple[int, ...]]] = None,
alphas: Optional[Sequence[float]] = None,
add_intercept: bool = True,
normalize: bool = False,
orthogonalize: bool = False,
**kwargs: Any,
) -> Dataset:
"""Regenie trait transformation.
`REGENIE <https://github.com/rgcgithub/regenie>`_ is a whole-genome
regression technique that produces trait estimates for association
tests. These estimates are subtracted from trait values and
sampling statistics (p-values, standard errors, etc.) are evaluated
against the residuals. See the REGENIE preprint [1] for more details.
For a simpler technical overview, see [2], which describes the
individual stages and the separate regression models involved.
Parameters
----------
dosage : str
Name of genetic dosage variable.
covariates : Union[str, Sequence[str]]
Names of covariate variables (1D or 2D).
traits : Union[str, Sequence[str]]
Names of trait variables (1D or 2D).
variant_block_size : Optional[Union[int, Tuple[int, ...]]], optional
Number of variants in each block.
If int, this describes the number of variants in each block
but the last which may be smaller.
If Tuple[int, ...], this must describe the desired number of
variants in each block individually.
Defaults to 1000 or num variants // 2, whichever is smaller.
sample_block_size : Optional[Union[int, Tuple[int, ...]]], optional
Number of samples in each block.
If int, this describes the number of samples in each block
but the last which may be smaller.
If Tuple[int, ...], this must describe the desired number of
samples in each block individually.
Defaults to 10 sample blocks split roughly across all possible
samples or the number of samples, if that number is < 10.
alphas : Optional[Sequence[float]], optional
List of alpha values to use for regularization, by default None.
If not provided, these will be set automatically based on
data size and a priori heritability assumptions.
add_intercept : bool
Whether or not to add intercept to covariates, by default True.
normalize : bool
Rescale genotypes, traits, and covariates to have
mean 0 and stdev 1, by default False.
orthogonalize : bool
**Experimental**: Remove covariates through orthogonalization
of genotypes and traits, by default False.
Warnings
--------
Binary traits are not yet supported so all outcomes provided
must be continuous.
Returns
-------
Dataset
A dataset containing the following variables:
- `base_prediction` (blocks, alphas, samples, outcomes): Stage 1
predictions from ridge regression reduction.
- `meta_prediction` (samples, outcomes): Stage 2 predictions from
the best meta estimator trained on the out-of-sample Stage 1
predictions.
- `loco_prediction` (contigs, samples, outcomes): LOCO predictions
resulting from Stage 2 predictions ignoring effects for variant
blocks on held out contigs. This will be absent if the
data provided does not contain at least 2 contigs.
Raises
------
ValueError
If dosage, covariates, and trait arrays do not have the same number
of samples.
Examples
--------
>>> import numpy as np
>>> from sgkit.testing import simulate_genotype_call_dataset
>>> from sgkit.stats.regenie import regenie
>>> n_variant, n_sample, n_contig, n_covariate, n_trait, seed = 100, 50, 2, 3, 5, 0
>>> rs = np.random.RandomState(seed)
>>> ds = simulate_genotype_call_dataset(n_variant=n_variant, n_sample=n_sample, n_contig=n_contig, seed=seed)
>>> ds["call_dosage"] = (("variants", "samples"), rs.normal(size=(n_variant, n_sample)))
>>> ds["sample_covariate"] = (("samples", "covariates"), rs.normal(size=(n_sample, n_covariate)))
>>> ds["sample_trait"] = (("samples", "traits"), rs.normal(size=(n_sample, n_trait)))
>>> res = regenie(ds, dosage="call_dosage", covariates="sample_covariate", traits="sample_trait")
>>> res.compute() # doctest: +NORMALIZE_WHITESPACE
<xarray.Dataset>
Dimensions: (alphas: 5, blocks: 2, contigs: 2, outcomes: 5, samples: 50)
Dimensions without coordinates: alphas, blocks, contigs, outcomes, samples
Data variables:
base_prediction (blocks, alphas, samples, outcomes) float64 0.3343 ... -...
meta_prediction (samples, outcomes) float64 -0.4588 0.78 ... -0.3984 0.3734
loco_prediction (contigs, samples, outcomes) float64 0.4886 ... -0.01498
References
----------
[1] - Mbatchou, J., L. Barnard, J. Backman, and A. Marcketta. 2020.
“Computationally Efficient Whole Genome Regression for Quantitative and Binary
Traits.” bioRxiv. https://www.biorxiv.org/content/10.1101/2020.06.19.162354v2.abstract.
[2] - https://glow.readthedocs.io/en/latest/tertiary/whole-genome-regression.html
"""
if isinstance(covariates, str):
covariates = [covariates]
if isinstance(traits, str):
traits = [traits]
G = ds[dosage]
X = da.asarray(concat_2d(ds[list(covariates)], dims=("samples", "covariates")))
Y = da.asarray(concat_2d(ds[list(traits)], dims=("samples", "traits")))
contigs = ds["variant_contig"]
return regenie_transform(
G.T,
X,
Y,
contigs,
variant_block_size=variant_block_size,
sample_block_size=sample_block_size,
alphas=alphas,
add_intercept=add_intercept,
normalize=normalize,
orthogonalize=orthogonalize,
**kwargs,
)
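# Block sizes may also be passed explicitly as tuples, e.g. (hypothetical
# values, following the docstring above):
# regenie(ds, dosage="call_dosage", covariates="sample_covariate",
#         traits="sample_trait", variant_block_size=(60, 40), sample_block_size=25)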
|
{"hexsha": "4018c1fe2f251f68258998886ee648655ebfcd8c", "size": 33923, "ext": "py", "lang": "Python", "max_stars_repo_path": "sgkit/stats/regenie.py", "max_stars_repo_name": "jerowe/sgkit", "max_stars_repo_head_hexsha": "ff5a0a01ec6ae41d262ece14cc06a0b8c73ca342", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sgkit/stats/regenie.py", "max_issues_repo_name": "jerowe/sgkit", "max_issues_repo_head_hexsha": "ff5a0a01ec6ae41d262ece14cc06a0b8c73ca342", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sgkit/stats/regenie.py", "max_forks_repo_name": "jerowe/sgkit", "max_forks_repo_head_hexsha": "ff5a0a01ec6ae41d262ece14cc06a0b8c73ca342", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4912689173, "max_line_length": 149, "alphanum_fraction": 0.6568699702, "include": true, "reason": "import numpy,from numpy", "num_tokens": 9431}
|
'''
This module contains an implementation of the network repair methodology
introduced in Campbell and Albert (2014), BMC Syst. Biol.
The code should be straightforward to apply (see network_repair_tutorial.py).
I will be happy to respond to questions and/or comments.
Colin Campbell
Contact: colin.campbell@psu.edu
Python Version: 2.7.x
Date: April 2014
'''
import networkx as nx
import numpy as np
from itertools import product
from random import choice, randrange
def form_network(rules):
'''
Takes as input a list of rules in the format of sample_network.txt.
Outputs a networkx DiGraph with node properties:
'update_nodes': a list of regulating nodes
'update_rules': a dictionary with binary strings as keys, corresponding
to the possible states of the update_nodes, and integers
as values, corresponding to the state of the node given
that input.
Note that nodes are identified by their position in a sorted list of
user-provided node names.
'''
def clean_states(x):
#cleans binary representation of node input states
out=x[2:] # Strip leading 0b
return '0'*(len(inf)-len(out))+out # Pad with leading 0's as needed
stream = [x.rstrip('\n') for x in rules if x != '\n' and x[0]!='#'] # Remove comments and blank lines
nodes = sorted([x.split(' ',1)[0][:-1] for x in stream]) # Generate a sorted list of node names
g = nx.DiGraph()
g.graph['knockout'] = None # At creation, no node is flagged for knockout or overexpression
g.graph['express'] = None
for n in xrange(len(stream)):
rule = stream[n].split('* = ')[1]
rule = rule.replace(' AND ',' and ') # Force decap of logical operators so as to work with eval()
rule = rule.replace(' OR ',' or ')
rule = rule.replace(' NOT ',' not ')
if stream[n].find('True') >= 0 or stream[n].find('False') >= 0: # For always ON or always OFF nodes
g.add_node(n) # We refer to nodes by their location in a sorted list of the user-provided node names
g.node[n]['update_nodes'] = []
g.node[n]['update_rules'] = {'':str(int(eval(rule)))}
continue
inf = rule.split(' ') # Strip down to just a list of influencing nodes
inf = [x.lstrip('(') for x in inf]
inf = [x.rstrip(')') for x in inf]
#The sort ensures that when we do text replacement (<node string>->'True' or 'False') below in this fn, we avoid problems where node 1 is a substring of node 2 (e.g. NODE1_phosph and NODE1)
inf = sorted([x for x in inf if x not in ['','and','or','not']],key=len,reverse=True)
for i in inf: g.add_edge(nodes.index(i),n) # Add edges from all influencing nodes to target node
g.node[n]['update_nodes'] = [nodes.index(i) for i in inf]
g.node[n]['update_rules'] = {}
bool_states = map(bin,range(2**len(inf)))
bool_states = map(clean_states,bool_states)
for j in bool_states:
rule_mod = rule[:]
for k in range(len(j)):
if j[k]=='0':
rule_mod=rule_mod.replace(nodes[g.node[n]['update_nodes'][k]],'False') # Modify the rule to have every combination of True, False for input nodes
else: rule_mod=rule_mod.replace(nodes[g.node[n]['update_nodes'][k]],'True')
g.node[n]['update_rules'][j] = int(eval(rule_mod)) # Store outcome for every possible input
return g,nodes
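# Usage sketch for form_network (hedged; assumes a rules file in the
# sample_network.txt format, e.g. lines like "A* = B AND NOT C"; the file
# name is illustrative):
# with open('sample_network.txt') as f:
#     g, node_names = form_network(f.readlines())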
def find_attractor(graph,state=False):
'''
Takes a graph as formatted in form_network() as input.
Chooses a random starting state (if state==False); synchronously advances
until a SS or LC is found. Can accommodate a node knockout or overexpression
as formed in damage_network().
if state is not False, it must be a string of 0s and 1s, which specifies the
starting state for the update iteration.
The string bits must be arranged in alphabetical (sorted) order of the
node names.
Outputs a list of [next_state,attractor]. For a SS, 'attractor' is a
length-1 list containing the steady state. For a LC, 'attractor' is a list
of states, representing every state in the LC.
If no attractor is found after 1000 iterations, the function returns False
and prints a warning to the console.
'''
def update_state(x):
#x is the node's index
input_state = ''
for i in graph.node[nodes[x]]['update_nodes']: input_state += str(trajectory[-1][nodes.index(i)]) # Acquire string of current states of node x's input nodes
return str(graph.node[nodes[x]]['update_rules'][input_state])
nodes = sorted(graph.nodes())
if not state: trajectory = [list(np.random.random_integers(0,1,nx.number_of_nodes(graph)))] # Random starting state
else: trajectory = [state] # Provided starting state
while True:
trajectory += [map(update_state,xrange(len(nodes)))]
if graph.graph['knockout'] != None: trajectory[-1][nodes.index(graph.graph['knockout'])] = '0' # If a node has been knocked out, it must be 0 even if it would normally be active
elif graph.graph['express'] != None: trajectory[-1][nodes.index(graph.graph['express'])] = '1' # If a node has been overexpressed, it must be 1 even if it would normally be inactive
if trajectory[-1] in trajectory[:-1]: # Return a list of [next state, attractor], once attractor is found (attractor list length 1 in case of SS)
return [trajectory[1],trajectory[trajectory.index(trajectory[-1]):-1]]
if len(trajectory) == 1000:
print 'find_attractor() was unable to find an attractor in 1000 iterations; returning False.'
return False
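# Example call (assumes g from form_network above):
# result = find_attractor(g)           # random starting state
# if result is not False:
#     next_state, attractor = result   # attractor has length 1 for a SS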
def superset(a):
'''
Takes a find_attractor() output 'a' (whose second entry is a limit cycle,
i.e. a list of states) and collapses the LC into one state with a '1' if
the node is '1' in any state of the LC, and '0' otherwise.
transpose = [[row[i] for row in a[1]] for i in range(len(a[1][0]))] # Turn list of states into list of time sequences for each node [[n1s1,n2s1],[n1s2,n2s2]] -> [[n1s1,n1s2],[n2s1,n2s2]]
superset = ['1' if '1' in x else '0' for x in transpose] # Evaluate each entry for any 1s; generate superset
return superset
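# Worked example: for a two-state LC a = [next_state, [['0','1'],['1','0']]],
# each node is '1' in at least one LC state, so superset(a) returns ['1','1'].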
def damage_network(graph,a=None,force=None,force_type='knockout'):
'''
Takes a graph and superset attractor as input. Chooses either a transiently
active or permanently inactive node and forces it to be knocked out or
overexpressed, respectively.
Alternatively, if force is not None, it must be an integer giving a node's
position in a sorted list of graph nodes. That node is then set to
'knockout' or 'express' according to force_type.
Returns the modified graph.
'''
g_copy = graph.copy() # Don't modify the input graph
if force == None:
node = randrange(len(a)) # Choose a random node index
if a[node] == '0': g_copy.graph['express'] = node # Assign the index of an overexpressed (0 forced into 1) node; states are '0'/'1' strings
else: g_copy.graph['knockout'] = node # Assign the index of a knocked out (1 forced into 0) node
else:
if force_type == 'knockout': g_copy.graph['knockout'] = force
elif force_type == 'express': g_copy.graph['express'] = force
return g_copy
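# Usage sketch (hedged; 'ss' is a superset from superset(), node index 3 is
# illustrative):
# g_damaged = damage_network(g, a=ss)                       # random damage
# g_ko = damage_network(g, force=3, force_type='knockout')  # force node 3 off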
def damage_state(graph,a):
'''
Form a damaged state (in superset form or regular form, depending on input)
that mirrors the original attractor except for the knockout/damage.
Called internally in other functions; likely not of direct interest to the
end user.
'''
if graph.graph['knockout'] != None:
return a[:graph.graph['knockout']]+['0']+a[graph.graph['knockout']+1:]
else: return a[:graph.graph['express']]+['1']+a[graph.graph['express']+1:]
def compare_attractors(graph,a):
'''
Takes as input a damaged graph and the original, undamaged LC attractor.
Returns (as the second value) a list where the first entry is the first
entry of 'a' (preserving format) and the second is the largest component of
the attractor that survives when duplicate states are collapsed (e.g. 011
and 010 when node 3 is fixed).
Returns (as the first value) True or False, corresponding to whether or
not a pair of states collapsed.
'''
new_attractors = []
valid,invalid = [],[]
for state in a[1]:
if graph.graph['knockout'] != None:
if state[graph.graph['knockout']] == '1':
switch_state = state[:graph.graph['knockout']] + ['0'] + state[graph.graph['knockout']+1:]
if switch_state in a[1]:
invalid += [state]
valid += [switch_state] # Store the 'invalid' states; inputs to them reroute to the 'valid' states
elif graph.graph['express'] != None:
if state[graph.graph['express']] == '0':
switch_state = state[:graph.graph['express']] + ['1'] + state[graph.graph['express']+1:]
if switch_state in a[1]:
invalid += [state]
valid += [switch_state]
if len(valid)==0: return False, [a[0],[damage_state(graph,x) for x in a[1]]]# If there are no state collapses, we straightforwardly damage every state in a according to graph damage.
positions = range(1,len(a[1]))
cur_pos = 0
route = [a[1][0]]
while True: # We walk across the state transition map, sensitive to invalid states due to state collapse
if a[1][cur_pos] in invalid:
while a[1][cur_pos] in invalid:
try: positions.remove(cur_pos) # We can sometimes loop back to the same state multiple times; no need to remove it from the "points to walk across" list again
except ValueError: pass
cur_pos = a[1].index(valid[invalid.index(a[1][cur_pos])]) # Jump from invalid position to corresponding valid position when moving onto invalid state
route+=[a[1][cur_pos]]
else:
route+=[a[1][cur_pos]]
falsified = [x for x in route if route.count(x) > 1]
if len(falsified) > 0: # Check to see if we've hit a node twice (found a LC)
start = route.index(route[-1])
new_attractors += [route[start+1:]] # The last entry is always one of the repeats, so the LC runs from the first instance of the repeat to the end
try: positions.remove(cur_pos)
except ValueError: pass # We can sometimes loop back to the same state multiple times; no need to remove it from the "points to walk across" list again
if len(positions) == 0: break
cur_pos = choice(positions) # If we isolate an attractor and other states have yet to be walked, pick one randomly
route = []
continue
else: # If we are still progressing, simply iterate our position forward
try: positions.remove(cur_pos)
except ValueError: pass
cur_pos += 1
if cur_pos == len(a[1]): # We can also walk back to 0
cur_pos = 0
if len(new_attractors) == 0:
raise RuntimeError("No new attractors identified.")
elif len(new_attractors) == 1:
return True, [a[0],[damage_state(graph,x) for x in new_attractors[0]]] # We now have the largest salvageable section of the LC
else:
new_attractor_lengths = map(len,new_attractors)
max_index = new_attractor_lengths.index(max(new_attractor_lengths)) # We choose to look at the longest "new" attractor
return True, [a[0],[damage_state(graph,x) for x in new_attractors[max_index]]] # We now have the largest salvageable section of the LC
def check_stability(graph,a):
'''
Takes as input the damaged graph and damaged attractor.
If the attractor reached from every state in the damaged attractor is
identical to the damaged attractor, the damaged attractor is stable, and
the function returns True. Otherwise, the function returns False.
'''
def shift(seq, n):
n = n % len(seq)
return seq[n:] + seq[:n]
A_n = [find_attractor(graph,state=x) for x in a[1]]
if [type(x) for x in A_n].count(bool) > 0:
raise RuntimeError("Unable to find attractor in check_stability()")
if len(A_n) == len(a[1]) == 1: # If the damaged attractor is a SS and evaluates to a SS, we can directly compare them
if A_n[0][1] == a[1]: return True
else: return False
for attr in A_n: # If one or both is a LC, We check to make sure that the procession of states matches
if len(attr[1]) != len(a[1]): return False # Quickest check is to see if the lengths are the same
else:
try: asi = a[1].index(attr[1][0]) # See the 'starting index' of A_d relative to this attractor
except Exception: return False # If it isn't in this attractor, they obviously don't match
rotated = shift(a[1],asi) # Then see if the aligned processions are equivalent at each position (same procession)
if [i==j for i,j in zip(rotated,attr[1])].count(False) > 0: return False
return True
def evaluate_repair(graph,a,a_s=None,method='fix_to_SS'):
'''
Takes the damaged graph, damaged attractor, and original superset as input.
method must be 'LC_repair' or 'fix_to_SS', determining whether we attempt
to preserve the LC transitions or fix its superset to a SS.
Returns the repaired graph, or if not possible, a string explaining the
cause of failure.
'''
# Define internal functions ------------------------------------------------
def disjoint(x,y):
'''
Takes two lists and outputs the positions where the two don't have the
same value.
'''
return [v for v in xrange(len(x)) if x[v]!=y[v]]
def examine_modifications(g_in,a_t):
'''
Takes damaged network g and target attractor a_t. Looks at all possible
edge modifications (as enumerated in the report) to force every node to
be in its desired state.
results are stored in g.graph['modifications'][<node>] as list of
tuples:
(<approach #>,<fix to 1/0>,<interacting node 1>,<interacting node 2>)
'''
def approach_1(x,pn,an,fix_to=1):
'''
adds OR <present_new> to an inactive node to make it active, or
an AND <absent_new> to an active node to make it inactive.
here and below:
fix_to = 0 - we are fixing an active node to be inactive
fix_to = 1 - we are fixing an inactive node to be active
pn - viable_present_new_nodes
p - viable_present_nodes
an - viable_absent_new_nodes
a - viable_absent_nodes
'''
if fix_to == 1:
for j in pn:
g.graph['modifications'][x] += [(1,1,j)] #output meaning: method, fix_to, interacting node #1 (similar for other approach_# functions)
else:
for j in an:
g.graph['modifications'][x] += [(1,0,j)]
def approach_2(x,pn,an,fix_to=1):
'''
adds an OR NOT <absent_new> to an inactive node to make it active,
or an AND NOT <present_new> to an active node to make it inactive.
'''
if fix_to == 1:
for j in an:
g.graph['modifications'][x] += [(2,1,j)]
else:
for j in pn:
g.graph['modifications'][x] += [(2,0,j)]
def approach_3(x,p,pn,a,an,fix_to=1):
'''
adds an OR <pres and pres_new> to an inactive node to make it
active, or an AND <abs or abs_new> to an active node to make it
inactive.
'''
if fix_to == 1:
for j,k in product(p,pn):
g.graph['modifications'][x] += [(3,1,j,k)]
else:
for j,k in product(a,an):
g.graph['modifications'][x] += [(3,0,j,k)]
def approach_4(x,p,pn,a,an,fix_to=1):
'''
adds an OR <pres and not abs_new> to an inactive node to make it
active, or an AND <abs OR NOT pres_new> to an active node to make it
inactive.
'''
if fix_to == 1:
for j,k in product(p,an):
g.graph['modifications'][x] += [(4,1,j,k)]
else:
for j,k in product(a,pn):
g.graph['modifications'][x] += [(4,0,j,k)]
def approach_5(x,p,pn,a,an,fix_to=1):
'''
adds an OR <NOT abs AND pres_new> to an inactive node to make it
active, or an AND <NOT pres OR abs_new> to an active node to make it
inactive.
'''
if fix_to == 1:
for j,k in product(a,pn):
g.graph['modifications'][x] += [(5,1,j,k)]
else:
for j,k in product(p,an):
g.graph['modifications'][x] += [(5,0,j,k)]
def approach_6(x,p,pn,a,an,fix_to=1):
'''
adds an OR <NOT abs AND NOT abs_new> to an inactive node to make it
active, or an AND <NOT pres OR NOT pres_new> to an active node to
make it inactive.
'''
if fix_to == 1:
for j,k in product(a,an):
g.graph['modifications'][x] += [(6,1,j,k)]
else:
for j,k in product(p,pn):
g.graph['modifications'][x] += [(6,0,j,k)]
g=g_in.copy()
nodes = sorted(g.nodes())
g.graph['modifications'] = {}
for i in g.nodes_iter():
if g.graph['express'] == i or g.graph['knockout'] == i: continue # Don't attempt to modify the knocked out/overexpressed node
g.graph['modifications'][i] = [] # Set container for all viable modifications to this node (0->1 and 1->0)
viable_present_new_nodes = [y for y in g.nodes_iter() if int(a_t[nodes.index(y)]) == 1 and y not in g.node[i]['update_nodes'] and g.graph['express'] != y and y != i] # Possible new regulators that are present nodes (that have not been overexpressed)
viable_absent_new_nodes = [y for y in g.nodes_iter() if int(a_t[nodes.index(y)]) == 0 and y not in g.node[i]['update_nodes'] and g.graph['knockout'] != y and y != i] # Possible new regulators that are absent nodes (that have not been knocked out)
viable_present_nodes = [y for y in g.node[i]['update_nodes'] if int(a_t[nodes.index(y)]) == 1 and g.graph['express'] != y and y != i] # Possible existing regulators that are present nodes (that have not been overexpressed)
viable_absent_nodes = [y for y in g.node[i]['update_nodes'] if int(a_t[nodes.index(y)]) == 0 and g.graph['knockout'] != y and y != i] # Possible existing regulators that are absent nodes (that have not been knocked out)
if set(viable_present_new_nodes)&set(viable_absent_new_nodes)&set(viable_present_nodes)&set(viable_absent_nodes) != set(): raise RuntimeError("Algorithm halted - duplicate node assignment.")
#go through all 6 approaches, store possible combinations of nodes as a graph property
approach_1(i,viable_present_new_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_2(i,viable_present_new_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_3(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_4(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_5(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_6(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=int(a_t[nodes.index(i)]))
approach_1(i,viable_present_new_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2) #look at both the 0->1 and 1->0 possibilities
approach_2(i,viable_present_new_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2)
approach_3(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2)
approach_4(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2)
approach_5(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2)
approach_6(i,viable_present_nodes,viable_present_new_nodes,viable_absent_nodes,viable_absent_new_nodes,fix_to=(int(a_t[nodes.index(i)])+1)%2)
return g
def fix_to_SS(graph,a,node_set):
'''
Takes a damaged graph (that has been run through examine_modifications)
and a desired attractor, and makes that attractor a SS of an
edge-modified version of the network. Returns the repaired graph.
Randomly selects a viable edge modification for every node.
node_set is a list of the nodes with rules to be modified.
Returns repaired graph, steady state attractor, and dictionary of
viable modifications for each node.
'''
g_r = graph.copy()
nodes = sorted(g_r.nodes())
choice_dict = {}
for node in node_set:
if g_r.graph['express'] == node or g_r.graph['knockout'] == node: continue # Don't attempt to modify the knocked out/overexpressed node
choice_dict[node] = [x for x in g_r.graph['modifications'][node] if x[1] == int(a[nodes.index(node)])] # Choose only from approaches that fix it to the appropriate choice of 0 or 1
modification = choice(choice_dict[node]) # Format: (approach from the 6 listed in examine_modifications(), method 0 or 1 for the block within the method, interacting node 1[, interacting node 2])
g_r.node[node]['update_nodes'] += [modification[-1]] # Append new interacting nodes to previously selected interacting nodes
new_rules = {}
bool_suffixes = ['0','1'] # We always only ever add 1 new node
for state in g_r.node[node]['update_rules']:
for bs in bool_suffixes:
new_rules[state+bs]=g_r.node[node]['update_rules'][state] # Append the suffixes but initially with the same outputs as before; then with complete set of input possibilities, change outcomes below
g_r.node[node]['update_rules'] = new_rules.copy()
if len(modification) == 4: ex_index = g_r.node[node]['update_nodes'].index(modification[-2]) # Slot in 'update_rule' keys that corresponds to the existing node whose edge is being modified
#adding an "... OR p_n". So all rules where the final entry is a 1 must have an output of 1
if modification[0] == 1 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND a_n". So all rules where the final entry is a 0 must have an output of 0
elif modification[0] == 1 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR NOT a_n". So all rules where the final entry is a 0 must have an output of 1
elif modification[0] == 2 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND NOT p_n". So all rules where the final entry is a 1 must have an output of 0
elif modification[0] == 2 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (p AND p_n)". So all rules where the existing and new node entries are both 1 must have an output of 1
elif modification[0] == 3 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (a OR a_n)". So all rules where the existing and new node entries are both 0 must have an output of 0
elif modification[0] == 3 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (p AND NOT a_n)". So all rules where the existing and new node entries are '10' must have an output of 1
elif modification[0] == 4 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (a OR NOT p_n)". So all rules where the existing and new node entries are '01' must have an output of 0
elif modification[0] == 4 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (NOT a AND p_n)". So all rules where the existing and new node entries are '01' must have an output of 1
elif modification[0] == 5 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (NOT p OR a_n)". So all rules where the existing and new node entries are '10' must have an output of 0
elif modification[0] == 5 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (NOT a AND NOT a_n)". So all rules where the existing and new node entries are both 0 must have an output of 1
elif modification[0] == 6 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (NOT p OR NOT p_n)". So all rules where the existing and new node entries are both 1 must have an output of 0
elif modification[0] == 6 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
a_r = find_attractor(g_r,a)
if len(a_r[1]) > 1: raise RuntimeError('LC found in fix_to_SS()')
return g_r,a_r[1][0],choice_dict
def fix_to_LC(graph_list,a,d_o,node_list):
'''
Takes a list of graphs that has been run through examine_modifications,
such that each graph has modification properties that enumerate all
methods of forcing a node to be '0' or '1' after an update.
Each entry in the list corresponds to a state in the LC attractor, a.
d_o is the 'next state' from the corresponding LC state in 'a'
(evaluated in the damaged network).
node_list enumerates the nodes that don't make the desired transition,
again in the order of every state transition in the LC.
We enumerate all possible modifications for every state transition for
the nodes specified, and keep only the modifications that account for
every required transition.
Returns the repaired graph, LC attractor, and dictionary of viable
modifications for each node.
'''
g_r = graph_list[0].copy()
nodes = sorted(g_r.nodes())
#First, collapse node_list into one list with all nodes that ever don't make the desired transition (others don't require any modification)
s = set(node_list[0])
for x in node_list[1:]: s=s.union(x)
modify_nodes = list(s)
choice_dict = {}
#Then search for all viable modifications (return out of fn if none exist for any node)
#the only thing that is different b/w entries in this list are the modification rules; just use entry [0] for manipulations
for node in modify_nodes:
if graph_list[0].graph['express'] == node or g_r.graph['knockout'] == node: raise RuntimeError("expressed/knocked out node identified as oscillating.")
t_01,t_10 = 0,0
for state in range(len(node_list)):
if node in node_list[state]: #Important - we here look at the transitions where we know the node doesn't make the desired change
if int(d_o[state-1][nodes.index(node)]) == 0 and int(a[state][nodes.index(node)]) == 1: t_01 = 1 #(a node could fail at one transition and not another within a LC)
elif int(d_o[state-1][nodes.index(node)]) == 1 and int(a[state][nodes.index(node)]) == 0: t_10 = 1
if t_01 + t_10 == 2: return False,'a',False #If it actively fails via both 0->1 and 1->0, we can't repair the network using this methodology.
# Otherwise, we need to find a rule that works at every transition,
# for either 1+ 0->1 correction(s) or 1+ 1->0 correction(s). Either
# possibility could additionally have 1+ 1->1 and/or 1+ 0->0
# transitions (i.e. the rule change must preserve 'correct' state
# transitions). We do this by set-intersecting the appropriate list
# of modifications at every primary (1->0 or 0->1) state transition
# for the nodes in the modify_nodes list. From there, we compare to
# the other (secondary) transitions, which are more flexible
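# Illustration (hypothetical modification tuples): if two required 0->1
# transitions admit {(1,1,5),(2,1,7)} and {(1,1,5)}, only (1,1,5) survives
# the intersection and is then checked against the 0->0/1->1 transitions.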
primary_transitions = [];secondary_1_transitions = [];secondary_0_transitions = []
for state in range(len(a)):
if int(d_o[state-1][nodes.index(node)]) != int(a[state][nodes.index(node)]):
primary_transitions += [[x for x in graph_list[state-1].graph['modifications'][node] if x[1] == int(a[state][nodes.index(node)])]] #All the valid transitions where we need a 1->0 or 0->1 modification
elif int(a[state][nodes.index(node)]) == 0:
secondary_0_transitions += [[x for x in graph_list[state-1].graph['modifications'][node] if x[1] == int(a[state][nodes.index(node)])]] #All the valid transitions where we need a 0->0 modification
else:
secondary_1_transitions += [[x for x in graph_list[state-1].graph['modifications'][node] if x[1] == int(a[state][nodes.index(node)])]] #All the valid transitions where we need a 1->1 modification
#These are now lists of lists, for valid modifications at each state transition
p = set(primary_transitions[0])
for i in primary_transitions[1:]: p=p.intersection(i)
if len(p)==0: return False,'b',False #No changes survive the primary filtering process.
p = list(p)
if len(secondary_0_transitions) > 0:
s0 = set(secondary_0_transitions[0])
for i in secondary_0_transitions[1:]: s0=s0.intersection(i)
s0 = list(s0)
if len(secondary_1_transitions) > 0:
s1 = set(secondary_1_transitions[0])
for i in secondary_1_transitions[1:]: s1=s1.intersection(i)
s1 = list(s1)
#Now we need to further refine this list to make sure the modification we choose preserves 0->0 and 1->1 transitions
if len(secondary_1_transitions) > 0 and p[0][1]==0: #We're doing a 1->0 correction somewhere, so we're restricted to an [ .. AND <stuff> ] modification
#0->0 aren't influenced, but we need AND <1> for 1->1 preservation.
#We want node (or node combinations) that eval to 0 (already isolated in primary_transitions) in some states and 1 in others (here).
#This boils down to the modification tuple matching at all indices but 1, which needs to be opposite (0 vs 1 or vice versa)
surviving_transitions = [x for x in p if tuple([x[0]]+[(x[1]+1)%2]+list(x[2:])) in s1]
if len(surviving_transitions) == 0: return False, 'c', False
else:
modification = choice(surviving_transitions)
choice_dict[node] = surviving_transitions
elif len(secondary_0_transitions) > 0: #We're doing a 0->1 correction somewhere, so we're restricted to an [ ... OR <stuff> ] modification
#1->1 aren't influenced, but we need OR <0> for 0->0 preservation.
#As above, we want combinations that eval to 1 or 0, depending on state
surviving_transitions = [x for x in p if tuple([x[0]]+[(x[1]+1)%2]+list(x[2:])) in s0]
if len(surviving_transitions) == 0: return False, 'c', False
else:
modification = choice(surviving_transitions)
choice_dict[node] = surviving_transitions
else:
modification = choice(p) #No need for further analysis if we don't have these transitions, so just choose from what we have
choice_dict[node] = p
#modification variable format: (approach from the 6 listed in examine_modifications(), method 0 or 1 for the block within the method, interacting node 1[, interacting node 2])
#Rest of this loop is copy of end of fix_to_SS()
g_r.node[node]['update_nodes'] += [modification[-1]] #append new interacting nodes to previously selected interacting nodes
new_rules = {}
bool_suffixes = ['0','1'] #we always only ever add 1 new node
for state in g_r.node[node]['update_rules']:
for bs in bool_suffixes:
new_rules[state+bs]=g_r.node[node]['update_rules'][state] #append the suffixes but initially with the same outputs as before; then with complete set of input possibilities, change outcomes below
g_r.node[node]['update_rules'] = new_rules.copy()
if len(modification) == 4: ex_index = g_r.node[node]['update_nodes'].index(modification[-2]) #slot in 'update_rule' keys that corresponds to the existing node whose edge is being modified
#adding an "... OR p_n". So all rules where the final entry is a 1 must have an output of 1
if modification[0] == 1 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND a_n". So all rules where the final entry is a 0 must have an output of 0
elif modification[0] == 1 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR NOT a_n". So all rules where the final entry is a 0 must have an output of 1
elif modification[0] == 2 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND NOT p_n". So all rules where the final entry is a 1 must have an output of 0
elif modification[0] == 2 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (p AND p_n)". So all rules where the existing and new node entries are both 1 must have an output of 1
elif modification[0] == 3 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (a OR a_n)". So all rules where the existing and new node entries are both 0 must have an output of 0
elif modification[0] == 3 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (p AND NOT a_n)". So all rules where the existing and new node entries are '10' must have an output of 1
elif modification[0] == 4 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (a OR NOT p_n)". So all rules where the existing and new node entries are '01' must have an output of 0
elif modification[0] == 4 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (NOT a AND p_n)". So all rules where the existing and new node entries are '01' must have an output of 1
elif modification[0] == 5 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '0' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (NOT p OR a_n)". So all rules where the existing and new node entries are '10' must have an output of 0
elif modification[0] == 5 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '1' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... OR (NOT a AND NOT a_n)". So all rules where the existing and new node entries are both 0 must have an output of 1
elif modification[0] == 6 and modification[1] == 1: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '1' or key[-1] == '1' else 1) for key,val in g_r.node[node]['update_rules'].iteritems()}
#adding an "... AND (NOT p OR NOT p_n)". So all rules where the existing and new node entries are both 1 must have an output of 0
elif modification[0] == 6 and modification[1] == 0: g_r.node[node]['update_rules'] = {key:(val if key[ex_index] == '0' or key[-1] == '0' else 0) for key,val in g_r.node[node]['update_rules'].iteritems()}
a_r = find_attractor(g_r,a[-1]) #Find attractor from one of the LC states
if len(a_r[1]) == 1: raise RuntimeError('SS found in fix_to_LC()') #Make sure we find a LC from this procedure (we check for equality to 'a' outside of this fn)
return g_r,a_r[1],choice_dict
# End internal functions ---------------------------------------------------
if method == 'fix_to_SS' or (method == 'LC_repair' and len(a[1]) == 1): # If we pass a steady state, we evaluate it as a steady state even if we call 'LC_repair'
if len(a[1]) > 1: a_t = damage_state(graph,a_s) # If we do pass a LC, we are concerned with its superset
else: a_t = a[1][0][:]
graph = examine_modifications(graph,a_t) # Look at the enumerated methods of forcing each node to be a specified state after update. Output is a graph with: g.graph['modifications'][node] properties
node_set = find_attractor(graph,state=a_t)
node_set = disjoint(node_set[0],a_t) # Those nodes that change states in the first update from A_t
g_r,a_r,choice_dict = fix_to_SS(graph,a_t,node_set) # Modify edges so the target attractor is a SS of the network
if a_r == a_t: return g_r,choice_dict # The repair succeeded
else: raise RuntimeError("evaluate_repair() failed.") # The repair failed (i.e. there is a bug)
elif method == 'LC_repair':
graph_list = [examine_modifications(graph,x) for x in a[1]] # Look at the enumerated methods of forcing each node to be a specified state after update (Use 'A' b/c we assume we're going to fix it to be stable!)
node_sets = [find_attractor(graph,state=x) for x in a[1]] # Look at 'next state' and ultimate attractor for each state in the damaged limit cyle
node_set_list = [disjoint(node_sets[x-1][0],a[1][x]) for x in range(len(node_sets))] # See which nodes don't make the desired state transition at every update in the damaged LC
damaged_outcomes = [x[0] for x in node_sets] # Feed just the 'next state' values into the function in the below line
g_r,a_r,choice_dict = fix_to_LC(graph_list,a[1],damaged_outcomes,node_set_list) # Attempt to modify edges to recover complete LC
if g_r == False:
if a_r == 'a': return 'LC repair impossible (a)',False # The repair is not possible (need to repair a 0->1 and a 1->0 transition for at least one node)
elif a_r == 'b': return 'LC repair impossible (b)',False # The repair is not possible (not above issue, but edge additions as considered here fail to give option to meet all req. 0->1 or 1->0 state changes)
else: return 'LC repair impossible (c)',False # The repair is not possible (neither of above issues, but we can't also preserve the 0->0 or 1->1 transitions while fixing the 0->1 or 1->0 transitions)
elif [x in a[1] for x in a_r].count(False) + [x in a_r for x in a[1]].count(False) == 0:
return g_r,choice_dict # The repair was attempted and succeeded (all states in repaired LC are in original and vice versa; we don't explicitly check order)
else: raise RuntimeError("evaluate_repair() failed.") # The repair was attempted and failed, and none of the cause categories were identified
else: raise RuntimeError('invalid value for parameter \'method\'')
def write_dict_to_file(d,n,fname=False,console_dump=False):
'''
The output of evaluate_repair(), if successful, includes a dictionary of all
viable edge modifications. This function writes this dictionary (d) in a
human-readable format to a .txt file at fname (if fname != False), and
dumps the data to the console (if console_dump = True).
Note that when fixing to a SS, the "adjustments" are permanent, whereas when
fixing to a LC, the adjustments may not permanently adjust the state of the
node, but rather ensure that it follows the prescribed oscillations.
'''
out = ''
for node in d.iterkeys():
out+='Modifications for node '+n[node]+':\n'
for entry in d[node]:
if entry[0] == 1 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND %s\t(absent_new)\n'%(entry[1],n[entry[-1]])
elif entry[0] == 1 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR %s\t(present_new)\n'%(entry[1],n[entry[-1]])
elif entry[0] == 2 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND NOT %s\t(present_new)\n'%(entry[1],n[entry[-1]])
elif entry[0] == 2 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR NOT %s\t(absent_new)\n'%(entry[1],n[entry[-1]])
elif entry[0] == 3 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND (%s OR %s)\t(absent, absent_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 3 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR (%s AND %s)\t(present, present_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 4 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND (%s OR NOT %s)\t(absent, present_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 4 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR (%s AND NOT %s)\t(present, absent_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 5 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND (NOT %s OR %s)\t(present, absent_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 5 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR (NOT %s AND %s)\t(absent, present_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 6 and entry[1] == 0: out+= '\tAdjust to %s via: ... AND (NOT %s OR NOT %s)\t(present, present_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
elif entry[0] == 6 and entry[1] == 1: out+= '\tAdjust to %s via: ... OR (NOT %s AND NOT %s)\t(absent, absent_new)\n'%(entry[1],n[entry[-2]],n[entry[-1]])
out+='\n\n'
if fname:
f = open(fname,'wt')
f.write(out)
f.close()
if console_dump:
print out
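# End-to-end sketch of the repair pipeline (hedged; mirrors the workflow in
# network_repair_tutorial.py, with an illustrative rules file name):
# with open('sample_network.txt') as f:
#     g, names = form_network(f.readlines())
# a = find_attractor(g)                        # [next_state, attractor]
# ss = superset(a) if len(a[1]) > 1 else a[1][0]
# g_d = damage_network(g, a=ss)
# collapsed, a_d = compare_attractors(g_d, a)
# if not check_stability(g_d, a_d):
#     g_r, choices = evaluate_repair(g_d, a_d, a_s=ss, method='fix_to_SS')
#     if choices: write_dict_to_file(choices, names, fname='repairs.txt')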
|
{"hexsha": "b2880d425c6230640eb3eed59a9ed9c6dc97077d", "size": 48276, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/network_repair_functions.py", "max_stars_repo_name": "AbrahmAB/booleannet", "max_stars_repo_head_hexsha": "a07124047d18a5b7265e050a234969ac58970c7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/network_repair_functions.py", "max_issues_repo_name": "AbrahmAB/booleannet", "max_issues_repo_head_hexsha": "a07124047d18a5b7265e050a234969ac58970c7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/network_repair_functions.py", "max_forks_repo_name": "AbrahmAB/booleannet", "max_forks_repo_head_hexsha": "a07124047d18a5b7265e050a234969ac58970c7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.0537313433, "max_line_length": 271, "alphanum_fraction": 0.5847004723, "include": true, "reason": "import numpy,import networkx", "num_tokens": 11848}
|
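# Machine-generated (SymbolicUtils.Code) evaluation of the 3x3 Hessian of the
# CR3BP potential energy with respect to (x, y, z) at mass ratio μ; presumably
# consumed via eval or a runtime-generated function rather than edited by hand.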
function (x, y, z, μ)
begin
(SymbolicUtils.Code.create_array)(Array, nothing, Val{2}(), Val{(3, 3)}(), (+)((+)(1, (*)((*)(-3//2, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)((*)(-1//1, x), (*)(-1//1, μ)), (+)((*)(2, x), (*)(2, μ))), (*)((+)(1, (*)(-1, μ)))), (*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 3), (+)(1, (*)(-1, μ)))), (+)((*)(3//4, μ, (^)((+)(-2, (*)(2, x), (*)(2, μ)), 2), (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5)))), (+)((*)((*)(3//2, y, μ, (+)(-2, (*)(2, x), (*)(2, μ))), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(-3//1, y, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)((*)(-1//1, x), (*)(-1//1, μ))), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//2, z, μ, (+)(-2, (*)(2, x), (*)(2, μ))), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(-3//1, z, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)((*)(-1//1, x), (*)(-1//1, μ))), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//2, y, μ, (+)(-2, (*)(2, x), (*)(2, μ))), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(-3//1, y, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)((*)(-1//1, x), (*)(-1//1, μ))), (*)((+)(1, (*)(-1, μ))))), (+)((+)(1, (*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 3), (+)(1, (*)(-1, μ))), (*)(3//1, μ, (^)(y, 2), (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (+)((*)(3//1, (^)(y, 2), (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, y, z, μ), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(3//1, y, z, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//2, z, μ, (+)(-2, (*)(2, x), (*)(2, μ))), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(-3//1, z, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)((*)(-1//1, x), (*)(-1//1, μ))), (*)((+)(1, (*)(-1, μ))))), (+)((*)((*)(3//1, y, z, μ), (*)((^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5))), (*)((*)(3//1, y, z, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5)), (*)((+)(1, (*)(-1, μ))))), (+)((*)(-1//1, μ, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 3)), (*)(-1//1, (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 3), (+)(1, (*)(-1, μ))), (*)(3//1, μ, (^)(z, 2), (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(-1, x, μ), 2)))), 5)), (*)(3//1, (^)(z, 2), (^)((inv)((sqrt)((+)((^)(y, 2), (^)(z, 2), (^)((+)(x, μ), 2)))), 5), (+)(1, (*)(-1, μ)))))
end
end
|
{"hexsha": "610bff27faf1a5edfd4f69960181988add0f6b66", "size": 3039, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deprecated/GeneralAstrodynamics_v0.9/Propagators/CR3BP/PotentialEnergyHessian.jl", "max_stars_repo_name": "pbouffard/GeneralAstrodynamics.jl", "max_stars_repo_head_hexsha": "80f175a5b3c6dac2140e645b016d39f131ecea05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deprecated/GeneralAstrodynamics_v0.9/Propagators/CR3BP/PotentialEnergyHessian.jl", "max_issues_repo_name": "pbouffard/GeneralAstrodynamics.jl", "max_issues_repo_head_hexsha": "80f175a5b3c6dac2140e645b016d39f131ecea05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprecated/GeneralAstrodynamics_v0.9/Propagators/CR3BP/PotentialEnergyHessian.jl", "max_forks_repo_name": "pbouffard/GeneralAstrodynamics.jl", "max_forks_repo_head_hexsha": "80f175a5b3c6dac2140e645b016d39f131ecea05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 607.8, "max_line_length": 2995, "alphanum_fraction": 0.2063178677, "num_tokens": 1515}
|
import sys
import json
import gc
import h5py
import numpy as np
from timeit import default_timer as timer
import torch
from torch.autograd import Variable
import options
import visdial.metrics as metrics
from utils import utilities as utils
from dataloader import VisDialDataset
from torch.utils.data import DataLoader
from sklearn.metrics.pairwise import pairwise_distances
from six.moves import range
def dialogDump(params,
dataset,
split,
aBot,
qBot=None,
beamSize=1,
expLowerLimit=None,
expUpperLimit=None,
savePath="dialog_results.json"):
'''
Generates dialog and saves it to a json for later visualization.
If only A-Bot is given, A-Bot answers are generated given GT image,
caption and questions. If both agents are given, dialog is generated
by both agents conversing (A-Bot is shown the GT image and both
agents have access to a caption generated by a pre-trained captioning
model).
Arguments:
params : Parameter dict for all options
dataset : VisDialDataset instance
split : Dataset split, can be 'val' or 'test'
aBot : A-Bot
qBot : Q-Bot (Optional)
beamSize : Beam search width for generating utterances
savePath : File path for saving dialog json file
'''
assert aBot is not None or (qBot is not None and aBot is not None),\
"Must provide either an A-Bot alone or both \
Q-Bot and A-Bot when generating dialog"
old_split = dataset.split
batchSize = dataset.batchSize
# hack here
numRounds = dataset.numRounds
dataset.split = split
ind2word = dataset.ind2word
to_str_gt = lambda w: str(" ".join([ind2word[x] for x in filter(lambda x:\
x>0,w.data.cpu().numpy())])) #.encode('utf-8','ignore')
to_str_pred = lambda w, l: str(" ".join([ind2word[x] for x in list( filter(
lambda x:x>0,w.data.cpu().numpy()))][:l.data.cpu()[0]])) #.encode('utf-8','ignore')
dataloader = DataLoader(
dataset,
batch_size=batchSize,
shuffle=False,
num_workers=0,
collate_fn=dataset.collate_fn)
text = {'data': []}
if '%s_img_fnames' % split not in dataset.data.keys():
print("[Error] Need coco directory and info as input " \
"to -cocoDir and -cocoInfo arguments for locating "\
"coco image files.")
print("Exiting dialogDump without saving files.")
return None
getImgFileName = lambda x: dataset.data['%s_img_fnames' % split][x]
getImgId = lambda x: int(getImgFileName(x)[:-4][-12:])
for idx, batch in enumerate(dataloader):
if expLowerLimit is not None:
if idx < expLowerLimit // batchSize: continue
if idx > expUpperLimit // batchSize: break
else:
if idx > 40:
break
imgIds = [getImgId(x) for x in batch['index']]
dialog = [{'dialog': [], 'image_id': imgId} for imgId in imgIds]
if dataset.useGPU:
batch = {key: v.cuda() if hasattr(v, 'cuda')\
else v for key, v in batch.items()}
image = Variable(batch['img_feat'], volatile=True)
caption = Variable(batch['cap'], volatile=True)
captionLens = Variable(batch['cap_len'], volatile=True)
if qBot is None: # A-Bot alone needs ground truth dialog
gtQuestions = Variable(batch['ques'], volatile=True)
gtQuesLens = Variable(batch['ques_len'], volatile=True)
gtAnswers = Variable(batch['ans'], volatile=True)
gtAnsLens = Variable(batch['ans_len'], volatile=True)
if aBot:
aBot.eval(), aBot.reset()
aBot.observe(
-1, image=image, caption=caption, captionLens=captionLens)
if qBot:
qBot.eval(), qBot.reset()
qBot.observe(-1, caption=caption, captionLens=captionLens)
questions = []
for j in range(batchSize):
caption_str = to_str_gt(caption[j])[8:-6]
dialog[j]['caption'] = caption_str
for round in range(numRounds):
if aBot is not None and qBot is None:
aBot.observe(
round,
ques=gtQuestions[:, round],
quesLens=gtQuesLens[:, round])
aBot.observe(
round,
ans=gtAnswers[:, round],
ansLens=gtAnsLens[:, round])
_ = aBot.forward()
answers, ansLens = aBot.forwardDecode(
inference='greedy', beamSize=beamSize)
elif aBot is not None and qBot is not None:
questions, quesLens = qBot.forwardDecode(
beamSize=beamSize, inference='greedy')
qBot.observe(round, ques=questions, quesLens=quesLens)
aBot.observe(round, ques=questions, quesLens=quesLens)
answers, ansLens = aBot.forwardDecode(
beamSize=beamSize, inference='greedy')
aBot.observe(round, ans=answers, ansLens=ansLens)
qBot.observe(round, ans=answers, ansLens=ansLens)
for j in range(batchSize):
question_str = to_str_pred(questions[j], quesLens[j]) \
if qBot is not None else to_str_gt(gtQuestions[j])
answer_str = to_str_pred(answers[j], ansLens[j])
dialog[j]['dialog'].append({
"answer": answer_str[8:],
"question": question_str[8:] + " "
}) # "8:" for indexing out initial <START>
text['data'].extend(dialog)
text['opts'] = {
'qbot': params['qstartFrom'],
'abot': params['startFrom'],
'backend': 'cudnn',
'beamLen': 20,
'beamSize': beamSize,
'decoder': params['decoder'],
'encoder': params['encoder'],
'gpuid': 0,
'imgNorm': params['imgNorm'],
'inputImg': params['inputImg'],
'inputJson': params['inputJson'],
'inputQues': params['inputQues'],
'loadPath': 'checkpoints/',
'maxThreads': 1,
'resultPath': 'dialog_output/results',
'sampleWords': 0,
'temperature': 1,
'useHistory': True,
'useIm': True,
'AQM': 0,
}
with open(savePath, "w") as fp:
print("Writing dialog text data to file: {}".format(savePath))
json.dump(text, fp)
print("Done!")
dataset.split = old_split
return
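
# --- Hedged usage sketch (not part of the original file) ---
# The error message above suggests this function is called `dialogDump`.
# Assuming a repo-internal helper `loadModel` (name illustrative; only
# `VisDialDataset` is confirmed by the docstring), a typical call might be:
#
#   dataset = VisDialDataset(params, ['val'])
#   aBot = loadModel(params, 'abot')
#   qBot = loadModel(params, 'qbot')
#   dialogDump(params, dataset, split='val', aBot=aBot, qBot=qBot,
#              beamSize=5, savePath='dialog_output/results/dialog.json')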
|
{"hexsha": "cf306db1fbe069041f7f73a671ae248e6073c5c2", "size": 6733, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval_utils/dialog_generate.py", "max_stars_repo_name": "Zach-Ziyi-Liu/Answerer-in-Questioner-s-Mind-Information-Theoretic-Approach-to-Goal-Oriented-Visual-Dialog", "max_stars_repo_head_hexsha": "23f5202593a1308a8c3640d9ed9759aef5fbe6c2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2019-02-12T06:04:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T08:25:01.000Z", "max_issues_repo_path": "eval_utils/dialog_generate.py", "max_issues_repo_name": "Zach-Ziyi-Liu/Answerer-in-Questioner-s-Mind-Information-Theoretic-Approach-to-Goal-Oriented-Visual-Dialog", "max_issues_repo_head_hexsha": "23f5202593a1308a8c3640d9ed9759aef5fbe6c2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-05T08:08:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-05T08:08:23.000Z", "max_forks_repo_path": "eval_utils/dialog_generate.py", "max_forks_repo_name": "Zach-Ziyi-Liu/Answerer-in-Questioner-s-Mind-Information-Theoretic-Approach-to-Goal-Oriented-Visual-Dialog", "max_forks_repo_head_hexsha": "23f5202593a1308a8c3640d9ed9759aef5fbe6c2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2019-02-12T06:03:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T08:45:58.000Z", "avg_line_length": 37.1988950276, "max_line_length": 91, "alphanum_fraction": 0.5678003862, "include": true, "reason": "import numpy", "num_tokens": 1561}
|
import numpy as np
import torch
import threading
import os
from torch.nn import functional as F
import queue
from continual_rl.policies.impala.torchbeast.monobeast import Monobeast, Buffers
from continual_rl.utils.utils import Utils
class ClearMonobeast(Monobeast):
"""
An implementation of Experience Replay for Continual Learning (Rolnick et al, 2019):
https://arxiv.org/pdf/1811.11682.pdf
"""
def __init__(self, model_flags, observation_space, action_spaces, policy_class):
super().__init__(model_flags, observation_space, action_spaces, policy_class)
common_action_space = Utils.get_max_discrete_action_space(action_spaces)
torch.multiprocessing.set_sharing_strategy(model_flags.torch_multiprocessing_sharing_strategy)
# LSTMs not supported largely because they have not been validated; nothing extra is stored for them.
assert not model_flags.use_lstm, "CLEAR does not presently support using LSTMs."
        # Use the local model_flags here; self._model_flags is only assigned below
        assert model_flags.num_actors >= int(model_flags.batch_size * model_flags.batch_replay_ratio), \
            "Each actor only gets sampled from once during training, so we need at least as many actors as batch_size"
        self._model_flags = model_flags
# We want the replay buffers to be created in the large_file_path,
# but in a place characteristic to this experiment.
# Be careful if the output_dir specified is very nested
# (ie. Windows has max path length of 260 characters)
# Could hash output_dir_str if this is a problem.
output_dir_str = os.path.normpath(model_flags.output_dir).replace(os.path.sep, '-')
permanent_path = os.path.join(
model_flags.large_file_path,
"file_backed",
output_dir_str,
)
buffers_existed = os.path.exists(permanent_path)
os.makedirs(permanent_path, exist_ok=True)
self._entries_per_buffer = int(
model_flags.replay_buffer_frames // (model_flags.unroll_length * model_flags.num_actors)
)
self._replay_buffers, self._temp_files = self._create_replay_buffers(
model_flags,
observation_space.shape,
common_action_space.n,
self._entries_per_buffer,
permanent_path,
buffers_existed,
)
self._replay_lock = threading.Lock()
# Each replay batch needs to also have cloning losses applied to it
# Keep track of them as they're generated, to ensure we apply losses to all. This doesn't currently
# guarantee order - i.e. one learner thread might get one replay batch for training and a different for cloning
self._replay_batches_for_loss = queue.Queue()
def _create_replay_buffers(
self,
model_flags,
obs_shape,
num_actions,
entries_per_buffer,
permanent_path,
buffers_existed,
):
"""
Key differences from normal buffers:
1. File-backed, so we can store more at a time
2. Structured so that there are num_actors buffers, each with entries_per_buffer entries
Each buffer entry has unroll_length size, so the number of frames stored is (roughly, because of integer
rounding): num_actors * entries_per_buffer * unroll_length
"""
# Get the standard specs, and also add the CLEAR-specific reservoir value
specs = self.create_buffer_specs(model_flags.unroll_length, obs_shape, num_actions)
# Note: one reservoir value per row
specs["reservoir_val"] = dict(size=(1,), dtype=torch.float32)
buffers: Buffers = {key: [] for key in specs}
# Hold on to the file handle so it does not get deleted. Technically optional, as at least linux will
# keep the file open even after deletion, but this way it is still visible in the location it was created
temp_files = []
for actor_id in range(model_flags.num_actors):
for key in buffers:
shape = (entries_per_buffer, *specs[key]["size"])
permanent_file_name = f"replay_{actor_id}_{key}.fbt"
new_tensor, temp_file = Utils.create_file_backed_tensor(
permanent_path,
shape,
specs[key]["dtype"],
permanent_file_name=permanent_file_name,
)
# reservoir_val needs to be 0'd out so we can use it to see if a row is filled
# but this operation is slow, so leave the rest as-is
# Only do this if we created the buffers anew
if not buffers_existed and key == "reservoir_val":
new_tensor.zero_()
buffers[key].append(new_tensor.share_memory_())
temp_files.append(temp_file)
return buffers, temp_files
def _get_replay_buffer_filled_indices(self, replay_buffers, actor_index):
"""
Get the indices in the replay buffer corresponding to the actor_index.
"""
        # The reservoir value is > 0 once a row has been filled, so the filled
        # rows are exactly those where it is non-zero
        buffer_indicator = replay_buffers['reservoir_val'][actor_index].squeeze(1)
        replay_indices = np.where(buffer_indicator != 0)[0]
return replay_indices
def _get_actor_unfilled_indices(self, actor_index, entries_per_buffer):
"""
Get the unfilled entries in the actor's subset of the replay buffer using a set difference.
"""
filled_indices = set(
self._get_replay_buffer_filled_indices(self._replay_buffers, actor_index)
)
actor_id_set = set(range(0, entries_per_buffer))
unfilled_indices = actor_id_set - filled_indices
return unfilled_indices
def _compute_policy_cloning_loss(self, old_logits, curr_logits):
# KLDiv requires inputs to be log-probs, and targets to be probs
old_policy = F.softmax(old_logits, dim=-1)
curr_log_policy = F.log_softmax(curr_logits, dim=-1)
kl_loss = torch.nn.KLDivLoss(reduction='sum')(curr_log_policy, old_policy.detach())
return kl_loss
def _compute_value_cloning_loss(self, old_value, curr_value):
return torch.sum((curr_value - old_value.detach()) ** 2)
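    # Hedged note: together the two helpers above implement CLEAR's
    # behavioral-cloning terms, a KL divergence pulling the current policy
    # toward the replayed policy logits and a squared error pulling the
    # current value estimate toward the replayed baseline. Both detach the
    # replayed ("old") quantities so gradients only flow into the current model.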
def on_act_unroll_complete(self, task_flags, actor_index, agent_output, env_output, new_buffers):
"""
Every step, update the replay buffer using reservoir sampling.
"""
        # Compute a reservoir_val for the new entry; then, if the buffer is filled, throw out the entry with the
        # lowest reservoir_val and replace it with the new one. If the buffer is not filled, simply put it in the next spot
# Using a new RandomState() because using np.random directly is not thread-safe
random_state = np.random.RandomState()
# > 0 so we can use reservoir_val==0 to indicate unfilled
new_entry_reservoir_val = random_state.uniform(0.001, 1.0)
to_populate_replay_index = None
unfilled_indices = self._get_actor_unfilled_indices(actor_index, self._entries_per_buffer)
actor_replay_reservoir_vals = self._replay_buffers['reservoir_val'][actor_index]
if len(unfilled_indices) > 0:
current_replay_index = min(unfilled_indices)
to_populate_replay_index = current_replay_index
else:
# If we've filled our quota, we need to find something to throw out.
reservoir_threshold = actor_replay_reservoir_vals.min()
# If our new value is higher than our existing minimum, replace that one with this new data
if new_entry_reservoir_val > reservoir_threshold:
to_populate_replay_index = np.argmin(actor_replay_reservoir_vals)
# Do the replacement into the buffer, and update the reservoir_vals list
if to_populate_replay_index is not None:
with self._replay_lock:
actor_replay_reservoir_vals[to_populate_replay_index][0] = new_entry_reservoir_val
for key in new_buffers.keys():
if key == 'reservoir_val':
continue
self._replay_buffers[key][actor_index][to_populate_replay_index][...] = new_buffers[key]
def get_batch_for_training(self, batch):
"""
Augment the batch with entries from our replay buffer.
"""
# Select a random batch set of replay buffers to add also. Only select from ones that have been filled
shuffled_subset = [] # Will contain a list of tuples of (actor_index, buffer_index)
# We only allow each actor to be sampled from once, to reduce variance, and for parity with the original
# paper
actor_indices = list(range(self._model_flags.num_actors))
replay_entry_count = int(self._model_flags.batch_size * self._model_flags.batch_replay_ratio)
assert replay_entry_count > 0, "Attempting to run CLEAR without actually using any replay buffer entries."
random_state = np.random.RandomState()
with self._replay_lock:
# Select a random actor, and from that, a random buffer entry.
for _ in range(replay_entry_count):
# Pick an actor and remove it from our options
actor_index = random_state.choice(actor_indices)
actor_indices.remove(actor_index)
# From that actor's set of available indices, pick one randomly.
replay_indices = self._get_replay_buffer_filled_indices(self._replay_buffers, actor_index=actor_index)
if len(replay_indices) > 0:
buffer_index = random_state.choice(replay_indices)
shuffled_subset.append((actor_index, buffer_index))
if len(shuffled_subset) > 0:
replay_batch = {
# Get the actor_index and entry_id from the raw id
key: torch.stack([self._replay_buffers[key][actor_id][buffer_id]
for actor_id, buffer_id in shuffled_subset], dim=1)
for key in self._replay_buffers
}
replay_entries_retrieved = torch.sum(replay_batch["reservoir_val"] > 0)
assert replay_entries_retrieved <= replay_entry_count, \
f"Incorrect replay entries retrieved. Expected at most {replay_entry_count} got {replay_entries_retrieved}"
replay_batch = {
k: t.to(device=self._model_flags.device, non_blocking=True)
for k, t in replay_batch.items()
}
# Combine the replay in with the recent entries
combo_batch = {
key: torch.cat((batch[key], replay_batch[key]), dim=1) for key in batch
}
# Store the batch so we can generate some losses with it
self._replay_batches_for_loss.put(replay_batch)
else:
combo_batch = batch
return combo_batch
def custom_loss(self, task_flags, model, initial_agent_state):
"""
Compute the policy and value cloning losses
"""
        # The replay batch should already have been queued by get_batch_for_training;
        # if it does not arrive almost immediately, it is not coming
replay_batch = self._replay_batches_for_loss.get(timeout=5)
replay_learner_outputs, unused_state = model(replay_batch, task_flags.action_space_id, initial_agent_state)
replay_batch_policy = replay_batch['policy_logits']
current_policy = replay_learner_outputs['policy_logits']
policy_cloning_loss = self._model_flags.policy_cloning_cost * self._compute_policy_cloning_loss(replay_batch_policy, current_policy)
replay_batch_baseline = replay_batch['baseline']
current_baseline = replay_learner_outputs['baseline']
value_cloning_loss = self._model_flags.value_cloning_cost * self._compute_value_cloning_loss(replay_batch_baseline, current_baseline)
cloning_loss = policy_cloning_loss + value_cloning_loss
stats = {
"policy_cloning_loss": policy_cloning_loss.item(),
"value_cloning_loss": value_cloning_loss.item(),
}
return cloning_loss, stats
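
# --- Hedged illustration (not part of the original class) ---
# A minimal, self-contained sketch of the reservoir-sampling rule used in
# `on_act_unroll_complete` above: each new unroll draws a uniform score in
# (0, 1], and once the buffer is full a new entry only evicts the current
# minimum-score entry. The function name and signature are illustrative only.
def _reservoir_insert_sketch(scores, items, capacity, new_item, random_state):
    score = random_state.uniform(0.001, 1.0)  # > 0 also marks a slot as filled
    if len(items) < capacity:
        # Buffer not yet full: append into the next free slot
        scores.append(score)
        items.append(new_item)
    elif score > min(scores):
        # Buffer full: replace the entry with the lowest reservoir score
        idx = int(np.argmin(scores))
        scores[idx] = score
        items[idx] = new_item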
|
{"hexsha": "61a977f027eb3f410a91d4464b099ea0b122bd49", "size": 12336, "ext": "py", "lang": "Python", "max_stars_repo_path": "continual_rl/policies/clear/clear_monobeast.py", "max_stars_repo_name": "AGI-Labs/continual_rl", "max_stars_repo_head_hexsha": "bcf17d879e8a983340be233ff8f740c424d0f303", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-07-27T05:20:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T07:12:05.000Z", "max_issues_repo_path": "continual_rl/policies/clear/clear_monobeast.py", "max_issues_repo_name": "AGI-Labs/continual_rl", "max_issues_repo_head_hexsha": "bcf17d879e8a983340be233ff8f740c424d0f303", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-05T07:36:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T00:21:50.000Z", "max_forks_repo_path": "continual_rl/policies/clear/clear_monobeast.py", "max_forks_repo_name": "AGI-Labs/continual_rl", "max_forks_repo_head_hexsha": "bcf17d879e8a983340be233ff8f740c424d0f303", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-10-20T06:04:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T22:59:36.000Z", "avg_line_length": 48.0, "max_line_length": 141, "alphanum_fraction": 0.6642347601, "include": true, "reason": "import numpy", "num_tokens": 2572}
|
"""A Morse code keyer.
A Morse "key" is a device with a button used to encode the carrier wave with a
Morse signal. Morse operators use these to produce the familiar "dits" and
"dahs" of Morse code. The Keyer class converts dots and dashes into an encoded
carrier waveform and plays it audibly.
"""
import numpy as np
import simpleaudio as sa
# Dit and dah timings
MORSE_DIT_FREQ = 10 # dits per second
MORSE_DIT = 1
MORSE_DAH = 3
# Audio settings
FREQUENCY = 440 # 440 Hz
SAMPLE_RATE = 44100
class Keyer:
"""Convert Morse code to audio and play it."""
def __init__(self, morse):
"""Convert Morse to playable audio.
:param morse: dot-and-dash Morse code
:type morse: str
"""
self.signal = self.create_binary_signal(morse)
self.audio = self.convert_audio()
def create_binary_signal(self, morse):
"""Converts Morse code into a binary signal.
For example, ".- ." becomes "1011100001"
:param morse: dot-and-dash Morse code
:type morse: str
:return: binary Morse code signal
:rtype: np.ndarray
"""
signal_list = []
# Convert to binary dit, dah or space
# Always add a space of one dit
for char in morse:
if char == ".":
signal_list += MORSE_DIT * [1]
elif char == "-":
signal_list += MORSE_DAH * [1]
signal_list += MORSE_DIT * [0]
            # TODO: correct the number of spaces; a one-dit gap is appended
            # after every character, so char/word boundaries currently get
            # one gap too many
# signal_list is now list of binary digits, each representing a dit
# duration of on or off
signal = np.array(signal_list)
return signal
def convert_audio(self):
"""Convert binary signal to audio.
Encode sine wave with binary signal and create playable audio.
:return: 16-bit audio waveform
:rtype: np.ndarray
"""
# Stretch signal array to match the required sample rate and duration
samples_per_dit = int(round(SAMPLE_RATE / MORSE_DIT_FREQ))
signal_stretched = np.repeat(self.signal, samples_per_dit)
# Create increasing time value array of equivalent length
duration = signal_stretched.size / SAMPLE_RATE
t = np.linspace(
0.0, duration, num=signal_stretched.size, endpoint=False
)
# Create a sine wave at 440 Hz
sine = np.sin(2 * np.pi * FREQUENCY * t)
# Encode sine wave with signal
enc_sine = sine * signal_stretched
# Ensure that sine is in 16-bit range, normalised to maximum amplitude
audio = (2 ** 15 - 1) * enc_sine / (np.max(np.abs(enc_sine)))
# Convert to 16-bit data
audio = audio.astype(np.int16)
return audio
def play(self):
"""Play Morse code.
In the case of audio errors (i.e. on CI system with no sound card),
catch exception and notify.
"""
try:
# Start playback
play_obj = sa.play_buffer(self.audio, 1, 2, SAMPLE_RATE)
# Wait for playback to finish before exiting
play_obj.wait_done()
except sa._simpleaudio.SimpleaudioError:
print("There was an error with audio playback.")
|
{"hexsha": "fc95127277adeae7949b3c6dc2309478721de238", "size": 3321, "ext": "py", "lang": "Python", "max_stars_repo_path": "enigma/keyer.py", "max_stars_repo_name": "jonmaddock/enigma", "max_stars_repo_head_hexsha": "e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "enigma/keyer.py", "max_issues_repo_name": "jonmaddock/enigma", "max_issues_repo_head_hexsha": "e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-10-11T19:50:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T21:13:06.000Z", "max_forks_repo_path": "enigma/keyer.py", "max_forks_repo_name": "jonmaddock/enigma", "max_forks_repo_head_hexsha": "e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3301886792, "max_line_length": 78, "alphanum_fraction": 0.6175850647, "include": true, "reason": "import numpy", "num_tokens": 798}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 18 19:27:57 2014
@author: joser
"""
import sys
import pygame, ode, random, Buttons
from math import atan2, acos, asin, sin, cos
import matplotlib.pyplot as plt
from pygame.locals import *
from numpy import *
from Point import *
from Buttons import *
class gameSimulator( object ):
def __init__( self, *args, **kwargs):
# Initialize pygame
pygame.init()
self.width = kwargs.get('width',600)
self.height = kwargs.get('height',400)
self.length = kwargs.get('length',200)
self.fps = kwargs.get('fps',50)
self.G = kwargs.get('gravity',-9.81)
self.world = ode.World()
self.world.setGravity((0,self.G,0))
self.createScreen()
self.createButtons()
#Variables of this game
self.FIRE = False
self.ANGLE = 0.25
self.POWER = 6.5
self.dt = 1.0/self.fps
self.buffer = []
self.bufferHead = 0
self.bufferTail = 0
self.bufferIsEmpty = True
self.bufferSize = 100
self.correctX = 0
self.correctY = 0
self.initBuffer()
def initBuffer(self):
for i in range(0,self.bufferSize):
self.buffer.append((0,0))
def createScreen(self):
# Open a display
self.srf = pygame.display.set_mode((self.width,self.height))
pygame.display.set_caption("Game Simulator")
#Parameters
self.dt = 1.0/self.fps
self.loopFlag = True
def createButtons(self):
#Buttons
self.goal_button = Buttons.Button(self.srf, color = (200,0,0), x = 10, y = 10, length = 50, height = 25, width = 0, text = "Button_1", text_color = (255,255,255), font_size = 20, fade_on = False)
self.switch_button = Buttons.Button(self.srf, color = (200,0,0), x = 60, y = 10, length = 50, height = 25, width = 0, text = "Button_2", text_color = (255,255,255), font_size = 20, fade_on = False)
self.follow_button = Buttons.Button(self.srf, color = (200,0,0), x = 110, y = 10, length = 50, height = 25, width = 0, text = "Button_3", text_color = (255,255,255), font_size = 20, fade_on = False)
self.noise_button = Buttons.Button(self.srf, color = (200,0,0), x = 160, y = 10, length = 50, height = 25, width = 0, text = "Button_4", text_color = (255,255,255), font_size = 20, fade_on = False)
#Button Dictionary
self.buttons = {0 : self.goal_button,
1 : self.switch_button,
2 : self.follow_button,
3 : self.noise_button}
def loadBackground(self,filename):
self.backgroundImage = pygame.image.load(filename).convert()
self.backgroundRect = self.backgroundImage.get_rect()
def loadImage(self, filename):
image = pygame.image.load(filename)
return image
def world2screen(self, x, y):
return int(self.width/2 + 128*x), int(self.length/2 - 128*y)
def screen2world(self, x, y):
return (float(x - self.width/2)/128), (float(-y + self.length/2)/128)
def checkEvents(self):
events = pygame.event.get()
for e in events:
            if e.type==QUIT:
                pygame.quit()
                sys.exit()
            elif e.type==KEYDOWN:
                if e.key == K_f:
                    print("FIRE!!!")
                    self.FIRE = True
                    self.Vox = self.POWER * cos(self.ANGLE)
                    self.Voy = self.POWER * sin(self.ANGLE)
                elif e.key == K_UP:
                    self.ANGLE = self.ANGLE + 0.1
                    print(self.POWER, self.ANGLE)
                elif e.key == K_DOWN:
                    self.ANGLE = self.ANGLE - 0.1
                    print(self.POWER, self.ANGLE)
                elif e.key == K_LEFT:
                    self.POWER = self.POWER - 0.1
                    print(self.POWER, self.ANGLE)
                elif e.key == K_RIGHT:
                    self.POWER = self.POWER + 0.1
                    print(self.POWER, self.ANGLE)
            else:
                # Ignore other events; quitting here would close the window
                # on any stray event such as mouse motion
                pass
def updateBackground(self, color = None):
if color is not None:
self.srf.fill(color)
else:
self.srf.blit(self.backgroundImage,self.backgroundRect)
def updateImage(self,image, position):
self.srf.blit(image,position)
self.addBuffer(position)
def getBuffer(self):
        return list(zip(*self.buffer))  # list() so the result is reusable on Python 3
def addBuffer(self,newValue):
self.buffer[self.bufferHead] = newValue
if self.bufferHead == self.bufferSize-1:
self.bufferHead = 0
self.bufferTail = 0
else:
if self.bufferHead == self.bufferTail and not self.bufferIsEmpty:
self.bufferHead = self.bufferHead +1
self.bufferTail = self.bufferHead
else:
self.bufferHead = self.bufferHead +1
self.bufferIsEmpty = False
def updateTrace(self,x,y,color):
for i in range(0,self.bufferSize):
self.srf.set_at((x[i]+self.correctX,y[i]+self.correctY),color)
def run(self):
# Simulation loop.
self.clk = pygame.time.Clock()
self.loadBackground("images/screen.png")
gun = self.loadImage("images/gun.jpg")
gunPos = [50,320]
bullet = self.loadImage("images/bullet.jpg")
self.correctX, self.correctY = bullet.get_rect().size
        self.correctX = self.correctX // 2  # integer pixel offsets
        self.correctY = self.correctY // 2
x,y = self.screen2world(230,320)
while True:
# Check for events
self.checkEvents()
self.updateBackground()
self.updateImage(gun,gunPos)
if self.FIRE:
self.Voy = self.Voy + self.G* self.dt
x = x + self.Vox*self.dt
y = y + self.Voy*self.dt + 0.5*self.G*self.dt**2
self.updateImage(bullet, self.world2screen(x,y))
else:
if self.bufferIsEmpty is False:
plotx, ploty = self.getBuffer()
self.updateTrace(plotx, ploty,(255,255,255))
if self.FIRE and (y < -2.5 or y >2.5 or x >2.5 or x<-2.5):
self.FIRE = False
x,y = self.screen2world(230,320)
plotx, ploty = self.getBuffer()
pygame.display.flip()
# Next simulation step
self.world.step(self.dt)
# Try to keep the specified framerate
self.clk.tick(self.fps)
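
# --- Hedged usage sketch (not part of the original module) ---
# Starts the simulator with its default settings; assumes the image assets
# under images/ (screen.png, gun.jpg, bullet.jpg) are present.
if __name__ == "__main__":
    gameSimulator().run()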
|
{"hexsha": "ac8c48ff26b52acfaee5ecfe8a47d80d1eb3b192", "size": 6837, "ext": "py", "lang": "Python", "max_stars_repo_path": "gameSimulator.py", "max_stars_repo_name": "jrcapriles/gameSimulator", "max_stars_repo_head_hexsha": "e4633d2a6ad7fbcc60cd77ed853c9e4e33290319", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gameSimulator.py", "max_issues_repo_name": "jrcapriles/gameSimulator", "max_issues_repo_head_hexsha": "e4633d2a6ad7fbcc60cd77ed853c9e4e33290319", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gameSimulator.py", "max_forks_repo_name": "jrcapriles/gameSimulator", "max_forks_repo_head_hexsha": "e4633d2a6ad7fbcc60cd77ed853c9e4e33290319", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6798029557, "max_line_length": 207, "alphanum_fraction": 0.5283018868, "include": true, "reason": "from numpy", "num_tokens": 1675}
|
import mpi4py
import numpy as np
import pytest
import unittest
from chainermn.communicators._communication_utility import chunked_bcast_obj # NOQA
from chainermn.communicators._communication_utility import INT_MAX # NOQA
from chainermn.communicators.naive_communicator import NaiveCommunicator
class TestCommunicationUtility(unittest.TestCase):
def setUp(self):
self.mpi_comm = mpi4py.MPI.COMM_WORLD
self.communicator = NaiveCommunicator(self.mpi_comm)
def test_chunked_bcast_objs(self):
# success
for (s, l) in [(10, 1), (1024, 7), (355678, 2378), (234, INT_MAX - 1)]:
self.check_chunked_bcast_obj(s, l)
# fail
for (s, l) in [(200, -1), (23, INT_MAX)]:
with pytest.raises(AssertionError):
self.check_chunked_bcast_obj(s, l)
def check_chunked_bcast_obj(self, data_size, max_buf_len):
root = 0
obj = np.arange(data_size)
src = None
if self.communicator.rank == root:
src = obj
dst = chunked_bcast_obj(src, self.communicator.mpi_comm,
max_buf_len, root)
assert len(dst) == len(obj)
for i in range(len(obj)):
assert dst[i] == obj[i]
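
# --- Hedged usage note (not part of the original file) ---
# chainermn communicators wrap an MPI communicator, so this test is normally
# launched under MPI, e.g. (command illustrative):
#   mpiexec -n 2 python -m pytest test_communication_utility.py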
|
{"hexsha": "51aea8fb96a8a990de397387debf0b72ebf5099a", "size": 1250, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/chainermn_tests/communicator_tests/test_communication_utility.py", "max_stars_repo_name": "zaltoprofen/chainer", "max_stars_repo_head_hexsha": "3b03f9afc80fd67f65d5e0395ef199e9506b6ee1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3705, "max_stars_repo_stars_event_min_datetime": "2017-06-01T07:36:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:46:15.000Z", "max_issues_repo_path": "tests/chainermn_tests/communicator_tests/test_communication_utility.py", "max_issues_repo_name": "nolfwin/chainer", "max_issues_repo_head_hexsha": "8d776fcc1e848cb9d3800a6aab356eb91ae9d088", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5998, "max_issues_repo_issues_event_min_datetime": "2017-06-01T06:40:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T01:42:44.000Z", "max_forks_repo_path": "tests/chainermn_tests/communicator_tests/test_communication_utility.py", "max_forks_repo_name": "nolfwin/chainer", "max_forks_repo_head_hexsha": "8d776fcc1e848cb9d3800a6aab356eb91ae9d088", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1150, "max_forks_repo_forks_event_min_datetime": "2017-06-02T03:39:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:29:32.000Z", "avg_line_length": 33.7837837838, "max_line_length": 84, "alphanum_fraction": 0.6424, "include": true, "reason": "import numpy", "num_tokens": 326}
|
import numpy as np
import pandas as pd
from scipy.io import arff
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from collections import Counter, defaultdict
class DimensionValueError(ValueError):
pass
class TypeError(ValueError):
pass
class IterError(ValueError):
pass
class DataProcess:
    # Load the data
def loadArffData(self, path):
data = arff.loadarff(path)
df = pd.DataFrame(data[0])
df = pd.DataFrame(df.values, columns=['sepal length','sepal width', 'petal length', 'petal width', 'class'])
return df
def Predata(self, path):
df = self.loadArffData(path)
        # labels = df.values[:, -1]
        title_mapping = {b"Iris-setosa": 1, b"Iris-versicolor": 2, b"Iris-virginica": 3}  # map class labels to integers
        df['class'] = df['class'].map(title_mapping)  # apply the mapping
        df['class'] = df['class'].fillna(0)  # fill any unmapped labels with 0
data = df.values[:, 0:df.values.shape[-1] - 1]
labels = df.values[:, -1]
return data, labels
    # Split the dataset
def Split_Data(self, path, t_s=0.2):
data, labels = self.Predata(path)
        # To represent w and b uniformly, one could append a column of ones:
        # data = np.hstack((np.ones((data.shape[0], 1)), data))
return train_test_split(data, labels, test_size = t_s, random_state = 0)
class Bayes:
def __init__(self):
pass
def fit(self, data, labels):
self.data = data
self.labels = labels
self.Train()
return self
    def CalNormalDistribution(self, x, mean, std):
        # Gaussian density N(mean, std^2) evaluated at x:
        #   (1 / (sqrt(2*pi) * std)) * exp(-(x - mean)^2 / (2 * std^2))
        res = 1 / np.sqrt(2 * np.pi) / std * np.exp(-1 / 2 * (x - mean) ** 2 / std ** 2)
        return res
def Train(self):
unique_cls = np.unique(self.labels)
        # Compute the class priors P(y_i)
C = Counter(self.labels)
C = sorted(C.items(), key = lambda x:(x[0], x[1]))
Py = [cnt[1] / self.labels.shape[0] for cnt in C]
data = []
for i in range(unique_cls.shape[0]):
y_ind = self.labels == C[i][0]
data.append(self.data[y_ind])
        # Compute the conditional probabilities P(x_i | y_i), assuming each
        # feature follows a normal distribution N(u, sigma^2);
        # Pxi_yi stores the (mean, std) parameters for each (feature, class) pair
Pxi_yi = defaultdict(tuple)
for y_i in range(unique_cls.shape[0]):
u = np.mean(data[y_i], axis = 0)
sigma = np.std(data[y_i], axis = 0)
for x_i in range(self.data.shape[1]):
Pxi_yi[(x_i, y_i)] = (u[x_i], sigma[x_i])
self.Py = Py
self.Likelihood = Pxi_yi
return self
def predict(self, data, labels):
ycls = np.unique(self.labels)
cnt = 0
for i, x in enumerate(data):
p = np.ones_like(ycls, dtype = np.float64)
for y_i in range(ycls.shape[0]):
p[y_i] = self.Py[y_i]
for x_i in range(data.shape[1]):
mean = self.Likelihood[(x_i, y_i)][0]
std = self.Likelihood[(x_i, y_i)][1]
p[y_i] *= self.CalNormalDistribution(x[x_i], mean, std)
print('predict: ', p, 'True label == ', labels[i] - 1)
if np.argmax(p) == labels[i] - 1:
cnt += 1
print('accuracy == ', cnt / data.shape[0])
if __name__ == "__main__":
Train_data, Validation_data, Train_labels, Validation_labels = DataProcess().Split_Data('iris.arff')
Train_data = np.array(Train_data, dtype=np.float64)
Validation_data = np.array(Validation_data, dtype=np.float64)
Train_labels = np.array(Train_labels, dtype=np.int64)
Validation_labels = np.array(Validation_labels, dtype=np.int64)
    Bayes().fit(Train_data, Train_labels).predict(Validation_data, Validation_labels)
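
# --- Hedged cross-check (not part of the original script) ---
# The hand-rolled Gaussian Naive Bayes above should broadly agree with
# scikit-learn's GaussianNB on the same split:
#
#   from sklearn.naive_bayes import GaussianNB
#   skclf = GaussianNB().fit(Train_data, Train_labels)
#   print('sklearn accuracy ==', skclf.score(Validation_data, Validation_labels))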
|
{"hexsha": "13e945c355136825ef105a714665aaf4dbc8ad7f", "size": 3683, "ext": "py", "lang": "Python", "max_stars_repo_path": "Naive Bayes.py", "max_stars_repo_name": "K-ona/--------", "max_stars_repo_head_hexsha": "1bae093758c61e4863ca0b150195286e189af591", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-21T06:28:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-21T06:28:25.000Z", "max_issues_repo_path": "Naive Bayes.py", "max_issues_repo_name": "K-ona/ML-Tutorial", "max_issues_repo_head_hexsha": "1bae093758c61e4863ca0b150195286e189af591", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Naive Bayes.py", "max_forks_repo_name": "K-ona/ML-Tutorial", "max_forks_repo_head_hexsha": "1bae093758c61e4863ca0b150195286e189af591", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3070175439, "max_line_length": 116, "alphanum_fraction": 0.5805050231, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1066}
|
# -*- coding: utf-8 -*-
"""NumPy UltraQuick Tutorial
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/google/eng-edu/blob/master/ml/cc/exercises/numpy_ultraquick_tutorial.ipynb
"""
#@title Copyright 2020 Google LLC. Double-click here for license information.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""# NumPy UltraQuick Tutorial
NumPy is a Python library for creating and manipulating vectors and matrices. This Colab is not an exhaustive tutorial on NumPy. Rather, this Colab teaches you just enough to use NumPy in the Colab exercises of Machine Learning Crash Course.
## About Colabs
Machine Learning Crash Course uses Colaboratories (**Colabs**) for all programming exercises. Colab is Google's implementation of [Jupyter Notebook](https://jupyter.org/). Like all Jupyter Notebooks, a Colab consists of two kinds of components:
* **Text cells**, which contain explanations. You are currently reading a text cell.
* **Code cells**, which contain Python code for you to run. Code cells have a light gray background.
You *read* the text cells and *run* the code cells.
### Running code cells
You must run code cells in order. In other words, you may only run a code cell once all the code cells preceding it have already been run.
To run a code cell:
1. Place the cursor anywhere inside the [ ] area at the top left of a code cell. The area inside the [ ] will display an arrow.
2. Click the arrow.
Alternatively, you may invoke **Runtime->Run all**. Note, though, that some of the code cells will fail because not all the coding is complete. (You'll complete the coding as part of the exercise.)
### If you see errors...
The most common reasons for seeing code cell errors are as follows:
* You didn't run *all* of the code cells preceding the current code cell.
* If the code cell is labeled as a **Task**, then:
* You haven't yet written the code that implements the task.
* You did write the code, but the code contained errors.
## Import NumPy module
Run the following code cell to import the NumPy module:
"""
import numpy as np
"""## Populate arrays with specific numbers
Call `np.array` to create a NumPy matrix with your own hand-picked values. For example, the following call to `np.array` creates an 8-element vector:
"""
one_dimensional_array = np.array([1.2, 2.4, 3.5, 4.7, 6.1, 7.2, 8.3, 9.5])
print(one_dimensional_array)
"""You can also use `np.array` to create a two-dimensional matrix. To create a two-dimensional matrix, specify an extra layer of square brackets. For example, the following call creates a 3x2 matrix:"""
two_dimensional_array = np.array([[6, 5], [11, 7], [4, 8]])
print(two_dimensional_array)
"""To populate a matrix with all zeroes, call `np.zeros`. To populate a matrix with all ones, call `np.ones`.
## Populate arrays with sequences of numbers
You can populate an array with a sequence of numbers:
"""
sequence_of_integers = np.arange(5, 12)
print(sequence_of_integers)
"""Notice that `np.arange` generates a sequence that includes the lower bound (5) but not the upper bound (12).
## Populate arrays with random numbers
NumPy provides various functions to populate matrices with random numbers across certain ranges. For example, `np.random.randint` generates random integers between a low and high value. The following call populates a 6-element vector with random integers between 50 and 100.
"""
random_integers_between_50_and_100 = np.random.randint(low=50, high=101, size=(6))
print(random_integers_between_50_and_100)
"""Note that the highest generated integer `np.random.randint` is one less than the `high` argument.
To create random floating-point values between 0.0 and 1.0, call `np.random.random`. For example:
"""
random_floats_between_0_and_1 = np.random.random([6])
print(random_floats_between_0_and_1)
"""## Mathematical Operations on NumPy Operands
If you want to add or subtract two vectors or matrices, linear algebra requires that the two operands have the same dimensions. Furthermore, if you want to multiply two vectors or matrices, linear algebra imposes strict rules on the dimensional compatibility of operands. Fortunately, NumPy uses a trick called [**broadcasting**](https://developers.google.com/machine-learning/glossary/#broadcasting) to virtually expand the smaller operand to dimensions compatible for linear algebra. For example, the following operation uses broadcasting to add 2.0 to the value of every item in the vector created in the previous code cell:
"""
random_floats_between_2_and_3 = random_floats_between_0_and_1 + 2.0
print(random_floats_between_2_and_3)
"""The following operation also relies on broadcasting to multiply each cell in a vector by 3:"""
random_integers_between_150_and_300 = random_integers_between_50_and_100 * 3
print(random_integers_between_150_and_300)
"""## Task 1: Create a Linear Dataset
Your goal is to create a simple dataset consisting of a single feature and a label as follows:
1. Assign a sequence of integers from 6 to 20 (inclusive) to a NumPy array named `feature`.
2. Assign 15 values to a NumPy array named `label` such that:
```
label = (3)(feature) + 4
```
For example, the first value for `label` should be:
```
label = (3)(6) + 4 = 22
```
"""
feature = np.arange(6, 21)
print(feature)
label = (feature*3) + 4
print(label)
#@title Double-click to see a possible solution to Task 1.
feature = np.arange(6, 21)
print(feature)
label = (feature * 3) + 4
print(label)
"""## Task 2: Add Some Noise to the Dataset
To make your dataset a little more realistic, insert a little random noise into each element of the `label` array you already created. To be more precise, modify each value assigned to `label` by adding a *different* random floating-point value between -2 and +2.
Don't rely on broadcasting. Instead, create a `noise` array having the same dimension as `label`.
"""
noise = (np.random.random([15]) * 4) - 2
print(noise)
label = label + noise
print(label)
#@title Double-click to see a possible solution to Task 2.
noise = (np.random.random([15]) * 4) - 2
print(noise)
label = label + noise
print(label)
|
{"hexsha": "09001cc0c74f0b427b1eb5206ce32b4e99eab5b0", "size": 6689, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf-ml-crashcourse/numpy_ultraquick_tutorial.py", "max_stars_repo_name": "blu3crab/DeepPlay2020", "max_stars_repo_head_hexsha": "9f62375f0d69d446764eb988ed66638f3fd57c5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf-ml-crashcourse/numpy_ultraquick_tutorial.py", "max_issues_repo_name": "blu3crab/DeepPlay2020", "max_issues_repo_head_hexsha": "9f62375f0d69d446764eb988ed66638f3fd57c5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf-ml-crashcourse/numpy_ultraquick_tutorial.py", "max_forks_repo_name": "blu3crab/DeepPlay2020", "max_forks_repo_head_hexsha": "9f62375f0d69d446764eb988ed66638f3fd57c5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2901234568, "max_line_length": 627, "alphanum_fraction": 0.755269846, "include": true, "reason": "import numpy", "num_tokens": 1627}
|
\subsubsection{Usability}
\input{usability.tex}
\subsubsection{User Experience}
\input{user-experience.tex}
|
{"hexsha": "544c2a5b2d7703845fe8f0ef046a7f96027a065b", "size": 107, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Design and Evaluation of User Interfaces- Reference/evaluation.tex", "max_stars_repo_name": "simwir/notes", "max_stars_repo_head_hexsha": "5079b3fc34610094ca00dea13c5128664609f113", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-12T22:22:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-12T22:22:23.000Z", "max_issues_repo_path": "Design and Evaluation of User Interfaces- Reference/evaluation.tex", "max_issues_repo_name": "simwir/notes", "max_issues_repo_head_hexsha": "5079b3fc34610094ca00dea13c5128664609f113", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Design and Evaluation of User Interfaces- Reference/evaluation.tex", "max_forks_repo_name": "simwir/notes", "max_forks_repo_head_hexsha": "5079b3fc34610094ca00dea13c5128664609f113", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-01-17T10:57:21.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-17T10:57:21.000Z", "avg_line_length": 26.75, "max_line_length": 31, "alphanum_fraction": 0.8224299065, "num_tokens": 29}
|
import os

import pandas as pd
import textract as tx

import Cleaner
import tf_idf
user = os.getcwd()
print(user)
resume_dir = user+"/media/Resume/"
job_desc_dir = user+"/media/JobDesc/"
resume_names = os.listdir(resume_dir)
job_description_names = os.listdir(job_desc_dir)
print(resume_names)
print(job_description_names)
document = []
def read_resumes(list_of_resumes, resume_directory):
placeholder = []
for res in list_of_resumes:
temp = []
temp.append(res)
text = tx.process(resume_directory+res, encoding='ascii')
text = str(text, 'utf-8')
temp.append(text)
placeholder.append(temp)
return placeholder
document = read_resumes(resume_names, resume_dir)
def get_cleaned_words(document):
for i in range(len(document)):
raw = Cleaner.Cleaner(document[i][1])
document[i].append(" ".join(raw[0]))
document[i].append(" ".join(raw[1]))
document[i].append(" ".join(raw[2]))
sentence = tf_idf.do_tfidf(document[i][3].split(" "))
document[i].append(sentence)
return document
Doc = get_cleaned_words(document)
Database = pd.DataFrame(document, columns=[
"Name", "Context", "Cleaned", "Selective", "Selective_Reduced", "TF_Based"])
Database.to_csv(
user+"/Resume_Matcher/Resume_Data.csv", index=False)
# Database.to_json("Resume_Data.json", index=False)
def read_jobdescriptions(job_description_names, job_desc_dir):
placeholder = []
for tes in job_description_names:
temp = []
temp.append(tes)
text = tx.process(job_desc_dir+tes, encoding='ascii')
text = str(text, 'utf-8')
temp.append(text)
placeholder.append(temp)
return placeholder
job_document = read_jobdescriptions(job_description_names, job_desc_dir)
Jd = get_cleaned_words(job_document)
jd_database = pd.DataFrame(Jd, columns=[
"Name", "Context", "Cleaned", "Selective", "Selective_Reduced", "TF_Based"])
jd_database.to_csv(
user+"/Resume_Matcher/Job_Data.csv", index=False)
|
{"hexsha": "1c9e43c62e32159c83bcf267bed08e2773aff582", "size": 2164, "ext": "py", "lang": "Python", "max_stars_repo_path": "Resume_Matcher/fileReader.py", "max_stars_repo_name": "r00tDada/Mini_Project_Semester_7", "max_stars_repo_head_hexsha": "fd84be13d91c9ffca8288c7787a0330a5aee7950", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Resume_Matcher/fileReader.py", "max_issues_repo_name": "r00tDada/Mini_Project_Semester_7", "max_issues_repo_head_hexsha": "fd84be13d91c9ffca8288c7787a0330a5aee7950", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Resume_Matcher/fileReader.py", "max_forks_repo_name": "r00tDada/Mini_Project_Semester_7", "max_forks_repo_head_hexsha": "fd84be13d91c9ffca8288c7787a0330a5aee7950", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7160493827, "max_line_length": 103, "alphanum_fraction": 0.6802218115, "include": true, "reason": "import numpy", "num_tokens": 498}
|
(* Standard library imports *)
Require Import Coq.Strings.String.
Require Import Coq.Strings.Ascii.
Require Import Coq.Lists.List.
Require Import Coq.Arith.Arith.
Require Import Coq.Arith.EqNat.
Require Import Coq.Arith.PeanoNat.
Require Import Coq.Bool.Bool.
Require Import Coq.omega.Omega.
Require Import Coq.Program.Equality.
Import ListNotations.
(* Project related imports *)
Require Import GenericLemmas.
Require Import Names.
Require Import AST.
Require Import UtilsProgram.
Require Import UtilsSkeleton.
Require Import Skeleton.
Require Import Typechecker.
Require Import CtorizeI.
Require Import LiftComatch.
Require Import Subterm.
(*Require Import FunInd.*)
(**************************************************************************************************)
(** * Constructorization Part II: *)
(** *)
(** In the second part of the algorithm we compute the new function bodies. *)
(**************************************************************************************************)
Fixpoint constructorize_expr (tn : TypeName) (e : expr) : expr :=
match e with
| E_Var n => E_Var n
| E_Constr sn es => E_Constr sn (map (constructorize_expr tn) es)
| E_DestrCall sn e es =>
if eq_TypeName tn (fst (unscope sn))
then E_ConsFunCall sn (constructorize_expr tn e) (map (constructorize_expr tn) es)
else E_DestrCall sn (constructorize_expr tn e) (map (constructorize_expr tn) es)
| E_FunCall n es => E_FunCall n (map (constructorize_expr tn) es)
| E_GenFunCall sn es =>
if eq_TypeName tn (fst (unscope sn))
then E_Constr sn (map (constructorize_expr tn) es)
else E_GenFunCall sn (map (constructorize_expr tn) es)
| E_ConsFunCall sn e es => E_ConsFunCall sn (constructorize_expr tn e) (map (constructorize_expr tn) es)
| E_Match qn e bs cases t =>
E_Match qn (constructorize_expr tn e)
(map (fun x => (constructorize_expr tn (fst x), snd x)) bs)
(map (fun x => (fst x, constructorize_expr tn (snd x))) cases) t
| E_CoMatch qn bs cocases =>
(* Without lift/inline, we would have a case distinction... *)
(*
if eq_TypeName tn (fst qn)
(* ...but this case may actually never occur (and will not, thanks to comatch lifting) *)
then E_Constr (local qn) (map (fun x => constructorize_expr tn (fst x)) bs)
else *)
E_CoMatch qn (map (fun x => (constructorize_expr tn (fst x), snd x)) bs)
(map (fun x => (fst x, constructorize_expr tn (snd x))) cocases)
| E_Let e1 e2 => E_Let (constructorize_expr tn e1) (constructorize_expr tn e2)
end.
Lemma filter_compose : forall {A} (l : list A) f g,
filter f (filter g l) = filter (fun x => andb (f x) (g x)) l.
Proof with auto.
intros. induction l... simpl. case_eq (g a); intros.
- case_eq (f a); intros.
+ simpl. rewrite H0. f_equal...
+ simpl. rewrite H0...
- case_eq (f a); intros...
Qed.
Lemma constructorize_expr_preserves_typing : forall p tn e ctx t,
(forall e' n bs cocases, subterm e' e -> e' <> E_CoMatch (tn,n) bs cocases) ->
(program_skeleton p) / ctx |- e : t ->
(constructorize_to_skeleton p tn) / ctx |- (constructorize_expr tn e) : t.
Proof with try apply in_eq; try apply in_cons; eauto.
intros. generalize dependent ctx. generalize dependent t. generalize H. clear H.
induction e using expr_strong_ind; intros.
- inversion H0; subst. apply T_Var...
- inversion H1; subst. simpl. apply T_Constr with (cargs:=cargs)...
+ simpl. apply in_or_app. right...
+ clear - H H0 H7.
assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_Constr... }
clear H0. induction H7; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
* inversion H; subst. apply H4... intros. apply H1 with (x:=e)...
* apply IHListTypeDeriv; try inversion H... intros. apply H1 with (x:=x0)...
- simpl. case_eq (eq_TypeName tn (fst (unscope n))); intros.
+ inversion H1; subst. destruct n.
* apply T_LocalConsFunCall with (argts:=dargs).
-- simpl. unfold new_cfunsigs_l. apply in_or_app. left.
rewrite in_map_iff. unfold cfunsigs_mapfun. exists (local q, dargs, t).
split... rewrite filter_In. split...
-- apply IHe... intros. unfold not in *. intros. eapply H0...
eapply Sub_Trans... apply Sub_DestrCall_e0...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_DestrCall_es... }
clear - H H11 H3. induction H11; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
* apply T_GlobalConsFunCall with (argts:=dargs).
-- simpl. unfold new_cfunsigs_g. apply in_or_app. left.
rewrite in_map_iff. unfold cfunsigs_mapfun. exists (global q, dargs, t).
split... rewrite filter_In. split...
-- apply IHe... intros. unfold not in *. intros. eapply H0...
eapply Sub_Trans... apply Sub_DestrCall_e0...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_DestrCall_es... }
clear - H H11 H3. induction H11; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
+ inversion H1; subst. apply T_DestrCall with (dargs:=dargs).
* simpl. unfold new_dtors. rewrite filter_In. split... rewrite H2...
* apply IHe... intros. unfold not in *. intros. eapply H0...
eapply Sub_Trans... apply Sub_DestrCall_e0...
* assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_DestrCall_es... }
clear - H H11 H3. induction H11; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
- inversion H1; subst. simpl. apply T_FunCall with (argts:=argts)...
clear - H H0 H8.
assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_FunCall... }
clear H0. induction H8; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
* inversion H; subst. apply H4... intros. apply H1 with (x:=e)...
* apply IHListTypeDeriv; try inversion H... intros. apply H1 with (x:=x0)...
- inversion H1; subst.
+ simpl. apply T_GlobalConsFunCall with (argts:=argts).
* simpl. unfold new_cfunsigs_g. apply in_or_app...
* apply IHe... intros. unfold not in *. intros. eapply H0...
eapply Sub_Trans... apply Sub_ConsFunCall_e0...
* assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_ConsFunCall_es... }
clear - H H10 H2. induction H10; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
-- inversion H; subst. apply H4... intros. apply H2 with (x:=e)...
-- apply IHListTypeDeriv; try inversion H... intros. apply H2 with (x:=x0)...
+ simpl. apply T_LocalConsFunCall with (argts:=argts).
* simpl. unfold new_cfunsigs_g. apply in_or_app...
* apply IHe... intros. unfold not in *. intros. eapply H0...
eapply Sub_Trans... apply Sub_ConsFunCall_e0...
* assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_ConsFunCall_es... }
clear - H H10 H2. induction H10; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
-- inversion H; subst. apply H4... intros. apply H2 with (x:=e)...
-- apply IHListTypeDeriv; try inversion H... intros. apply H2 with (x:=x0)...
- simpl. case_eq (eq_TypeName tn (fst (unscope sn))); intros.
+ inversion H1; subst.
* apply T_Constr with (cargs:=argts)...
-- simpl. unfold computeNewDatatype. apply in_or_app. left. apply in_or_app. left.
rewrite in_map_iff. exists (qn, argts). split... rewrite filter_In. split...
simpl in *. rewrite eq_TypeName_symm...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_GenFunCall... }
clear - H H9 H3. induction H9; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
* apply T_Constr with (cargs:=argts)...
-- simpl. unfold computeNewDatatype. apply in_or_app. left. apply in_or_app. right.
rewrite in_map_iff. exists (qn, argts). split... rewrite filter_In. split...
simpl in *. rewrite eq_TypeName_symm...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_GenFunCall... }
clear - H H9 H3. induction H9; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
+ inversion H1; subst.
* apply T_GlobalGenFunCall with (argts:=argts).
-- simpl. unfold new_gfunsigs_g. rewrite filter_In. split... simpl in *. rewrite H2...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_GenFunCall... }
clear - H H9 H3. induction H9; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
* apply T_LocalGenFunCall with (argts:=argts).
-- simpl. unfold new_gfunsigs_l. rewrite filter_In. split... simpl in *. rewrite H2...
-- assert (forall x y n bs cocases, In x ls -> subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H0. intros. apply H0. eapply Sub_Trans... apply Sub_GenFunCall... }
clear - H H9 H3. induction H9; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
++ inversion H; subst. apply H4... intros. apply H3 with (x:=e)...
++ apply IHListTypeDeriv; try inversion H... intros. apply H3 with (x:=x0)...
- simpl. inversion H2; subst.
apply T_Match with (bindings_exprs := map (constructorize_expr tn) bindings_exprs)
(bindings_types := bindings_types) (ctorlist := ctorlist).
+ apply IHe... intros. unfold not in *. apply H1. eapply Sub_Trans... apply Sub_Match_e0.
+ rewrite map_fst_f_combine...
+ assert (forall x y n bs cocases,
In x (map fst (combine bindings_exprs bindings_types)) ->
subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H1. intros. apply H1. eapply Sub_Trans... apply Sub_Match_bs... }
clear - H0 H13 H3. induction H13; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
* inversion H0; subst. apply H4... intros. unfold not in *. eapply H3...
* apply IHListTypeDeriv; try inversion H0... intros. apply H3 with (x:=x0)...
+ unfold lookup_ctors. unfold lookup_ctors in *.
remember (filter (eq_TypeName (fst n)) (skeleton_dts (constructorize_to_skeleton p tn))) as fl.
simpl. clear - H14 Heqfl.
destruct (filter (eq_TypeName (fst n)) (skeleton_dts (program_skeleton p))) eqn:E; try discriminate.
inversion H14. subst ctorlist. clear H14. pose proof (in_eq t l). rewrite <- E in H.
rewrite filter_In in H. destruct H.
assert (exists t l, fl = t :: l) as flEq.
{ case_eq (eq_TypeName (fst n) tn); intros.
- simpl in Heqfl. rewrite H1 in Heqfl. exists tn. eexists...
- simpl in Heqfl. rewrite H1 in Heqfl. rewrite E in Heqfl. exists t. exists l...
}
destruct flEq as [t' [l' flEq]]. rewrite flEq. clear t' l' flEq.
unfold computeNewDatatype.
rewrite filter_app. rewrite filter_app. f_equal. rewrite <- app_nil_l. f_equal.
case_eq (eq_TypeName t tn); intros.
* rewrite eq_TypeName_eq in H1. subst.
pose proof (skeleton_dts_cdts_disjoint (program_skeleton p)).
unfold dts_cdts_disjoint in H1.
pose proof (skeleton_gfun_sigs_in_cdts_l (program_skeleton p)) as H2.
unfold gfun_sigs_in_cdts in H2.
pose proof (skeleton_gfun_sigs_in_cdts_g (program_skeleton p)) as H2'.
unfold gfun_sigs_in_cdts in H2'.
case_eq ((filter (fun x => eq_TypeName (fst (fst x)) tn)
(skeleton_gfun_sigs_l (program_skeleton p)))); intros;
case_eq ((filter (fun x => eq_TypeName (fst (fst x)) tn)
(skeleton_gfun_sigs_g (program_skeleton p)))); intros; auto; exfalso.
-- pose proof (in_eq p0 l0). rewrite <- H4 in H5. rewrite filter_In in H5.
destruct H5. rewrite eq_TypeName_eq in H6. subst.
rewrite Forall_forall in H2'. pose proof (H2' _ H5). unfold not in H1.
apply H1 with (t:=fst (fst p0))...
-- pose proof (in_eq p0 l0). rewrite <- H3 in H5. rewrite filter_In in H5.
destruct H5. rewrite eq_TypeName_eq in H6. subst.
rewrite Forall_forall in H2. pose proof (H2 _ H5). unfold not in H1.
apply H1 with (t:=fst (fst p0))...
-- pose proof (in_eq p0 l0). rewrite <- H3 in H5. rewrite filter_In in H5.
destruct H5. rewrite eq_TypeName_eq in H6. subst.
rewrite Forall_forall in H2. pose proof (H2 _ H5). unfold not in H1.
apply H1 with (t:=fst (fst p0))...
* rewrite <- app_nil_l. f_equal;
match goal with |- ?l = [] => case_eq l; intros; auto;
exfalso; pose proof (in_eq p0 l0); rewrite <- H2 in H3; rewrite filter_In in H3;
destruct H3; rewrite in_map_iff in H3; do 2 (destruct H3); rewrite filter_In in H5;
destruct H5; destruct p0; inversion H3; subst; simpl in *;
rewrite eq_TypeName_eq in H0; rewrite H0 in H4;
rewrite eq_TypeName_eq in H6; rewrite eq_TypeName_eq in H4; subst;
unfold QName in *; rewrite H4 in H1; rewrite eq_TypeName_refl in H1; discriminate
end.
+ rewrite Forall_forall in *. intros. rewrite <- map_fst_f_combine in H3.
rewrite in_map_iff in H3. do 2 (destruct H3). pose proof (H15 _ H4).
destruct x. destruct p0. destruct p1. destruct x0. destruct p0. destruct p1. subst.
inversion H3...
+ assert (forall x, In x ls -> In x ls)...
generalize H3. generalize H16.
generalize (map (fun ctor => snd ctor ++ bindings_types) ctorlist).
generalize ls at - 4. clear - H H1. induction ls0; intros.
* inversion H16. subst. apply ListTypeDeriv'_Nil.
* inversion H16. subst. simpl. apply ListTypeDeriv'_Cons.
-- rewrite Forall_forall in H. destruct a. simpl. apply H...
++ rewrite in_map_iff. exists (s,e0). split... apply H3...
++ intros. apply H1. apply Sub_Trans with (e2:=e0)... apply Sub_Match_cases.
rewrite in_map_iff. exists (s,e0). split... apply H3...
-- apply IHls0... intros. apply H3...
- simpl. case_eq (eq_TypeName tn (fst n)); intros.
+ exfalso. unfold not in H1. rewrite eq_TypeName_eq in H3. subst. destruct n.
eapply H1; try eapply Sub_Refl...
+ simpl. inversion H2. subst.
apply T_CoMatch with (bindings_exprs := map (constructorize_expr tn) bindings_exprs)
(bindings_types := bindings_types) (dtorlist := dtorlist).
* rewrite map_fst_f_combine...
* assert (forall x y n bs cocases,
In x (map fst (combine bindings_exprs bindings_types)) ->
subterm y x -> y <> E_CoMatch (tn,n) bs cocases).
{ clear - H1. intros. apply H1. eapply Sub_Trans... apply Sub_CoMatch_bs... }
clear - H0 H8 H4. induction H8; try apply ListTypeDeriv_Nil. simpl. apply ListTypeDeriv_Cons.
-- inversion H0; subst. apply H3... intros. unfold not in *. eapply H4...
-- apply IHListTypeDeriv; try inversion H0... intros. apply H4 with (x:=x0)...
* unfold lookup_dtors. rewrite <- H11. f_equal. simpl. unfold new_cdts.
unfold lookup_dtors. unfold new_dtors.
generalize (skeleton_cdts (program_skeleton p)).
generalize (skeleton_dtors (program_skeleton p)). intros c c0.
repeat (rewrite filter_compose).
rewrite filter_ext with (g:=eq_TypeName (fst n)).
2 : { clear - H3. intros. case_eq (eq_TypeName (fst n) a); intros...
rewrite eq_TypeName_eq in H. subst. rewrite H3... }
rewrite filter_ext with
(g:=(fun x : ScopedName * list TypeName * TypeName =>
let (y, _) := x in let (n0, _) := y in eq_TypeName (fst (unscope n0)) (fst n)))...
clear - H3. intros. destruct a. destruct p.
case_eq (eq_TypeName (fst (unscope s)) (fst n)); intros...
rewrite eq_TypeName_eq in H. rewrite H. rewrite H3...
* rewrite Forall_forall in *. intros. rewrite <- map_fst_f_combine in H4.
rewrite in_map_iff in H4. do 2 (destruct H4). pose proof (H13 _ H5).
destruct x. destruct p0. destruct p1. destruct x0. destruct p0. destruct p1.
destruct p2. destruct p0. subst. inversion H4...
* assert (forall x, In x ls -> In x ls)...
generalize H4. generalize H14.
generalize (map (fun dtor => snd (fst dtor) ++ bindings_types) dtorlist).
generalize (map snd dtorlist).
generalize ls at - 3. clear - H H1. induction ls0; intros.
-- inversion H14. subst. apply ListTypeDeriv'_Nil.
-- inversion H14. subst. simpl. apply ListTypeDeriv'_Cons.
++ rewrite Forall_forall in H. destruct a. simpl. apply H...
** rewrite in_map_iff. exists (s,e). split... apply H4...
** intros. apply H1. apply Sub_Trans with (e2:=e)... apply Sub_CoMatch_cocases.
rewrite in_map_iff. exists (s,e). split... apply H4...
++ apply IHls0... intros. apply H4...
- inversion H0. subst. simpl. apply T_Let with (t1:=t1).
+ apply IHe1... intros. apply H. apply Sub_Trans with (e2:=e1)... apply Sub_Let_e1...
+ apply IHe2... intros. apply H. apply Sub_Trans with (e2:=e2)... apply Sub_Let_e2...
Qed.
|
{"author": "ps-tuebingen", "repo": "decomposition-diversity", "sha": "28ab18c34f0a192c9b3d58caa709dee3e9129068", "save_path": "github-repos/coq/ps-tuebingen-decomposition-diversity", "path": "github-repos/coq/ps-tuebingen-decomposition-diversity/decomposition-diversity-28ab18c34f0a192c9b3d58caa709dee3e9129068/Formalization/CtorizeII.v"}
|
# Simulating readout noise on the Rigetti Quantum Virtual Machine
© Copyright 2018, Rigetti Computing.
$$
\newcommand{ket}[1]{\left|{#1}\right\rangle}
\newcommand{bra}[1]{\left\langle {#1}\right|}
\newcommand{tr}[1]{\mathrm{Tr}\,\left[ {#1}\right]}
\newcommand{expect}[1]{\left\langle {#1} \right \rangle}
$$
## Theoretical Overview
Qubit readout can be corrupted in a variety of ways. The two most relevant error mechanisms on the Rigetti QPU right now are:
1. Transmission line noise that makes a 0-state look like a 1-state or vice versa. We call this **classical readout bit-flip error**. This type of readout noise can be reduced by tailoring optimal readout pulses and by using superconducting, quantum-limited amplifiers to amplify the readout signal before it is corrupted by classical noise at the higher-temperature stages of our cryostats.
2. T1 qubit decay during readout (our readout operations can take more than a microsecond unless they have been specially optimized), which leads to readout signals that initially behave like 1-states but then collapse to something resembling a 0-state. We will call this **T1-readout error**. This type of readout error can be reduced by achieving shorter readout pulses relative to the T1 time, i.e., one can reduce the readout pulse length, increase the T1 time, or both.
## Qubit measurements
This section provides the necessary theoretical foundation for accurately modeling noisy quantum measurements on superconducting quantum processors. It relies on some of the abstractions (density matrices, Kraus maps) introduced in our notebook on [gate noise models](GateNoiseModels.ipynb).
The most general type of measurement performed on a single qubit at a single time can be characterized by some set $\mathcal{O}$ of measurement outcomes, e.g., in the simplest case $\mathcal{O} = \{0, 1\}$, and some unnormalized quantum channels (see notebook on gate noise models) that encapsulate
1. the probability of that outcome
2. how the qubit state is affected conditional on the measurement outcome.
Here the _outcome_ is understood as classical information that has been extracted from the quantum system.
### Projective, ideal measurement
The simplest case, usually taught in introductory quantum mechanics and quantum information courses, is given by Born's rule and the projection postulate, which state that there exists a complete set of orthogonal projection operators
$$
P_{\mathcal{O}} := \{\Pi_x \text{ Projector }\mid x \in \mathcal{O}\},
$$
i.e., one for each measurement outcome. Any projection operator must satisfy $\Pi_x^\dagger = \Pi_x = \Pi_x^2$, and for an _orthogonal_ set of projectors any two members satisfy
$$
\Pi_x\Pi_y = \delta_{xy} \Pi_x = \begin{cases} 0 & \text{ if } x \ne y \\ \Pi_x & \text{ if } x = y \end{cases}
$$
and for a _complete_ set we additionally demand that $\sum_{x\in\mathcal{O}} \Pi_x = 1$.
Following our introduction to gate noise, we write quantum states as density matrices, as this is more general and in closer correspondence with classical probability theory.
With these, the probability of outcome $x$ is given by $p(x) = \tr{\Pi_x \rho \Pi_x} = \tr{\Pi_x^2 \rho} = \tr{\Pi_x \rho}$, and the post-measurement state is
$$
\rho_x = \frac{1}{p(x)} \Pi_x \rho \Pi_x,
$$
which is the projection postulate applied to mixed states.
If we were sloppy quantum programmers and accidentally erased the measurement outcome, then our best guess for the post-measurement state would be given by something that looks an awful lot like a Kraus map:
$$
\rho_{\text{post measurement}} = \sum_{x\in\mathcal{O}} p(x) \rho_x = \sum_{x\in\mathcal{O}} \Pi_x \rho \Pi_x.
$$
The completeness of the projector set ensures that the trace of the post-measurement state is still 1, and the Kraus map form of this expression ensures that $\rho_{\text{post measurement}}$ is a positive (semi-)definite operator.
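The following minimal `numpy` sketch (illustration only, not part of the pyquil API) verifies the projector properties above and computes $p(x)$ and $\rho_x$ for a qubit prepared in $\ket{+}=(\ket{0}+\ket{1})/\sqrt{2}$ and measured in the computational basis:
```python
import numpy as np

# computational-basis projectors for a single qubit
pi0 = np.array([[1, 0], [0, 0]], dtype=complex)
pi1 = np.array([[0, 0], [0, 1]], dtype=complex)

# orthogonality and completeness checks
assert np.allclose(pi0 @ pi1, 0)
assert np.allclose(pi0 + pi1, np.eye(2))

# density matrix of the |+> state
plus = np.array([1, 1], dtype=complex) / np.sqrt(2)
rho = np.outer(plus, plus.conj())

for x, pi in enumerate((pi0, pi1)):
    p_x = np.trace(pi @ rho).real          # Born probability
    rho_x = pi @ rho @ pi / p_x            # post-measurement state
    print(x, p_x, np.trace(rho_x).real)    # 0.5 each, trace 1
```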
### Classical readout bit-flip error
Consider now the ideal measurement as above, but where the outcome $x$ is transmitted across a noisy classical channel that produces a final outcome $x'\in \mathcal{O}' = \{0', 1'\}$ according to some conditional probabilities $p(x'|x)$ that can be recorded in the _assignment probability matrix_
$$
P_{x'|x} = \begin{pmatrix}
p(0 | 0) & p(0 | 1) \\
p(1 | 0) & p(1 | 1)
\end{pmatrix}
$$
Note that this matrix has only two independent parameters, as each column must be a valid probability distribution, i.e., all elements are non-negative and each column sums to 1.
This matrix allows us to obtain the probabilities $\mathbf{p}' := (p(x'=0), p(x'=1))^T$ from the original outcome probabilities $\mathbf{p} := (p(x=0), p(x=1))^T$ via $\mathbf{p}' = P_{x'|x}\mathbf{p}$.
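As a quick sanity check, here is a small `numpy` sketch (with hypothetical values for $p(0|0)$ and $p(1|1)$) that applies the assignment probability matrix to an ideal outcome distribution:
```python
import numpy as np

p00, p11 = 0.95, 0.90  # hypothetical assignment probabilities
P = np.array([[p00, 1 - p11],
              [1 - p00, p11]])

# each column is a valid probability distribution
assert np.allclose(P.sum(axis=0), 1.0)

p_ideal = np.array([0.5, 0.5])
p_noisy = P @ p_ideal
print(p_noisy)  # [0.525 0.475] -- the contrast is washed out
```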
The difference relative to the ideal case above is that now an outcome $x' = 0$ does not necessarily imply that the post-measurement state is truly $\Pi_{0} \rho \Pi_{0} / p(x=0)$. Instead, the post-measurement state given a noisy outcome $x'$ must be
\begin{align}
\rho_{x'} & = \sum_{x\in \mathcal{O}} p(x|x') \rho_x \\
& = \sum_{x\in \mathcal{O}} p(x'|x)\frac{p(x)}{p(x')} \rho_x \\
& = \frac{1}{p(x')}\sum_{x\in \mathcal{O}} p(x'|x) \Pi_x \rho \Pi_x
\end{align}
where
\begin{align}
p(x') & = \sum_{x\in\mathcal{O}} p(x'|x) p(x) \\
& = \tr{\sum_{x\in \mathcal{O}} p(x'|x) \Pi_x \rho \Pi_x} \\
& = \tr{\rho \sum_{x\in \mathcal{O}} p(x'|x)\Pi_x} \\
& = \tr{\rho E_{x'}},
\end{align}
where we have exploited the cyclical property of the trace $\tr{ABC}=\tr{BCA}$ and the projection property $\Pi_x^2 = \Pi_x$. This has allowed us to derive the noisy outcome probabilities from a set of positive operators
$$
E_{x'} := \sum_{x\in \mathcal{O}} p(x'|x)\Pi_x \ge 0
$$
that must sum to 1:
$$
\sum_{x'\in\mathcal{O}'} E_{x'} = \sum_{x\in\mathcal{O}}\underbrace{\left[\sum_{x'\in\mathcal{O}'} p(x'|x)\right]}_{1}\Pi_x = \sum_{x\in\mathcal{O}}\Pi_x = 1.
$$
The above result is a type of generalized **Bayes' theorem** that is extremely useful for this type of (slightly) generalized measurement. The family of operators $\{E_{x'} \mid x' \in \mathcal{O}'\}$ whose expectations give the probabilities is called a **positive operator valued measure** (POVM). These operators are generally neither orthogonal nor valid projection operators, but they arise naturally in this scenario. This is not yet the most general type of measurement, but it will get us pretty far.
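To illustrate, the sketch below (again with hypothetical assignment probabilities) constructs the POVM elements $E_{x'}$ from the computational-basis projectors and checks positivity and completeness:
```python
import numpy as np

pi0, pi1 = np.diag([1.0, 0.0]), np.diag([0.0, 1.0])
p00, p11 = 0.95, 0.90  # hypothetical assignment probabilities

# E_{x'} = sum_x p(x'|x) Pi_x
E0 = p00 * pi0 + (1 - p11) * pi1
E1 = (1 - p00) * pi0 + p11 * pi1

assert np.allclose(E0 + E1, np.eye(2))        # completeness
assert np.all(np.linalg.eigvalsh(E0) >= 0)    # positivity
assert np.all(np.linalg.eigvalsh(E1) >= 0)

rho = np.diag([0.5, 0.5])  # maximally mixed state
print(np.trace(rho @ E0).real, np.trace(rho @ E1).real)  # noisy p(x')
```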
### How to model $T_1$ error
T1 type errors fall outside our framework so far, as they involve a scenario in which the _quantum state itself_ is corrupted during the measurement process, potentially erasing the pre-measurement information, as opposed to a loss of purely classical information. The most appropriate framework for describing this is that of measurement instruments, but for the practical purpose of arriving at a relatively simple description, we propose modeling it as a T1 damping Kraus map followed by the noisy readout process described above.
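A minimal sketch of this composite model, assuming a hypothetical decay probability `gamma` over the readout window: apply an amplitude-damping Kraus map to the pre-measurement state, then evaluate the bit-flip POVM from the previous section.
```python
import numpy as np

gamma = 0.25  # hypothetical probability of T1 decay during readout

# amplitude-damping Kraus operators
K0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - gamma)]])
K1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]])
assert np.allclose(K0.T @ K0 + K1.T @ K1, np.eye(2))  # trace preservation

rho = np.diag([0.0, 1.0])  # qubit enters the readout window in |1>
rho_damped = K0 @ rho @ K0.T + K1 @ rho @ K1.T

# noisy readout POVM element for outcome 1 (hypothetical p00, p11)
p00, p11 = 0.95, 0.90
E1 = (1 - p00) * np.diag([1.0, 0.0]) + p11 * np.diag([0.0, 1.0])
print(np.trace(rho_damped @ E1).real)  # ~0.69 instead of 0.90 without decay
```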
### Further reading
Chapter 3 of John Preskill's lecture notes http://www.theory.caltech.edu/people/preskill/ph229/notes/chap3.pdf
## How do I get started?
1. Come up with a good guess for your readout noise parameters $p(0|0)$ and $p(1|1)$; the off-diagonals then follow from the normalization of $P_{x'|x}$. If your assignment fidelity $F$ is given, and you assume that the classical bit-flip noise is roughly symmetric, then a good approximation is to set $p(0|0)=p(1|1)=F$ (a short sketch follows this list).
2. For your Quil program `p` and a qubit index `q`, call:
```
p.define_noisy_readout(q, p00, p11)
```
where you should replace `p00` and `p11` with the assumed probabilities.
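For instance, assuming a symmetric assignment fidelity (the value `F = 0.95` below is a placeholder):
```python
from pyquil.quil import Program
from pyquil.gates import X

F = 0.95  # assumed assignment fidelity
p = Program(X(0))
p.define_noisy_readout(0, p00=F, p11=F)  # off-diagonals 1 - F are implied
```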
### Estimate $P_{x'|x}$ yourself!
You can also run some simple experiments to estimate the assignment probability matrix directly from a QPU.
**Scroll down for some examples!**
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from pyquil.quil import Program, MEASURE, Pragma
from pyquil.api import QVMConnection
from pyquil.gates import I, X, RX, H, CNOT
from pyquil.noise import (estimate_bitstring_probs, correct_bitstring_probs,
bitstring_probs_to_z_moments, estimate_assignment_probs)
DARK_TEAL = '#48737F'
FUSCHIA = '#D6619E'
BEIGE = '#EAE8C6'
cxn = QVMConnection()
```
## Example 1: Rabi sequence with noisy readout
```python
%%time
# number of angles
num_theta = 101
# number of program executions
trials = 200
thetas = np.linspace(0, 2*np.pi, num_theta)
p00s = [1., 0.95, 0.9, 0.8]
results_rabi = np.zeros((num_theta, len(p00s)))
for jj, theta in enumerate(thetas):
for kk, p00 in enumerate(p00s):
cxn.random_seed = hash((jj, kk))
p = Program(RX(theta, 0))
ro = p.declare("ro")
# assume symmetric noise p11 = p00
p.define_noisy_readout(0, p00=p00, p11=p00)
p.measure(0, ro[0])
res = cxn.run(p, [0], trials=trials)
results_rabi[jj, kk] = np.sum(res)
```
```python
plt.figure(figsize=(14, 6))
for jj, (p00, c) in enumerate(zip(p00s, [DARK_TEAL, FUSCHIA, "k", "gray"])):
plt.plot(thetas, results_rabi[:, jj]/trials, c=c, label=r"$p(0|0)=p(1|1)={:g}$".format(p00))
plt.legend(loc="best")
plt.xlim(*thetas[[0,-1]])
plt.ylim(-.1, 1.1)
plt.grid(alpha=.5)
plt.xlabel(r"RX angle $\theta$ [radian]", size=16)
plt.ylabel(r"Excited state fraction $n_1/n_{\rm trials}$", size=16)
plt.title("Effect of classical readout noise on Rabi contrast.", size=18)
```
## Example 2: Estimate the assignment probabilities
### Estimate assignment probabilities for a perfect quantum computer
```python
estimate_assignment_probs(0, 1000, cxn, Program())
```
### Re-Estimate assignment probabilities for an imperfect quantum computer
```python
cxn.random_seed = None  # clear the fixed seed set in Example 1
header0 = Program().define_noisy_readout(0, .85, .95)
header1 = Program().define_noisy_readout(1, .8, .9)
header2 = Program().define_noisy_readout(2, .9, .85)
ap0 = estimate_assignment_probs(0, 100000, cxn, header0)
ap1 = estimate_assignment_probs(1, 100000, cxn, header1)
ap2 = estimate_assignment_probs(2, 100000, cxn, header2)
```
```python
print(ap0, ap1, ap2, sep="\n")
```
## Example 3: Use `pyquil.noise.correct_bitstring_probs` to correct for noisy readout
### 3a) Correcting the Rabi signal from above
```python
ap_last = np.array([[p00s[-1], 1 - p00s[-1]],
[1 - p00s[-1], p00s[-1]]])
corrected_last_result = [correct_bitstring_probs([1-p, p], [ap_last])[1] for p in results_rabi[:, -1] / trials]
```
```python
plt.figure(figsize=(14, 6))
for jj, (p00, c) in enumerate(zip(p00s, [DARK_TEAL, FUSCHIA, "k", "gray"])):
if jj not in [0, 3]:
continue
plt.plot(thetas, results_rabi[:, jj]/trials, c=c, label=r"$p(0|0)=p(1|1)={:g}$".format(p00), alpha=.3)
plt.plot(thetas, corrected_last_result, c="red", label=r"Corrected $p(0|0)=p(1|1)={:g}$".format(p00s[-1]))
plt.legend(loc="best")
plt.xlim(*thetas[[0,-1]])
plt.ylim(-.1, 1.1)
plt.grid(alpha=.5)
plt.xlabel(r"RX angle $\theta$ [radian]", size=16)
plt.ylabel(r"Excited state fraction $n_1/n_{\rm trials}$", size=16)
plt.title("Corrected contrast", size=18)
```
**We find that the corrected signal is fairly noisy (and sometimes exceeds the allowed interval $[0,1]$) due to the overall very small number of samples $n=200$.**
### 3b) In this example we will create a GHZ state $\frac{1}{\sqrt{2}}\left[\left|000\right\rangle + \left|111\right\rangle \right]$ and measure its outcome probabilities with and without the above noise model. We will then see how the Pauli-Z moments that indicate the qubit correlations are corrupted (and corrected) using our API.
```python
ghz_prog = Program(H(0), CNOT(0, 1), CNOT(1, 2),
MEASURE(0, 0), MEASURE(1, 1), MEASURE(2, 2))
print(ghz_prog)
results = cxn.run(ghz_prog, [0, 1, 2], trials=10000)
```
```python
header = header0 + header1 + header2
noisy_ghz = header + ghz_prog
print(noisy_ghz)
noisy_results = cxn.run(noisy_ghz, [0, 1, 2], trials=10000)
```
### Uncorrupted probability for $\left|000\right\rangle$ and $\left|111\right\rangle$
```python
probs = estimate_bitstring_probs(results)
probs[0, 0, 0], probs[1, 1, 1]
```
As expected the outcomes `000` and `111` each have roughly probability $1/2$.
### Corrupted probability for $\left|000\right\rangle$ and $\left|111\right\rangle$
```python
noisy_probs = estimate_bitstring_probs(noisy_results)
noisy_probs[0, 0, 0], noisy_probs[1, 1, 1]
```
The noise-corrupted outcome probabilities deviate significantly from their ideal values!
### Corrected probability for $\left|000\right\rangle$ and $\left|111\right\rangle$
```python
corrected_probs = correct_bitstring_probs(noisy_probs, [ap0, ap1, ap2])
corrected_probs[0, 0, 0], corrected_probs[1, 1, 1]
```
The corrected outcome probabilities are much closer to the ideal value.
### Estimate $\langle Z_0^{j} Z_1^{k} Z_2^{\ell}\rangle$ for $jkl=100, 010, 001$ from non-noisy data
*We expect these to all be very small*
```python
zmoments = bitstring_probs_to_z_moments(probs)
zmoments[1, 0, 0], zmoments[0, 1, 0], zmoments[0, 0, 1]
```
### Estimate $\langle Z_0^{j} Z_1^{k} Z_2^{\ell}\rangle$ for $jkl=110, 011, 101$ from non-noisy data
*We expect these to all be close to 1.*
```python
zmoments[1, 1, 0], zmoments[0, 1, 1], zmoments[1, 0, 1]
```
### Estimate $\langle Z_0^{j} Z_1^{k} Z_2^{\ell}\rangle$ for $jkl=100, 010, 001$ from noise-corrected data
```python
zmoments_corr = bitstring_probs_to_z_moments(corrected_probs)
zmoments_corr[1, 0, 0], zmoments_corr[0, 1, 0], zmoments_corr[0, 0, 1]
```
### Estimate $\langle Z_0^{j} Z_1^{k} Z_2^{\ell}\rangle$ for $jkl=110, 011, 101$ from noise-corrected data
```python
zmoments_corr[1, 1, 0], zmoments_corr[0, 1, 1], zmoments_corr[1, 0, 1]
```
##### Overall the correction can restore the contrast in our multi-qubit observables, though we also see that the correction can lead to slightly non-physical expectations. This effect is reduced the more samples we take.
|
{"hexsha": "31c8b2fb968b03a65e249bdbd9eb6d0b8b289698", "size": 19758, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "examples/ReadoutNoise.ipynb", "max_stars_repo_name": "oliverdutton/pyquil", "max_stars_repo_head_hexsha": "027a3f6aecbd8206baf39189a0183ad0f85c262b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-30T18:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-30T18:47:34.000Z", "max_issues_repo_path": "examples/ReadoutNoise.ipynb", "max_issues_repo_name": "abhayshivamtiwari/pyquil", "max_issues_repo_head_hexsha": "854bf41349393beeeedad7a4481797ad78ae36a5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/ReadoutNoise.ipynb", "max_forks_repo_name": "abhayshivamtiwari/pyquil", "max_forks_repo_head_hexsha": "854bf41349393beeeedad7a4481797ad78ae36a5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.439688716, "max_line_length": 565, "alphanum_fraction": 0.5957080676, "converted": true, "num_tokens": 4182}
|
theory Co_Snapshot
imports
Snapshot
Ordered_Resolution_Prover.Lazy_List_Chain
begin
section \<open>Extension to infinite traces\<close>
text \<open>The computation locale assumes that there already exists a known
final configuration $c'$ for the given initial configuration $c$ and trace $t$. However,
we can show that the snapshot algorithm must terminate correctly even if
the underlying computation itself does not terminate. We relax
the trace relation to allow for a potentially infinite number of ``intermediate'' events, and
show that the algorithm's correctness still holds when imposing the same constraints
as in the computation locale.
We use a preexisting theory of lazy list chains by Schlichtkrull, Blanchette,
Traytel and Waldmann~\<^cite>\<open>"Ordered_Resolution_Prover-AFP"\<close> to construct infinite traces.\<close>
primrec ltake where
"ltake 0 t = []"
| "ltake (Suc i) t = (case t of LNil \<Rightarrow> [] | LCons x t' \<Rightarrow> x # ltake i t')"
primrec ldrop where
"ldrop 0 t = t"
| "ldrop (Suc i) t = (case t of LNil \<Rightarrow> LNil | LCons x t' \<Rightarrow> ldrop i t')"
lemma ltake_LNil[simp]: "ltake i LNil = []"
by (induct i) auto
lemma ltake_LCons: "0 < i \<Longrightarrow> ltake i (LCons x t) = x # ltake (i - 1) t"
by (induct i) auto
lemma take_ltake: "i \<le> j \<Longrightarrow> take i (ltake j xs) = ltake i xs"
by (induct j arbitrary: i xs) (auto simp: le_Suc_eq take_Cons' ltake_LCons split: llist.splits if_splits)
lemma nth_ltake [simp]: "i < min n (llength xs) \<Longrightarrow> (ltake n xs) ! i = lnth xs i"
by (induct n arbitrary: i xs)
(auto simp: nth_Cons' gr0_conv_Suc eSuc_enat[symmetric] split: llist.splits)
lemma length_ltake[simp]: "length (ltake i xs) = (case llength xs of \<infinity> \<Rightarrow> i | enat m \<Rightarrow> min i m)"
by (induct i arbitrary: xs)
(auto simp: zero_enat_def[symmetric] eSuc_enat split: llist.splits enat.splits)
lemma ltake_prepend:
"ltake i (prepend xs t) = (if i \<le> length xs then take i xs else xs @ ltake (i - length xs) t)"
proof (induct i arbitrary: xs t)
case 0
then show ?case
by (cases xs) auto
next
case (Suc i)
then show ?case
by (cases xs) auto
qed
lemma prepend_ltake_ldrop_id: "prepend (ltake i t) (ldrop i t) = t"
by (induct i arbitrary: t) (auto split: llist.splits)
context distributed_system
begin
coinductive cotrace where
cotr_init: "cotrace c LNil"
| cotr_step: "\<lbrakk> c \<turnstile> ev \<mapsto> c'; cotrace c' t \<rbrakk> \<Longrightarrow> cotrace c (LCons ev t)"
lemma cotrace_trace: "cotrace c t \<Longrightarrow> \<exists>!c'. trace c (ltake i t) c'"
proof (induct i arbitrary: c t)
case (Suc i)
then show ?case
proof (cases t)
case (LCons ev t')
with Suc(2) obtain c' where "c \<turnstile> ev \<mapsto> c'" "cotrace c' t'"
by (auto elim: cotrace.cases)
with Suc(1)[OF \<open>cotrace c' t'\<close>] show ?thesis
by (auto simp: LCons elim: trace.intros(2) elim: trace.cases trace_and_start_determines_end)
qed (auto intro: trace.intros elim: trace.cases)
qed (auto simp: zero_enat_def[symmetric] intro: trace.intros elim: trace.cases)
definition cos where "cos c t i = s c (ltake i t) i"
lemma cotrace_trace_cos: "cotrace c t \<Longrightarrow> trace c (ltake i t) (cos c t i)"
unfolding cos_def s_def
by (subst take_ltake, auto dest!: cotrace_trace[of _ _ i] elim!: theI')
lemma s_0[simp]: "s c t 0 = c"
unfolding s_def
by (auto intro!: the_equality[where P = "trace c []"] trace.intros elim: trace.cases)
lemma s_chop: "i \<le> length t \<Longrightarrow> s c t i = s c (take i t) i"
unfolding s_def
by auto
lemma cotrace_prepend: "trace c t c' \<Longrightarrow> cotrace c' u \<Longrightarrow> cotrace c (prepend t u)"
by (induct c t c' rule: trace.induct) (auto intro: cotrace.intros)
lemma s_Cons: "\<exists>c''. trace c' xs c'' \<Longrightarrow> c \<turnstile> ev \<mapsto> c' \<Longrightarrow> s c (ev # xs) (Suc i) = s c' xs i"
by (smt exists_trace_for_any_i take_Suc_Cons tr_step trace_and_start_determines_end)
lemma cotrace_ldrop: "cotrace c t \<Longrightarrow> i \<le> llength t \<Longrightarrow> cotrace (cos c t i) (ldrop i t)"
proof (induct i arbitrary: c t)
case (Suc i)
then show ?case
proof (cases t)
case (LCons ev t')
with Suc(2) obtain c' where "c \<turnstile> ev \<mapsto> c'" "cotrace c' t'"
by (auto elim: cotrace.cases)
with Suc(1)[OF \<open>cotrace c' t'\<close>] Suc(3) show ?thesis
by (auto simp: LCons cos_def eSuc_enat[symmetric] s_chop[symmetric] s_Cons[OF cotrace_trace'])
qed (auto intro: cotrace.intros)
qed (auto simp: zero_enat_def[symmetric] cos_def intro: cotrace.intros)
end
locale cocomputation = distributed_system +
fixes
init :: "('a, 'b, 'c) configuration"
assumes
finite_channels:
"finite {i. \<exists>p q. channel i = Some (p, q)}" and
strongly_connected_raw:
"\<forall>p q. (p \<noteq> q) \<longrightarrow>
(tranclp (\<lambda>p q. (\<exists>i. channel i = Some (p, q)))) p q" and
at_least_two_processes:
"card (UNIV :: 'a set) > 1" and
finite_processes:
"finite (UNIV :: 'a set)" and
no_initial_Marker:
"\<forall>i. (\<exists>p q. channel i = Some (p, q))
\<longrightarrow> Marker \<notin> set (msgs init i)" and
no_msgs_if_no_channel:
"\<forall>i. channel i = None \<longrightarrow> msgs init i = []" and
no_initial_process_snapshot:
"\<forall>p. \<not> has_snapshotted init p" and
no_initial_channel_snapshot:
"\<forall>i. channel_snapshot init i = ([], NotStarted)" and
valid: "\<exists>t. cotrace init t" and
l1: "\<forall>t i cid. cotrace init t
\<and> Marker \<in> set (msgs (cos init t i) cid)
\<longrightarrow> (\<exists>j \<le> llength t. j \<ge> i \<and> Marker \<notin> set (msgs (cos init t j) cid))" and
l2: "\<forall>t p. cotrace init t
\<longrightarrow> (\<exists>i \<le> llength t. has_snapshotted (cos init t i) p)"
begin
abbreviation coS where "coS \<equiv> cos init"
definition "some_snapshot t p = (SOME i. has_snapshotted (coS t i) p \<and> i \<le> llength t)"
lemma has_snapshotted:
"cotrace init t \<Longrightarrow> has_snapshotted (coS t (some_snapshot t p)) p \<and> some_snapshot t p \<le> llength t"
unfolding some_snapshot_def by (rule someI_ex) (auto dest!: l2[rule_format])
lemma cotrace_cos: "cotrace init t \<Longrightarrow> j < llength t \<Longrightarrow>
(coS t j) \<turnstile> lnth t j \<mapsto> (coS t (Suc j))"
apply (drule cotrace_trace_cos[of _ _ "Suc j"])
apply (drule step_Suc[rotated, of _ _ _ "j"])
apply (auto split: enat.splits llist.splits) []
apply (auto simp: s_chop[of j "_ # ltake j _"] cos_def nth_Cons' ltake_LCons lnth_LCons'
take_Cons' take_ltake
split: llist.splits enat.splits if_splits elim: order.strict_trans2[rotated])
apply (subst (asm) s_chop[of j "_ # ltake j _"])
apply (auto simp: take_Cons' take_ltake split: enat.splits)
done
lemma snapshot_stable:
"cotrace init t \<Longrightarrow> i \<le> j \<Longrightarrow> has_snapshotted (coS t i) p \<Longrightarrow> has_snapshotted (coS t j) p"
apply (drule cotrace_trace_cos[of _ _ j])
unfolding cos_def
by (metis exists_trace_for_any_i_j order_refl s_def snapshot_stable take_ltake)
lemma no_markers_if_all_snapshotted:
"cotrace init t \<Longrightarrow> i \<le> j \<Longrightarrow> \<forall>p. has_snapshotted (coS t i) p \<Longrightarrow>
Marker \<notin> set (msgs (coS t i) c) \<Longrightarrow> Marker \<notin> set (msgs (coS t j) c)"
apply (drule cotrace_trace_cos[of _ _ j])
unfolding cos_def
by (metis exists_trace_for_any_i_j no_markers_if_all_snapshotted order_refl s_def take_ltake)
lemma cotrace_all_have_snapshotted:
assumes "cotrace init t"
shows "\<exists>i \<le> llength t. \<forall>p. has_snapshotted (coS t i) p"
proof -
let ?i = "Max (range (some_snapshot t))"
show ?thesis
using has_snapshotted[OF assms] snapshot_stable[OF assms, of "some_snapshot t _" ?i _]
apply (intro exI[of _ ?i])
apply (auto simp: finite_processes)
apply (cases "llength t"; auto simp: )
apply (subst Max_le_iff)
apply (auto simp: finite_processes)
apply blast
done
qed
lemma no_messages_if_no_channel:
assumes "cotrace init t"
shows "channel cid = None \<Longrightarrow> msgs (coS t i) cid = []"
using no_messages_introduced_if_no_channel[OF assms[THEN cotrace_trace_cos, of i] no_msgs_if_no_channel, of cid i]
by (auto simp: cos_def)
lemma cotrace_all_have_snapshotted_and_no_markers:
assumes "cotrace init t"
shows "\<exists>i \<le> llength t. (\<forall>p. has_snapshotted (coS t i) p) \<and>
(\<forall>c. Marker \<notin> set (msgs (coS t i) c))"
proof -
from cotrace_all_have_snapshotted[OF assms] obtain j :: nat where
j: "j \<le> llength t" "\<forall>p. has_snapshotted (coS t j) p" by blast
from j(2) have *: "has_snapshotted (coS t k) p" if "k \<ge> j" for k p
using snapshot_stable[OF assms, of j k p] that by auto
define C where "C = {c. Marker \<in> set (msgs (coS t j) c)}"
have "finite C"
using no_messages_if_no_channel[OF assms, of _ j] unfolding C_def
by (intro finite_subset[OF _ finite_channels]) fastforce
define pick where "pick = (\<lambda>c. SOME k. k \<le> llength t \<and> k \<ge> j \<and> Marker \<notin> set (msgs (coS t k) c))"
{ fix c
assume "c \<in> C"
then have "\<exists>k \<le> llength t. k \<ge> j \<and> Marker \<notin> set (msgs (coS t k) c)"
using l1[rule_format, of t j c] assms unfolding C_def by blast
then have "pick c \<le> llength t \<and> pick c \<ge> j \<and> Marker \<notin> set (msgs (coS t (pick c)) c)"
unfolding pick_def
by (rule someI_ex)
} note pick = conjunct1[OF this] conjunct1[OF conjunct2[OF this]] conjunct2[OF conjunct2[OF this]]
show ?thesis
proof (cases "C = {}")
case True
with j show ?thesis
by (auto intro!: exI[of _ j] simp: C_def)
next
define m where "m = Max (pick ` C)"
case False
with \<open>finite C\<close> have m: "m \<in> pick ` C" "\<forall>x \<in> pick ` C. m \<ge> x"
unfolding m_def by auto
then have "j \<le> m" using pick(2) by auto
from *[OF \<open>j \<le> m\<close>] have "Marker \<notin> set (msgs (coS t m) c)" for c
proof (cases "c \<in> C")
case True
then show ?thesis
using no_markers_if_all_snapshotted[OF assms, of "pick c" m c] pick[of c] m *
by auto
next
case False
then show ?thesis
using no_markers_if_all_snapshotted[OF assms \<open>j \<le> m\<close> j(2), of c]
by (auto simp: C_def)
qed
with *[OF \<open>j \<le> m\<close>] m pick show ?thesis by auto
qed
qed
context
fixes t
assumes cotrace: "cotrace init t"
begin
definition "final_i \<equiv>
(SOME i. i \<le> llength t \<and> (\<forall>p. has_snapshotted (coS t i) p) \<and> (\<forall>c. Marker \<notin> set (msgs (coS t i) c)))"
definition final where
"final = coS t final_i"
lemma final_i: "final_i \<le> llength t" "(\<forall>p. has_snapshotted (coS t final_i) p)" "(\<forall>c. Marker \<notin> set (msgs (coS t final_i) c))"
unfolding final_i_def
by (rule someI2_ex[OF cotrace_all_have_snapshotted_and_no_markers[OF cotrace]]; auto intro: cotrace_trace_cos[OF cotrace])+
lemma final: "\<exists>t. trace init t final" "(\<forall>p. has_snapshotted final p)" "(\<forall>c. Marker \<notin> set (msgs final c))"
unfolding final_def
by (rule cotrace_trace_cos[OF cotrace] final_i exI)+
interpretation computation channel trans send recv init final
apply standard
apply (rule finite_channels)
apply (rule strongly_connected_raw)
apply (rule at_least_two_processes)
apply (rule finite_processes)
apply (rule no_initial_Marker)
apply (rule no_msgs_if_no_channel)
apply (rule no_initial_process_snapshot)
apply (rule no_initial_channel_snapshot)
apply (rule final(1))
apply (intro allI impI)
subgoal for t i cid
apply (rule exI[of _ "length t"])
apply (metis exists_trace_for_any_i final(3) le_cases take_all trace_and_start_determines_end)
done
apply (intro allI impI)
subgoal for t p
apply (rule exI[of _ "length t"])
apply (metis exists_trace_for_any_i final(2) order_refl take_all trace_and_start_determines_end)
done
done
definition coperm where
"coperm l r = (\<exists>xs ys z. mset xs = mset ys \<and> l = prepend xs z \<and> r = prepend ys z)"
lemma copermIL: "mset ys = mset xs \<Longrightarrow> t = prepend xs z \<Longrightarrow> coperm (prepend ys z) t"
unfolding coperm_def by auto
lemma snapshot_algorithm_is_cocorrect:
"\<exists>t' i. cotrace init t' \<and> coperm t' t \<and> state_equal_to_snapshot (coS t' i) final \<and> i \<le> final_i"
proof -
define prefix where "prefix = ltake final_i t"
define suffix where "suffix = ldrop final_i t"
have [simp]: "prepend prefix suffix = t"
unfolding prefix_def suffix_def prepend_ltake_ldrop_id ..
have [simp]: "cotrace final suffix"
unfolding suffix_def final_def
by (auto simp: cotrace final_i(1) intro!: cotrace_ldrop)
from cotrace_trace_cos[OF cotrace] have "trace init prefix final"
unfolding final_def prefix_def by blast
with snapshot_algorithm_is_correct obtain prefix' i where
"trace init prefix' final" "mset prefix' = mset prefix" "state_equal_to_snapshot (S prefix' i) final"
"i \<le> length prefix'"
by blast
moreover from \<open>mset prefix' = mset prefix\<close> \<open>i \<le> length prefix'\<close> have "i \<le> final_i"
by (auto dest!: mset_eq_length simp: prefix_def split: enat.splits)
ultimately show ?thesis
by (intro exI[of _ "prepend prefix' suffix"] exI[of _ i])
(auto simp: cos_def ltake_prepend s_chop[symmetric] intro!: cotrace_prepend elim!: copermIL)
qed
end
print_statement snapshot_algorithm_is_cocorrect
end
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Chandy_Lamport/Co_Snapshot.thy"}
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import time
import numpy as np
import yaml
import pickle
from collections import OrderedDict
# torch
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import shutil
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import random
import inspect
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
# class LabelSmoothingCrossEntropy(nn.Module):
# def __init__(self):
# super(LabelSmoothingCrossEntropy, self).__init__()
# def forward(self, x, target, smoothing=0.1):
# confidence = 1. - smoothing
# logprobs = F.log_softmax(x, dim=-1)
# nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
# nll_loss = nll_loss.squeeze(1)
# smooth_loss = -logprobs.mean(dim=-1)
# loss = confidence * nll_loss + smoothing * smooth_loss
# return loss.mean()
def init_seed(_):
torch.cuda.manual_seed_all(1)
torch.manual_seed(1)
np.random.seed(1)
random.seed(1)
# torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_parser():
# parameter priority: command line > config > default
parser = argparse.ArgumentParser(
description='Decoupling Graph Convolution Network with DropGraph Module')
parser.add_argument(
'--work-dir',
default='./work_dir/temp',
help='the work folder for storing results')
parser.add_argument('-model_saved_name', default='')
parser.add_argument('-Experiment_name', default='')
parser.add_argument(
'--config',
default='./config/nturgbd-cross-view/test_bone.yaml',
help='path to the configuration file')
# processor
parser.add_argument(
'--phase', default='train', help='must be train or test')
parser.add_argument(
'--save-score',
type=str2bool,
default=False,
        help='if true, the classification score will be stored')
# visulize and debug
parser.add_argument(
'--seed', type=int, default=1, help='random seed for pytorch')
parser.add_argument(
'--log-interval',
type=int,
default=100,
help='the interval for printing messages (#iteration)')
parser.add_argument(
'--save-interval',
type=int,
default=2,
help='the interval for storing models (#iteration)')
parser.add_argument(
'--eval-interval',
type=int,
default=5,
help='the interval for evaluating models (#iteration)')
parser.add_argument(
'--print-log',
type=str2bool,
default=True,
help='print logging or not')
parser.add_argument(
'--show-topk',
type=int,
default=[1, 5],
nargs='+',
help='which Top K accuracy will be shown')
# feeder
parser.add_argument(
'--feeder', default='feeder.feeder', help='data loader will be used')
parser.add_argument(
'--num-worker',
type=int,
default=32,
help='the number of worker for data loader')
parser.add_argument(
'--train-feeder-args',
default=dict(),
help='the arguments of data loader for training')
parser.add_argument(
'--test-feeder-args',
default=dict(),
help='the arguments of data loader for test')
# model
parser.add_argument('--model', default=None, help='the model will be used')
parser.add_argument(
'--model-args',
type=dict,
default=dict(),
help='the arguments of model')
parser.add_argument(
'--weights',
default=None,
help='the weights for network initialization')
parser.add_argument(
'--ignore-weights',
type=str,
default=[],
nargs='+',
help='the name of weights which will be ignored in the initialization')
# optim
parser.add_argument(
'--base-lr', type=float, default=0.01, help='initial learning rate')
parser.add_argument(
'--step',
type=int,
default=[20, 40, 60],
nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument(
'--device',
type=int,
default=0,
nargs='+',
help='the indexes of GPUs for training or testing')
parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
parser.add_argument(
'--nesterov', type=str2bool, default=False, help='use nesterov or not')
parser.add_argument(
'--batch-size', type=int, default=256, help='training batch size')
parser.add_argument(
'--test-batch-size', type=int, default=256, help='test batch size')
parser.add_argument(
'--start-epoch',
type=int,
default=0,
help='start training from which epoch')
parser.add_argument(
'--num-epoch',
type=int,
default=80,
help='stop training in which epoch')
parser.add_argument(
'--weight-decay',
type=float,
default=0.0005,
help='weight decay for optimizer')
parser.add_argument(
'--keep_rate',
type=float,
default=0.9,
help='keep probability for drop')
parser.add_argument(
'--groups',
type=int,
default=8,
help='decouple groups')
parser.add_argument('--only_train_part', default=True)
parser.add_argument('--only_train_epoch', default=0)
parser.add_argument('--warm_up_epoch', default=0)
return parser
class Processor():
"""
        Processor for Skeleton-based Action Recognition
"""
def __init__(self, arg):
arg.model_saved_name = "./save_models/" + arg.Experiment_name
arg.work_dir = "./work_dir/" + arg.Experiment_name
self.arg = arg
self.save_arg()
if arg.phase == 'train':
if not arg.train_feeder_args['debug']:
if os.path.isdir(arg.model_saved_name):
print('log_dir: ', arg.model_saved_name, 'already exist')
answer = input('delete it? y/n:')
if answer == 'y':
shutil.rmtree(arg.model_saved_name)
print('Dir removed: ', arg.model_saved_name)
                        input(
                            'Refresh the tensorboard page, then press any key to continue')
else:
print('Dir not removed: ', arg.model_saved_name)
self.global_step = 0
self.load_model()
self.load_optimizer()
self.load_data()
self.lr = self.arg.base_lr
self.best_acc = 0
def load_data(self):
Feeder = import_class(self.arg.feeder)
self.data_loader = dict()
if self.arg.phase == 'train':
self.data_loader['train'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.train_feeder_args),
batch_size=self.arg.batch_size,
shuffle=True,
num_workers=self.arg.num_worker,
drop_last=True,
worker_init_fn=init_seed)
self.data_loader['test'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.test_feeder_args),
batch_size=self.arg.test_batch_size,
shuffle=False,
num_workers=self.arg.num_worker,
drop_last=False,
worker_init_fn=init_seed)
def load_model(self):
output_device = self.arg.device[0] if type(
self.arg.device) is list else self.arg.device
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
self.model = Model(**self.arg.model_args).cuda(output_device)
# print(self.model)
self.loss = nn.CrossEntropyLoss().cuda(output_device)
# self.loss = LabelSmoothingCrossEntropy().cuda(output_device)
if self.arg.weights:
self.print_log('Load weights from {}.'.format(self.arg.weights))
if '.pkl' in self.arg.weights:
with open(self.arg.weights, 'r') as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict(
[[k.split('module.')[-1],
v.cuda(output_device)] for k, v in weights.items()])
for w in self.arg.ignore_weights:
if weights.pop(w, None) is not None:
                    self.print_log('Successfully Removed Weights: {}.'.format(w))
else:
self.print_log('Can Not Remove Weights: {}.'.format(w))
try:
self.model.load_state_dict(weights)
            except Exception:  # fall back to partial loading on key mismatch
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print('Can not find these weights:')
for d in diff:
print(' ' + d)
state.update(weights)
self.model.load_state_dict(state)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
self.model = nn.DataParallel(
self.model,
device_ids=self.arg.device,
output_device=output_device)
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
params_dict = dict(self.model.named_parameters())
params = []
for key, value in params_dict.items():
decay_mult = 0.0 if 'bias' in key else 1.0
lr_mult = 1.0
                weight_decay = 1e-4  # hard-coded here; the --weight-decay flag only takes effect for Adam
params += [{'params': value, 'lr': self.arg.base_lr, 'lr_mult': lr_mult,
'decay_mult': decay_mult, 'weight_decay': weight_decay}]
self.optimizer = optim.SGD(
params,
momentum=0.9,
nesterov=self.arg.nesterov)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
self.lr_scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1,
patience=10, verbose=True,
threshold=1e-4, threshold_mode='rel',
cooldown=0)
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
os.makedirs(self.arg.work_dir + '/eval_results')
with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + ' ] ' + str
print(str)
if self.arg.print_log:
with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, save_model=False):
self.model.train()
self.print_log('Training epoch: {}'.format(epoch + 1))
loader = self.data_loader['train']
self.adjust_learning_rate(epoch)
loss_value = []
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader)
if epoch >= self.arg.only_train_epoch:
print('only train part, require grad')
for key, value in self.model.named_parameters():
if 'DecoupleA' in key:
value.requires_grad = True
print(key + '-require grad')
else:
print('only train part, do not require grad')
for key, value in self.model.named_parameters():
if 'DecoupleA' in key:
value.requires_grad = False
print(key + '-not require grad')
for batch_idx, (data, label, index) in enumerate(process):
self.global_step += 1
# get data
data = Variable(data.float().cuda(
self.output_device), requires_grad=False)
label = Variable(label.long().cuda(
self.output_device), requires_grad=False)
timer['dataloader'] += self.split_time()
# forward
if epoch < 100:
keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0
else:
keep_prob = self.arg.keep_rate
output = self.model(data, keep_prob)
if isinstance(output, tuple):
output, l1 = output
l1 = l1.mean()
else:
l1 = 0
loss = self.loss(output, label) + l1
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_value.append(loss.data)
timer['model'] += self.split_time()
value, predict_label = torch.max(output.data, 1)
acc = torch.mean((predict_label == label.data).float())
self.lr = self.optimizer.param_groups[0]['lr']
if self.global_step % self.arg.log_interval == 0:
self.print_log(
'\tBatch({}/{}) done. Loss: {:.4f} lr:{:.6f}'.format(
batch_idx, len(loader), loss.data, self.lr))
timer['statistics'] += self.split_time()
# statistics of time consumption and loss
proportion = {
k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
for k, v in timer.items()
}
state_dict = self.model.state_dict()
weights = OrderedDict([[k.split('module.')[-1],
v.cpu()] for k, v in state_dict.items()])
torch.save(weights, self.arg.model_saved_name +
'-' + str(epoch) + '.pt')
def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None):
if wrong_file is not None:
f_w = open(wrong_file, 'w')
if result_file is not None:
f_r = open(result_file, 'w')
self.model.eval()
with torch.no_grad():
self.print_log('Eval epoch: {}'.format(epoch + 1))
for ln in loader_name:
loss_value = []
score_frag = []
right_num_total = 0
total_num = 0
loss_total = 0
step = 0
process = tqdm(self.data_loader[ln])
for batch_idx, (data, label, index) in enumerate(process):
data = Variable(
data.float().cuda(self.output_device),
requires_grad=False)
label = Variable(
label.long().cuda(self.output_device),
requires_grad=False)
with torch.no_grad():
output = self.model(data)
if isinstance(output, tuple):
output, l1 = output
l1 = l1.mean()
else:
l1 = 0
loss = self.loss(output, label)
score_frag.append(output.data.cpu().numpy())
loss_value.append(loss.data.cpu().numpy())
_, predict_label = torch.max(output.data, 1)
step += 1
if wrong_file is not None or result_file is not None:
predict = list(predict_label.cpu().numpy())
true = list(label.data.cpu().numpy())
for i, x in enumerate(predict):
if result_file is not None:
f_r.write(str(x) + ',' + str(true[i]) + '\n')
if x != true[i] and wrong_file is not None:
f_w.write(str(index[i]) + ',' +
str(x) + ',' + str(true[i]) + '\n')
score = np.concatenate(score_frag)
                if 'UCLA' in self.arg.Experiment_name:
self.data_loader[ln].dataset.sample_name = np.arange(
len(score))
accuracy = self.data_loader[ln].dataset.top_k(score, 1)
if accuracy > self.best_acc:
self.best_acc = accuracy
score_dict = dict(
zip(self.data_loader[ln].dataset.sample_name, score))
                    with open('./work_dir/' + self.arg.Experiment_name + '/eval_results/best_acc.pkl', 'wb') as f:
pickle.dump(score_dict, f)
print('Eval Accuracy: ', accuracy,
' model: ', self.arg.model_saved_name)
score_dict = dict(
zip(self.data_loader[ln].dataset.sample_name, score))
self.print_log('\tMean {} loss of {} batches: {}.'.format(
ln, len(self.data_loader[ln]), np.mean(loss_value)))
for k in self.arg.show_topk:
self.print_log('\tTop{}: {:.2f}%'.format(
k, 100 * self.data_loader[ln].dataset.top_k(score, k)))
            with open('./work_dir/' + self.arg.Experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl', 'wb') as f:
pickle.dump(score_dict, f)
return np.mean(loss_value)
def start(self):
if self.arg.phase == 'train':
self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
self.global_step = self.arg.start_epoch * \
len(self.data_loader['train']) / self.arg.batch_size
for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
save_model = ((epoch + 1) % self.arg.save_interval == 0) or (
epoch + 1 == self.arg.num_epoch)
self.train(epoch, save_model=save_model)
val_loss = self.eval(
epoch,
save_score=self.arg.save_score,
loader_name=['test'])
# self.lr_scheduler.step(val_loss)
print('best accuracy: ', self.best_acc,
' model_name: ', self.arg.model_saved_name)
elif self.arg.phase == 'test':
if not self.arg.test_feeder_args['debug']:
wf = self.arg.model_saved_name + '_wrong.txt'
rf = self.arg.model_saved_name + '_right.txt'
else:
wf = rf = None
if self.arg.weights is None:
                raise ValueError('Please specify --weights.')
self.arg.print_log = False
self.print_log('Model: {}.'.format(self.arg.model))
self.print_log('Weights: {}.'.format(self.arg.weights))
self.eval(epoch=self.arg.start_epoch, save_score=self.arg.save_score,
loader_name=['test'], wrong_file=wf, result_file=rf)
self.print_log('Done.\n')
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def import_class(name):
components = name.split('.')
    mod = __import__(components[0])  # import the top-level package
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
if __name__ == '__main__':
parser = get_parser()
# load arg form config file
p = parser.parse_args()
if p.config is not None:
with open(p.config, 'r') as f:
            default_arg = yaml.load(f, Loader=yaml.FullLoader)
key = vars(p).keys()
for k in default_arg.keys():
if k not in key:
print('WRONG ARG: {}'.format(k))
assert (k in key)
parser.set_defaults(**default_arg)
arg = parser.parse_args()
init_seed(0)
processor = Processor(arg)
processor.start()
|
{"hexsha": "60dcb00f5f5bfc4a3d31840682957a07cf46810e", "size": 21631, "ext": "py", "lang": "Python", "max_stars_repo_path": "SL-GCN/main.py", "max_stars_repo_name": "SnorlaxSE/CVPR21Chal-SLR", "max_stars_repo_head_hexsha": "680f911131ca03559fb06d578f38d006f87aa478", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 85, "max_stars_repo_stars_event_min_datetime": "2021-03-17T06:17:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:52:37.000Z", "max_issues_repo_path": "SL-GCN/main.py", "max_issues_repo_name": "SnorlaxSE/CVPR21Chal-SLR", "max_issues_repo_head_hexsha": "680f911131ca03559fb06d578f38d006f87aa478", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2021-03-21T18:41:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T08:16:47.000Z", "max_forks_repo_path": "SL-GCN/main.py", "max_forks_repo_name": "SnorlaxSE/CVPR21Chal-SLR", "max_forks_repo_head_hexsha": "680f911131ca03559fb06d578f38d006f87aa478", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2021-03-20T09:04:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T02:29:06.000Z", "avg_line_length": 36.6627118644, "max_line_length": 138, "alphanum_fraction": 0.5408441588, "include": true, "reason": "import numpy", "num_tokens": 4549}
|
[STATEMENT]
lemma (in weak_lower_semilattice) weak_meet_assoc:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "(x \<sqinter> y) \<sqinter> z .= x \<sqinter> (y \<sqinter> z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
have "(x \<sqinter> y) \<sqinter> z = z \<sqinter> (x \<sqinter> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z = z \<sqinter> (x \<sqinter> y)
[PROOF STEP]
by (simp only: meet_comm)
[PROOF STATE]
proof (state)
this:
x \<sqinter> y \<sqinter> z = z \<sqinter> (x \<sqinter> y)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
x \<sqinter> y \<sqinter> z = z \<sqinter> (x \<sqinter> y)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
from L
[PROOF STATE]
proof (chain)
picking this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
[PROOF STEP]
have "... .= \<Sqinter> {z, x, y}"
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
goal (1 subgoal):
1. z \<sqinter> (x \<sqinter> y) .= \<Sqinter>{z, x, y}
[PROOF STEP]
by (simp add: weak_meet_assoc_lemma)
[PROOF STATE]
proof (state)
this:
z \<sqinter> (x \<sqinter> y) .= \<Sqinter>{z, x, y}
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
z \<sqinter> (x \<sqinter> y) .= \<Sqinter>{z, x, y}
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
from L
[PROOF STATE]
proof (chain)
picking this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
[PROOF STEP]
have "... = \<Sqinter> {x, y, z}"
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
goal (1 subgoal):
1. \<Sqinter>{z, x, y} = \<Sqinter>{x, y, z}
[PROOF STEP]
by (simp add: insert_commute)
[PROOF STATE]
proof (state)
this:
\<Sqinter>{z, x, y} = \<Sqinter>{x, y, z}
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<Sqinter>{z, x, y} = \<Sqinter>{x, y, z}
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
from L
[PROOF STATE]
proof (chain)
picking this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
[PROOF STEP]
have "... .= x \<sqinter> (y \<sqinter> z)"
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier L
y \<in> carrier L
z \<in> carrier L
goal (1 subgoal):
1. \<Sqinter>{x, y, z} .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
by (simp add: weak_meet_assoc_lemma [symmetric])
[PROOF STATE]
proof (state)
this:
\<Sqinter>{x, y, z} .= x \<sqinter> (y \<sqinter> z)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>x \<sqinter> y \<sqinter> z \<in> carrier L; \<Sqinter>{x, y, z} \<in> carrier L; x \<sqinter> (y \<sqinter> z) \<in> carrier L\<rbrakk> \<Longrightarrow> x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<sqinter> y \<sqinter> z \<in> carrier L; \<Sqinter>{x, y, z} \<in> carrier L; x \<sqinter> (y \<sqinter> z) \<in> carrier L\<rbrakk> \<Longrightarrow> x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
goal (1 subgoal):
1. x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
[PROOF STEP]
by (simp add: L)
[PROOF STATE]
proof (state)
this:
x \<sqinter> y \<sqinter> z .= x \<sqinter> (y \<sqinter> z)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1760, "file": null, "length": 19}
|
[STATEMENT]
lemma wfT_e_eq:
fixes ce::ce
assumes "\<Theta> ; \<B> ; \<Gamma> \<turnstile>\<^sub>w\<^sub>f ce : b" and "atom z \<sharp> \<Gamma>"
shows "\<Theta>; \<B>; \<Gamma> \<turnstile>\<^sub>w\<^sub>f \<lbrace> z : b | CE_val (V_var z) == ce \<rbrace>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma> \<turnstile>\<^sub>w\<^sub>f \<lbrace> z : b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce \<rbrace>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
2. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
3. \<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
[PROOF STEP]
show "\<Theta>; \<B> \<turnstile>\<^sub>w\<^sub>f b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
using wfX_wfB assms
[PROOF STATE]
proof (prove)
using this:
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?v : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?c \<Longrightarrow> True
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<Longrightarrow> True
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<tau> \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f b_of ?\<tau>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?ts \<Longrightarrow> True
\<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<Longrightarrow> True
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b \<Longrightarrow> True
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?ce : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?td \<Longrightarrow> True
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> \<turnstile>\<^sub>w\<^sub>f ?e : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> \<turnstile>\<^sub>w\<^sub>f ?s : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> ; ?tid ; ?dc ; ?t \<turnstile>\<^sub>w\<^sub>f ?cs : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> ; ?tid ; ?dclist \<turnstile>\<^sub>w\<^sub>f ?css : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b
?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi> \<Longrightarrow> True
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<Longrightarrow> True
?\<Theta> ; ?\<Phi> \<turnstile>\<^sub>w\<^sub>f ?ftq \<Longrightarrow> True
\<lbrakk> ?\<Theta> ; ?\<Phi> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?ft ; ?\<B> |\<subseteq>| ?\<B>'\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<Phi> ; ?\<B>' \<turnstile>\<^sub>w\<^sub>f ?ft
\<Theta> ; \<B> ; \<Gamma> \<turnstile>\<^sub>w\<^sub>f ce : b
atom z \<sharp> \<Gamma>
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
goal (2 subgoals):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
2. \<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
[PROOF STEP]
show " atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
[PROOF STEP]
using assms wfG_fresh_x wfX_wfY
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile>\<^sub>w\<^sub>f ce : b
atom z \<sharp> \<Gamma>
\<lbrakk> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> ; atom ?z \<sharp> ?\<Gamma>\<rbrakk> \<Longrightarrow> atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>)
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?v : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?c \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<tau> \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f b_of ?\<tau>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?ts \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
\<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<Longrightarrow> True
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?ce : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?td \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> \<turnstile>\<^sub>w\<^sub>f ?e : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi>
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> \<turnstile>\<^sub>w\<^sub>f ?s : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi>
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> ; ?tid ; ?dc ; ?t \<turnstile>\<^sub>w\<^sub>f ?cs : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi>
?\<Theta> ; ?\<Phi> ; ?\<B> ; ?\<Gamma> ; ?\<Delta> ; ?tid ; ?dclist \<turnstile>\<^sub>w\<^sub>f ?css : ?b \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi>
?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi> \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<Delta> \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<Phi> \<turnstile>\<^sub>w\<^sub>f ?ftq \<Longrightarrow> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
?\<Theta> ; ?\<Phi> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?ft \<Longrightarrow> ?\<Theta> \<turnstile>\<^sub>w\<^sub>f ?\<Phi> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
[PROOF STEP]
show "\<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f CE_val (V_var z) == ce "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
[PROOF STEP]
using wfTI wfC_e_eq assms wfTI
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>); ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b ; ?\<Theta> ; ?\<B> ; (?z, ?b, TRUE) #\<^sub>\<Gamma> ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?c \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f \<lbrace> ?z : ?b | ?c \<rbrace>
\<lbrakk> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?ce : ?b ; atom ?x \<sharp> ?\<Gamma>\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; (?x, ?b, TRUE) #\<^sub>\<Gamma> ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ ?x ]\<^sup>v ]\<^sup>c\<^sup>e == ?ce
\<Theta> ; \<B> ; \<Gamma> \<turnstile>\<^sub>w\<^sub>f ce : b
atom z \<sharp> \<Gamma>
\<lbrakk>atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>); ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b ; ?\<Theta> ; ?\<B> ; (?z, ?b, TRUE) #\<^sub>\<Gamma> ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?c \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f \<lbrace> ?z : ?b | ?c \<rbrace>
goal (1 subgoal):
1. \<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; (z, b, TRUE) #\<^sub>\<Gamma> \<Gamma> \<turnstile>\<^sub>w\<^sub>f [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == ce
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4222, "file": "MiniSail_WellformedL", "length": 11}
|
import oneflow as flow
import numpy as np
import time
import argparse
import torch
import string
from models.rnn_model_pytorch import RNN_PYTORCH
from models.rnn_model import RNN
# shared hyperparameters
n_hidden = 5000
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
n_categories = 25600
learning_rate = 0.0005
def _parse_args():
    parser = argparse.ArgumentParser(description="flags for comparing oneflow and pytorch RNN speed")
return parser.parse_args()
def letterToIndex(letter):
return all_letters.find(letter)
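# Illustrative sketch (not in the original): one-hot encode a single letter
# using the same flow.Tensor / flow.nn.init API that the word loop below uses.
def letterToTensor(letter):
    tensor = flow.Tensor(1, n_letters)
    flow.nn.init.zeros_(tensor)
    tensor[0, letterToIndex(letter)] = 1
    return tensor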
def main(args):
rnn_module = RNN(n_letters, n_hidden, n_categories)
    # Fake data, used only for speed-testing purposes
test_word = "Depeng"
category_tensor = flow.Tensor([1], dtype=flow.int64)
line_tensor = flow.Tensor(len(test_word), 1, n_letters)
flow.nn.init.zeros_(line_tensor)
for li, letter in enumerate(test_word):
line_tensor[li, 0, letterToIndex(letter)] = 1
criterion = flow.nn.NLLLoss()
category_tensor_gpu = category_tensor.to("cuda")
line_tensor_gpu = line_tensor.to("cuda")
rnn_module.to("cuda")
criterion.to("cuda")
of_sgd = flow.optim.SGD(rnn_module.parameters(), lr=learning_rate)
bp_iters = 50
for_time = 0.0
bp_time = 0.0
update_time = 0.0
print("start oneflow training loop....")
start_t = time.time()
for i in range(bp_iters):
s_t = time.time()
hidden = rnn_module.initHidden()
for j in range(line_tensor_gpu.size()[0]):
output, hidden = rnn_module(line_tensor_gpu[j], hidden)
loss = criterion(output, category_tensor_gpu)
for_time += time.time() - s_t
s_t = time.time()
loss.backward()
bp_time += time.time() - s_t
s_t = time.time()
of_sgd.step()
of_sgd.zero_grad()
update_time += time.time() - s_t
of_loss = loss.numpy()
end_t = time.time()
print("oneflow traning loop avg time : {}".format((end_t - start_t) / bp_iters))
print("forward avg time : {}".format(for_time / bp_iters))
print("backward avg time : {}".format(bp_time / bp_iters))
print("update parameters avg time : {}".format(update_time / bp_iters))
#####################################################################################################
# # pytorch RNN
torch_rnn_module = RNN_PYTORCH(n_letters, n_hidden, n_categories)
category_tensor = torch.tensor([1], dtype=torch.long)
line_tensor = torch.zeros(len(test_word), 1, n_letters)
for li, letter in enumerate(test_word):
line_tensor[li][0][letterToIndex(letter)] = 1
criterion = torch.nn.NLLLoss()
category_tensor_gpu = category_tensor.to("cuda")
line_tensor_gpu = line_tensor.to("cuda")
torch_rnn_module.to("cuda")
criterion.to("cuda")
for_time = 0.0
bp_time = 0.0
update_time = 0.0
print("start pytorch training loop....")
start_t = time.time()
for i in range(bp_iters):
s_t = time.time()
hidden = torch_rnn_module.initHidden()
        for j in range(line_tensor_gpu.size()[0]):
            output, hidden = torch_rnn_module(line_tensor_gpu[j], hidden)
loss = criterion(output, category_tensor_gpu)
for_time += time.time() - s_t
s_t = time.time()
loss.backward()
bp_time += time.time() - s_t
s_t = time.time()
for p in torch_rnn_module.parameters():
p.data.add_(p.grad.data, alpha=-learning_rate)
torch_rnn_module.zero_grad()
update_time += time.time() - s_t
torch_loss = loss.cpu().detach().numpy()
end_t = time.time()
print("pytorch traning loop avg time : {}".format((end_t - start_t) / bp_iters))
print("forward avg time : {}".format(for_time / bp_iters))
print("backward avg time : {}".format(bp_time / bp_iters))
print("update parameters avg time : {}".format(update_time / bp_iters))
if __name__ == "__main__":
args = _parse_args()
main(args)
|
{"hexsha": "16f16f773c569cccbbd912aac7068cdcf18bbf16", "size": 4009, "ext": "py", "lang": "Python", "max_stars_repo_path": "rnn/compare_oneflow_and_pytorch_rnn_speed.py", "max_stars_repo_name": "ClimBin/models", "max_stars_repo_head_hexsha": "10989b361732ee5b93f5595f672fd7d0c18e8f93", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2021-06-03T09:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:21:48.000Z", "max_issues_repo_path": "rnn/compare_oneflow_and_pytorch_rnn_speed.py", "max_issues_repo_name": "ClimBin/models", "max_issues_repo_head_hexsha": "10989b361732ee5b93f5595f672fd7d0c18e8f93", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2021-05-31T10:34:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T03:44:58.000Z", "max_forks_repo_path": "rnn/compare_oneflow_and_pytorch_rnn_speed.py", "max_forks_repo_name": "ClimBin/models", "max_forks_repo_head_hexsha": "10989b361732ee5b93f5595f672fd7d0c18e8f93", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2021-07-04T03:13:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:30:47.000Z", "avg_line_length": 30.6030534351, "max_line_length": 105, "alphanum_fraction": 0.6360688451, "include": true, "reason": "import numpy", "num_tokens": 993}
|
import numpy as np
# Physical constants in cgs units (rounded to one significant figure)
c = 3e10  # speed of light [cm/s]
G = 7e-8  # gravitational constant [cm^3 g^-1 s^-2]
# Calculate period given semimajor axis and total mass
def find_period(a, M, use_earth_units=False):
if use_earth_units:
return np.sqrt(a**3 / M)
else:
return np.sqrt(4 * np.pi**2 * a**3 / (G * M))
# Calculate total mass given semimajor axis and period
def find_mass(a, P, use_earth_units=False):
if use_earth_units:
return a**3 / P**2
else:
return 4 * np.pi**2 * a**3 / (G * P**2)
# Calculate semimajor axis given period and total mass
def find_semimajor_axis(P, M, use_earth_units=False):
if use_earth_units:
return np.power(M * P**2, 1/3)
else:
return np.power(G * M * P**2 / (4 * np.pi**2), 1/3)
print('Calculating the orbital radius of our Earth...')
orbital_radius_cm = find_semimajor_axis(60*60*24*365.25, 2e33)
orbital_radius_AU = find_semimajor_axis(1, 1, use_earth_units=True)
print('%s AU is equal to %2.2e cm.' % (orbital_radius_AU, orbital_radius_cm))
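# Illustrative check (not part of the original script): Kepler's third law
# round-trips, so feeding the derived semimajor axis back into find_period
# should recover a 1-year period in Earth units.
period_yr = find_period(orbital_radius_AU, 1, use_earth_units=True)
print('Round-trip period: %.4f yr (expect 1.0000)' % period_yr)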
|
{"hexsha": "af19ff7bc39ed2a66638b5e84518d2b9503e6e60", "size": 1015, "ext": "py", "lang": "Python", "max_stars_repo_path": "initial_scripts/kepler.py", "max_stars_repo_name": "bbrzycki/argparse-tutorial", "max_stars_repo_head_hexsha": "84e7dc28df2a64ca7d859bd331dc5471d3600351", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "initial_scripts/kepler.py", "max_issues_repo_name": "bbrzycki/argparse-tutorial", "max_issues_repo_head_hexsha": "84e7dc28df2a64ca7d859bd331dc5471d3600351", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "initial_scripts/kepler.py", "max_forks_repo_name": "bbrzycki/argparse-tutorial", "max_forks_repo_head_hexsha": "84e7dc28df2a64ca7d859bd331dc5471d3600351", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8529411765, "max_line_length": 77, "alphanum_fraction": 0.6660098522, "include": true, "reason": "import numpy", "num_tokens": 325}
|
import math
from scipy import stats
import numpy as np
listy=[1.58,1.57,1.54,1.51,1.51,1.51,1.5099,1.5,1.48,1.44,1.44,1.43,1.44,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.455,1.445,1.44,1.44,1.43,1.46,1.46,1.46,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.44,1.44,1.44,1.45,1.4477,1.44,1.45,1.45,1.45,1.45,1.45,1.45,1.4499,1.44,1.45,1.45,1.44,1.44,1.44,1.45,1.4496,1.44,1.44,1.44,1.44,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.41,1.4,1.395,1.39,1.39,1.39,1.4,1.41,1.41,1.41,1.41,1.41,1.41,1.42,1.42,1.42,1.42,1.41,1.41,1.41,1.41,1.41,1.41,1.4099,1.41,1.41,1.41,1.4,1.4,1.4,1.4,1.4,1.3999,1.4,1.4,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.3899,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.41,1.41,1.41,1.41,1.41,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.3999,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.3994,1.3881,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.3899,1.3899,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.41,1.41,1.41,1.41,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.4197,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.43,1.43,1.429,1.43,1.43,1.43,1.4277,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.4268,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.425,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.4295,1.43,1.43,1.43,1.4277,1.43,1.43,1.43,1.425,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.44,1.45,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.45, 0]
listx = [idx for idx,val in enumerate(listy)]
i = 4
principal = 6000
shares = 0
slope2 = -1
purchaseprice = 0
while i < len(listy) - 3:
    price1 = float(listy[i])
    # earlier point-to-point slope approach, kept for reference:
    # price2 = float(listy[i + 1])
    # price3 = float(listy[i + 2])
    # slope1 = float(price2) - float(price1)
    # slope2 = float(price3) - float(price2)
    # fit a line to the previous 4 prices; its slope sign drives buy/sell
    da_y = np.array(listy[i - 4:i])
    da_x = np.array(listx[i - 4:i])
    slope1 = slope2
    slope2, intercept, r_value, p_value, std_err = stats.linregress(da_x, da_y)
    if slope1 <= 0 and slope2 > 0:
        howmany = math.trunc(principal / price1)
        print(f"buy {howmany} @ {price1}")
        shares = howmany
        principal = principal - (howmany * price1)
        purchaseprice = price1
    elif slope2 <= 0 and slope1 > 0 and purchaseprice != price1:
        howmany = shares
        print(f"sell {howmany} @ {price1}")
        principal += shares * price1
        shares = 0
    i = i + 1
print(f"principal:{principal} shares:{shares}")
print(f"total principal: {principal + (shares * price1)}@{price1}")
|
{"hexsha": "794141e9fea50d267c030e401fcf94d7135ebe0d", "size": 3004, "ext": "py", "lang": "Python", "max_stars_repo_path": "money.py", "max_stars_repo_name": "Dannyaffleck/stock", "max_stars_repo_head_hexsha": "9c6c62b798e4e3306a7bf4a185a0b4fca37cdd33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "money.py", "max_issues_repo_name": "Dannyaffleck/stock", "max_issues_repo_head_hexsha": "9c6c62b798e4e3306a7bf4a185a0b4fca37cdd33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "money.py", "max_forks_repo_name": "Dannyaffleck/stock", "max_forks_repo_head_hexsha": "9c6c62b798e4e3306a7bf4a185a0b4fca37cdd33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-08T19:59:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-08T19:59:38.000Z", "avg_line_length": 61.306122449, "max_line_length": 1947, "alphanum_fraction": 0.6141810919, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1916}
|
#include "soar.hpp"
#include <Eigen/Dense>
#include <fmt/format.h>
#include <vector>
using namespace Eigen;
Soar::Soar(const Ref<const MatrixXd> &matA, const Ref<const MatrixXd> &matB)
: ndim_(matA.rows()), matA_(matA), matB_(matB),
u_(VectorXd::Random(ndim_)) {}
MatrixXd Soar::compute(int n) {
VectorXd q = u_ / u_.norm();
VectorXd f = VectorXd::Zero(ndim_);
// initialize
  MatrixXd matQ = MatrixXd::Zero(ndim_, n);
  MatrixXd matP = MatrixXd::Zero(ndim_, n);
  MatrixXd matT = MatrixXd::Zero(n, n);
std::vector<int> deflation;
matQ.col(0) = q;
for (int i = 0; i < n - 1; ++i) {
    // Recurrence relation
    VectorXd r = matA_ * matQ.col(i) + matB_ * matP.col(i);
    double norm_init = r.norm();
    MatrixXd basis = matQ.leftCols(i + 1);
    // Modified Gram-Schmidt procedure
    // First orthogonalization
    VectorXd coef = VectorXd::Zero(i + 1);
    for (int j = 0; j < i + 1; ++j) {
      // Projection coefficients and projection subtraction
      VectorXd v = basis.col(j);
      coef(j) = v.dot(r);
      r -= coef(j) * v;
    }
    // Saving coefficients
    matT.col(i).head(i + 1) = coef;
// Reorthogonalization, if needed.
if (r.norm() < 0.7 * norm_init) {
// Second Gram Schmidt orthogonalization
for (int j = 0; j < i + 1; ++j) {
VectorXd v = basis.col(j);
coef(j) = v.dot(r);
r -= coef(j) * v;
}
matT.col(i).head(i + 1) += coef;
}
double r_norm = r.norm();
matT(i + 1, i) = r_norm;
// check for breakdown
if (r_norm > tol_) {
matQ.col(i + 1) = r / r_norm;
VectorXd e_i = VectorXd::Zero(i + 1);
e_i(i) = 1.0;
// VectorXd v_aux = matT.block(1, 0, i + 1, i + 1).ldlt().solve(e_i);
VectorXd v_aux =
matT.block(1, 0, i + 1, i + 1).colPivHouseholderQr().solve(e_i);
f = matQ.leftCols(i + 1) * v_aux;
} else {
// Deflation reset
matT(i + 1, i) = 1.0;
matQ.col(i + 1) = VectorXd::Zero(ndim_);
VectorXd e_i = VectorXd::Zero(i + 1);
e_i(i) = 1.0;
// VectorXd v_aux = matT.block(1, 0, i + 1, i + 1).ldlt().solve(e_i);
VectorXd v_aux =
matT.block(1, 0, i + 1, i + 1).colPivHouseholderQr().solve(e_i);
f = matQ.leftCols(i + 1) * v_aux;
// Deflation verification
      VectorXd f_proj = f; // keeps f itself when the deflation list is empty
for (int k : deflation) {
VectorXd p = matP.col(k);
double coef_f = p.dot(f) / p.dot(p);
f_proj = f - coef_f * p;
}
if (f_proj.norm() > tol_) {
deflation.push_back(i);
} else {
fmt::print("SOAR lucky breakdown.\n");
break;
}
}
matP.col(i + 1) = f;
}
fmt::print("zero1: {:9.3f}\n",
(matQ.transpose() * matQ - MatrixXd::Identity(n, n)).norm());
VectorXd e_n = VectorXd::Zero(n - 1);
e_n(n - 2) = 1.0;
VectorXd r = matA_ * matQ.col(n - 2) + matB_ * matP.col(n - 2);
for (int i = 0; i < n - 1; ++i) {
double coef = matQ.col(i).dot(r);
r -= coef * matQ.col(i);
}
double nm = (matA_ * matQ.leftCols(n - 1) + matB_ * matP.leftCols(n - 1) -
matQ.leftCols(n - 1) * matT.topLeftCorner(n - 1, n - 1) -
r * e_n.transpose())
.norm();
fmt::print("zero2: {:9.3f}\n", nm);
return matQ;
}
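// Illustrative usage sketch (not in the original): build a small random
// quadratic pencil and extract an n-dimensional second-order Krylov basis;
// only the constructor and compute() signatures above are assumed.
// int main() {
//   Eigen::MatrixXd matA = Eigen::MatrixXd::Random(8, 8);
//   Eigen::MatrixXd matB = Eigen::MatrixXd::Random(8, 8);
//   Soar soar(matA, matB);
//   Eigen::MatrixXd matQ = soar.compute(4); // 8 x 4, orthonormal columns
//   return 0;
// }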
|
{"hexsha": "d418f4ee71b27ba91ff4031efdb4f621e6ac9cb4", "size": 3286, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/soar.cc", "max_stars_repo_name": "pan3rock/QuadEigsSOAR", "max_stars_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/soar.cc", "max_issues_repo_name": "pan3rock/QuadEigsSOAR", "max_issues_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/soar.cc", "max_forks_repo_name": "pan3rock/QuadEigsSOAR", "max_forks_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6036036036, "max_line_length": 76, "alphanum_fraction": 0.5359099209, "num_tokens": 1144}
|
#include "image.h"
#include "debug.h"
#include <boost/static_assert.hpp>
#include <png.h>
using namespace std;
BOOST_STATIC_ASSERT(sizeof(unsigned long) == 4);
Image imageFromPng(const string &fname) {
png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
CHECK(png_ptr);
png_infop info_ptr = png_create_info_struct(png_ptr);
CHECK(info_ptr);
if(setjmp(png_jmpbuf(png_ptr))) {
CHECK(0);
}
FILE *fp = fopen(fname.c_str(), "rb");
CHECK(fp);
png_init_io(png_ptr, fp);
png_read_info(png_ptr, info_ptr);
if(info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
png_set_palette_to_rgb(png_ptr);
if(info_ptr->color_type == PNG_COLOR_TYPE_GRAY && info_ptr->bit_depth < 8)
png_set_gray_1_2_4_to_8(png_ptr);
if(info_ptr->bit_depth == 16)
png_set_strip_16(png_ptr);
if(info_ptr->bit_depth < 8)
png_set_packing(png_ptr);
if(info_ptr->color_type == PNG_COLOR_TYPE_RGB)
png_set_filler(png_ptr, 0xff, PNG_FILLER_AFTER);
if(info_ptr->color_type == PNG_COLOR_TYPE_GRAY || info_ptr->color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
png_set_gray_to_rgb(png_ptr);
png_read_update_info(png_ptr, info_ptr);
CHECK(info_ptr->bit_depth == 8);
CHECK(info_ptr->color_type == PNG_COLOR_TYPE_RGBA || info_ptr->color_type == PNG_COLOR_TYPE_RGB);
Image img;
img.x = info_ptr->width;
img.y = info_ptr->height;
img.c.resize(img.y, vector<unsigned long>(img.x));
vector<unsigned long *> ul;
for(int i = 0; i < img.c.size(); i++)
ul.push_back(&img.c[i][0]);
png_read_image(png_ptr, (png_byte**)&ul[0]);
fclose(fp);
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return img;
}
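// Illustrative usage sketch (not in the original): load a PNG and read back
// its dimensions; Image is assumed to expose x, y and pixel rows c as above.
// Image img = imageFromPng("sprite.png");
// // img.c[row][col] holds one RGBA pixel packed into an unsigned long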
|
{"hexsha": "9886bfb7aa9f9ef3d46e75224d14b0935a872d37", "size": 1695, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "image.cpp", "max_stars_repo_name": "zorbathut/d-net", "max_stars_repo_head_hexsha": "61f610ca71270c6a95cf57dc3acaeab8559a234b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-11-02T06:47:52.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-02T06:47:52.000Z", "max_issues_repo_path": "image.cpp", "max_issues_repo_name": "zorbathut/d-net", "max_issues_repo_head_hexsha": "61f610ca71270c6a95cf57dc3acaeab8559a234b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image.cpp", "max_forks_repo_name": "zorbathut/d-net", "max_forks_repo_head_hexsha": "61f610ca71270c6a95cf57dc3acaeab8559a234b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2985074627, "max_line_length": 102, "alphanum_fraction": 0.7067846608, "num_tokens": 480}
|
import base64
import datetime
from io import BytesIO
import pandas as pd
import numpy as np
import quandl
import matplotlib.pyplot as plt
from dateutil import tz
from matplotlib import animation
import mpl_toolkits.mplot3d.axes3d as p3
from pandas.plotting import register_matplotlib_converters
from mpl_toolkits.mplot3d import Axes3D
register_matplotlib_converters()
def time_judge(times, data, move_type, timeperiod):
while True:
if timeperiod == 'short':
timeduration = data.index
elif timeperiod == 'long':
timeduration = data['Date']
if times in timeduration or np.any(times == timeduration):
return str(times)
else:
b = datetime.datetime.strptime(times, "%Y-%m-%d")
if move_type == 'back':
c = b + datetime.timedelta(days=-1)
times = c.strftime("%Y-%m-%d")
elif move_type == 'forward':
c = b + datetime.timedelta(days=1)
times = c.strftime("%Y-%m-%d")
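# Illustrative note (not in the original): time_judge walks a date until it
# hits a trading day present in the data; e.g. if '2020-01-04' (a Saturday)
# is absent from the index, time_judge('2020-01-04', golddata, 'back',
# 'short') returns the prior trading day '2020-01-03'.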
def getorigintime():
tz_sh = tz.gettz('Asia/Shanghai')
now = datetime.datetime.now(tz=tz_sh)
return now
def gettime():
return getorigintime().strftime('%Y-%m-%d')
def getdata():
golddata = quandl.get("SHFE/AUZ2020", authtoken="EDHKCFxMS-fA8rLYvvef", start_date="2019-11-18",
end_date=gettime())
return golddata
def getdata_l():
data = pd.read_excel('' + 'alldata.xlsx')
return data
def getcurrentdata():
golddata = getdata()
currentdate = gettime()
try:
currentdata = golddata.loc[str(currentdate), ['Open', 'Close', 'High', 'Low', 'Settle']].values
except Exception:
currentdata = []
for i in range(1, 10):
currentdate = (getorigintime() - datetime.timedelta(days=i)).strftime('%Y-%m-%d')
try:
currentdata = golddata.loc[str(currentdate), ['Open', 'Close', 'High', 'Low', 'Settle']].values
except Exception:
pass
if len(currentdata) > 0:
break
return currentdate, currentdata.tolist()
else:
return currentdate, currentdata.tolist()
def plot_price_trend(time, name):
golddata = getdata()
currenttime_ymd = str(gettime())
start = time_judge(time, golddata, 'forward', 'short')
end = time_judge(currenttime_ymd, golddata, 'back', 'short')
data = golddata.loc[start:end, ['Open', 'Close', 'High', 'Low', 'Settle']]
x = data.index
y_open = data['Open'].values
y_close = data['Close'].values
y_high = data['High'].values
y_low = data['Low'].values
y_settle = data['Settle'].values
    plt.figure(dpi=300)
    plt.title(name, color='Navy', fontsize='large', fontweight='bold')
# border of axis x and y
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('Navy')
ax.spines['left'].set_color('Navy')
ax.spines['right'].set_color('none')
plt.plot(x, y_open, label="Open Price")
plt.plot(x, y_close, label="Close Price")
plt.plot(x, y_high, label="High Price", ls='--')
plt.plot(x, y_low, label="Low Price", ls='--')
plt.plot(x, y_settle, label="Settle Price", marker='.')
# change axis value for longer than 1 month
if name == '2months' or name == '3months':
x_display = []
for index, value in enumerate(x):
if index % 7 == 0:
x_display.append(value.strftime('%m-%d'))
else:
x_display.append('')
else:
x_display = []
for index, value in enumerate(x):
x_display.append(value.strftime('%m-%d'))
# axis x and y
plt.xticks(x, x_display, color='Navy', rotation='45')
plt.yticks(color='Navy')
plt.legend()
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.png'
# plt.savefig(saveplace, transparent=True)
# use ascii save and load png
# put this in html :<embed id="pic0" src="data:image/png;base64,{{pic_1}}" />
buf = BytesIO()
plt.savefig(buf, transparent=True, format='png')
data = base64.b64encode(buf.getbuffer()).decode("ascii")
return data, name
def write_xdisplay(name, x, i):
if name:
x_display = []
for index, value in enumerate(x):
if index % i == 0:
x_display.append(value.strftime('%Y-%m'))
else:
x_display.append('')
else:
x_display = []
for index, value in enumerate(x):
x_display.append(value.strftime('%m-%d'))
return x_display
def plot_price_trend_l(time, name):
golddata = getdata_l()
start1 = time_judge(time, golddata, 'forward', 'long')
# end = time_judge('2019-12-16', golddata, 'back', 'long')
start = pd.Timestamp(start1)
start64 = np.datetime64(start)
end = datetime.datetime(2019, 12, 16)
end64 = np.datetime64(end)
df = golddata.loc[:, ['Date', 'Open', 'Close', 'High', 'Low', 'Settle']]
data = df[(df['Date'] >= start64) & (df['Date'] <= end64)]
x = data['Date']
y_open = data['Open'].values
y_close = data['Close'].values
y_high = data['High'].values
y_low = data['Low'].values
y_settle = data['Settle'].values
    plt.figure(dpi=300)
    plt.title(name, color='Navy', fontsize='large', fontweight='bold')
# plt.figure(figsize=(10,5))
# border of axis x and y
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('Navy')
ax.spines['left'].set_color('Navy')
ax.spines['right'].set_color('none')
plt.plot(x, y_open, label="Open Price")
plt.plot(x, y_close, label="Close Price")
plt.plot(x, y_high, label="High Price", ls='--')
plt.plot(x, y_low, label="Low Price", ls='--')
plt.plot(x, y_settle, label="Settle Price", marker='.')
# change axis value for longer than 1 month
if name == '6months':
x_display = write_xdisplay(name, x, 30)
elif name == '1year':
x_display = write_xdisplay(name, x, 120)
elif name == '2years':
x_display = write_xdisplay(name, x, 180)
elif name == '3years':
x_display = write_xdisplay(name, x, 360)
elif name == '5years':
x_display = write_xdisplay(name, x, 720)
elif name == '10years':
x_display = write_xdisplay(name, x, 1440)
elif name == '12years':
x_display = write_xdisplay(name, x, 1440)
# axis x and y
plt.xticks(x, x_display, color='Navy', rotation='45')
plt.yticks(color='Navy')
plt.legend()
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.png'
# plt.savefig(saveplace, transparent=True)
# use ascii save and load png
# put this in html :<embed id="pic0" src="data:image/png;base64,{{pic_1}}" />
buf = BytesIO()
plt.savefig(buf, transparent=True, format='png')
data = base64.b64encode(buf.getbuffer()).decode("ascii")
return data, name
def plot_price_table(time, name):
golddata = getdata()
currenttime_ymd = str(gettime())
start = time_judge(time, golddata, 'forward', 'short')
end = time_judge(currenttime_ymd, golddata, 'back', 'short')
data = golddata.loc[start:end, ['Open', 'Close', 'High', 'Low', 'Settle']]
plt.figure()
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
plt.xticks([])
plt.yticks([])
col_labels = ['Open', 'Close', 'High', 'Low', 'Settle']
row_labels = data.index.strftime('%m-%d')
table_vals = data.values.tolist()
cc_col = ['none' for i in range(len(col_labels))]
cc = [cc_col for i in range(len(row_labels))]
cc_row = ['none' for i in range(len(row_labels))]
plt.table(cellText=table_vals, rowLabels=row_labels, colLabels=col_labels, loc='center', cellColours=cc,
rowColours=cc_row, colColours=cc_col)
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.png'
# plt.savefig(saveplace, transparent=True)
buf = BytesIO()
plt.savefig(buf, transparent=True, format='png')
data = base64.b64encode(buf.getbuffer()).decode("ascii")
return data, name
def plot_3D(name):
golddata = getdata()
data = golddata.loc[:, ['Settle']]
data2 = golddata.loc[:, ['Volume']]
fig = plt.figure()
ax = fig.gca(projection='3d')
y = data.values.tolist()
y_new = [float(v) for i in y for v in i]
y = y_new
z = data2.values.tolist()
z_new = [float(v) for i in z for v in i]
z = z_new
x = [i for i in range(len(y))]
ax.plot(x, y, zs=0, zdir='z', label='curve in (x,y)', color='Gold')
ax.scatter(xs=x, zs=z, ys=y, zdir='z', label='points in (x,y,z)', c='Gold')
ax.legend()
ax.title.set_color('Navy')
ax.w_xaxis.set_pane_color((0.2, 0.2, 0.2, 1.0))
ax.w_yaxis.set_pane_color((0.2, 0.2, 0.2, 1.0))
ax.w_zaxis.set_pane_color((0.25, 0.25, 0.25, 1.0))
ax.set_xlabel('Days')
ax.set_ylabel('Settle Price')
ax.set_zlabel('Volume')
ax.view_init(elev=35, azim=-45)
plt.xticks(color='Navy')
plt.yticks(color='Navy')
ax.tick_params(axis='z', colors='Navy')
ax.xaxis.label.set_color('Navy')
ax.yaxis.label.set_color('Navy')
ax.zaxis.label.set_color('Navy')
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.png'
# plt.savefig(saveplace, transparent=True)
buf = BytesIO()
plt.savefig(buf, transparent=True, format='png')
data = base64.b64encode(buf.getbuffer()).decode("ascii")
return data, name
def plot_animation(name):
golddata = getdata()
data = golddata.loc[:, ['Settle']]
data2 = golddata.loc[:, ['High']]
y = [float(v) for i in data.values.tolist() for v in i]
x = [i for i in range(len(y))]
# moving average for 3 days
y_new = [(y[i] + y[i + 1] + y[i + 2]) / 3 if 0 < i < len(y) - 3 else np.NaN for i in range(len(y) - 2)]
x_new = [i for i in range(len(y))]
x_new.pop(0)
x_new.pop(-1)
# head-tail line
# x1, y1, x2, y2 = x[0], y[0], x[-1], y[-1]
# for i in range(len(y)):
# (y[i]+y[i+1]+y[i+2])/3
# def yy(x):
# return (x - x1) / (x2 - x1) * (y2 - y1) + y1
# yyy = [yy(index) for index in x]
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel('Days')
ax.set_ylabel('Settle Price')
ax.xaxis.label.set_color('#0028FF')
ax.yaxis.label.set_color('#0028FF')
line, = ax.plot(x, y, color='#0028FF', label='Settle Price')
line2, = ax.plot(x_new, y_new, color='#9B6A12', label='Moving Average(3days)')
ax.legend()
text_pt = plt.text(4, 0.8, '', fontsize=10, color='#0028FF')
point_ani, = plt.plot(x[0], y[0], "ro", color='#0028FF')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('#0028FF')
ax.spines['left'].set_color('#0028FF')
ax.spines['right'].set_color('none')
plt.xticks(color='#0028FF')
plt.yticks(color='#0028FF')
ax1 = plt.gca()
# ax1.patch.set_facecolor("black")
xdata = []
ydata = []
def init(): # only required for blitting to give a clean slate.
line.set_ydata(y[0])
line.set_xdata(x[0])
return line,
def animate(i):
xdata.append(x[i])
ydata.append(y[i])
line.set_data(xdata, ydata)
text_pt.set_position((x[i], y[i]))
text_pt.set_text("x=%.3f,\n y=%.3f" % (x[i], y[i]))
point_ani.set_data(x[i], y[i])
point_ani.set_marker("o")
point_ani.set_markersize(5)
return line, point_ani, text_pt
ani = animation.FuncAnimation(
fig, animate, init_func=init, interval=120, blit=True, save_count=len(y))
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.gif'
# ani.save(saveplace, savefig_kwargs={'transparent': True}, writer='imagemagick')
# plt.savefig(saveplace, transparent=True)
# return ani.to_html5_video()
return ani.to_jshtml(), name
def plot_diy(time, name, *datatype):
columns = list(datatype)
golddata = getdata()
currenttime_ymd = str(gettime())
start = time_judge(time, golddata, 'forward', 'short')
end = time_judge(currenttime_ymd, golddata, 'back', 'short')
data = golddata.loc[start:end, columns]
x = data.index
    plt.figure(dpi=300)
    plt.title(name, color='Navy', fontsize='large', fontweight='bold')
# border of axis x and y
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('Navy')
ax.spines['left'].set_color('Navy')
ax.spines['right'].set_color('none')
# change axis value for longer than 1 month
delta = datetime.datetime.strptime(end, "%Y-%m-%d") - datetime.datetime.strptime(start, "%Y-%m-%d")
if delta.days > 30:
x_display = []
for index, value in enumerate(x):
if index % 7 == 0:
x_display.append(value.strftime('%m-%d'))
else:
x_display.append('')
else:
x_display = []
for index, value in enumerate(x):
x_display.append(value.strftime('%m-%d'))
# diy plot
for i in columns:
if i == 'Settle':
plt.plot(x, data[i].values, label=i + ' Price', marker='.')
elif i == 'High' or i == 'Low':
plt.plot(x, data[i].values, label=i + ' Price', ls='--')
else:
plt.plot(x, data[i].values, label=i + ' Price')
# axis x and y
plt.xticks(x, x_display, color='Navy', rotation='45')
plt.yticks(color='Navy')
plt.legend()
# pwd = os.path.dirname(os.path.dirname(__file__))
# saveplace = pwd + '/static/pfas/img/' + name + '.png'
# plt.savefig(saveplace, transparent=True)
# use ascii save and load png
# put this in html :<embed id="pic0" src="data:image/png;base64,{{pic_1}}" />
buf = BytesIO()
plt.savefig(buf, transparent=True, format='png')
data = base64.b64encode(buf.getbuffer()).decode("ascii")
return data
if __name__ == '__main__':
pass
# dt = getdata_l()
# print(dt['Date'])
# recordtime = datetime.datetime.strptime('2019-12-16', "%Y-%m-%d")
# sixmonths = (recordtime - datetime.timedelta(days=180)).strftime('%Y-%m-%d')
# plot_price_trend_l(sixmonths, '6months')
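    # Illustrative sketch (not in the original): render the one-month trend
    # chart and embed it in HTML, assuming quandl access as configured in
    # getdata() above.
    # one_month_ago = (getorigintime() - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
    # pic, name = plot_price_trend(one_month_ago, '1month')
    # html = '<embed src="data:image/png;base64,%s" />' % pic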
|
{"hexsha": "ca63116563b501252b64598a4175e0324ce7aa88", "size": 14472, "ext": "py", "lang": "Python", "max_stars_repo_path": "Flask/Flask_Template/flask_goldanalysis_template.py", "max_stars_repo_name": "YizheZhang-Ervin/EZDjango", "max_stars_repo_head_hexsha": "ae140d9743ab03e59fc1b385cbbfcd5a6941426d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-02T06:02:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T06:02:57.000Z", "max_issues_repo_path": "Flask/Flask_Template/flask_goldanalysis_template.py", "max_issues_repo_name": "YizheZhang-Ervin/FullStack_WebFramework", "max_issues_repo_head_hexsha": "ae140d9743ab03e59fc1b385cbbfcd5a6941426d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Flask/Flask_Template/flask_goldanalysis_template.py", "max_forks_repo_name": "YizheZhang-Ervin/FullStack_WebFramework", "max_forks_repo_head_hexsha": "ae140d9743ab03e59fc1b385cbbfcd5a6941426d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.645320197, "max_line_length": 111, "alphanum_fraction": 0.5982587065, "include": true, "reason": "import numpy", "num_tokens": 4188}
|
import argparse
import os
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae_dilated import RVAE_dilated
if __name__ == "__main__":
if not os.path.exists('data/word_embeddings.npy'):
        raise FileNotFoundError("word embeddings file wasn't found")
parser = argparse.ArgumentParser(description='RVAE_dilated')
parser.add_argument('--num-iterations', type=int, default=25000, metavar='NI',
help='num iterations (default: 25000)')
parser.add_argument('--batch-size', type=int, default=45, metavar='BS',
help='batch size (default: 45)')
parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
help='use cuda (default: True)')
parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005)')
parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
help='dropout (default: 0.3)')
parser.add_argument('--use-trained', type=bool, default=False, metavar='UT',
help='load pretrained model (default: False)')
    parser.add_argument('--ppl-result', default='', metavar='PPL',
                        help="ppl result path (default: '')")
    parser.add_argument('--kld-result', default='', metavar='KLD',
                        help="kld result path (default: '')")
args = parser.parse_args()
batch_loader = BatchLoader('')
parameters = Parameters(batch_loader.max_word_len,
batch_loader.max_seq_len,
batch_loader.words_vocab_size,
batch_loader.chars_vocab_size)
rvae = RVAE_dilated(parameters)
if args.use_trained:
rvae.load_state_dict(t.load('trained_RVAE'))
if args.use_cuda:
rvae = rvae.cuda()
optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
train_step = rvae.trainer(optimizer, batch_loader)
validate = rvae.validater(batch_loader)
ppl_result = []
kld_result = []
for iteration in range(args.num_iterations):
ppl, kld = train_step(iteration, args.batch_size, args.use_cuda, args.dropout)
if iteration % 10 == 0:
print('\n')
print('------------TRAIN-------------')
print('----------ITERATION-----------')
print(iteration)
print('---------PERPLEXITY-----------')
print(ppl.data.cpu().numpy()[0])
print('-------------KLD--------------')
print(kld.data.cpu().numpy()[0])
print('------------------------------')
if iteration % 10 == 0:
ppl, kld = validate(args.batch_size, args.use_cuda)
ppl = ppl.data.cpu().numpy()[0]
kld = kld.data.cpu().numpy()[0]
print('\n')
print('------------VALID-------------')
print('---------PERPLEXITY-----------')
print(ppl)
print('-------------KLD--------------')
print(kld)
print('------------------------------')
ppl_result += [ppl]
kld_result += [kld]
if iteration % 20 == 0:
seed = np.random.normal(size=[1, parameters.latent_variable_size])
sample = rvae.sample(batch_loader, 50, seed, args.use_cuda)
print('\n')
print('------------SAMPLE------------')
print(sample)
print('------------------------------')
t.save(rvae.state_dict(), 'trained_RVAE')
np.save('ppl_result_{}.npy'.format(args.ppl_result), np.array(ppl_result))
np.save('kld_result_npy_{}'.format(args.kld_result), np.array(kld_result))
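    # Illustrative sketch (not in the original): reload the curves saved above,
    # assuming the default empty suffixes (np.save appends the .npy extension).
    # ppl_curve = np.load('ppl_result_.npy')
    # kld_curve = np.load('kld_result_npy_.npy')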
|
{"hexsha": "8d674696dd76c785063201449e47a27ba8ab46bb", "size": 3863, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "kefirski/contiguous-succotash", "max_stars_repo_head_hexsha": "7497efd1392693248ed98805dcdbbf5dc125afc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2017-06-29T13:41:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-30T11:51:08.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "analvikingur/contiguous-succotash", "max_issues_repo_head_hexsha": "7497efd1392693248ed98805dcdbbf5dc125afc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-14T11:42:27.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-14T11:42:27.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "kefirski/contiguous-succotash", "max_forks_repo_head_hexsha": "7497efd1392693248ed98805dcdbbf5dc125afc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2017-08-01T05:22:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-09T14:53:22.000Z", "avg_line_length": 37.5048543689, "max_line_length": 86, "alphanum_fraction": 0.5397359565, "include": true, "reason": "import numpy", "num_tokens": 820}
|
# 09.Picrust2_Songbird.r
#
# Figure 6, Table S13, Table S14, Table S15
# Ref for PICRUSt2: Douglas, G. M. et al. PICRUSt2 for prediction of metagenome functions. Nat. Biotechnol. 38, 685–688 (2020).
# Ref for Songbird: Morton, J. T. et al. Establishing microbial composition measurement standards with reference frames. Nat. Commun. 10, 2719 (2019).
# Ref for Qurro: Fedarko, M. W. et al. Visualizing ’omic feature rankings and log-ratios using Qurro. NAR Genomics Bioinforma. 2, (2020).
# Water and sediment sequences were analyzed separately
#### Phyloseq Object ####
ps.noncontam.tree
#### Set Directory ####
picrust <- file.path(paste(path_phy, "PICRUSt2", sep=""))
dir.create(picrust, showWarnings=FALSE)
setwd(picrust)
#### Prepare data for PICRUSt2 ####
# make a biomformat otu table
otu <- as(otu_table(ps.noncontam.tree),"matrix")
otu_biom <- make_biom(data=otu)
write_biom(otu_biom,"otu_biom.biom")
write.table(otu_table(ps.noncontam.tree), file = "otu_table.txt", sep = "\t", row.names = TRUE, col.names = NA)
# add sequences from "tax_table_for_tree.txt" file in folder "tree"
# export metadata table
write.table(sample_data(ps.noncontam.tree), file = "metadata_for_picrust2.txt", sep = "\t", row.names = TRUE, col.names = NA)
#### In Bash ####
wd=<path to PICRUSt2 working dir>
cd $wd
input=${wd}/otu_table.txt
output=${wd}/sequences_for_picrust2.fa
sed 's/"//g' metadata_for_picrust2.txt > metadata_for_picrust2_fixed.txt
# also add #SampleID to header
conda activate picrust2
# change these variables
study_seq=sequences_for_picrust2.fa
abundance_table=otu_biom.biom
date=<date>
rep=<repeat number>
# do not change these variables
output=${date}_picrust2_out_rep${rep}
db_path_wd=<path to PICRUSt2 default files>
ref_db=${db_path_wd}/prokaryotic/pro_ref
traits_table=${db_path_wd}/prokaryotic/ec-modified.txt.gz
copy_number_16S=${db_path_wd}/prokaryotic/16S.txt.gz
map_file=${db_path_wd}/pathway_mapfiles/metacyc_path2rxn_struc_filt_pro-modified.txt
regroup_map_file=${db_path_wd}/pathway_mapfiles/ec_level4_to_metacyc_rxn-modified.tsv
cd $wd
# run commands
picrust2_pipeline.py -s ${study_seq} -i ${abundance_table} -o ${output} -p 1 \
--ref_dir ${ref_db} \
--custom_trait_tables ${traits_table} \
--marker_gene_table ${copy_number_16S} \
--pathway_map ${map_file} \
--reaction_func ${traits_table} \
--regroup_map ${regroup_map_file} \
--verbose \
--stratified --per_sequence_contrib --coverage
# after done
add_descriptions.py -i ${output}/pathways_out/path_abun_unstrat.tsv.gz \
--custom_map_table ${db_path_wd}/description_mapfiles/metacyc_pathways_info-modified.txt.gz \
-o ${output}/pathways_out/path_abun_unstrat_descrip.tsv.gz
add_descriptions.py -i ${output}/ec-modified.txt_metagenome_out/pred_metagenome_unstrat.tsv.gz \
--custom_map_table ${db_path_wd}/description_mapfiles/ec_level4_info-modified.tsv.gz \
-o ${output}/ec-modified.txt_metagenome_out/pred_metagenome_unstrat_descrip.tsv.gz
#### the file OTU contribution for EC file is large; use bash to obtain lines of interest ####
# first create a file listing the pathway IDs of interest (the EC_list variable below points to it)
EC_list=pathways_out_per_seq_contrib/path_list_all_selected.tsv
data=pathways_out_per_seq_contrib/path_abun_contrib.tsv
# move the pathway column (second column) to the first column
awk 'BEGIN {OFS="\t"} { print $2,$1,$3,$4,$5,$6,$7,$8,$9 }' ${data} > ${data}2
# search for pathways from EC_list file and print out matches from the datafile
awk 'FNR==NR {a[$1]; next}; $1 in a' ${EC_list} ${data}2 2>&1 | tee -a pathways_out_per_seq_contrib/path_abun_contrib_all_selected.tsv
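# Toy illustration of the awk idiom above (not part of the pipeline):
# FNR==NR loads the first file's IDs into array a, then only matching rows
# of the second file are printed.
# printf 'PWY-101\n' > ids.tsv
# printf 'PWY-101\tS1\t3\nPWY-202\tS1\t5\n' > dat.tsv
# awk 'FNR==NR {a[$1]; next}; $1 in a' ids.tsv dat.tsv   # -> PWY-101  S1  3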
#### Back to R: Prepare data ####
# Import the predicted Metacyc pathways
pred_pathway <- read.delim(<path to file path_abun_unstrat_descrip.tsv>, row.names=1, sep = "\t", stringsAsFactors=FALSE, fileEncoding="latin1")
use_table <- pred_pathway
# Create "otu table" and "tax table"
tax_table_picrust2 <- use_table[names(use_table) %in% c("description") ]
tax_table_picrust2$ID <- rownames(tax_table_picrust2)
otu_table_picrust2 <- use_table[,-1]
otu_table_picrust2 <- t(otu_table_picrust2)
# Convert to matrix
tax_table_picrust2 <- as.matrix(tax_table_picrust2)
otu_table_picrust2 <- as.matrix(otu_table_picrust2)
rownames(tax_table_picrust2) <- gsub(':', '.', rownames(tax_table_picrust2))
colnames(otu_table_picrust2) <- gsub(':', '.', colnames(otu_table_picrust2))
# Retrieve metadata
DATA_PHYLOSEQ_FIXED_picrust2 <- DATA_PHYLOSEQ_FIXED
rownames(DATA_PHYLOSEQ_FIXED_picrust2) <- DATA_PHYLOSEQ_FIXED_picrust2$SampleID
# Make sure it looks good
head(tax_table_picrust2)
head(otu_table_picrust2)
# Make Phyloseq object
ps_picrust <- phyloseq(
otu_table(otu_table_picrust2, taxa_are_rows = FALSE),
sample_data(DATA_PHYLOSEQ_FIXED_picrust2),
tax_table(tax_table_picrust2)
)
ps_picrust
#### Songbird for picrust2: prepare data ####
songbird_picrust <- file.path(paste(picrust, "/Songbird", sep=""))
dir.create(songbird_picrust, showWarnings=FALSE)
setwd(songbird_picrust)
# format taxonomy table
tax <- as(tax_table(ps_picrust),"matrix")
tax <- as.data.frame(tax)
tax$picrust <- paste("p", rownames(tax), sep="__")
tax$description <- paste("d", tax$description, sep="__")
tax_cols <- c("picrust", "description")
tax$concat <- do.call(paste, c(tax[tax_cols], sep=";"))
for(co in tax_cols) tax[co]<-NULL
head(tax)
write.table(tax, "tax_for_qiime2.txt", quote=FALSE, col.names=FALSE, sep="\t")
# make a biomformat otu table
otu <- as(otu_table(ps_picrust),"matrix")
otu <- t(otu)
otu_biom <- make_biom(data=otu)
write_biom(otu_biom,"otu_biom.biom")
write.table(otu_table(ps_picrust), file = "picrust_otu_table.txt", sep = "\t", row.names = TRUE, col.names = NA)
# export metadata table
write.table(sample_data(ps_picrust), file = "metadata_for_qiime2.txt", sep = "\t", row.names = TRUE, col.names = NA)
#### Songbird for picrust2: to Bash ####
conda activate qiime2-2020.6
wd=<path to PICRUSt2 folder>
cd $wd
# prepare data for QIIME2
sed 's/"//g' metadata_for_qiime2.txt > metadata_for_qiime2_fixed.txt
# also add #SampleID to header
biom convert -i otu_biom.biom -o otu_biom_HDF5.biom --to-hdf5
biom add-metadata -i otu_biom_HDF5.biom -o otu_wTax_metadata.biom --observation-metadata-fp tax_for_qiime2.txt --sc-separated taxonomy --observation-header OTUID,taxonomy --sample-metadata-fp metadata_for_qiime2_fixed.txt
# Import to QIIME2
qiime tools import \
--input-path otu_biom_HDF5.biom \
--type 'FeatureTable[Frequency]' \
--input-format BIOMV210Format \
--output-path feature-table.qza
qiime tools import \
--type 'FeatureData[Taxonomy]' \
--input-format HeaderlessTSVTaxonomyFormat \
--input-path tax_for_qiime2.txt \
--output-path taxonomy.qza
# Check import
qiime feature-table summarize \
--i-table feature-table.qza \
--m-sample-metadata-file metadata_for_qiime2_fixed.txt \
--o-visualization summary_vis.qzv
qiime tools view summary_vis.qzv
# Make model
dir=Strat_group_ref # For Sediment: Location_description
mkdir ${dir}
# For Sediment use: --p-formula "Location_description"
qiime songbird multinomial \
  --i-table feature-table.qza \
  --m-metadata-file metadata_for_qiime2_fixed.txt \
  --p-formula "C(Strat_group, Treatment('Unstratified'))" \
--p-epochs 10000 \
--p-differential-prior 0.5 \
--p-summary-interval 1 \
--p-num-random-test-examples 5 \
--o-differentials ${dir}/differentials.qza \
--o-regression-stats ${dir}/regression-stats.qza \
--o-regression-biplot ${dir}/regression-biplot.qza \
--verbose
# Make null model
null_dir=null_model
mkdir ${null_dir}
qiime songbird multinomial \
--i-table feature-table.qza \
--m-metadata-file metadata_for_qiime2_fixed.txt \
--p-formula "1" \
--p-epochs 10000 \
--p-differential-prior 0.5 \
--p-summary-interval 1 \
--o-differentials ${null_dir}/null-diff.qza \
--o-regression-stats ${null_dir}/null-stats.qza \
--o-regression-biplot ${null_dir}/null-biplot.qza \
--p-num-random-test-examples 5 \
--verbose
# Visualize the first model's regression stats and the null model's
qiime songbird summarize-paired \
--i-regression-stats ${dir}/regression-stats.qza \
--i-baseline-stats ${null_dir}/null-stats.qza \
--o-visualization ${dir}/paired-summary.qzv
qiime tools view ${dir}/paired-summary.qzv
# Qurro
qiime qurro differential-plot \
--i-ranks ${dir}/differentials.qza \
--i-table feature-table.qza \
--m-sample-metadata-file metadata_for_qiime2_fixed.txt \
--m-feature-metadata-file tax_for_qiime2.txt \
--verbose \
--o-visualization ${dir}/qurro_plot_q2.qzv
qiime tools view ${dir}/qurro_plot_q2.qzv
# Export Songbird data
qiime metadata tabulate \
--m-input-file ${dir}/differentials.qza \
--o-visualization ${dir}/differentials-viz.qzv
qiime tools export \
--input-path ${dir}/differentials-viz.qzv \
--output-path ${dir}/differentials
#### Songbird-picrust2 top categories ####
# all pathways
use_pathway <- pred_pathway[,-1]
head(use_pathway)
# load pathways to focus on (csv file has columns for description, Order, broad_categorization1, broad_categorization2, broad_categorization3)
pathway_list <- read.delim(<path to csv file with select pathways>, sep = ",", stringsAsFactors=FALSE, fileEncoding="latin1")
rownames(pathway_list) <- pathway_list[,1]
pathway_list[,1] <- NULL
pathway_list
# subset pathways
focus_pathways <- use_pathway %>% filter(rownames(use_pathway) %in% rownames(pathway_list))
nrow(focus_pathways)
nrow(pathway_list)
focus_pathways <- as.data.frame(t(focus_pathways))
focus_pathways <- tibble::rownames_to_column(focus_pathways, "SampleID")
head(focus_pathways)
# setup denominator
pred_pathway_denominator <- use_pathway %>% filter(!rownames(use_pathway) %in% rownames(pathway_list))
head(pred_pathway_denominator)
# sum denominator
sum_denominator <- as.data.frame(colSums(pred_pathway_denominator))
colnames(sum_denominator) <- "Sum_Denominator"  # any label works; downstream code indexes by position
sum_denominator <- tibble::rownames_to_column(sum_denominator, "SampleID")
head(sum_denominator)
# calculate natural log ratios (Note: log is natural log in R)
df1 <- focus_pathways
df2 <- sum_denominator
df3 <- cbind(df1[1], log(df1[, -1] / df2[match(df1$SampleID, df2$SampleID), -1]))
head(df3)
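# Illustrative check (not in the original): the ratio above is a natural log,
# e.g. a focal pathway abundance of 20 against a denominator sum of 400 gives
# log(20 / 400) = -3.0, on the same scale as the heatmaps plotted below.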
# add descriptions
rownames(df3) <- df3$SampleID
df3$SampleID <- NULL
df3 <- as.data.frame(t(df3))
df3$description <- pathway_list$description[match(rownames(df3), rownames(pathway_list))]
df3$broad_categorization1 <- pathway_list$broad_categorization1[match(rownames(df3), rownames(pathway_list))]
df3$broad_categorization2 <- pathway_list$broad_categorization2[match(rownames(df3), rownames(pathway_list))]
df3$broad_categorization3 <- pathway_list$broad_categorization3[match(rownames(df3), rownames(pathway_list))]
df3$Order <- pathway_list$Order[match(rownames(df3), rownames(pathway_list))]
df3 <- tibble::rownames_to_column(df3, "pathway")
head(df3)
# add stratification group layer
df3_melt <- reshape2::melt(df3, id=c("pathway", "description", "broad_categorization1", "broad_categorization2", "broad_categorization3", "Order"))
colnames(df3_melt) <- c("pathway", "description", "broad_categorization1", "broad_categorization2", "broad_categorization3", "Order", "SampleID", "Natural_Log_Ratio")
df3_melt$SampleID <- gsub("\\.", "-", df3_melt$SampleID)
df3_melt$Strat_group <- DATA_PHYLOSEQ_FIXED$Strat_group[match(df3_melt$SampleID, DATA_PHYLOSEQ_FIXED$SampleID)]
df3_melt$strat_time <- ordi_data$strat_time[match(df3_melt$SampleID, ordi_data$SampleID)]
# order the strat group (Water Samples)
df3_melt$Strat_group <- gsub('Unstratified', 'Incipient Stratification', df3_melt$Strat_group)
df3_melt$Strat_group <- factor(df3_melt$Strat_group, levels=c("Incipient Stratification","Epilimnion","Thermocline", "Hypolimnion"), ordered = TRUE)
# order the strat group (Sediment Samples)
df3_melt$Location_description <- factor(df3_melt$Location_description, levels=c("Inlet","Outlet"), ordered = TRUE)
df3_melt$Depth_desc <- factor(df3_melt$Depth_desc, levels=c("Top 5 cm","5-10 cm","10-21 cm"), ordered = TRUE)
# remove Inf values
df3_melt <- df3_melt[is.finite(df3_melt$Natural_Log_Ratio),]
head(df3_melt)
write.csv(df3_melt, "top_contributing_pathways_Limited2.csv")
#### Plot Figure 6 ####
## Prepare for plotting heatmap (Water Samples)
df3_melt_avg <- df3_melt %>%
group_by(pathway, description, broad_categorization1, broad_categorization2, broad_categorization3, Order, Strat_group, strat_time) %>%
summarise_at(vars(Natural_Log_Ratio), list(mean = mean)) %>%
ungroup()
df3_melt_avg <- as.data.frame(df3_melt_avg)
df3_melt_avg$broad_categorization2 <- factor(df3_melt_avg$broad_categorization2, levels=c('Carbon', 'Iron'), ordered = TRUE)
df3_melt_avg$Order <- factor(df3_melt_avg$Order, levels=c('1', '2', '3', '4', '5', '6' ,'7', '8', '9', '10', '11', '12', '13', '14'))
df3_melt_avg$broad_categorization3 <- factor(df3_melt_avg$broad_categorization3, levels=c('Methanogenesis', 'CO2 Fixation', 'C1 compound utilization', 'Fermentation', 'Aromatic Compounds', 'Iron microbes', 'Siderophore biosynthesis', 'Sulfur'), ordered = TRUE)
df3_melt_avg$broad_categorization1 <- gsub('Iron reducers', 'Iron reduction: Fe(III) to Fe(II)', df3_melt_avg$broad_categorization1)
df3_melt_avg$broad_categorization1 <- gsub('Magnetotactic bacteria', 'Magnetosome formation', df3_melt_avg$broad_categorization1)
df3_melt_avg$broad_categorization1 <- factor(df3_melt_avg$broad_categorization1, levels = unique(df3_melt_avg$broad_categorization1[order(df3_melt_avg$Order)]))
head(df3_melt_avg)
## Prepare for plotting heatmap (Sediment samples)
df3_melt_avg <- df3_melt %>%
  group_by(pathway, description, broad_categorization1, broad_categorization2, broad_categorization3, Order, Location_description) %>%
summarise_at(vars(Natural_Log_Ratio), list(mean = mean)) %>%
ungroup()
df3_melt_avg <- as.data.frame(df3_melt_avg)
df3_melt_avg$broad_categorization2 <- factor(df3_melt_avg$broad_categorization2, levels=c('Carbon', 'Iron'), ordered = TRUE)
df3_melt_avg$Order <- factor(df3_melt_avg$Order, levels=c('1', '2', '3', '4', '5', '6' ,'7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24'))
df3_melt_avg$broad_categorization3 <- factor(df3_melt_avg$broad_categorization3, levels=c('Methanogens', 'CO2 Fixation', 'Carbohydrate Degradation', 'Fermentation', 'Alcohol Degradation', 'Aldehyde Degradation', 'Amine and Polyamine Degradation', 'Aromatic Compound Degradation', 'Siderophore biosynthesis'), ordered = TRUE)
df3_melt_avg$broad_categorization1 <- factor(df3_melt_avg$broad_categorization1, levels = unique(df3_melt_avg$broad_categorization1[order(df3_melt_avg$Order)]))
head(df3_melt_avg)
# Set up theme
plot_theme <- theme(panel.background = element_rect(fill = "white", colour = "white", size = 0, linetype = "solid"),
panel.border = element_rect(colour="white", size=0, fill=NA),
strip.background=element_rect(fill='white', colour='white', size = 0),
strip.text = element_text(face="bold", size=10),
strip.text.y = element_text(angle = 360, hjust=0),
panel.spacing.x=unit(0.5, "lines"),
panel.grid.major = element_line(size = 0),
panel.grid.minor = element_line(size = 0),
axis.text = element_text(size=10, colour="black"),
axis.title = element_blank(),
axis.text.x = element_text(angle = 45, vjust = 1, hjust=1),
legend.position="right",
legend.key = element_rect(fill = "white"),
legend.title = element_text(face="bold", size=10),
legend.text = element_text(size=10))
plot_guide <- guides(fill = guide_colourbar(frame.linewidth = 1, frame.colour = "black", ticks = TRUE, ticks.colour = "black", ticks.linewidth = 1, reverse=F))
# Plot water samples
ggplot(df3_melt_avg, aes(Strat_group, broad_categorization1)) +
geom_tile(aes(fill = mean, width=0.9), colour = "black", size=0.5) +
scale_fill_gradientn(colors=c( "#7a024e", "#fdece9", "#b0d66d", "#34a0a4", "#02303f"), breaks=c(-8, -10, -12, -14, -16)) +
facet_grid(broad_categorization3 ~ ., scales = "free", space = "free") +
labs(fill="Natural Log Ratio\n(Average)") +
scale_y_discrete(limits=rev) + plot_theme + plot_guide
save_file_plot <- paste("top_contributing_pathways_heatmap_water.svg", sep="") #change the file name if need to
ggsave(save_file_plot, path = songbird_picrust, scale = 1, width = 7, height =6, units = c("in"), dpi = 300)
# Plot sediment samples
ggplot(df3_melt_avg, aes(Location_description, broad_categorization1)) +
geom_tile(aes(fill = mean, width=0.9), colour = "black", size=0.5) +
scale_fill_gradientn(colors=c("#213e1b", "#9be564", "#fee231", "#e78e23", "#341209"), breaks=c(-6, -8, -10, -12, -14)) +
facet_grid(broad_categorization3 ~ ., scales = "free", space = "free") +
labs(fill="Natural Log Ratio\n(Average)") +
scale_y_discrete(limits=rev) + plot_theme + plot_guide
save_file_plot <- paste("top_contributing_pathways_heatmap_sediment.svg", sep="") #change the file name if need to
ggsave(save_file_plot, path = songbird_picrust, scale = 1, width = 7, height =6, units = c("in"), dpi = 300)
|
{"hexsha": "52f15046dd73bef284950c4ec3640be70770d7d8", "size": 17182, "ext": "r", "lang": "R", "max_stars_repo_path": "09.Picrust2_Songbird.r", "max_stars_repo_name": "LLNL/2022_PondB_microbiome", "max_stars_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "09.Picrust2_Songbird.r", "max_issues_repo_name": "LLNL/2022_PondB_microbiome", "max_issues_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "09.Picrust2_Songbird.r", "max_forks_repo_name": "LLNL/2022_PondB_microbiome", "max_forks_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-23T17:39:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T17:39:35.000Z", "avg_line_length": 44.861618799, "max_line_length": 324, "alphanum_fraction": 0.7382726109, "num_tokens": 5161}
|
#!/usr/bin/env python3
"""
Keras implementation of CapsNet in Hinton's paper Dynamic Routing Between Capsules.
The current version may only work with the TensorFlow backend; rewriting it in pure TensorFlow would be straightforward.
Adapting it to other backends should be easy, but I have not tested this.
Usage:
python test.py
python test.py --epochs 50
python test.py --epochs 50 --routings 3
... ...
Result:
Validation accuracy > 99.5% after 20 epochs. Converge to 99.66% after 50 epochs.
About 110 seconds per epoch on a single GTX1070 GPU card
Author: Xifeng Guo, E-mail: `guoxifeng1990@163.com`, Github: `https://github.com/XifengGuo/CapsNet-Keras`
"""
import numpy as np
from keras import backend as K
from keras.losses import mean_squared_error
from keras_contrib.losses import DSSIMObjective
import matplotlib.pyplot as plt
from vignet.data import data_generator
from vignet.models import VIGNet
K.set_image_data_format('channels_last')
def denormalize_image(x):
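    # Map images from the model's [-1, 1] range back to [0, 1] for display.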
x = (x + 1.) / 2.
x = np.clip(x, 0., 1.)
return x
def sample(model, args):
n_samples = 4
fig, ax = plt.subplots(nrows=n_samples, ncols=3, dpi=100, figsize=(40, 10*n_samples))
g = data_generator(1, test=True)
for i in range(n_samples):
inputs, ground_truth = next(g)
x = inputs[0]
x1_pred, x2_pred, pose1_pred, pose2_pred = model.predict_on_batch(inputs)
x1_true, x2_true, pose1_true, pose2_true = ground_truth
# Denormalize all images
x, x1_true, x2_true, x1_pred, x2_pred = map(denormalize_image, [x, x1_true, x2_true, x1_pred, x2_pred])
print(pose1_true, pose1_pred, pose2_true, pose2_pred)
ax[i][0].imshow(x.squeeze())
ax[i][1].imshow(x1_pred[0].squeeze())
ax[i][2].imshow(x2_pred[0].squeeze())
plt.show()
def get_iou(x_true, x_pred):
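    # IoU over the alpha (mask) channel: threshold alpha > 0 into binary
    # masks, then average intersection-over-union across the batch.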
bs = len(x_true)
alpha_true = x_true[:,:,:,-1].reshape(bs, -1) > 0
alpha_pred = x_pred[:,:,:,-1].reshape(bs, -1) > 0
i = alpha_true * alpha_pred
i = i.sum(axis=-1)
u = alpha_true + alpha_pred
u = u.sum(axis=-1)
    iou = i / u.astype('float32')  # float division (int/int would truncate under Python 2)
return iou.mean()
def test_multi(model, args):
dssim = DSSIMObjective()
mse1 = 0
mse2 = 0
ssim1 = 0
ssim2 = 0
iou1 = 0
iou2 = 0
error1 = []
error2 = []
# Build the computational graph first
x_true = K.placeholder((None, 128, 128, 4), dtype='float32')
x_pred = K.placeholder((None, 128, 128, 4), dtype='float32')
mse = mean_squared_error(x_true, x_pred)
ssim = dssim(x_true, x_pred)
sess = K.get_session()
# x1 background, x2 foreground object
n = 2500 # @ batch_size 40 = 100k samples
g = data_generator(args.batch_size, test=True)
for i in range(n):
inputs, ground_truth = next(g)
x1_pred, x2_pred, pose1_pred, pose2_pred = model.predict_on_batch(inputs)
x1_true, x2_true, pose1_true, pose2_true = ground_truth
mse1 += sess.run(mse, feed_dict={x_true: x1_true, x_pred: x1_pred}).mean()
mse2 += sess.run(mse, feed_dict={x_true: x2_true, x_pred: x2_pred}).mean()
ssim1 += sess.run(ssim, feed_dict={x_true: x1_true, x_pred: x1_pred}).mean()
ssim2 += sess.run(ssim, feed_dict={x_true: x2_true, x_pred: x2_pred}).mean()
iou1 += get_iou(x1_true, x1_pred)
iou2 += get_iou(x2_true, x2_pred)
e1 = np.absolute(pose1_true - pose1_pred).mean(axis=0) / 2. # percent
error1.append(e1)
e2 = np.absolute(pose2_true - pose2_pred).mean(axis=0) / 2. # percent
error2.append(e2)
print(i)
mse1 /= n
mse2 /= n
ssim1 /= n
ssim2 /= n
iou1 /= n
iou2 /= n
error1 = np.stack(error1).mean(axis=0)
error2 = np.stack(error2).mean(axis=0)
print(mse1, mse2, ssim1, ssim2, iou1, iou2, error1, error2)
#print(model.metrics_names, e)
from vignet.voxel import voxel2obj
from vignet.vox2mesh_func import vox2mesh
import matplotlib.pyplot as plt
def test_single(model):
g = data_generator(1, test=True)
inputs, ground_truth = next(g)
outs = model.predict_on_batch(inputs)
im1, im2 = outs[0], outs[1]
plt.subplot(211)
plt.imshow(denormalize_image(im1.squeeze()))
plt.subplot(212)
plt.imshow(denormalize_image(im2.squeeze()))
plt.show()
voxel_prediction1 = outs[-1].squeeze()
voxel_prediction2 = outs[-2].squeeze()
# Save the prediction to an OBJ file (mesh file)
vox2mesh('prediction_mesh1.obj', voxel_prediction1[:, :, :, 1] > 0.4)
voxel2obj('prediction_vox1.obj', voxel_prediction1[:, :, :, 1] > 0.4)
vox2mesh('prediction_mesh2.obj', voxel_prediction2[:, :, :, 1] > 0.4)
voxel2obj('prediction_vox2.obj', voxel_prediction2[:, :, :, 1] > 0.4)
def main():
import os
import argparse
# setting the hyper parameters
parser = argparse.ArgumentParser(description="VIGNet on ShapeNet.")
parser.add_argument('--epochs', default=1000, type=int)
parser.add_argument('--initial_epoch', default=0, type=int)
parser.add_argument('--batch_size', default=6, type=int)
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.9, type=float,
help="The value multiplied by lr at each epoch. Set a larger value for larger epochs")
parser.add_argument('--lam_recon', default=0.392, type=float,
help="The coefficient for the loss of decoder")
parser.add_argument('-r', '--routings', default=3, type=int,
help="Number of iterations used in routing algorithm. should > 0")
parser.add_argument('--debug', action='store_true',
help="Save weights by TensorBoard")
parser.add_argument('--save_dir', default='./result')
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing")
args = parser.parse_args()
print(args)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# define model
train_model, test_model, voxelizer = VIGNet(input_shape=(128, 128, 4), n_class=2, routings=args.routings, capsule_size=16)
test_model.summary()
train_model.load_weights('pretrained/weights-905.h5')
voxelizer.load_weights('pretrained/3DR2N2.weights.simple.lstm.h5')
#test_multi(model=train_model, args=args)
test_single(test_model)
if __name__ == '__main__':
main()
|
{"hexsha": "473532accaf50208fa41e4a133bbd812bb170610", "size": 6538, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "baudm/VIGNet", "max_stars_repo_head_hexsha": "10a9a70878556de1c97c4212091bb15a7f8977f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "baudm/VIGNet", "max_issues_repo_head_hexsha": "10a9a70878556de1c97c4212091bb15a7f8977f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "baudm/VIGNet", "max_forks_repo_head_hexsha": "10a9a70878556de1c97c4212091bb15a7f8977f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3405405405, "max_line_length": 126, "alphanum_fraction": 0.653410829, "include": true, "reason": "import numpy", "num_tokens": 1880}
|
"""
Everything related to the individual rationality constraints.
"""
__all__ = ['IndividualRationality']
import theano
import theano.tensor as T
import scipy.optimize as opt
import numpy as np
from sepdesign._types import AgentType
from sepdesign._transfer_functions import TransferFunction
class IndividualRationality(object):
"""
A class that facilitates the implementation of the individual rationality
constraints.
:param agent_type: An instance of the class AgentType (immutable)
:param transfer_function: An instance of the class TransferFunction
(immutable)
"""
def __init__(self, agent_type, transfer_func):
assert isinstance(agent_type, AgentType)
assert isinstance(transfer_func, TransferFunction)
self._agent_type = agent_type
self._transfer_func = transfer_func
# Set up the optimization problem we need to solve
# Get expected utility of payoff
self.exp_u_pi = agent_type.get_exp_util(transfer_func)
# Symbolic variable for effort
t_e = self.exp_u_pi.t_x[0]
# Symbolic variable for transfer function parameters
t_a = self.exp_u_pi.t_x[1]
# Get the gradient of the expected utility with respect to effort
self.exp_u_pi_g_e = self.exp_u_pi.grad(t_e)
        # Get the second derivative of the expected utility with respect to effort
self.exp_u_pi_g_e2 = self.exp_u_pi_g_e.grad(t_e)
        # We also need the mixed derivative of the exp. util. with respect
        # to effort and the transfer parameters
self.exp_u_pi_g_ea = self.exp_u_pi_g_e.grad(t_a)
self._compiled = False
def compile(self):
"""
Compile everything that needs to be compiled.
"""
if self._compiled:
return
for obj in [self.exp_u_pi, self.exp_u_pi_g_e, self.exp_u_pi_g_e2,
self.exp_u_pi_g_ea]:
obj.compile()
self._compiled = True
# The objective function to be minimized
self._obj_fun = lambda _e, _a: -self.exp_u_pi(_e[0], _a)
self._obj_fun_jac = lambda _e, _a: -self.exp_u_pi_g_e(_e[0], _a)
def evaluate(self, a, num_restarts=10):
"""
        Evaluate the individual rationality constraint at specific
        transfer-function parameters.
        :param a: The parameters of the transfer function.
        :param num_restarts: The number of restart points used when
            optimizing the agent's effort.
        """
if not self._compiled:
raise RuntimeError('Compile before attempting to evaluate.')
# Sanity check
if not isinstance(a, np.ndarray):
a = np.array(a)
assert a.ndim == 1
assert a.shape[0] == self.transfer_func.num_a
# Restart points (excludes bounds)
e0s = np.linspace(0, 1, num_restarts + 2)[1:-1]
# Solve for each one of the restart points
r_res = None
r_min = 1e99
all_opt_failed = True
for e0 in e0s:
res = opt.minimize(self._obj_fun, e0, args=a,
jac=self._obj_fun_jac, tol=1e-16,
method='SLSQP', bounds=((0.0, 1.0),))
#if res.success:
# all_opt_failed = False
if r_min > res.fun:
r_res = res
r_min = res.fun
#if all_opt_failed:
# raise RuntimeError('All the restarts failed.')
res = r_res
if res is None:
res = {}
res['e_star'] = 0.
res['e_star_g_a'] = np.zeros(a.shape[0])
res['exp_u_pi_e_star'] = 0.
else:
e_star = res.x[0]
exp_u_pi_e_star = -res.fun
exp_u_pi_g_e_star = -res.jac
exp_u_pi_g_e2_star = self.exp_u_pi_g_e2(e_star, a)
exp_u_pi_g_ea_star = self.exp_u_pi_g_ea(e_star, a)
# Get the Lagrange multipliers (Lemma 1)
if np.isclose(e_star, 0.0):
mu = np.array([exp_u_pi_g_e_star / -1.0, 0.0])
elif np.isclose(e_star, 1.0):
mu = np.array([0.0, exp_u_pi_g_e_star / 1.0])
else:
mu = np.zeros((2,))
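            # Implicit differentiation of the KKT conditions: assemble the
            # Jacobian A of the stationarity/complementarity system in
            # (e, mu) and solve A x = b for de*/da and dmu/da (a small ridge
            # term is added below for numerical stability).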
d = a.shape[0]
A = np.zeros((3 * d, 3 * d))
A[:d, :d] = exp_u_pi_g_e2_star * np.eye(d)
A[:d, d:2*d] = -(-1.0) * np.eye(d)
A[:d, 2*d:3*d] = -(1.0) * np.eye(d)
A[d:2*d,:d] = mu[0] * (-1.0) * np.eye(d)
A[d:2*d, d:2*d] = (-e_star) * np.eye(d)
A[2*d:3*d, :d] = mu[1] * (1.0) * np.eye(d)
A[2*d:3*d, 2*d:3*d] = (e_star - 1.0) * np.eye(d)
b = np.zeros((3*d,))
b[:d] = -exp_u_pi_g_ea_star
x = np.linalg.solve(A+1.e-7*np.eye(A.shape[0]), b)
res['mu'] = mu
res['e_star'] = e_star
res['e_star_g_a'] = x[:d]
res['exp_u_pi_e_star'] = exp_u_pi_e_star
res['mu_g_a'] = x[d:]
res['exp_u_pi_e_star_g_a'] = -res['jac'] * res['e_star_g_a']
return res
@property
def agent_type(self):
"""
Get agent type.
"""
return self._agent_type
@property
def transfer_func(self):
"""
Get the transfer function.
"""
return self._transfer_func
if __name__ == '__main__':
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
sns.set_context('paper')
import numpy as np
import numdifftools as nd
from ._quality_functions import *
from ._cost_functions import *
from ._utility_functions import *
from ._transfer_functions import *
# Create an agent of a specific type
for cost_coef in [0.0, 0.2, 0.7]:
print('*' * 80)
print('COST COEF: %1.2f' % cost_coef)
agent_type = AgentType(LinearQualityFunction(1.5, 0.2),
QuadraticCostFunction(cost_coef),
ExponentialUtilityFunction(2.0))
# Create a transfer function
t = RequirementPlusIncentiveTransferFunction()
# Create the individual rationality constraint for this person
ir = IndividualRationality(agent_type, t)
# Compile everything we need to solve the individual rationality const.
ir.compile()
# Evaluate the individual rationality constraints at specific transfer
# function parameters
a = [0.00, 0.3, 1., 0.1]
res = ir.evaluate(a)
# The optimal effort is here
e_star = res['e_star']
print('e_star = %1.2f' % e_star)
# The expected utility at the optimal effort
exp_u_pi_e_star = res['exp_u_pi_e_star']
print('E_i[U_i(Pi_i(e_star; a))] = %1.2f' % exp_u_pi_e_star)
# Let's compare e_star_g_e to the numerical derivative
e_star_g_a = res['e_star_g_a']
func = lambda _a: ir.evaluate(_a)['e_star']
func_g_a = nd.Gradient(func)
n_e_star_g_a = func_g_a(a)
print('e_star_g_a = ', e_star_g_a)
print('n_e_star_g_a =', n_e_star_g_a)
print('Close?', np.allclose(e_star_g_a, n_e_star_g_a, atol=1e-3))
|
{"hexsha": "d8647d8d8a55ff24b18e3bce0c0cd8becd13fb58", "size": 7115, "ext": "py", "lang": "Python", "max_stars_repo_path": "sepdesign/_individual_rationality.py", "max_stars_repo_name": "salarsk1/principal-agent-bilevel-programming", "max_stars_repo_head_hexsha": "e09b9456dff1e5d253b57bd4bc60f87fd36a749b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-30T13:53:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T11:30:22.000Z", "max_issues_repo_path": "sepdesign/_individual_rationality.py", "max_issues_repo_name": "salarsk1/principal-agent-systems-engineering", "max_issues_repo_head_hexsha": "e09b9456dff1e5d253b57bd4bc60f87fd36a749b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-22T08:56:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-22T08:56:51.000Z", "max_forks_repo_path": "sepdesign/_individual_rationality.py", "max_forks_repo_name": "salarsk1/principal-agent-systems-engineering", "max_forks_repo_head_hexsha": "e09b9456dff1e5d253b57bd4bc60f87fd36a749b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-15T00:58:55.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-15T00:58:55.000Z", "avg_line_length": 36.4871794872, "max_line_length": 79, "alphanum_fraction": 0.5766690091, "include": true, "reason": "import numpy,import scipy,import theano", "num_tokens": 1884}
|
program da_vp_bilin
!----------------------------------------------------------------------
! Purpose: Regridding from low to high resolution in control variable space
! by using bilinear interpolation
!
! where n is the grid number in x or y
! ns is the refinement ratio between two resolutions
!
! Method: follow da_bilin.f90
!
! Compile:
!
! pgf90 -o da_vp_bilin.exe da_vp_bilin.f90
!
! liuz@ucar.edu , 2016-08, NCAR/MMM
!----------------------------------------------------------------------
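!
! Worked example (illustrative numbers, not taken from this program):
! with xi = 3 input points per row and a refinement ratio ns = 2, the
! output row has xo = (xi-1)*ns + 1 = 5 points; each new point between
! input points i and i+1 is their weighted average with weights
! (1 - k/ns, k/ns) for k = 0..ns, which is exactly what the matmul with
! the weight matrix im computes in subroutine bilin below.
!----------------------------------------------------------------------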
!use netcdf
implicit none
!These variables' incremental will be regridded by default
character (len=6), dimension(1:19) :: vNam
integer :: ix, jy, kz, k, status
integer :: ixh, jyh, kzh
integer :: nLat, nLon, oLat, oLon
integer :: sLat, eLat, sLon, eLon
integer :: rLat, rLon
real, dimension(:,:,:), allocatable :: v1, v2, v3, v4, v5
real, dimension(:,:,:), allocatable :: v6, v7, v8, v9, v10, v11
real, dimension(:,:,:), allocatable :: v1h, v2h, v3h, v4h, v5h
real, dimension(:,:,:), allocatable :: v6h, v7h, v8h, v9h, v10h, v11h
real, dimension(:,:), allocatable :: iVar, oVar
character (len = 255) :: appname = ""
character (len = 255) :: arg = ""
character (len = 19) :: analysis_date
character (len = 255) :: input_file= "vp_output.global"
character (len = 255) :: output_file= "vp_output.global_hires"
integer, parameter :: vp_unit = 8
integer, parameter :: vp_hires_unit = 9
integer :: ratio ! resolution ratio
integer :: cloud_cv_options ! 2 or 3 with cloud cv variables
integer :: use_cv_w ! =1 for w control variable
integer :: io_status
integer iargc
LOGICAL :: file_exists
!These variables' incremental will be regridded by default
!call getarg(0, appname)
!n=index(appname, '/', BACK=.true.)
!appname = trim(appname(n+1:))
call getarg(1, arg)
call getarg(2, arg)
read(arg, '(i3)') ratio
call getarg(3, arg)
call getarg(4, arg)
read(arg, '(i3)') cloud_cv_options
call getarg(5, arg)
call getarg(6, arg)
read(arg, '(i3)') use_cv_w
write (*, *) 'ratio = ', ratio, 'cloud_cv_options = ', cloud_cv_options, &
'use_cv_w = ', use_cv_w
! read vp file
!--------------------
inquire(FILE=trim(input_file), EXIST=file_exists)
if ( .not. file_exists ) then
Write(*,*) "\nError: "//trim(input_file)//" not exists\n"
call exit(-1)
else
Write(*,*) "Found: "//trim(input_file)
endif
open(unit=vp_unit,file=trim(input_file),iostat=io_status,form='UNFORMATTED',status='OLD')
if (io_status /= 0) then
write(*,*) "Error ",io_status," opening vp file "//trim(input_file)
call exit(-1)
end if
write(*,*) 'Reading vp from : '//trim(input_file)
!read(vp_unit) analysis_date
!print *, 'analysis_date = ', analysis_date
  read(vp_unit) ix, jy, kz ! domain dimension (unstaggered)
print *, "input file: ix, jy, kz = ", ix, jy, kz
allocate ( v1 (1:ix,1:jy,1:kz))
allocate ( v2 (1:ix,1:jy,1:kz))
allocate ( v3 (1:ix,1:jy,1:kz))
allocate ( v4 (1:ix,1:jy,1:kz))
allocate ( v5 (1:ix,1:jy,1:kz))
read(vp_unit) v1, v2, v3, v4, v5
if ( cloud_cv_options >= 2 ) then
allocate ( v6 (1:ix,1:jy,1:kz))
allocate ( v7 (1:ix,1:jy,1:kz))
allocate ( v8 (1:ix,1:jy,1:kz))
allocate ( v9 (1:ix,1:jy,1:kz))
allocate ( v10 (1:ix,1:jy,1:kz))
read(vp_unit) v6, v7, v8, v9, v10
end if
if ( use_cv_w == 1 ) then
allocate ( v11 (1:ix,1:jy,1:kz))
read(vp_unit) v11
end if
write(*,*) 'End Reading vp from : '//trim(input_file)
close(vp_unit)
!-----------------------------
! end read vp file
!----------------------
nLon = ix + 2 ! 52
nLat = jy + 2 ! 52
rLon = ix * ratio ! 150
rLat = jy * ratio ! 150
oLon = ( nLon - 1 ) * ratio + 1 ! 154
oLat = ( nLat - 1 ) * ratio + 1
elat = (oLat - rLat) / 2 ! 2
slat = oLat - rLat - elat + 1 ! 3
elon = (oLon - rLon) / 2
slon = oLon - rLon - elon + 1
allocate(iVar(nLon, nLat), stat=status)
allocate(oVar(oLon, oLat), stat=status)
ixh = ix*ratio
jyh = jy*ratio
allocate ( v1h (1:ixh,1:jyh,1:kz))
allocate ( v2h (1:ixh,1:jyh,1:kz))
allocate ( v3h (1:ixh,1:jyh,1:kz))
allocate ( v4h (1:ixh,1:jyh,1:kz))
allocate ( v5h (1:ixh,1:jyh,1:kz))
if ( cloud_cv_options >= 2 ) then
allocate ( v6h (1:ixh,1:jyh,1:kz))
allocate ( v7h (1:ixh,1:jyh,1:kz))
allocate ( v8h (1:ixh,1:jyh,1:kz))
allocate ( v9h (1:ixh,1:jyh,1:kz))
allocate ( v10h (1:ixh,1:jyh,1:kz))
end if
if ( use_cv_w == 1 ) then
allocate ( v11h (1:ixh,1:jyh,1:kz))
end if
do k = 1, kz
iVar(2:nlon-1,2:nlat-1) = v1(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v1h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v2(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v2h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v3(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v3h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v4(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v4h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v5(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v5h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
if ( cloud_cv_options >= 2 ) then
iVar(2:nlon-1,2:nlat-1) = v6(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v6h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v7(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v7h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v8(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v8h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v9(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v9h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
iVar(2:nlon-1,2:nlat-1) = v10(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v10h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
end if
if ( use_cv_w == 1 ) then
iVar(2:nlon-1,2:nlat-1) = v11(:,:,k)
iVar(1,:) = iVar(2,:)
iVar(nlon,:) = iVar(nlon-1,:)
iVar(:,1) = iVar(:,2)
iVar(:,nlat) = iVar(:,nlat-1)
call bilin(iVar,nLon,nLat,ratio,oVar,oLon,oLat)
v11h(:,:,k) = oVar(slon:olon-elon,slat:olat-elat)
end if
enddo
open(unit=vp_hires_unit,file=trim(output_file),iostat=io_status,form='UNFORMATTED',status='UNKNOWN')
if (io_status /= 0) then
write(*,*) "Error ",io_status," opening vp file "//trim(output_file)
call exit(-1)
end if
  write(*,*) 'Writing hi-res vp to: '//trim(output_file)
print *, 'output file: ixh, jyh, kz=', ixh, jyh, kz
write(vp_hires_unit) ixh, jyh, kz
write(vp_hires_unit) v1h,v2h,v3h,v4h,v5h
if ( cloud_cv_options >= 2 ) then
write(vp_hires_unit) v6h,v7h,v8h,v9h,v10h
end if
if ( use_cv_w == 1 ) then
write(vp_hires_unit) v11h
end if
deallocate(v1, stat=status)
deallocate(v2, stat=status)
deallocate(v3, stat=status)
deallocate(v4, stat=status)
deallocate(v5, stat=status)
deallocate(v1h, stat=status)
deallocate(v2h, stat=status)
deallocate(v3h, stat=status)
deallocate(v4h, stat=status)
deallocate(v5h, stat=status)
if ( cloud_cv_options >= 2 ) then
deallocate(v6, stat=status)
deallocate(v7, stat=status)
deallocate(v8, stat=status)
deallocate(v9, stat=status)
deallocate(v10, stat=status)
deallocate(v6h, stat=status)
deallocate(v7h, stat=status)
deallocate(v8h, stat=status)
deallocate(v9h, stat=status)
deallocate(v10h, stat=status)
end if
if ( use_cv_w == 1 ) then
deallocate(v11, stat=status)
deallocate(v11h, stat=status)
end if
Write(*,*) "Regridding increment completed successfully"
contains
subroutine show_usage()
    Write(*,*) 'Usage: '//trim(appname)// &
      ' [-h] [-fg_lores filename] [-an_lores filename] [-fg_hires filename] [-ns n] [-o outputfile]'
    Write(*,*) "  -fg_lores Optional, low resolution first guess file, default - fg"
    Write(*,*) "  -an_lores Optional, low resolution analysis file from wrfvar, default - wrfvar_output"
    Write(*,*) "  -fg_hires Optional, high resolution first guess file, default - wrfinput_hires"
    Write(*,*) "  -ns       Optional, the refinement ratio between the two resolutions, default - 3"
    Write(*,*) "  -o        Optional, output high resolution analysis file, default - wrfvar_output_hires"
    Write(*,*) "  -h        Show this help"
end subroutine show_usage
!subroutine nf90_handle_err(status, errmsg)
! integer, intent(in) :: status
! character(len=*), intent(in) :: errmsg
!
! if(status /= nf90_noerr) then
! print *, trim(nf90_strerror(status))//" : "//trim(errmsg)
! Stop
! end if
! end subroutine nf90_handle_err
subroutine bilin(old,xi,yi,ns,new,xo,yo)
! assume: xo = (xi-1)*ns + 1, xi=50, xo=49*3+1=148
! yo = (yi-1)*ns + 1
implicit none
integer, intent(in) :: xi,yi,xo,yo
real, dimension(xi,yi), intent(in) :: old
integer, intent(in) :: ns
real, dimension(xo,yo), intent(out):: new
real :: im(1:ns+1,2)
! real :: imm(1:ns+3,2)
integer:: i,j,jm1,im1,ix1,ix2,iy1,iy2
forall(i=1:ns+1) im(i,2) = real(i-1)/ns
im(:,1) = 1 - im(:,2)
do j=2,yi
jm1 = j - 1
iy2 = jm1 * ns + 1
iy1 = iy2 - ns
do i=2,xi
im1 = i - 1
ix2 = im1 * ns + 1
ix1 = ix2 - ns
new(ix1:ix2,iy1:iy2) = matmul(im,matmul(old(im1:i,jm1:j),transpose(im)))
end do
end do
! ns = ns + 2
! forall(i=1:ns+1) imm(i,2) = real(i-1)/ns
! imm(:,1) = 1 - imm(:,2)
!
! j=yi
! jm1 = j - 1
! iy2 = jm1 * ns + 1
! iy1 = iy2 - ns
!
! i=xi
! im1 = i - 1
! ix2 = im1 * ns + 1
! ix1 = ix2 - ns
! new(ix1:ix2,iy1:iy2) = matmul(imm,matmul(old(im1:i,jm1:j),transpose(imm)))
! end do
! end do
end subroutine bilin
end program da_vp_bilin
|
{"hexsha": "3855e0f95f9ebd15432b55d51cc598de59ebae41", "size": 12084, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "var/mri4dvar/da_vp_bilin.f90", "max_stars_repo_name": "matzegoebel/WRF-fluxavg", "max_stars_repo_head_hexsha": "686ae53053bf7cb55d6f078916d0de50f819fc62", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-08-27T12:49:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-16T14:22:54.000Z", "max_issues_repo_path": "var/mri4dvar/da_vp_bilin.f90", "max_issues_repo_name": "matzegoebel/WRF-fluxavg", "max_issues_repo_head_hexsha": "686ae53053bf7cb55d6f078916d0de50f819fc62", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2018-09-18T16:44:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-07T10:59:59.000Z", "max_forks_repo_path": "var/mri4dvar/da_vp_bilin.f90", "max_forks_repo_name": "matzegoebel/WRF-fluxavg", "max_forks_repo_head_hexsha": "686ae53053bf7cb55d6f078916d0de50f819fc62", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-08-31T21:51:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-21T21:41:59.000Z", "avg_line_length": 31.387012987, "max_line_length": 121, "alphanum_fraction": 0.5396391923, "num_tokens": 4311}
|
import os, sys
sys.path.append(os.getcwd())
try: # This only matters on Ishaan's computer
import experiment_tools
experiment_tools.wait_for_gpu()
except ImportError:
pass
import lasagne
import lib
import lib.lsun_downsampled
import lib.ops.gru
import lib.ops.linear
import lib.ops.lstm
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import time
# import tflib.save_images
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
BATCH_SIZE = 64
ITERS = 200000
DIM = 128
SEQ_LEN = 8
PERIOD = 32
RESET_PROB = 0.1
# lib.ops.linear.enable_default_weightnorm()
srng = RandomStreams(seed=234)
def Generator(n_samples, h0s=None):
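    # Per-timestep Gaussian noise is projected to DIM features, run through an
    # LSTM (optionally seeded with initial states h0s), and mapped to one
    # scalar per timestep; the final hidden/cell states are returned so the
    # generator can be unrolled statefully across calls.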
input_noise = srng.normal(
size=(n_samples, SEQ_LEN, 128)
)
inputs = lib.ops.linear.Linear('Generator.input_noise', 128, DIM, input_noise, initialization='he')
inputs = T.nnet.relu(inputs)
output_h, output_c = lib.ops.lstm.LSTM('Generator.RNN', DIM, DIM, inputs, h0s=h0s)
output = lib.ops.linear.Linear('Generator.Out', DIM, 1, output_h)
output = output.reshape((n_samples, SEQ_LEN))
return output, (output_h[:,-1], output_c[:,-1])
def Discriminator(inputs):
n_samples = inputs.shape[0]
output = inputs.reshape((n_samples,SEQ_LEN,1))
output = lib.ops.linear.Linear('Discriminator.In', 1, DIM, output, initialization='glorot_he')
output = T.nnet.relu(output)
output, _ = lib.ops.lstm.LSTM('Discriminator.RNN', DIM, DIM, output)
output = output.reshape((n_samples, SEQ_LEN*DIM))
output = lib.ops.linear.Linear('Discriminator.Out', SEQ_LEN*DIM, 1, output)
return output.reshape((n_samples,))
real_data = T.matrix('real_data')
h0, c0 = T.matrix('h0'), T.matrix('c0')
fake_data, (last_h, last_c) = Generator(BATCH_SIZE, [h0, c0])
fake_data_4x, (last_h_4x, last_c_4x) = Generator(4*BATCH_SIZE, [h0, c0])
disc_out = Discriminator(T.concatenate([real_data, fake_data], axis=0))
disc_real = disc_out[:BATCH_SIZE]
disc_fake = disc_out[BATCH_SIZE:]
gen_cost = -T.mean(Discriminator(fake_data_4x))
disc_cost = T.mean(disc_fake) - T.mean(disc_real)
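# WGAN-GP gradient penalty (Gulrajani et al. 2017): measure the critic's
# gradient norm at random interpolates between real and fake samples and
# penalize its deviation from 1.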
alpha = srng.uniform(
size=(BATCH_SIZE,1),
low=0.,
high=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = T.grad(T.sum(Discriminator(interpolates)), interpolates)
slopes = T.sqrt(T.sum(T.sqr(gradients), axis=1))
lipschitz_penalty = T.mean((slopes-1.)**2)
disc_cost += 10*lipschitz_penalty
gen_params = lib.search(gen_cost, lambda x: hasattr(x, 'param') and 'Generator' in x.name)
discrim_params = lib.search(disc_cost, lambda x: hasattr(x, 'param') and 'Discriminator' in x.name)
gen_grads = T.grad(gen_cost, gen_params)
discrim_grads = T.grad(disc_cost, discrim_params)
gen_grads = [
T.clip(g, lib.floatX(-1.0), lib.floatX(1.0))
for g in gen_grads
]
discrim_grads = [
T.clip(g, lib.floatX(-1.0), lib.floatX(1.0))
for g in discrim_grads
]
gen_updates = lasagne.updates.adam(gen_grads, gen_params, learning_rate=1e-4, beta1=0.5, beta2=0.9)
discrim_updates = lasagne.updates.adam(discrim_grads, discrim_params, learning_rate=1e-4, beta1=0.5, beta2=0.9)
print "Compiling functions"
train_discrim_fn = theano.function(
[real_data, h0, c0],
[disc_cost, last_h, last_c],
updates=discrim_updates.items(),
on_unused_input='warn'
)
train_gen_fn = theano.function(
[h0, c0],
[gen_cost, last_h_4x, last_c_4x],
updates=gen_updates.items(),
on_unused_input='warn'
)
_sample_fn = theano.function([h0, c0], [fake_data, last_h, last_c], on_unused_input='warn')
def generate_image(iteration):
_h0, _c0 = np.zeros((BATCH_SIZE, DIM), dtype='float32'), np.zeros((BATCH_SIZE, DIM), dtype='float32')
samples, _h0, _c0 = _sample_fn(_h0, _c0)
for i in xrange(3):
next_samples, _h0, _c0 = _sample_fn(_h0, _c0)
samples = np.concatenate([samples, next_samples], axis=1)
save_samples(samples, 'samples_{}.png'.format(iteration))
def save_samples(samples, filename):
seqlen = samples.shape[1]
plt.figure(figsize=(10,2*BATCH_SIZE*(10./seqlen)))
y_offset = 0
for sample in samples:
prev_x, prev_y = 0, sample[0]
for x,y in enumerate(sample[1:], start=1):
plt.plot([prev_x, x], [prev_y + y_offset, y + y_offset], color='k', linestyle='-', linewidth=2)
prev_x, prev_y = x,y
y_offset += 2
plt.savefig(filename)
plt.close()
def inf_train_gen():
while True:
samples = []
for i in xrange(BATCH_SIZE):
phase = np.random.uniform()*2*np.pi
period = np.random.randint(PERIOD/2)+1
x = np.arange(SEQ_LEN)
y = np.sin((2*np.pi / period)*x + phase)
samples.append(y)
yield np.array(samples, dtype='float32')
gen = inf_train_gen()
_disc_costs, _gen_costs, times, datatimes = [], [], [], []
save_samples(gen.next(), 'groundtruth.png')
print "Training!"
h0_d, c0_d = np.zeros((BATCH_SIZE, DIM), dtype='float32'), np.zeros((BATCH_SIZE, DIM), dtype='float32')
h0_g, c0_g = np.zeros((4*BATCH_SIZE, DIM), dtype='float32'), np.zeros((4*BATCH_SIZE, DIM), dtype='float32')
for iteration in xrange(ITERS):
if iteration % 100 == 0:
generate_image(iteration)
start_time = time.time()
disc_iters = 5
for i in xrange(disc_iters):
data_start_time = time.time()
_data = gen.next()
datatimes.append(time.time() - data_start_time)
_disc_cost, h0_d, c0_d = train_discrim_fn(_data, h0_d, c0_d)
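        # Carry the LSTM state across iterations, but zero it per sample with
        # probability RESET_PROB so the stateful rollouts stay decorrelated.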
keep_mask = np.random.uniform(size=(BATCH_SIZE,1)) > RESET_PROB
h0_d *= keep_mask
c0_d *= keep_mask
_disc_costs.append(_disc_cost)
_gen_cost, h0_g, c0_g = train_gen_fn(h0_g, c0_g)
_gen_costs.append(_gen_cost)
keep_mask = np.random.uniform(size=(4*BATCH_SIZE,1)) > RESET_PROB
h0_g *= keep_mask
c0_g *= keep_mask
times.append(time.time() - start_time)
if (iteration < 20) or (iteration % 20 == 19):
print "iter:\t{}\tdisc:\t{:.3f}\tgen:\t{:.3f}\ttime:\t{:.3f} datatime:\t{:.3f}".format(iteration, np.mean(_disc_costs), np.mean(_gen_costs), np.mean(times), np.mean(datatimes))
_disc_costs, _gen_costs, times, datatimes = [], [], [], []
|
{"hexsha": "150a56d74c1903931da26334bde0370a695e3578", "size": 6301, "ext": "py", "lang": "Python", "max_stars_repo_path": "py3/nn/experiments/wgan/sin_lstm.py", "max_stars_repo_name": "fr42k/gap-wgan-gp", "max_stars_repo_head_hexsha": "4e373c43d606a1b83f76893d93f9cf8be8cd460d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py3/nn/experiments/wgan/sin_lstm.py", "max_issues_repo_name": "fr42k/gap-wgan-gp", "max_issues_repo_head_hexsha": "4e373c43d606a1b83f76893d93f9cf8be8cd460d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py3/nn/experiments/wgan/sin_lstm.py", "max_forks_repo_name": "fr42k/gap-wgan-gp", "max_forks_repo_head_hexsha": "4e373c43d606a1b83f76893d93f9cf8be8cd460d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8232323232, "max_line_length": 184, "alphanum_fraction": 0.6773528011, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 1884}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/*
* File: HttpRequestManager.hpp
* Author: ubuntu
*
* Created on February 15, 2018, 10:05 AM
*/
#ifndef HTTP_SERVER_REQUESTMANAGER_HPP
#define HTTP_SERVER_REQUESTMANAGER_HPP
#include <string>
#include <memory>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/version.hpp>
#include "keto/common/HttpEndPoints.hpp"
#include "keto/server_session/HttpSessionManager.hpp"
#include "keto/server_session/HttpTransactionManager.hpp"
#include "keto/server_session/HttpSparqlManager.hpp"
namespace keto {
namespace server_session {
class HttpRequestManager {
public:
HttpRequestManager(const HttpRequestManager& orig) = delete;
virtual ~HttpRequestManager();
static std::shared_ptr<HttpRequestManager> init();
static void fin();
static std::shared_ptr<HttpRequestManager> getInstance();
bool checkRequest(boost::beast::http::request<boost::beast::http::string_body>& req);
boost::beast::http::response<boost::beast::http::string_body>
handle_request(boost::beast::http::request<boost::beast::http::string_body>& req);
private:
std::shared_ptr<HttpSessionManager> httpSessionManagerPtr;
std::shared_ptr<HttpTransactionManager> httpTransactionManagerPtr;
std::shared_ptr<HttpSparqlManager> httpSparqlManagerPtr;
HttpRequestManager();
};
}
}
#endif /* HTTP_SERVER_REQUESTMANAGER_HPP */
|
{"hexsha": "e32e2864990c3c358ad6474e656b9a625e2faeda", "size": 1602, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/libs/http_server_session/include/keto/server_session/HttpRequestManager.hpp", "max_stars_repo_name": "burntjam/keto", "max_stars_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-03-04T10:38:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-04T10:38:00.000Z", "max_issues_repo_path": "src/libs/http_server_session/include/keto/server_session/HttpRequestManager.hpp", "max_issues_repo_name": "burntjam/keto", "max_issues_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libs/http_server_session/include/keto/server_session/HttpRequestManager.hpp", "max_forks_repo_name": "burntjam/keto", "max_forks_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-03-04T10:38:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-04T10:38:01.000Z", "avg_line_length": 24.2727272727, "max_line_length": 89, "alphanum_fraction": 0.7378277154, "num_tokens": 366}
|
import random
import numpy as np
import torch
from args_fusion import args
import cv2
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import os
import torch
from torch.autograd import Variable
from osgeo import gdal_array,gdal
import time
'''
@brief: split a large image into blocks
@params: block_x, block_y - block sizes along the x and y axes; image_size - size of the original image
'''
def blockingImage(blcoK_x,blcoK_y,image_size):
x_block = [[x-blcoK_x,x] for x in range(blcoK_x,image_size[0],blcoK_x)]
y_block = [[y-blcoK_y,y] for y in range(blcoK_y,image_size[1],blcoK_y)]
if x_block[-1][1] < image_size[0]:
x_block.append([x_block[-1][1],image_size[0]])
if y_block[-1][1] < image_size[1]:
y_block.append([y_block[-1][1],image_size[1]])
return x_block,y_block
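# Illustrative check (hypothetical 700x900 image, 300x500 blocks):
# blockingImage(300, 500, [700, 900])
# -> ([[0, 300], [300, 600], [600, 700]], [[0, 500], [500, 900]])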
'''
@brief: fuse two images block by block
@params: vis_image, lr_image - the two input images; model - the network model; strategy - the fusion strategy
'''
def blockImageFusion(vis_image,lr_image,model,mode,strategy):
with torch.no_grad():
lr_images = []
vis_images = []
ImageToTensor = transforms.Compose([transforms.ToTensor()])
if mode == 'L':
lr_image = np.reshape(lr_image, [1,
lr_image.shape[0],lr_image.shape[1]])
vis_image = np.reshape(vis_image, [1, vis_image.shape[0],vis_image.shape[1]])
else:
vis_image = ImageToTensor(vis_image).float().numpy()*255
lr_image = ImageToTensor(lr_image).float().numpy()*255
lr_images.append(lr_image)
vis_images.append(vis_image)
lr_images = np.stack(lr_images, axis=0)
vis_images = np.stack(vis_images, axis=0)
vis_images = torch.from_numpy(vis_images).float()
lr_images = torch.from_numpy(lr_images).float()
# dim = img_ir.shape
if args.cuda:
lr_images = lr_images.cuda()
vis_images = vis_images.cuda()
lr_images = Variable(lr_images, requires_grad=False)
vis_images = Variable(vis_images, requires_grad=False)
img_fusion =_generate_fusion_image(model, strategy, lr_images, vis_images)
# # save images
if args.cuda:
#img = img_fusion.cpu().clamp(0, 255).data[0].numpy()
img = img_fusion.cpu().data[0].numpy()
else:
#img = img_fusion.clamp(0, 255).data[0].numpy()
img = img_fusion.data[0].numpy()
#img = img.transpose(1, 2, 0).astype('uint8')
img = img.transpose(1, 2, 0).astype('int16')
return img
def _generate_fusion_image(model, strategy_type, img1, img2):
en_r = model.encoder(img1)
en_v = model.encoder(img2)
f = model.fusion(en_r, en_v, strategy_type=strategy_type)
img_fusion = model.decoder(f)
return img_fusion[0]
def getListFiles(path):
    # Get all files under the given directory (including files in subfolders)
    assert os.path.isdir(path), '%s does not exist' % path
ret=[]
for root,dirs,files in os.walk(path):
for filespath in files:
ret.append(os.path.join(root,filespath))
return ret
'''
@params:
    filename: input remote-sensing TIF file
    band: the band (channel) to extract, defaults to the first
    write: whether to save the result locally
    type_image: visible-light or infrared remote-sensing image
@ex:
tifToPng(filename='E:/DataSet/TIF_images/PAN_1.tif',
read_bands=1,write=True,image_name='1',output_path='./png_res/')
'''
def tifToPng(filename=None,read_bands=1,write=True,image_name='vis',output_path='./png_res/'):
if os.path.exists(output_path) is False:
os.mkdir(output_path)
file = gdal.Open(filename)
    if file is None:
        print('Failed to read image')
return None
    image_cols = file.RasterXSize  # number of columns
    image_rows = file.RasterYSize  # number of rows
    bands = file.RasterCount  # number of bands
print("image Name:%s Bands:%d Height:%d Width:%d"%(filename,bands,image_rows,image_cols))
band = file.GetRasterBand(read_bands)
data = band.ReadAsArray(0,0,image_cols,image_rows)
img = data.astype(np.uint8)
if write:
file_dir = output_path +image_name+'.jpg'
cv2.imwrite(file_dir,img)
def expandTif(filename,fx,fy,out_file_name):
#src
file = gdal.Open(filename)
    if file is None:
        print('Failed to open file')
return
image_cols = file.RasterXSize
image_rows = file.RasterYSize
get_band = file.GetRasterBand(1)
#dst
driver = file.GetDriver()
res_file = driver.Create(out_file_name,fx*image_cols,fy*image_rows,1,get_band.DataType)
res_band = res_file.GetRasterBand(1)
#block
image_size = [image_cols,image_rows]
    blcoK_size_x = 300 # block size along x for chunked processing
    blcoK_size_y = 500 # block size along y for chunked processing
x_block,y_block = blockingImage(blcoK_size_x,blcoK_size_y,image_size=image_size)
#write
start = time.time()
for m in range(fx):
for n in range(fy):
for _,row_block in enumerate(y_block):
y_start = row_block[0]
y_size = row_block[1]-row_block[0]
for _,col_block in enumerate(x_block):
x_start = col_block[0]
x_size = col_block[1]-col_block[0]
image_data = get_band.ReadAsArray(x_start,y_start,x_size,y_size)
res_band.WriteArray(image_data,xoff=m*image_cols+x_start,yoff=n*image_rows+y_start)
end = time.time()
print('time:%f'%(end-start))
#set
res_file.SetProjection(file.GetProjection())
res_file.SetGeoTransform(file.GetGeoTransform())
del file,get_band,res_file,res_band
def copyTif():
gdal.AllRegister()
#src
src_file_name = 'E:/DataSet/TIF_images/PAN_1.tif'
src_file = gdal.Open(src_file_name)
    if src_file is None:
        print('Failed to read image')
        return
    image_cols = src_file.RasterXSize # number of columns
    image_rows = src_file.RasterYSize # number of rows
    src_band = src_file.GetRasterBand(1) # extract the first band
#dst
#driver = gdal.GetDriverByName('GTiff')
driver = src_file.GetDriver()
    res_file = driver.Create('./tif_res/res.tif', image_cols, image_rows, 1, src_band.DataType) # 1 band (default)
res_band = res_file.GetRasterBand(1)
#block
image_size = [image_cols,image_rows]
    blcoK_size_x = 300 # block size along x for chunked processing
    blcoK_size_y = 500 # block size along y for chunked processing
x_block,y_block = blockingImage(blcoK_size_x,blcoK_size_y,image_size=image_size)
#write
start = time.time()
for _,row_block in enumerate(y_block):
y_start = row_block[0]
y_size = row_block[1]-row_block[0]
for _,col_block in enumerate(x_block):
x_start = col_block[0]
x_size = col_block[1]-col_block[0]
image_data = src_band.ReadAsArray(x_start,y_start,x_size,y_size)
res_band.WriteArray(image_data,xoff=x_start,yoff=y_start)
end = time.time()
print('time:%f'%(end-start))
#set
res_file.SetProjection(src_file.GetProjection())
res_file.SetGeoTransform(src_file.GetGeoTransform())
del src_file,src_band,res_file,res_band
if __name__ == '__main__':
expandTif('E:/DataSet/TIF_images/PAN_1.tif',fx=4,fy=10,out_file_name='./pan_res.tif')
tifToPng('./pan_res.tif',read_bands=1,write=True,image_name='pan_res',output_path='./')
|
{"hexsha": "e49340b5ae5d208bf2ea511ccc38c2a4dce085ae", "size": 6255, "ext": "py", "lang": "Python", "max_stars_repo_path": "ImageFusion_DL/utils.py", "max_stars_repo_name": "xiaoqi25478/Project", "max_stars_repo_head_hexsha": "04813495c21faf9892777c111b7284928f70727e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ImageFusion_DL/utils.py", "max_issues_repo_name": "xiaoqi25478/Project", "max_issues_repo_head_hexsha": "04813495c21faf9892777c111b7284928f70727e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ImageFusion_DL/utils.py", "max_forks_repo_name": "xiaoqi25478/Project", "max_forks_repo_head_hexsha": "04813495c21faf9892777c111b7284928f70727e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8128078818, "max_line_length": 103, "alphanum_fraction": 0.7302957634, "include": true, "reason": "import numpy", "num_tokens": 1947}
|
% NOTES:
% - 1000 words or less for RNAAS!
% - Add an appendix or some words to get >=3 pages for arxiv posting
% STYLE:
% - New line after each sentence (makes Git diff's readable)
% TODO:
% - Run: texcount -v3 -merge -incbib -dir -sub=none -utf8 -sum paper.tex
\documentclass[RNAAS]{aastex63}
% Load common packages
\usepackage{microtype} % ALWAYS!
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{booktabs}
\usepackage{graphicx}
\usepackage{color}
\usepackage{enumitem}
\setlist[description]{style=unboxed}
\sloppy\sloppypar\raggedbottom\frenchspacing
\input{preamble.tex}
\shorttitle{Stellar streams in the Legacy Surveys}
\shortauthors{Shipp \& Price-Whelan}
\begin{document}
\title{Something catchy...}
\author[0000-0003-2497-091X]{Nora~Shipp}
\affiliation{affiliation...}
\author[0000-0003-0872-7098]{Adrian~M.~Price-Whelan}
\affiliation{Center for Computational Astrophysics, Flatiron Institute,
Simons Foundation, 162 Fifth Avenue, New York, NY 10010, USA}
\section{Introduction}
% Keep it short: this will be our abstract on arxiv
Stellar streams provide a record of ancient and ongoing accretion onto
galaxies, and they place unique constraints on the nature of dark matter on
the scales of individual galaxies and smaller.
The first streams were discovered in the Milky Way's stellar halo by filtering photometric catalogs of stars, which led to ... \citep{Grillmair}.
Now, with Gaia, proper motions can be used to ...\citep{Ibata,Malhan}.
However, many streams are too distant for Gaia to observe their main sequence stars (what typical distance) ...
For these streams, deeper photometric surveys provide better star-galaxy separation and better photometric precision, which increases the signal-to-noise... \citep{Belokurov:2007, Bernard:20??, Shipp:2018}.
Here, we produce maps of the ...
\section{Data and methods}
We use data from ...
We follow a methodology similar to \cite{Shipp:2018}...
Briefly, ... explain filtering ...
\section{Results}
Show figure...maybe RGB field of streams?
Link to zenodo archive of visualizations (so it has a DOI), explain content
\section{Discussion}
Lots of tentative new structures, but most are low S/N and hard to robustly select because of survey nonuniformities, background structures.
Something about LSST
Something about how we really need spectra of every halo star to get a high-contrast view of these things!
% \begin{figure}[!t]
% \begin{center}
% \includegraphics[width=0.8\textwidth]{example-binaries-long.pdf}
% \end{center}
% \caption{%
% The same as \figurename~\ref{fig:binary-examples-short}, but for \apogee\
% sources with long visit baselines ($\tau > 1000~\dayd$)
% \label{fig:binary-examples-long}
% }
% \end{figure}
\acknowledgements
It is a pleasure to thank...
Link to Legacy Surveys acknowledgement: http://legacysurvey.org/acknowledgment/
\software{
Astropy \citep{astropy:2018},
gala \citep{gala},
IPython \citep{ipython},
numpy \citep{numpy},
schwimmbad \citep{schwimmbad:2017},
scipy \citep{scipy},
}
\appendix
TODO.
% \bibliographystyle{aasjournal}
% \bibliography{refs}
\end{document}
|
{"hexsha": "e0a5556b84c0e48ea87d593ffacba478d302b09b", "size": 3171, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "rnaas/paper.tex", "max_stars_repo_name": "adrn/slegs", "max_stars_repo_head_hexsha": "18819b8f4c878f99f1007637a0525c56600ef32a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rnaas/paper.tex", "max_issues_repo_name": "adrn/slegs", "max_issues_repo_head_hexsha": "18819b8f4c878f99f1007637a0525c56600ef32a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rnaas/paper.tex", "max_forks_repo_name": "adrn/slegs", "max_forks_repo_head_hexsha": "18819b8f4c878f99f1007637a0525c56600ef32a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8728813559, "max_line_length": 201, "alphanum_fraction": 0.7426679281, "num_tokens": 885}
|
import h5py
import matplotlib.pyplot as plt
import numpy as np
import os
import time
def detrending_verify(filename,save_folder_detrending,save_folder_masked,plane_ind=10):
f_str=os.path.split(filename)[-1]
mask_f_str=f_str.replace('aligned.h5','masked.h5')
detr_f_str=f_str.replace('aligned.h5','detrended.h5')
masked_file=os.path.join(os.path.normpath(save_folder_masked),mask_f_str)
detr_file=os.path.join(os.path.normpath(save_folder_detrending),detr_f_str)
with h5py.File(detr_file, "r") as f:
print("Loading detrended data from plane: "+str(plane_ind))
start=time.time()
detrended=f['data'][:,plane_ind,:,:].astype('float32')
end=time.time()
print('Time to load detrended plane data file: ',end-start)
with h5py.File(masked_file, "r") as f:
print("Loading masked data from plane: "+str(plane_ind))
start=time.time()
masked=f['data'][:,plane_ind,:,:].astype('float32')
end=time.time()
print('Time to load masked plane data file: ',end-start)
detrended=detrended.reshape(-1,1024*1024)
masked=masked.reshape(-1,1024*1024)
plt.plot(np.mean(detrended,axis=1),
label='Detrended')
plt.plot(np.mean(masked,axis=1),
label='Masked')
plt.legend()
    plt.title('Detrending comparison (average image intensity in time) for plane '+str(plane_ind))
plt.show()
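# Example invocation (hypothetical paths):
# detrending_verify('/data/fish1_aligned.h5', '/data/detrended', '/data/masked', plane_ind=10)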
|
{"hexsha": "54fbf462a2ccb02e94d2dae037d2f032e96dce51", "size": 1432, "ext": "py", "lang": "Python", "max_stars_repo_path": "Visualization/detrending_viz.py", "max_stars_repo_name": "mariakesa/ZebraFishRegistrationPipeline", "max_stars_repo_head_hexsha": "4955044eb69dc04c579f59ccb24e02e4451aebcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-05T08:06:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-05T08:06:30.000Z", "max_issues_repo_path": "Visualization/detrending_viz.py", "max_issues_repo_name": "mariakesa/ZebraFishRegistrationPipeline", "max_issues_repo_head_hexsha": "4955044eb69dc04c579f59ccb24e02e4451aebcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Visualization/detrending_viz.py", "max_forks_repo_name": "mariakesa/ZebraFishRegistrationPipeline", "max_forks_repo_head_hexsha": "4955044eb69dc04c579f59ccb24e02e4451aebcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1176470588, "max_line_length": 97, "alphanum_fraction": 0.6682960894, "include": true, "reason": "import numpy", "num_tokens": 371}
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
#from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
n_most_common_words = 8000
max_len = 130
epochs = 10
emb_dim = 128
batch_size = 256
# load dataset
news = pd.read_csv("data/uci-news-aggregator.csv", usecols=['TITLE', 'CATEGORY'])
# Balance the classes: keep up to 45k examples from each category
num_of_categories = 45000
shuffled = news.reindex(np.random.permutation(news.index))
e = shuffled[shuffled['CATEGORY'] == 'e'][:num_of_categories]
b = shuffled[shuffled['CATEGORY'] == 'b'][:num_of_categories]
t = shuffled[shuffled['CATEGORY'] == 't'][:num_of_categories]
m = shuffled[shuffled['CATEGORY'] == 'm'][:num_of_categories]
concated = pd.concat([e,b,t,m], ignore_index=True)
#Shuffle the dataset
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
# One-hot encode the labels
concated.loc[concated['CATEGORY'] == 'e', 'LABEL'] = 0
concated.loc[concated['CATEGORY'] == 'b', 'LABEL'] = 1
concated.loc[concated['CATEGORY'] == 't', 'LABEL'] = 2
concated.loc[concated['CATEGORY'] == 'm', 'LABEL'] = 3
print(concated['LABEL'][:10])
labels = to_categorical(concated['LABEL'], num_classes=4)
print(labels[:10])
if 'CATEGORY' in concated.keys():
    concated = concated.drop(['CATEGORY'], axis=1)
'''
[1. 0. 0. 0.] e
[0. 1. 0. 0.] b
[0. 0. 1. 0.] t
[0. 0. 0. 1.] m
'''
tokenizer = Tokenizer(num_words=n_most_common_words, filters='\'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['TITLE'].values)
sequences = tokenizer.texts_to_sequences(concated['TITLE'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
X = pad_sequences(sequences, maxlen=max_len)
Y = labels
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.25, random_state=42)
print((X_train.shape, y_train.shape, X_test.shape, y_test.shape))
model = Sequential()
''' "Word embeddings" are a family of natural language processing techniques aiming at mapping semantic meaning into a geometric space. This is done by associating a numeric vector to every word in a dictionary, such that the distance (e.g. L2 distance or more commonly cosine distance) between any two vectors would capture part of the semantic relationship between the two associated words. The geometric space formed by these vectors is called an embedding space.
For instance, "coconut" and "polar bear" are words that are semantically quite different, so a reasonable embedding space would represent them as vectors that would be very far apart. But "kitchen" and "dinner" are related words, so they should be embedded close to each other.
Ideally, in a good embeddings space, the "path" (a vector) to go from "kitchen" and "dinner" would capture precisely the semantic relationship between these two concepts. In this case the relationship is "where x occurs", so you would expect the vector kitchen - dinner (difference of the two embedding vectors, i.e. path to go from dinner to kitchen) to capture this "where x occurs" relationship. Basically, we should have the vectorial identity: dinner + (where x occurs) = kitchen (at least approximately). If that's indeed the case, then we can use such a relationship vector to answer questions. For instance, starting from a new vector, e.g. "work", and applying this relationship vector, we should get sometime meaningful, e.g. work + (where x occurs) = office, answering "where does work occur?".
Word embeddings are computed by applying dimensionality reduction techniques to datasets of co-occurence statistics between words in a corpus of text. This can be done via neural networks (the "word2vec" technique), or via matrix factorization. '''
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
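# --- Illustrative sketch (toy numbers, not learned embeddings) ---
# The geometric claims above can be checked with cosine distance; any
# reasonable embedding should place related words closer together:
def cosine_distance(a, b):
    return 1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
v_kitchen = np.array([0.9, 0.1, 0.3])   # made-up 3-d vectors for clarity;
v_dinner = np.array([0.8, 0.2, 0.35])   # the real embeddings live in emb_dim=128
v_coconut = np.array([-0.7, 0.9, -0.2])
assert cosine_distance(v_kitchen, v_dinner) < cosine_distance(v_kitchen, v_coconut)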
''' SpatialDropout1D performs the same function as Dropout, however it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. '''
model.add(SpatialDropout1D(0.7))
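# --- Illustrative sketch of the difference (plain numpy, not Keras) ---
# For a (batch, timesteps, channels) activation, SpatialDropout1D draws one
# Bernoulli mask per channel and broadcasts it over all timesteps, so a
# dropped channel is zero at every timestep:
demo_x = np.ones((1, 4, 3))                            # 4 timesteps, 3 channels
keep = (np.random.rand(1, 1, 3) > 0.7).astype(float)   # drop rate ~0.7, as above
spatial_dropped = demo_x * keep                        # whole channels zeroed
# Plain Dropout would instead draw an independent mask of shape (1, 4, 3).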
''' In machine learning, the vanishing gradient problem is a difficulty found in training artificial neural networks with gradient-based learning methods and backpropagation. In such methods, each of the neural networks weights receives an update proportional to the gradient of the error function with respect to the current weight in each iteration of training. Traditional activation functions such as the hyperbolic tangent function have gradients in the range (-1,1) or [0,1), and backpropagation computes gradients by the chain rule. This has the effect of multiplying n of these small numbers to compute gradients of the front layers in an n-layer network, meaning that the gradient (error signal) decreases exponentially with n and the front layers train very slowly.
One solution is to consider adding the updates instead of multiplying them, and this is exactly what the LSTM does. The state of every cell is updated in an additive way such that the gradient hardly vanishes. '''
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
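# --- Toy arithmetic for the note above ---
# Fifty multiplicative factors of 0.5 all but erase the gradient, while an
# additive accumulation of the same numbers stays well-scaled:
small = np.full(50, 0.5)
print('product over 50 steps:', np.prod(small))  # ~8.9e-16 (vanished)
print('sum over 50 steps:', np.sum(small))       # 25.0 (usable)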
#Fully connected hidden with 4 neurons
model.add(Dense(4, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
print(model.summary())
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,validation_split=0.2,callbacks=[EarlyStopping(monitor='val_loss',patience=7, min_delta=0.0001)])
accr = model.evaluate(X_test,y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
txt = ["Regular fast food eating linked to fertility issues in women"]
seq = tokenizer.texts_to_sequences(txt)
padded = pad_sequences(seq, maxlen=max_len)
pred = model.predict(padded)
class_names = ['entertainment', 'business', 'science/tech', 'health']
print(pred, class_names[np.argmax(pred)])
|
{"hexsha": "1fbd63e73a2b55a1fa183b5d4368a23471650119", "size": 7083, "ext": "py", "lang": "Python", "max_stars_repo_path": "textclassification/nn_lstm.py", "max_stars_repo_name": "sshekhar10/mymllearnings", "max_stars_repo_head_hexsha": "5f7b075c56af28467985282e8021658fed6b1134", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "textclassification/nn_lstm.py", "max_issues_repo_name": "sshekhar10/mymllearnings", "max_issues_repo_head_hexsha": "5f7b075c56af28467985282e8021658fed6b1134", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "textclassification/nn_lstm.py", "max_forks_repo_name": "sshekhar10/mymllearnings", "max_forks_repo_head_hexsha": "5f7b075c56af28467985282e8021658fed6b1134", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.2410714286, "max_line_length": 805, "alphanum_fraction": 0.7663419455, "include": true, "reason": "import numpy", "num_tokens": 1715}
|
from outputs_fromDrone import InitializeConnectDrone, ReturnDroneAngle
from input_forDrone import droneArm, droneTakeOff, condition_yaw, send_global_velocity, send_ned_velocity_heading, goto_position_target_local_ned
from drone_variousMovements import fixDistanceToBlade,moveToNewLocation, moveDroneInLocalCoord, moveToNewDistance
import time
import math
import numpy
from dronekit import VehicleMode
import serial
import json
safeDistance = 2000
distDroneToBlade = 2500
## Initialize connection to drone
vehicle = InitializeConnectDrone()
print("Arm the drone...")
## Arm drone
droneArm(vehicle)
print("Drone take off...")
## Drone take-off - test take-off to 10 meters
droneTakeOff(10, vehicle)
##moveToNewDistance(vehicle, 331,320)
##
##time.sleep(5)
##
##print("slept")
## move drone using velocity from given distance
def movementToDistancePosition(distX, distY):
stopper = False
## x is +forward, -backward, y is +right, -left
signX = numpy.sign(distX)
signY = numpy.sign(distY)
counterX = abs(distX)
counterY = abs(distY)
while stopper is False:
if counterX > 0:
xMove = 1
counterX-=0.5
else:
xMove = 0
if counterY > 0:
yMove = 1
counterY-=0.5
else:
yMove = 0
if yMove == 0 and xMove == 0:
stopper = True
moveDroneInLocalCoord(vehicle, [signX*xMove,signY*yMove,0], [0.5,0.5,0.5])
time.sleep(0.1)
## move drone using velocity and given position
def movementToPositionNEW(posX, posY):
    ## x is +forward, -backward, y is +right, -left
    distX = posX - vehicle.location.local_frame.north
    distY = posY - vehicle.location.local_frame.east
while True:
moveDroneInLocalCoord(vehicle, [distX,distY,0], [1,1,1])
if math.fabs(vehicle.location.local_frame.north) >= math.fabs(posX*0.95) and math.fabs(vehicle.location.local_frame.east) >= math.fabs(posY*0.95):
print("Reached")
break
## time.sleep(1)
def getDataFromServer(sock):
if sock.inWaiting():
receiveMsg = sock.readline()
try:
unpickledDataFull = json.loads(receiveMsg.strip().decode())
outputArr = numpy.array(unpickledDataFull)
sock.write('4Send'.encode())
# print(unpickledDataFull)
return outputArr
except json.JSONDecodeError:
sock.write('4Send'.encode())
print("error")
return -1
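## NOTE (inferred from the usage below, not from a documented protocol): the
## sender is assumed to emit one newline-terminated JSON array of waypoint
## pairs per message, e.g. b'[[east1, north1], [east2, north2]]\n', and to
## expect the literal acknowledgement b'4Send' after every message.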
serialReceive = serial.Serial('COM15', 115200, timeout = 10)
print("start waiting for data...")
try:
while True:
sendData = getDataFromServer(serialReceive)
        if isinstance(sendData, numpy.ndarray):  ## skip None (no data yet) and -1 (decode error)
for i in range(0,len(sendData)):
print(sendData[i,:])
xPosNew = float(sendData[i,1])
yPosNew = float(sendData[i,0])
## while True:
## send_ned_velocity_heading(1/sendData[i,1], 1/sendData[i,0], 0, vehicle)
while True:
                ## Proportional guidance: scale the unit direction vector to a
                ## constant ~0.5 m/s ground speed toward the waypoint
                distToTravel = math.sqrt((xPosNew - float(vehicle.location.local_frame.north))**2 + (yPosNew - float(vehicle.location.local_frame.east))**2)
                velX = (0.5/distToTravel)*(xPosNew - float(vehicle.location.local_frame.north))
                velY = (0.5/distToTravel)*(yPosNew - float(vehicle.location.local_frame.east))
                send_ned_velocity_heading(velX, velY, 0, vehicle)
print(vehicle.location.local_frame.north)
if math.fabs(float(vehicle.location.local_frame.north)) >= math.fabs(xPosNew*0.95) and math.fabs(float(vehicle.location.local_frame.east)) >= math.fabs(yPosNew*0.95):
print("Reached")
break
time.sleep(1)
## movementToDistancePosition(sendData[i,1], sendData[i,0])
## time.sleep(1)
except KeyboardInterrupt:
print("exit")
##movementToDistancePosition(-1, -5)
##
##time.sleep(1)
##
##movementToDistancePosition(1, -4)
##
##time.sleep(1)
##
##movementToDistancePosition(3, -2)
##
##time.sleep(1)
##
##movementToDistancePosition(5, 0)
##
##time.sleep(1)
##
##movementToDistancePosition(4, 2)
##
##time.sleep(1)
##Rotate the drone and add the angle to the heading of the drone
##print("rotate the drone ")
##print(math.degrees(vehicle.attitude.yaw))
##condition_yaw(45,vehicle,relative=True)
##
##send_global_velocity(0,0,0,vehicle)
##time.sleep(1)
##
##print(math.degrees(vehicle.attitude.yaw))
##time.sleep(1)
##
#### Move Drone in local Coord system
##for i in range(0,10):
## moveDroneInLocalCoord(vehicle, [1,0,0], [1,0,0])
##
## time.sleep(0.1)
##
##print(math.degrees(vehicle.attitude.yaw))
## Send command to drone
##print("Fly by velocity")
##for i in range(0,10):
## moveDroneInLocalCoord(vehicle, [1,1,0], [2,2,2])
#### fixDistanceToBlade(safeDistance, distDroneToBlade, vehicle)
#### distDroneToBlade -=10
## ##condition_yaw(meanAngle_compensated)
## time.sleep(1)
print("Setting LAND mode...")
vehicle.mode = VehicleMode("LAND")
print ("Close vehicle object")
vehicle.close()
##sitl.stop()
print("Completed")
|
{"hexsha": "3a37d84cc8ea60c76823e736849d79892dea8a2b", "size": 5651, "ext": "py", "lang": "Python", "max_stars_repo_path": "Main_velocities_testScript.py", "max_stars_repo_name": "IvanNik17/Dronekit-Python-Functions", "max_stars_repo_head_hexsha": "6451f6fbd63ba1186a161b40c8f4ee64d86ca386", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Main_velocities_testScript.py", "max_issues_repo_name": "IvanNik17/Dronekit-Python-Functions", "max_issues_repo_head_hexsha": "6451f6fbd63ba1186a161b40c8f4ee64d86ca386", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Main_velocities_testScript.py", "max_forks_repo_name": "IvanNik17/Dronekit-Python-Functions", "max_forks_repo_head_hexsha": "6451f6fbd63ba1186a161b40c8f4ee64d86ca386", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2276785714, "max_line_length": 190, "alphanum_fraction": 0.595823748, "include": true, "reason": "import numpy", "num_tokens": 1413}
|
from __future__ import print_function
import ConfigParser
import collections
import h5py, sys
import numpy as np
import torch
from torch.autograd import Variable
import gzip
import os
import math
from torch.optim import Optimizer
try:
import cPickle as pickle
except:
import pickle
def mkdir(paths):
if not isinstance(paths, (list, tuple)):
paths = [paths]
for path in paths:
if not os.path.isdir(path):
os.makedirs(path)
def to_variable(var=(), cuda=True, volatile=False):
out = []
for v in var:
if isinstance(v, np.ndarray):
v = torch.from_numpy(v)
if not v.is_cuda and cuda:
v = v.cuda()
if not isinstance(v, Variable):
v = Variable(v, volatile=volatile)
out.append(v)
return out
class AdamW(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# if group['weight_decay'] != 0:
# grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
if group['weight_decay'] != 0:
decay_step_size = -step_size * group['weight_decay']
p.data.add_(decay_step_size, p.data)
return loss
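# --- Reference sketch: one AdamW update in plain numpy. Illustrative only;
# --- it mirrors the tensor arithmetic in AdamW.step above. ---
def _adamw_reference_step(p, grad, exp_avg, exp_avg_sq, step,
                          lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8,
                          weight_decay=0.0):
    # exponential moving averages of the gradient and its square
    exp_avg = beta1 * exp_avg + (1 - beta1) * grad
    exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad * grad
    # bias-corrected step size, as in the class above
    step_size = lr * math.sqrt(1 - beta2 ** step) / (1 - beta1 ** step)
    p = p - step_size * exp_avg / (np.sqrt(exp_avg_sq) + eps)
    # decoupled weight decay: shrink the parameters directly instead of
    # folding an L2 term into the gradient (the commented-out branch above)
    p = p - step_size * weight_decay * p
    return p, exp_avg, exp_avg_sq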
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes):
i = 0
while nbytes >= 1024 and i < len(suffixes) - 1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes)
return '%s%s' % (f, suffixes[i])
# ----------------------------------------------------------------------------------------------------------------------
def cprint(color, text, **kwargs):
if color[0] == '*':
pre_code = '1;'
color = color[1:]
else:
pre_code = ''
code = {
'a': '30',
'r': '31',
'g': '32',
'y': '33',
'b': '34',
'p': '35',
'c': '36',
'w': '37'
}
print("\x1b[%s%sm%s\x1b[0m" % (pre_code, code[color], text), **kwargs)
sys.stdout.flush()
# ----------------------------------------------------------------------------------------------------------------------
def one_hot(y, nb_classes):
nb_samples = y.shape[0]
Y = np.zeros((nb_samples, nb_classes))
Y[np.arange(nb_samples), y] = 1
return Y
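# Example (illustrative): one_hot(np.array([0, 2]), 3)
# -> array([[1., 0., 0.],
#           [0., 0., 1.]])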
def torch_onehot(y, Nclass, cuda=True):
if cuda:
y_onehot = torch.FloatTensor(y.shape[0], Nclass).cuda()
else:
y_onehot = torch.FloatTensor(y.shape[0], Nclass)
    # zero the buffer, then scatter a 1 at each label index
y_onehot.zero_()
y_onehot.scatter_(1, y.unsqueeze(1), 1)
return y_onehot
def save_obj(obj, filename):
    with open(filename, 'wb') as f:  # binary mode: pickle writes bytes
        pickle.dump(obj, f, protocol=2)
def load_obj(file):
    if not isinstance(file, str):
        # already an open file-like object
        return pickle.load(file)
    root, ext = os.path.splitext(file)
    if ext == '.gz':
        with gzip.open(file, 'rb') as f:
            return pickle.load(f)
    else:
        with open(file, 'rb') as f:
            return pickle.load(f)
def windower(x, M, N):
    # M: hop size between consecutive window starts
    # N: window length
    T = x.shape[0]
    m = np.arange(0, T-N+1, M)  # window start indices
    L = m.shape[0]  # number of windows
ind = np.expand_dims(np.arange(0, N), axis=1) * np.ones((1,L)) + np.ones((N,1)) * m
X = x[ind.astype(int)]
return X.transpose()
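# Example (illustrative): windower(np.arange(8), M=2, N=4) yields three
# half-overlapping windows:
# [[0, 1, 2, 3],
#  [2, 3, 4, 5],
#  [4, 5, 6, 7]]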
def shuffle_in_unison(a, b, c=None):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
if c is not None:
np.random.set_state(rng_state)
np.random.shuffle(c)
def gen_crossval_split(data, labels, Npart, Nparts, shuffle=False):
ndat = int(data.shape[0] / Nparts)
assert ndat * Nparts == data.shape[0]
i = Npart
d0 = data[:ndat * (i)]
d1 = data[ndat * (i + 1):]
l0 = labels[:ndat * (i)]
l1 = labels[ndat * (i + 1):]
return np.concatenate((d0, d1), axis=0), np.concatenate((l0, l1)), data[ndat *(i):ndat * (i + 1)], labels[ndat * (
i):ndat * (i + 1)]
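# Example (illustrative): with Nparts=4 and Npart=1, parts 0, 2 and 3 are
# concatenated into the training split and part 1 is returned as the
# held-out validation split.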
|
{"hexsha": "bcfd26d82fb78e65aa541f3b0d4037ace8f9c6c6", "size": 6571, "ext": "py", "lang": "Python", "max_stars_repo_path": "NN_solution/delta_spectrogram_simplenet/src/utils.py", "max_stars_repo_name": "JavierAntoran/moby_dick_whale_detection", "max_stars_repo_head_hexsha": "bbd78c78b53d0d095cd36f37c925618844c8cde9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-05-03T17:20:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-26T12:36:04.000Z", "max_issues_repo_path": "NN_solution/delta_spectrogram_simplenet/src/utils.py", "max_issues_repo_name": "JavierAntoran/moby_dick_whale_detection", "max_issues_repo_head_hexsha": "bbd78c78b53d0d095cd36f37c925618844c8cde9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NN_solution/delta_spectrogram_simplenet/src/utils.py", "max_forks_repo_name": "JavierAntoran/moby_dick_whale_detection", "max_forks_repo_head_hexsha": "bbd78c78b53d0d095cd36f37c925618844c8cde9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-29T06:07:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-29T06:07:23.000Z", "avg_line_length": 29.8681818182, "max_line_length": 120, "alphanum_fraction": 0.5376654999, "include": true, "reason": "import numpy", "num_tokens": 1690}
|
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <boost/locale/encoding.hpp>
#include <boost/locale/util.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include "pkzip_io.h"
using namespace std;
using boost::locale::conv::from_utf;
using boost::locale::conv::to_utf;
using boost::locale::conv::utf_to_utf;
using boost::locale::util::get_system_locale;
static inline auto read(istream &is, void *data, size_t size)
{
return is.read(static_cast<char *>(data), size)
&& is.gcount() == static_cast<streamsize>(size);
}
template <typename value_type>
static inline auto read(istream &is, value_type &value)
{
return read(is, &value, sizeof value);
}
static inline auto write(ostream &os, const void *data, size_t size)
{
os.write(static_cast<const char *>(data), size);
}
template <typename value_type>
static inline auto write(ostream &os, const value_type &value)
{
write(os, &value, sizeof value);
}
string zz::pkzip::charset = "cp932"; // TODO: resolve from OS environment
istream & zz::pkzip::operator >> (istream &is, local_file_header &header)
{
if (!read(is, header.signature) || !header ||
!read(is, header.version_needed_to_extract) ||
!read(is, header.general_purpose_bit_flag) ||
!read(is, header.compression_method) ||
!read(is, header.last_mod_file_time) ||
!read(is, header.last_mod_file_date) ||
!read(is, header.crc32) ||
!read(is, header.compressed_size) ||
!read(is, header.uncompressed_size) ||
!read(is, header.file_name_length) || header.file_name_length == 0 ||
!read(is, header.extra_field_length))
return is;
string file_name(header.file_name_length, '\0');
if (!read(is, &file_name[0], header.file_name_length))
return is;
const auto use_utf8 = header.general_purpose_bit_flag & general_purpose_bit_flags::use_utf8;
header.file_name = use_utf8 ? utf_to_utf<char_type>(file_name) : to_utf<char_type>(file_name, charset);
if (header.extra_field_length) {
header.extra_field.resize(header.extra_field_length);
if (!read(is, &header.extra_field[0], header.extra_field_length))
return is;
}
return is;
}
ostream & zz::pkzip::operator << (ostream &os, const local_file_header &header)
{
const auto use_utf8 = header.general_purpose_bit_flag & general_purpose_bit_flags::use_utf8;
const auto file_name = use_utf8 ? utf_to_utf<char>(header.file_name) : from_utf(header.file_name, charset);
if (file_name.size() > numeric_limits<decltype(header.file_name_length)>::max())
throw runtime_error("too long file name: " + file_name);
if (header.extra_field.size() > numeric_limits<decltype(header.extra_field_length)>::max())
throw runtime_error("too long extra field: " + file_name);
write(os, header.signature);
write(os, header.version_needed_to_extract);
write(os, header.general_purpose_bit_flag);
write(os, header.compression_method);
write(os, header.last_mod_file_time);
write(os, header.last_mod_file_date);
write(os, header.crc32);
write(os, header.compressed_size);
write(os, header.uncompressed_size);
write(os, static_cast<decltype(header.file_name_length)>(file_name.size()));
write(os, static_cast<decltype(header.extra_field_length)>(header.extra_field.size()));
write(os, file_name.data(), file_name.size());
write(os, header.extra_field.data(), header.extra_field.size());
return os;
}
istream & zz::pkzip::operator >> (istream &is, central_file_header &header)
{
if (!read(is, header.signature) || !header ||
!read(is, header.version_made_by) ||
!read(is, header.version_needed_to_extract) ||
!read(is, header.general_purpose_bit_flag) ||
!read(is, header.compression_method) ||
!read(is, header.last_mod_file_time) ||
!read(is, header.last_mod_file_date) ||
!read(is, header.crc32) ||
!read(is, header.compressed_size) ||
!read(is, header.uncompressed_size) ||
!read(is, header.file_name_length) || header.file_name_length == 0 ||
!read(is, header.extra_field_length) ||
!read(is, header.file_comment_length) ||
!read(is, header.disk_number_start) ||
!read(is, header.internal_file_attributes) ||
!read(is, header.external_file_attributes) ||
!read(is, header.relative_offset_of_local_header))
return is;
string file_name(header.file_name_length, '\0');
if (!is.read(&file_name[0], header.file_name_length))
return is;
const auto use_utf8 = header.general_purpose_bit_flag & general_purpose_bit_flags::use_utf8;
header.file_name = use_utf8 ? utf_to_utf<char_type>(file_name) : to_utf<char_type>(file_name, charset);
if (header.extra_field_length) {
header.extra_field.resize(header.extra_field_length);
if (!read(is, &header.extra_field[0], header.extra_field_length))
return is;
}
if (header.file_comment_length) {
string file_comment(header.file_comment_length, '\0');
if (!is.read(&file_comment[0], header.file_comment_length))
return is;
header.file_comment = use_utf8 ? utf_to_utf<char_type>(file_comment) : to_utf<char_type>(file_comment, charset);
}
return is;
}
ostream & zz::pkzip::operator << (ostream &os, const central_file_header &header)
{
const auto use_utf8 = header.general_purpose_bit_flag & general_purpose_bit_flags::use_utf8;
const auto file_name = use_utf8 ? utf_to_utf<char>(header.file_name) : from_utf(header.file_name, charset);
if (file_name.size() > numeric_limits<decltype(header.file_name_length)>::max())
throw runtime_error("too long file name: " + file_name);
if (header.extra_field.size() > numeric_limits<decltype(header.extra_field_length)>::max())
throw runtime_error("too long extra field: " + file_name);
const auto file_comment = use_utf8 ? utf_to_utf<char>(header.file_comment) : from_utf(header.file_comment, charset);
if (file_comment.size() > numeric_limits<decltype(header.file_comment_length)>::max())
throw runtime_error("too long file comment: " + file_comment);
write(os, header.signature);
write(os, header.version_made_by);
write(os, header.version_needed_to_extract);
write(os, header.general_purpose_bit_flag);
write(os, header.compression_method);
write(os, header.last_mod_file_time);
write(os, header.last_mod_file_date);
write(os, header.crc32);
write(os, header.compressed_size);
write(os, header.uncompressed_size);
write(os, static_cast<decltype(header.file_name_length)>(file_name.size()));
write(os, static_cast<decltype(header.extra_field_length)>(header.extra_field.size()));
write(os, static_cast<decltype(header.file_comment_length)>(file_comment.size()));
write(os, header.disk_number_start);
write(os, header.internal_file_attributes);
write(os, header.external_file_attributes);
write(os, header.relative_offset_of_local_header);
write(os, file_name.data(), file_name.size());
write(os, header.extra_field.data(), header.extra_field.size());
write(os, file_comment.data(), file_comment.size());
return os;
}
istream & zz::pkzip::operator >> (istream &is, end_of_central_directory_record &record)
{
if (!read(is, record.signature) || !record ||
!read(is, record.number_of_this_disk) ||
!read(is, record.number_of_the_disk_with_the_start_of_the_central_directory) ||
!read(is, record.total_number_of_entries_in_the_central_directory_on_this_disk) ||
!read(is, record.total_number_of_entries_in_the_central_directory) ||
!read(is, record.size_of_the_central_directory) ||
!read(is, record.offset_of_start_of_central_directory_with_respect_to_the_starting_disk_number) ||
!read(is, record.zip_file_comment_length))
return is;
if (record.zip_file_comment_length) {
record.zip_file_comment.resize(record.zip_file_comment_length);
if (!is.read(&record.zip_file_comment[0], record.zip_file_comment_length))
return is;
}
return is;
}
ostream & zz::pkzip::operator << (ostream &os, const end_of_central_directory_record &record)
{
if (record.zip_file_comment.size() > numeric_limits<decltype(record.zip_file_comment_length)>::max())
throw runtime_error("too long zip file comment: " + record.zip_file_comment);
write(os, record.signature);
write(os, record.number_of_this_disk);
write(os, record.number_of_the_disk_with_the_start_of_the_central_directory);
write(os, record.total_number_of_entries_in_the_central_directory_on_this_disk);
write(os, record.total_number_of_entries_in_the_central_directory);
write(os, record.size_of_the_central_directory);
write(os, record.offset_of_start_of_central_directory_with_respect_to_the_starting_disk_number);
write(os, static_cast<decltype(record.zip_file_comment_length)>(record.zip_file_comment.size()));
write(os, record.zip_file_comment.data(), record.zip_file_comment.size());
return os;
}
|
{"hexsha": "c87b1407ed120c781a4d387df51764ca6760e3d1", "size": 9231, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/pkzip_io.cc", "max_stars_repo_name": "andantissimo/0z", "max_stars_repo_head_hexsha": "b427da84a34df839c9011dea815d6180735f5a9e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-07-28T08:38:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-28T08:38:40.000Z", "max_issues_repo_path": "src/pkzip_io.cc", "max_issues_repo_name": "andantissimo/0z", "max_issues_repo_head_hexsha": "b427da84a34df839c9011dea815d6180735f5a9e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-10-15T05:31:15.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-15T16:45:21.000Z", "max_forks_repo_path": "src/pkzip_io.cc", "max_forks_repo_name": "andantissimo/0z", "max_forks_repo_head_hexsha": "b427da84a34df839c9011dea815d6180735f5a9e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9571428571, "max_line_length": 120, "alphanum_fraction": 0.7131405048, "num_tokens": 2136}
|
[STATEMENT]
lemma WHILEIT_rule:
assumes WF: "wf R"
assumes I0: "I s"
assumes IS: "\<And>s. \<lbrakk> I s; b s \<rbrakk> \<Longrightarrow> f s \<le> SPEC (\<lambda>s'. I s' \<and> (s',s)\<in>R)"
assumes PHI: "\<And>s. \<lbrakk> I s; \<not> b s \<rbrakk> \<Longrightarrow> \<Phi> s"
shows "WHILEIT I b f s \<le> SPEC \<Phi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. WHILEIT I b f s \<le> SPEC \<Phi>
[PROOF STEP]
unfolding WHILEIT_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. REC\<^sub>T (WHILEI_body bind return I b f) s \<le> SPEC \<Phi>
[PROOF STEP]
apply (rule RECT_rule[OF WHILEI_body_trimono WF, where pre=I,OF I0])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (WHILEI_body bind return I b f) = fa\<rbrakk> \<Longrightarrow> WHILEI_body bind return I b f fa x \<le> SPEC \<Phi>
[PROOF STEP]
unfolding WHILEI_body_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa\<rbrakk> \<Longrightarrow> (if I x then if b x then bind (f x) fa else return x else top) \<le> SPEC \<Phi>
[PROOF STEP]
apply (split if_split)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa\<rbrakk> \<Longrightarrow> (b x \<longrightarrow> (I x \<longrightarrow> bind (f x) fa \<le> SPEC \<Phi>) \<and> (\<not> I x \<longrightarrow> top \<le> SPEC \<Phi>)) \<and> (\<not> b x \<longrightarrow> (I x \<longrightarrow> return x \<le> SPEC \<Phi>) \<and> (\<not> I x \<longrightarrow> top \<le> SPEC \<Phi>))
[PROOF STEP]
apply (intro impI conjI)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; I x\<rbrakk> \<Longrightarrow> bind (f x) fa \<le> SPEC \<Phi>
2. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; \<not> I x\<rbrakk> \<Longrightarrow> top \<le> SPEC \<Phi>
3. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
4. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; I x; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; \<not> I x\<rbrakk> \<Longrightarrow> top \<le> SPEC \<Phi>
[PROOF STEP]
apply simp_all
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; I x\<rbrakk> \<Longrightarrow> bind (f x) fa \<le> SPEC \<Phi>
2. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
[PROOF STEP]
apply (rule ibind_rule)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; I x\<rbrakk> \<Longrightarrow> f x \<le> SPEC (\<lambda>x. fa x \<le> SPEC \<Phi>)
2. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
[PROOF STEP]
apply (rule order_trans[OF IS], assumption+)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; I x\<rbrakk> \<Longrightarrow> SPEC (\<lambda>s'. I s' \<and> (s', x) \<in> R) \<le> SPEC (\<lambda>x. fa x \<le> SPEC \<Phi>)
2. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
[PROOF STEP]
apply (rule iSPEC_rule)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fa x xa. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; b x; I x; I xa \<and> (xa, x) \<in> R\<rbrakk> \<Longrightarrow> fa xa \<le> SPEC \<Phi>
2. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> return x \<le> SPEC \<Phi>
[PROOF STEP]
apply (rule ireturn_rule)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fa x. \<lbrakk>\<And>x'. \<lbrakk>I x'; (x', x) \<in> R\<rbrakk> \<Longrightarrow> fa x' \<le> SPEC \<Phi>; REC\<^sub>T (\<lambda>W s. if I s then if b s then bind (f s) W else return s else top) = fa; \<not> b x; I x\<rbrakk> \<Longrightarrow> \<Phi> x
[PROOF STEP]
apply (simp add: PHI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 2864, "file": "Refine_Monadic_Generic_RefineG_While", "length": 13}
|
"""Placeholder."""
import histomicstk.utils as utils
from . import _linalg as linalg
from .complement_stain_matrix import complement_stain_matrix
import numpy
def separate_stains_macenko_pca(
im_sda, minimum_magnitude=16, min_angle_percentile=0.01,
max_angle_percentile=0.99, mask_out=None):
"""Compute the stain matrix for color deconvolution with the Macenko method.
For a two-stain image or matrix in SDA space, this method works by
computing a best-fit plane with PCA, wherein it selects the stain
vectors as percentiles in the "angle distribution" in that plane.
Parameters
----------
im_sda : array_like
Image (MxNx3) or matrix (3xN) in SDA space for which to compute the
stain matrix.
minimum_magnitude : float
The magnitude below which vectors will be excluded from the computation
of the angle distribution.
The default is based on the paper value of 0.15, adjusted for our
method of calculating SDA, thus 0.15 * 255 * log(10)/log(255)
min_angle_percentile : float
The smaller percentile of one of the vectors to pick from the angle
distribution
max_angle_percentile : float
The larger percentile of one of the vectors to pick from the angle
distribution
mask_out : array_like
if not None, should be (m, n) boolean numpy array.
This parameter ensures exclusion of non-masked areas from calculations.
This is relevant because elements like blood, sharpie marker,
white space, etc may throw off the normalization somewhat.
Returns
-------
w : array_like
A 3x3 matrix of stain column vectors
Note
----
All input pixels not otherwise excluded are used in the computation of the
principal plane and the angle distribution.
See Also
--------
histomicstk.preprocessing.color_deconvolution.color_deconvolution
histomicstk.preprocessing.color_deconvolution.separate_stains_xu_snmf
References
----------
.. [#] Van Eycke, Y. R., Allard, J., Salmon, I., Debeir, O., &
Decaestecker, C. (2017). Image processing in digital pathology: an
opportunity to solve inter-batch variability of immunohistochemical
staining. Scientific Reports, 7.
.. [#] Macenko, M., Niethammer, M., Marron, J. S., Borland, D.,
Woosley, J. T., Guan, X., ... & Thomas, N. E. (2009, June).
A method for normalizing histology slides for quantitative analysis.
In Biomedical Imaging: From Nano to Macro, 2009. ISBI'09.
IEEE International Symposium on (pp. 1107-1110). IEEE.
"""
# Image matrix
m = utils.convert_image_to_matrix(im_sda)
# mask out irrelevant values
if mask_out is not None:
keep_mask = numpy.equal(mask_out[..., None], False)
keep_mask = numpy.tile(keep_mask, (1, 1, 3))
keep_mask = utils.convert_image_to_matrix(keep_mask)
m = m[:, keep_mask.all(axis=0)]
# get rid of NANs and infinities
m = utils.exclude_nonfinite(m)
# Principal components matrix
pcs = linalg.get_principal_components(m)
# Input pixels projected into the PCA plane
proj = pcs.T[:-1].dot(m)
# Pixels above the magnitude threshold
filt = proj[:, linalg.magnitude(proj) > minimum_magnitude]
# The "angles"
angles = _get_angles(filt)
# The stain vectors
def get_percentile_vector(p):
return pcs[:, :-1].dot(filt[:, argpercentile(angles, p)])
min_v = get_percentile_vector(min_angle_percentile)
max_v = get_percentile_vector(max_angle_percentile)
# The stain matrix
w = complement_stain_matrix(linalg.normalize(
numpy.array([min_v, max_v]).T))
return w
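# Illustrative usage (assumes the image was first converted to SDA space,
# e.g. with histomicstk.preprocessing.color_conversion.rgb_to_sda):
#
#     im_sda = rgb_to_sda(im_rgb, 255)
#     w = separate_stains_macenko_pca(im_sda)
#     # w[:, :2] holds the two estimated stain vectors; the third column is
#     # the complement added by complement_stain_matrix.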
def _get_angles(m):
"""Take a 2xN matrix of vectors and return a length-N array of an.
... angle-like quantity.
Since this is an internal function, we assume that the values
result from PCA, and so the second element of the vectors captures
secondary variation -- and thus is the one that takes on both
positive and negative values.
"""
m = linalg.normalize(m)
# "Angle" towards +x from the +y axis
return (1 - m[1]) * numpy.sign(m[0])
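# Worked example on unit vectors: (0, 1) -> 0, (1, 0) -> 1, (-1, 0) -> -1;
# the quantity grows monotonically with the angle away from +y toward +x and
# shrinks toward -x, which is the only ordering argpercentile relies on.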
def argpercentile(arr, p):
"""Calculate index in arr of element nearest the pth percentile."""
# Index corresponding to percentile
i = int(p * arr.size + 0.5)
return numpy.argpartition(arr, i)[i]
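# Example (illustrative): argpercentile(numpy.array([10., 0., 5.]), 0.5)
# computes i = int(0.5 * 3 + 0.5) = 2, so argpartition places the element of
# sorted rank 2 (the value 10.) at position 2 and the call returns its
# index, 0.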
|
{"hexsha": "d99f89486d90c76999e9d0778342c412f4b62db2", "size": 4467, "ext": "py", "lang": "Python", "max_stars_repo_path": "histomicstk/preprocessing/color_deconvolution/separate_stains_macenko_pca.py", "max_stars_repo_name": "basanto/HistomicsTK", "max_stars_repo_head_hexsha": "f3dbd93a7f31c7825574f9ccf0b86e09e9fee360", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 249, "max_stars_repo_stars_event_min_datetime": "2016-04-04T12:00:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:46:50.000Z", "max_issues_repo_path": "histomicstk/preprocessing/color_deconvolution/separate_stains_macenko_pca.py", "max_issues_repo_name": "basanto/HistomicsTK", "max_issues_repo_head_hexsha": "f3dbd93a7f31c7825574f9ccf0b86e09e9fee360", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 616, "max_issues_repo_issues_event_min_datetime": "2016-01-13T21:06:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T00:06:28.000Z", "max_forks_repo_path": "histomicstk/preprocessing/color_deconvolution/separate_stains_macenko_pca.py", "max_forks_repo_name": "basanto/HistomicsTK", "max_forks_repo_head_hexsha": "f3dbd93a7f31c7825574f9ccf0b86e09e9fee360", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 109, "max_forks_repo_forks_event_min_datetime": "2016-01-21T16:14:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T00:59:06.000Z", "avg_line_length": 35.736, "max_line_length": 80, "alphanum_fraction": 0.6756212223, "include": true, "reason": "import numpy", "num_tokens": 1099}
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
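# Illustrative (assumes pyjanitor is imported so `pivot_longer` is registered
# on DataFrame): pivoting this frame with
#     names_to=(".value", "age"), names_pattern="(.+)(.)"
# collapses ht1/ht2 into a single `ht` value column plus an `age` column
# holding the trailing digit, as exercised in test_names_pat_str below.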
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pat_str(df_checks):
"""
Test output when names_pattern is a string,
and .value is present.
"""
result = (
df_checks.pivot_longer(
column_names="ht*",
names_to=(".value", "age"),
names_pattern="(.+)(.)",
sort_by_appearance=True,
)
.reindex(columns=["famid", "birth", "age", "ht"])
.astype({"age": int})
)
actual = pd.wide_to_long(
df_checks, stubnames="ht", i=["famid", "birth"], j="age"
).reset_index()
assert_frame_equal(result, actual)
def test_multiindex_column_level(df_multi):
"""
Test output from MultiIndex column,
when column_level is provided.
"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_multiindex(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
and there is no names_sep/names_pattern.
"""
result = df_multi.pivot_longer(index=[("name", "a")])
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
there is no names_sep/names_pattern,
and names_to is provided as a sequence.
"""
result = df_multi.pivot_longer(
index=[("name", "a")], names_to=["variable_0", "variable_1"]
)
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to_length_mismatch(df_multi):
"""
Raise error if the length of names_to does not
match the number of column levels.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_to=["variable_0", "variable_1", "variable_2"],
)
def test_multiindex_incomplete_level_names(df_multi):
"""
Raise error if not all the levels have names.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_multiindex_index_level_names_intersection(df_multi):
"""
Raise error if level names exist in index.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_no_column_names(df_checks):
"""
Test output if all the columns
are assigned to the index parameter.
"""
assert_frame_equal(
df_checks.pivot_longer(df_checks.columns).rename_axis(columns=None),
df_checks,
)
@pytest.fixture
def test_df():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
)
def test_names_pattern_str(test_df):
"""Test output for names_pattern and .value."""
result = test_df.pivot_longer(
column_names="*_*",
names_to=["set", ".value"],
names_pattern="(.+)_(.+)",
sort_by_appearance=True,
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=r".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_sep(test_df):
"""Test output for names_sep and .value."""
result = test_df.pivot_longer(
names_to=["set", ".value"], names_sep="_", sort_by_appearance=True
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pattern_list():
"""Test output for names_pattern if list/tuple."""
df = pd.DataFrame(
{
"Activity": ["P1", "P2"],
"General": ["AA", "BB"],
"m1": ["A1", "B1"],
"t1": ["TA1", "TB1"],
"m2": ["A2", "B2"],
"t2": ["TA2", "TB2"],
"m3": ["A3", "B3"],
"t3": ["TA3", "TB3"],
}
)
result = df.pivot_longer(
index=["Activity", "General"],
names_pattern=["^m", "^t"],
names_to=["M", "Task"],
sort_by_appearance=True,
).loc[:, ["Activity", "General", "Task", "M"]]
actual = (
pd.wide_to_long(
df, i=["Activity", "General"], stubnames=["t", "m"], j="number"
)
.set_axis(["Task", "M"], axis="columns")
.droplevel(-1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def not_dot_value():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012": [48.1, 46.4, 44.3],
"floor_2012": [45.4, 41.6, 40.8],
"vault_2016": [46.9, 45.7, 44.3],
"floor_2016": [46.0, 42.0, 42.1],
}
)
def test_not_dot_value_sep(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep2(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to="event",
names_sep="/",
values_to="score",
)
actual = not_dot_value.melt(
"country", var_name="event", value_name="score"
)
assert_frame_equal(result, actual)
def test_not_dot_value_pattern(not_dot_value):
"""Test output when names_pattern is a string and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_pattern=r"(.+)_(.+)",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep_single_column(not_dot_value):
"""
Test output when names_sep and no dot_value
for a single column.
"""
A = not_dot_value.loc[:, ["country", "vault_2012"]]
result = A.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = A.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_multiple_dot_value():
"""Test output for multiple .value."""
df = pd.DataFrame(
{
"x_1_mean": [1, 2, 3, 4],
"x_2_mean": [1, 1, 0, 0],
"x_1_sd": [0, 1, 1, 1],
"x_2_sd": [0.739, 0.219, 1.46, 0.918],
"y_1_mean": [1, 2, 3, 4],
"y_2_mean": [1, 1, 0, 0],
"y_1_sd": [0, 1, 1, 1],
"y_2_sd": [-0.525, 0.623, -0.705, 0.662],
"unit": [1, 2, 3, 4],
}
)
result = df.pivot_longer(
index="unit",
names_to=(".value", "time", ".value"),
names_pattern=r"(x|y)_([0-9])(_mean|_sd)",
).astype({"time": int})
actual = df.set_index("unit")
cols = [ent.split("_") for ent in actual.columns]
actual.columns = [f"{start}_{end}{middle}" for start, middle, end in cols]
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["x_mean", "y_mean", "x_sd", "y_sd"],
i="unit",
j="time",
)
.sort_index(axis=1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def single_val():
"""fixture dataframe"""
return pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
}
)
def test_multiple_dot_value2(single_val):
"""Test output for multiple .value."""
result = single_val.pivot_longer(
index="id", names_to=(".value", ".value"), names_pattern="(.)(.)"
)
assert_frame_equal(result, single_val)
def test_names_pattern_sequence_single_unique_column(single_val):
"""
Test output if names_pattern is a sequence of length 1.
"""
result = single_val.pivot_longer(
"id", names_to=["x"], names_pattern=("x",)
)
actual = (
pd.wide_to_long(single_val, ["x"], i="id", j="num")
.droplevel("num")
.reset_index()
)
assert_frame_equal(result, actual)
def test_names_pattern_single_column(single_val):
"""
Test output if names_to is only '.value'.
"""
result = single_val.pivot_longer(
"id", names_to=".value", names_pattern="(.)."
)
actual = (
pd.wide_to_long(single_val, ["x"], i="id", j="num")
.droplevel("num")
.reset_index()
)
assert_frame_equal(result, actual)
def test_names_pattern_single_column_not_dot_value(single_val):
"""
Test output if names_to is not '.value'.
"""
df = single_val[["x1"]]
result = df.pivot_longer(names_to="yA", names_pattern="(.+)")
assert_frame_equal(result, df.melt(var_name="yA"))
def test_names_pattern_single_column_not_dot_value1(single_val):
"""
Test output if names_to is not '.value'.
"""
df = single_val[["id", "x1"]]
result = df.pivot_longer(index="id", names_to="yA", names_pattern="(.+)")
assert_frame_equal(result, df.melt("id", var_name="yA"))
def test_names_pattern_seq_single_column(single_val):
"""
Test output if names_pattern is a list.
"""
df = single_val[["id", "x1"]]
result = df.pivot_longer(index="id", names_to="yA", names_pattern=[".+"])
assert_frame_equal(result, df.rename(columns={"x1": "yA"}))
def test_names_pattern_nulls_in_data():
"""Test output if nulls are present in data."""
df = pd.DataFrame(
{
"family": [1, 2, 3, 4, 5],
"dob_child1": [
"1998-11-26",
"1996-06-22",
"2002-07-11",
"2004-10-10",
"2000-12-05",
],
"dob_child2": [
"2000-01-29",
np.nan,
"2004-04-05",
"2009-08-27",
"2005-02-28",
],
"gender_child1": [1, 2, 2, 1, 2],
"gender_child2": [2.0, np.nan, 2.0, 1.0, 1.0],
}
)
result = df.pivot_longer(
"family",
names_to=[".value", "child"],
names_pattern=r"(.+)_(.+)\d",
ignore_index=False,
)
result.index = range(len(result))
actual = (
pd.wide_to_long(
df, ["dob", "gender"], i="family", j="child", sep="_", suffix=".+"
)
.reset_index()
.assign(child=lambda df: df.child.str[:-1])
)
assert_frame_equal(result, actual)
@pytest.fixture
def multiple_values_to():
"""fixture for multiple values_to"""
# https://stackoverflow.com/q/51519101/7175713
return pd.DataFrame(
{
"City": ["Houston", "Austin", "Hoover"],
"State": ["Texas", "Texas", "Alabama"],
"Name": ["Aria", "Penelope", "Niko"],
"Mango": [4, 10, 90],
"Orange": [10, 8, 14],
"Watermelon": [40, 99, 43],
"Gin": [16, 200, 34],
"Vodka": [20, 33, 18],
},
columns=[
"City",
"State",
"Name",
"Mango",
"Orange",
"Watermelon",
"Gin",
"Vodka",
],
)
def test_output_values_to_seq(multiple_values_to):
"""Test output when values_to is a list/tuple."""
actual = multiple_values_to.melt(
["City", "State"],
value_vars=["Mango", "Orange", "Watermelon"],
var_name="Fruit",
value_name="Pounds",
)
expected = multiple_values_to.pivot_longer(
index=["City", "State"],
column_names=slice("Mango", "Watermelon"),
names_to=("Fruit"),
values_to=("Pounds",),
names_pattern=[r"M|O|W"],
)
assert_frame_equal(expected, actual)
def test_output_values_to_seq1(multiple_values_to):
"""Test output when values_to is a list/tuple."""
# https://stackoverflow.com/a/51520155/7175713
df1 = multiple_values_to.melt(
id_vars=["City", "State"],
value_vars=["Mango", "Orange", "Watermelon"],
var_name="Fruit",
value_name="Pounds",
)
df2 = multiple_values_to.melt(
id_vars=["City", "State"],
value_vars=["Gin", "Vodka"],
var_name="Drink",
value_name="Ounces",
)
df1 = df1.set_index(
["City", "State", df1.groupby(["City", "State"]).cumcount()]
)
df2 = df2.set_index(
["City", "State", df2.groupby(["City", "State"]).cumcount()]
)
actual = (
pd.concat([df1, df2], axis=1)
.sort_index(level=2)
.reset_index(level=2, drop=True)
.reset_index()
.astype({"Fruit": "category", "Drink": "category"})
)
expected = multiple_values_to.pivot_longer(
index=["City", "State"],
column_names=slice("Mango", "Vodka"),
names_to=("Fruit", "Drink"),
values_to=("Pounds", "Ounces"),
names_pattern=[r"M|O|W", r"G|V"],
names_transform={"Fruit": "category", "Drink": "category"},
).sort_values(["Fruit", "City", "State"], ignore_index=True)
assert_frame_equal(expected, actual)
def test_categorical(df_checks):
"""Test category output for names_to."""
actual = df_checks.melt(["famid", "birth"]).astype(
{"variable": "category"}
)
expected = df_checks.pivot_longer(
["famid", "birth"], names_transform="category"
)
assert_frame_equal(actual, expected, check_categorical=False)
def test_names_transform_numeric():
"""
Test output for names_transform on numeric sub columns
"""
df = pd.DataFrame(
{
"treatment_1.1": [1.0, 2.0],
"treatment_2.1": [3.0, 4.0],
"result_1.2": [5.0, 6.0],
"result_1": [0, 9],
"A": ["X1", "X2"],
}
)
expected = (
df.pivot_longer(
index="A",
names_to=(".value", "colname"),
names_sep="_",
names_transform=float,
)
.sort_values(
["A", "colname", "result", "treatment"], ignore_index=True
)
.loc[:, ["A", "colname", "result", "treatment"]]
)
actual = pd.wide_to_long(
df,
["result", "treatment"],
i="A",
j="colname",
suffix="[0-9.]+",
sep="_",
).reset_index()
assert_frame_equal(actual, expected)
def test_duplicated_columns():
"""Test output for duplicated columns."""
rows = [["credit", 1, 1, 2, 3]]
columns = ["Type", "amount", "active", "amount", "active"]
df = pd.DataFrame(rows, columns=columns)
df = df.set_index("Type")
actual = pd.DataFrame(
{"amount": [1, 2], "active": [1, 3]},
index=pd.Index(["credit", "credit"], name="Type"),
)
expected = df.pivot_longer(
names_to=".value", names_pattern="(.+)", ignore_index=False
)
assert_frame_equal(actual, expected)
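# A minimal illustration (an addition, not part of the original suite) of the
# `.value` placeholder exercised throughout these tests: the matched piece of
# each column name replaces `.value` in the output columns, while the other
# pieces become ordinary variable columns. Assumes `janitor` is imported
# earlier in this module, as the tests above require.
if __name__ == "__main__":
    demo = pd.DataFrame({"id": [1, 2], "x_a": [3, 4], "x_b": [5, 6]})
    out = demo.pivot_longer(index="id", names_to=(".value", "grp"), names_sep="_")
    print(out)  # columns: id, grp, x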
|
{"hexsha": "d2c8ffa684d03ae0242304c1499725b0be5d7404", "size": 32582, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/functions/test_pivot_longer.py", "max_stars_repo_name": "aliavni/pyjanitor", "max_stars_repo_head_hexsha": "245012443d01247a591fd0e931b154c7a12a9753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/functions/test_pivot_longer.py", "max_issues_repo_name": "aliavni/pyjanitor", "max_issues_repo_head_hexsha": "245012443d01247a591fd0e931b154c7a12a9753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/functions/test_pivot_longer.py", "max_forks_repo_name": "aliavni/pyjanitor", "max_forks_repo_head_hexsha": "245012443d01247a591fd0e931b154c7a12a9753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5418427726, "max_line_length": 79, "alphanum_fraction": 0.576668099, "include": true, "reason": "import numpy", "num_tokens": 8114}
|
from lib.math.linalg.vector import *
import numpy as np
|
{"hexsha": "570420d5137a7d4bd52fe07c5eca36d4602aad41", "size": 56, "ext": "py", "lang": "Python", "max_stars_repo_path": "quest/lib/math/linalg/__init__.py", "max_stars_repo_name": "Fluorescence-Tools/quest", "max_stars_repo_head_hexsha": "e17e5682f7686d1acc1fd8a22bdae33963bc16d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quest/lib/math/linalg/__init__.py", "max_issues_repo_name": "Fluorescence-Tools/quest", "max_issues_repo_head_hexsha": "e17e5682f7686d1acc1fd8a22bdae33963bc16d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-14T08:01:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-15T22:59:05.000Z", "max_forks_repo_path": "quest/lib/math/linalg/__init__.py", "max_forks_repo_name": "Fluorescence-Tools/quest", "max_forks_repo_head_hexsha": "e17e5682f7686d1acc1fd8a22bdae33963bc16d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6666666667, "max_line_length": 36, "alphanum_fraction": 0.7857142857, "include": true, "reason": "import numpy", "num_tokens": 12}
|
"""
DoesNotExist()
This differential indicates that the derivative Does Not Exist (D.N.E).
This does not mean that the derivative is simply not implemented; rather, it
is mathematically not defined.
"""
struct DoesNotExist <: AbstractDifferential end
function extern(x::DoesNotExist)
    throw(ArgumentError("Derivative does not exist. Cannot be converted to an external type."))
end
Base.Broadcast.broadcastable(::DoesNotExist) = Ref(DoesNotExist())
Base.iterate(x::DoesNotExist) = (x, nothing)
Base.iterate(::DoesNotExist, ::Any) = nothing
|
{"hexsha": "f0dc5fe09c6ecdc6a5f91adfafdfb5cde1b58ac9", "size": 541, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/differentials/does_not_exist.jl", "max_stars_repo_name": "YingboMa/ChainRulesCore.jl", "max_stars_repo_head_hexsha": "0ad05d0f61f1fed90d1b5c0084abc00b703ba67f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/differentials/does_not_exist.jl", "max_issues_repo_name": "YingboMa/ChainRulesCore.jl", "max_issues_repo_head_hexsha": "0ad05d0f61f1fed90d1b5c0084abc00b703ba67f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/differentials/does_not_exist.jl", "max_forks_repo_name": "YingboMa/ChainRulesCore.jl", "max_forks_repo_head_hexsha": "0ad05d0f61f1fed90d1b5c0084abc00b703ba67f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4736842105, "max_line_length": 94, "alphanum_fraction": 0.7560073937, "num_tokens": 123}
|
import unittest
import numpy as np
from ..stat import *
from .. import stat
class TestLedoitWolfCov(unittest.TestCase):
def setUp(self):
np.random.seed(0)
p, n = 40, 50
self.A = A = np.random.randn(p, p)
self.Sigma = np.dot(A, A.T)
X = np.random.randn(p, n)
X -= np.atleast_2d(np.mean(X, 1)).T
X = np.dot(A, X)
self.X0 = X0 = X - np.atleast_2d(np.mean(X, 1)).T
self.S = np.dot(X0, X0.T) / n
def test_var_of_cov(self):
X0, S = self.X0, self.S
p, n = X0.shape
V = np.mean(
[(np.dot(np.atleast_2d(o).T, np.atleast_2d(o)) - S)**2 for o in X0.T],
axis=0)
b2, d2, lamb = lw_cov_base(X0, S, np.eye(p))
self.assertAlmostEqual(np.sum(V) / n, b2)
def test_condition_number(self):
S_star = lw_cov(self.X0)
S = np.cov(self.X0, rowvar=False)
self.assertTrue(np.linalg.cond(S_star) < np.linalg.cond(S))
def test_accuracy(self):
X, S, Sigma = self.X0, self.S, self.Sigma
Sigma = np.dot(self.A.T, self.A)
self.assertTrue(np.linalg.norm(lw_cov(X) - Sigma)
< np.linalg.norm(S - Sigma))
def test_inv_accuracy(self):
X, S, Sigma = self.X0, self.S, self.Sigma
S_star = lw_cov(X)
invSigma, invS, invS_star = [np.linalg.inv(Y) for Y in [Sigma, S, S_star]]
self.assertTrue(np.linalg.norm(invS_star - invSigma)
< np.linalg.norm(invS - invSigma))
class TestKullbackLeibler(unittest.TestCase):
def setUp(self):
A = np.random.randn(4, 4)
self.Sig1 = np.dot(A, A.T)
self.inv_Sig1 = np.linalg.inv(self.Sig1)
self.mu1 = np.random.randn(4)
B = np.random.randn(4, 4)
self.Sig2 = np.dot(B, B.T)
self.inv_Sig2 = np.linalg.inv(self.Sig2)
self.mu2 = np.random.randn(4)
def test_equal_dist(self):
Sig_p, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
self.assertAlmostEqual(norm_kl_divergence(inv_Sig_p, mu, Sig_p, mu), 0)
def test_mean_divergence(self):
Sig_q, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
for i in range(4):
# generate random direction
rd = np.random.randn(Sig_q.shape[0])
# shift one mean in this direction
kld = np.asarray([norm_kl_divergence(inv_Sig_p, mu, Sig_q, mu + rd * d)
for d in np.linspace(0, 10, 50)])
# check that the KLD is monotonically increasing
self.assertTrue(np.all(np.diff(kld) > 0))
def test_cov_divergence(self):
Sig_q, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
Sig_p = self.Sig2
kl = []
for alpha in np.linspace(0, 1, 10):
# create diverging covariance matrix
S = alpha * Sig_p + (1. - alpha) * Sig_q
kl.append(norm_kl_divergence(inv_Sig_p, mu, S, mu))
self.assertTrue(np.all(np.diff(kl) > 0))
def test_numerical(self):
mu_p, mu_q, sig_p, sig_q = -1, 0, 1, .5
kld_an = norm_kl_divergence(sig_p, mu_p, 1./sig_q, mu_q)
def norm_pdf(x, mu, sig):
return 1./np.sqrt(np.pi * 2. * sig **2)* np.exp(-(x-mu)**2./(2.*sig**2))
xs = np.linspace(-10, 10, 5000)
px = norm_pdf(xs, mu_p, sig_p**.5)
qx = norm_pdf(xs, mu_q, sig_q**.5)
div = px * np.log(px/qx)
kld_num = np.trapz(div, xs)
np.testing.assert_almost_equal(kld_num, kld_an)
def test_convenience_fun(self):
P = np.dot(np.random.randn(4, 4), np.random.rand(4, 10))
Q = np.dot(np.random.randn(4, 4), np.random.rand(4, 100))
self.assertAlmostEqual(
kl(P, Q),
norm_kl_divergence(lw_cov(P), np.mean(P, 1),
np.linalg.pinv(lw_cov(Q)), np.mean(Q, 1)))
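# Reference sketch (an addition, not package code): the closed-form KL
# divergence between two 1-D Gaussians parameterised by *variance*, the
# quantity test_numerical above checks against numerical integration. The
# exact argument convention of norm_kl_divergence is defined in ..stat.
def _gauss_kl_sketch(mu_p, var_p, mu_q, var_q):
    # KL(p || q) = 1/2 ln(var_q / var_p) + (var_p + (mu_p - mu_q)^2) / (2 var_q) - 1/2
    return (0.5 * np.log(var_q / var_p)
            + (var_p + (mu_p - mu_q) ** 2) / (2.0 * var_q)
            - 0.5)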
class TestROC(unittest.TestCase):
def test_roc(self):
'''Test bounds and ordering of ROC'''
TPs, FPs = roc(np.random.rand(100), np.random.rand(100).round())
        # test monotonically non-decreasing TPs and FPs
np.testing.assert_equal(np.sort(TPs), TPs)
np.testing.assert_equal(np.sort(FPs), FPs)
self.assertEqual(TPs.min(), 0)
self.assertEqual(TPs.max(), 1)
self.assertEqual(FPs.min(), 0)
self.assertEqual(FPs.max(), 1)
def test_reverse(self):
'''Test that the ROC is invariant for reversions'''
scores = np.array([-1, 0, 0, 0, 0, 0, 0, 1])
labels = np.array([ 0, 0, 0, 0, 1, 1, 1, 1])
t0, f0 = roc(scores, labels)
t1, f1 = roc(scores[::-1], labels[::-1]) # reversed ROC
np.testing.assert_equal(t0, t1)
np.testing.assert_equal(f0, f1)
def test_known(self):
'''Test ROC for known input'''
scores = np.array([-1, 0, 0, 0, 0, 0, 0, 1])
labels = np.array([ 0, 0, 0, 0, 1, 1, 1, 1])
t0, f0 = roc(scores, labels)
self.assertTrue((t0 == [0, .25, 1, 1]).all())
self.assertTrue((f0 == [0, 0, .75, 1]).all())
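# Threshold-sweep sketch (an addition) of how ROC points like those in
# test_known can be computed; the package's roc() may handle ties differently.
def _roc_sketch(scores, labels):
    scores, labels = np.asarray(scores), np.asarray(labels)
    tps, fps = [0.0], [0.0]
    for t in np.unique(scores)[::-1]:  # sweep thresholds from high to low
        pred = scores >= t
        tps.append(np.sum(pred & (labels == 1)) / float(np.sum(labels == 1)))
        fps.append(np.sum(pred & (labels == 0)) / float(np.sum(labels == 0)))
    return np.array(tps), np.array(fps)  # the AUC is then np.trapz(tps, fps)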
class TestAUC(unittest.TestCase):
def test_AUC_extrema(self):
'''Test AUC for extrema'''
self.assertEqual(auc([0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]), 1)
self.assertEqual(auc([1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1]), 0)
self.assertEqual(auc([1, 0, 1, 0, 1, 0], [1, 1, 1, 1, 0, 0]), .5)
def test_AUC_symbounds(self):
'''Test AUC for symmetry and bounds'''
N = 100
for rho in [.1, .3, .5]:
for i in range(20):
xs = np.random.random(N)
ys = (np.linspace(0, 1, N) <= rho).round()
self.assertAlmostEqual(auc(xs, ys), 1-auc(xs, np.abs(ys-1)))
self.assertTrue(0 <= auc(xs, ys) <= 1)
def test_AUC_confidence(self):
'''Test AUC confidence interval for trends'''
# we do not know much, but we can test for trends
self.assertTrue(auc_confidence(1) > auc_confidence(100))
self.assertTrue(auc_confidence(100, rho=.1) > auc_confidence(100))
self.assertTrue(auc_confidence(100, delta=1e-8) > auc_confidence(100))
# and symmetry
for rho in [.01, .1, .5]:
self.assertAlmostEqual(auc_confidence(100, rho=rho),
auc_confidence(100, rho=1-rho))
def test_monte_carlo(self):
'''Monte Carlo test for AUC confidence intervals'''
SAMPLES = 100
for N in [10, 100, 1000]:
for rho in [0.1, .5, .9]:
xs = np.random.random(N)
                ys = (np.linspace(0, 1, N) <= rho).round().astype(int)  # np.int was removed from modern NumPy
self.assertEqual(ys.mean(), rho)
aucs = []
# create random AUCs
for i in range(SAMPLES):
np.random.shuffle(ys)
aucs.append(auc(xs, ys))
# test conservativeness
for delta in [.05, .001, .0001]:
epsilon = auc_confidence(N, rho, delta)
dev = np.abs(np.array(aucs) - 0.5)
e_p = np.mean(dev > epsilon)
self.assertTrue(e_p <= delta,
'empirical p (=%f) > delta (=%f)' % (e_p, delta))
class TestMutualInformation(unittest.TestCase):
def test_max_bits(self):
for i in range(4):
conf = np.eye(2 ** i)
self.assertAlmostEqual(mut_inf(conf), i)
def test_uniform(self):
for i in range(4):
conf = np.ones((i, i + 1))
self.assertAlmostEqual(mut_inf(conf), 0)
def test_zero(self):
self.assertTrue(np.isnan(mut_inf(np.zeros((5, 3)))))
def test_no_modification(self):
conf = np.ones((4, 3))
mut_inf(conf)
np.testing.assert_equal(conf, np.ones((4, 3)))
def test_symmetrical(self):
for i in range(4):
conf = np.random.rand(3, 8)
self.assertAlmostEqual(mut_inf(conf), mut_inf(conf.T))
def test_malformed(self):
self.assertRaises(AssertionError, mut_inf, -np.ones((3, 3)))
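# Sketch (an addition) of the quantity the tests above pin down: the mutual
# information of a contingency table in bits, I = sum p_ij log2(p_ij / (p_i p_j)).
def _mut_inf_sketch(conf):
    conf = np.asarray(conf, dtype=float)
    if conf.sum() == 0:
        return np.nan  # matches test_zero above
    p = conf / conf.sum()              # joint distribution
    px = p.sum(axis=1, keepdims=True)  # row marginals
    py = p.sum(axis=0, keepdims=True)  # column marginals
    nz = p > 0                         # 0 * log 0 := 0
    return np.sum(p[nz] * np.log2((p / (px * py))[nz]))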
class TestFWERCorrection(unittest.TestCase):
def setUp(self):
ps = np.random.rand(10)
ps -= np.min(ps) # Scale between 0 and 1
ps /= np.max(ps)
self.ps = ps
def test_sort(self):
a = np.arange(20)
np.testing.assert_equal(stat._sort(a)[0], np.arange(20))
a = np.sort(a)
b, original_order = stat._sort(a)
np.testing.assert_equal(b, np.arange(20))
np.testing.assert_equal(b[original_order], a)
def test_bonferroni(self):
ps = self.ps
np.testing.assert_equal(bonferroni(ps), np.clip(ps * len(ps), 0, 1))
def test_bonferroni_holm(self):
ps = self.ps
n = len(ps)
ps = np.sort(ps)
adj_ps = np.asarray(ps) * (n - np.arange(n))
np.testing.assert_equal(bonferroni_holm(ps), np.clip(adj_ps, 0, 1))
def test_benjamini_hochberg(self):
ps = self.ps
n = len(ps)
ps = np.sort(ps)
adj_ps = (np.asarray(ps) * n) / (np.arange(n) + 1.0)
np.testing.assert_equal(benjamini_hochberg(ps), np.clip(adj_ps, 0, 1))
|
{"hexsha": "9af53fc68cd79c04848b8c1f33045e27b4e0db55", "size": 9137, "ext": "py", "lang": "Python", "max_stars_repo_path": "psychic/tests/teststat.py", "max_stars_repo_name": "wmvanvliet/psychic", "max_stars_repo_head_hexsha": "4ab75fb655795df0272c1bb0eb0dfeb232ffe143", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "psychic/tests/teststat.py", "max_issues_repo_name": "wmvanvliet/psychic", "max_issues_repo_head_hexsha": "4ab75fb655795df0272c1bb0eb0dfeb232ffe143", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "psychic/tests/teststat.py", "max_forks_repo_name": "wmvanvliet/psychic", "max_forks_repo_head_hexsha": "4ab75fb655795df0272c1bb0eb0dfeb232ffe143", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5525291829, "max_line_length": 84, "alphanum_fraction": 0.5495239138, "include": true, "reason": "import numpy", "num_tokens": 2727}
|
#!/usr/bin/env python
# encoding: utf-8
"""
CombinatoricalMediaSimulations.py
Simulates all possible minimal media compositions consisting of unique carbon,
nitrogen, sulfate, phosphate sources. If a single compound provides multiple
elemental sources it serves a the solely source for them. # TODO: Reformulate this
KO analyses of genes and reactions are optional.
TODO:
1. Yaml has to be replaced with JSON (since 2.6 JSON is part of the stdlib)
Created by Nikolaus Sonnenschein on 2010-11-03.
Copyright (c) 2010 Jacobs University of Bremen. All rights reserved.
"""
import sys
import time
import textwrap
import Queue
from itertools import product
import yaml
# try:
# import json
# except ImportError:
# import simplejson as json
if len(sys.argv) < 2 or sys.argv[1] != 'client':
from ifba.storage.hdf5storage import SimulationDB, h5Container
# except ImportError:
#
# pass
# else:
# print "HDF5/pyTables is not available! You can use the script only in client mode!"
# sys.exit(-1)
from ifba.distributedFBA.networking import Server, Client
from ifba.distributedFBA.concurrency import GeneratorInputClient, h5OutputClient
from ifba.GlpkWrap.util import ImportCplex
from ifba.GlpkWrap.metabolism import Metabolism
from ifba.GlpkWrap.fluxdist import FBAsimulationResult
if len(sys.argv) < 2 or sys.argv[1] != 'client':
from RandomMediaSimulations import generateStorageObject
from ifba.glpki.glpki import glp_delete_prob
def generateStorageObject(path, lp):
"""docstring for generateStorageObject"""
return SimulationDB(h5Container(path, lp))
def readSources(path):
f = open(path, 'r')
sources = list()
for line in f:
line = line.replace('\n', '')
sources.append(line.split('\t'))
return [tuple(sub) for sub in sources]
def generateCombinatoricalSets(*sources):
    """Yield every combination that picks one entry from each source list.
    Equivalent to the exec-built generator this used to construct, but without
    compiling code at runtime, e.g.
    generateCombinatoricalSets(['a', 'b'], ['x']) -> ('a', 'x'), ('b', 'x')."""
    return product(*sources)
def generateCombinatoricalMedia(uptake, setGenerator):
for s in setGenerator:
yield dict([('R("'+elem+'_Transp")', (0, uptake)) for elem in s])
def readSourcesTable(path):
"""Reads a table of the form:
source carbon nitrogen phosphor sulfur
carb1 1 1 0 1"""
auxFunc = lambda x: (x[0], int(x[1]), int(x[2]), int(x[3]), int(x[4]))
return [auxFunc(line.rstrip().split('\t')) for line in open(path)]
def possibleSigs(sig):
    # Elements already covered by `sig` (entry == 1) are forced to 0; elements
    # still missing (entry == 0) may be 0 or 1. The trailing [1:] drops the
    # all-zero tuple, so every returned signature adds at least one element.
    return [(elem1, elem2, elem3, elem4) \
        for elem1 in [0, 1][0:abs(sig[0] - 1)+1] \
        for elem2 in [0, 1][0:abs(sig[1] - 1)+1] \
        for elem3 in [0, 1][0:abs(sig[2] - 1)+1] \
        for elem4 in [0, 1][0:abs(sig[3] - 1)+1]][1:]
def _DirtyHack(sourceTable, seedSources):
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combiDict = dict([(k, list()) for k in [(elem1, elem2, elem3, elem4) for elem1 in [0, 1] for elem2 in [0, 1] for elem3 in [0, 1] for elem4 in [0, 1]]])
for row in sourceTable:
combiDict[tuple(row[1:])].append(row[0])
for i, carb in enumerate(seedSources):
sig = sources2sig[carb]
# print carb, sig
# print "possible other signatures:"
nextSigs = possibleSigs(sig)
if nextSigs == []:
yield [carb]
for sig2 in nextSigs:
potentialSources = combiDict[sig2]
# print potentialSources
tmp1 = [[carb, s] for s in potentialSources]
# print '\t', sig2
combSig1 = tuple([sig[i] + sig2[i] for i in range(4)])
# print "\tpossible other signatures2:"
nextSigs = possibleSigs(combSig1)
if nextSigs == []:
for elem in tmp1:
yield elem
for sig3 in nextSigs:
potentialSources = combiDict[sig3]
# print potentialSources
# if potentialSources == []:
# for elem in tmp1:
# yield elem
tmp2 = [elem + [s] for elem in tmp1 for s in potentialSources]
# print '\t\t', sig3
combSig2 = tuple([combSig1[i] + sig3[i] for i in range(4)])
# print "\t\tpossible other signatures2:"
nextSigs = possibleSigs(combSig2)
if nextSigs == []:
for elem in tmp2:
yield elem
for sig4 in nextSigs:
potentialSources = combiDict[sig4]
# print "4", potentialSources
# if potentialSources == []:
# for elem in tmp2:
# yield elem
# print 3*'\t', sig4
# print textwrap.fill(str(tmp2), initial_indent=3*'\t', subsequent_indent=3*'\t')
for s in potentialSources:
for elem in tmp2:
yield elem + [s]
def combinatoricalSources(sourceTable, seedSources):
mediaGenerator = _DirtyHack(sourceTable, seedSources)
stuff = set()
for med in mediaGenerator:
# print 4*'\t', med
stuff.add(tuple(sorted(med)))
# print len(stuff)
return stuff
def testCombinatoricalSources(sourceTable, carbonSources):
    import numpy
    # sources2sig used to be an undefined global; derive it from sourceTable.
    sources2sig = dict([(row[0], tuple(row[1:])) for row in sourceTable])
    stuff = combinatoricalSources(sourceTable, carbonSources)
    stuff3 = set()
    for elem in list(stuff):
        tot = numpy.array([sources2sig[s] for s in elem]).sum()
        if tot != 4:
            print elem
            print [sources2sig[s] for s in elem]
        stuff3.add(tot)
    print stuff3
def generateSolveMediumObject(path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
return SolveMedium(path2model=path2model, medium=medium, include=include, objective=objective, optimizationRoutine=optimizationRoutine, koQ=koQ, *args, **kwargs)
class SolveMedium(object):
def __init__(self, path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
self.koQ = koQ
self.optimizationRoutine = optimizationRoutine
self.objective = objective
self.lp = Metabolism(ImportCplex(path2model))
self.path2model = path2model
if objective:
self.lp.setReactionObjective(self.objective)
self.preMed = dict([(r, (-1000., 0)) for r in self.lp.getTransporters()])
self.preMed.update(include)
self.lp.modifyColumnBounds(self.preMed)
self.lp.modifyColumnBounds(dict([(r, (0., 1000.)) for r in self.lp.getReactions()]))
self.lp.modifyColumnBounds(medium)
self.lp.eraseHistory()
def run(self, *args, **kwargs):
"""docstring for run"""
f = getattr(self.lp, self.optimizationRoutine)()
knockoutEffects = dict()
wt = f[self.objective]
if self.koQ and wt > 0.:
knockoutEffects = self.lp.singleKoAnalysis(f.getActiveReactions())
for k in knockoutEffects:
knockoutEffects[k] = knockoutEffects[k] / wt
self.lp.undo()
return FBAsimulationResult(f, knockoutEffects, self.lp.getColumnBounds(),
self.lp.getObjectiveFunction(),
time.time(), self.path2model, "Test")
    def __del__(self):
        """Free the underlying GLPK problem when the object is collected."""
        glp_delete_prob(self.lp.lp) # FIXME this is a dirty hack
def solveMedium(path2model="", medium={}, include={}, objective=None, optimizationRoutine='pFBA', koQ=True, *args, **kwargs):
"""doc"""
lp = Metabolism(ImportCplex(path2model))
if objective:
lp.setReactionObjective(objective)
preMed = dict([(r, (-1000., 0)) for r in lp.getTransporters()])
preMed.update(include)
lp.modifyColumnBounds(preMed)
lp.modifyColumnBounds(medium)
lp.modifyColumnBounds(dict([(r, (0., 1000.)) for r in lp.getReactions()]))
lp.eraseHistory()
# print lp.cplex()
f = lp.pFBA()
# simulationStorage = generateStorageObject(outputfile, lp)
knockoutEffects = dict()
wt = f[objective]
print wt
if koQ and wt > 0.:
knockoutEffects = lp.singleKoAnalysis(f.getActiveReactions())
for k in knockoutEffects:
knockoutEffects[k] = knockoutEffects[k] / wt
lp.initialize()
# print knockoutEffects
return FBAsimulationResult(f, knockoutEffects, lp.getColumnBounds(),
lp.getObjectiveFunction(),
time.time(), path2model, "Test")
def basicFunctionality(outputfile, configPath):
config = yaml.load(open(configPath))
descr = yaml.dump(config)
print descr
config['descr'] = descr
sourceTable = readSourcesTable(config["sourcesPath"])
carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combSources = list(combinatoricalSources(sourceTable, carbonSources))
gen = generateCombinatoricalMedia(config["uptake"], combSources)
run = 0
for medium in gen:
print "Run:", run
run += 1
solveMedium(medium=medium,**config)
def client(serverip):
"""docstring for client"""
counter = 0
client = Client(task=generateSolveMediumObject, host=serverip)
while True:
counter = counter + 1
print counter
client.run()
def stub(gen, config):
    """Yield the shared config dict with its medium replaced for each entry.
    Note: the same dict object is reused, so consumers must process (or copy)
    each item before advancing the generator."""
    for elem in gen:
        config["medium"] = elem
        yield config
def server(outputfile='test.h5', configPath='parameters.yaml'):
"""Server"""
config = yaml.load(open(configPath))
descr = yaml.dump(config)
print descr
config['descr'] = descr
sourceTable = readSourcesTable(config["sourcesPath"])
carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
combSources = list(combinatoricalSources(sourceTable, carbonSources))
gen = generateCombinatoricalMedia(config["uptake"], combSources)
lp = Metabolism(ImportCplex(config["path2model"]))
simulationStorage = generateStorageObject(outputfile, lp)
inputQueue = Queue.Queue(20)
outputQueue = Queue.Queue(20)
# gen2 = (config["medium"] = elem for elem in gen)
gen2 = stub(gen, config)
t1 = GeneratorInputClient(inputQueue, gen2)
t1.start()
time.sleep(1)
t2 = h5OutputClient(outputQueue, simulationStorage)
t2.start()
time.sleep(1)
s = Server(inputQueue=inputQueue, outputQueue=outputQueue, host="localhost")
print s
s.run()
if __name__ == '__main__':
# print possibleSigs((0,0,1,1))
#
# sourcePath = '/Users/niko/arbeit/Data/SBMLmodels/iAF1260/biologValidatedSourcesWithElementalComposition.tsv'
# sourceTable = readSourcesTable(sourcePath)
# carbonSources = [elem[0] for elem in sourceTable if elem[1] == 1]
# print carbonSources
# sources2sig = dict([(row[0], tuple(row[1:]))for row in sourceTable])
# testCombinatoricalSources(sourceTable, carbonSources)
# combSources = list(combinatoricalSources(sourceTable, carbonSources))
# print list(generateCombinatoricalMedia(20., combSources))[0:10]
# include = dict([['R("R_ATPM")', [8.39, 8.39]],
# ['R("Mo2b_Transp")', [0, 18.5]],
# ['R("Mco2b_Transp")', [-1000, 1000]],
# ['R("Mh2ob_Transp")', [-1000, 1000]],
# ['R("Mhb_Transp")', [-1000, 1000]],
# ['R("Mna1b_Transp")', [-1000, 1000]],
# ['R("Mkb_Transp")', [-1000, 1000]],
# ['R("Mca2b_Transp")', [-1000, 1000]],
# ['R("Mcu2b_Transp")', [-1000, 1000]],
# ['R("Mmg2b_Transp")', [-1000, 1000]],
# ['R("Mzn2b_Transp")', [-1000, 1000]],
# ['R("Mmobdb_Transp")', [-1000, 1000]],
# ['R("Mfe2b_Transp")', [-1000, 1000]],
# ['R("Mfe3b_Transp")', [-1000, 1000]],
# ['R("Mcobalt2b_Transp")', [-1000, 1000]],
# ['R("Mmn2b_Transp")', [-1000, 1000]],
# ['R("Mclb_Transp")', [-1000, 1000]],
# ['R("R_CAT")', [0, 0]],
# ['R("R_SPODM")', [0, 0]],
# ['R("R_SPODMpp")', [0, 0]],
# ['R("R_FHL")', [0, 0]]])
# gen = generateCombinatoricalMedia(20., combSources)
# for i in range(10):
# medium = gen.next()
# print medium
# solveMedium('../models/iAF1260templateMinMax.lp', medium=medium, include=include, objective='R("R_Ec_biomass_iAF1260_core_59p81M")')
try:
sys.argv[1]
except IndexError:
sys.argv.append('server')
sys.argv.append('test.h5')
usage = """Usage:
    python CombinatoricalMediaSimulations.py standalone storagefile configfile --> standalone mode
    python CombinatoricalMediaSimulations.py server storagefile configfile --> server mode
    python CombinatoricalMediaSimulations.py client serverip --> client mode"""
try:
if sys.argv[1] == 'standalone':
basicFunctionality(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'server':
server(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'client':
client(sys.argv[2])
else:
print usage
except IndexError:
print usage
|
{"hexsha": "509104233515e1ac6373707a9d322a7cee676fdb", "size": 13720, "ext": "py", "lang": "Python", "max_stars_repo_path": "ifba/bins/CombintoricalMediaSimulations.py", "max_stars_repo_name": "phantomas1234/fbaproject", "max_stars_repo_head_hexsha": "6aa2a9b547b8326d928f42566de632265016e729", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-05-09T02:44:15.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-09T02:44:15.000Z", "max_issues_repo_path": "ifba/bins/CombintoricalMediaSimulations.py", "max_issues_repo_name": "phantomas1234/fbaproject", "max_issues_repo_head_hexsha": "6aa2a9b547b8326d928f42566de632265016e729", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ifba/bins/CombintoricalMediaSimulations.py", "max_forks_repo_name": "phantomas1234/fbaproject", "max_forks_repo_head_hexsha": "6aa2a9b547b8326d928f42566de632265016e729", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5389048991, "max_line_length": 165, "alphanum_fraction": 0.5952623907, "include": true, "reason": "import numpy", "num_tokens": 3631}
|
import numpy as np
from baselines.template.util import store_args, logger
from baselines.template.policy import Policy
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
class RandomPolicy(Policy):
@store_args
def __init__(self, input_dims, T, rollout_batch_size, **kwargs):
""" Just a random dummy. Does not learn anything
"""
Policy.__init__(self, input_dims, T, rollout_batch_size, **kwargs)
def get_actions(self, o, ag, g, policy_action_params=None):
        # Called by the rollout worker (through Policy); returns random actions and a dummy placeholder.
EMPTY = 0
u = np.random.randn(o.size // self.dimo, self.dimu)
return u, EMPTY
def store_episode(self, episode_batch, update_stats=True):
pass
def get_current_buffer_size(self):
pass
def sample_batch(self):
pass
def stage_batch(self, batch=None):
pass
def train(self, stage=True):
pass
def clear_buffer(self):
pass
def logs(self, prefix=''):
logs = []
logs += [('stats/some_stat_value', 0)]
return logger(logs, prefix)
|
{"hexsha": "36def1d8d234fe9b0a5d837e5c8438c1e21b6895", "size": 1206, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/example_algorithm/random_policy.py", "max_stars_repo_name": "knowledgetechnologyuhh/goal_conditioned_RL_baselines", "max_stars_repo_head_hexsha": "915fc875fd8cc75accd0804d99373916756f726e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-07-01T16:16:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T21:56:33.000Z", "max_issues_repo_path": "baselines/example_algorithm/random_policy.py", "max_issues_repo_name": "knowledgetechnologyuhh/goal_conditioned_RL_baselines", "max_issues_repo_head_hexsha": "915fc875fd8cc75accd0804d99373916756f726e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:41:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:38:44.000Z", "max_forks_repo_path": "baselines/example_algorithm/random_policy.py", "max_forks_repo_name": "knowledgetechnologyuhh/goal_conditioned_RL_baselines", "max_forks_repo_head_hexsha": "915fc875fd8cc75accd0804d99373916756f726e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-01T16:19:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-28T10:45:59.000Z", "avg_line_length": 26.8, "max_line_length": 89, "alphanum_fraction": 0.6459369818, "include": true, "reason": "import numpy", "num_tokens": 288}
|
import cv2
import numpy as np
from visualize_cv2 import model, display_instances, class_names
capture = cv2.VideoCapture('videofile.mp4')
size = (
int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
)
codec = cv2.VideoWriter_fourcc(*'DIVX')
fps = capture.get(cv2.CAP_PROP_FPS)
output = cv2.VideoWriter('videofile_masked.avi', codec, fps, size)
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
duration = frame_count/fps
minutes = int(duration/60)
seconds = duration%60
print('fps = ' + str(fps))
print('number of frames = ' + str(frame_count))
print('duration (S) = ' + str(duration))
print('duration (M:S) = ' + str(minutes) + ':' + str(seconds))
while(capture.isOpened()):
ret, frame = capture.read()
if ret:
results = model.detect([frame], verbose=0)
r = results[0]
frame = display_instances(
frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
output.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
capture.release()
output.release()
cv2.destroyAllWindows()
print('The video was processed')
|
{"hexsha": "e48220eec9ef50bdc1b3924d47127e149383ec5a", "size": 1222, "ext": "py", "lang": "Python", "max_stars_repo_path": "process_video.py", "max_stars_repo_name": "romellfudi/dataset_currency", "max_stars_repo_head_hexsha": "19a950e88fa724171cf93c47369b6fc61a57477f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-09T22:17:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T22:17:57.000Z", "max_issues_repo_path": "process_video.py", "max_issues_repo_name": "romellfudi/dataset_currency", "max_issues_repo_head_hexsha": "19a950e88fa724171cf93c47369b6fc61a57477f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "process_video.py", "max_forks_repo_name": "romellfudi/dataset_currency", "max_forks_repo_head_hexsha": "19a950e88fa724171cf93c47369b6fc61a57477f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7727272727, "max_line_length": 82, "alphanum_fraction": 0.6587561375, "include": true, "reason": "import numpy", "num_tokens": 322}
|
import yaml
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.utils import to_categorical
data = yaml.safe_load(open('nlu\\train.yml').read())
# read data
inputs, outputs = [], []
for command in data['commands']:
inputs.append(str(command['input']).lower())
    outputs.append('{}\\{}'.format(command['entity'], command['action']))
# create dataset
# Create arrays one-hot encoding (number of examples, seq length, vocab_size)
# Create arrays sparse encoding (number of examples, seq length)
# Create input data
max_sent = max([len(x) for x in inputs])
# Create arrays
input_data = np.zeros((len(inputs), max_sent,256), dtype='float32' )
for i,inp in enumerate(inputs):
for k, ch in enumerate(bytes(inp.encode('utf-8'))):
input_data[i, k,int(ch)]=1.0
#output_data= to_categorical(output_data, len(output_data))
print(input_data[0].shape)
#print (len(chars))
#print('Max input seq:', max_sent)
labels = set(outputs)
fwrite = open('nlu\\entities.txt', 'w', encoding='utf-8')
for label in labels:
    fwrite.write(label + '\n')
fwrite.close()
labels = open('nlu\\entities.txt', 'r', encoding='utf-8').read().split('\n')
labels2idx = {}
idx2labels = {}
for k, label in enumerate(labels):
labels2idx[label] = k
idx2labels[k] = label
output_data = []
for output in outputs:
output_data.append(labels2idx[output])
output_data = to_categorical(output_data, len(labels))
model = Sequential()
model.add(LSTM(128))
model.add(Dense(len(labels),activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.fit(input_data, output_data, epochs=256)
model.save('nlu\\model.h5')
# Classify any given text into a category of our NLU framework
def classify(text):
# Create an input array
x = np.zeros((1, max_sent, 256), dtype='float32')
# Fill the x array with data from input text
for k, ch in enumerate(bytes(text.encode('utf-8'))):
x[0, k, int(ch)] = 1.0
    out = model.predict(x)
    idx = out.argmax()
    #print('Text: "{}" is classified as "{}"'.format(text, idx2labels[idx]))
    return idx2labels[idx]  # return the predicted label, not the whole mapping
'''
while True:
    text = input('Enter some text:')
    print(classify(text))
'''
|
{"hexsha": "b981148b5131505365ab665b428ab946d84b4216", "size": 2312, "ext": "py", "lang": "Python", "max_stars_repo_path": "nlu/model.py", "max_stars_repo_name": "Haisonvt21/Siri", "max_stars_repo_head_hexsha": "82c6432e8097821a866b4d182b0a00507835caa7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-25T02:14:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T06:25:51.000Z", "max_issues_repo_path": "nlu/model.py", "max_issues_repo_name": "Haisonvt21/Siri", "max_issues_repo_head_hexsha": "82c6432e8097821a866b4d182b0a00507835caa7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nlu/model.py", "max_forks_repo_name": "Haisonvt21/Siri", "max_forks_repo_head_hexsha": "82c6432e8097821a866b4d182b0a00507835caa7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9, "max_line_length": 81, "alphanum_fraction": 0.6946366782, "include": true, "reason": "import numpy", "num_tokens": 586}
|
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from RetinaNet.utils import xywh_convert_xyxy
def compute_iou(boxes1, boxes2):
"""计算交并比(此计算方式仅作为原理展示, 时间复杂度和空间复杂度都过高, 请不要直接使用).
Args:
boxes1, boxes2: tf.Tensor, 边界框[x, y, width, height].
Returns:
边界框的交并比.
"""
boxes1 = xywh_convert_xyxy(boxes1)
x1min = boxes1[..., 0]
y1min = boxes1[..., 1]
x1max = boxes1[..., 2]
y1max = boxes1[..., 3]
boxes2 = xywh_convert_xyxy(boxes2)
x2min = boxes2[..., 0]
y2min = boxes2[..., 1]
x2max = boxes2[..., 2]
y2max = boxes2[..., 3]
m, n = boxes1.shape[0], boxes2.shape[0]
iou = np.zeros(shape=[m, n])
for i in range(m):
boxes1_area = (x1max[i] - x1min[i]) * (y1max[i] - y1min[i])
for j in range(n):
            # Compute the intersection.
ximin = K.maximum(x1min[i], x2min[j])
yimin = K.maximum(y1min[i], y2min[j])
ximax = K.minimum(x1max[i], x2max[j])
yimax = K.minimum(y1max[i], y2max[j])
            w = K.maximum(0.0, ximax - ximin)  # abs() here would give disjoint boxes a spurious overlap
            h = K.maximum(0.0, yimax - yimin)
intersection_area = K.maximum(w * h, 0.0)
            # Compute the union.
            boxes2_area = (x2max[j] - x2min[j]) * (y2max[j] - y2min[j])
            union_area = K.maximum(boxes1_area + boxes2_area - intersection_area, 1e-8)  # Avoid division by zero.
iou[i][j] = intersection_area / union_area
iou = tf.convert_to_tensor(iou, dtype=tf.float32)
return K.clip(iou, 0.0, 1.0)
def faster_compute_iou(boxes1, boxes2):
"""更快速的计算交并比,
同时使用vectorization和矩阵广播优化时间复杂度和空间复杂度.
Args:
boxes1, boxes2: tf.Tensor, 边界框[x, y, width, height].
Returns:
边界框的交并比.
Thanks:
@Srihari Humbarwadi(https://github.com/srihari-humbarwadi)
"""
boxes1_corners = xywh_convert_xyxy(boxes1)
boxes2_corners = xywh_convert_xyxy(boxes2)
    # Compute the intersection.
    left_upper = K.maximum(boxes1_corners[:, None, :2], boxes2_corners[:, :2])  # Max of the two boxes' [xmin, ymin]: the intersection's lower corner.
    right_down = K.minimum(boxes1_corners[:, None, 2:], boxes2_corners[:, 2:])  # Min of the two boxes' [xmax, ymax]: the intersection's upper corner.
    intersection = K.maximum(0.0, right_down - left_upper)  # The coordinate differences are the intersection's width and height.
intersection_area = intersection[:, :, 0] * intersection[:, :, 1]
boxes1_area = boxes1[:, 2] * boxes1[:, 3] # box[x, y, width, height] width * height
boxes2_area = boxes2[:, 2] * boxes2[:, 3]
    # Compute the union.
union_area = K.maximum(
boxes1_area[:, None] + boxes2_area - intersection_area, 1e-8
)
return K.clip(intersection_area / union_area, 0.0, 1.0)
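# Quick sanity sketch (an addition): assuming, as the anchor generation below
# suggests, that (x, y) is the box centre, two 2x2 boxes whose centres sit one
# unit apart overlap in a 1x2 strip, so the expected IoU is 2 / (4 + 4 - 2) = 1/3.
if __name__ == "__main__":
    a = tf.constant([[0.0, 0.0, 2.0, 2.0]])
    b = tf.constant([[1.0, 0.0, 2.0, 2.0]])
    print(float(faster_compute_iou(a, a)[0, 0]))  # -> 1.0
    print(float(faster_compute_iou(a, b)[0, 0]))  # -> ~0.3333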
class AnchorBox(object):
"""锚框.
Attributes:
aspect_ratios: list, 宽高比.
scales: list, 锚框的缩放比.
strides: list, 滑动步长, 特征图和输入的图相差的倍数.
num_anchors: int, 锚框的数量.
areas: list, 锚框的面积.
anchor_dims: list, 所有锚框的尺寸(areas * aspect_ratios * scales).
References:
- [Lin, T. Y. , et al., 2017](https://arxiv.org/abs/1708.02002v2)
"""
def __init__(self):
"""初始化锚框."""
self.aspect_ratios = [0.5, 1.0, 2.0]
self.scales = [2 ** x for x in [0, 1/3, 2/3]]
self.strides = [8, 16, 32, 64, 128]
self.num_anchors = len(self.aspect_ratios) * len(self.scales)
self.areas = [x ** 2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
self.anchor_dims = self._compute_dims()
def generate_anchors(self, image_height, image_width):
"""为特征金字塔的所有特征图生成锚框.
Args:
image_height: int,
输入图像的长度.
image_width: int,
输入图像的宽度.
Returns:
tf.Tensor, 所有的锚框.
"""
anchors = [
self._generate_anchors(tf.math.ceil(image_height / self.strides[index]),
tf.math.ceil(image_width / self.strides[index]),
level)
for index, level in enumerate(range(3, 8)) # P3, P4, P5, P6, P7
]
return K.concatenate(anchors, axis=0)
def _generate_anchors(self, feature_height, feature_width, level):
"""为给定等级特征图生成锚框.
Args:
feature_height: int,
输入特征图的长度.
feature_width: int,
输入特征图的宽度.
level: int,
特征图的等级.
Returns:
tf.Tensor, 给定等级特征图所有的锚框.
"""
        # Walk the feature map to build anchor x and y grid points (+0.5 moves them to the cell centres).
        x = tf.range(feature_width, dtype=tf.float32) + 0.5
        y = tf.range(feature_height, dtype=tf.float32) + 0.5
        # Combine into coordinates (scaled up by the stride).
        centers = K.stack(tf.meshgrid(x, y), axis=-1) * self.strides[level - 3]
        # Replicate the centres once per anchor shape (9 shapes here).
        centers = K.expand_dims(centers, axis=-2)
        centers = tf.tile(centers, [1, 1, self.num_anchors, 1])
        # Fetch the anchor widths/heights for this feature level.
        dims = self.anchor_dims[level - 3]
        # Tile them across the whole feature map.
        dims = tf.tile(dims, [feature_height, feature_width, 1, 1])
        # Combine centres and sizes into (x, y, width, height).
        anchors = K.concatenate([centers, dims], axis=-1)
        return K.reshape(anchors, [feature_height * feature_width * self.num_anchors, 4])  # Flatten.
def _compute_dims(self):
"""计算所有的锚框的尺寸."""
        anchor_dims_all = list()
        for area in self.areas:  # Different areas (one per feature map).
            anchor_dims = list()
            for ratio in self.aspect_ratios:  # Different aspect ratios.
                # ratio = width / height; height * width = height ^ 2 * ratio = area.
                anchor_height = tf.math.sqrt(area / ratio)
                anchor_width = area / anchor_height
                dims = K.reshape(K.stack([anchor_width, anchor_height], axis=-1), [1, 1, 2])  # The leading [1, 1] are spatial placeholders.
                for scale in self.scales:  # Different scales.
                    anchor_dims.append(dims * scale)
            anchor_dims_all.append(K.stack(anchor_dims, axis=-2))
        return anchor_dims_all
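# Back-of-the-envelope sketch (an addition): with strides [8, 16, 32, 64, 128]
# and 9 anchor shapes per location, a 512x512 input yields
# (64^2 + 32^2 + 16^2 + 8^2 + 4^2) * 9 = 5456 * 9 = 49104 anchors in total.
if __name__ == "__main__":
    anchors = AnchorBox().generate_anchors(512, 512)
    print(anchors.shape)  # -> (49104, 4)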
class LabelEncoder(object):
"""标签编码器.
Attributes:
anchor_box: RetinaNet.preprocessing.label_ops.AnchorBox,
锚框.
box_variance: tf.Tensor,
框方差, 用来增大损失(小于1), 便于计算梯度.
"""
def __init__(self):
"""初始化标签编码器."""
self.anchor_box = AnchorBox()
self.box_variance = tf.convert_to_tensor([0.1, 0.1, 0.2, 0.2], dtype=tf.float32)
def encode_batch(self, batch_images, gt_bboxes, label_ids):
"""编码每个批次的标签(包括分类框和回归框).
Args:
batch_images: tf.Tensor, 一个批次的图像.
gt_bboxes: tf.Tensor, 该批次图像对应的真实边界框.
label_ids: tf.Tensor, 一个批次的图像对应的原始标签.
Returns:
图像和对应编码后的标签.
"""
img_shape = K.shape(batch_images)
batch_size = img_shape[0]
        # Create the label array.
labels = tf.TensorArray(dtype=tf.float32, size=batch_size, dynamic_size=True)
for i in range(batch_size):
label = self._encode_sample(img_shape, gt_bboxes[i], label_ids[i])
labels = labels.write(i, label)
return batch_images, labels.stack()
def _encode_sample(self, image_shape, gt_bboxes, cls_ids):
"""编码一张图片的标签(包括分类和回归框).
Args:
image_shape: tf.Tensor, 图片的形状.
gt_bboxes: tf.Tensor, 该图片对应的真实边界框.
cls_ids: tf.Tensor, 该图片对应的原始标签.
Returns:
编码后的标签.
"""
cls_ids = K.cast(cls_ids, dtype=tf.float32)
        anchor_boxes = self.anchor_box.generate_anchors(image_shape[1], image_shape[2])  # The anchor boxes carry no labels yet.
        # Get the anchor classification targets.
        max_iou_idx, positive_mask, ignore_mask = self._match_anchor_boxes(anchor_boxes, gt_bboxes)
        matched_gt_cls_ids = K.gather(cls_ids, max_iou_idx)  # Gathered so they align with the anchors.
        cls_target = tf.where(condition=K.not_equal(positive_mask, 1.0), x=-1.0, y=matched_gt_cls_ids)  # Class ID for positives, -1 otherwise.
        cls_target = tf.where(condition=K.equal(ignore_mask, 1.0), x=-2.0, y=cls_target)  # Ignored anchors are marked -2.
        cls_target = K.expand_dims(cls_target, axis=-1)
        # Get the anchor box-regression targets.
        matched_gt_boxes = K.gather(gt_bboxes, max_iou_idx)
        box_target = self._compute_box_target(anchor_boxes, matched_gt_boxes)
        label = K.concatenate([cls_target, box_target], axis=-1)
return label
@staticmethod
def _match_anchor_boxes(anchor_boxes, gt_bboxes, match_iou=0.5, ignore_iou=0.4):
"""基于交并比将锚框和真实框匹配.
Args:
anchor_boxes: RetinaNet.preprocessing.label_ops.AnchorBox,
锚框.
gt_bboxes: tf.Tensor,
真实边界框.
match_iou: float, default=0.5,
标记为真实对象IoU阈值.
ignore_iou: float, default=0.4,
忽略对象IoU阈值.
Returns:
取出最大IoU对应的索引, 正例和忽略部分的标签.
"""
        # Compute the IoU.
        iou_matrix = faster_compute_iou(anchor_boxes, gt_bboxes)
        # Take the maximum IoU and its index.
        max_iou = K.max(iou_matrix, axis=1)
        max_iou_idx = K.argmax(iou_matrix, axis=1)
        # Mark foreground, background and ignored anchors.
        positive_mask = K.greater_equal(max_iou, match_iou)  # iou >= 0.5: positive.
        negative_mask = K.less(max_iou, ignore_iou)  # iou < 0.4: negative.
        ignore_mask = tf.logical_not(tf.logical_or(positive_mask, negative_mask))  # 0.4 <= iou < 0.5: ignored.
return max_iou_idx, K.cast(positive_mask, dtype=tf.float32), K.cast(ignore_mask, dtype=tf.float32)
def _compute_box_target(self, anchor_boxes, matched_gt_boxes):
"""计算框回归变换系数(原理同Fast R-CNN),
模型将使用框回归变换系数作为训练数据.
Args:
anchor_boxes: RetinaNet.preprocessing.label_ops.AnchorBox,
锚框.
matched_gt_boxes: tf.Tensor, 匹配真实边界框.
Returns:
框回归变换系数.
Notes: 变换系数(进行归一化), 能减少不同尺度的真实损失一致但是视觉直观差异大的情况; 更加容易梯度计算.
"""
box_target = K.concatenate(
            [(matched_gt_boxes[..., :2] - anchor_boxes[..., :2]) / anchor_boxes[..., 2:],  # (gt_x - ac_x, gt_y - ac_y).
             tf.math.log(matched_gt_boxes[..., 2:] / anchor_boxes[..., 2:])],  # log compresses large scale differences.
axis=-1,
)
box_target /= self.box_variance
return box_target
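# Inverse-transform sketch (an addition, mirroring _compute_box_target): at
# inference time the predicted coefficients can be mapped back to
# (x, y, width, height) boxes by undoing the normalisation above.
def decode_box_predictions_sketch(anchor_boxes, box_predictions):
    box_predictions = box_predictions * tf.convert_to_tensor(
        [0.1, 0.1, 0.2, 0.2], dtype=tf.float32)  # undo the box variance
    boxes = K.concatenate(
        [box_predictions[..., :2] * anchor_boxes[..., 2:] + anchor_boxes[..., :2],
         tf.math.exp(box_predictions[..., 2:]) * anchor_boxes[..., 2:]],
        axis=-1,
    )
    return boxes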
|
{"hexsha": "4450dda4d07d0dcfe256417819a27691e2eda99e", "size": 10020, "ext": "py", "lang": "Python", "max_stars_repo_path": "RetinaNet/preprocessing/label_ops.py", "max_stars_repo_name": "sun1638650145/RetinaNet", "max_stars_repo_head_hexsha": "357edda03cdc1f976764b6ed4fcad6e639646142", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-23T09:32:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T06:59:03.000Z", "max_issues_repo_path": "RetinaNet/preprocessing/label_ops.py", "max_issues_repo_name": "sun1638650145/RetinaNet", "max_issues_repo_head_hexsha": "357edda03cdc1f976764b6ed4fcad6e639646142", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RetinaNet/preprocessing/label_ops.py", "max_forks_repo_name": "sun1638650145/RetinaNet", "max_forks_repo_head_hexsha": "357edda03cdc1f976764b6ed4fcad6e639646142", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7373737374, "max_line_length": 120, "alphanum_fraction": 0.5814371257, "include": true, "reason": "import numpy", "num_tokens": 3507}
|
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! EVB-QMDFF - RPMD molecular dynamics and rate constant calculations on
! black-box generated potential energy surfaces
!
! Copyright (c) 2021 by Julien Steffen (steffen@pctc.uni-kiel.de)
! Stefan Grimme (grimme@thch.uni-bonn.de) (QMDFF code)
!
! Permission is hereby granted, free of charge, to any person obtaining a
! copy of this software and associated documentation files (the "Software"),
! to deal in the Software without restriction, including without limitation
! the rights to use, copy, modify, merge, publish, distribute, sublicense,
! and/or sell copies of the Software, and to permit persons to whom the
! Software is furnished to do so, subject to the following conditions:
!
! The above copyright notice and this permission notice shall be included in
! all copies or substantial portions of the Software.
!
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
! THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
! DEALINGS IN THE SOFTWARE.
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! subroutine stints: calculate s/t integrals in cao(6d) basis
! tm int version (what does that mean???)
!
! part of QMDFF
!
subroutine stints(nat,nbf,xyz,s)
use qmdff
implicit none
integer::nat,nbf
real(kind=8)::xyz(3,nat)
real(kind=4)::s(nbf,nbf)
integer::i,j,k,l,iprimcount,jprimcount
integer::npri,nprj,ii,iii,jj,jjj,ll,m,li,lj,mm,nn,n
integer::lll(20),iall(4,4)
integer::lin(84),min(84),nin(84)
integer::lmnexp(84),ib,ie
real(kind=8)::xyza(3),xyzb(3),rab,est,ss,sss,lmnfak(84),gama,arg
real(kind=8)::aa(10),bb(10),gm2,ttt(1),tt,intcut
intcut=25.0d0
lll=(/1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4/)
lin=(/0,1,0,0,2,0,0,1,1,0,3,0,0,2,2,1,0,1,0,1,4,0,0,3,3,1,0,1,0 &
& ,2,2,0,2,1,1,5,0,0,3,3,2,2,0,0,4,4,1,0,0,1,1,3,1,2,2,1,6,0,0,3, &
& 3,0,5,5,1,0,0,1,4,4,2,0,2,0,3,3,1,2,2,1,4,1,1,2/)
min=(/0,0,1,0,0, &
& 2,0,1,0,1,0,3,0,1,0,2,2,0,1,1,0,4,0,1,0,3,3,0,1,2,0,2,1,2,1,0,5, &
& 0,2,0,3,0,3,2,1,0,4,4,1,0,1,1,3,2,1,2,0,6,0,3,0,3,1,0,0,1,5,5,2,0, &
& 0,2,4,4,2,1,3,1,3,2,1,4,1,2/)
nin=(/0,0,0,1,0,0,2,0,1,1,0,0,3,0,1,0, &
& 1,2,2,1,0,0,4,0,1,0,1,3,3,0,2,2,1,1,2,0,0,5,0,2,0,3,2,3,0,1,0,1, &
& 4,4,3,1,1,1,2,2,0,0,6,0,3,3,0,1,5,5,1,0,0,2,4,4,0,2,1,2,2,3,1,3, &
& 1,1,4,2/)
s=0
iall(1,1)=1
iall(1,2)=4
iall(2,1)=4
iall(1,3)=10
iall(3,1)=10
iall(2,2)=10
iall(2,3)=20
iall(3,2)=20
iall(2,4)=35
iall(4,2)=35
iall(3,3)=35
iall(3,4)=56
iall(4,3)=56
iall(4,4)=84
k=0
iprimcount=0
do i=1,nbf
!
!     center (Aufpunkt) of basis function i (first coordinate)
!
xyza(1:3)=xyz(1:3,aoat(i))
! #prims
npri=nprim(i)
jprimcount=0
li=lll(lao(i))
do j=1,i
lj=lll(lao(j))
k=k+1
nprj=nprim(j)
!     center (Aufpunkt) of basis function j (second coordinate)
xyzb(1:3)=xyz(1:3,aoat(j))
rab=(xyza(1)-xyzb(1))**2 &
& +(xyza(2)-xyzb(2))**2 &
& +(xyza(3)-xyzb(3))**2
!
! precalc some overlap terms that depend only on lm
!
do ll=1,iall(li,lj)
call lmnpre(lin(ll),min(ll),nin(ll),lmnexp(ll),lmnfak(ll))
enddo
aa=0
bb=0
aa(lao(i))=1.0d0
bb(lao(j))=1.0d0
! prim loop
ss=0.0d0
do ii=1,npri
iii=iprimcount+ii
do jj=1,nprj
jjj=jprimcount+jj
gama=1.0d0/(alp(iii)+alp(jjj))
gm2 =0.5d0*gama
est=rab*alp(iii)*alp(jjj)*gama
!
! cutoff criterion
!
if (est.lt.intcut) then
arg=(pi*gama)**1.50d0
call pola(xyza,xyzb,alp(iii),alp(jjj), &
& gama,gm2,lao(i),lao(j),iall(li,lj), &
& aa,bb,lmnexp,lmnfak,est,arg,sss)
ss=ss+sss*cont(iii)*cont(jjj)
end if
end do
end do
s(i,j)=ss
s(j,i)=ss
42 jprimcount=jprimcount+nprj
end do
iprimcount=iprimcount+npri
end do
!
! normalized?
!
do i=1,nbf
!
! no diagonal contribution to H0
!
if(abs(1.d0-1.0d0/sqrt(s(i,i))).gt.1.d-6)then
write(*,*) i,s(i,i)
stop 'function not normalized inside stints'
endif
enddo
end subroutine stints
|
{"hexsha": "955512a2f20befc03976e9ef7752a99dc62f5f9c", "size": 4555, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/stints.f90", "max_stars_repo_name": "Trebonius91/EVB-QMDFF", "max_stars_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_stars_repo_licenses": ["MIT", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-13T15:27:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T15:27:13.000Z", "max_issues_repo_path": "src/stints.f90", "max_issues_repo_name": "chrinide/EVB-QMDFF", "max_issues_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_issues_repo_licenses": ["MIT", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/stints.f90", "max_forks_repo_name": "chrinide/EVB-QMDFF", "max_forks_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_forks_repo_licenses": ["MIT", "Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-14T03:51:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T03:51:49.000Z", "avg_line_length": 29.5779220779, "max_line_length": 80, "alphanum_fraction": 0.5740944018, "num_tokens": 1918}
|