text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 21:39:46 2017
@author: Siqi Miao

test5.py -- Unboundedness test for simplex_step.

Builds a small LP in tableau form, disguises it with an invertible
matrix B, then takes one simplex step; istatus == 16 is expected
(unbounded problem). Indices in iB, iN are 1-based.
"""
import numpy as np
from simplex_step import simplex_step

# Start from a tableau form: A = [I | A1].
A1 = np.matrix([[-1, 1, 2],
                [-1, 1, 1],
                [0, 1, 1]], dtype=np.float64)
A = np.hstack((np.eye(3), A1))
b = np.matrix([[1],
               [2],
               [3]], dtype=np.float64)
iB = [1, 2, 3]   # basic variable indices (1-based)
iN = [4, 5, 6]   # nonbasic variable indices (1-based)
xB = np.matrix(np.copy(b))
c = np.matrix([[0, 0, 0, -1, 2, 1]], dtype=np.float64)

# Form an invertible matrix B and modify the problem so it is no
# longer in explicit tableau form.
B = np.matrix([[4, 1, 0],
               [1, -2, -1],
               [1, 2, 4]], dtype=np.float64)
A = B * A
b = B * b

# Modify c consistently with the transformed tableau.
N = A[:, [index_N - 1 for index_N in iN]]
c1 = np.matrix([[1, 1, 0]], dtype=np.float64)
c2 = c[:, (4 - 1):6] + c1 * B.I * N
c = np.hstack((c1, c2))

# Take a step; an unbounded direction should be detected.
irule = 0
[istatus, iB, iN, xB, Binv] = simplex_step(A, b, c, iB, iN, xB, irule)

X = np.zeros((6, 1), dtype=np.float64)
# BUGFIX: the original comprehension used `b` as its loop variable,
# shadowing the constraint vector b defined above.
X[[idx - 1 for idx in iB]] = xB
if istatus != 16:
    print('INCORRECT ISTATUS!\n')
{"hexsha": "a779de498b27b5bc4c93b86b1975516be95a5bb2", "size": 1191, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python_IE411/simplex_method/test5.py", "max_stars_repo_name": "Rothdyt/codes-for-courses", "max_stars_repo_head_hexsha": "a2dfea516ebc7cabef31a5169533b6da352e7ccb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-09-23T00:00:13.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-02T22:56:35.000Z", "max_issues_repo_path": "Python_IE411/simplex_method/test5.py", "max_issues_repo_name": "Rothdyt/codes-for-courses", "max_issues_repo_head_hexsha": "a2dfea516ebc7cabef31a5169533b6da352e7ccb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python_IE411/simplex_method/test5.py", "max_forks_repo_name": "Rothdyt/codes-for-courses", "max_forks_repo_head_hexsha": "a2dfea516ebc7cabef31a5169533b6da352e7ccb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8947368421, "max_line_length": 71, "alphanum_fraction": 0.4945424013, "include": true, "reason": "import numpy", "num_tokens": 445}
|
import numpy as np
import os
import argparse
import torch
from text import text_to_sequence, cmudict
from text.symbols import symbols
import commons
import models
import utils
import json
import jamotools
from glob import glob
import g2pk
from g2pk import G2p
# Korean grapheme-to-phoneme converter; instantiated once at module load.
g2p = G2p()

if __name__ == "__main__":
    # ---- command-line interface ----
    parser = argparse.ArgumentParser(description='Inference settings.')
    parser.add_argument('-t', type=str, default='안녕, 세상!', help='Script')
    parser.add_argument('-m', type=str, default='kss', help='TTS model name')
    parser.add_argument('-v', type=str, default='', help='Vocoder model name')
    parser.add_argument('-f', type=str, default='tst_stns.txt', help='Sentence file list')
    parser.add_argument('-n', type=float, default=0.333, help='Noise scale')
    args = parser.parse_args()

    # Pick the text-cleaner pipeline from the language configured on disk.
    language = json.load(open("language_setting.json", 'r'))['language']
    if language == 'english':
        cleaners = 'english_cleaners'
    elif language == 'korean':
        cleaners = 'korean_cleaners'
        # split Hangul syllables into individual jamo symbols
        args.t = jamotools.split_syllables(args.t, jamo_type="JAMO")
    elif language == 'korean_phoneme':
        cleaners = 'korean_phoneme_cleaners'
        # grapheme-to-phoneme conversion, then jamo decomposition
        args.t = g2p(args.t, descriptive=True, group_vowels=True)
        args.t = jamotools.split_syllables(args.t, jamo_type="JAMO")
    elif language == "korean_phoneme_no_15":
        # Disable g2pk's "link3" liaison rule by monkey-patching it with an
        # identity function, then proceed as in 'korean_phoneme'.
        def dummy_link3(inp, descriptive=False, verbose=False):
            return inp
        g2pk.g2pk.link3 = dummy_link3
        cleaners = 'korean_phoneme_cleaners'
        args.t = g2p(args.t, descriptive=True, group_vowels=True)
        args.t = jamotools.split_syllables(args.t, jamo_type="JAMO")
    else:
        assert False, f'Language Error [{language}]!'

    # Clear remains of previous runs (intermediate mels and generated wavs).
    for f in glob('./hifi-gan/test_mel_files/*.npy'): os.remove(f)
    for f in glob('./generated_files_from_mel/*.wav'): os.remove(f)

    # model_dir = "./logs/kss/"
    # model_dir = "./logs/ljspeech2/"
    model_dir = f"./logs/{args.m}/"
    hps = utils.get_hparams_from_dir(model_dir)
    checkpoint_path = utils.latest_checkpoint_path(model_dir)
    # If you are using a provided pretrained model
    # hps = utils.get_hparams_from_file("./configs/any_config_file.json")
    # checkpoint_path = "/path/to/pretrained_model"

    # Build the Glow-TTS generator; vocabulary grows by one blank symbol
    # when the model was trained with "add_blank".
    model = models.FlowGenerator(
        len(symbols) + getattr(hps.data, "add_blank", False),
        out_channels=hps.data.n_mel_channels,
        **hps.model).to("cuda")
    utils.load_checkpoint(checkpoint_path, model)
    model.decoder.store_inverse()  # do not calcuate jacobians for fast decoding
    _ = model.eval()

    # CMU pronunciation dictionary is optional (English models only).
    try:
        cmu_dict = cmudict.CMUDict(hps.data.cmudict_path)
    except AttributeError:
        cmu_dict = None

    # NOTE(review): args.f defaults to 'tst_stns.txt', so this None branch is
    # unreachable via argparse defaults -- confirm whether it is still wanted.
    if args.f is None:
        tst_stns = [('sample.wav', args.t)]
    else:
        # each line: "<output file name>|<sentence>"
        with open(args.f, 'r') as f:
            tst_stns = [line.split('|') for line in f]
    for stn in tst_stns: print(stn)

    for file_name, tst_stn in tst_stns:
        if getattr(hps.data, "add_blank", False):
            text_norm = text_to_sequence(tst_stn.strip(), [cleaners], cmu_dict)
            text_norm = commons.intersperse(text_norm, len(symbols))
        else:  # If not using "add_blank" option during training, adding spaces at the beginning and the end of utterance improves quality
            tst_stn = " " + tst_stn.strip() + " "
            text_norm = text_to_sequence(tst_stn.strip(), [cleaners], cmu_dict)
        sequence = np.array(text_norm)[None, :]
        # print the tokenized text (blank tokens are filtered out)
        print("".join([symbols[c] for c in sequence[0] if c < len(symbols)]))
        # print("".join([symbols[c] if c < len(symbols) else "<BNK>" for c in sequence[0]]))
        x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
        x_tst_lengths = torch.tensor([x_tst.shape[1]]).cuda()
        with torch.no_grad():
            # noise_scale = .667
            # noise_scale = .333
            noise_scale = args.n
            length_scale = 1.0
            (y_gen_tst, *_), *_, (attn_gen, *_) = model(x_tst, x_tst_lengths, gen=True, noise_scale=noise_scale, length_scale=length_scale)
        # save mel-frames for the HiFi-GAN vocoder
        if not os.path.exists('./hifi-gan/test_mel_files'):
            os.makedirs('./hifi-gan/test_mel_files')
        mel_file_name = file_name.replace('.wav', '.npy')
        np.save(f"./hifi-gan/test_mel_files/{mel_file_name}", y_gen_tst.cpu().detach().numpy())
        # Run the vocoder as a subprocess over the saved mel files.
        python_script = './hifi-gan/inference_e2e.py'
        # options = f'--checkpoint_file ./runs/{}'
        if 'kss' in args.m:
            options = f'--checkpoint_file ./hifi-gan/runs/cp_hifigan/g_00110000' + \
                      f' --input_mels_dir ./hifi-gan/test_mel_files'
        else:
            options = f'--checkpoint_file ./hifi-gan/runs/cp_hifigan_custom/g_00015000' + \
                      f' --input_mels_dir ./hifi-gan/test_mel_files'
        os.system(f'python {python_script} {options}')
        # os.rename('./generated_files_from_mel/sample_generated_e2e.wav', \
        #           f'./generated_files_from_mel/{file_name}' )
        # shutil.move('./hifi-gan/generated_files_from_mel/sample_generated_e2e.wav', 'wavs')
        # # "./hifi-gan/generated_files_from_mel/sample_generated_e2e.wav"
|
{"hexsha": "3fe066bcd3eecae12e3897940814b6010a501571", "size": 5225, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "Joovvhan/glow-tts-custom", "max_stars_repo_head_hexsha": "47eab350ccd958beea78b9662d1b360fd3562f46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-29T07:53:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T08:21:47.000Z", "max_issues_repo_path": "inference.py", "max_issues_repo_name": "Joovvhan/glow-tts-custom", "max_issues_repo_head_hexsha": "47eab350ccd958beea78b9662d1b360fd3562f46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference.py", "max_forks_repo_name": "Joovvhan/glow-tts-custom", "max_forks_repo_head_hexsha": "47eab350ccd958beea78b9662d1b360fd3562f46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-29T07:53:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:21:48.000Z", "avg_line_length": 40.1923076923, "max_line_length": 139, "alphanum_fraction": 0.6470813397, "include": true, "reason": "import numpy", "num_tokens": 1405}
|
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from dechorate import constants
from dechorate.utils.mds_utils import edm
from dechorate.utils.dsp_utils import envelope, normalize
def nlls_mds(D, X, A, thr_mic=0.05, thr_src=0.05):
    """Refine microphone and source positions by non-linear least squares.

    Minimizes || edm(X, A) - D ||^2 over all coordinates, with box bounds
    keeping each coordinate within +-thr_mic (microphones) or +-thr_src
    (sources) of its initial guess.

    Parameters
    ----------
    D : (I, J) ndarray of observed mic-to-source distances.
    X : (3, I) ndarray, initial microphone positions.
    A : (3, J) ndarray, initial source positions.
    thr_mic, thr_src : allowed deviation per coordinate (same units as X/A).

    Returns
    -------
    X, A : refined (3, I) and (3, J) position estimates.
    """
    dim, n_mic = X.shape
    dim, n_src = A.shape
    assert D.shape == (n_mic, n_src)

    def cost_fn(flat, n_mic, n_src, D):
        # unpack the flat optimization vector into mic/source coordinates
        Xc = flat[:3 * n_mic].reshape(3, n_mic)
        Ac = flat[3 * n_mic:].reshape(3, n_src)
        return np.linalg.norm(edm(Xc, Ac) - D) ** 2

    x0 = np.concatenate([X.flatten(), A.flatten()])
    ub = np.full_like(x0, np.inf)
    lb = -ub

    # Bounds are assigned by matching coordinate values in x0: sources first.
    slack = [thr_src, thr_src, thr_src]
    for j in range(n_src):
        for d in range(dim):
            match = x0 == A[d, j]
            ub[match] = A[d, j] + slack[d]
            lb[match] = A[d, j] - slack[d]
    # ... then microphones.
    slack = [thr_mic, thr_mic, thr_mic]
    for i in range(n_mic):
        for d in range(dim):
            match = x0 == X[d, i]
            ub[match] = X[d, i] + slack[d]
            lb[match] = X[d, i] - slack[d]

    bounds = sp.optimize.Bounds(lb, ub, keep_feasible=True)
    res = sp.optimize.minimize(
        cost_fn, x0, args=(n_mic, n_src, D), bounds=bounds, constraints=None,
        options={'maxiter': 10e3, 'maxfev': 100e3})

    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)

    sol = res.x
    X_hat = sol[:3 * n_mic].reshape(3, n_mic)
    A_hat = sol[3 * n_mic:].reshape(3, n_src)
    return X_hat, A_hat
def nlls_mds_images(Ddp, De_c, De_f, X, A, thr_mic=0.10, thr_src=.2):
    """Calibrate mic/source positions from direct-path distances (Ddp) plus
    ceiling-image (De_c) and floor-image (De_f) distances, via bounded
    non-linear least squares.

    X : (3, I) initial microphone positions; A : (3, J) initial sources.
    Returns the refined (X, A).
    """
    _, _, Rz = constants['room_size']  # room height (z extent)
    dim, I = X.shape
    dim, J = A.shape
    assert Ddp.shape == (I,J) == De_c.shape == De_f.shape

    def fun_e1(xXA, I, J, Ddp, De_c, De_f):
        # unpack the flat optimization vector
        X = xXA[:3*I].reshape(3, I)
        A = xXA[3*I:].reshape(3, J)
        Ae_c = A.copy()
        Ae_f = A.copy()
        # image sources: mirror height across ceiling (z -> 2*Rz - z)
        # and across floor (z -> -z)
        Ae_c[2, :] = 2*Rz - Ae_c[2, :]
        Ae_f[2, :] = -Ae_f[2, :]
        cost = np.linalg.norm((edm(X, A) - Ddp))**2 \
            + np.linalg.norm((edm(X, Ae_c) - De_c))**2 \
            + np.linalg.norm((edm(X, Ae_f) - De_f))**2
        return cost

    x0 = np.concatenate([X.flatten(), A.flatten()])
    ub = np.zeros_like(x0)
    ub[:] = np.inf
    lb = -ub
    # sources may move +-thr_src from the guess (bounds matched by value in x0)
    dims_slacks = [thr_src, thr_src, thr_src]
    for j in range(J):
        for d in range(dim):
            ub[x0 == A[d, j]] = A[d, j] + dims_slacks[d]
            lb[x0 == A[d, j]] = A[d, j] - dims_slacks[d]
    # microphones may move +-thr_mic from the guess
    dims_slacks = [thr_mic, thr_mic, thr_mic]
    for i in range(I):
        for d in range(dim):
            ub[x0 == X[d, i]] = X[d, i] + dims_slacks[d]
            lb[x0 == X[d, i]] = X[d, i] - dims_slacks[d]
    # set the origin in speaker 1
    bounds = sp.optimize.Bounds(lb, ub, keep_feasible=True)
    constraints = None
    # constraints = sp.optimize.LinearConstraint(A, lb, ub)
    # NOTE(review): 'maxfev' is not a recognized option of the bounded
    # default solver (L-BFGS-B uses 'maxfun') -- scipy warns and ignores it.
    res = sp.optimize.minimize(
        fun_e1, x0, args=(I, J, Ddp, De_c, De_f), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3, 'disp': True})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)
    sol = res.x
    # solution = solution.reshape(3, I+J)
    # X = solution[:, :I]
    # A = solution[:, I:]
    X = sol[:3*I].reshape(3, I)
    A = sol[3*I:].reshape(3, J)
    return X, A
def nlls_mds_ceiling(Ddp, De_c, X, A, thr_mic=0.1, thr_src=.2):
    """Calibrate mic/source positions from direct-path distances (Ddp) plus
    ceiling-image distances (De_c) only, via bounded non-linear least squares.

    X : (3, I) initial microphone positions; A : (3, J) initial sources.
    Returns the refined (X, A).
    """
    _, _, Rz = constants['room_size']  # room height (z extent)
    dim, I = X.shape
    dim, J = A.shape
    assert Ddp.shape == (I, J) == De_c.shape

    def fun_e1(xXA, I, J, Ddp, De_c):
        # unpack the flat optimization vector
        X = xXA[:3*I].reshape(3, I)
        A = xXA[3*I:].reshape(3, J)
        Ae_c = A.copy()
        # ceiling image source: mirror height across z = Rz
        Ae_c[2, :] = 2*Rz - A[2, :]
        cost = np.linalg.norm((edm(X, A) - Ddp))**2 \
            + np.linalg.norm((edm(X, Ae_c) - De_c))**2
        return cost

    x0 = np.concatenate([X.flatten(), A.flatten()])
    ub = np.zeros_like(x0)
    ub[:] = np.inf
    lb = -ub
    # sources may move +-thr_src from the guess (bounds matched by value in x0)
    dims_slacks = [thr_src, thr_src, thr_src]
    for j in range(J):
        for d in range(dim):
            ub[x0 == A[d, j]] = A[d, j] + dims_slacks[d]
            lb[x0 == A[d, j]] = A[d, j] - dims_slacks[d]
    # microphones may move +-thr_mic from the guess
    dims_slacks = [thr_mic, thr_mic, thr_mic]
    for i in range(I):
        for d in range(dim):
            ub[x0 == X[d, i]] = X[d, i] + dims_slacks[d]
            lb[x0 == X[d, i]] = X[d, i] - dims_slacks[d]
    # set the origin in speaker 1
    bounds = sp.optimize.Bounds(lb, ub, keep_feasible=True)
    constraints = None
    # constraints = sp.optimize.LinearConstraint(A, lb, ub)
    # NOTE(review): 'maxfev' is not a recognized option of the bounded
    # default solver (L-BFGS-B uses 'maxfun') -- scipy warns and ignores it.
    res = sp.optimize.minimize(
        fun_e1, x0, args=(I, J, Ddp, De_c), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)
    sol = res.x
    # solution = solution.reshape(3, I+J)
    # X = solution[:, :I]
    # A = solution[:, I:]
    X = sol[:3*I].reshape(3, I)
    A = sol[3*I:].reshape(3, J)
    return X, A
def nlls_mds_array(D, X, A):
    """Calibrate array poses and source positions from a distance matrix.

    The I microphones are modelled as P = 6 rigid 5-element non-uniform
    linear arrays (nULA). The optimization variables are, per array, one
    rotation angle about z plus a 3-D barycenter position, and the J
    source positions.

    Parameters
    ----------
    D : (I, J) ndarray of observed mic-to-source distances.
    X : (3, I) ndarray, initial microphone positions (used for shapes).
    A : (3, J) ndarray, initial source positions.

    Returns
    -------
    X, A : refined (3, I) microphone and (3, J) source positions.
    """
    dim, I = X.shape
    dim, J = A.shape
    assert D.shape == (I, J)

    # nominal nULA geometry along x, in metres (spacings given in cm)
    nULA = np.zeros([3, 5])
    nULA[0, :] = np.array([0-3.25-5-4, 0-3.25-5, 0-3.25, 3.25, 3.25+10])/100

    def rotate_and_translate(LA, new_center, new_angle):
        # Rotate the array about the z axis, then translate its barycenter.
        th = np.deg2rad(new_angle)
        # BUGFIX: the rotation matrix must be 2-D (3x3). The original code
        # wrapped it in an extra bracket level, producing shape (1, 3, 3);
        # R @ LA then had shape (1, 3, 5), which cannot be assigned into
        # the (3, 5) slice below.
        R = np.array([[np.cos(th), -np.sin(th), 0],
                      [np.sin(th), np.cos(th), 0],
                      [0, 0, 1]])
        return R @ LA + new_center[:, None]

    def fun(x, I, J, D):
        P = 6  # number of rigid arrays (5 mics each)
        p = x[:4*P].reshape([4, P])
        arr_ang = p[0, :]            # per-array rotation angles
        arr_pos = p[1:4, :]          # per-array barycenter coordinates
        A = x[4*P:].reshape([3, J])  # source coordinates
        X = np.zeros([3, I])
        for i in range(P):
            X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
        return np.linalg.norm(edm(X, A) - D)**2

    bounds = None
    constraints = None

    # Initial pose guesses come from the measured positions file.
    path_to_positions = './data/dECHORATE/positions.csv'
    audio_scene_positions = pd.read_csv(path_to_positions)
    mic_bar_pos = audio_scene_positions.loc[audio_scene_positions['type'] == 'array']
    mic_theta = np.array(mic_bar_pos['theta'])
    # NOTE(review): '2.353' looks like a mangled CSV column header for the
    # z coordinate -- verify against positions.csv.
    mic_bar_pos = np.vstack([mic_bar_pos['x'], mic_bar_pos['y'], mic_bar_pos['2.353']])
    x0 = np.concatenate([mic_theta.flatten(), mic_bar_pos.flatten(), A.flatten()])

    # NOTE(review): 'maxfev' is not a recognized minimize() option for the
    # default solver -- scipy warns and ignores it; confirm intent.
    res = sp.optimize.minimize(
        fun, x0, args=(I, J, D), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)

    # Rebuild X and A from the optimal pose/source vector.
    x = res.x
    P = 6
    p = x[:4*P].reshape([4, P])
    arr_ang = p[0, :]
    arr_pos = p[1:4, :]
    A = x[4*P:].reshape([3, J])
    X = np.zeros([3, I])
    for i in range(P):
        X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
    return X, A
def nlls_mds_array_ceiling(D, De_c, X, A):
    """Like nlls_mds_array, but additionally matches the distances of the
    ceiling-image sources.

    Parameters
    ----------
    D : (I, J) ndarray of direct-path mic-to-source distances.
    De_c : (I, J) ndarray of mic-to-(ceiling image) distances.
    X : (3, I) ndarray, initial microphone positions (used for shapes).
    A : (3, J) ndarray, initial source positions.

    Returns
    -------
    X, A : refined (3, I) microphone and (3, J) source positions.
    """
    dim, I = X.shape
    dim, J = A.shape
    assert D.shape == (I, J)
    Rz = constants['room_size'][2]  # room height

    # nominal nULA geometry along x, in metres (spacings given in cm)
    nULA = np.zeros([3, 5])
    nULA[0, :] = np.array([0-3.25-5-4, 0-3.25-5, 0-3.25, 3.25, 3.25+10])/100

    def rotate_and_translate(LA, new_center, new_angle):
        # Rotate the array about z, then translate its barycenter.
        th = np.deg2rad(new_angle)
        # BUGFIX: 3x3 rotation matrix; the original had an extra bracket
        # level giving shape (1, 3, 3), which broke the slice assignment.
        R = np.array([[np.cos(th), -np.sin(th), 0],
                      [np.sin(th), np.cos(th), 0],
                      [0, 0, 1]])
        return R @ LA + new_center[:, None]

    def fun(x, I, J, D, De_c):
        P = 6  # number of rigid arrays (5 mics each)
        p = x[:4*P].reshape([4, P])
        arr_ang = p[0, :]
        arr_pos = p[1:4, :]
        A = x[4*P:].reshape([3, J])
        X = np.zeros([3, I])
        for i in range(P):
            X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
        Ac = A.copy()
        # ceiling image: mirror source height across z = Rz
        Ac[2, :] = Rz + (Rz - A[2, :])
        cost = np.linalg.norm((edm(X, A) - D))**2 \
            + np.linalg.norm((edm(X, Ac) - De_c))**2
        return cost

    bounds = None
    constraints = None

    # Initial pose guesses come from the measured positions file.
    path_to_positions = './data/dECHORATE/positions.csv'
    audio_scene_positions = pd.read_csv(path_to_positions)
    mic_bar_pos = audio_scene_positions.loc[audio_scene_positions['type'] == 'array']
    mic_theta = np.array(mic_bar_pos['theta'])
    # NOTE(review): '2.353' looks like a mangled CSV column header for z.
    mic_bar_pos = np.vstack(
        [mic_bar_pos['x'], mic_bar_pos['y'], mic_bar_pos['2.353']])
    x0 = np.concatenate(
        [mic_theta.flatten(), mic_bar_pos.flatten(), A.flatten()])

    # NOTE(review): 'maxfev' is not a recognized minimize() option here --
    # scipy warns and ignores it.
    res = sp.optimize.minimize(
        fun, x0, args=(I, J, D, De_c), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)

    # Rebuild X and A from the optimal pose/source vector.
    x = res.x
    P = 6
    p = x[:4*P].reshape([4, P])
    arr_ang = p[0, :]
    arr_pos = p[1:4, :]
    A = x[4*P:].reshape([3, J])
    X = np.zeros([3, I])
    for i in range(P):
        X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
    return X, A
def nlls_mds_array_images(D, De_c, De_f, X, A):
    """Like nlls_mds_array, but additionally matches the distances of both
    ceiling-image (De_c) and floor-image (De_f) sources.

    Parameters
    ----------
    D, De_c, De_f : (I, J) ndarrays of direct / ceiling-image / floor-image
        mic-to-source distances.
    X : (3, I) ndarray, initial microphone positions (used for shapes).
    A : (3, J) ndarray, initial source positions.

    Returns
    -------
    X, A : refined (3, I) microphone and (3, J) source positions.
    """
    dim, I = X.shape
    dim, J = A.shape
    assert D.shape == (I, J)
    Rz = constants['room_size'][2]  # room height

    # nominal nULA geometry along x, in metres (spacings given in cm)
    nULA = np.zeros([3, 5])
    nULA[0, :] = np.array([0-3.25-5-4, 0-3.25-5, 0-3.25, 3.25, 3.25+10])/100

    def rotate_and_translate(LA, new_center, new_angle):
        # Rotate the array about z, then translate its barycenter.
        th = np.deg2rad(new_angle)
        # BUGFIX: 3x3 rotation matrix; the original had an extra bracket
        # level giving shape (1, 3, 3), which broke the slice assignment.
        R = np.array([[np.cos(th), -np.sin(th), 0],
                      [np.sin(th), np.cos(th), 0],
                      [0, 0, 1]])
        return R @ LA + new_center[:, None]

    def fun(x, I, J, D, De_c, De_f):
        P = 6  # number of rigid arrays (5 mics each)
        p = x[:4*P].reshape([4, P])
        arr_ang = p[0, :]
        arr_pos = p[1:4, :]
        A = x[4*P:].reshape([3, J])
        X = np.zeros([3, I])
        for i in range(P):
            X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
        Ac = A.copy()
        Af = A.copy()
        # image sources: mirror height across ceiling and floor
        Ac[2, :] = 2*Rz - A[2, :]
        Af[2, :] = -A[2, :]
        cost = np.linalg.norm((edm(X, A) - D))**2 \
            + np.linalg.norm((edm(X, Ac) - De_c))**2 \
            + np.linalg.norm((edm(X, Af) - De_f))**2
        return cost

    bounds = None
    constraints = None

    # Initial pose guesses come from the measured positions file.
    path_to_positions = './data/dECHORATE/positions.csv'
    audio_scene_positions = pd.read_csv(path_to_positions)
    mic_bar_pos = audio_scene_positions.loc[audio_scene_positions['type'] == 'array']
    mic_theta = np.array(mic_bar_pos['theta'])
    # NOTE(review): '2.353' looks like a mangled CSV column header for z.
    mic_bar_pos = np.vstack(
        [mic_bar_pos['x'], mic_bar_pos['y'], mic_bar_pos['2.353']])
    x0 = np.concatenate(
        [mic_theta.flatten(), mic_bar_pos.flatten(), A.flatten()])

    # NOTE(review): 'maxfev' is not a recognized minimize() option here --
    # scipy warns and ignores it.
    res = sp.optimize.minimize(
        fun, x0, args=(I, J, D, De_c, De_f), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)

    # Rebuild X and A from the optimal pose/source vector.
    x = res.x
    P = 6
    p = x[:4*P].reshape([4, P])
    arr_ang = p[0, :]
    arr_pos = p[1:4, :]
    A = x[4*P:].reshape([3, J])
    X = np.zeros([3, I])
    for i in range(P):
        X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
    return X, A
def nlls_mds_array_with_rir_manifold(D, X, A, rirs_manifold):
    """Calibrate array poses and sources by scoring candidate geometries on
    a RIR-envelope manifold instead of matching distances directly.

    Candidate geometries are converted to time-of-arrival sample indices,
    which are then looked up in a smoothed, inverted RIR envelope so that
    the optimizer is attracted to strong RIR peaks.

    Parameters
    ----------
    D : (I, J) ndarray of distances (used only for the shape assert).
    X : (3, I) ndarray, initial microphone positions (used for shapes).
    A : (3, J) ndarray, initial source positions.
    rirs_manifold : (n_samples, I*J) array of RIRs, one column per mic/src
        pair -- assumed column order matches edm(X, A).T.flatten();
        TODO confirm with the caller.

    Returns
    -------
    X, A : refined (3, I) microphone and (3, J) source positions.
    """
    dim, I = X.shape
    dim, J = A.shape
    assert D.shape == (I, J)
    c = constants['speed_of_sound']
    Fs = constants['Fs']
    offset = constants['recording_offset']

    # Smooth the manifold: 1 - normalized envelope per column, so that
    # minima of `mani` align with strong RIR peaks.
    mani = np.zeros_like(rirs_manifold)
    for ij in range(mani.shape[1]):
        mani[:, ij] = 1-normalize(envelope(rirs_manifold[:, ij]))

    # nominal nULA geometry along x, in metres (spacings given in cm)
    nULA = np.zeros([3, 5])
    nULA[0, :] = np.array([0-3.25-5-4, 0-3.25-5, 0-3.25, 3.25, 3.25+10])/100

    def rotate_and_translate(LA, new_center, new_angle):
        # Rotate the array about z, then translate its barycenter.
        th = np.deg2rad(new_angle)
        # BUGFIX: 3x3 rotation matrix; the original had an extra bracket
        # level giving shape (1, 3, 3), which broke the slice assignment.
        R = np.array([[np.cos(th), -np.sin(th), 0],
                      [np.sin(th), np.cos(th), 0],
                      [0, 0, 1]])
        return R @ LA + new_center[:, None]

    def fun(x, I, J, D, mani, c):
        P = 6  # number of rigid arrays (5 mics each)
        p = x[:4*P].reshape([4, P])
        arr_ang = p[0, :]
        arr_pos = p[1:4, :]
        A = x[4*P:].reshape([3, J])
        X = np.zeros([3, I])
        for i in range(P):
            X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
        # candidate TOAs in samples, one per (mic, src) pair
        E = np.round((edm(X, A)/c) * Fs + offset).astype(int).T.flatten()
        # NOTE(review): mani[E, :] selects rows only and sums over ALL
        # columns for every TOA; mani[E, np.arange(E.size)] may have been
        # intended -- confirm before relying on this cost.
        cost = np.sum(mani[E, :])
        return cost

    bounds = None
    constraints = None

    # Initial pose guesses come from the measured positions file.
    path_to_positions = './data/dECHORATE/positions.csv'
    audio_scene_positions = pd.read_csv(path_to_positions)
    mic_bar_pos = audio_scene_positions.loc[audio_scene_positions['type'] == 'array']
    mic_theta = np.array(mic_bar_pos['theta'])
    # NOTE(review): '2.353' looks like a mangled CSV column header for z.
    mic_bar_pos = np.vstack(
        [mic_bar_pos['x'], mic_bar_pos['y'], mic_bar_pos['2.353']])
    x0 = np.concatenate(
        [mic_theta.flatten(), mic_bar_pos.flatten(), A.flatten()])

    # NOTE(review): 'maxfev' is not a recognized minimize() option here --
    # scipy warns and ignores it.
    res = sp.optimize.minimize(
        fun, x0, args=(I, J, D, mani, c), bounds=bounds, constraints=constraints,
        options={'maxiter': 10e3, 'maxfev': 100e3})
    print('Optimization')
    print('message', res.message)
    print('nit', res.nit)
    print('nfev', res.nfev)
    print('success', res.success)
    print('fun', res.fun)

    # Rebuild X and A from the optimal pose/source vector.
    x = res.x
    P = 6
    p = x[:4*P].reshape([4, P])
    arr_ang = p[0, :]
    arr_pos = p[1:4, :]
    A = x[4*P:].reshape([3, J])
    X = np.zeros([3, I])
    for i in range(P):
        X[:, i*5:(i+1)*5] = rotate_and_translate(nULA, arr_pos[:, i], arr_ang[i])
    return X, A
# def crcc_mds(Dobs, Xinit, Ainit):
# Nd, Md = Dobs.shape
# Dx, Nx = Xinit.shape
# Da, Ma = Ainit.shape
# assert Dx == Da == 3
# assert Nx == Nd
# assert Md == Ma
# N = Nx
# M = Md
# D = Dx
# # # Preparte observation
# # Dt = Dobs**2 - Dobs[0:1, :]**2 - Dobs[:, 0:1]**2 + Dobs[0, 0]**2
# # Dt = Dt[1:, 1:]
# # Prepare initialization
# X = Xinit
# A = Ainit
# # center wrt the first entry
# A = A - A[:, 0:1] # correspond to \texttt{A} in the paper
# X = X - X[:, 0:1] # correspond to \texttt{X} in the paper
# # Crocco wants the matrix as DxM and DxN
# # and remove the 1rst row
# Xt = X[:, 1:].T
# At = A[:, 1:].T
# Dinit = (-2 * Xt @ At.T)
# assert Dinit.shape == Dt.shape
# assert np.allclose(Dinit, Dt)
# Ui, Vi, Whi = np.linalg.svd(Dinit)
# Cinit = np.linalg.pinv(Ui[:, :3]) @ Xt
# # D = edm(X, A)**0.5
# # for i in range(N):
# # for j in range(M):
# # assert np.allclose(D[i, j]**2, np.linalg.norm(X[:, i] - A[:, j])**2)
# U, V, Wh = np.linalg.svd(Dt)
# # select only the first 3 eigenvalue
# # imposing rank=3
# V = np.diag(V[:3])
# U = U[:N-1, :3]
# Wh = Wh[:3, :M-1]
# assert np.allclose(Dt, U@V@Wh)
# # def f(x0, U, V, Wh, D):
# # C = x0[:9].reshape([3,3])
# # a00 = x0[-1]
# # cost = 0
# # UC = U @ C
# # UVW = U @ V @ Wh
# # for i in range(N-1):
# # for j in range(M-1):
# # uc_sq_sum = np.sum(UC[i, :])**2
# # uvw_sq_sum = np.sum(UVW[i, j])**2
# # _2uc_a00 = -2 * UC[i, 0]*a00
# # _dist = -D[i+1,j+1]**2 + D[0, j+1]**2
# # cost += (uc_sq_sum \
# # + uvw_sq_sum \
# # + _2uc_a00 \
# # + _dist)**2
# # return cost
# def fun(C, U, V, Wh, D, a1):
# C = C.reshape([3, 3])
# Xt = U @ C
# At = (- 0.5 * np.linalg.inv(C) @ V @ Wh).T
# Xt = np.concatenate([np.zeros([1, 3]), Xt], axis=0)
# At = np.concatenate([np.zeros([1, 3]), At], axis=0)
# At[0, :] = At[0, :] + a1
# print(edm(Xt.T,At.T))
# print(D)
# 1/0
# cost = np.linalg.norm(edm(X, Y) - D)
# return cost
# x0 = np.concatenate([np.random.randn(3,3).flatten()])
# print(fun(x0, U, V, Wh, Dobs, Ainit[0, 0]))
# res = minimize(fun, x0, args=(U, V, Wh, Dobs, Ainit[0,0]), options={'disp': True})
# print(res)
# 1/0
# return X
def crcc_mds1(D, Xinit, Ainit):
    """Bilinear self-calibration from a mic/source distance matrix, after
    Crocco, Del Bue and Murino, "A Bilinear Approach to the Position
    Self-Calibration of Multiple Sensors".

    Parameters
    ----------
    D : (I, J) ndarray of mic-to-source distances.
    Xinit : (3, I) ndarray, ground-truth/initial microphone positions
        (used only in consistency asserts).
    Ainit : (3, J) ndarray, ground-truth/initial source positions; the
        known quantity Ainit[0, 0] anchors the reconstruction.

    Returns
    -------
    None -- the reconstructed EDM is only printed; the original
    implementation never returned the estimates, and that interface is
    kept so existing callers are unaffected.
    """
    I, J = D.shape
    dimx, Ix = Xinit.shape
    dima, Ja = Ainit.shape
    assert I == Ix
    assert J == Ja
    assert dimx == dima
    # Crocco's notation keeps points as rows.
    Xinit = Xinit.T
    Ainit = Ainit.T
    assert edm(Xinit.T, Ainit.T).shape == D.shape
    I, J = D.shape

    # Center both clouds on their first point and drop it; the resulting
    # cross-term matrix is what the SVD factorization below should equal.
    Xt_init = (Xinit - Xinit[0:1, :])[1:, :]
    At_init = (Ainit - Ainit[0:1, :])[1:, :]
    Dt_init = -2 * Xt_init @ At_init.T

    # From the EDM to D-tilde (double-centred, first row/column cropped).
    D2 = D ** 2
    D2 = D2 - D2[0:1, :] - D2[:, 0:1] + D2[0, 0]
    Dt = D2[1:, 1:]
    assert Dt.shape == (I-1, J-1)
    assert np.allclose(Dt, Dt_init)

    # Rank-3 SVD factorization of D-tilde.
    U, V, Wh = np.linalg.svd(Dt)
    V = np.diag(V[:3])
    U = U[:I-1, :3]
    Wh = Wh[:3, :J-1]
    assert np.allclose(Dt, U @ V @ Wh)

    def fun(C, U, V, Wh, D, a):
        # Residual of the bilinear constraints as a function of the
        # unknown 3x3 mixing matrix C (closes over I and J).
        C = C.reshape([3, 3])
        UC = U @ C
        UVWh = U @ V @ Wh
        cost = 0
        for i in range(1, I-1):
            for j in range(1, J-1):
                c = np.sum(UC[i, :]**2)
                # NOTE(review): an earlier draft of this cost squared this
                # term (np.sum(...)**2); confirm which form is intended.
                c += np.sum(UVWh[i, j])
                c += -2*UC[i, 0]*a
                c += -D[i+1, j+1]**2 + D[0, j+1]**2
                # BUGFIX: accumulate over all (i, j); the original
                # `cost = c**2` overwrote the accumulator each iteration,
                # so only the last residual was ever minimized.
                cost += c**2
        return cost

    x0 = np.random.randn(3, 3).flatten()
    res = sp.optimize.minimize(fun, x0, args=(U, V, Wh, D, Ainit[0, 0]), options={'disp': True})
    C = res.x.reshape([3, 3])

    # Recover the centred estimates, re-prepend the (dropped) first points
    # at the origin, and anchor using the known distance Ainit[0, 0].
    Xt_est = U @ C
    At_est = -0.5 * (np.linalg.inv(C) @ V @ Wh).T
    Xt_est = np.concatenate([np.zeros([1, 3]), Xt_est])
    At_est = np.concatenate([np.zeros([1, 3]), At_est])
    At_est[0, 0] = Ainit[0, 0]
    print(edm(Xt_est.T, At_est.T))
    # The MATLAB reference implementation (`unfold_crocco`, Ivan Dokmanic,
    # 2014) that was kept here as a large commented-out transcript has been
    # removed during review; see the Crocco et al. paper for the derivation.
|
{"hexsha": "0bb45a0cbe697eccce31b2b92d55fa9fa15f930f", "size": 25823, "ext": "py", "lang": "Python", "max_stars_repo_path": "dechorate/calibration_and_mds.py", "max_stars_repo_name": "Chutlhu/DechorateDB", "max_stars_repo_head_hexsha": "378eda37ed296f2823e3306238101343c5f4084a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-06-01T10:57:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:17:16.000Z", "max_issues_repo_path": "dechorate/calibration_and_mds.py", "max_issues_repo_name": "Chutlhu/DechorateDB", "max_issues_repo_head_hexsha": "378eda37ed296f2823e3306238101343c5f4084a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-25T14:48:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T05:36:30.000Z", "max_forks_repo_path": "dechorate/calibration_and_mds.py", "max_forks_repo_name": "Chutlhu/DechorateDB", "max_forks_repo_head_hexsha": "378eda37ed296f2823e3306238101343c5f4084a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7783075089, "max_line_length": 96, "alphanum_fraction": 0.5055957867, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 8984}
|
[STATEMENT]
lemma (in linorder_topology) not_in_connected_cases:
assumes conn: "connected S"
assumes nbdd: "x \<notin> S"
assumes ne: "S \<noteq> {}"
obtains "bdd_above S" "\<And>y. y \<in> S \<Longrightarrow> x \<ge> y" | "bdd_below S" "\<And>y. y \<in> S \<Longrightarrow> x \<le> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
obtain s where "s \<in> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s. s \<in> S \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ne
[PROOF STATE]
proof (prove)
using this:
S \<noteq> {}
goal (1 subgoal):
1. (\<And>s. s \<in> S \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
s \<in> S
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
s \<in> S
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
assume "s \<le> x"
[PROOF STATE]
proof (state)
this:
s \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "False" if "x \<le> y" "y \<in> S" for y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using connectedD_interval[OF conn \<open>s \<in> S\<close> \<open>y \<in> S\<close> \<open>s \<le> x\<close> \<open>x \<le> y\<close>] \<open>x \<notin> S\<close>
[PROOF STATE]
proof (prove)
using this:
x \<in> S
x \<notin> S
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<lbrakk>x \<le> ?y; ?y \<in> S\<rbrakk> \<Longrightarrow> False
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>x \<le> ?y; ?y \<in> S\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have wit: "y \<in> S \<Longrightarrow> x \<ge> y" for y
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<le> ?y; ?y \<in> S\<rbrakk> \<Longrightarrow> False
goal (1 subgoal):
1. y \<in> S \<Longrightarrow> y \<le> x
[PROOF STEP]
using le_cases
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<le> ?y; ?y \<in> S\<rbrakk> \<Longrightarrow> False
\<lbrakk>?x \<le> ?y \<Longrightarrow> ?P; ?y \<le> ?x \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. y \<in> S \<Longrightarrow> y \<le> x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?y \<in> S \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?y \<in> S \<Longrightarrow> ?y \<le> x
[PROOF STEP]
have "bdd_above S"
[PROOF STATE]
proof (prove)
using this:
?y \<in> S \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. bdd_above S
[PROOF STEP]
by (rule local.bdd_aboveI)
[PROOF STATE]
proof (state)
this:
bdd_above S
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
note this wit
[PROOF STATE]
proof (state)
this:
bdd_above S
?y \<in> S \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
s \<le> x \<Longrightarrow> bdd_above S
\<lbrakk>s \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
s \<le> x \<Longrightarrow> bdd_above S
\<lbrakk>s \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
s \<le> x \<Longrightarrow> bdd_above S
\<lbrakk>s \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> ?y \<le> x
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
assume "x \<le> s"
[PROOF STATE]
proof (state)
this:
x \<le> s
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "False" if "x \<ge> y" "y \<in> S" for y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using connectedD_interval[OF conn \<open>y \<in> S\<close> \<open>s \<in> S\<close> \<open>x \<ge> y\<close> \<open>s \<ge> x\<close> ] \<open>x \<notin> S\<close>
[PROOF STATE]
proof (prove)
using this:
x \<in> S
x \<notin> S
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<lbrakk>?y \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> False
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?y \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have wit: "y \<in> S \<Longrightarrow> x \<le> y" for y
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> False
goal (1 subgoal):
1. y \<in> S \<Longrightarrow> x \<le> y
[PROOF STEP]
using le_cases
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> False
\<lbrakk>?x \<le> ?y \<Longrightarrow> ?P; ?y \<le> ?x \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. y \<in> S \<Longrightarrow> x \<le> y
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?y \<in> S \<Longrightarrow> x \<le> ?y
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?y \<in> S \<Longrightarrow> x \<le> ?y
[PROOF STEP]
have "bdd_below S"
[PROOF STATE]
proof (prove)
using this:
?y \<in> S \<Longrightarrow> x \<le> ?y
goal (1 subgoal):
1. bdd_below S
[PROOF STEP]
by (rule bdd_belowI)
[PROOF STATE]
proof (state)
this:
bdd_below S
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
note this wit
[PROOF STATE]
proof (state)
this:
bdd_below S
?y \<in> S \<Longrightarrow> x \<le> ?y
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
x \<le> s \<Longrightarrow> bdd_below S
\<lbrakk>x \<le> s; ?y \<in> S\<rbrakk> \<Longrightarrow> x \<le> ?y
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>bdd_above S; \<And>y. y \<in> S \<Longrightarrow> y \<le> x\<rbrakk> \<Longrightarrow> thesis; \<lbrakk>bdd_below S; \<And>y. y \<in> S \<Longrightarrow> x \<le> y\<rbrakk> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
s \<le> x \<Longrightarrow> bdd_above S
\<lbrakk>s \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> ?y \<le> x
x \<le> s \<Longrightarrow> bdd_below S
\<lbrakk>x \<le> s; ?y \<in> S\<rbrakk> \<Longrightarrow> x \<le> ?y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
s \<le> x \<Longrightarrow> bdd_above S
\<lbrakk>s \<le> x; ?y \<in> S\<rbrakk> \<Longrightarrow> ?y \<le> x
x \<le> s \<Longrightarrow> bdd_below S
\<lbrakk>x \<le> s; ?y \<in> S\<rbrakk> \<Longrightarrow> x \<le> ?y
goal (1 subgoal):
1. thesis
[PROOF STEP]
by (meson le_cases that)
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4398, "file": null, "length": 37}
|
/**
*-----------------------------------------------------------------------------
* Title : Memory Master
* ----------------------------------------------------------------------------
* File : Master.cpp
* Created : 2016-09-20
* ----------------------------------------------------------------------------
* Description:
* Memory master interface.
* ----------------------------------------------------------------------------
* This file is part of the rogue software platform. It is subject to
* the license terms in the LICENSE.txt file found in the top-level directory
* of this distribution and at:
* https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
* No part of the rogue software platform, including this file, may be
* copied, modified, propagated, or distributed except according to the terms
* contained in the LICENSE.txt file.
* ----------------------------------------------------------------------------
**/
#include <rogue/interfaces/memory/Master.h>
#include <rogue/interfaces/memory/Slave.h>
#include <rogue/interfaces/memory/Constants.h>
#include <rogue/interfaces/memory/Transaction.h>
#include <rogue/GeneralError.h>
#include <rogue/Helpers.h>
#include <cstring>
#include <memory>
#include <rogue/GilRelease.h>
#include <rogue/ScopedGil.h>
#include <stdlib.h>
namespace rim = rogue::interfaces::memory;
#ifndef NO_PYTHON
#define BOOST_BIND_GLOBAL_PLACEHOLDERS
#include <boost/python.hpp>
namespace bp = boost::python;
#endif
//! Factory: build a shared Master instance.
rim::MasterPtr rim::Master::create () {
   return std::make_shared<rim::Master>();
}
//! Register the Master class and its methods with Boost.Python.
/*
 * Compiled out entirely when NO_PYTHON is defined.
 *
 * Fixed: _copyBits/_setBits/_anyBits previously bound the raw-pointer
 * C++ overloads (copyBits/setBits/anyBits), which boost::python cannot
 * invoke with python buffer objects.  They now bind the *Py wrappers,
 * matching the existing _reqTransaction -> reqTransactionPy pattern.
 */
void rim::Master::setup_python() {
#ifndef NO_PYTHON
   bp::class_<rim::Master, rim::MasterPtr, boost::noncopyable>("Master",bp::init<>())
      .def("_setSlave", &rim::Master::setSlave)
      .def("_getSlave", &rim::Master::getSlave)
      .def("_reqSlaveId", &rim::Master::reqSlaveId)
      .def("_reqSlaveName", &rim::Master::reqSlaveName)
      .def("_reqMinAccess", &rim::Master::reqMinAccess)
      .def("_reqMaxAccess", &rim::Master::reqMaxAccess)
      .def("_reqAddress", &rim::Master::reqAddress)
      .def("_getError", &rim::Master::getError)
      .def("_clearError", &rim::Master::clearError)
      .def("_setTimeout", &rim::Master::setTimeout)
      .def("_reqTransaction", &rim::Master::reqTransactionPy)
      .def("_waitTransaction", &rim::Master::waitTransaction)
      .def("_copyBits", &rim::Master::copyBitsPy)
      .staticmethod("_copyBits")
      .def("_setBits", &rim::Master::setBitsPy)
      .staticmethod("_setBits")
      .def("_anyBits", &rim::Master::anyBitsPy)
      .staticmethod("_anyBits")
      .def("__rshift__", &rim::Master::rshiftPy)
      .def("_stop", &rim::Master::stop)
   ;
#endif
}
//! Create object
// Starts with a small placeholder slave so queries/transactions issued
// before setSlave() still have a valid target, and arms the library's
// default timeout in sumTime_.
rim::Master::Master() {
   error_ = "";
   slave_ = rim::Slave::create(4,0); // Empty placeholder
   rogue::defaultTimeout(sumTime_);
   log_ = rogue::Logging::create("memory.Master");
}
//! Destroy object
rim::Master::~Master() { }
//! Stop the interface
// Default is a no-op; subclasses override to shut down resources.
void rim::Master::stop() {}
//! Attach the downstream slave used for all future requests.
void rim::Master::setSlave ( rim::SlavePtr slave ) {
   rogue::GilRelease noGil;                      // release python GIL before blocking on the lock
   std::lock_guard<std::mutex> guard(mastMtx_);  // serialize against concurrent master calls
   slave_ = slave;
}
//! Return the currently attached slave.
rim::SlavePtr rim::Master::getSlave () {
   return slave_;
}
//! Forward the slave id query to the attached slave.
uint32_t rim::Master::reqSlaveId() {
   return slave_->doSlaveId();
}

//! Forward the slave name query to the attached slave.
std::string rim::Master::reqSlaveName() {
   return slave_->doSlaveName();
}

//! Minimum transaction size in bytes supported by the attached slave.
uint32_t rim::Master::reqMinAccess() {
   return slave_->doMinAccess();
}

//! Maximum transaction size in bytes supported by the attached slave.
uint32_t rim::Master::reqMaxAccess() {
   return slave_->doMaxAccess();
}

//! Address offset reported by the attached slave.
uint64_t rim::Master::reqAddress() {
   return slave_->doAddress();
}
//! Return the last recorded transaction error string ("" when clean).
std::string rim::Master::getError() {
   return error_;
}

//! Clear the recorded error string.
void rim::Master::clearError() {
   rogue::GilRelease noGil;                      // drop the GIL before taking the lock
   std::lock_guard<std::mutex> guard(mastMtx_);
   error_.clear();
}
//! Set the transaction timeout, in microseconds.
/*
 * A value of 0 leaves the current timeout unchanged.
 *
 * Fixed: the previous implementation funneled the 64-bit value through
 * div(int,int), silently truncating timeouts >= 2^31 microseconds
 * (~36 minutes).  Plain 64-bit division/modulo avoids the narrowing.
 */
void rim::Master::setTimeout(uint64_t timeout) {
   rogue::GilRelease noGil;
   std::lock_guard<std::mutex> lock(mastMtx_);
   if (timeout != 0 ) {
      sumTime_.tv_sec  = timeout / 1000000;
      sumTime_.tv_usec = timeout % 1000000;
   }
}
//! Post a transaction, called locally, forwarded to slave
// Wraps the caller-supplied buffer in a Transaction (no copy is made, so
// *data must remain valid until the transaction completes) and hands it
// to intTransaction().  Returns the transaction id for waitTransaction().
uint32_t rim::Master::reqTransaction(uint64_t address, uint32_t size, void *data, uint32_t type) {
   rim::TransactionPtr tran = rim::Transaction::create(sumTime_);
   tran->iter_ = (uint8_t *)data;
   tran->size_ = size;
   tran->address_ = address;
   tran->type_ = type;
   return(intTransaction(tran));
}
#ifndef NO_PYTHON
//! Post a transaction, called locally, forwarded to slave, python version
/*
 * Acquires the python buffer backing 'p' and runs the transaction against
 * it.  Read/Verify transactions require a writable contiguous buffer
 * (results are written back); other types only need read access.
 * A size of 0 means "use the whole buffer".  pyValid_ marks that the
 * buffer reference must be released when the transaction completes.
 *
 * Fixed: the Py_ssize_t buffer length was passed to a %i format specifier
 * (a varargs type mismatch on LP64 platforms); it is now cast explicitly.
 */
uint32_t rim::Master::reqTransactionPy(uint64_t address, boost::python::object p, uint32_t size, uint32_t offset, uint32_t type) {
   rim::TransactionPtr tran = rim::Transaction::create(sumTime_);
   if ((type == rim::Read) || (type == rim::Verify)) {
      if ( PyObject_GetBuffer(p.ptr(),&(tran->pyBuf_),PyBUF_CONTIG) < 0 )
         throw(rogue::GeneralError("Master::reqTransactionPy","Python Buffer contig Error"));
   }
   else {
      if ( PyObject_GetBuffer(p.ptr(),&(tran->pyBuf_),PyBUF_SIMPLE) < 0 )
         throw(rogue::GeneralError("Master::reqTransactionPy","Python Buffer simple Error"));
   }
   if ( size == 0 ) tran->size_ = tran->pyBuf_.len;
   else tran->size_ = size;
   if ( (tran->size_ + offset) > tran->pyBuf_.len ) {
      PyBuffer_Release(&(tran->pyBuf_));
      throw(rogue::GeneralError::create("Master::reqTransactionPy",
         "Attempt to access %i bytes in python buffer with size %i at offset %i",
         tran->size_,(int)tran->pyBuf_.len,offset));
   }
   tran->pyValid_ = true;
   tran->iter_ = ((uint8_t *)tran->pyBuf_.buf) + offset;
   tran->type_ = type;
   tran->address_ = address;
   return(intTransaction(tran));
}
#endif
//! Common transaction start path shared by the C++ and python entry points.
/*
 * Registers the transaction in the local map (so waitTransaction can find
 * it), forwards it to the attached slave and arms its timeout timer.
 * The slave pointer is captured under the lock; doTransaction runs
 * outside of it.  Returns the transaction id.
 *
 * Fixed: the 64-bit address was passed to a plain %x format specifier
 * (a varargs type mismatch); it is now printed as %016llx with an
 * explicit cast.
 */
uint32_t rim::Master::intTransaction(rim::TransactionPtr tran) {
   TransactionMap::iterator it;
   struct timeval currTime;
   rim::SlavePtr slave;
   {
      rogue::GilRelease noGil;
      std::lock_guard<std::mutex> lock(mastMtx_);
      slave = slave_;
      tranMap_[tran->id_] = tran;
   }
   log_->debug("Request transaction type=%i id=%i",tran->type_,tran->id_);
   tran->log_->debug("Created transaction type=%i id=%i, address=0x%016llx, size=0x%x",
                     tran->type_,tran->id_,(unsigned long long)tran->address_,tran->size_);
   slave->doTransaction(tran);
   tran->refreshTimer(tran);
   return(tran->id_);
}
// Wait for transaction. Timeout in seconds
// Blocks until the transaction with the given id completes; id == 0 waits
// for all outstanding transactions.  Each completed transaction is removed
// from the map under the lock, while the potentially long wait() runs
// outside of it so new transactions can still be issued concurrently.
void rim::Master::waitTransaction(uint32_t id) {
   TransactionMap::iterator it;
   rim::TransactionPtr tran;
   std::string error;
   rogue::GilRelease noGil;
   while (1) {
      { // Lock the vector
         std::unique_lock<std::mutex> lock(mastMtx_);
         // Pick the requested transaction, or any remaining one when id==0;
         // exit the loop once nothing matching is left in the map.
         if ( id != 0 ) it = tranMap_.find(id);
         else it = tranMap_.begin();
         if ( it != tranMap_.end() ) {
            tran = it->second;
            tranMap_.erase(it);
         }
         else break;
      }
      // Outside of lock
      // NOTE(review): error_ is written here without holding mastMtx_,
      // which looks racy with getError/clearError — confirm intended.
      if ( (error = tran->wait()) != "" ) error_ = error;
   }
}
//! Copy 'size' bits from srcData (starting at bit srcLsb) to dstData (starting at bit dstLsb).
/*
 * Byte-aligned stretches are moved in bulk with memcpy; head/tail bits
 * are copied one at a time, clearing then setting each destination bit.
 *
 * Fixed: the original do/while executed its body once even for size == 0,
 * copying a stray bit and then wrapping the unsigned remainder counter,
 * which walked far past the end of both buffers.  A while loop makes
 * size == 0 a no-op and is otherwise identical.
 */
void rim::Master::copyBits(uint8_t *dstData, uint32_t dstLsb, uint8_t *srcData, uint32_t srcLsb, uint32_t size) {
   uint32_t srcBit  = srcLsb % 8;
   uint32_t srcByte = srcLsb / 8;
   uint32_t dstBit  = dstLsb % 8;
   uint32_t dstByte = dstLsb / 8;
   uint32_t rem     = size;
   uint32_t bytes;

   while (rem != 0) {
      bytes = rem / 8;

      // Aligned: both cursors on a byte boundary with at least a byte left.
      if ( (srcBit == 0) && (dstBit == 0) && (bytes > 0) ) {
         std::memcpy(&(dstData[dstByte]),&(srcData[srcByte]),bytes);
         dstByte += bytes;
         srcByte += bytes;
         rem -= (bytes * 8);
      }

      // Not aligned: move a single bit, then advance both cursors.
      else {
         dstData[dstByte] &= ((0x1 << dstBit) ^ 0xFF);
         dstData[dstByte] |= ((srcData[srcByte] >> srcBit) & 0x1) << dstBit;
         srcByte += (++srcBit / 8);
         dstByte += (++dstBit / 8);
         srcBit %= 8;
         dstBit %= 8;
         rem -= 1;
      }
   }
}
#ifndef NO_PYTHON
//! Copy bits from src to dst with lsbs and size
/*
 * Python wrapper around copyBits().  Both buffers are range-checked
 * against the requested bit span before copying; the destination must be
 * writable and contiguous, the source only readable.
 *
 * Fixed: the Py_ssize_t buffer lengths were passed to %i format
 * specifiers (a varargs type mismatch on LP64 platforms); they are now
 * cast explicitly.
 */
void rim::Master::copyBitsPy(boost::python::object dst, uint32_t dstLsb, boost::python::object src, uint32_t srcLsb, uint32_t size) {
   Py_buffer srcBuf;
   Py_buffer dstBuf;
   if ( PyObject_GetBuffer(dst.ptr(),&dstBuf,PyBUF_CONTIG) < 0 )
      throw(rogue::GeneralError("Master::copyBits","Python Buffer Error"));
   if ( (dstLsb + size) > (dstBuf.len*8) ) {
      PyBuffer_Release(&dstBuf);
      throw(rogue::GeneralError::create("Master::copyBits",
         "Attempt to copy %i bits starting from bit %i from dest array with bitSize %i",
         size, dstLsb, (int)(dstBuf.len*8)));
   }
   if ( PyObject_GetBuffer(src.ptr(),&srcBuf,PyBUF_SIMPLE) < 0 ) {
      PyBuffer_Release(&dstBuf);
      throw(rogue::GeneralError("Master::copyBits","Python Buffer Error"));
   }
   if ( (srcLsb + size) > (srcBuf.len*8) ) {
      PyBuffer_Release(&srcBuf);
      PyBuffer_Release(&dstBuf);
      throw(rogue::GeneralError::create("Master::copyBits",
         "Attempt to copy %i bits starting from bit %i from source array with bitSize %i",
         size, srcLsb, (int)(srcBuf.len*8)));
   }
   copyBits((uint8_t *)dstBuf.buf, dstLsb, (uint8_t *)srcBuf.buf, srcLsb, size);
   PyBuffer_Release(&srcBuf);
   PyBuffer_Release(&dstBuf);
}
#endif
//! Set all bits in dest with lbs and size
/*
 * Sets 'size' bits to 1 in dstData starting at bit 'lsb'.  Byte-aligned
 * stretches are filled in bulk with memset; head/tail bits are set one
 * at a time.
 *
 * Fixed: the original do/while executed its body once even for size == 0,
 * setting a stray bit and then wrapping the unsigned remainder counter,
 * which ran far past the end of the buffer.  A while loop makes
 * size == 0 a no-op and is otherwise identical.
 */
void rim::Master::setBits(uint8_t *dstData, uint32_t lsb, uint32_t size) {
   uint32_t dstBit  = lsb % 8;
   uint32_t dstByte = lsb / 8;
   uint32_t rem     = size;
   uint32_t bytes;

   while (rem != 0) {
      bytes = rem / 8;

      // Aligned: fill whole bytes at once.
      if ( (dstBit == 0) && (bytes > 0) ) {
         memset(&(dstData[dstByte]),0xFF,bytes);
         dstByte += bytes;
         rem -= (bytes * 8);
      }

      // Not aligned: set one bit, advance the cursor.
      else {
         dstData[dstByte] |= (0x1 << dstBit);
         dstByte += (++dstBit / 8);
         dstBit %= 8;
         rem -= 1;
      }
   }
}
#ifndef NO_PYTHON
//! Set all bits in dest with lbs and size
/*
 * Python wrapper around setBits(); requires a writable contiguous buffer
 * and range-checks the requested bit span.
 *
 * Fixed: the Py_ssize_t buffer length was passed to a %i format specifier
 * (a varargs type mismatch on LP64 platforms); it is now cast explicitly.
 */
void rim::Master::setBitsPy(boost::python::object dst, uint32_t lsb, uint32_t size) {
   Py_buffer dstBuf;
   if ( PyObject_GetBuffer(dst.ptr(),&dstBuf,PyBUF_CONTIG) < 0 )
      throw(rogue::GeneralError("Master::setBits","Python Buffer Error"));
   if ( (lsb + size) > (dstBuf.len*8) ) {
      PyBuffer_Release(&dstBuf);
      throw(rogue::GeneralError::create("Master::setBits",
         "Attempt to set %i bits starting from bit %i in array with bitSize %i",
         size, lsb, (int)(dstBuf.len*8)));
   }
   setBits((uint8_t *)dstBuf.buf, lsb, size);
   PyBuffer_Release(&dstBuf);
}
#endif
//! Return true if any bits are set in range
/*
 * Scans 'size' bits of dstData starting at bit 'lsb', byte-at-a-time when
 * aligned, and stops early on the first set bit.
 *
 * Fixed: the original do/while executed its body once even for size == 0,
 * testing a bit outside the requested range and then wrapping the
 * unsigned remainder counter, scanning far past the end of the buffer.
 * A while loop makes size == 0 return false and is otherwise identical.
 */
bool rim::Master::anyBits(uint8_t *dstData, uint32_t lsb, uint32_t size) {
   uint32_t dstBit  = lsb % 8;
   uint32_t dstByte = lsb / 8;
   uint32_t rem     = size;
   uint32_t bytes;
   bool ret = false;

   while ( (!ret) && (rem != 0) ) {
      bytes = rem / 8;

      // Aligned: test a whole byte at once.
      if ( (dstBit == 0) && (bytes > 0) ) {
         if (dstData[dstByte] != 0) ret = true;
         dstByte += 1;
         rem -= 8;
      }

      // Not aligned: test a single bit.
      else {
         if ( (dstData[dstByte] & (0x1 << dstBit)) != 0) ret = true;
         dstByte += (++dstBit / 8);
         dstBit %= 8;
         rem -= 1;
      }
   }
   return ret;
}
#ifndef NO_PYTHON
//! Return true if any bits are set in range
/*
 * Python wrapper around anyBits(); read-only buffer access, with the
 * requested bit span range-checked.
 *
 * Fixed: the Py_ssize_t buffer length was passed to a %i format specifier
 * (a varargs type mismatch on LP64 platforms); it is now cast explicitly.
 */
bool rim::Master::anyBitsPy(boost::python::object dst, uint32_t lsb, uint32_t size) {
   Py_buffer dstBuf;
   bool ret;
   if ( PyObject_GetBuffer(dst.ptr(),&dstBuf,PyBUF_SIMPLE) < 0 )
      throw(rogue::GeneralError("Master::anyBits","Python Buffer Error"));
   if ( (lsb + size) > (dstBuf.len*8) ) {
      PyBuffer_Release(&dstBuf);
      throw(rogue::GeneralError::create("Master::anyBits",
         "Attempt to access %i bits starting from bit %i from array with bitSize %i",
         size, lsb, (int)(dstBuf.len*8)));
   }
   ret = anyBits((uint8_t *)dstBuf.buf, lsb, size);
   PyBuffer_Release(&dstBuf);
   return ret;
}
//! Python >> operator: attach the right-hand object as this master's slave.
void rim::Master::rshiftPy ( bp::object p ) {
   rim::SlavePtr slv;

   // Direct case: the object itself wraps a memory slave.
   boost::python::extract<rim::SlavePtr> asSlave(p);
   if ( asSlave.check() ) {
      slv = asSlave();
   }

   // Indirect case: the object can hand us a slave via _getMemorySlave().
   else if ( PyObject_HasAttrString(p.ptr(), "_getMemorySlave" ) ) {
      boost::python::extract<rim::SlavePtr> asIndirect(p.attr("_getMemorySlave")());
      if ( asIndirect.check() ) slv = asIndirect();
   }

   if ( slv != NULL ) setSlave(slv);
   else throw(rogue::GeneralError::create("memory::Master::rshiftPy","Attempt to use >> operator with incompatible memory slave"));
}
#endif
//! Support >> operator in C++
// Attaches 'other' as this master's slave and returns it unchanged so
// connections of the form  master >> slave  compose naturally.
rim::SlavePtr & rim::Master::operator >>(rim::SlavePtr & other) {
   setSlave(other);
   return other;
}
|
{"hexsha": "0e460a05d885c7fdb925e01870e3b85f3c9a0370", "size": 13440, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/rogue/interfaces/memory/Master.cpp", "max_stars_repo_name": "mwittgen/rogue", "max_stars_repo_head_hexsha": "4be0e9a4d17bdd3987a268f54ad195ee1093190d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 26.0, "max_stars_repo_stars_event_min_datetime": "2016-11-02T15:46:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T16:50:00.000Z", "max_issues_repo_path": "src/rogue/interfaces/memory/Master.cpp", "max_issues_repo_name": "mwittgen/rogue", "max_issues_repo_head_hexsha": "4be0e9a4d17bdd3987a268f54ad195ee1093190d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 419.0, "max_issues_repo_issues_event_min_datetime": "2017-06-07T16:06:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T21:05:17.000Z", "max_forks_repo_path": "src/rogue/interfaces/memory/Master.cpp", "max_forks_repo_name": "mwittgen/rogue", "max_forks_repo_head_hexsha": "4be0e9a4d17bdd3987a268f54ad195ee1093190d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2017-07-25T22:58:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T23:30:05.000Z", "avg_line_length": 28.7179487179, "max_line_length": 133, "alphanum_fraction": 0.5993303571, "num_tokens": 3734}
|
from functools import partial
from typing import Optional, Dict
import glm
import numpy as np
from lib.opengl.core.base import *
from lib.opengl import *
from lib.gen import Worker
from .shader_node import GameShaderNode
from .rs import GameRenderSettings
from ..map import TilemapSampler
from tests.util import Timer
class TileMapNode(GameShaderNode):
    """Shader node that renders a tile map using wang-tile distance fields.

    The visible window of the map is sampled asynchronously through a
    background Worker and uploaded as a float RGBA texture (bound to
    texture unit 3, read as ``u_tex4``) which the fragment shader
    indexes per pixel.
    """
    def __init__(self, map: TilemapSampler, name: str):
        super().__init__(name)
        self.map = map  # sampler, invoked as map(x, y, w, h) inside the worker
        self.map_texture = Texture2D()  # GPU copy of the sampled map window
        self.last_map_center = None  # integer map coords of the last uploaded window center
        self.last_map_offset = None  # offset subtracted from the projection location on render
        self.last_map_scale = None  # projection scale at the time of the last upload
        self.map_worker = Worker.instance("numpy")  # shared background worker for map sampling
        self.map_requested = False  # True while a "map" request is in flight
    def get_game_shader_code(self):
        # The returned text is GLSL fragment-shader source (a runtime
        # string, not a docstring) consumed by the GameShaderNode base.
        return """
        #include <wang-tile.glsl>
        #line 32
        uniform ivec2 u_tile_size;
        uniform ivec2 u_tile_set_size;
        vec3 wang_tile(in vec2 uv, int idx00, int idx10, int idx01, int idx11) {
            float d = wang_tile_distance(uv, idx00);
            //d = smin(d, wang_tile_distance(uv + vec2(2, 0), idx10));
            d -= .08 * (abs(sin(uv.x*3.1415)) + abs(sin(uv.y*6.28)));
            vec3 col1 = vec3(0.2, .5, .7);
            vec3 col2 = vec3(0.8, 0.6, 0.2);
            vec3 col = mix(col1, col2, smoothstep(0.05, -0.05, d));
            return col;
        }
        vec4 game_shader(in GameShader gs) {
            //return texture(u_tex4, gs.texCoord) / 10.;
            ivec2 map_pos = ivec2(gs.map_pos);
            ivec4 map00 = ivec4(texelFetch(u_tex4, map_pos, 0));
            ivec4 map10 = ivec4(texelFetch(u_tex4, map_pos + ivec2(1, 0), 0));
            ivec4 map01 = ivec4(texelFetch(u_tex4, map_pos + ivec2(0, 1), 0));
            ivec4 map11 = ivec4(texelFetch(u_tex4, map_pos + ivec2(1, 1), 0));
            vec2 tile_pos = fract(gs.map_pos);
            // when using bilinear mag filter, this is needed
            //tile_pos = tile_pos * (float(u_tile_size - 1.) + .5) / float(u_tile_size);
            //vec4 color = texture(u_tex1, tile_pos / u_tile_set_size);
            vec4 color = vec4(wang_tile(tile_pos * 2. - 1., map00.z, map10.z, map01.z, map11.z), 1);
            color.xyz *= .2 + .8 * clamp(map00.y/10., 0, 1);
            //float frame = smoothstep(0.6, 0., max(abs(gs.uv.x), abs(gs.uv.y)) - 1.);
            vec2 screen_uv = gs.texCoord * 2. - 1.;
            //float frame = smoothstep(1., .8, max(abs(screen_uv.x), abs(screen_uv.y)));
            float frame = 1. - dot(screen_uv*.8, screen_uv);
            color *= .7 + .3 * frame;
            return color;
        }
        """
    def num_multi_sample(self) -> int:
        # Number of multisample passes requested from the render pipeline.
        return 4
    def create(self, render_settings: RenderSettings):
        """Create GL resources; safe to call more than once."""
        super().create(render_settings)
        if not self.map_texture.is_created():
            self.map_texture.create()
    def release(self):
        """Release GL resources owned by this node."""
        self.map_texture.release()
        super().release()
    def render(self, rs: GameRenderSettings, pass_num: int):
        """Bind the map texture, refresh it if needed, and draw the quad."""
        self.map_texture.set_active_texture(3)
        self.map_texture.bind()
        # NEAREST sampling: the shader does texelFetch-style integer lookups.
        self.map_texture.set_parameter(gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        self.update_map(rs)
        self.quad.drawable.shader.set_uniform("u_tile_size", [32, 32])
        self.quad.drawable.shader.set_uniform("u_tile_set_size", [4, 4])
        # Only draw once a map window has been uploaded; the projection is
        # temporarily shifted so map texel (0, 0) lines up with the
        # uploaded window's origin.
        if self.last_map_offset:
            with rs.projection:
                rs.projection.location = rs.projection.location - self.last_map_offset
                super().render(rs, pass_num)
    def update_map(self, rs: GameRenderSettings):
        """Request a fresh map window from the worker and upload results.

        A new request is issued when the view center moved more than two
        tiles or the projection zoomed out by more than 10%; while a
        request is in flight, this polls the worker for its result.
        """
        map_center = (int(rs.projection.location[0]), int(rs.projection.location[1]))
        if self.last_map_center is None:
            do_update = True
        else:
            # Manhattan distance of the view center since the last upload.
            dist = abs(self.last_map_center[0] - map_center[0]) + abs(self.last_map_center[1] - map_center[1])
            do_update = dist > 2
            do_update |= self.last_map_scale * 1.1 < rs.projection.scale
        if do_update or self.map_requested:
            if self.map_requested:
                # Poll for the in-flight request; the window geometry that
                # was captured at request time rides along in "extra".
                map_array = None
                result = self.map_worker.pop_result("map")
                if result:
                    self.map_requested = False
                    map_array = result["result"]
                    w, h, map_center, scale = result["extra"]
            else:
                # radius
                w = h = max(16, int(rs.projection.scale * 1.3))
                mx, my, mw, mh = map_center[0] - w, map_center[1] - h, w * 2 + 1, h * 2 + 1
                self.map_worker.request(
                    "map",
                    partial(self.map, mx, my, mw, mh),
                    extra=(w, h, map_center, rs.projection.scale)
                )
                map_array = None
                self.map_requested = True
            if map_array is not None:
                self.upload_map(map_array)
                self.last_map_center = map_center
                self.last_map_offset = glm.vec2(*map_center) - glm.vec2(w, h)
                self.last_map_scale = scale
    def upload_map(self, float_array: np.ndarray):
        """Upload the sampled map window into the RGBA32F map texture."""
        # print(float_array)
        if float_array.dtype.name != "float32":
            float_array = float_array.astype("float32")
        if not self.map_texture.is_created():
            self.map_texture.create()
        self.map_texture.bind()
        self.map_texture.upload_numpy(
            float_array,
            width=float_array.shape[1],
            input_format=gl.GL_RGBA, gpu_format=gl.GL_RGBA32F,
        )
|
{"hexsha": "356c3f1a9bac2ff2723fb587f898cf1d9128b25a", "size": 5672, "ext": "py", "lang": "Python", "max_stars_repo_path": "tilegame/render/tilemap_node.py", "max_stars_repo_name": "defgsus/thegame", "max_stars_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-05T11:49:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T11:49:26.000Z", "max_issues_repo_path": "tilegame/render/tilemap_node.py", "max_issues_repo_name": "defgsus/thegame", "max_issues_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tilegame/render/tilemap_node.py", "max_forks_repo_name": "defgsus/thegame", "max_forks_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5850340136, "max_line_length": 110, "alphanum_fraction": 0.5601198872, "include": true, "reason": "import numpy", "num_tokens": 1425}
|
import numpy as np
import pandas as pd
from .wordle_dictionary import popularity_dict
class Guesser:
    """Handles Wordle guessing strategies.

    The ``strategy`` name passed at construction selects one of the
    strategy methods, which is bound to ``self.make_guess``.  Every
    strategy takes the current list of possible words (assumed to be
    5-letter words) and returns a single guess.
    """

    def __init__(self, strategy):
        """Bind ``self.make_guess`` to the requested strategy.

        Raises:
            ValueError: if ``strategy`` is not a known strategy name.
        """
        strategies = {
            "random": self.make_random_guess,
            "entropy": self.make_entropy_guess,
            "scored": self.make_scored_guess,
        }
        try:
            self.make_guess = strategies[strategy]
        except KeyError:
            # Narrow except: the original bare `except` swallowed every
            # error, printed a message and left the object without a
            # make_guess attribute, crashing later with AttributeError.
            raise ValueError(
                "Guessing strategy must be one of: " + ", ".join(strategies)
            ) from None
        # Number of guesses made so far; only the entropy strategy uses
        # it (to substitute a precomputed opener on the first guess).
        self.guesses = 0

    def make_random_guess(self, possible_words):
        """Return a uniformly random word from ``possible_words``."""
        return np.random.choice(possible_words)

    def make_scored_guess(self, possible_words):
        """Return the first word.

        Relies on ``possible_words`` being pre-sorted by letter frequency.
        """
        return possible_words[0]

    def make_entropy_guess(self, possible_words):
        """Return the word expected to reduce ``possible_words`` the most.

        Uses the expectation value of the list-size reduction implied by
        the Wordle scoring (green/orange/white) of each candidate letter.
        """
        # Entropy calculation is slow, so use a precalculated choice for
        # the first guess.
        if self.guesses == 0:
            self.guesses += 1
            return "soare"
        position_count_df, letter_count_df = self.get_position_letter_counts(possible_words)
        n_words = len(possible_words)

        def expected(word):
            return self.expected_list_reduction(
                word, n_words, position_count_df, letter_count_df
            )

        # Equivalent to sorting (expectation, word) pairs descending and
        # taking the first, but O(n) instead of O(n log n); ties break on
        # the lexicographically larger word, exactly as before.
        best = max(possible_words, key=lambda word: (expected(word), word))
        self.guesses += 1
        return best

    def get_position_letter_counts(self, possible_words):
        """Count letter occurrences per position and overall.

        Returns:
            tuple: a DataFrame indexed by ``(Position, Letter)`` with a
            ``Count`` column, and a Series of total counts per letter.
        """
        df = pd.DataFrame(np.asarray([list(word) for word in possible_words]))
        df.columns = [1, 2, 3, 4, 5]  # assumes 5-letter words
        df = df.melt(var_name="Position", value_name="Letter")
        df = df.groupby(["Position", "Letter"]).size().reset_index().rename({0: "Count"}, axis=1)
        letters = df.groupby("Letter")["Count"].sum()
        df = df.set_index(["Position", "Letter"])
        return df, letters

    def expected_list_reduction(self, guess, n_words, position_count_df, letter_count_df):
        """Expected reduction of the word list from guessing ``guess``.

        For each letter, counts how often it would score green / orange /
        white across the current word list and sums
        P(result) * (list reduction given result) over all letters, plus a
        popularity bonus from ``popularity_dict``.
        """
        expect = 0.0
        done = []
        for i, letter in enumerate(guess):
            # Count the number of times a letter would be green/orange/white
            # in our word list.
            green_count = position_count_df.loc[i + 1, letter].values[0]
            orange_count = letter_count_df.loc[letter] - green_count
            white_count = n_words - green_count - orange_count
            # Expectation value is probability of result times the
            # reduction in list size from that result.
            expect += (green_count / n_words) * (n_words - green_count)
            # Oranges/whites are only useful once, so repeated letters only
            # contribute the green reduction.
            if letter not in done:
                expect += (orange_count / n_words) * (n_words - orange_count)
                expect += (white_count / n_words) * (n_words - white_count)
                done.append(letter)
        expect += popularity_dict[guess]
        return expect
|
{"hexsha": "318e43181b985926bade0b235dad813bc1785c2b", "size": 4070, "ext": "py", "lang": "Python", "max_stars_repo_path": "WordleBot/Guesser.py", "max_stars_repo_name": "jonholdship/WordleBot", "max_stars_repo_head_hexsha": "16eb715218a73924068f0caa813082cfe5a8bcbc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-23T04:06:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T04:06:16.000Z", "max_issues_repo_path": "WordleBot/Guesser.py", "max_issues_repo_name": "jonholdship/WordleBot", "max_issues_repo_head_hexsha": "16eb715218a73924068f0caa813082cfe5a8bcbc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-19T09:15:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T20:54:18.000Z", "max_forks_repo_path": "WordleBot/Guesser.py", "max_forks_repo_name": "jonholdship/WordleBot", "max_forks_repo_head_hexsha": "16eb715218a73924068f0caa813082cfe5a8bcbc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-15T09:59:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T09:59:14.000Z", "avg_line_length": 46.7816091954, "max_line_length": 122, "alphanum_fraction": 0.6358722359, "include": true, "reason": "import numpy", "num_tokens": 827}
|
# Copyright 2021 KU Leuven.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Rinaldo Wander Montalvão, PhD
#
import math
import numpy as np
from typing import Dict, List, Tuple
from Bio.PDB import Chain
from dataclasses import dataclass
from numpy.polynomial import chebyshev
from scipy.interpolate import CubicSpline
@dataclass
class ResidueGeometry:
    """Per-residue geometric descriptors computed by GeometryParser.

    All fields default to None and are filled in during parsing.
    """
    # Residue information
    name: str = None        # three-letter residue name (e.g. "ALA")
    chain: str = None       # chain identifier
    res_num: int = None     # sequential index assigned while parsing (0-based)
    res_order: int = None   # residue sequence number from the structure file
    # Differential geometry of the alpha-carbon trace
    curvature: float = None
    torsion: float = None
    arc_len: float = None
    # Knot theory invariant
    writhing: float = None
    # Backbone dihedral angles (degrees or radians, per parser setting)
    phi: float = None
    psi: float = None
    # Free-form residue annotation
    res_ann: Dict[str, str] = None
class GeometryParser:
    """
    Class for parsing the geometrical properties of a protein chain
    """
    # __slots__ keeps instances lightweight and rejects misspelled
    # attribute assignments; all state is set in __init__.
    __slots__ = ('__residues',
                 '__residues_map',
                 '__degrees',
                 '__gap_list',
                 '__anomaly_list')
def __init__(self, chain: Chain, deg: bool = True) -> None:
"""
:param chain: Protein chain
:type chain: Chain
:param deg: Degree?
:type deg: bool
"""
residues, residues_map = GeometryParser.calc_geometry(chain=chain, deg=deg)
self.__residues = residues
self.__residues_map = residues_map
self.__degrees = deg
self.__gap_list = GeometryParser.find_gaps(chain=chain)
self.__anomaly_list = GeometryParser.find_anomalies(chain=chain)
    @property
    def residues(self) -> Dict[int, ResidueGeometry]:
        """
        Access to residue geometry

        :return: mapping from sequential residue index to its geometry
        :rtype: dict[int, ResidueGeometry]
        """
        return self.__residues
    @property
    def residues_map(self) -> Dict[int, int]:
        """
        Maps residue sequence numbers (as in the structure file) to the
        sequential indices used as keys of `residues`.

        :return: Dictionary for the residue map
        :rtype: dict[int, int]
        """
        return self.__residues_map
    @property
    def deg(self) -> bool:
        """
        Are phi and psi in degrees?

        :return: True if phi and psi are in degrees, False for radians
        :rtype: bool
        """
        return self.__degrees
@staticmethod
def find_gaps(chain: Chain) -> List[Tuple[int, int]]:
"""
Find gaps in the protein's chain
:param chain: Protein chain
:type chain: Chain
:return: List of gaps
:rtype: list[tuple[int, int]]
"""
gaps = []
i = 0
residue = list(chain.get_residues())[i]
het_flag, prev, insertion_code = residue.id
for i in range(len(chain)):
# get chain and pos
residue = list(chain.get_residues())[i]
het_flag, pos, insertion_code = residue.id
if pos - prev > 1:
gaps.append((prev, pos))
prev = pos
return gaps
    @property
    def gaps(self) -> List[Tuple[int, int]]:
        """
        Access to chain's gaps, as computed by find_gaps at parse time.

        :return: Chain's gaps
        :rtype: list[tuple[int, int]]
        """
        return self.__gap_list
    @property
    def gap(self) -> bool:
        """
        Are there gaps in the chain?

        :return: True if at least one gap was found
        :rtype: bool
        """
        return len(self.__gap_list) > 0
@staticmethod
def find_anomalies(chain: Chain) -> List[str]:
"""
Find anomalies in the protein's chain
:param chain: Protein chain
:type chain: Chain
:return: List of anomalies
:rtype: list[str]
"""
# TODO: insert anomalies
anomalies = []
return anomalies
    @property
    def anomalies(self) -> List[str]:
        """
        Access to chain's anomalies, as computed by find_anomalies.

        :return: Chain's anomalies
        :rtype: list[str]
        """
        return self.__anomaly_list
    @property
    def anomaly(self) -> bool:
        """
        Are there anomalies in the chain?

        :return: True if at least one anomaly was found
        :rtype: bool
        """
        return len(self.__anomaly_list) > 0
    @staticmethod
    def calc_curvature_torsion(p: float,
                               t: List[float],
                               xt: CubicSpline,
                               yt: CubicSpline,
                               zt: CubicSpline) -> Tuple[float, float]:
        """
        Function to compute Curvature and Torsion at parameter p.

        The curve (xt, yt, zt) is re-fitted locally with degree-10 Chebyshev
        polynomials on a window around p, then curvature and torsion are
        evaluated from the polynomial derivatives at p.

        :param p: point of calculation
        :type p: float
        :param t: list of curve parameters (used only for its min/max bounds)
        :type t: list[float]
        :param xt: x(t)
        :type xt: CubicSpline
        :param yt: y(t)
        :type yt: CubicSpline
        :param zt: z(t)
        :type zt: CubicSpline
        :return: curvature, torsion
        :rtype: Tuple[float, float]
        """
        numpts = 51  # samples per local Chebyshev fit
        # TODO: Improve balance method
        mn = np.min(t)
        mx = np.max(t)
        # Widen the fitting window (half-width 1, 2, 3) until the
        # least-squares fit is not rank-deficient.
        for dt in range(1, 4):
            delta = float(dt)
            ini = p - delta
            end = p + delta
            # Shift the window back inside [mn, mx] if it overhangs an end.
            if ini < mn:
                offset = mn - ini
            elif end > mx:
                offset = mx - end
            else:
                offset = 0.0
            ini += offset
            end += offset
            tp = np.linspace(ini, end, numpts)
            # full=True returns [resid, rank, sv, rcond] diagnostics; resid
            # is empty for a rank-deficient fit, which triggers a retry.
            cxt, resxt = chebyshev.chebfit(tp, xt(tp), deg=10, full=True)
            cyt, resyt = chebyshev.chebfit(tp, yt(tp), deg=10, full=True)
            czt, reszt = chebyshev.chebfit(tp, zt(tp), deg=10, full=True)
            if resxt[0].size != 0 and resyt[0].size != 0 and reszt[0].size != 0:
                break
        # First, second and third derivatives of each Chebyshev fit.
        cxtd1 = chebyshev.chebder(cxt, m=1)
        cytd1 = chebyshev.chebder(cyt, m=1)
        cztd1 = chebyshev.chebder(czt, m=1)
        cxtd2 = chebyshev.chebder(cxt, m=2)
        cytd2 = chebyshev.chebder(cyt, m=2)
        cztd2 = chebyshev.chebder(czt, m=2)
        cxtd3 = chebyshev.chebder(cxt, m=3)
        cytd3 = chebyshev.chebder(cyt, m=3)
        cztd3 = chebyshev.chebder(czt, m=3)
        # Evaluate all derivatives at p.
        xtd1 = chebyshev.chebval(p, cxtd1)
        ytd1 = chebyshev.chebval(p, cytd1)
        ztd1 = chebyshev.chebval(p, cztd1)
        xtd2 = chebyshev.chebval(p, cxtd2)
        ytd2 = chebyshev.chebval(p, cytd2)
        ztd2 = chebyshev.chebval(p, cztd2)
        xtd3 = chebyshev.chebval(p, cxtd3)
        ytd3 = chebyshev.chebval(p, cytd3)
        ztd3 = chebyshev.chebval(p, cztd3)
        # Compute curvature: k = |r' x r''| / |r'|^3
        v1 = np.array([xtd1, ytd1, ztd1])
        v2 = np.array([xtd2, ytd2, ztd2])
        rs = np.cross(v1, v2)
        r1 = np.dot(rs, rs)
        r2 = np.dot(v1, v1)
        curvature = math.sqrt(r1) / math.sqrt(r2) ** 3
        # Compute torsion: tau = det(r', r'', r''') / |r' x r''|^2.
        # The six terms below expand the scalar triple product.
        det = -xtd3 * ytd2 * ztd1
        det += xtd2 * ytd3 * ztd1
        det += xtd3 * ytd1 * ztd2
        det -= xtd1 * ytd3 * ztd2
        det -= xtd2 * ytd1 * ztd3
        det += xtd1 * ytd2 * ztd3
        torsion = det / r1
        return curvature, torsion
    @staticmethod
    def calc_arc_length(p: float, xt: CubicSpline, yt: CubicSpline, zt: CubicSpline) -> float:
        """
        Compute the arc length of a 3-residues long curve.

        Approximates the arc length of the spline over [p - 1, p + 1] by
        summing chord lengths sampled at steps of 0.1.

        :param p: point around which the curve is measured
        :type p: float
        :param xt: x(t)
        :type xt: CubicSpline
        :param yt: y(t)
        :type yt: CubicSpline
        :param zt: z(t)
        :type zt: CubicSpline
        :return: arc length
        :rtype: float
        """
        arc_len = 0.0
        i = p - 1.0
        # NOTE(review): accumulating `i += 0.1` in floating point drifts
        # slightly, so the exact step positions (and possibly the last step)
        # are approximate — confirm this tolerance is acceptable.
        while i < (p + 1.0):
            dx = xt(i + 0.1) - xt(i)
            dy = yt(i + 0.1) - yt(i)
            dz = zt(i + 0.1) - zt(i)
            dist = np.array([dx, dy, dz])
            # Euclidean length of this chord segment.
            arc_len += math.sqrt(np.dot(dist, dist))
            i += 0.1
        return arc_len
    @staticmethod
    def calc_writhing(i: int, t: List[float], x: List[float], y: List[float], z: List[float]) -> float:
        """
        Compute the writhing number in a 5-residue long window.

        Uses a discrete Gauss double sum over pairs of polyline segments in
        the window centred on residue i.

        :param i: residue position
        :type i: int
        :param t: curve's parameters (used only for its length)
        :type t: list[float]
        :param x: x(t)
        :type x: list[float]
        :param y: y(t)
        :type y: list[float]
        :param z: z(t)
        :type z: list[float]
        :return: writhing number
        :rtype: float
        """
        # Clamp the window [i-2, i+2] to the valid index range, preserving
        # its width by shifting rather than truncating.
        start = i - 2
        stop = i + 2
        ini = 0
        end = len(t) - 1
        if start < ini:
            offset = ini - start
        elif stop > end:
            offset = end - stop
        else:
            offset = 0
        start += offset
        stop += offset
        # Displacement vectors reused across the double loop below.
        rij = np.zeros(3)
        ri1j = np.zeros(3)
        rij1 = np.zeros(3)
        rjj1 = np.zeros(3)
        rii1 = np.zeros(3)
        ri1j1 = np.zeros(3)

        # Return the number's sign. Note: `v and ...` short-circuits, so
        # sgn(0.0) returns 0.0 itself, zeroing that pair's contribution.
        def sgn(v: np.ndarray) -> float:
            return v and (1.0, -1.0)[v < 0.0]

        total = 0.0
        # Double sum over non-adjacent segment pairs (ii, jj) in the window.
        for ii in range(start, stop - 2):
            for jj in range(ii + 2, stop):
                rij[0] = x[jj] - x[ii]
                rij[1] = y[jj] - y[ii]
                rij[2] = z[jj] - z[ii]
                ri1j[0] = x[jj] - x[ii + 1]
                ri1j[1] = y[jj] - y[ii + 1]
                ri1j[2] = z[jj] - z[ii + 1]
                rij1[0] = x[jj + 1] - x[ii]
                rij1[1] = y[jj + 1] - y[ii]
                rij1[2] = z[jj + 1] - z[ii]
                ri1j1[0] = x[jj + 1] - x[ii + 1]
                ri1j1[1] = y[jj + 1] - y[ii + 1]
                ri1j1[2] = z[jj + 1] - z[ii + 1]
                rjj1[0] = x[jj + 1] - x[jj]
                rjj1[1] = y[jj + 1] - y[jj]
                rjj1[2] = z[jj + 1] - z[jj]
                rii1[0] = x[ii + 1] - x[ii]
                rii1[1] = y[ii + 1] - y[ii]
                rii1[2] = z[ii + 1] - z[ii]
                # Unit normals of the four faces of the tetrahedron spanned
                # by the two segments.
                aij = (np.cross(rij, rij1) / np.linalg.norm(np.cross(rij, rij1)))
                bij = (np.cross(rij1, ri1j1) / np.linalg.norm(np.cross(rij1, ri1j1)))
                cij = (np.cross(ri1j1, ri1j) / np.linalg.norm(np.cross(ri1j1, ri1j)))
                dij = (np.cross(ri1j, rij) / np.linalg.norm(np.cross(ri1j, rij)))
                # Signed solid angle subtended by the segment pair.
                omegaij = (math.asin(np.dot(aij, bij)) +
                           math.asin(np.dot(bij, cij)) +
                           math.asin(np.dot(cij, dij)) +
                           math.asin(np.dot(dij, aij))) * sgn(np.dot(np.cross(rjj1, rii1), rij1))
                total += omegaij / (4.0 * math.pi)
        # Factor 2: each unordered pair is counted once in the sum above.
        writhing = 2.0 * total
        return writhing
    @staticmethod
    def calc_geometry(chain: Chain, deg: bool) -> Tuple[Dict[int, ResidueGeometry], Dict[int, int]]:
        """
        Function used to compute the geometric properties around residues.
        It computes curvature, torsion, arc-length and writhing number.

        :param chain: Protein main-chain
        :type chain: Chain
        :param deg: angle in degrees?
        :type deg: bool
        :return: (residue dictionary keyed by sequential index,
                  map from file residue number to sequential index)
        :rtype: Tuple[Dict[int, ResidueGeometry], Dict[int, int]]
        """
        t = []
        x = []
        y = []
        z = []
        residues_map = {}
        residues = {}
        num = 0
        for residue in chain:
            # Skip invalid residues: a non-blank hetero flag marks
            # HETATM records (waters, ligands).
            res_type, model, chain_id, res_id = residue.get_full_id()
            het_flag, pos, insertion_code = res_id
            if het_flag[0] != " ":
                continue
            try:
                coord = residue["CA"].get_coord()
            except KeyError:
                print(f'ERROR: missing CA atom at {residue.get_resname()} - {residue.get_full_id()}!')
                raise
            # Parameterise the curve by the sequential residue index.
            t.append(float(num))
            x.append(coord[0])
            y.append(coord[1])
            z.append(coord[2])
            residues[int(num)] = ResidueGeometry(name=residue.get_resname(),
                                                 chain=chain_id,
                                                 res_num=num,
                                                 res_order=pos,
                                                 curvature=0.0,
                                                 torsion=0.0,
                                                 arc_len=0.0,
                                                 writhing=0.0,
                                                 res_ann={})
            residues_map[pos] = num
            num += 1
        # Fit the alpha-carbons with a cubic-spline
        xt = CubicSpline(t, x, bc_type='natural')
        yt = CubicSpline(t, y, bc_type='natural')
        zt = CubicSpline(t, z, bc_type='natural')
        ini = 0
        end = len(t) - 1
        for i, tp in enumerate(t):
            # Compute curvature and torsion. The endpoints lack a centred
            # window, so they reuse the value at the first/last interior
            # parameter instead.
            if ini < i < end:
                curvature, torsion = GeometryParser.calc_curvature_torsion(p=tp, t=t, xt=xt, yt=yt, zt=zt)
            elif i == ini:
                curvature, torsion = GeometryParser.calc_curvature_torsion(p=t[+1], t=t, xt=xt, yt=yt, zt=zt)
            else:
                curvature, torsion = GeometryParser.calc_curvature_torsion(p=t[-2], t=t, xt=xt, yt=yt, zt=zt)
            # Compute the arc length
            arc_len = GeometryParser.calc_arc_length(p=tp, xt=xt, yt=yt, zt=zt)
            # Compute the writhing number
            writhing = GeometryParser.calc_writhing(i=i, t=t, x=x, y=y, z=z)
            residues[int(tp)].curvature = curvature
            residues[int(tp)].torsion = torsion
            residues[int(tp)].arc_len = arc_len
            residues[int(tp)].writhing = writhing
        # Fill in phi/psi on the same residue entries, in place.
        GeometryParser.calc_dihedral_angles(chain=chain, residues=residues, deg=deg)
        return residues, residues_map
@staticmethod
def calc_dihedral_torsion(p1: np.ndarray, p2: np.ndarray, p3: np.ndarray, p4: np.ndarray, deg: bool) -> float:
"""
Compute the dihedral angles between four vectors
:param p1: vector 1
:type p1: np.ndarray
:param p2: vector 2
:type p2: np.ndarray
:param p3: vector 3
:type p3: np.ndarray
:param p4: vector 4
:type p4: np.ndarray
:param deg: dihedral angle
:type deg: bool
:return: Dihedral angle (in degrees if deg=True, radians otherwise)
:rtype: float
"""
b1 = p2 - p1
b2 = p2 - p3
b3 = p4 - p3
# Normalize a vector
def norm_vec(v: np.ndarray) -> np.ndarray:
return v/np.linalg.norm(v)
n1 = norm_vec(np.cross(b1, b2))
n2 = norm_vec(np.cross(b2, b3))
m1 = np.cross(n1, norm_vec(b2))
x = np.dot(n1, n2)
y = np.dot(m1, n2)
if deg:
theta = math.degrees(math.atan2(y, x))
else:
theta = math.atan2(y, x)
return theta
    @staticmethod
    def calc_dihedral_angles(chain: Chain, residues: Dict[int, ResidueGeometry], deg: bool) -> None:
        """
        Compute the dihedral angles phi and psi.

        Results are stored in place on the entries of *residues*, keyed by
        the same sequential index used by calc_geometry. An undefined angle
        (chain terminus, or neighbour across a numbering gap) is set to 0.0.

        :param chain: Protein chain
        :type chain: Chain
        :param residues: Residue dictionary
        :type residues: Dict[int, ResidueGeometry]
        :param deg: angle in degrees?
        :type deg: bool
        """
        # Sequence numbers of the non-hetero residues, in chain order.
        idx = [res.id[1] for res in list(chain.get_residues()) if res.id[0] == ' ']
        ini = idx[+0]
        # NOTE(review): using idx[-2] means the *second to last* residue also
        # gets psi = 0.0 although it has a successor; confirm whether this
        # should be idx[-1].
        end = idx[-2]
        num = 0
        # NOTE(review): `i` counts all residues (hetero included) while `idx`
        # holds only the non-hetero ones, so idx[i - 1] / idx[i + 1] assume
        # hetero residues appear only after all amino acids — confirm.
        for i, residue in enumerate(chain):
            # Skip invalid residues
            het_flag, pos, insertion_code = residue.id
            if het_flag[0] != ' ':
                continue
            # core atoms
            try:
                atom_n = residue['N'].get_coord()
                atom_ca = residue['CA'].get_coord()
                atom_c = residue['C'].get_coord()
            except KeyError:
                pdb, model, chain_id = chain.full_id
                print(f'Error: Missing N, CA or C atom at [{pos}] {pdb} - {model} - {chain_id}')
                raise
            # phi: dihedral over C(prev), N, CA, C — only when a directly
            # preceding residue exists (no gap).
            if pos > ini:
                het_flag, prev_res, insertion_code = chain[idx[i - 1]].id
                if abs(prev_res - pos) <= 1:
                    try:
                        p1 = chain[idx[i - 1]]['C'].get_coord()
                    except KeyError:
                        pdb, model, chain_id = chain.full_id
                        print(f'Error: Missing C atom at [{idx[i - 1]}] {pdb} - {model} - {chain_id}')
                        raise
                    p2 = atom_n
                    p3 = atom_ca
                    p4 = atom_c
                    phi = GeometryParser.calc_dihedral_torsion(p1=p1, p2=p2, p3=p3, p4=p4, deg=deg)
                else:
                    phi = 0.0
            else:
                phi = 0.0
            # psi: dihedral over N, CA, C, N(next) — only when a directly
            # following residue exists (no gap).
            if pos < end:
                try:
                    het_flag, next_res, insertion_code = chain[idx[i + 1]].id
                except (IndexError, KeyError):
                    pdb, model, chain_id = chain.full_id
                    print(f'Error: after residue [{idx[i]}] {pdb} - {model} - {chain_id}')
                    raise
                if abs(next_res - pos) <= 1:
                    p1 = atom_n
                    p2 = atom_ca
                    p3 = atom_c
                    try:
                        p4 = chain[idx[i + 1]]['N'].get_coord()
                    except KeyError:
                        pdb, model, chain_id = chain.full_id
                        print(f'Error: Missing N atom at [{idx[i + 1]}] {pdb} - {model} - {chain_id}')
                        raise
                    psi = GeometryParser.calc_dihedral_torsion(p1=p1, p2=p2, p3=p3, p4=p4, deg=deg)
                else:
                    psi = 0.0
            else:
                psi = 0.0
            residues[num].phi = phi
            residues[num].psi = psi
            num += 1
|
{"hexsha": "a111272687f795c728603ee69858983187a672b1", "size": 18379, "ext": "py", "lang": "Python", "max_stars_repo_path": "melodia/geometryparser.py", "max_stars_repo_name": "rwmontalvao/Melodia", "max_stars_repo_head_hexsha": "ff0a21637b976fd89853504a59c86db6e127878f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-04T17:03:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T07:21:09.000Z", "max_issues_repo_path": "melodia/geometryparser.py", "max_issues_repo_name": "rwmontalvao/Melodia", "max_issues_repo_head_hexsha": "ff0a21637b976fd89853504a59c86db6e127878f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "melodia/geometryparser.py", "max_forks_repo_name": "rwmontalvao/Melodia", "max_forks_repo_head_hexsha": "ff0a21637b976fd89853504a59c86db6e127878f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8890756303, "max_line_length": 114, "alphanum_fraction": 0.4969258393, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 5027}
|
from numpy.random import random_integers
from numpy import mean, amin
class DictionaryManager:
    """Helpers for building and analysing small dict-of-int-list fixtures."""

    @staticmethod
    def create_default_dict():
        """Return a fresh dictionary with keys '1'..'3' mapped to empty lists."""
        return {
            '1': [],
            '2': [],
            '3': []
        }

    @staticmethod
    def fill_dict(quantity, dictionary):
        """Fill every value of *dictionary* with *quantity* random ints in [1, 10].

        Mutates *dictionary* in place and returns it.
        """
        # numpy.random.random_integers is deprecated (since NumPy 1.11);
        # randint(1, 11) draws from the identical inclusive range 1..10.
        from numpy.random import randint
        for key in dictionary:
            dictionary[key] = randint(1, 11, quantity).tolist()
        return dictionary

    @staticmethod
    def find_minimal_average(dictionary):
        """Return {'key', 'value', 'average'} for the entry with the smallest mean.

        Ties resolve to the first key in insertion order; the average is
        rounded to two decimal places.
        """
        averages = [mean(values) for values in dictionary.values()]
        min_average = amin(averages)
        idx = averages.index(min_average)
        key = list(dictionary)[idx]
        return {
            'key': key,
            'value': dictionary[key],
            'average': round(min_average, 2)
        }
|
{"hexsha": "cda57d7861751297029d6d47515acaf5c2a27028", "size": 874, "ext": "py", "lang": "Python", "max_stars_repo_path": "data-science-module/task_1/services/dictionary_manager.py", "max_stars_repo_name": "burevestnik-png/tint-ognp", "max_stars_repo_head_hexsha": "c4b6a4a08e37ac89f3cb0677f79032b473c70aa1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-30T12:07:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-14T11:21:34.000Z", "max_issues_repo_path": "data-science-module/task_1/services/dictionary_manager.py", "max_issues_repo_name": "burevestnik-png/python-OGNP", "max_issues_repo_head_hexsha": "c4b6a4a08e37ac89f3cb0677f79032b473c70aa1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data-science-module/task_1/services/dictionary_manager.py", "max_forks_repo_name": "burevestnik-png/python-OGNP", "max_forks_repo_head_hexsha": "c4b6a4a08e37ac89f3cb0677f79032b473c70aa1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7058823529, "max_line_length": 71, "alphanum_fraction": 0.5823798627, "include": true, "reason": "from numpy", "num_tokens": 176}
|
[STATEMENT]
lemma iMODb_card: "0 < m \<Longrightarrow> card [r, mod m, c] = Suc c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c
[PROOF STEP]
apply (induct c)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. 0 < m \<Longrightarrow> card [ r, mod m, 0 ] = Suc 0
2. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> card [ r, mod m, Suc c ] = Suc (Suc c)
[PROOF STEP]
apply (simp add: iMODb_0 iIN_card)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> card [ r, mod m, Suc c ] = Suc (Suc c)
[PROOF STEP]
apply (subst iMODb_Suc_insert_conv[symmetric])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> card (insert (r + m * Suc c) [ r, mod m, c ]) = Suc (Suc c)
[PROOF STEP]
apply (subst card_insert_disjoint)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> finite [ r, mod m, c ]
2. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> r + m * Suc c \<notin> [ r, mod m, c ]
3. \<And>c. \<lbrakk>0 < m \<Longrightarrow> card [ r, mod m, c ] = Suc c; 0 < m\<rbrakk> \<Longrightarrow> Suc (card [ r, mod m, c ]) = Suc (Suc c)
[PROOF STEP]
apply (simp add: iT_finite iT_iff)+
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 715, "file": "Nat-Interval-Logic_IL_Interval", "length": 6}
|
[STATEMENT]
lemma state_q_bound:
fixes c :: nat
and l :: register
and ic :: configuration
and p :: program
and q :: nat
and a :: nat
defines "b == B c"
and "m == length p - 1"
assumes is_val: "is_valid_initial ic p a"
and q: "q > 0"
and terminate: "terminates ic p q"
and c: "c > 0"
assumes "k<m"
shows "SKe ic p b q k < b ^ q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
from b_def
[PROOF STATE]
proof (chain)
picking this:
b \<equiv> B c
[PROOF STEP]
have "b>1"
[PROOF STATE]
proof (prove)
using this:
b \<equiv> B c
goal (1 subgoal):
1. 1 < b
[PROOF STEP]
using B_def
[PROOF STATE]
proof (prove)
using this:
b \<equiv> B c
B ?c = 2 ^ Suc ?c
goal (1 subgoal):
1. 1 < b
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>b \<equiv> 2 * 2 ^ c; \<And>c. B c = 2 * 2 ^ c\<rbrakk> \<Longrightarrow> Suc 0 < 2 * 2 ^ c
[PROOF STEP]
by (metis One_nat_def one_less_numeral_iff power_gt1_lemma semiring_norm(76))
[PROOF STATE]
proof (state)
this:
1 < b
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
hence b: "b > 2"
[PROOF STATE]
proof (prove)
using this:
1 < b
goal (1 subgoal):
1. 2 < b
[PROOF STEP]
using c b_def B_def
[PROOF STATE]
proof (prove)
using this:
1 < b
0 < c
b \<equiv> B c
B ?c = 2 ^ Suc ?c
goal (1 subgoal):
1. 2 < b
[PROOF STEP]
by (smt One_nat_def Suc_le_lessD less_Suc_eq_le less_trans_Suc linorder_neqE_nat
numeral_2_eq_2 power_Suc0_right power_inject_exp)
[PROOF STATE]
proof (state)
this:
2 < b
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
from \<open>k<m\<close>
[PROOF STATE]
proof (chain)
picking this:
k < m
[PROOF STEP]
have "\<not> ishalt (p!k)"
[PROOF STATE]
proof (prove)
using this:
k < m
goal (1 subgoal):
1. \<not> ishalt (p ! k)
[PROOF STEP]
using is_val
[PROOF STATE]
proof (prove)
using this:
k < m
is_valid_initial ic p a
goal (1 subgoal):
1. \<not> ishalt (p ! k)
[PROOF STEP]
by (simp add: is_valid_def is_valid_initial_def is_val m_def)
[PROOF STATE]
proof (state)
this:
\<not> ishalt (p ! k)
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
hence "S ic p k q = 0"
[PROOF STATE]
proof (prove)
using this:
\<not> ishalt (p ! k)
goal (1 subgoal):
1. S ic p k q = 0
[PROOF STEP]
using terminate terminates_def correct_halt_def S_def
[PROOF STATE]
proof (prove)
using this:
\<not> ishalt (p ! k)
terminates ic p q
terminates ?c ?p ?q = (0 < ?q \<and> correct_halt ?c ?p ?q \<and> (\<forall>x<?q. \<not> ishalt (?p ! fst (steps ?c ?p x))))
correct_halt ?c ?p ?q = (ishalt (?p ! fst (steps ?c ?p ?q)) \<and> (\<forall>l<length (snd ?c). snd (steps ?c ?p ?q) ! l = 0))
S ?c ?p ?k ?t = (if fst (steps ?c ?p ?t) = ?k then Suc 0 else 0)
goal (1 subgoal):
1. S ic p k q = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
S ic p k q = 0
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
hence "SKe ic p b q k = (\<Sum>t = 0..q-1. b ^ t * S ic p k t)"
[PROOF STATE]
proof (prove)
using this:
S ic p k q = 0
goal (1 subgoal):
1. SKe ic p b q k = (\<Sum>t = 0..q - 1. b ^ t * S ic p k t)
[PROOF STEP]
using \<open>q>0\<close>
[PROOF STATE]
proof (prove)
using this:
S ic p k q = 0
0 < q
goal (1 subgoal):
1. SKe ic p b q k = (\<Sum>t = 0..q - 1. b ^ t * S ic p k t)
[PROOF STEP]
apply (auto cong: sum.cong simp: SKe_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>S ic p k q = 0; 0 < q\<rbrakk> \<Longrightarrow> (\<Sum>t = 0..q. b ^ t * S ic p k t) = (\<Sum>x = 0..q - Suc 0. b ^ x * S ic p k x)
[PROOF STEP]
by (metis (no_types, lifting) Suc_pred
add_cancel_right_right mult_0_right sum.atLeast0_atMost_Suc)
[PROOF STATE]
proof (state)
this:
SKe ic p b q k = (\<Sum>t = 0..q - 1. b ^ t * S ic p k t)
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
SKe ic p b q k = (\<Sum>t = 0..q - 1. b ^ t * S ic p k t)
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
have "... \<le> (\<Sum>t = 0..q-1. b^t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>t = 0..q - 1. b ^ t * S ic p k t) \<le> sum ((^) b) {0..q - 1}
[PROOF STEP]
by (auto simp add: S_def gr_implies_not0 sum_mono)
[PROOF STATE]
proof (state)
this:
(\<Sum>t = 0..q - 1. b ^ t * S ic p k t) \<le> sum ((^) b) {0..q - 1}
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>t = 0..q - 1. b ^ t * S ic p k t) \<le> sum ((^) b) {0..q - 1}
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
have "... < b ^ q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum ((^) b) {0..q - 1} < b ^ q
[PROOF STEP]
using `q>0` sum_bt
[PROOF STATE]
proof (prove)
using this:
0 < q
2 < ?b \<Longrightarrow> sum ((^) ?b) {0..?q} < ?b ^ Suc ?q
goal (1 subgoal):
1. sum ((^) b) {0..q - 1} < b ^ q
[PROOF STEP]
by (metis Suc_diff_1 b)
[PROOF STATE]
proof (state)
this:
sum ((^) b) {0..q - 1} < b ^ q
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
SKe ic p b q k < b ^ q
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
SKe ic p b q k < b ^ q
goal (1 subgoal):
1. SKe ic p b q k < b ^ q
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
SKe ic p b q k < b ^ q
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2626, "file": "DPRM_Theorem_Register_Machine_MultipleStepState", "length": 31}
|
# Overlap Iterator
# ================

# Lazy iterator over pairs of overlapping intervals drawn from two interval
# collections, both assumed sorted by (sequence name, leftmost position).
struct OverlapIterator{Sa,Sb,F,G}
    intervals_a::Sa   # first sorted interval collection
    intervals_b::Sb   # second sorted interval collection
    isless::F         # ordering predicate on sequence names
    filter::G         # two-argument predicate applied to each candidate pair
end
# Element type: one interval from each source, carrying the metadata types of
# the respective collections.
function Base.eltype(::Type{OverlapIterator{Sa,Sb,F,G}}) where {Sa,Sb,F,G}
    return Tuple{Interval{metadatatype(Sa)},Interval{metadatatype(Sb)}}
end
# The number of overlapping pairs cannot be known without scanning both
# inputs, so the iterator reports an unknown size.
function Base.IteratorSize(::Type{OverlapIterator{Sa,Sb,F,G}}) where {Sa,Sb,F,G}
    return Base.SizeUnknown()
end
"""
eachoverlap(intervals_a, intervals_b, [seqname_isless=Base.isless])
Create an iterator of overlapping intervals between `intervals_a` and `intervals_b`.
This function assumes elements of `intervals_a` and `intervals_b` are sorted by
its sequence name and left position. If the element type is not a subtype of
`GenomicFeatures.Interval`, elements are converted to `Interval` objects.
The third optional argument is a function that defines the order of sequence
names. The default function is `Base.isless`, which is the lexicographical
order.
"""
function eachoverlap(intervals_a, intervals_b, seqname_isless=Base.isless; filter=true_cmp)
return OverlapIterator(intervals_a, intervals_b, seqname_isless, filter)
end
# Iteration state for OverlapIterator.
struct OverlapIteratorState{Sa,Sb,Ta,Tb}
    next_a::Sa                 # pending (element, state) from intervals_a, or nothing
    next_b::Sb                 # pending (element, state) from intervals_b, or nothing
    queue::Queue{Interval{Tb}} # b-intervals that may still overlap upcoming a-intervals
    queue_index::Int           # current comparison position within the queue
end
# Convenience constructor: supply metadata types positionally so the struct's
# type parameters can be inferred from the running iteration state.
function OverlapIteratorState(
    Ta::Type, Tb::Type, next_a::Sa, next_b::Sb, queue::Queue, queue_index::Int) where {Sa, Sb}
    return OverlapIteratorState{Sa,Sb,Ta,Tb}(next_a, next_b, queue, queue_index)
end
# Initial state: an empty queue with the comparison cursor at position 1.
function OverlapIteratorState(Ta::Type, Tb::Type, next_a::Sa, next_b::Sb) where {Sa, Sb}
    queue = Queue{Interval{Tb}}()
    return OverlapIteratorState{Sa,Sb,Ta,Tb}(next_a, next_b, queue, 1)
end
# Start iteration: prime both input streams, build an empty queue of pending
# b-intervals, and delegate to the stateful iterate method.
function Base.iterate(iter::OverlapIterator)
    next_a = iterate(iter.intervals_a)
    next_b = iterate(iter.intervals_b)
    Ta = metadatatype(iter.intervals_a)
    Tb = metadatatype(iter.intervals_b)
    state = OverlapIteratorState(Ta, Tb, next_a, next_b)
    return iterate(iter, state)
end
# Raise an error unless i1 and i2 are correctly ordered under compare_func.
function check_ordered(i1, i2, compare_func)
    isordered(i1, i2, compare_func) || error("intervals are not sorted")
    return nothing
end
# Advance the overlap scan. The queue holds b-intervals that may still overlap
# upcoming a-intervals; `queue_index` is the position within the queue being
# compared against the current a-interval.
function Base.iterate(iter::OverlapIterator, state::OverlapIteratorState{Sa,Sb,Ta,Tb}) where {Sa,Sb,Ta,Tb}
    next_a = state.next_a
    next_b = state.next_b
    queue = state.queue
    queue_index = state.queue_index
    if next_a === nothing
        return nothing
    end
    entry_a, state_a = next_a
    interval_a = convert(Interval{Ta}, entry_a)
    while true
        if queue_index > lastindex(state.queue)
            # end of queue: add more to queue, or advance a
            if next_b === nothing
                next_a = iterate(iter.intervals_a, state_a)
                if next_a === nothing
                    # Fix: was `return break`, which is invalid syntax; a plain
                    # `break` falls through to `return nothing` below, matching
                    # the equivalent exhaustion branch further down.
                    break
                end
                entry_a, state_a = next_a
                next_interval_a = convert(Interval{Ta}, entry_a)
                check_ordered(interval_a, next_interval_a, iter.isless)
                interval_a = next_interval_a
                queue_index = firstindex(state.queue)
            else
                entry_b, state_b = next_b
                interval_b = convert(Interval{Tb}, entry_b)
                if !isempty(queue)
                    check_ordered(queue[end], interval_b, iter.isless)
                end
                push!(queue, interval_b)
                next_b = iterate(iter.intervals_b, state_b)
            end
        else
            entry_a, state_a = next_a
            interval_a = convert(Interval{Ta}, entry_a)
            interval_b = queue[queue_index]
            c = compare_overlap(interval_a, interval_b, iter.isless)
            queue_index += 1
            if c < 0
                # No more possible intersections with interval_a, advance
                next_a = iterate(iter.intervals_a, state_a)
                if next_a === nothing
                    break
                end
                entry_a, state_a = next_a
                next_interval_a = convert(Interval{Ta}, entry_a)
                check_ordered(interval_a, next_interval_a, iter.isless)
                interval_a = next_interval_a
                queue_index = firstindex(state.queue)
            elseif c == 0
                if iter.filter(interval_a, interval_b)
                    return ((interval_a, interval_b),
                            OverlapIteratorState(Ta, Tb, next_a, next_b, queue, queue_index))
                end
            else
                if queue_index == firstindex(queue) + 1
                    # nothing else can intersect front of queue
                    popfirst!(queue)
                end
            end
        end
    end
    # no more intersections found
    return nothing
end
# Return:
#   -1 when `i1` precedes `i2`,
#    0 when `i1` overlaps with `i2`, and
#   +1 when `i1` follows `i2`.
# Sequence names are ordered by the caller-supplied `isless`; intervals on the
# same sequence are compared by their coordinate ranges.
function compare_overlap(i1::Interval, i2::Interval, isless::Function)
    if isless(i1.seqname, i2.seqname)::Bool
        return -1
    elseif isless(i2.seqname, i1.seqname)::Bool
        return +1
    else  # i1.seqname == i2.seqname
        if i1.last < i2.first
            return -1
        elseif i1.first > i2.last
            return +1
        else
            return 0
        end
    end
end
# Faster comparison for `Base.isless`: a single `cmp` on the sequence names
# replaces two `isless` calls. Note that `Base.isless` must be consistent
# with `Base.cmp` for this specialization to be correct.
function compare_overlap(i1::Interval, i2::Interval, ::typeof(Base.isless))
    c = cmp(i1.seqname, i2.seqname)
    if c != 0
        return c
    end
    if i1.last < i2.first
        return -1
    elseif i1.first > i2.last
        return +1
    else
        return 0
    end
end
|
{"hexsha": "dfbd1e71900debb227edefcf4eb5e6d8d81273df", "size": 5721, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/overlap.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/GenomicFeatures.jl-899a7d2d-5c61-547b-bef9-6698a8d05446", "max_stars_repo_head_hexsha": "ceb9cf17264ced957a6c05c7a2c206c8e8c8ef15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/overlap.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/GenomicFeatures.jl-899a7d2d-5c61-547b-bef9-6698a8d05446", "max_issues_repo_head_hexsha": "ceb9cf17264ced957a6c05c7a2c206c8e8c8ef15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/overlap.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/GenomicFeatures.jl-899a7d2d-5c61-547b-bef9-6698a8d05446", "max_forks_repo_head_hexsha": "ceb9cf17264ced957a6c05c7a2c206c8e8c8ef15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.262295082, "max_line_length": 106, "alphanum_fraction": 0.6184233526, "num_tokens": 1463}
|
# Smoke test for g2o's force-stop flag: build a Levenberg-Marquardt optimizer
# and check that SparseOptimizer.force_stop_flag() tracks the shared
# g2o.Flag object registered with set_force_stop_flag.
import sys
import numpy as np
import math
sys.path.append("../../")
from config import Config
import g2o

# Optimizer with an SE3 block solver backed by Eigen's linear solver.
opt = g2o.SparseOptimizer()
block_solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())
solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
opt.set_algorithm(solver)

flag = g2o.Flag()
print('flag: ', flag.value)
# Register the flag; the optimizer reads it to decide whether to stop early.
opt.set_force_stop_flag(flag)
# Toggling flag.value should be reflected by opt.force_stop_flag() since the
# optimizer holds a reference to the same Flag object.
flag.value = False
print('opt flag: ', opt.force_stop_flag())
flag.value = True
print('opt flag: ', opt.force_stop_flag())
|
{"hexsha": "14f2083aa872b771216a053cdec3f8834dd88a9d", "size": 497, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyslam/test/g2o/test_optimization_flag.py", "max_stars_repo_name": "dysdsyd/VO_benchmark", "max_stars_repo_head_hexsha": "a7602edab934419c1ec73618ee655e18026f834f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-11T09:13:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T01:39:56.000Z", "max_issues_repo_path": "pyslam/test/g2o/test_optimization_flag.py", "max_issues_repo_name": "dysdsyd/VO_benchmark", "max_issues_repo_head_hexsha": "a7602edab934419c1ec73618ee655e18026f834f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyslam/test/g2o/test_optimization_flag.py", "max_forks_repo_name": "dysdsyd/VO_benchmark", "max_forks_repo_head_hexsha": "a7602edab934419c1ec73618ee655e18026f834f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.88, "max_line_length": 68, "alphanum_fraction": 0.7364185111, "include": true, "reason": "import numpy", "num_tokens": 131}
|
#!/usr/bin/env python
# coding: utf-8

# Notebook export: concatenate all AEMO PUBLIC_DISPATCHSCADA*.csv files in the
# working directory into a single DataFrame and write it back out as one CSV.

# In[16]:

# import necessary libraries - Monir
import pandas as pd
import os
import glob
import numpy as np

# In[18]:

# assign dataset names - Monir
PUBLIC_DISPATCHSCADA_list_of_files = []
# read all dataset names with starting PUBLIC_DISPATCHSCADA - Monir
PUBLIC_DISPATCHSCADA_list_of_files = glob.glob('PUBLIC_DISPATCHSCADA*.csv')

# In[19]:

# create empty list
dataframes_list = []
list_of_names = PUBLIC_DISPATCHSCADA_list_of_files

# In[20]:

# append datasets into the list.
# skiprows=1 / skipfooter=1 drop the AEMO header and footer rows.
# NOTE(review): skipfooter forces pandas onto the slow python engine and emits
# a ParserWarning; pass engine="python" explicitly to silence it.
for i in range(len(list_of_names)):
    temp_df = pd.read_csv(list_of_names[i], skiprows = 1, skipfooter = 1)
    #dataframes_list[i]=temp_df
    dataframes_list.append(temp_df)

# In[23]:

dataframes_list[0].head()

# In[24]:

dataframes_list[0].tail()

# In[25]:

dataframes_list[0].shape

# In[27]:

len(dataframes_list)

# In[28]:

# NOTE(review): hard-coded index — assumes at least 8891 files were found;
# notebook inspection artifact.
dataframes_list[8890].tail()

# In[29]:

# multiple DataFrames are be merged (Concatenate pandas objects) - Monir
PUBLIC_DISPATCHSCADA_df = pd.concat(dataframes_list)

# In[30]:

PUBLIC_DISPATCHSCADA_df.shape

# In[31]:

# set a specific column of DataFrame as index - Monir
# NOTE(review): the result is not assigned, so this line has no effect on the
# frame written below — presumably intended as a notebook display; assign it
# back if the DUID index is actually wanted.
PUBLIC_DISPATCHSCADA_df.set_index('DUID')

# In[32]:

PUBLIC_DISPATCHSCADA_df.dtypes

# In[33]:

PUBLIC_DISPATCHSCADA_df.info()

# In[34]:

# Export Pandas DataFrame to CSV - Monir
PUBLIC_DISPATCHSCADA_df.to_csv('PUBLIC_DISPATCHSCADA_df.csv', index=False)

# In[ ]:
|
{"hexsha": "f98d4c9ed05fa595d138c20f6b0d30e1c7ab8189", "size": 1435, "ext": "py", "lang": "Python", "max_stars_repo_path": "code-for-fetching-data/PUBLIC_DISPATCHSCADA_DATA-monir.py", "max_stars_repo_name": "mzkhan2000/AEMO-data-Analytics", "max_stars_repo_head_hexsha": "94c2906d8af699b55e95744656841c79fd019f77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-15T00:28:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T00:28:23.000Z", "max_issues_repo_path": "code-for-fetching-data/PUBLIC_DISPATCHSCADA_DATA-monir.py", "max_issues_repo_name": "mzkhan2000/AEMO-data-Analytics", "max_issues_repo_head_hexsha": "94c2906d8af699b55e95744656841c79fd019f77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code-for-fetching-data/PUBLIC_DISPATCHSCADA_DATA-monir.py", "max_forks_repo_name": "mzkhan2000/AEMO-data-Analytics", "max_forks_repo_head_hexsha": "94c2906d8af699b55e95744656841c79fd019f77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.264957265, "max_line_length": 75, "alphanum_fraction": 0.7226480836, "include": true, "reason": "import numpy", "num_tokens": 390}
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Adapted from source code: https://github.com/karlhigley/ranking-metrics-torch
from abc import abstractmethod
from typing import List
import numpy as np
import tensorflow as tf
from merlin_standard_lib import Registry
from .utils import tf_utils
ranking_metrics_registry = Registry("tf.ranking_metrics")
METRIC_PARAMETERS_DOCSTRING = """
scores : tf.Tensor
scores of predicted item-ids.
labels : tf.Tensor
true item-ids labels.
"""
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class RankingMetric(tf.keras.metrics.Metric):
    """
    Metric wrapper for computing ranking metrics@K for session-based task.

    Subclasses implement ``_metric``, which writes a per-row value for each
    cut-off in ``top_ks`` into ``self.accumulator`` in place.

    Parameters
    ----------
    top_ks : list, default [2, 5])
        list of cutoffs
    labels_onehot : bool
        Enable transform the encoded labels to one-hot representation
    """

    def __init__(
        self,
        name=None,
        dtype=None,
        top_ks: List[int] = [2, 5],
        labels_onehot: bool = False,
        **kwargs,
    ):
        # NOTE(review): mutable default argument `top_ks=[2, 5]` is shared
        # across instances; safe only as long as it is never mutated.
        super(RankingMetric, self).__init__(name=name, **kwargs)
        self.top_ks = top_ks
        self.labels_onehot = labels_onehot
        # Store the mean vector of the batch metrics (for each cut-off at topk) in ListWrapper
        self.metric_mean: List[tf.Tensor] = []
        # Per-batch accumulator of shape (batch_size, len(top_ks)); re-sized
        # and zeroed by _build() before each update, then written in place by
        # the subclass _metric() via scatter_nd_update.
        self.accumulator = tf.Variable(
            tf.zeros(shape=[1, len(self.top_ks)]),
            trainable=False,
            shape=tf.TensorShape([None, tf.compat.v1.Dimension(len(self.top_ks))]),
        )

    def get_config(self):
        """Return the Keras serialization config (adds top_ks, labels_onehot)."""
        config = {"top_ks": self.top_ks, "labels_onehot": self.labels_onehot}
        base_config = super(RankingMetric, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def _build(self, shape):
        """Resize the accumulator to (batch_size, len(top_ks)) and zero it."""
        bs = shape[0]
        variable_shape = [bs, tf.compat.v1.Dimension(len(self.top_ks))]
        self.accumulator.assign(tf.zeros(variable_shape))

    def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor, **kwargs):
        # Computing the metrics at different cut-offs
        # init batch accumulator
        self._build(shape=tf.shape(y_pred))
        if self.labels_onehot:
            y_true = tf_utils.tranform_label_to_onehot(y_true, tf.shape(y_pred)[-1])
        self._metric(
            scores=tf.reshape(y_pred, [-1, tf.shape(y_pred)[-1]]),
            labels=y_true,
        )
        # NOTE(review): this appends a reference to the accumulator Variable
        # itself, not a snapshot of its values; every element of metric_mean
        # aliases the same Variable, which _build() re-assigns on the next
        # batch. Confirm whether a copy (tf.identity / .value()) was intended.
        self.metric_mean.append(self.accumulator)

    def result(self):
        # Computing the mean of the batch metrics (for each cut-off at topk)
        return tf.reduce_mean(tf.concat(self.metric_mean, axis=0), axis=0)

    def reset_state(self):
        # Drop accumulated batch results; the accumulator Variable itself is
        # re-initialized lazily by _build() on the next update_state() call.
        self.metric_mean = []

    @abstractmethod
    def _metric(self, scores: tf.Tensor, labels: tf.Tensor, **kwargs):
        """
        Update `self.accumulator` with the ranking metric of
        prediction scores and one-hot labels for different cut-offs `ks`.
        This method should be overridden by subclasses.
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        """
        raise NotImplementedError

    def metric_fn(self, scores: tf.Tensor, labels: tf.Tensor, **kwargs) -> tf.Tensor:
        """
        Compute ranking metric over predictions and one-hot targets for different cut-offs.
        Returns the (batch_size, len(top_ks)) accumulator Variable.
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        """
        self._build(shape=tf.shape(scores))
        self._metric(scores=tf.reshape(scores, [-1, tf.shape(scores)[-1]]), labels=labels, **kwargs)
        return self.accumulator
@ranking_metrics_registry.register_with_multiple_names("precision_at", "precision")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class PrecisionAt(RankingMetric):
    """Precision@K: fraction of the top-k ranked items that are relevant."""

    def __init__(self, top_ks=None, labels_onehot=False, **kwargs):
        super(PrecisionAt, self).__init__(top_ks=top_ks, labels_onehot=labels_onehot, **kwargs)

    def _metric(self, scores: tf.Tensor, labels: tf.Tensor, **kwargs) -> tf.Tensor:
        """
        Compute precision@K for each provided cutoff in ks
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        """
        ks = tf.convert_to_tensor(self.top_ks)
        ks, scores, labels = check_inputs(ks, scores, labels)
        # topk_labels: labels re-ordered by descending score, truncated to max(ks)
        _, _, topk_labels = tf_utils.extract_topk(ks, scores, labels)
        bs = tf.shape(scores)[0]
        for index in range(int(tf.shape(ks)[0])):
            k = ks[index]
            # Build (row, index) coordinate pairs so that column `index` of
            # the accumulator receives this cutoff's per-row precision.
            rows_ids = tf.range(bs, dtype=tf.int64)
            indices = tf.concat(
                [
                    tf.expand_dims(rows_ids, 1),
                    tf.cast(index, tf.int64) * tf.ones([bs, 1], dtype=tf.int64),
                ],
                axis=1,
            )
            # precision@k = (# relevant items among the first k) / k
            self.accumulator.scatter_nd_update(
                indices=indices, updates=tf.reduce_sum(topk_labels[:, : int(k)], axis=1) / float(k)
            )
@ranking_metrics_registry.register_with_multiple_names("recall_at", "recall")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class RecallAt(RankingMetric):
    """Recall@K: fraction of each row's relevant items found in the top-k."""

    def __init__(self, top_ks=None, labels_onehot=False, **kwargs):
        super(RecallAt, self).__init__(top_ks=top_ks, labels_onehot=labels_onehot, **kwargs)

    def _metric(self, scores: tf.Tensor, labels: tf.Tensor, **kwargs) -> tf.Tensor:
        """
        Compute recall@K for each provided cutoff in ks
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        """
        ks = tf.convert_to_tensor(self.top_ks)
        ks, scores, labels = check_inputs(ks, scores, labels)
        _, _, topk_labels = tf_utils.extract_topk(ks, scores, labels)
        # Compute recalls at K
        # num_relevant: per-row count of relevant labels. Rows with none are
        # skipped entirely; their accumulator entries keep the zeros set by
        # _build().
        num_relevant = tf.reduce_sum(labels, axis=-1)
        rel_indices = tf.where(num_relevant != 0)
        rel_count = tf.gather_nd(num_relevant, rel_indices)
        if tf.shape(rel_indices)[0] > 0:
            for index in range(int(tf.shape(ks)[0])):
                k = ks[index]
                rel_labels = tf.cast(
                    tf.gather_nd(topk_labels, rel_indices)[:, : int(k)], tf.float32
                )
                # recall@k = (# relevant among the first k) / (# relevant overall)
                batch_recall_k = tf.cast(
                    tf.reshape(
                        tf.math.divide(tf.reduce_sum(rel_labels, axis=-1), rel_count),
                        (len(rel_indices), 1),
                    ),
                    tf.float32,
                )
                # Ensuring type is double, because it can be float if --fp16
                # Scatter each relevant row's recall into column `index`
                # of the accumulator.
                update_indices = tf.concat(
                    [
                        rel_indices,
                        tf.expand_dims(
                            tf.cast(index, tf.int64) * tf.ones(tf.shape(rel_indices)[0], tf.int64),
                            -1,
                        ),
                    ],
                    axis=1,
                )
                self.accumulator.scatter_nd_update(
                    indices=update_indices, updates=tf.reshape(batch_recall_k, (-1,))
                )
@ranking_metrics_registry.register_with_multiple_names("avg_precision_at", "avg_precision", "map")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class AvgPrecisionAt(RankingMetric):
    """Average precision@K (the per-query quantity that MAP averages)."""

    def __init__(self, top_ks=None, labels_onehot=False, **kwargs):
        super(AvgPrecisionAt, self).__init__(top_ks=top_ks, labels_onehot=labels_onehot, **kwargs)
        # Helper computing precision at every rank position 1..max(top_ks).
        max_k = tf.reduce_max(self.top_ks)
        self.precision_at = PrecisionAt(top_ks=1 + np.array((range(max_k)))).metric_fn

    def _metric(self, scores: tf.Tensor, labels: tf.Tensor, **kwargs) -> tf.Tensor:
        """
        Compute average precision @K for provided cutoff in ks
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        """
        ks = tf.convert_to_tensor(self.top_ks)
        ks, scores, labels = check_inputs(ks, scores, labels)
        topk_scores, _, topk_labels = tf_utils.extract_topk(ks, scores, labels)
        num_relevant = tf.reduce_sum(labels, axis=-1)
        bs = tf.shape(scores)[0]
        # Precision at every rank position, kept only at relevant positions.
        precisions = self.precision_at(topk_scores, topk_labels)
        rel_precisions = precisions * topk_labels
        for index in range(int(tf.shape(ks)[0])):
            k = ks[index]
            tf_total_prec = tf.reduce_sum(rel_precisions[:, :k], axis=1)
            # Denominator is min(num_relevant, k), clamped to at least 1 so
            # rows with no relevant labels do not divide by zero.
            clip_value = tf.clip_by_value(
                num_relevant, clip_value_min=1, clip_value_max=tf.cast(k, tf.float32)
            )
            # (row, index) coordinates targeting column `index` of the accumulator.
            rows_ids = tf.range(bs, dtype=tf.int64)
            indices = tf.concat(
                [
                    tf.expand_dims(rows_ids, 1),
                    tf.cast(index, tf.int64) * tf.ones([bs, 1], dtype=tf.int64),
                ],
                axis=1,
            )
            self.accumulator.scatter_nd_update(indices=indices, updates=tf_total_prec / clip_value)
@ranking_metrics_registry.register_with_multiple_names("dcg_at", "dcg")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class DCGAt(RankingMetric):
    """Discounted cumulative gain@K with logarithmic position discounts."""

    def __init__(self, top_ks=None, labels_onehot=False, **kwargs):
        super(DCGAt, self).__init__(top_ks=top_ks, labels_onehot=labels_onehot, **kwargs)

    def _metric(
        self, scores: tf.Tensor, labels: tf.Tensor, log_base: int = 2, **kwargs
    ) -> tf.Tensor:
        """
        Compute discounted cumulative gain @K for each provided cutoff in ks
        (ignoring ties)
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        log_base : int
            base of the logarithm used for the position discount (default 2).
        """
        ks = tf.convert_to_tensor(self.top_ks)
        ks, scores, labels = check_inputs(ks, scores, labels)
        _, _, topk_labels = tf_utils.extract_topk(ks, scores, labels)
        # Compute discounts
        # The discount for (0-based) rank p is 1 / log_base(p + 2).
        max_k = tf.reduce_max(ks)
        discount_positions = tf.cast(tf.range(max_k), tf.float32)
        discount_log_base = tf.math.log(tf.convert_to_tensor([log_base], dtype=tf.float32))
        discounts = 1 / (tf.math.log(discount_positions + 2) / discount_log_base)
        bs = tf.shape(scores)[0]
        # Compute DCGs at K
        for index in range(len(self.top_ks)):
            k = ks[index]
            # Per-position discounted gains for the first k ranked labels.
            m = topk_labels[:, :k] * tf.repeat(
                tf.expand_dims(discounts[:k], 0), tf.shape(topk_labels)[0], axis=0
            )
            # (row, index) coordinates targeting column `index` of the accumulator.
            rows_ids = tf.range(bs, dtype=tf.int64)
            indices = tf.concat(
                [
                    tf.expand_dims(rows_ids, 1),
                    tf.cast(index, tf.int64) * tf.ones([bs, 1], dtype=tf.int64),
                ],
                axis=1,
            )
            self.accumulator.scatter_nd_update(
                indices=indices, updates=tf.cast(tf.reduce_sum(m, axis=1), tf.float32)
            )
            # Ensuring type is double, because it can be float if --fp16
@ranking_metrics_registry.register_with_multiple_names("ndcg_at", "ndcg")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class NDCGAt(RankingMetric):
    """Normalized DCG@K: DCG of the predictions divided by the ideal DCG."""

    def __init__(self, top_ks=None, labels_onehot=False, **kwargs):
        super(NDCGAt, self).__init__(top_ks=top_ks, labels_onehot=labels_onehot, **kwargs)
        # DCG helper reused for both the actual and the ideal (normalizing) DCG.
        self.dcg_at = DCGAt(top_ks).metric_fn

    def _metric(
        self, scores: tf.Tensor, labels: tf.Tensor, log_base: int = 2, **kwargs
    ) -> tf.Tensor:
        """
        Compute normalized discounted cumulative gain @K for each provided cutoffs in ks
        (ignoring ties)
        Parameters
        ----------
        {METRIC_PARAMETERS_DOCSTRING}
        log_base : int
            base of the logarithm used for the position discount (default 2).
        """
        ks = tf.convert_to_tensor(self.top_ks)
        ks, scores, labels = check_inputs(ks, scores, labels)
        topk_scores, _, topk_labels = tf_utils.extract_topk(ks, scores, labels)
        # Compute discounted cumulative gains. `dcg_at` returns the helper's
        # own accumulator Variable, so snapshot the first result via assign()
        # before the second call overwrites it.
        gains = self.dcg_at(labels=topk_labels, scores=topk_scores, log_base=log_base)
        self.accumulator.assign(gains)
        normalizing_gains = self.dcg_at(labels=topk_labels, scores=topk_labels, log_base=log_base)
        # Prevent divisions by zero: normalize only the rows whose ideal DCG
        # is non-zero, and explicitly zero the others (rows with no relevant
        # labels). BUG FIX: the original computed
        # `tf.where(normalizing_gains == 0, 0.0, gains)` but discarded the
        # result, leaving un-normalized gains in those rows.
        relevant_pos = tf.where(normalizing_gains != 0)
        updates = tf.gather_nd(self.accumulator, relevant_pos) / tf.gather_nd(
            normalizing_gains, relevant_pos
        )
        self.accumulator.assign(
            tf.where(normalizing_gains == 0, tf.zeros_like(normalizing_gains), self.accumulator)
        )
        self.accumulator.scatter_nd_update(relevant_pos, updates)
def check_inputs(ks, scores, labels):
    """Validate tensor ranks and cast dtypes for the metric implementations.

    Checks that ``ks`` is at most 1-D and that ``scores``/``labels`` are both
    2-D with compatible shapes; raises ValueError otherwise. Returns the
    triple cast to (int32, float32, float32).
    """
    if len(ks.shape) > 1:
        raise ValueError("ks should be a 1-dimensional tensor")
    for tensor, message in (
        (scores, "scores must be a 2-dimensional tensor"),
        (labels, "labels must be a 2-dimensional tensor"),
    ):
        if len(tensor.shape) != 2:
            raise ValueError(message)
    scores.get_shape().assert_is_compatible_with(labels.get_shape())
    return (tf.cast(ks, tf.int32), tf.cast(scores, tf.float32), tf.cast(labels, tf.float32))
def process_metrics(metrics, prefix=""):
    """Flatten a list of Keras metrics into a ``{name: tensor}`` dict.

    Ranking metrics (those exposing a truthy ``top_ks``) are expanded into one
    entry per cutoff, named ``"<prefix><base>@<k>"`` where ``<base>`` is the
    metric name up to its first underscore; any other metric maps its own name
    (unprefixed) to ``result()``.
    """
    flattened = {}
    for m in metrics:
        value = m.result()
        cutoffs = getattr(m, "top_ks", None)
        if not cutoffs:
            flattened[m.name] = value
            continue
        base = m.name.split("_")[0]
        for position, cutoff in enumerate(cutoffs):
            flattened[f"{prefix}{base}@{cutoff}"] = tf.gather(value, position)
    return flattened
|
{"hexsha": "bed5070611c6e8fdee8fcb708fe20667a7aed96b", "size": 13815, "ext": "py", "lang": "Python", "max_stars_repo_path": "transformers4rec/tf/ranking_metric.py", "max_stars_repo_name": "Jwmc999/Transformers4Rec", "max_stars_repo_head_hexsha": "e6cdf13a7c0102303c0258120274f88b2d42c9c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 415, "max_stars_repo_stars_event_min_datetime": "2021-09-20T20:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:51:03.000Z", "max_issues_repo_path": "transformers4rec/tf/ranking_metric.py", "max_issues_repo_name": "Jwmc999/Transformers4Rec", "max_issues_repo_head_hexsha": "e6cdf13a7c0102303c0258120274f88b2d42c9c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 128, "max_issues_repo_issues_event_min_datetime": "2021-09-21T07:19:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:08:27.000Z", "max_forks_repo_path": "transformers4rec/tf/ranking_metric.py", "max_forks_repo_name": "Jwmc999/Transformers4Rec", "max_forks_repo_head_hexsha": "e6cdf13a7c0102303c0258120274f88b2d42c9c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2021-09-23T07:25:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T04:17:53.000Z", "avg_line_length": 37.6430517711, "max_line_length": 100, "alphanum_fraction": 0.6144046326, "include": true, "reason": "import numpy", "num_tokens": 3189}
|
(* Hipster/IsaPlanner benchmark property 15: inserting an element into a
   list increases its length by exactly one. *)
theory prop_15
imports Main
        "$HIPSTER_HOME/IsaHipster"
begin

(* Custom cons-list and Peano naturals, shadowing the built-in types. *)
datatype 'a list = Nil2 | Cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

(* Strict less-than on Peano naturals. *)
fun lt :: "Nat => Nat => bool" where
"lt x (Z) = False"
| "lt (Z) (S z) = True"
| "lt (S x2) (S z) = lt x2 z"

(* Length of a list as a Peano natural. *)
fun len :: "'a list => Nat" where
"len (Nil2) = Z"
| "len (Cons2 y xs) = S (len xs)"

(* Ordered insertion into a list of naturals (keeps a sorted list sorted). *)
fun ins :: "Nat => Nat list => Nat list" where
"ins x (Nil2) = Cons2 x (Nil2)"
| "ins x (Cons2 z xs) =
(if lt x z then Cons2 x (Cons2 z xs) else Cons2 z (ins x xs))"

(*hipster lt len ins *)

(* The property: insertion grows the length by one. *)
theorem x0 :
"(len (ins x xs)) = (S (len xs))"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)

end
|
{"author": "moajohansson", "repo": "IsaHipster", "sha": "91f6ea3f1166a9de547722ece6445fe843ad89b4", "save_path": "github-repos/isabelle/moajohansson-IsaHipster", "path": "github-repos/isabelle/moajohansson-IsaHipster/IsaHipster-91f6ea3f1166a9de547722ece6445fe843ad89b4/benchmark/isaplanner/prop_15.thy"}
|
\chapter{Malware and malware samples}
First of all, before addressing the question of what a ``malware sample'' is,
let us analyze what a ``malware'' is.
The National Institute of Standards and Technologies (NIST) throws the
following definition of malware: ``Software or firmware intended to perform an
unauthorized process that will have adverse impact on the confidentiality,
integrity, or availability of an information system. A virus, worm, Trojan
horse, or other code-based entity that infects a host. Spyware and some forms
of adware are also examples of malicious
code.''\cite{Nist2013}
And RFC4949\cite{Rfc4949} defines
``malicious logic'' as ``Hardware, firmware, or software that is intentionally
included or inserted in a system for a harmful purpose.''
A lot of definitions of ``malware'' do
exist\cite{NistGlossary2020} but in this work we propose to use this one to remark some key points of
malware nature:
\begin{tcolorbox}
Any element, hardware, software and/or firmware, determined malicious in
some context and in a state of its life cycle.
\end{tcolorbox}
Notice that proposed malware definition has a strong subjective component
because it is not possible to say if something is malicious in a fully
objective way\cite{LayeredDetectionMethod}. Therefore, an element is not inherently malicious. Instead, an
element is determined malicious in a state of its life cycle and taking into
account the context.
To clarify this idea with an example: the TCP/IP swiss army knife
\texttt{netcat} does not seem to be designed specifically with malicious
purposes so, if context is ignored, it must be classified as goodware. But,
installed by an attacker and with the purpose of giving access to a remote
computer, this tool must clearly be classified as malware. It is in these
cases considered inconclusive (and in some others cases not relevant here) in
which industry uses intermediate terms such as ``riskware'', ``hacktool'',
``grayware'' or ``potentially unwanted program''.
\begin{figure}
\centering
\includegraphics[width=0.99\textwidth]{./figures/Image1.png}
\caption{\label{fig:1} An illustration of a VirusTotal analysis of Netcat.}
\end{figure}
Malware context is absolutely fundamental during analysis\cite{ContextBasedMalware}\cite{LearningFromContext} so, it must to be
included in the collected samples. The concept of malware sample is broad,
since its nature consists of a composition of miscellaneous elements\cite{ResidentViruses}\cite{FilelessAttacks}\cite{AdvacedVolatileThreat}. We must,
therefore, include different kind of elements (memory dumps, files, pictures
and circuit specifications in case of hardware malware, network traffic
captures, environment variables, etc.) and to enrich these elements with
metadata that tells the analyst exactly what each appended element is, adding
further metadata as necessary.
In contradiction with any definition of ``malware'', currently a ``malware
sample'' is any kind of file that, potentially, could be malware. In other
words, and to keep it simple, malware samples are not always malware. Let us
clarify this issue next.
\section{The state of the art in malware samples}
\label{sec:soa}
It is known that intelligence tools simply store files, without any rigor of
selection beyond self-tagging by the user who, while it is true, accepts the
EULA (End User License Agreement), probably desiring strongly that potential
sensitive data are properly encrypted.
In the same way, antimalware solutions also take malware samples from the
system of its clients and, with high probability, all the undetected and
unknown elements, because new detections cannot come from other place than the
knowledge acquired from the unknown and undetected samples. Therefore, those
files are paradoxically also called malware samples. This is not a new
thing. For instance, Kaspersky Antivirus made the news not long ago over
this issue\cite{RussianStoleNsaSpySecrets}\cite{KasperskyToolForSpying}. This
highlights that pointing to Kaspersky Antivirus as an espionage tool, as the
U.S. and the EU did, amounts to accidentally discovering the general lack of
rigor in current malware sample treatment, not a flaw of a specific company or
product. In other words, a treatment which does not take into account, among
other issues that are by no means negligible, data confidentiality and user privacy.
This incident meant great losses, leading Kaspersky to create a ``Transparency
Center''\cite{KasperskyTransparencyCenter} and Eugene Kaspersky, CEO of Kaspersky Lab, to publish many releases like
this: ``We’re even willing to meet with any of them and give them our source
code to thoroughly review it, as we’ve got nothing to
hide''\cite{EugeneKasperskyBlog2017}. Certainly, Kaspersky antivirus is not a spy tool by itself (as happens with
any software piece, it depends on the context, it depends on who uses it), you
can check the entire source code line by line and you will not notice
specifically designed code for espionage, but you will notice the real issue:
samples treatment! And no direct actions were taken in this sense by antivirus
industry because, if it is not well done, it can reduce the protection rate
that, unfortunately, is the only thing the customer demands.
We reproduce as an example two EULA blocks chosen at random, because quoting
them all would be too extensive and repetitive. These are quite similar across
all antivirus companies and products.
\begin{tcolorbox}
\small
12.1 The Software or Support may employ applications and tools to collect
Personal Data, sensitive data or other information about Company and End
Users (including End Users’ name, address, e-mail address and payment
details), their computers, files stored on their computers, or their
computers’ interactions with other computers (including information
regarding network, licenses used, hardware type, model, hard disk size, CPU
type, disk type, RAM size, 32 or 64 bit architecture, operating system
types, versions, locale, BIOS version, BIOS model, total scanners deployed,
database size, system telemetry, device ID, IP address, location, content,
McAfee products installed, McAfee components, processes and services
information, frequency and details of update of McAfee components,
information about third party products installed, extracts of logs created
by McAfee, usage patterns of McAfee products and specific features, etc.)
(collectively,
Data).\footnote{\href{https://www.mcafee.com/enterprise/en-us/assets/legal/end-user-license-agreements-en-us.pdf}{\texttt{https://www.mcafee.com/enterprise/en-us/assets/legal/end-user-license-agreements-en-us.pdf}}}
\tcblower
SECTION B. CONDITIONS REGARDING DATA PROCESSING
Provision of information (if applicable) In order to enhance the protection
of information and improve the quality of the Software and services, You
agree to automatically provide Kaspersky Lab with the following information
of a statistical and administrative nature: information about installed
programs, license data, information on detected threats and infections,
checksums of processed objects, technical information about the Computer and
devices connected to it, information about online activity of the device as
well as You agree that such information can be provided to third-party
service providers. More information is available at help.kaspersky.com. In
order to identify new information security threats and their sources,
enhance the operational protection of Users of the Software, and improve the
quality of the product, You agree to automatically provide Kaspersky Lab
with information specified in the Terms of Use of Kaspersky Security
Network. Also, You can activate and deactivate the Kaspersky Security
Network service at any time in the Software settings window. You further
acknowledge and agree that any information gathered by Rightholder can be
used to track and publish reports on security risk trends at the
Rightholder’s sole and exclusive discretion. If you do not wish to provide
information to the Kaspersky Security Network service, You should not
activate the Kaspersky Security Network service. If service is already
activated, you should immediately de-activate the Kaspersky Security Network
service.
Kaspersky Lab protects the information received in accordance with
applicable governing law and Kaspersky Lab's rules. Data is transmitted over
a secure channel.\footnote{\href{https://products.s.kaspersky-labs.com/homeuser/kav2020/20.0.14.1085abc/english-INT-0.2007.0/3231373433327c44454c7c4e554c4c/eula_en.txt}{\texttt{https://products.s.kaspersky-labs.com/homeuser/kav2020/20.0.14.1085abc/english-INT-0.2007.0/3231373433327c44454c7c4e554c4c/eula_en.txt}}}
\end{tcolorbox}
As you can read from its own words: sensitive data is collected. You can check
not only literature but also the code to verify this.
\section{Antivirus telemetry}
\subsection{Analysis}
Our starting hypothesis is that any antimalware solution is a potential
espionage tool, and that not only files are sent to the server but also
intelligence information that can be used to spy on the users.
Let us do some reverse engineering on an antivirus product in order to know
what kind of information is sent to the company. Let us do this with
\texttt{Malwarebytes} because its telemetry DLL is very easy to identify; this is
actually the main reason to choose this antivirus product for investigating the
issue.
\begin{figure}[t]
\centering
\includegraphics[width=0.85\textwidth]{./figures/ControllerImpl}
  \caption{\label{fig:ControllerImpl} Reverse engineering in
    \texttt{Malwarebytes}.}
\end{figure}
The file responsible of \texttt{Malwarebytes} antivirus telemetry is the
\begin{tcolorbox}
\texttt{TelemetryControllerImpl.dll}
\end{tcolorbox}
shown in Figure~\ref{fig:ControllerImpl}. If we take a look at the exported
functions, we can see what kind of information is collected
(Figure~\ref{fig:ExportedFunction}).
\begin{figure}
\centering
\includegraphics[width=0.85\textwidth]{./figures/ExportedFunction}
\caption{\label{fig:ExportedFunction} Exported functions.}
\end{figure}
This list of exported function names seems to be self-explanatory, and it
reveals without obfuscation what kind of information is sent. It can be
summarized as follows.
\begin{itemize}
\item Malware information (ransomware is treated in its own specific way).
\item Exploits information.
\item Client data.
\item License data.
\item Error information.
\item Quarantine information.
\item Statistics.
\end{itemize}
We can also contrast the summary developed above with each of the endpoint path
names of the \texttt{Malwarebytes} telemetry exposed API, shown in Figure~\ref{fig:api}.
\begin{figure}[t]
\centering
\includegraphics[width=0.80\textwidth]{./figures/Malwarebytes}
\caption{\label{fig:api} \texttt{Malwarebytes} telemetry API.}
\end{figure}
The endpoint path is the following:
\href{https://telemetry.malwarebytes.com/api}{\texttt{https://telemetry.malwarebytes.com/api}}
and, as an off-topic observation, the development endpoint is also publicly
exposed:
\href{https://telemetry.dev.malwarebytes.com/api}{\texttt{https://telemetry.dev.malwarebytes.com/api}}.
We can also examine some function of those (\texttt{SendMwacReport}, for
instance, has an interesting name) in order to understand how are samples
treated in terms of confidentiality. This function sends a JSON file
\texttt{mwcstream.json} using Microsoft Winsocks library.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/mwcstream}
\end{figure}
The function responsible of the ``send'' action is
\begin{tcolorbox}
\texttt{TelemetryControllerImpl::SendMwacReport}
\end{tcolorbox}
as shown in the following disassembly listing:
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/SendMwacReport}
\end{figure}
We can take another function from the list and we can see that all information
is sent in the same way. It uses also a JSON file (in this case it is named
\texttt{malwarestream.json}):
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/malwarestream}
\end{figure}
And finally the appropriate report sending function for this kind of JSON
structure, \texttt{TelementryControllerImpl::ReportMalwareStream} in this
case:
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/ReportMalwareStream}
\end{figure}
Our next step is to debug \texttt{Malwarebytes} in order to see what this JSON
looks like. For instance, we can break at some point inside the function
\texttt{SendOneMwacRecordV2} after analyzing (on demand) a file infector, and
see the information.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/Debug1}
\end{figure}
As you can see, apart of malware sample itself, other information is collected
separately, unencrypted and stored in a non-standard format. We remark that
the computer where these tests have been performed can be easily tracked by
checking the unique identifier \texttt{machine\_id}. Into the binary
\texttt{.rdata} section a series of WMI queries do exist for machine
identification purposes.
\begin{figure}
\centering
\includegraphics[width=0.85\textwidth]{./figures/Debug2}
\caption{\label{fig:malwarebytes-debug} Debugging \texttt{Malwarebytes}.}
\end{figure}
\begin{tcolorbox}
\small
\begin{verbatim}
SELECT Index, MACAddress, Name FROM Win32_NetworkAdapter
where AdapterTypeId=0
SELECT UUID FROM Win32_ComputerSystemProduct
SELECT processorID FROM win32_processor
SELECT SerialNumber FROM Win32_BIOS
SELECT Signature FROM Win32_DiskDrive WHERE Index=%u
SELECT serialNumber FROM Win32_PhysicalMemory
SELECT SerialNumber FROM Win32_DiskDrive WHERE Index=%u
\end{verbatim}
\end{tcolorbox}
Since Microsoft Winsock functions are used for network communications, we can
also break at the \texttt{Send} and \texttt{SendTo} functions and show the buffer
content before telemetry data are sent
(Figure~\ref{fig:malwarebytes-debug}). It is as easy as stated because the
\texttt{Malwarebytes} self-defense is not enabled just after installing the
product, a really hilarious security bug which can be used to attack the
debugger without bypassing any kind of protection.
The file responsible of Malwarebytes antivirus cloud functionality is the
\texttt{CloudControllerImpl.dll}.
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth]{./figures/CloudcontrollerImpl}
\end{figure}
Let us follow the same analysis steps with this file. Now, export listing looks as
follows:
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth]{./figures/ExportListing}
\end{figure}
There are five functions at the very end of the screenshot named starting by
the prefix \texttt{Submit}. Those functions are responsible of submitting
files and memory chunks to the \texttt{Malwarebytes} cloud storage, but those
functions are only called when upgrading to the premium version of the
product\cite{MalwarebytesUserGuide}.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/MalwareBytesFree}
\end{figure}
The reader should know what ``exploit'', ``ransomware'', and ``rootkit'' mean,
but there are two function names which maybe seem a little stranger:
\noindent\texttt{SubmitDopplegangDetection} and \texttt{SubmitShurikenDetection}
because they are \texttt{Malwarebytes} specific terms. The first one,
\texttt{SubmitDopplegangDetection}, as you can see in the following picture,
is used to send ``scam'' detections:
\begin{figure}[h]
\centering
\includegraphics[width=0.75\textwidth]{./figures/Scamdetection}
\end{figure}
But \texttt{SubmitShurikenDetection} is much more interesting for us, because
it is used to send heuristically detected samples, meaning, by definition,
that false positives will inevitably happen (goodware files are potentially
sent to the cloud storage).
\begin{figure}[h]
\centering
\includegraphics[width=0.75\textwidth]{./figures/SubmitShurikenDetection}
\end{figure}
All of this information is submitted to the following endpoints:
\begin{tcolorbox}
\small
\href{https://bactem-staging.mwbsys.com/files}{\texttt{https://bactem-staging.mwbsys.com/files}} \\
\href{https://staging-blitz.mb-cosmos.com/}{\texttt{https://staging-blitz.mb-cosmos.com/}} \\
\href{https://blitz.mb-cosmos.com/}{\texttt{https://blitz.mb-cosmos.com/}} \\
\href{https://static-blitz.mb-cosmos.com/}{\texttt{https://static-blitz.mb-cosmos.com/}}
\\
\href{https://blitz.mb-cosmos.com/}{\texttt{https://blitz.mb-cosmos.com/}} \\
\href{https://static-blitz.mb-cosmos.com/}{\texttt{https://static-blitz.mb-cosmos.com/}}
\end{tcolorbox}
If the reader is interested about where those files are stored, we can also
answer this question. These files are stored in the server referenced by the
following IP address: \texttt{3.229.68.76} and it corresponds to Amazon Data
Services NoVa:
%\begin{figure}[h]
% \centering
% \includegraphics[width=0.75\textwidth]{./figures/DNScheck}
%\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.75\textwidth]{./figures/Lookup}
\end{figure}
This is an important point because the code is developed using Amazon S3
API\cite{AmazonS3RestApi}.
The following is a little fragment of the \texttt{Shuriken} sending function:
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/Shuriken}
\end{figure}
As you can see, \texttt{x-amz-meta-payloadtype} is the ``payloadtype'' custom
metadata parameter prefixed as specified by Amazon S3 API, and the rest means
that a \texttt{Shuriken} sample submission is happening.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/Shuriken2}
\end{figure}
Thus, we have identified the mechanism used to send samples and telemetry data
to the \texttt{Malwarebytes} cloud server. Another interesting thing is to
know how file and memory samples are chosen by this product in order to keep
them in the server. Following the natural analysis flow, some interesting
functions are located into \texttt{MBAMService.exe} which finally relies on
\texttt{CloudControllerImpl.dll} where all the hard job takes place.
\begin{figure}[h]
\centering
\includegraphics[width=0.75\textwidth]{./figures/MBAMService}
\end{figure}
There are a lot of callbacks into this binary image. Two of them make
reference to cloud submission and telemetry submission, specifically:
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/Callbacks}
\end{figure}
If reader is really interested about how \texttt{Malwarebytes} heuristically
chooses files to be sent to the cloud storage, remember that when using
heuristics, by definition, no categorical conclusions are possible so false
positives will happen. Therefore, confidential files in addition to potential
malware embedding confidential data could be sent to the server. It is
recommended to the restless reader to disassemble
\texttt{CloudControllerImpl.dll} by his/her own to perform a further analysis.
At this point, it is necessary to install \texttt{Malwarebytes} Premium in
order to investigate the file submission features. We obtained a trial\cite{MalwarebytesPremium} license
for a limited period. In this Premium Trial version, real-time features are
available. We noticed that this \texttt{Malwarebytes} version has more sample
submission routines, and identified the following sample submission exports in
the \texttt{CloudControllerImpl.dll} library file:
\begin{enumerate}
\item \texttt{SubmitDDSSample}
\item \texttt{SubmitDopplegangDetection}
\item \texttt{SubmitExploitData}
\item \texttt{SubmitMWACDetection}
\item \texttt{SubmitQuarantineRestoreItem}
\item \texttt{SubmitRansomwareDetection}
\item \texttt{SubmitRootkitDetection}
\item \texttt{SubmitShurikenDetection}
\end{enumerate}
The first thing we are interested in to investigate is how samples are
submitted. To this end, we executed a portable executable file infector and
broke into the \texttt{send} function of \texttt{WS2\_32.dll} to see how the
buffer content looks like.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/Submission}
\end{figure}
As you can see in the JSON structure of the previous image, the sample file is stored into the following location:
\noindent{\small\verb|{C:\PROGRAMDATA\MALWAREBYTES\MBAMSERVICE\tmp\{hash_sha256}\{hash_sha256}.zip|}
\noindent This is a PKZIP file encrypted with the typical malware sample
password: ``infected''\cite{ZeltserShareMalware}. And it will be sent to the Amazon server
\begin{tcolorbox}
\texttt{btoc-samples-prod.s3.amazonaws.com}.
\end{tcolorbox}
The hypothesis that files are submitted is confirmed. The other thing we are
interested in to investigate is if \texttt{Malwarebytes} indiscriminately
submits all files or not. Since a ``hello world'' program should never lead
to a false positive, if it is submitted, it can be assumed that submissions
are done indiscriminately.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/HelloWorld}
\end{figure}
The experiment result indicated that, when \texttt{helloworld.exe} was
executed, some information was sent to \texttt{Malwarebytes} by using
\texttt{WS2\_32.dll} library \texttt{send} function with the call flow coming
from somewhere inside \texttt{RTPControllerImpl.dll}. This library contains
the following strings: \pagebreak
\begin{figure}[h]
\centering
\includegraphics[width=0.85\textwidth]{./figures/Strings}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.6\textwidth]{./figures/ExperimentJSON}
\caption{\label{fig:ExperimentJSON} Sample of the JSON experiment.}
\end{figure}
A function of \texttt{RTPControllerImpl} uses it to generate the
JSON. Experiment JSON looks as in Figure~\ref{fig:ExperimentJSON}. So,
\texttt{Malwarebytes} Premium sends file samples only if it suspects a file
could be infected (a \texttt{sample\_upload\_reason} field does exist in the
JSON structure). If the file is not a suspicious one, \texttt{Malwarebytes}
Premium sends information about the file (like the file path and something
like this) but not the file content itself. Anyway, subjectively, in our
opinion, executable files leak a lot of information about the user behavior.
\subsection{Conclusions}
The reverse engineering of the \texttt{Malwarebytes} antivirus products
reveals that these are not especially intrusive. They are classical
antiviruses with cloud features which send suspicious files and telemetry to
the cloud server just for purposes of comparison.
Our analysis indicates that some files (but, with congratulations to
\texttt{Malwarebytes}, not an indiscriminate massive volume of goodware ones)
are sent to the company's cloud server powered by Amazon.
Sample submission is done by using a PKZIP file protected with the typical
malware sample password: ``infected''\cite{ZeltserShareMalware}. And metadata information (telemetry) is
sent separately using a JSON format. On the other hand, the most important
thing is that accessed goodware information is sent, the user is unambiguously
identified and some information is collected apart of the sample file itself.
If such collected information were accessed, for instance, by a third party
like an unethical employee or a government intelligence agency (which maybe
collaborates with the antivirus company and could take advantage of this fact)
they could track a specific user (or users, in general) and combine this
information with other databases (including another antivirus products)\cite{KasperskyBoundariesOfTrust}.
\texttt{Malwarebytes} could substantially improve its system by removing both,
the compressed samples system and the telemetry data. Instead, it could
progressively add sample and telemetry support to the UMSE dynamic linking
library and call to it before submitting samples.
\section{Cyber threat hunting telemetry and samples submission}
\subsection{Analysis}
Cyber threat hunting is defined as follows: ``the process of proactively and
iteratively searching through networks to detect and isolate advanced threats
that evade existing security solutions''.
In practice, cyber threat hunting means to capture as many events as possible,
correlate them and send reports of them all the time. All the magic can be
summarized in one sentence: everything can be detected if everything is
real-time reviewed.
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{./figures/WindowsDefender}
\caption{\label{fig:WindowsDefender} A screenshot of WindowsDefender.}
\end{figure}
For instance, Windows Defender Advanced Threat Protection captures the
following information\cite{MicrosoftDefenderAtp2020}, as shown in Figure~\ref{fig:WindowsDefender}:\footnote{\href{https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/advanced-hunting-schema-reference}{https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/advanced-hunting-schema-reference}}
\begin{enumerate}
\item Alerts on Microsoft Defender Security Center.
\item Machine information, including OS information.
\item Network properties of machines, including adapters, IP and MAC
addresses, as well as connected networks and domains.
\item Process creation and related events.
\item Network connection and related
events.
\item File creation, modification, and other file system events.
\item Creation and modification of registry entries.
\item Sign-ins and other authentication events.
\item DLL loading events.
\item Multiple event types, including events triggered by security controls
such as Windows Defender Antivirus and exploit protection.
\end{enumerate}
The idea is the same for all products. Event correlation is an important
feature. Check Figure~\ref{fig:CarbonBlack}, as an example, taken out of from the
\texttt{Carbon Black}\cite{CarbonBlack2017} tool.
\begin{figure}
\centering
\includegraphics[width=0.99\textwidth]{./figures/CarbonBlack}
\caption{\label{fig:CarbonBlack} Source: \href{https://www.carbonblack.com/wp-content/uploads/2017/04/BanHash-B.png}{\texttt{https://www.carbonblack.com/wp-content/uploads/2017/04/BanHash-B.png}}}
\end{figure}
\subsection{Conclusion}
It is hard to imagine a more aggressive kind of security tools in terms of
user data confidentiality. More detections but unjustifiably much less
confidentiality. You can check pictures publicly available in Google of this
kind of tools, most of them will be carefully chosen by the manufacturer
(meaning that the aggressive behavior will be as hidden as possible) but if
one watches the dashboard containing event logs of those tools, one will soon
realize how powerful they are in terms of surveillance. You will see a steady
stream of events unrelated to malware. Threat hunting tools could also
substantially improve their system by adding events, files, processes,
registry keys and support for system elements into the UMSE dynamic linking
library, and by calling to its API before submitting collected data.
\section{Operating system telemetry and samples submission}
\subsection{Analysis}
Next, we want to explore what happens if there are no additional antivirus
software installed in the computer. Must the user be worried about security
products that maybe come built-in the operating system? And, if these are
disabled, must the user be worried about other security products installed in
the local area network (LAN) because of firewall telemetry and sample
submission capabilities?
The Microsoft Windows telemetry DLL file is located in
\begin{tcolorbox}
\verb|C:\Windows\System32\generaltel.dll|.
\end{tcolorbox}
\begin{figure}[h]
\centering
\includegraphics[width=0.6\textwidth]{./figures/WindowsTelemetry}
\end{figure}
And you will readily notice that miscellaneous antivirus, antispyware and
firewall information is sent to Microsoft, where \texttt{Firewall information}
means network information. It is possible to disable all the telemetry but
maybe your LAN neighbor does not do that.
\begin{figure}[h]
\centering
\includegraphics[width=0.99\textwidth]{./figures/MiscellaneousAntivirus}
\end{figure}
The cloud submission features of sample files also do exist and they are
customizable by the user. It is possible to check easily (without reverse
engineering anything) what kind of information is sent to Microsoft because,
due to open criticism, they released a tool named \texttt{Diagnostic Data
Viewer}. You can use it immediately for those purposes (Figure~\ref{fig:libUmse}).
\begin{figure}[h]
\centering
\includegraphics[width=0.75\textwidth]{./figures/libUmse}
\caption{\label{fig:libUmse} libUmse.dll}
\end{figure}
In the lab computer used for this work, minimum telemetry is allowed which
means that antimalware telemetry is disabled but nevertheless crashes allow
Microsoft to follow the development of this thesis in real-time. Someone might
think that this assessment is unrelated to malware but information about
crashes is also used for this\cite{MicrosoftRdpCrashes2019} (see Figure~\ref{fig:rdp})
purpose.\footnote{\href{https://www.microsoft.com/security/blog/2019/11/07/the-new-cve-2019-0708-rdp-exploit-attacks-explained/}{https://www.microsoft.com/security/blog/2019/11/07/the-new-cve-2019-0708-rdp-exploit-attacks-explained/}}
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{./figures/RDP}
\caption{\label{fig:rdp}}
\end{figure}
\subsection{Conclusion}
Some operating systems contain built-in antimalware/firewall/threat hunting
solutions. It is difficult for the common user to disable telemetry
capabilities. A minimum telemetry is required by the operating system (and
seems to be used also for malware purposes). Antivirus, antispyware and
firewall information is sent meaning that also LAN network information can be
revealed. Operating system antimalware/firewall/threat hunting solutions
could also substantially improve its system by adding events, files,
processes, registry keys and system elements support into the UMSE dynamic
linking library and calling to it before submitting collected data.
\section{Intelligence products}
\subsection{Analysis}
Malware intelligence tools store goodware in, maybe, greater volume than
malware. \texttt{VirusTotal} is a service which allows you to check if a file
is malware or not by querying a lot of antivirus engines. If you search,
e.g., ``tutorial pdf'' in Google (a reasonably random goodware file), the
first result, in this case, is a Python Tutorial PDF. Finally, if you check if
this file does exist in \texttt{VirusTotal}, you can see that it does (really,
a lot of files are in \texttt{VirusTotal}, this can be checked
straightforwardly). So, any person with a \texttt{VirusTotal} paid API account
can download this file, make advanced searches in binaries (including Yara
search) and easily locate files\cite{VirusTotalFileSearch2020}. It is true that people who submit files
accept the EULA but, as you can see, if you have the paid \texttt{VirusTotal}
API you can download books, software, movies \dots. Those old determined
goodware files, some even signed, will be never removed from storage.
\subsection{Conclusion}
Malware intelligence tools store goodware in, maybe, greater volume than
malware\cite{IntrusionDetectionNetworks}. Those are not malware samples, those are simple, ordinary files.
Intelligence products could also substantially improve its system by adding
encryption support for uncommon and copyrighted likely goodware files (because
those files are not interesting for malware analysis in any way, not even to
be compared with infected files) into the UMSE dynamic linking library. In
this way, only files detected by at least one antimalware engine or a reliable
human should be decrypted and opened to the world.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "thesis"
%%% End:
|
{"hexsha": "4eb720e60b9494d0a6dc7ed00ec45742a27155a5", "size": 32092, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Master Thesis/malware.tex", "max_stars_repo_name": "dalvarezperez/umse", "max_stars_repo_head_hexsha": "253b103b0955e20ca1437a2b28d93462f97e4810", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-02-24T06:30:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T17:26:23.000Z", "max_issues_repo_path": "Master Thesis/malware.tex", "max_issues_repo_name": "dalvarezperez/umse", "max_issues_repo_head_hexsha": "253b103b0955e20ca1437a2b28d93462f97e4810", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Master Thesis/malware.tex", "max_forks_repo_name": "dalvarezperez/umse", "max_forks_repo_head_hexsha": "253b103b0955e20ca1437a2b28d93462f97e4810", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-11T19:43:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-11T19:43:59.000Z", "avg_line_length": 49.6780185759, "max_line_length": 367, "alphanum_fraction": 0.7979558769, "num_tokens": 7865}
|
import logging
import numpy as np
from monai.inferers import SlidingWindowInferer
from monai.transforms import (
Activationsd,
AsDiscreted,
CenterSpatialCropd,
CropForegroundd,
EnsureChannelFirstd,
LoadImaged,
NormalizeIntensityd,
Orientationd,
RandAffined,
RandFlipd,
RandHistogramShiftd,
Spacingd,
ToTensord,
)
from monailabel.utils.train.basic_train import BasicTrainTask
logger = logging.getLogger(__name__)
class MyTrain(BasicTrainTask):
    """MONAI Label training task: defines the train/validation transform
    pipelines and the sliding-window inferer for 3D image/label segmentation."""

    def train_pre_transforms(self):
        """Build the pre-processing and augmentation pipeline for training samples."""
        # Deterministic geometry/intensity normalisation (shared with validation).
        preprocessing = [
            LoadImaged(keys=("image", "label")),
            EnsureChannelFirstd(keys=("image", "label")),
            Spacingd(
                keys=("image", "label"),
                pixdim=(1.0, 1.0, 1.0),
                mode=("bilinear", "nearest"),
            ),
            Orientationd(keys=["image", "label"], axcodes="RAS"),
            NormalizeIntensityd(keys="image"),
        ]
        # Random augmentation: intensity shift, foreground crop, flip, affine.
        augmentation = [
            RandHistogramShiftd(keys="image", num_control_points=8, prob=0.8),
            CropForegroundd(keys=["image", "label"], source_key="image"),
            RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0),
            RandAffined(
                keys=["image", "label"],
                mode=("bilinear", "nearest"),
                prob=1.0,
                spatial_size=(128, 128, 128),
                rotate_range=(0, 0, np.pi / 15),
                scale_range=(0.1, 0.1, 0.1),
            ),
        ]
        return preprocessing + augmentation + [ToTensord(keys=("image", "label"))]

    def train_post_transforms(self):
        """Post-processing for training outputs: softmax the prediction,
        argmax it, then one-hot encode both prediction and label."""
        return [
            ToTensord(keys=("pred", "label")),
            Activationsd(keys="pred", softmax=True),
            AsDiscreted(
                keys=("pred", "label"),
                argmax=(True, False),  # argmax only the prediction, not the label
                to_onehot=True,
                n_classes=4,
            ),
        ]

    def val_pre_transforms(self):
        """Deterministic pre-processing for validation samples (no augmentation);
        a fixed center crop replaces the random training crops."""
        return [
            LoadImaged(keys=("image", "label")),
            EnsureChannelFirstd(keys=("image", "label")),
            Spacingd(
                keys=("image", "label"),
                pixdim=(1.0, 1.0, 1.0),
                mode=("bilinear", "nearest"),
            ),
            Orientationd(keys=["image", "label"], axcodes="RAS"),
            NormalizeIntensityd(keys="image"),
            CenterSpatialCropd(keys=["image", "label"], roi_size=[128, 128, 128]),
            ToTensord(keys=("image", "label")),
        ]

    def val_inferer(self):
        """Sliding-window inference over full volumes using the training patch size."""
        return SlidingWindowInferer(roi_size=(128, 128, 128))
|
{"hexsha": "adff4273f89578b51e77b51612d08004d00af30e", "size": 2526, "ext": "py", "lang": "Python", "max_stars_repo_path": "segmentation_heart_ventricles/lib/train.py", "max_stars_repo_name": "pritesh-mehta/MONAILabel-Apps", "max_stars_repo_head_hexsha": "b7f89f8a4cfbdbd788616e9fb95cd7427a9d729b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "segmentation_heart_ventricles/lib/train.py", "max_issues_repo_name": "pritesh-mehta/MONAILabel-Apps", "max_issues_repo_head_hexsha": "b7f89f8a4cfbdbd788616e9fb95cd7427a9d729b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "segmentation_heart_ventricles/lib/train.py", "max_forks_repo_name": "pritesh-mehta/MONAILabel-Apps", "max_forks_repo_head_hexsha": "b7f89f8a4cfbdbd788616e9fb95cd7427a9d729b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1851851852, "max_line_length": 82, "alphanum_fraction": 0.5316706255, "include": true, "reason": "import numpy", "num_tokens": 614}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
@copyright 2016 J.T. Lapreste
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_IROUND2EVEN_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_IROUND2EVEN_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{

 /*!
    @ingroup group-arithmetic

    Function object implementing iround2even capabilities.

    Computes the integer conversion of the round2even (round half to even)
    of its parameter.

    @par semantic:
    For any given value @c x of type @c T:

    @code
    as_integer_t<T> r = iround2even(x);
    @endcode

    is similar to:

    @code
    as_integer_t<T> r = toints(round2even(x));
    @endcode

    @par Note:
    Speed can be gained by using iround2even(x, fast_), which uses @ref
    toint in place of @ref toints. Be aware, however, that large values may
    not be converted correctly, and that invalid entries lead to undefined
    results.

  **/
  const boost::dispatch::functor<tag::iround2even_> iround2even = {};
} }
#endif
#include <boost/simd/function/scalar/iround2even.hpp>
#include <boost/simd/function/simd/iround2even.hpp>
#endif
|
{"hexsha": "8de1d3716eae487ad862eada00fbb22bb10da515", "size": 1417, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/iround2even.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/iround2even.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/iround2even.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7358490566, "max_line_length": 100, "alphanum_fraction": 0.623853211, "num_tokens": 329}
|
# -*- coding: utf-8 -*-
"""ai__Final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bFFlIU-MF56Bt6dX1edpg14OptUTmTSi
# SUBMITTED BY :
SUNIDHI SINGLA 101983052
In this notebook, I have made an attempt to get a simple text classification model up and running. In this, amazon fine food review data from Kaggle (Link for the dataset-https://www.kaggle.com/simonerossi/sentiment-analysis-on-fine-food-reviews)
# SENTIMENT ANALYSIS
### Importing all the Libraries
"""
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import nltk
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
nltk.download('wordnet')
nltk.download('stopwords')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from nltk.tokenize import TweetTokenizer
import string
"""#### Reading Dataset"""
df=pd.read_csv('Reviews.csv')
print(df.info())
df['sentiment'] = np.where(df.Score>3, 1,np.where(df.Score<3, -1,0))
df.rename(columns={'Text': 'review'}, inplace=True)
df=df[["review","sentiment"]]
print(df.sentiment.value_counts())
df
"""# Handling Missing Values"""
#Finding if there is any row with NULL value
print("Total Samples which do not have reviews: ",len(df[df["review"]==None]))
print("Total Samples which do not have sentiments: ",len(df[df["sentiment"]==None]))
#If there is any missing value then drop that row
df.dropna(inplace=True)
df
"""# Undersampling"""
shuffled_df = df.sample(frac=1,random_state=4)
df1 = shuffled_df.loc[shuffled_df['sentiment'] == 0]
df1_temp = shuffled_df.loc[shuffled_df['sentiment'] == 1].sample(n=42640,random_state=42)
df2_temp = shuffled_df.loc[shuffled_df['sentiment'] == -1].sample(n=42640,random_state=42)
normalized_df = pd.concat([df1_temp,df1,df2_temp])
plt.figure(figsize=(8, 8))
sns.countplot('sentiment', data=normalized_df)
plt.title('Count')
plt.show()
df=normalized_df
print(df.sentiment.value_counts())
#Mapping positive to 1 and negative to -1 and neutral to 0
df.loc[df['sentiment'] == 1, 'sentiment'] = "positive"
df.loc[df['sentiment'] == -1, 'sentiment'] = "negative"
df.loc[df['sentiment'] == 0, 'sentiment'] = "neutral"
df
"""### Data Cleaning """
#Data Cleaning
def process_string(text):
text = re.sub(r"https:\/\/.*[\r\n]*","",text) #remove any urls from the text
text = re.sub(r"www\.\w*\.\w\w\w","",text) #remove any urls starting from www. in the text
text = re.sub(r"<[\w]*[\s]*/>","",text) #remove any html elements from the text
text = re.sub(r"[\.]*","",text) #remove prediods marks
text= re.sub('[0-9\n]',' ',text) # Remove numbers
text = re.sub(r"[,.;@#?!&$_]+\ *", " ", text) #Remove special character
tokenizer = TweetTokenizer(preserve_case=False,strip_handles=True,reduce_len=True) #initilze tweet tokenizer
text_tokens = tokenizer.tokenize(text) #tokenize text
porter_stemmer = PorterStemmer() #intizlize porter stemmer
english_stopwords = stopwords.words("english") #get english stopwords
english_stopwords.remove('not')
temp = ["br","href", 'http', 'just', 'amazon', 'product','time','year', 'tried','i\'ve']
english_stopwords.extend(temp)
cleaned_text_tokens = [] # a list to hold cleaned text tokens
for word in text_tokens:
if((word not in english_stopwords) and #remove stopwords
(word not in string.punctuation)): #remove punctuation marks
stemmed_word = porter_stemmer.stem(word) #get stem of the current word
cleaned_text_tokens.append(stemmed_word) #appened stemmed word to list of cleaned list
#combine list into single string
clean_text = " ".join(cleaned_text_tokens)
return clean_text
df["review"] = df["review"].apply(process_string)
df
"""# Word Cloud after Data Cleaning"""
#After Cleaning and Preprocessing, lets make a Word Cloud
wordcloud = WordCloud( background_color="white",width=2000,height=2000, max_words=2000).generate(str(df))
plt.figure(1,figsize=(10, 10))
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
"""### Tf-Idf"""
vectorizer = TfidfVectorizer() #term frequency–inverse document frequency
X = vectorizer.fit_transform(df.review)
print(vectorizer.get_feature_names())
print(X)
"""# Training and testing data"""
#Splitting to training and testing
tfidf_train,tfidf_test,sentiment_values_train,sentiment_values_test=train_test_split(X,df.sentiment,test_size=0.20,random_state=0)
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import f1_score
# 12-fold stratified CV preserves the class balance achieved by undersampling.
kfold = StratifiedKFold(n_splits=12)
# Trackers for the best model across the model cells below.
max_f1score=0
model_name=''
"""# Logistic Regression"""
lr = LogisticRegression(random_state=0, C=0.82286, max_iter=2000)
cv = cross_val_score(lr,tfidf_train,sentiment_values_train,cv=kfold)
print("Cross Validation Scores :",cv)
print("Mean of Cross Validation score :",cv.mean()*100)
lr.fit(tfidf_train,sentiment_values_train)
sentiment_values_pred_lr=lr.predict(tfidf_test)
report=classification_report(sentiment_values_test, sentiment_values_pred_lr,target_names=['negative','positive','neutral'])
print("Classification Report: \n",report)
f1=metrics.f1_score(sentiment_values_pred_lr,sentiment_values_test,average='weighted')*100
cm=confusion_matrix(sentiment_values_test, sentiment_values_pred_lr)
print("Confusion Matrix :\n",cm)
print('The accuracy of the Logistic Regression:',metrics.accuracy_score(sentiment_values_pred_lr,sentiment_values_test)*100)
print('The f1_score of the Logistic Regression:',f1)
plot_confusion_matrix(lr,tfidf_test , sentiment_values_test)
plt.show()
if(f1>max_f1score):
max_f1score=f1
model_name=LogisticRegression
"""# SVM"""
linear_svc = LinearSVC(C=0.5, random_state=42)
linear_svc.fit(tfidf_train, sentiment_values_train)
predict = linear_svc.predict(tfidf_test)
cv = cross_val_score(linear_svc,tfidf_train,sentiment_values_train,cv=kfold) #It takes the features df and target y , splits into k-folds (which is the cv parameter), fits on the (k-1) folds and evaluates on the last fold. It does this k times, which is why you get k values in your output array.Helps to determine hyperparameters for the model which will reult in lowest test errors.
print("Cross Validation Scores :",cv)
print("Mean of Cross Validation score :",cv.mean()*100)
report=classification_report(sentiment_values_test, predict,target_names=['negative','positive','neutral'])
print("Classification Report: \n",report)
matrix=confusion_matrix(sentiment_values_test, predict)
print("Confusion Matrix: \n",matrix)
accuracy=accuracy_score(sentiment_values_test, predict)*100
print("Accuracy: \n", accuracy)
f1=f1_score(sentiment_values_test, predict,average='weighted')*100
print("F1 score: \n",f1)
if(f1>max_f1score):
max_f1score=f1
model_name=linear_svc
plot_confusion_matrix(linear_svc,tfidf_test , sentiment_values_test)
plt.show()
"""## Bernoulli Naive Bayes"""
model_nb = BernoulliNB()
model_nb.fit(tfidf_train, sentiment_values_train)
predict=model_nb.predict(tfidf_test)
cv = cross_val_score(model_nb,tfidf_train,sentiment_values_train,cv=kfold) #It takes the features df and target y , splits into k-folds (which is the cv parameter), fits on the (k-1) folds and evaluates on the last fold. It does this k times, which is why you get k values in your output array.Helps to determine hyperparameters for the model which will reult in lowest test errors.
print("Cross Validation Scores :",cv)
print("Mean of Cross Validation score :",cv.mean()*100)
report=classification_report(sentiment_values_test, predict,target_names=['negative','positive','neutral'])
print("Classification Report: \n",report)
matrix=confusion_matrix(sentiment_values_test, predict)
print("Confusion Matrix: \n",matrix)
accuracy=accuracy_score(sentiment_values_test, predict)*100
print("Accuracy: \n", accuracy)
f1score=f1_score(sentiment_values_test, predict,average='weighted')*100
print ("F1 score: \n", f1score)
plot_confusion_matrix(model_nb,tfidf_test , sentiment_values_test)
plt.show()
if(f1score>max_f1score):
max_f1score=f1score
model_name=BernoulliNB
"""# MultinomialNB"""
mnb = MultinomialNB(alpha=2)
cv = cross_val_score(mnb,tfidf_train,sentiment_values_train,cv=kfold)
print("Cross Validation Scores :",cv)
print("Mean of Cross Validation score :",cv.mean()*100)
mnb.fit(tfidf_train,sentiment_values_train)
y_pred_mnb=mnb.predict(tfidf_test)
cm=confusion_matrix(sentiment_values_test, y_pred_mnb)
f1score=metrics.f1_score(y_pred_mnb,sentiment_values_test,average='weighted')*100
print("Confusion Matrix :\n",cm)
print('The accuracy of the Naive Bayes :', metrics.accuracy_score(y_pred_mnb,sentiment_values_test)*100)
print('The f1 score of the Naive Bayes :', f1score)
print("\n",metrics.classification_report(y_pred_mnb,sentiment_values_test))
plot_confusion_matrix(mnb,tfidf_test , sentiment_values_test)
plt.show()
if(f1score>max_f1score):
max_f1score=f1score
model_name=MultinomialNB
"""## Decision Tree Classifier"""
maximum_tree_depth= 15
model_dt = DecisionTreeClassifier(max_depth=maximum_tree_depth)
model_dt.fit(tfidf_train,sentiment_values_train)
predict = model_dt.predict(tfidf_test)
report=classification_report(sentiment_values_test, predict,target_names=['negative','positive','neutral'])
print("Classification Report: \n",report)
matrix=confusion_matrix(sentiment_values_test, predict)
print("Confusion Matrix: \n",matrix)
accuracy=accuracy_score(sentiment_values_test, predict)*100
print("Accuracy: \n", accuracy)
f1=f1_score(sentiment_values_test,predict,average='micro')*100
print ("F1 score: \n", f1score)
plot_confusion_matrix(model_dt,tfidf_test , sentiment_values_test)
plt.show()
if(f1>max_f1score):
max_f1score=f1
model_name=DecisionTreeClassifier
"""#### Finding Best among above all Discussed Models """
print('Max F1_score is of following model')
print('F1-Score=',max_f1score,'\nModel name : ',model_name)
#MODEL TESTING
newreview = "Product arrived labeled as Jumbo Salted Peanuts...the peanuts were actually small sized unsalted. Not sure if this was an error or if the vendor intended to represent the product as Jumbo."
def prediction(text):
    """Classify the sentiment of a raw review string as "positive" or "negative".

    Cleans the text (URLs, markup, digits, punctuation runs), tokenizes it
    with a TweetTokenizer, removes stopwords (keeping "not") and punctuation,
    stems the remaining tokens, vectorizes the result with the module-level
    `vectorizer` and classifies it with the module-level `linear_svc` model.
    """
    # Apply the cleanup substitutions in order: URLs, www-domains, markup
    # tags, dots, digits/newlines, then punctuation runs.
    cleanup_patterns = [
        (r"https:\/\/.*[\r\n]*", ""),
        (r"www\.\w*\.\w\w\w", ""),
        (r"<[\w]*[\s]*/>", ""),
        (r"[\.]*", ""),
        ('[0-9\n]', ' '),
        (r"[,.;@#?!&$_]+\ *", " "),
    ]
    for pattern, replacement in cleanup_patterns:
        text = re.sub(pattern, replacement, text)
    tokens = TweetTokenizer(preserve_case=False, strip_handles=True,
                            reduce_len=True).tokenize(text)
    stemmer = PorterStemmer()
    stop_words = stopwords.words("english")
    # Keep "not" because it carries sentiment; drop domain-specific noise words.
    stop_words.remove('not')
    stop_words.extend(["br", "href", 'http', 'just', 'amazon', 'product',
                       'time', 'year', 'tried', 'I\'ve'])
    stemmed = [stemmer.stem(tok) for tok in tokens
               if tok not in stop_words and tok not in string.punctuation]
    features = vectorizer.transform([" ".join(stemmed)]).toarray()
    # Label 1 encodes a positive review.
    return "positive" if linear_svc.predict(features)[0] == 1 else "negative"
# Classify the sample review and report the predicted sentiment.
feedback = prediction(newreview)
print("This review is: ", feedback)
|
{"hexsha": "48d6fdf238a92123c2c4141c55c435a7baae41b8", "size": 13235, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai__final.py", "max_stars_repo_name": "SunidhiSingla/Sentiment-analysis", "max_stars_repo_head_hexsha": "40f1d2f921c6ba135bdf9c7f402456a5aa572763", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ai__final.py", "max_issues_repo_name": "SunidhiSingla/Sentiment-analysis", "max_issues_repo_head_hexsha": "40f1d2f921c6ba135bdf9c7f402456a5aa572763", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ai__final.py", "max_forks_repo_name": "SunidhiSingla/Sentiment-analysis", "max_forks_repo_head_hexsha": "40f1d2f921c6ba135bdf9c7f402456a5aa572763", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1107491857, "max_line_length": 432, "alphanum_fraction": 0.6985266339, "include": true, "reason": "import numpy", "num_tokens": 3194}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_peakfinder.py
"""
Test suite for `PeakFinder` class
Copyright (c) 2016, David Hoffman
"""
from nose.tools import *
from peaks.peakfinder import PeakFinder
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import unittest
class TestPeakFinder(unittest.TestCase):
    """Unit tests for the ``PeakFinder`` class."""

    def setUp(self):
        """Build a 256x512 image containing ten random unit-height peaks."""
        img_shape = (256, 512)
        image = np.zeros(img_shape)
        # Random integer coordinates, one (row, col) pair per peak.
        coords = (np.random.rand(10, 2) * img_shape).astype(int)
        image[coords.T[0], coords.T[1]] = 1
        # A coordinate collision would leave fewer than 10 peaks in the image.
        assert image.sum() == 10, "Something wrong with data generation, points = {}".format(coords)
        self.points = coords
        self.data = image

    def test_self_consistency(self):
        """find_blobs should recover exactly the planted peak coordinates."""
        finder = PeakFinder(self.data)
        finder.thresh = 0.1
        finder.find_blobs()
        detected = np.sort(finder.blobs[:, :2], 0)
        assert_array_equal(detected, np.sort(self.points, 0))
|
{"hexsha": "9ba8321ec662158c4f88c3634e552aca08ddb790", "size": 1018, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_peakfinder.py", "max_stars_repo_name": "david-hoffman/peaks", "max_stars_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-15T00:04:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-29T15:13:34.000Z", "max_issues_repo_path": "tests/test_peakfinder.py", "max_issues_repo_name": "david-hoffman/peaks", "max_issues_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-24T05:21:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T05:21:49.000Z", "max_forks_repo_path": "tests/test_peakfinder.py", "max_forks_repo_name": "david-hoffman/peaks", "max_forks_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-01T14:15:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-15T00:04:53.000Z", "avg_line_length": 27.5135135135, "max_line_length": 95, "alphanum_fraction": 0.6296660118, "include": true, "reason": "import numpy,from numpy", "num_tokens": 257}
|
import utils
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Fix the RNG so image sampling below is reproducible.
np.random.seed(123)
# assumes GetInputData returns a DataFrame with columns 'cellType', 'age',
# 'localization', 'sex' and 'image' — TODO confirm against utils.
inputData = utils.GetInputData((100, 75))
# Plot 4 graphics with different data statistics from csv input
plt.figure(figsize=(20, 14))
plt.subplot(2, 2, 1)
fig = sns.countplot(
    y=inputData['cellType'], order=inputData['cellType'].value_counts().index, palette='viridis')
plt.xticks(fig.get_xticks())
plt.title('Most Frequent Type of Cells')
plt.subplot(2, 2, 2)
sns.kdeplot(inputData['age'], shade=True, color='blue')
plt.title('Age Distribution')
plt.xticks(list(range(0, 100, 10)))
plt.subplot(2, 2, 3)
fig = sns.countplot(x=inputData['localization'], order=inputData['localization'].value_counts(
).index, palette='inferno')
plt.xticks(fig.get_xticks(), rotation=90)
plt.title('Most Frequent Localizations')
plt.subplot(2, 2, 4)
fig = sns.countplot(
    x=inputData['sex'], order=inputData['sex'].value_counts().index, palette='summer')
plt.xticks(fig.get_xticks(), rotation=90)
plt.title('Sex')
plt.show()
# Display a sample of 5 images from each classification class.
n_samples = 5
fig, m_axs = plt.subplots(7, n_samples, figsize=(4*n_samples, 3*7))
for n_axs, (type_name, type_rows) in zip(m_axs,
                                         inputData.sort_values(['cellType']).groupby('cellType')):
    n_axs[0].set_title(type_name)
    # Fixed random_state so the same images are shown on every run.
    for c_ax, (_, c_row) in zip(n_axs, type_rows.sample(n_samples, random_state=1234).iterrows()):
        c_ax.imshow(c_row['image'])
        c_ax.axis('off')
plt.show()
|
{"hexsha": "2b734a92011ce4106b023ac3d5d748638fcdf3dc", "size": 1523, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML/dataVis.py", "max_stars_repo_name": "AlexandruStahie/SkinLesSuggest", "max_stars_repo_head_hexsha": "ecf4a25a56ce620aaef9a88052559b9e97349ddd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ML/dataVis.py", "max_issues_repo_name": "AlexandruStahie/SkinLesSuggest", "max_issues_repo_head_hexsha": "ecf4a25a56ce620aaef9a88052559b9e97349ddd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ML/dataVis.py", "max_forks_repo_name": "AlexandruStahie/SkinLesSuggest", "max_forks_repo_head_hexsha": "ecf4a25a56ce620aaef9a88052559b9e97349ddd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7291666667, "max_line_length": 98, "alphanum_fraction": 0.7025607354, "include": true, "reason": "import numpy", "num_tokens": 421}
|
import datetime
import time
import cv2 as cv
import numpy as np
from munkres import Munkres
from scipy.special import comb
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, adjusted_mutual_info_score, \
mutual_info_score, normalized_mutual_info_score
from sklearn.metrics.cluster import contingency_matrix, adjusted_rand_score, homogeneity_completeness_v_measure, \
fowlkes_mallows_score
def _comb2(n):
    # the exact version is faster for k == 2: use it by default globally in
    # this module instead of the float approximate variant
    # Returns "n choose 2", i.e. the number of unordered pairs among n items.
    return comb(n, 2, exact=True)
# Elementwise "n choose 2" over array inputs (convenience wrapper; this is a
# Python-level loop, not C-vectorized).
vComb = np.vectorize(_comb2)
class bcolor:
    """ANSI escape sequences for coloured/styled terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # blue (used by info())
    OKGREEN = '\033[92m'    # green
    WARNING = '\033[93m'    # yellow (used by warning())
    FAIL = '\033[91m'       # red (used by error())
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_timestamped(string):
    """Print *string* prefixed with the current local time (dd-mm-YYYY_HH:MM:SS)."""
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%d-%m-%Y_%H:%M:%S')
    print(f"{stamp}: {string}")
def get_timestamp():
    """Return the current local time formatted as dd-mm-YYYY_HH.MM.SS."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime('%d-%m-%Y_%H.%M.%S')
def info(string):
    """Print *string* in blue (informational message)."""
    print(bcolor.OKBLUE + string + bcolor.ENDC)
def warning(string):
    """Print *string* in yellow (warning message)."""
    print(bcolor.WARNING + string + bcolor.ENDC)
def error(string):
    """Print *string* in red and terminate the process with exit code -1."""
    print(f"{bcolor.FAIL}" + string + f"{bcolor.ENDC}")
    # NOTE(review): exiting the interpreter from a utility function makes the
    # module hard to use as a library; consider raising instead.
    exit(-1)
def return_position_indices(mri_shape):
    """Return an array of all index pairs/triples for a 2D or 3D shape.

    For 2D shapes the first index is enumerated in descending order (bottom
    row first); for 3D shapes all three axes are enumerated ascending.
    """
    rows, cols = mri_shape[0], mri_shape[1]
    if len(mri_shape) == 2:
        # Retrieve all the indices of a 2D image, rows enumerated bottom-up.
        pairs = []
        for r in range(rows - 1, -1, -1):
            for c in range(cols):
                pairs.append([r, c])
        return np.array(pairs)
    depth = mri_shape[2]
    triples = [[r, c, d]
               for r in range(rows)
               for c in range(cols)
               for d in range(depth)]
    return np.array(triples)
# Handle the data
def transform(mri_scans, slice_dimension=0, twod=True, chosen_slice=None):
    """Flatten each MRI volume in *mri_scans* (dict: name -> 3D array).

    twod=True: each volume is reshaped to (dim_0*dim_1, dim_2) and the column
    *chosen_slice* is extracted, giving one flattened 2D slice per scan.
    twod=False: each volume is flattened to a single 1D vector.
    Returns (dict of flattened arrays, common shape of the inputs).
    All scans must share the same shape; otherwise error() aborts the program.

    NOTE(review): for slice_dimension != 0 the reshape does not transpose the
    data, so dim_0/dim_1/dim_2 select only *sizes*, not axes — confirm this is
    the intended behavior for non-default slice dimensions.
    """
    # Transform the 2d matrices in column vectors
    dim_0 = slice_dimension
    dim_1 = (slice_dimension + 1) % 3
    dim_2 = (slice_dimension + 2) % 3
    mris_shape = list(mri_scans.values())[0].shape
    if twod:
        mris_shape = (mris_shape[dim_0], mris_shape[dim_1])
    transformed_matrices = {}
    if twod:
        if chosen_slice is None:
            error("Please choose a slice to create a 2D matrix.")
        for k, matrix in mri_scans.items():
            if matrix.shape[:2] != mris_shape:
                error("The MRI " + k + " does not have the same shape as the other MRIs. " +
                      "Please convert it such that the all have the same shape.")
            transformed_matrices[k] = matrix.reshape((matrix.shape[dim_0] * matrix.shape[dim_1]), matrix.shape[dim_2])[
                                      :, chosen_slice]
    else:
        for k, matrix in mri_scans.items():
            if matrix.shape != mris_shape:
                error("The MRI " + k + " does not have the same shape as the other MRIs. " +
                      "Please convert it such that the all have the same shape.")
            transformed_matrices[k] = matrix.reshape((matrix.shape[0] * matrix.shape[1] * matrix.shape[2]))
    return transformed_matrices, mris_shape
# Handle the data
def transform_single(mri, slice_dimension=0, twod=False, chosen_slice=None):
    """Flatten a single MRI volume (see transform() for the dict version).

    twod=True: reshape to (dim_0*dim_1, dim_2) and take column *chosen_slice*.
    twod=False: flatten the whole volume to a 1D vector.
    Returns (flattened array, the shape used for later un-flattening).

    NOTE(review): as in transform(), the reshape for slice_dimension != 0 does
    not transpose the data — confirm intended.
    """
    # Transform the 2d matrices in column vectors
    dim_0 = slice_dimension
    dim_1 = (slice_dimension + 1) % 3
    dim_2 = (slice_dimension + 2) % 3
    if twod:
        mris_shape = (mri.shape[dim_0], mri.shape[dim_1])
    else:
        mris_shape = mri.shape
    if twod:
        if chosen_slice is None:
            error("Please choose a slice to create a 2D matrix.")
        reshaped_mri = mri.reshape((mri.shape[dim_0] * mri.shape[dim_1]), mri.shape[dim_2])[:, chosen_slice]
    else:
        reshaped_mri = mri.reshape((mri.shape[0] * mri.shape[1] * mri.shape[2]))
    return reshaped_mri, mris_shape
def slice_up(mri, original_shape, chosen_slice=76):
    """Reshape a flattened scan back to *original_shape*.

    For 2D shapes the reshaped image is returned as-is; for 3D shapes the
    2D slice *chosen_slice* along the last axis is returned.
    """
    volume = mri.reshape(original_shape)
    if len(original_shape) != 2:
        return volume[:, :, chosen_slice]
    return volume
def build_stacked_matrix(transformed_mris):
    """Stack the equally-shaped arrays of a dict along a new trailing axis.

    All values must share the same shape; otherwise error() aborts the program.
    """
    expected_shape = next(iter(transformed_mris.values())).shape
    columns = []
    for matrix in transformed_mris.values():
        if matrix.shape != expected_shape:
            error("All transformed matrices must have the same size.")
        columns.append(matrix)
    return np.stack(columns, axis=-1)
def build_position_matrix(mri_scans):
    """Augment each voxel intensity with its normalized position.

    For every 3D scan in *mri_scans* (dict: name -> array), builds an
    (n_voxels, 4) float array whose rows are
    [intensity, i/dim0, j/dim1, k/dim2], iterating voxels in C order.
    Returns a dict with the same keys.

    Fixed: the inner comprehension variable used to be named `k`, shadowing
    the dict key `k` (correct in Python 3 but fragile and confusing); also
    replaced `np.float_`, which was removed in NumPy 2.0, with the equivalent
    `np.float64`.
    """
    new_mris = {}
    for name, mri in mri_scans.items():
        new_mris[name] = np.array(
            [[mri[i, j, k], (i / mri.shape[0]), (j / mri.shape[1]), (k / mri.shape[2])]
             for i in range(mri.shape[0])
             for j in range(mri.shape[1])
             for k in range(mri.shape[2])], dtype=np.float64)
    return new_mris
def remove_background(transformed_mri, nonzero_indices=None):
    """Return the non-background (nonzero) entries of a flattened scan.

    If *nonzero_indices* is not given it is computed from the array itself.
    Returns (values at those indices, the indices) so the background can be
    restored later with add_background().
    """
    indices = np.nonzero(transformed_mri)[0] if nonzero_indices is None else nonzero_indices
    # Gather the foreground values into a single vector.
    return transformed_mri[indices], indices
def add_background(labels, desired_shape, nonzero):
    """Scatter *labels* into a zero-filled array of *desired_shape*.

    Positions listed in *nonzero* receive the label values; everything else
    stays 0 (background). Inverse of remove_background().
    """
    restored = np.zeros(desired_shape)
    restored[nonzero] = labels
    return restored
def adjust_labels_with_background(labels_with_background, i):
    """Relabel every clustering in *labels_with_background* (dict) so its
    cluster ids match those of reference sequence *i*, via a Hungarian
    (Munkres) assignment maximizing label overlap.

    Only nonzero (foreground) entries are remapped; mutates the dict values
    in place and returns the dict.
    """
    # In this method the labels are translated from sequencing i to the others such that
    # there is colour consistency.
    m = Munkres()
    for k in labels_with_background:
        if k == i:
            continue
        # Overlap counts between the reference labels and sequence k's labels.
        contmat = contingency_matrix(labels_with_background[i], labels_with_background[k])
        # Munkres minimizes cost, so invert the overlap matrix to maximize it.
        minimization_matrix = contmat.max() - contmat
        munkres_tuples = m.compute(minimization_matrix)
        mapping = {}
        for t in munkres_tuples:
            # Create the mappings
            # NOTE(review): np.float_ was removed in NumPy 2.0; np.float64 is
            # the drop-in replacement.
            mapping[t[1]] = np.float_(t[0])
        # print(mapping)
        nonzeros = np.nonzero(labels_with_background[k])[0]
        for index in nonzeros:
            labels_with_background[k][index] = mapping[labels_with_background[k][index]]
    return labels_with_background
def map_colors(to_map, segmentation):
    """Remap the labels of *to_map* to best match *segmentation*.

    First pass: Hungarian (Munkres) assignment on the contingency matrix
    translates to_map's labels into segmentation's label ids. Second pass:
    the remaining label values are compressed to consecutive floats 0..n-1.
    Only nonzero (foreground) entries are remapped; mutates and returns
    *to_map*.
    """
    m = Munkres()
    contmat = contingency_matrix(segmentation, to_map)
    print(contmat)
    nonzeros = np.nonzero(to_map)[0]
    # Munkres minimizes cost, so invert the overlap matrix to maximize it.
    minimization_matrix = contmat.max() - contmat
    munkres_tuples = m.compute(minimization_matrix)
    mapping = {}
    for t in munkres_tuples:
        # Create the mappings
        # NOTE(review): np.float_ was removed in NumPy 2.0 (use np.float64).
        mapping[t[1]] = np.float_(t[0])
    for index in nonzeros:
        to_map[index] = mapping[to_map[index]]
    # Compress the used label values to consecutive indices.
    uniq = np.unique(to_map)
    mapping = {}
    for i in range(len(uniq)):
        mapping[uniq[i]] = np.float_(i)
    for index in nonzeros:
        to_map[index] = mapping[to_map[index]]
    return to_map
def adjust_labels_with_background_segmented(labels_with_background, segmentation):
    """Relabel every clustering in *labels_with_background* (dict) to best
    match the ground-truth *segmentation*, via Hungarian (Munkres) assignment
    on the contingency matrix.

    Only nonzero (foreground) entries are remapped; mutates the dict values
    in place and returns the dict.
    """
    m = Munkres()
    for k in labels_with_background:
        contmat = contingency_matrix(segmentation, labels_with_background[k])
        nonzeros = np.nonzero(labels_with_background[k])[0]
        # Munkres minimizes cost, so invert the overlap matrix to maximize it.
        minimization_matrix = contmat.max() - contmat
        munkres_tuples = m.compute(minimization_matrix)
        mapping = {}
        for t in munkres_tuples:
            # Create the mappings
            # NOTE(review): np.float_ was removed in NumPy 2.0 (use np.float64).
            mapping[t[1]] = np.float_(t[0])
        for index in nonzeros:
            labels_with_background[k][index] = mapping[labels_with_background[k][index]]
    return labels_with_background
def intersection_foreground(arr1, arr2):
    """Boolean mask of positions that are foreground (above the array minimum) in BOTH arrays."""
    foreground1 = arr1 > arr1.min()
    foreground2 = arr2 > arr2.min()
    return np.logical_and(foreground1, foreground2)
def union_foreground(arr1, arr2):
    """Boolean mask of positions that are foreground (above the array minimum) in EITHER array."""
    foreground1 = arr1 > arr1.min()
    foreground2 = arr2 > arr2.min()
    return np.logical_or(foreground1, foreground2)
def remove_outliers(arr, scan_type):
    """Clip intensity outliers of a scan via the 1.5*IQR rule, then rescale to [0, 1].

    The array is first standardized (zero mean / unit std over the foreground,
    i.e. values above the minimum), outliers are replaced with the background
    value, and the result is min-max normalized. For scan types containing
    "t1" the upper tail is removed, otherwise the lower tail.
    """
    # Standardize over the foreground voxels.
    arr = normalize_with_opt(arr, 1)
    foreground = arr[arr > arr.min()]
    # 1.5 * IQR whiskers, computed on the foreground only.
    q25, q75 = np.percentile(foreground, 25), np.percentile(foreground, 75)
    cut_off = (q75 - q25) * 1.5
    lower, upper = q25 - cut_off, q75 + cut_off
    # TODO: Is this correct? Definitely for t1 -> t2
    if "t1" in scan_type:
        arr[arr > upper] = arr.min()
    else:
        arr[arr < lower] = arr.min()
    # Rescale the clipped values into [0, 1].
    return normalize_with_opt(arr, 0)
def normalize_with_opt(arr, opt):
    """Normalize *arr* according to *opt*.

    opt == 0: min-max scale to [0, 1].
    opt == 1: standardize using mean/std of the foreground (values above the
              array minimum).
    otherwise: return the array unchanged.
    """
    if opt == 0:
        lo, hi = arr.min(), arr.max()
        return (arr - lo) / (hi - lo)
    if opt == 1:
        foreground = arr[arr > arr.min()]
        return (arr - np.mean(foreground)) / np.std(foreground)
    return arr
def get_stats(truth, pred):
    """Pair-counting confusion statistics [tp, fp, fn, tn] for two clusterings.

    tp counts sample pairs grouped together in both *truth* and *pred*; these
    are the quantities underlying the (Adjusted) Rand index.
    """
    contingency = contingency_matrix(truth, pred, sparse=True)
    # cm = contingency_matrix(truth, pred)
    # print(cm)
    # print(cm.sum(axis=0))
    # print(cm.sum(axis=1))
    # Pairs grouped together in pred (column marginals) / in truth (row marginals).
    tp_plus_fp = sum(_comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
    tp_plus_fn = sum(_comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
    tp = sum(_comb2(n_ij) for n_ij in contingency.data)
    fp = tp_plus_fp - tp
    fn = tp_plus_fn - tp
    # All pairs minus the other three categories.
    tn = _comb2(truth.shape[0]) - tp - fp - fn
    return [tp, fp, fn, tn]
def crop_center(img, target_shape):
    """Crop the central region of *img* so the result has shape *target_shape*.

    Supports 2D and 3D arrays; other ranks are returned unchanged. Each axis
    is cropped symmetrically around its center down to the corresponding
    target size.

    Fixed: the previous implementation paired each axis's start offset with
    the wrong target entry (offsets computed from axis 0 were applied to
    axis 1 and vice versa), so non-square targets produced a result whose
    shape differed from target_shape. Square crops are unaffected.
    """
    if len(img.shape) == 3:
        d0, d1, d2 = img.shape
        c0, c1, c2 = target_shape
        s0 = d0 // 2 - c0 // 2
        s1 = d1 // 2 - c1 // 2
        s2 = d2 // 2 - c2 // 2
        return img[s0:s0 + c0, s1:s1 + c1, s2:s2 + c2]
    elif len(img.shape) == 2:
        d0, d1 = img.shape
        c0, c1 = target_shape
        s0 = d0 // 2 - c0 // 2
        s1 = d1 // 2 - c1 // 2
        return img[s0:s0 + c0, s1:s1 + c1]
    else:
        pass  # Not supported
    return img
def precision_recall(truth, pred):
    """Compute a dict of clustering/classification agreement scores between
    label arrays *truth* and *pred* (must have equal length; otherwise the
    process exits).

    Includes pair-counting scores (RI, ARI), accuracy, precision/recall/F1
    under micro/macro/weighted averaging, mutual-information variants (MI,
    AMI, NMI), homogeneity/completeness/v-measure and Fowlkes-Mallows.
    """
    res = {}
    if len(truth) != len(pred):
        print("Something's wrong with the indices, they are not equal.")
        exit(-1)
    print_timestamped("Computing scores.")
    # Pair-counting confusion entries (see get_stats).
    tp, fp, fn, tn = get_stats(truth, pred)
    # print(tp, fp, fn, tn)
    rand_index = float(tp + tn) / (tp + fp + fn + tn)
    res["RI"] = rand_index
    res["ARI"] = adjusted_rand_score(truth, pred)
    res["accuracy"] = accuracy_score(truth, pred)
    # Pairwise precision/recall derived from the pair-counting entries.
    precision = float(tp) / (tp + fp)
    res["precision_cm"] = precision
    recall = float(tp) / (tp + fn)
    res["recall_cm"] = recall
    res["precision_micro"] = precision_score(truth, pred, average="micro")
    res["recall_micro"] = recall_score(truth, pred, average="micro")
    res["f1_micro"] = f1_score(truth, pred, average="micro")
    res["precision_macro"] = precision_score(truth, pred, average="macro")
    res["recall_macro"] = recall_score(truth, pred, average="macro")
    res["f1_macro"] = f1_score(truth, pred, average="macro")
    res["precision_weighted"] = precision_score(truth, pred, average="weighted")
    res["recall_weighted"] = recall_score(truth, pred, average="weighted")
    res["f1_weighted"] = f1_score(truth, pred, average="weighted")
    res["AMI"] = adjusted_mutual_info_score(truth, pred)
    res["MI"] = mutual_info_score(truth, pred)
    res["NMI"] = normalized_mutual_info_score(truth, pred)
    homo_compl_v = homogeneity_completeness_v_measure(truth, pred)
    res["homogeneity"] = homo_compl_v[0]
    res["completeness"] = homo_compl_v[1]
    res["v_measure"] = homo_compl_v[2]
    res["fowlkes"] = fowlkes_mallows_score(truth, pred)
    # print((precision * recall) ** (float(1)/2), res["fowlkes"])
    print_timestamped("Finished computing scores.")
    return res
def filter_blur(mapped, mris_shape, mode="median", k_size=3):
    """Smooth a flattened image with an OpenCV filter and return it flattened.

    mapped: 1D array holding an image of shape *mris_shape*.
    mode: "average" (box kernel via filter2D), "median" (medianBlur),
          "blur" (cv.blur) or "gblur" (GaussianBlur); any other value aborts
          the program via error().
    k_size: side length of the (square) smoothing kernel.
    """
    r_mapped = mapped.reshape(mris_shape)
    reshape_size = np.size(mapped)
    if mode == "average":
        kernel = np.ones((k_size, k_size), np.float32) / (k_size * k_size)
        dst = cv.filter2D(r_mapped, -1, kernel)
    elif mode == "median":
        dst = cv.medianBlur(np.float32(r_mapped), ksize=k_size)
    elif mode == "blur":
        dst = cv.blur(r_mapped, ksize=(k_size, k_size))
    elif mode == "gblur":
        # Fixed: cv.GaussianBlur's parameter is `ksize`, not `k_size`; the old
        # keyword raised a TypeError whenever mode == "gblur" was requested.
        dst = cv.GaussianBlur(r_mapped, ksize=(k_size, k_size), sigmaX=0)
    else:
        error("Smoothing mode not recognized.")
    return dst.reshape(reshape_size)
|
{"hexsha": "86f15ea97d05bd67dc65121816b20ef90778905d", "size": 12367, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/util.py", "max_stars_repo_name": "giuliabaldini/brainclustering", "max_stars_repo_head_hexsha": "853bd46e12338da9ae4fe348c508163d9951feb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/util.py", "max_issues_repo_name": "giuliabaldini/brainclustering", "max_issues_repo_head_hexsha": "853bd46e12338da9ae4fe348c508163d9951feb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/util.py", "max_forks_repo_name": "giuliabaldini/brainclustering", "max_forks_repo_head_hexsha": "853bd46e12338da9ae4fe348c508163d9951feb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4484679666, "max_line_length": 119, "alphanum_fraction": 0.6380690547, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3349}
|
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2012-2020 Barend Gehrels, Amsterdam, the Netherlands.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_LINE_LINE_INTERSECTION_HPP
#define BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_LINE_LINE_INTERSECTION_HPP
#include <boost/geometry/algorithms/detail/make/make.hpp>
#include <boost/geometry/arithmetic/infinite_line_functions.hpp>
#include <boost/geometry/util/math.hpp>
namespace boost { namespace geometry
{
#ifndef DOXYGEN_NO_DETAIL
namespace detail { namespace buffer
{
struct line_line_intersection
{
    // Returns the midpoint of segment (a, b); used below to construct the
    // auxiliary line through the original vertex when sides are equidistant.
    template <typename Point>
    static Point between_point(Point const& a, Point const& b)
    {
        Point result;
        geometry::set<0>(result, (geometry::get<0>(a) + geometry::get<0>(b)) / 2.0);
        geometry::set<1>(result, (geometry::get<1>(a) + geometry::get<1>(b)) / 2.0);
        return result;
    }

    // Computes the intersection point `ip` of the supporting lines of
    // p (pi, pj) and q (qi, qj). Returns false when the configuration is
    // degenerate (determinant(s) numerically zero), true otherwise.
    template <typename Point>
    static bool
    apply(Point const& pi, Point const& pj, Point const& qi, Point const& qj,
          Point const& vertex, bool equidistant, Point& ip)
    {
        // Calculates ip (below) by either intersecting p (pi, pj)
        // with q (qi, qj) or by taking a point between pj and qi (b) and
        // intersecting r (b, v), where v is the original vertex, with p (or q).
        // The reason for dual approach: p might be nearly collinear with q,
        // and in that case the intersection points can lose precision
        // (or be plainly wrong).
        // Therefore it takes the most precise option (this is usually p, r)
        //
        //             /qj                     |
        //            /                        |
        //           /      /                  |
        //          /      /                   |
        //         /      /                    |
        //        /qi    /                     |
        //              /                      |
        //  ip *       + b   * v               |
        //                                     |
        //        \pj     \                    |
        //         \       \                   |
        //          \       \                  |
        //           \       \                 |
        //            \pi     \                |
        //
        // If generated sides along the segments can have an adapted distance,
        // in a custom strategy, then the calculation of the point in between
        // might be incorrect and the optimization is not used.

        using ct = typename coordinate_type<Point>::type;

        auto const p = detail::make::make_infinite_line<ct>(pi, pj);
        auto const q = detail::make::make_infinite_line<ct>(qi, qj);
        using line = decltype(p);

        using arithmetic::determinant;
        using arithmetic::assign_intersection_point;

        // The denominator is the determinant of (a,b) values of lines p q
        // | pa pa |
        // | qb qb |
        auto const denominator_pq = determinant<line, &line::a, &line::b>(p, q);
        constexpr decltype(denominator_pq) const zero = 0;

        if (equidistant)
        {
            auto const between = between_point(pj, qi);
            auto const r = detail::make::make_infinite_line<ct>(vertex, between);
            auto const denominator_pr = determinant<line, &line::a, &line::b>(p, r);

            if (math::equals(denominator_pq, zero)
                && math::equals(denominator_pr, zero))
            {
                // Degenerate case (for example when length results in <inf>)
                return false;
            }

            // Use whichever line pair is better conditioned (larger determinant
            // magnitude) to compute the intersection point.
            ip = geometry::math::abs(denominator_pq) > geometry::math::abs(denominator_pr)
                ? assign_intersection_point<Point>(p, q, denominator_pq)
                : assign_intersection_point<Point>(p, r, denominator_pr);
        }
        else
        {
            if (math::equals(denominator_pq, zero))
            {
                return false;
            }
            ip = assign_intersection_point<Point>(p, q, denominator_pq);
        }

        return true;
    }
};
}} // namespace detail::buffer
#endif // DOXYGEN_NO_DETAIL
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_BUFFER_LINE_LINE_INTERSECTION_HPP
|
{"hexsha": "f1014c9bb450975b8b7251d2707d0d62ea75f66c", "size": 4446, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/algorithms/detail/buffer/line_line_intersection.hpp", "max_stars_repo_name": "Harshitha91/Tmdb-react-native-node", "max_stars_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 326.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T13:47:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T02:13:59.000Z", "max_issues_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/algorithms/detail/buffer/line_line_intersection.hpp", "max_issues_repo_name": "Harshitha91/Tmdb-react-native-node", "max_issues_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 623.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T23:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:15:23.000Z", "max_forks_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/algorithms/detail/buffer/line_line_intersection.hpp", "max_forks_repo_name": "Harshitha91/Tmdb-react-native-node", "max_forks_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-01-14T15:50:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:58:36.000Z", "avg_line_length": 36.7438016529, "max_line_length": 90, "alphanum_fraction": 0.5427350427, "num_tokens": 1006}
|
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from ..models.kalman_filter import KalmanFilter
from ..models.filtering_model import MovingAverage
from ..timeseries import TimeSeries
from ..utils import timeseries_generation as tg
class KalmanFilterTestCase(DartsBaseTestClass):
    """Tests for the darts KalmanFilter wrapper."""

    def test_kalman(self):
        """ KalmanFilter test.
        Creates an increasing sequence of numbers, adds noise and
        assumes the kalman filter predicts values closer to real values
        """
        testing_signal = np.arange(1, 5, 0.1)
        # Additive Gaussian noise on top of the clean ramp.
        noise = np.random.normal(0, 0.7, testing_signal.shape)
        testing_signal_with_noise = testing_signal + noise
        df = pd.DataFrame(data=testing_signal_with_noise, columns=['signal'])
        testing_signal_with_noise_ts = TimeSeries.from_dataframe(df, value_cols=['signal'])
        kf = KalmanFilter(dim_x=1)
        filtered_ts = kf.filter(testing_signal_with_noise_ts, num_samples=1).univariate_values()
        noise_distance = testing_signal_with_noise - testing_signal
        prediction_distance = filtered_ts - testing_signal
        # Filtering should reduce the spread of the error around the clean ramp.
        self.assertGreater(noise_distance.std(), prediction_distance.std())

    def test_kalman_multivariate(self):
        """Filtering a stacked series should produce the expected width."""
        kf = KalmanFilter(dim_x=3)
        sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
        noise_ts = tg.gaussian_timeseries(length=30) * 0.1
        ts = sine_ts.stack(noise_ts)
        prediction = kf.filter(ts)
        # NOTE(review): the input has 2 components but the expected width is 3;
        # presumably the filter's dim_x drives the output width — confirm.
        self.assertEqual(prediction.width, 3)
class MovingAverageTestCase(DartsBaseTestClass):
    """Tests for the MovingAverage filtering model."""

    def test_moving_average_univariate(self):
        """Smoothing a sine wave should reduce its mean absolute amplitude."""
        ma = MovingAverage(window=3, centered=False)
        sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
        sine_filtered = ma.filter(sine_ts)
        self.assertGreater(np.mean(np.abs(sine_ts.values())), np.mean(np.abs(sine_filtered.values())))

    def test_moving_average_multivariate(self):
        """Smoothing should reduce amplitude independently on each component."""
        ma = MovingAverage(window=3)
        sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
        noise_ts = tg.gaussian_timeseries(length=30) * 0.1
        ts = sine_ts.stack(noise_ts)
        ts_filtered = ma.filter(ts)
        self.assertGreater(np.mean(np.abs(ts.values()[:, 0])), np.mean(np.abs(ts_filtered.values()[:, 0])))
        self.assertGreater(np.mean(np.abs(ts.values()[:, 1])), np.mean(np.abs(ts_filtered.values()[:, 1])))
if __name__ == '__main__':
    # Run each test directly (without a unittest runner).
    KalmanFilterTestCase().test_kalman()
    # Fixed: test_kalman_multivariate was defined above but never invoked here.
    KalmanFilterTestCase().test_kalman_multivariate()
    MovingAverageTestCase().test_moving_average_univariate()
    MovingAverageTestCase().test_moving_average_multivariate()
|
{"hexsha": "7942a6b51f2c12124eae3aac1c29bba63d4ae55d", "size": 2614, "ext": "py", "lang": "Python", "max_stars_repo_path": "darts/tests/test_filters.py", "max_stars_repo_name": "muliliao/darts", "max_stars_repo_head_hexsha": "2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "darts/tests/test_filters.py", "max_issues_repo_name": "muliliao/darts", "max_issues_repo_head_hexsha": "2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "darts/tests/test_filters.py", "max_forks_repo_name": "muliliao/darts", "max_forks_repo_head_hexsha": "2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.884057971, "max_line_length": 107, "alphanum_fraction": 0.7061973986, "include": true, "reason": "import numpy", "num_tokens": 606}
|
MODULE interpolation_functions
! Module containing functions necessary for 3DInterpolation
CONTAINS
      INTEGER FUNCTION fact(n)
      ! Returns the factorial of n computed iteratively (fact(0) = 1).
      IMPLICIT NONE
      INTEGER, INTENT(IN) :: n
      INTEGER p,i
      p = 1
      do i = 1, n
         p = p * i
      end do
      fact = p
      END FUNCTION fact

      ! Returns the inverse of a matrix calculated by finding the LU
      ! decomposition. Depends on LAPACK (DGETRF/DGETRI); stops the program
      ! when the matrix is numerically singular or the inversion fails.
      function inv(A) result(Ainv)
      IMPLICIT NONE
      INTEGER, PARAMETER :: dp = selected_real_kind(15, 307)
      real(KIND=dp), dimension(:,:), intent(in) :: A
      real(KIND=dp), dimension(size(A,1),size(A,2)) :: Ainv
      real(KIND=dp), dimension(size(A,1)) :: work ! work array for LAPACK
      integer, dimension(size(A,1)) :: ipiv ! pivot indices
      ! Note: removed a previously declared but unused loop variable `i`.
      integer :: N,M,info
      ! External procedures defined in LAPACK
      external DGETRF
      external DGETRI
      ! Store A in Ainv to prevent it from being overwritten by LAPACK
      Ainv = A(:,:)
      M = size(A,1)
      N = size(A,2)
      ! DGETRF computes an LU factorization of a general M-by-N matrix A
      ! using partial pivoting with row interchanges.
      call DGETRF(M, N, Ainv, M, ipiv, info)
      if (info /= 0) then
         stop 'Matrix is numerically singular!'
      end if
      ! DGETRI computes the inverse of a matrix using the LU factorization
      ! computed by DGETRF.
      call DGETRI(M, Ainv, M, ipiv, work, M, info)
      if (info /= 0) then
         stop 'Matrix inversion failed!'
      end if
      end function inv
END MODULE interpolation_functions
|
{"hexsha": "087e9d1f1d94d4b7da3b5ab7352370afb73a653f", "size": 1480, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/OpenFoam/interpolation_functions.f90", "max_stars_repo_name": "apengsigkarup/OceanWave3D", "max_stars_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2016-01-08T12:36:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:56:45.000Z", "max_issues_repo_path": "src/OpenFoam/interpolation_functions.f90", "max_issues_repo_name": "apengsigkarup/OceanWave3D", "max_issues_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-10-10T19:45:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-07T07:37:11.000Z", "max_forks_repo_path": "src/OpenFoam/interpolation_functions.f90", "max_forks_repo_name": "apengsigkarup/OceanWave3D", "max_forks_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-10-01T12:17:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T16:23:37.000Z", "avg_line_length": 27.4074074074, "max_line_length": 70, "alphanum_fraction": 0.6756756757, "num_tokens": 436}
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from tensorio import compare_tensor
import numpy as np
import akg
from akg.utils import kernel_exec as utils
from akg.ops.nn import maxpool
from akg.utils.dsl_create import cal_pad_shapes_by_strategy, get_value
from akg import tvm
from base import get_rtol_atol
from gen_random import random_gaussian
import math
def compute_blockdim(shape):
    """Choose a block dimension (1..32) proportional to the tensor size.

    *shape* may be a list/tuple of dimensions (size = product of dims), a
    plain int (used directly), or anything else (falls back to a size of 2).

    Fixed: the product over a list/tuple previously started from 0, so the
    computed size was always 0 and the function always returned 1 for
    list/tuple shapes.
    """
    if isinstance(shape, (list, tuple)):
        size = 1
        for dim in shape:
            size *= dim
    elif isinstance(shape, int):
        size = shape
    else:
        size = 2
    return min(32, math.ceil(size / 8192 + 1))
def benchmark(input, kernel, stride, pad):
    """NumPy reference implementation of maxpool-with-argmax on 5-D data.

    Args:
        input: 5-D numpy array, laid out (N, C1, H, W, C0) — NC1HWC0.
        kernel: (KH, KW) pooling window.
        stride: (sh, sw) strides.
        pad: padding strategy understood by cal_pad_shapes_by_strategy.

    Returns:
        (out, mask): pooled maxima of shape (N, C1, out_h, out_w, C0) and a
        one-hot argmax mask of shape (N, C1, KH, KW, out_h, out_w, C0),
        both cast back to input.dtype.
    """
    sh, sw = stride
    N, C1, H, W, C0 = input.shape
    KH, KW = kernel
    # Resolve padding amounts and the output spatial size from the strategy.
    [ph_h, ph_t, pw_h, pw_t], [out_size_h, out_size_w] = \
        cal_pad_shapes_by_strategy(input.shape, kernel, stride, pad)
    # Unwrap possible tvm IntImm values into plain Python ints.
    out_size_w = get_value(out_size_w, akg.tvm.expr.IntImm)
    out_size_h = get_value(out_size_h, akg.tvm.expr.IntImm)
    out_shape = (N, C1, out_size_h, out_size_w, C0)
    mask_shape = (N, C1, KH, KW, out_size_h, out_size_w, C0)
    # Most negative representable value of the dtype (fp16 min / fp32 min),
    # used as the initial "max" so any real element beats it.
    min_value = -65504.0 if input.dtype == 'float16' \
        else -340282346638528859811704183484516925440.0
    out = np.full(out_shape, min_value, dtype=input.dtype)
    mask = np.zeros(mask_shape)
    # Pad the input with the dtype's minimum so padded cells never win the max.
    inputpad = np.full((N, C1, H + ph_h + ph_t, W + pw_h + pw_t, C0),
                       np.finfo(input.dtype).min, dtype=input.dtype)
    inputpad[:, :, ph_h:ph_h + H, pw_h:pw_h + W, :] = input
    # Max over each (KH, KW) pooling window.
    for i in range(out_size_h):
        for j in range(out_size_w):
            out[:, :, i, j, :] = \
                np.max(inputpad[:, :, i * sh:i * sh + KH, j * sw:j * sw + KW, :], axis=(2, 3))
    kerneled_shape_tmp = (inputpad.shape[0], inputpad.shape[1],
                          KH * KW, inputpad.shape[4])
    # Flat (row-major over KH*KW) index of the max inside each window.
    maxid = np.zeros(out_shape)
    for i in range(out_size_h):
        for j in range(out_size_w):
            maxid[:, :, i, j, :] = \
                np.argmax(np.reshape(
                    inputpad[:, :, i * sh:i * sh + KH, j * sw:j * sw + KW, :],
                    kerneled_shape_tmp), axis=2)
    # Scatter ones at the argmax positions: flatten the (KH, KW) mask axes to
    # one KH*KW axis, fancy-index with per-axis coordinate lists, then
    # restore the original mask layout.
    mask_shape_f = [N, C1, KH * KW, out_size_h, out_size_w, C0]
    mask = np.reshape(mask, tuple(mask_shape_f))
    index_shape = [N, C1, 1, out_size_h, out_size_w, C0]
    def cal_num(shape):
        # Product of the dimensions of `shape`.
        return reduce(lambda i, j: i * j, [shape[i] for i in range(len(shape))])
    # Coordinate lists enumerating every (n, c1, ho, wo, c0) cell in the
    # order maxid.flatten() produces them.
    n_indexs = [i for i in range(N) for _ in range(cal_num(index_shape[1:]))]
    c1_indexs = [i for i in range(C1) \
                 for _ in range(cal_num(index_shape[2:]))] * N
    ho_indexs = [i for i in range(out_size_h) \
                 for _ in range(cal_num(index_shape[4:]))] * \
                cal_num(index_shape[:3])
    wo_indexs = [i for i in range(out_size_w) \
                 for _ in range(cal_num(index_shape[5:]))] * \
                cal_num(index_shape[:4])
    c0_indexs = list(range(C0)) * cal_num(index_shape[:-1])
    mask[n_indexs, c1_indexs, maxid.flatten().astype(np.int32), ho_indexs, wo_indexs, c0_indexs] = 1
    mask = np.reshape(mask, tuple(mask_shape))
    out = out.astype(input.dtype)
    mask = mask.astype(input.dtype)
    return out, mask
def maxpool_with_argmax_run(shape, kernel, stride, pad, dsl, dtype, attrs=None, polyhedral=True):
    """Build, run and verify the maxpool_with_argmax operator.

    Args:
        shape: 5-D input shape (NC1HWC0).
        kernel, stride, pad: pooling parameters, forwarded as op attrs.
        dsl: unused here; kept for test-harness signature compatibility.
        dtype: "float16" or "float32".
        attrs: build attributes; "dynamic" selects the dynamic-shape kernel,
            "tuning" switches to tuning mode.
        polyhedral: if False, use the manual schedule instead of poly build.

    Returns:
        In tuning mode: the module (plus tuning metadata when enabled).
        Otherwise: (input, outputs, expects, ok) where ok is the combined
        compare_tensor result for max output and argmax mask.
    """
    build_shape = []
    arg_list = []
    if attrs is None:
        attrs = {}
    if attrs.get("dynamic"):
        # Dynamic mode: replace every dim except the last (C0) with a tvm
        # var and remember the concrete value to pass at launch time.
        for i in range(len(shape)):
            if i == len(shape) - 1:
                build_shape.append(shape[i])
            else:
                tmp_var = tvm.var("I" + str(i))
                build_shape.append(tmp_var)
                arg_list.append(shape[i])
    else:
        build_shape = shape
    arg_len = len(arg_list)
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(maxpool.maxpool_with_argmax,
                                  [shape], [dtype], op_attrs=[kernel, stride, pad],
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            input, expects, outputs = \
                gen_data(dtype, kernel, pad, shape, stride)
            # Negative indices mark which launch args are outputs.
            return mod, expects, \
                {"args": (input, outputs[0], outputs[1]), 'outputs': (-2 - arg_len, -1 - arg_len), 'tuning': False}
        else:
            return mod
    else:
        if polyhedral:
            if attrs.get("dynamic") and len(build_shape) > 0:
                mod = utils.op_build_test(maxpool.maxpool_with_argmax_dynamic,
                                          [build_shape], [dtype], op_attrs=[kernel, stride, pad],
                                          kernel_name='maxpool', attrs=attrs)
            else:
                mod = utils.op_build_test(maxpool.maxpool_with_argmax,
                                          [shape], [dtype], op_attrs=[kernel, stride, pad],
                                          kernel_name='maxpool', attrs=attrs)
        else:
            mod = maxpool.maxpool_manual_schedule(shape, kernel, stride, pad, dtype,
                                                  attrs=attrs, polyhedral=polyhedral)
        input, expects, outputs = \
            gen_data(dtype, kernel, pad, shape, stride, attrs)
        args = [input, outputs[0], outputs[1]]
        if attrs is not None and attrs.get("dynamic"):
            # Dynamic launch appends the concrete dims plus the block dim,
            # which shifts the output positions back by one extra slot.
            args = args + arg_list
            block_dim = compute_blockdim(shape)
            args.append(block_dim)
            outputs = utils.mod_launch(mod, args, (-3 - arg_len, -2 - arg_len), expect=expects)
        else:
            outputs = utils.mod_launch(mod, args, (-2 - arg_len, -1 - arg_len), expect=expects)
        rtol, atol = get_rtol_atol("maxpool", dtype)
        # Compare both outputs (max values, argmax mask) against the
        # NumPy reference from benchmark().
        results = list(map(lambda x, y:
                           compare_tensor(x, y, rtol=rtol, atol=atol),
                           outputs, expects))
        return input, outputs, expects, all(results)
def gen_data(dtype, kernel, pad, shape, stride, attrs=None):
    """Generate a random input, expected results and output placeholders.

    Returns:
        (input, [expect_max, expect_mask], [res, res_mask]) where the
        expectations come from the NumPy reference ``benchmark`` and the
        result buffers are pre-filled with -1 so untouched cells show up.
    """
    support_list = {"float16": np.float16, "float32": np.float32}
    import time
    # Time-based seed: the data intentionally differs between runs.
    seed_tmp = int(time.time())
    input = random_gaussian(shape, miu=0,
                            sigma=0.1, seed=seed_tmp).astype(support_list[dtype])
    expect_max, expect_mask = benchmark(input, kernel, stride, pad)
    out_shape = expect_max.shape
    mask_shape = expect_mask.shape
    res = np.full(out_shape, -1, dtype)
    res_mask = np.full(mask_shape, -1, dtype)
    if attrs is not None and attrs.get("dynamic"):
        # NOTE(review): dynamic mode expects an all-zero mask — presumably
        # the dynamic kernel does not emit the argmax mask; confirm.
        expect_mask = np.full(expect_mask.shape, 0.0, dtype)
    return input, [expect_max, expect_mask], [res, res_mask]
|
{"hexsha": "01c196fadc28b2761d1fea2051a4de31cdd56373", "size": 7230, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/common/test_run/maxpool_with_argmax_run.py", "max_stars_repo_name": "laekov/akg", "max_stars_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-31T02:43:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-31T02:43:43.000Z", "max_issues_repo_path": "tests/common/test_run/maxpool_with_argmax_run.py", "max_issues_repo_name": "laekov/akg", "max_issues_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/common/test_run/maxpool_with_argmax_run.py", "max_forks_repo_name": "laekov/akg", "max_forks_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0795454545, "max_line_length": 115, "alphanum_fraction": 0.5910096819, "include": true, "reason": "import numpy", "num_tokens": 1878}
|
# OpenFace API tests.
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import os
import numpy as np
np.set_printoptions(precision=5)
import scipy
import scipy.spatial
import openface
# Resolve model/data directories relative to the repository root
# (this file lives one level below it).
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
modelDir = os.path.join(openfaceDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
# dlib 68-point landmark model used for alignment.
dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
# Torch embedding network and its expected input size (96x96).
model = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
imgDim = 96
# Module-level singletons shared by all tests (loading is expensive).
align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(model, imgDim=imgDim)
def _read_to_rgb(imageFile):
    """Load an example image by file name and return it as an RGB array.

    OpenCV reads images in BGR channel order, so the result is converted
    before returning. Raises when the file cannot be read.
    """
    path = os.path.join(exampleImages, imageFile)
    bgr = cv2.imread(path)
    if bgr is None:
        raise Exception("Unable to load image: {}".format(path))
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def test_pipeline():
    """End-to-end smoke test: detect, align and embed a single face."""
    img = _read_to_rgb('lennon-1.jpg')
    box = align.getLargestFaceBoundingBox(img)
    print("Bounding box found was: ")
    print(box)
    # Historical expected box (intentionally not asserted):
    #   left=341, right=1006, top=193, bottom=859
    face = align.align(imgDim, img, box,
                       landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    embedding = net.forward(face)
    dist = scipy.spatial.distance.cosine(embedding, np.ones(128))
    print(dist)
    # Historical expected distance (intentionally not asserted):
    #   ~0.938840385931 (atol 0.01)
def test_pipeline_comparisons():
    """Embed several example faces and print a pairwise cosine-distance table."""
    labels = ['lennon-1', 'lennon-2', 'clapton-1', 'clapton-2', 'adams']
    images = {name: _read_to_rgb(name + '.jpg') for name in labels}
    boxes = {name: align.getLargestFaceBoundingBox(images[name])
             for name in labels}
    faces = {name: align.align(imgDim, images[name], boxes[name])
             for name in labels}
    raw = {name: net.forward(faces[name]) for name in labels}
    # L2-normalise so cosine distance compares directions only.
    features = {name: raw[name] / np.linalg.norm(raw[name]) for name in labels}

    for name in labels:
        print((name, features[name]))
    print('')

    print('Pairwise comparison table')
    print(',' + ','.join(labels))
    for outer in labels:
        row = [outer]
        for inner in labels:
            # smaller distance => more similar, zero => identical
            dist = scipy.spatial.distance.cosine(features[outer], features[inner])
            row.append('%.6f' % dist)
        print(','.join(row))
    print('')
|
{"hexsha": "b98c7ef5294c47ecffc5c02fa3ca79ce95ff7315", "size": 3246, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/openface_api_tests.py", "max_stars_repo_name": "rhyswat/openface", "max_stars_repo_head_hexsha": "d495e579f537d6009c8a6b42d3b7e2b654bdc8e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/openface_api_tests.py", "max_issues_repo_name": "rhyswat/openface", "max_issues_repo_head_hexsha": "d495e579f537d6009c8a6b42d3b7e2b654bdc8e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/openface_api_tests.py", "max_forks_repo_name": "rhyswat/openface", "max_forks_repo_head_hexsha": "d495e579f537d6009c8a6b42d3b7e2b654bdc8e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6703296703, "max_line_length": 113, "alphanum_fraction": 0.6940850277, "include": true, "reason": "import numpy,import scipy", "num_tokens": 851}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 21 03:23:54 2021
@author: ASUS
"""
import cv2
import numpy as np
def model_train(SUDOKU_TRAIN_DATA_PATH, SUDOKU_TRAIN_LABEL_PATH, MODEL_PATH):
    """Train a kNN digit classifier from text files and save it as XML.

    Args:
        SUDOKU_TRAIN_DATA_PATH: path to whitespace-separated feature rows.
        SUDOKU_TRAIN_LABEL_PATH: path to one label per feature row.
        MODEL_PATH: where the trained OpenCV KNearest model is written.
    """
    # Load features and labels; OpenCV's trainer wants float32 and a
    # single-column label matrix.
    features = np.loadtxt(SUDOKU_TRAIN_DATA_PATH, np.float32)
    labels = np.loadtxt(SUDOKU_TRAIN_LABEL_PATH, np.float32).reshape((-1, 1))

    # Train a k-nearest-neighbours model on row samples and persist it.
    knn = cv2.ml.KNearest_create()
    knn.train(features, cv2.ml.ROW_SAMPLE, labels)
    knn.save(MODEL_PATH)
if __name__ == "__main__":
    # Paths for the pre-extracted training data and the output model file.
    MODEL_PATH = 'sudoku_digit_model.xml'
    SUDOKU_TRAIN_DATA_PATH = 'data/sudoku_data.data'
    SUDOKU_TRAIN_LABEL_PATH = 'data/sudoku_labels.data'
    model_train(SUDOKU_TRAIN_DATA_PATH, SUDOKU_TRAIN_LABEL_PATH, MODEL_PATH)
    # Fixed typo in the status message ("recongizer" -> "recognizer").
    print("digit recognizer training complete")
|
{"hexsha": "9b393ec6c849f929f0d5812747c13d9c30b882a0", "size": 905, "ext": "py", "lang": "Python", "max_stars_repo_path": "knn_train.py", "max_stars_repo_name": "terenceylchow124/sudoku_automating", "max_stars_repo_head_hexsha": "21686545938f3c1d782c4f23486fc603635c3a8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "knn_train.py", "max_issues_repo_name": "terenceylchow124/sudoku_automating", "max_issues_repo_head_hexsha": "21686545938f3c1d782c4f23486fc603635c3a8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "knn_train.py", "max_forks_repo_name": "terenceylchow124/sudoku_automating", "max_forks_repo_head_hexsha": "21686545938f3c1d782c4f23486fc603635c3a8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2068965517, "max_line_length": 78, "alphanum_fraction": 0.7104972376, "include": true, "reason": "import numpy", "num_tokens": 243}
|
-- To mathlib ?
import data.set.function
import data.equiv.basic
import topology.basic
import topology.constructions
#print continuous_equiv_fun_basis
#check function.uncurry
#check continuous
#print is_open_map.of_inverse
#print is_open_map.comp
#check preimage_equivalence
#print nhds_le_of_le
-- NOTE(review): as stated this claims continuity of `function.uncurry`
-- itself, which presupposes a topology on the function type A → B → C;
-- none is visibly declared in this file — confirm the intended statement.
-- Proof is unfinished (`sorry`).
lemma continuous_uncurry {A B C : Type} [topological_space A] [topological_space B] [topological_space C]
: continuous (@function.uncurry A B C) :=
begin
  intros s hs,
  apply is_open_iff_nhds.mpr,
  intros a ha,
  rw filter.le_principal_iff,
  change (function.uncurry a ∈ s) at ha,
  sorry,
end
-- Uncurrying a continuous `f : A → B → C` should yield a continuous map on
-- the product space.  The sketch builds the set of functions that land in
-- s1 pointwise, but both obligations are unfinished (`sorry`).
lemma continuous_uncurry_apply {A B C : Type} [topological_space A] [topological_space B] [topological_space C]
(f : A → B → C) (hf : continuous f) : continuous (function.uncurry f) :=
begin
  intros s1 hs1,
  let s2 : set (B → C) := { h : B → C | ∀ b, h b ∈ s1 },
  have hs2 : is_open s2,
  { sorry, },
  sorry,
end
#check continuous_iff_continuous_at
#print continuous_at
#print filter.tendsto
#print continuous_infi_dom
#check continuous_iff_coinduced_le
#print topological_space.coinduced
-- Flipping the argument order of a continuous two-argument map should
-- preserve continuity.  The attempt goes via the coinduced-topology
-- characterisation and is unfinished (`sorry`).
lemma continuous_flip {A B C: Type} [topological_space A] [topological_space B] [topological_space C]
(f : A → B → C) (hf : continuous f) : continuous (flip f) :=
begin
  --rw continuous_iff_continuous_at,
  --replace hf := (continuous_iff_continuous_at.mp hf),
  --intros x s hs,
  rw continuous_iff_coinduced_le,
  rw continuous_iff_coinduced_le at hf,
  intros x hx,
  sorry,
end
|
{"author": "ramonfmir", "repo": "lean-experiments", "sha": "041c8727bb540fb8d1519c1ad84924d473885c27", "save_path": "github-repos/lean/ramonfmir-lean-experiments", "path": "github-repos/lean/ramonfmir-lean-experiments/lean-experiments-041c8727bb540fb8d1519c1ad84924d473885c27/src/mlv/differentiable_ltl/other/continuous.lean"}
|
import numpy as np
import torch
import torch.nn.functional as F
def compute_hist(prediction, gt, n_classes, ignore_label):
    """Accumulate a class confusion histogram for one batch.

    Upsamples ``prediction`` logits to the ground-truth resolution,
    takes per-pixel argmax, and bins (pred, gt) pairs into an
    (n_classes, n_classes) histogram, skipping ignored pixels.

    Returns:
        (hist, correct_pixels, valid_pixels): histogram indexed
        [predicted, ground-truth], count of diagonal (correct) pixels,
        and count of non-ignored pixels.
    """
    _, _, height, width = gt.size()
    upsampled = F.interpolate(prediction, (height, width), mode='bilinear',
                              align_corners=True)
    pred_flat = torch.argmax(upsampled, dim=1).flatten().cpu().numpy()
    gt_flat = gt.flatten().cpu().numpy()
    keep = gt_flat != ignore_label
    # Encode each (pred, gt) pair as a single bin index.
    combined = pred_flat[keep] * n_classes + gt_flat[keep]
    hist = np.bincount(combined, minlength=n_classes ** 2)
    hist = hist.reshape((n_classes, n_classes))
    correct_pixels = np.diag(hist).sum()
    valid_pixels = keep.sum()
    return hist, correct_pixels, valid_pixels
def compute_angle(prediction, gt, ignore_label):
    """Per-pixel angular error (degrees) between predicted and GT vectors.

    Both tensors are treated as 3-channel direction fields (e.g. surface
    normals); pixels where all three ground-truth channels equal
    ``ignore_label`` are skipped.

    Returns:
        numpy array of angles in degrees, one per valid pixel.
    """
    _, _, height, width = gt.size()
    resized = F.interpolate(prediction, (height, width), mode='bilinear',
                            align_corners=True)
    # (N, 3, H, W) -> (N*H*W, 3): one 3-vector per pixel.
    pred_vecs = resized.permute(0, 2, 3, 1).contiguous().view(-1, 3)
    gt_vecs = gt.permute(0, 2, 3, 1).contiguous().view(-1, 3)
    # Keep pixels where not all three channels are the ignore value.
    valid = ((gt_vecs == ignore_label).sum(dim=1) - 3).nonzero().squeeze()
    cos_sim = F.cosine_similarity(gt_vecs[valid], pred_vecs[valid])
    # Clamp into arccos's domain before converting to degrees.
    cos_sim = np.clip(cos_sim.cpu().numpy(), -1.0, 1.0)
    angles = np.arccos(cos_sim) / np.pi * 180.0
    return angles
def count_correct(output, target, topk=(1,)):
    """Count, for each k in ``topk``, how many targets are in the top-k predictions.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        list of 1-element float tensors, one per k, each holding the number
        of samples whose target appears among the k highest-scored classes.
    """
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Bug fix: use reshape(-1) instead of view(-1). The comparison
            # above can produce a non-contiguous tensor (the target is
            # expanded), and view() raises "view size is not compatible
            # with input tensor's size and stride" on PyTorch >= 1.7
            # whenever maxk > 1.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k)
        return res
|
{"hexsha": "27b3c929b38a1ec10362ce81c75659dc6f8bff37", "size": 1842, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/utils/metrics.py", "max_stars_repo_name": "WZzhaoyi/MTLNAS", "max_stars_repo_head_hexsha": "c04fcce1437eef306a41a6a224551be99d88f9a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 86, "max_stars_repo_stars_event_min_datetime": "2020-04-04T03:37:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T07:36:24.000Z", "max_issues_repo_path": "core/utils/metrics.py", "max_issues_repo_name": "hengxyz/MTLNAS", "max_issues_repo_head_hexsha": "c04fcce1437eef306a41a6a224551be99d88f9a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-04-05T15:09:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-08T21:12:35.000Z", "max_forks_repo_path": "core/utils/metrics.py", "max_forks_repo_name": "hengxyz/MTLNAS", "max_forks_repo_head_hexsha": "c04fcce1437eef306a41a6a224551be99d88f9a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2020-04-07T03:27:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T09:13:27.000Z", "avg_line_length": 37.5918367347, "max_line_length": 88, "alphanum_fraction": 0.6454940282, "include": true, "reason": "import numpy", "num_tokens": 499}
|
#ifndef NTFMT_FLOAT_HPP_
#define NTFMT_FLOAT_HPP_
#include "ntfmt_fwd.hpp"
#include "ntfmt.hpp"
#ifndef NTFMT_PRINT_FLOAT_BUFFER_SIZE
#ifdef BOOST_PLATFORM_CONFIG
#define NTFMT_PRINT_FLOAT_BUFFER_SIZE std::numeric_limits<T>::max_exponent
#else
#define NTFMT_PRINT_FLOAT_BUFFER_SIZE 24
#endif
#endif
#include <string.h>
#include <float.h>
#include <cmath>
#ifndef NTFMT_NO_BOOST
#include <boost/integer.hpp>
#include <boost/type_traits/integral_promotion.hpp>
#endif
namespace ntfmt {
namespace detail {
// Pull the floating-point helpers used by the formatter into ntfmt::detail.
// Dinkumware (old MSVC) runtimes predate the C99/C++11 classification
// functions, so emulate them with the CRT _finite/_isnan/_nextafter/
// _copysign family there; everywhere else use the std::/C99 versions.
#ifdef BOOST_DINKUMWARE_STDLIB
		using ::fabs;
		using ::floor;
		using ::ceil;
		using ::fmod;
		using ::log;
		using ::log10;
		using ::pow;
		inline bool isinf(double d) { return !::_finite(d); }
		inline bool isinf(long double d) { return !::_finite((double)d); }
		inline bool isnan(double d) { return !!::_isnan(d); }
		inline bool isnan(long double d) { return !!::_isnan((double)d); }
		inline double nextafter(double x, double y) { return ::_nextafter(x, y); }
		inline long double nextafter(long double x, long double y) { return ::_nextafter((double)x, (double)y); }
		// Bug fix: copysign takes TWO arguments (magnitude, sign source).
		// The previous one-argument wrappers called ::_copysign(x) with a
		// single argument — ::_copysign requires two — and could not satisfy
		// the two-argument call site in float_printer, so this branch failed
		// to compile.
		inline double copysign(double x, double y) { return ::_copysign(x, y); }
		inline long double copysign(long double x, long double y) { return ::_copysign((double)x, (double)y); }
#else
		using std::fabs;
		using std::floor;
		using std::ceil;
		using std::isinf;
		using std::isnan;
		using std::fmod;
		using std::log;
		using std::log10;
		using std::pow;
		using ::copysign;
		using ::nextafter;
#endif

		// Generic strncpy over the character type (char / wchar_t).
		template <typename charT>
		inline charT *gstrncpy(charT *const, charT const *const, size_t);
		template <>
		inline char *gstrncpy<char>(char *const dst, char const *const src, size_t lim) { return strncpy(dst, src, lim); }
		template <>
		inline wchar_t *gstrncpy<wchar_t>(wchar_t *const dst, wchar_t const *const src, size_t lim) { return wcsncpy(dst, src, lim); }

		// Nearest representable neighbours of v, towards -inf / +inf.
		template <typename T>
		inline T get_predecessor(T const &v) { return nextafter(v, -numeric_limits<T>::infinity()); }
		template <typename T>
		inline T get_successor(T const &v) { return nextafter(v, +numeric_limits<T>::infinity()); }

		// Digit character for (integer part of v) modulo `base`.
		template <typename charT, typename FloatT>
		inline charT to_hexstr(FloatT const v, unsigned const base, bool const capital, typename enable_if< is_floating_point<typename decay<FloatT>::type> >::type * = 0) {
			return hexstr<charT>::str(capital)[static_cast<unsigned>(fmod(v, static_cast<FloatT>(base)))];
		}
		// "nan"/"NAN" literals per character type; str(capital) selects case.
		template <typename charT> struct nan_str;
		template <>
		struct nan_str<char> {
			static char const (&str(bool const capital))[4] {
				static char const s[][4] = { "nan", "NAN" };
				return s[capital];
			}
		};
		template <>
		struct nan_str<wchar_t> {
			static wchar_t const (&str(bool const capital))[4] {
				static wchar_t const s[][4] = { L"nan", L"NAN" };
				return s[capital];
			}
		};
		// "inf"/"INF" literals per character type; str(capital) selects case.
		template <typename charT> struct inf_str;
		template <>
		struct inf_str<char> {
			static char const (&str(bool const capital))[4] {
				static char const s[][4] = { "inf", "INF" };
				return s[capital];
			}
		};
		template <>
		struct inf_str<wchar_t> {
			static wchar_t const (&str(bool const capital))[4] {
				static wchar_t const s[][4] = { L"inf", L"INF" };
				return s[capital];
			}
		};
		// Increment a digit string in-place at `col`, carrying leftwards when
		// the digit wraps past base-1 (used to round a dtoa digit buffer up).
		template <typename charT>
		inline void inc_strnum(charT *const strnum, unsigned const base, unsigned const col, bool const capital) {
			if (strnum[col]++ == to_hexstr<charT>(base-1, base, capital)) {
				strnum[col] = to_hexstr<charT>(0, base, capital);
				inc_strnum(strnum, base, col - 1, capital);
			}
		}
		// base**e as a floating value of type T.
		template <typename T>
		inline T ipow(unsigned const base, int e) {
			return pow(static_cast<T>(base), e);
		}
		// Number of integer digits of v in `base`: 1 + floor(log_base v).
		// Infinity is clamped to the largest finite T; v == 0 maps to 0.
		template <typename T>
		inline int ilog(unsigned const base, T v) {
			if (isinf(v)) return ilog(base, std::numeric_limits<T>::max());
			if (v==0) return 0;
			return static_cast<int>(floor( (base==10) ? log10(v) : (log(v)/log(static_cast<T>(base))) )) + 1;
		}
		namespace detail {
#ifndef NTFMT_NO_BOOST
			using boost::int_max_value_t;
			using boost::int_min_value_t;
			using boost::integral_promotion;
#else
			// Boost-free fallback: smallest unsigned type able to hold N.
			template <unsigned long long N>
			struct int_max_value_t {
				typedef
					typename if_c<(N<=UCHAR_MAX), unsigned char,
					typename if_c<(N<=USHRT_MAX), unsigned short,
					typename if_c<(N<=UINT_MAX), unsigned int,
					typename if_c<(N<=ULONG_MAX), unsigned long,
					typename if_c<(N<=ULONG_LONG_MAX), unsigned long long,
					void>::type>::type>::type>::type>::type type;
				typedef type least;
				typedef type fast;
			};
			// Boost-free fallback: smallest signed type able to hold N.
			template <long long N>
			struct int_min_value_t {
				typedef
					typename if_c<(N>=SCHAR_MIN), signed char,
					typename if_c<(N>=SHRT_MIN), signed short,
					typename if_c<(N>=INT_MIN), signed int,
					typename if_c<(N>=LONG_MIN), signed long,
					typename if_c<(N>=LONG_LONG_MIN), signed long long,
					void>::type>::type>::type>::type>::type type;
				typedef type least;
				typedef type fast;
			};
			// Result type of integral promotion, via usual arithmetic rules.
			template <typename T>
			struct integral_promotion {
				typedef decltype((T)0+0) type;
			};
#endif
		}
		// Integer type wide enough to hold any exponent of T (either sign),
		// after integral promotion; used as dtoa's return type.
		template <typename T>
		struct dtoa_traits {
			typedef typename detail::int_max_value_t<numeric_limits<T>::max_exponent>::least higher_type;
			typedef typename detail::int_min_value_t<numeric_limits<T>::min_exponent>::least lower_type;
			typedef typename detail::integral_promotion<typename select_larger_type<higher_type, lower_type>::type>::type return_type;
		};
		// Convert |v| to a digit string in flags.radix with `prec` digits,
		// applying round-half-to-even on the digit after the last kept one.
		// On return `out` holds the digits (NUL-terminated) and the result is
		// the decimal-point position (digits before the point), adjusted when
		// rounding carried into a new leading digit.
		template <typename charT, typename T, size_t N>
		typename dtoa_traits<T>::return_type dtoa(charT (&out)[N], T const v, int prec, flags_t const &flags) {
			typedef typename dtoa_traits<T>::return_type return_type;
			unsigned const base = flags.radix;
			bool const capital = flags.capital;
			T const value = fabs(v);
			// NOTE(review): ilog of the successor — presumably to land exact
			// powers of the base in the right digit bucket; confirm.
			return_type const k = ilog(base, get_successor(value));
			prec = ref_clip(0, prec, static_cast<int>(N-3));
			// out[0] is a guard digit: rounding may carry into it.
			out[0] = NTFMT_CHR_ZERO;
			{
				// Scale so the leading digit sits in the ones place, then
				// peel off digits most-significant first.
				T q = (k>0) ? (value / ipow<T>(base, k-1)) : (value * ipow<T>(base, 1-k));
				charT const *const end = array_end(out) - 1;
				for (charT *ptr = array_begin(out) + 1; ptr < end; ++ptr) {
					*ptr = to_hexstr<charT>(q, base, capital);
					q = fmod(q, base) * base;
				}
				// Round half to even on the first dropped digit.
				unsigned f = static_cast<unsigned>(from_hexstr(out[prec+1], base, capital));
				if (f > base/2 || (f == base/2 && !(q == 0 && static_cast<unsigned>(from_hexstr(out[prec], base, capital))%2 == 0))) {
					inc_strnum(out, base, prec, capital);
				}
			}
			// If rounding carried into the guard digit, keep it and shift the
			// reported point position; otherwise drop the guard.
			bool const carried = (out[0] != NTFMT_CHR_ZERO);
			if (!carried) std::copy(out+1, out+prec+1, out);
			out[prec+carried] = 0;
			return ref_min(k+carried, static_cast<return_type>(N-3));
		}
		// Format a floating value into the sink `fn` honouring flags: sign,
		// width/padding, precision, fixed vs exponential notation, radix and
		// capitalisation.  Handles NaN and infinity up front.
		template <typename charT, typename T>
		void float_printer(sink_fn_t<charT> &fn, T const &value, const flags_t &flags) {
			using namespace std;
			if (ntfmt_unlikely(isnan(value))) {
				fn(nan_str<charT>::str(flags.capital));
				return;
			}
			// `head` collects the sign (and later the leading digit in
			// exponential mode).
			charT head[6] = { };
			charT *phead = head;
			// copysign-based test also catches negative zero.
			if (copysign(static_cast<T>(1), value) < 0) *phead++ = NTFMT_CHR_MINUS;
			else if (flags.plus) *phead++ = NTFMT_CHR_PLUS;
			else if (flags.space) *phead++ = NTFMT_CHR_SPACE;
			if (ntfmt_unlikely(isinf(value))) {
				fn(head);
				fn(inf_str<charT>::str(flags.capital));
				return;
			}
			int const prec = flags.prec_enable ? flags.precision : 6;
			int const l = ilog(flags.radix, fabs(value));
			// %g-style choice: exponential when forced, or when the magnitude
			// falls outside [-4, prec] (unless fixed notation was requested).
			bool const exponential = flags.exponential || ( !flags.fixed && ((l < -4) || (l > prec)));
			charT buf[NTFMT_PRINT_FLOAT_BUFFER_SIZE + 8] = { };
			if (exponential) {
				int const e = dtoa<charT, T>(buf, value, prec + flags.exponential, flags);
				// %g mode: strip trailing zeros unless '#' (alter) is set.
				if (!flags.exponential && !flags.alter) {
					charT *p = buf + gstrlen(buf) - 1;
					while (*p == NTFMT_CHR_ZERO) *p-- = 0;
				}
				// Move the leading digit into `head`; reuse buf[0] for the
				// point (only when more digits follow or '#' demands it).
				*phead++ = buf[0];
				buf[0] = buf[1] && (prec!=0 || flags.alter) ? NTFMT_CHR_DOT : 0;
				// +5 accounts for the "e+ddd" exponent suffix below.
				size_t const wid = gstrlen(head) + gstrlen(buf) + 5;
				if (!flags.minus) fill_chr_to(fn, NTFMT_CHR_SPACE, flags.width - wid);
				fn(head);
				fn(buf);
				fn(flags.capital ? NTFMT_CH_LIT('E') : NTFMT_CH_LIT('e'));
				// Exponent rendered as sign + 3 zero-padded digits.
				flags_t ff = default_flags();
				ff.zero = 1;
				ff.prec_enable = 1;
				ff.precision = 3;
				ff.plus = 1;
				integer_printer(fn, static_cast<long>(e-1), ff);
				if (flags.minus) fill_chr_to(fn, NTFMT_CHR_SPACE, flags.width - wid);
			} else {
				int const e = dtoa<charT, T>(buf, value, prec + (flags.fixed ? l : 0), flags);
				if (e < 1) {
					// Value < 1: print "0." then pad leading zeros below.
					*phead++ = NTFMT_CHR_ZERO;
					if (prec!=0) *phead++ = NTFMT_CHR_DOT;
					if (e <= -prec) buf[0] = 0;
				} else {
					// Insert the decimal point after the e-th digit.
					if (prec!=0) {
						charT *const end = &buf[gstrlen(buf)]+1;
						std::rotate(&buf[e], end, end+1);
						buf[e] = NTFMT_CHR_DOT;
					}
					// %g mode: strip trailing zeros and a dangling point.
					if (!flags.fixed && !flags.alter) {
						charT *p = buf + gstrlen(buf) - 1;
						while (*p == NTFMT_CHR_ZERO) *p-- = 0;
						if (*p == NTFMT_CHR_DOT) *p-- = 0;
					}
				}
				// Zeros between "0." and the first significant digit.
				int const lim = e < -prec ? prec : -e;
				size_t const wid = gstrlen(head) + gstrlen(buf) + lim + (e>=1 ? e : 0);
				if (!flags.minus) fill_chr_to(fn, NTFMT_CHR_SPACE, flags.width - wid);
				fn(head);
				fill_chr_to(fn, NTFMT_CHR_ZERO, lim);
				fn(buf);
				if (flags.minus) fill_chr_to(fn, NTFMT_CHR_SPACE, flags.width - wid);
			}
		}
}
}
#endif
|
{"hexsha": "8bd442bd9b410e5540a10e1f17919f5173b605f7", "size": 9396, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ntfmt_float.hpp", "max_stars_repo_name": "kikairoya/ntfmt", "max_stars_repo_head_hexsha": "17899285d87bddaf90ea64a7203f32e3881ba3b6", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2015-05-01T10:15:01.000Z", "max_stars_repo_stars_event_max_datetime": "2016-04-13T03:59:33.000Z", "max_issues_repo_path": "ntfmt_float.hpp", "max_issues_repo_name": "kikairoya/ntfmt", "max_issues_repo_head_hexsha": "17899285d87bddaf90ea64a7203f32e3881ba3b6", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ntfmt_float.hpp", "max_forks_repo_name": "kikairoya/ntfmt", "max_forks_repo_head_hexsha": "17899285d87bddaf90ea64a7203f32e3881ba3b6", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.191011236, "max_line_length": 167, "alphanum_fraction": 0.6306939123, "num_tokens": 2826}
|
From algebra Require Export cmra.
From algebra Require Import upred.
Local Hint Extern 10 (_ ≤ _) => omega.
(* Agreement construction: at each step index n the element carries a value
   (agree_car n) plus a validity predicate; validity is downward closed in
   the step index (agree_valid_S). *)
Record agree (A : Type) : Type := Agree {
  agree_car :> nat → A;
  agree_is_valid : nat → Prop;
  agree_valid_S n : agree_is_valid (S n) → agree_is_valid n
}.
Arguments Agree {_} _ _ _.
Arguments agree_car {_} _ _.
Arguments agree_is_valid {_} _ _.
Section agree.
Context {A : cofeT}.
(* Validity at step n: the carrier is valid at n and the value sequence is
   coherent (n'-equal to x n) at every smaller index. *)
Instance agree_validN : ValidN (agree A) := λ n x,
  agree_is_valid x n ∧ ∀ n', n' ≤ n → x n ≡{n'}≡ x n'.
Instance agree_valid : Valid (agree A) := λ x, ∀ n, ✓{n} x.
Lemma agree_valid_le n n' (x : agree A) :
agree_is_valid x n → n' ≤ n → agree_is_valid x n'.
Proof. induction 2; eauto using agree_valid_S. Qed.
Instance agree_equiv : Equiv (agree A) := λ x y,
(∀ n, agree_is_valid x n ↔ agree_is_valid y n) ∧
(∀ n, agree_is_valid x n → x n ≡{n}≡ y n).
Instance agree_dist : Dist (agree A) := λ n x y,
(∀ n', n' ≤ n → agree_is_valid x n' ↔ agree_is_valid y n') ∧
(∀ n', n' ≤ n → agree_is_valid x n' → x n' ≡{n'}≡ y n').
Program Instance agree_compl : Compl (agree A) := λ c,
{| agree_car n := c n n; agree_is_valid n := agree_is_valid (c n) n |}.
Next Obligation.
intros c n ?. apply (chain_cauchy c n (S n)), agree_valid_S; auto.
Qed.
Definition agree_cofe_mixin : CofeMixin (agree A).
Proof.
split.
- intros x y; split.
+ by intros Hxy n; split; intros; apply Hxy.
+ by intros Hxy; split; intros; apply Hxy with n.
- split.
+ by split.
+ by intros x y Hxy; split; intros; symmetry; apply Hxy; auto; apply Hxy.
+ intros x y z Hxy Hyz; split; intros n'; intros.
* trans (agree_is_valid y n'). by apply Hxy. by apply Hyz.
* trans (y n'). by apply Hxy. by apply Hyz, Hxy.
- intros n x y Hxy; split; intros; apply Hxy; auto.
- intros n c; apply and_wlog_r; intros;
symmetry; apply (chain_cauchy c); naive_solver.
Qed.
Canonical Structure agreeC := CofeT agree_cofe_mixin.
Lemma agree_car_ne n (x y : agree A) : ✓{n} x → x ≡{n}≡ y → x n ≡{n}≡ y n.
Proof. by intros [??] Hxy; apply Hxy. Qed.
Lemma agree_cauchy n (x : agree A) i : ✓{n} x → i ≤ n → x n ≡{i}≡ x i.
Proof. by intros [? Hx]; apply Hx. Qed.
(* Composition keeps the left carrier; the result is valid at n only when
   both operands are valid at n and agree at n. *)
Program Instance agree_op : Op (agree A) := λ x y,
  {| agree_car := x;
     agree_is_valid n := agree_is_valid x n ∧ agree_is_valid y n ∧ x ≡{n}≡ y |}.
Next Obligation. naive_solver eauto using agree_valid_S, dist_S. Qed.
(* Core is the identity and division is trivial: agreement is idempotent. *)
Instance agree_core : Core (agree A) := id.
Instance agree_div : Div (agree A) := λ x y, x.
Instance: Comm (≡) (@op (agree A) _).
Proof. intros x y; split; [naive_solver|by intros n (?&?&Hxy); apply Hxy]. Qed.
Lemma agree_idemp (x : agree A) : x ⋅ x ≡ x.
Proof. split; naive_solver. Qed.
Instance: ∀ n : nat, Proper (dist n ==> impl) (@validN (agree A) _ n).
Proof.
intros n x y Hxy [? Hx]; split; [by apply Hxy|intros n' ?].
rewrite -(proj2 Hxy n') -1?(Hx n'); eauto using agree_valid_le.
symmetry. by apply dist_le with n; try apply Hxy.
Qed.
Instance: ∀ x : agree A, Proper (dist n ==> dist n) (op x).
Proof.
intros n x y1 y2 [Hy' Hy]; split; [|done].
split; intros (?&?&Hxy); repeat (intro || split);
try apply Hy'; eauto using agree_valid_le.
- etrans; [apply Hxy|apply Hy]; eauto using agree_valid_le.
- etrans; [apply Hxy|symmetry; apply Hy, Hy'];
eauto using agree_valid_le.
Qed.
Instance: Proper (dist n ==> dist n ==> dist n) (@op (agree A) _).
Proof. by intros n x1 x2 Hx y1 y2 Hy; rewrite Hy !(comm _ _ y2) Hx. Qed.
Instance: Proper ((≡) ==> (≡) ==> (≡)) op := ne_proper_2 _.
Instance: Assoc (≡) (@op (agree A) _).
Proof.
intros x y z; split; simpl; intuition;
repeat match goal with H : agree_is_valid _ _ |- _ => clear H end;
by cofe_subst; rewrite !agree_idemp.
Qed.
Lemma agree_included (x y : agree A) : x ≼ y ↔ y ≡ x ⋅ y.
Proof.
split; [|by intros ?; exists y].
by intros [z Hz]; rewrite Hz assoc agree_idemp.
Qed.
Lemma agree_op_inv n (x1 x2 : agree A) : ✓{n} (x1 ⋅ x2) → x1 ≡{n}≡ x2.
Proof. intros Hxy; apply Hxy. Qed.
Lemma agree_valid_includedN n (x y : agree A) : ✓{n} y → x ≼{n} y → x ≡{n}≡ y.
Proof.
move=> Hval [z Hy]; move: Hval; rewrite Hy.
by move=> /agree_op_inv->; rewrite agree_idemp.
Qed.
Definition agree_cmra_mixin : CMRAMixin (agree A).
Proof.
split; try (apply _ || done).
- by intros n x1 x2 Hx y1 y2 Hy.
- intros n x [? Hx]; split; [by apply agree_valid_S|intros n' ?].
rewrite -(Hx n'); last auto.
symmetry; apply dist_le with n; try apply Hx; auto.
- intros x; apply agree_idemp.
- by intros n x y [(?&?&?) ?].
- by intros x y; rewrite agree_included.
- intros n x y1 y2 Hval Hx; exists (x,x); simpl; split.
+ by rewrite agree_idemp.
+ by move: Hval; rewrite Hx; move=> /agree_op_inv->; rewrite agree_idemp.
Qed.
Canonical Structure agreeR : cmraT := CMRAT agree_cofe_mixin agree_cmra_mixin.
Program Definition to_agree (x : A) : agree A :=
{| agree_car n := x; agree_is_valid n := True |}.
Solve Obligations with done.
Global Instance to_agree_ne n : Proper (dist n ==> dist n) to_agree.
Proof. intros x1 x2 Hx; split; naive_solver eauto using @dist_le. Qed.
Global Instance to_agree_proper : Proper ((≡) ==> (≡)) to_agree := ne_proper _.
Global Instance to_agree_inj n : Inj (dist n) (dist n) (to_agree).
Proof. by intros x y [_ Hxy]; apply Hxy. Qed.
Lemma to_agree_car n (x : agree A) : ✓{n} x → to_agree (x n) ≡{n}≡ x.
Proof. intros [??]; split; naive_solver eauto using agree_valid_le. Qed.
(** Internalized properties *)
Lemma agree_equivI {M} a b : (to_agree a ≡ to_agree b)%I ≡ (a ≡ b : uPred M)%I.
Proof.
uPred.unseal. do 2 split. by intros [? Hv]; apply (Hv n). apply: to_agree_ne.
Qed.
Lemma agree_validI {M} x y : ✓ (x ⋅ y) ⊑ (x ≡ y : uPred M).
Proof. uPred.unseal; split=> r n _ ?; by apply: agree_op_inv. Qed.
End agree.
Arguments agreeC : clear implicits.
Arguments agreeR : clear implicits.
(* Functorial action on the carrier; the validity predicate is unchanged. *)
Program Definition agree_map {A B} (f : A → B) (x : agree A) : agree B :=
  {| agree_car n := f (x n); agree_is_valid := agree_is_valid x |}.
Solve Obligations with auto using agree_valid_S.
(* agree_map respects identity and composition (definitionally). *)
Lemma agree_map_id {A} (x : agree A) : agree_map id x = x.
Proof. by destruct x. Qed.
Lemma agree_map_compose {A B C} (f : A → B) (g : B → C) (x : agree A) :
  agree_map (g ∘ f) x = agree_map g (agree_map f x).
Proof. done. Qed.
Section agree_map.
  Context {A B : cofeT} (f : A → B) `{Hf: ∀ n, Proper (dist n ==> dist n) f}.
  (* Mapping a non-expansive f is itself non-expansive (hence proper). *)
  Instance agree_map_ne n : Proper (dist n ==> dist n) (agree_map f).
  Proof. by intros x1 x2 Hx; split; simpl; intros; [apply Hx|apply Hf, Hx]. Qed.
  Instance agree_map_proper : Proper ((≡) ==> (≡)) (agree_map f) := ne_proper _.
  (* Pointwise-equal functions induce equal maps. *)
  Lemma agree_map_ext (g : A → B) x :
    (∀ x, f x ≡ g x) → agree_map f x ≡ agree_map g x.
  Proof. by intros Hfg; split; simpl; intros; rewrite ?Hfg. Qed.
  (* agree_map preserves validity and inclusion, i.e. it is CMRA-monotone. *)
  Global Instance agree_map_monotone : CMRAMonotone (agree_map f).
  Proof.
    split; first apply _.
    - by intros n x [? Hx]; split; simpl; [|by intros n' ?; rewrite Hx].
    - intros x y; rewrite !agree_included=> ->.
      split; last done; split; simpl; last tauto.
      by intros (?&?&Hxy); repeat split; intros;
        try apply Hxy; try apply Hf; eauto using @agree_valid_le.
  Qed.
End agree_map.
(* Lift agree_map to a non-expansive cofe morphism, and show the lifting is
   itself non-expansive in the morphism argument. *)
Definition agreeC_map {A B} (f : A -n> B) : agreeC A -n> agreeC B :=
  CofeMor (agree_map f : agreeC A → agreeC B).
Instance agreeC_map_ne A B n : Proper (dist n ==> dist n) (@agreeC_map A B).
Proof.
  intros f g Hfg x; split; simpl; intros; first done.
  by apply dist_le with n; try apply Hfg.
Qed.
(* Package agree over a cofe functor F as a resource functor: the obligations
   show functor laws (ne, id, compose), and contractiveness is inherited. *)
Program Definition agreeRF (F : cFunctor) : rFunctor := {|
  rFunctor_car A B := agreeR (cFunctor_car F A B);
  rFunctor_map A1 A2 B1 B2 fg := agreeC_map (cFunctor_map F fg)
|}.
Next Obligation.
  intros ? A1 A2 B1 B2 n ???; simpl. by apply agreeC_map_ne, cFunctor_ne.
Qed.
Next Obligation.
  intros F A B x; simpl. rewrite -{2}(agree_map_id x).
  apply agree_map_ext=>y. by rewrite cFunctor_id.
Qed.
Next Obligation.
  intros F A1 A2 A3 B1 B2 B3 f g f' g' x; simpl. rewrite -agree_map_compose.
  apply agree_map_ext=>y; apply cFunctor_compose.
Qed.
Instance agreeRF_contractive F :
  cFunctorContractive F → rFunctorContractive (agreeRF F).
Proof.
  intros ? A1 A2 B1 B2 n ???; simpl.
  by apply agreeC_map_ne, cFunctor_contractive.
Qed.
|
{"author": "amintimany", "repo": "iris-with-logrel-backup", "sha": "9e98ff8be4b4ca516a497d328aaf31cbae186a6c", "save_path": "github-repos/coq/amintimany-iris-with-logrel-backup", "path": "github-repos/coq/amintimany-iris-with-logrel-backup/iris-with-logrel-backup-9e98ff8be4b4ca516a497d328aaf31cbae186a6c/algebra/agree.v"}
|
/*
enum Tokens__
{
STRING = 257,
BOOLEAN = 258,
INTEGER = 259,
DOUBLE = 260,
NIL = 261,
LAMBDA = 262,
REGEX = 263,
LCB = 264,
RCB = 265,
LB = 266,
RB = 267,
COMMA = 268,
COLON = 269,
};
*/
%baseclass-header = "JSONLexerbase.h"
%class-header = "JSONLexer.h"
%implementation-header = "JSONLexerimpl.h"
%class-name = "JSONLexer"
%lex-source = "JSONLexer.cpp"
%namespace = "zpt"
//%debug
%no-lines
%x string string_single escaped unicode number regexp
%%
[\n\r\f \t]+ // skip white space
"true" return 258;
"false" return 258;
"null" return 261;
"undefined" return 261;
lambda\(([^\)]+)\) return 262;
\{ return 264;
\} return 265;
\[ return 266;
\] return 267;
\, return 268;
\: return 269;
[\-0-9] {
more();
begin(StartCondition_::number);
}
\" {
begin(StartCondition_::string);
}
\' {
begin(StartCondition_::string_single);
}
\/ {
begin(StartCondition_::regexp);
}
<number>{
[0-9\.e\+]* {
begin(StartCondition_::INITIAL);
if (matched().find(".") != std::string::npos || matched().find("e+") != std::string::npos) {
return 260;
}
else {
return 259;
}
}
}
<string>{
\" {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
begin(StartCondition_::INITIAL);
return 257;
}
\\ {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
more();
d_intermediate_state = StartCondition_::string;
begin(StartCondition_::escaped);
}
[^\\"] {
more();
}
}
<string_single>{
\' {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
begin(StartCondition_::INITIAL);
return 257;
}
\\ {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
more();
d_intermediate_state = StartCondition_::string_single;
begin(StartCondition_::escaped);
}
[^\\'] {
more();
}
}
<regexp>{
\/ {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
begin(StartCondition_::INITIAL);
return 263;
}
\\ {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
more();
d_intermediate_state = StartCondition_::regexp;
begin(StartCondition_::escaped);
}
[^\\/] {
more();
}
}
<escaped> {
n {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
_out.insert(_out.length(), "\n");
setMatched(_out);
more();
begin(d_intermediate_state);
}
t {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
_out.insert(_out.length(), "\t");
setMatched(_out);
more();
begin(d_intermediate_state);
}
r {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
_out.insert(_out.length(), "\r");
setMatched(_out);
more();
begin(d_intermediate_state);
}
f {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
_out.insert(_out.length(), "\f");
setMatched(_out);
more();
begin(d_intermediate_state);
}
u {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
setMatched(_out);
more();
begin(StartCondition_::unicode);
}
\\ {
std::string _out(matched());
_out.erase(_out.length() - 1, 1);
_out.insert(_out.length(), "\\");
setMatched(_out);
more();
begin(d_intermediate_state);
}
[^\\ntrfu] {
more();
begin(d_intermediate_state);
}
}
<unicode> {
.{4} {
std::string _out(matched());
std::stringstream ss;
ss << _out[_out.length() - 4] << _out[_out.length() - 3] << _out[_out.length() - 2] << _out[_out.length() - 1];
int c;
ss >> std::hex >> c;
wchar_t w = (wchar_t) c;
std::string dest("");
if (w <= 0x7f) {
dest.insert(dest.begin(), w);
}
else if (w <= 0x7ff) {
dest.insert(dest.end(), 0xc0 | ((w >> 6) & 0x1f));
dest.insert(dest.end(), 0x80 | (w & 0x3f));
}
else if (w <= 0xffff) {
dest.insert(dest.end(), 0xe0 | ((w >> 12) & 0x0f));
dest.insert(dest.end(), 0x80 | ((w >> 6) & 0x3f));
dest.insert(dest.end(), 0x80 | (w & 0x3f));
}
else if (w <= 0x10ffff) {
dest.insert(dest.end(), 0xf0 | ((w >> 18) & 0x07));
dest.insert(dest.end(), 0x80 | ((w >> 12) & 0x3f));
dest.insert(dest.end(), 0x80 | ((w >> 6) & 0x3f));
dest.insert(dest.end(), 0x80 | (w & 0x3f));
}
else {
dest.insert(dest.end(), '?');
}
_out.assign(_out.substr(0, _out.length() - 4));
_out.insert(_out.length(), dest);
setMatched(_out);
more();
begin(d_intermediate_state);
}
}
|
{"hexsha": "fbdc3715511d09c79b198c5217055089196b33b5", "size": 4529, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "parsers/json/src/json/JSON.f", "max_stars_repo_name": "naazgull/zapata", "max_stars_repo_head_hexsha": "e5734ff88a17b261a2f4547fa47f01dbb1a69d84", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-08-10T16:51:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-08T22:07:47.000Z", "max_issues_repo_path": "parsers/json/src/json/JSON.f", "max_issues_repo_name": "naazgull/zapata", "max_issues_repo_head_hexsha": "e5734ff88a17b261a2f4547fa47f01dbb1a69d84", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2015-02-25T15:16:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T15:58:15.000Z", "max_forks_repo_path": "parsers/json/src/json/JSON.f", "max_forks_repo_name": "naazgull/zapata", "max_forks_repo_head_hexsha": "e5734ff88a17b261a2f4547fa47f01dbb1a69d84", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-01-13T14:39:21.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-24T06:48:09.000Z", "avg_line_length": 19.9515418502, "max_line_length": 113, "alphanum_fraction": 0.5758445573, "num_tokens": 1559}
|
# --------------------------------
# Name: DensityToVector.py
# Purpose: This script is intended to aid the network/vector analysis process by computing weighted kernel densities on
#          a list of incoming fields, which represent weights for the KDE estimation. These estimations are then joined back to
# a network feature class. If provided a list of numbers, the tool will compute percentiles of the input densities
# and will add percentile scores to the input densities for non-zero/non-null values.
# Current Owner: David Wasserman
# Last Modified: 4/16/2021
# Copyright: David Wasserman
# ArcGIS Version: ArcGIS Pro/10.4
# Python Version: 3.5/2.7
# --------------------------------
# Copyright 2017 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import arcpy
import numpy as np
import SharedArcNumericalLib as san
try:
import pandas as pd
except:
arcpy.AddError("This library requires Pandas installed in the ArcGIS Python Install."
" Might require installing pre-requisite libraries and software.")
try:
arcpy.CheckExtension("Spatial")
except:
arcpy.AddError("This tool requires spatial analyst to function. Please add SA.")
# Function Definitions
def density_to_vector(in_fc, weighted_fields, input_network, percentile_bool=True, field_edit="", cell_size=500,
                      search_radius=800, area_unit="SQUARE_MILES", sample_percentage=25, group_by_statistic="MEAN"):
    """Compute weighted kernel densities and join them onto a target
    network/vector feature class.

    For each field in weighted_fields a KDE raster is computed, sampled at
    points generated along input_network, aggregated per feature with
    group_by_statistic, and the result is extended onto input_network. If
    percentile_bool is True, a percentile-rank field is added alongside each
    density field.

    Parameters
    -----------------
    in_fc: input feature class whose features (weighted by each field) drive
        the kernel density estimation
    weighted_fields: fields to use as weights for the KDE
    percentile_bool: if True, adds percentile-rank fields next to densities
    input_network: point/polyline/polygon file that receives density values
    field_edit: string prepended to generated field names
    cell_size: cell size of the KDE raster
    search_radius: search radius/bandwidth of the KDE
    area_unit: area unit of the output raster
    sample_percentage: spacing of sample points along polylines; a value of
        10 yields one point every 10% of the line, including start and end
        points
    group_by_statistic: statistic used to aggregate multiple sample points
        per feature (e.g. "MEAN")
    """
    try:
        arcpy.env.overwriteOutput = True
        # Start Analysis
        desc = arcpy.Describe(input_network)
        work_space = desc.catalogPath
        # All intermediates live in the in-memory workspace.
        arcpy.env.scratchWorkspace = "memory"
        temp_out_sample = "memory/sample_points_out"
        temp_sample_points = "memory/sample_points"
        temp_input_layer = "Temp_Input_Layer"
        join_field = "JNField"
        join_index = "JNindex"
        # Copy the OID into a stable join field: ExtendTable cannot join
        # directly on the OID field.
        oid_field = str(desc.OIDFieldName)
        san.add_new_field(input_network, join_field, "LONG")
        arcpy.CalculateField_management(input_network, join_field, "!{0}!".format(oid_field), "PYTHON")
        san.arc_print("Generating sample points from feature class in memory...")
        san.generate_sample_points(input_network, temp_sample_points, int(sample_percentage))
        final_df = None
        for field in weighted_fields:
            san.arc_print("Computing density for field {0}...".format(field))
            # Layer filtered to non-null weight values.
            # NOTE(review): temp_input_layer is created but KernelDensity
            # below is run on in_fc, not the filtered layer — confirm this
            # is intentional.
            arcpy.MakeFeatureLayer_management(in_fc, temp_input_layer,
                                              san.construct_sql_equality_query(field, None, work_space,
                                                                               noneEqualityOperator="is not"))
            output_kde = arcpy.sa.KernelDensity(in_fc, str(field), cell_size, search_radius, area_unit)
            # Sample the KDE raster at the network sample points.
            arcpy.sa.ExtractValuesToPoints(temp_sample_points, output_kde, temp_out_sample, True)
            raw_sample_df = san.arcgis_table_to_dataframe(temp_out_sample, [join_field, "RASTERVALU"])
            new_field_name = "DN_" + str(field_edit) + str(field)
            raw_sample_df[new_field_name] = raw_sample_df["RASTERVALU"]
            # Zero densities are treated as no-data so they do not skew
            # aggregation or percentile ranks.
            raw_sample_df[new_field_name].replace([0], np.NaN, inplace=True)
            # Aggregate the per-point samples back to one row per feature.
            raw_sample_df = raw_sample_df.groupby(join_field).agg(str(group_by_statistic).lower())
            if percentile_bool:
                new_percentile_field = "Per_" + str(field_edit) + str(field)
                # rank(pct=True) ignores NaN, so zero/no-data densities get
                # no percentile score.
                raw_sample_df[new_percentile_field] = raw_sample_df[new_field_name].rank(pct=True)
            field_list = [new_field_name, new_percentile_field] if percentile_bool else [new_field_name]
            if final_df is not None:
                # Subsequent fields are concatenated column-wise on the
                # shared join-field index.
                final_df = pd.concat([final_df, raw_sample_df[field_list]], axis=1)
            else:
                raw_sample_df[join_index] = raw_sample_df.index
                final_df = raw_sample_df[[join_index] + field_list]
        # NOTE(review): reset_index() is not in-place and its result is
        # discarded, so this line has no effect — presumably harmless
        # because join_index was stored as a column above; verify.
        final_df.reset_index()
        san.arc_print("Extending density fields to table...")
        final_df = san.validate_df_names(final_df, work_space)
        fin_records = final_df.to_records()
        # append_only=False overwrites existing fields of the same name.
        arcpy.da.ExtendTable(input_network, join_field, fin_records, join_index, append_only=False)
        san.arc_print("Deleteing temporary join field...")
        arcpy.DeleteField_management(input_network, [join_field, join_index])
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
# End do_analysis function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
    # Define input parameters. Indices must match the script-tool parameter
    # order configured in ArcGIS.
    input_feature_class = arcpy.GetParameterAsText(0)   # input feature class
    weighted_fields = arcpy.GetParameter(1)             # fields used as KDE weights
    input_network = arcpy.GetParameterAsText(2)         # vector file receiving densities
    percentile_bool = arcpy.GetParameter(3)             # add percentile fields if true
    field_edit = arcpy.GetParameterAsText(4)            # prefix for generated field names
    cell_size = arcpy.GetParameter(5)                   # KDE raster cell size
    search_radius = arcpy.GetParameter(6)               # KDE bandwidth
    area_unit_factor = arcpy.GetParameter(7)            # output raster area unit
    percentage_sample = arcpy.GetParameter(8)           # sample-point spacing (percent)
    group_by_stat = arcpy.GetParameter(9)               # per-feature aggregation statistic
    density_to_vector(input_feature_class, weighted_fields, input_network, bool(percentile_bool), field_edit, cell_size,
                      search_radius, area_unit_factor, percentage_sample, group_by_stat)
|
{"hexsha": "b9142c28f64b9c430683e803c36c8a60855f8b43", "size": 6877, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/DensityToVector.py", "max_stars_repo_name": "d-wasserman/arc-numerical-tools", "max_stars_repo_head_hexsha": "a88ed46c48083dfa615895ecf75e7c1c9c650f97", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scripts/DensityToVector.py", "max_issues_repo_name": "d-wasserman/arc-numerical-tools", "max_issues_repo_head_hexsha": "a88ed46c48083dfa615895ecf75e7c1c9c650f97", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-25T00:58:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T05:44:40.000Z", "max_forks_repo_path": "Scripts/DensityToVector.py", "max_forks_repo_name": "d-wasserman/arc-numerical-tools", "max_forks_repo_head_hexsha": "a88ed46c48083dfa615895ecf75e7c1c9c650f97", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-14T21:44:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-15T22:21:05.000Z", "avg_line_length": 50.197080292, "max_line_length": 124, "alphanum_fraction": 0.6995783045, "include": true, "reason": "import numpy", "num_tokens": 1478}
|
"""Variational priors q(nu)."""
import math
import torch
from torch import nn
from torch import distributions
import numpy as np
import flow
import network.mask
from util import reshape_lattice
class AutoregressivePrior(nn.Module):
    r"""q(\nu; \theta) is the prior on the auxiliary latent variables \nu.

    Raw docstring so the TeX escapes \nu and \theta are kept verbatim
    (in a plain string literal \n and \t would become control characters).
    Built as a stack of autoregressive flow layers applied to a diagonal
    Gaussian base distribution.
    """

    def __init__(self,
                 latent_size,
                 flow_depth,
                 hidden_size,
                 hidden_degrees,
                 activation,
                 reverse,
                 flow_std):
        # latent_size: dimensionality of the flattened latent vector nu.
        # flow_depth: number of stacked autoregressive flow layers.
        # reverse: if True, an order-reversing permutation is appended after
        #     each flow layer so layers condition on different orderings.
        # flow_std: scale of the N(0, flow_std) base distribution and of the
        #     flow layers themselves.
        super().__init__()
        modules = []
        self.latent_size = latent_size
        for flow_num in range(flow_depth):
            module = flow.AutoregressiveSampleAndLogProb(
                num_input=latent_size,
                use_context=False,
                hidden_size=hidden_size,
                hidden_degrees=hidden_degrees,
                activation=activation,
                flow_std=flow_std)
            modules.append(module)
            if reverse:
                modules.append(flow.Reverse(latent_size))
        self.q_nu = flow.FlowSequential(*modules)
        self.q_nu_0 = distributions.Normal(loc=0.0, scale=flow_std)

    def sample_base_distribution(self, num_samples):
        """Draw num_samples vectors from the N(0, flow_std) base distribution."""
        nu_0 = torch.randn((num_samples, self.latent_size),
                           device=next(self.parameters()).device)
        # In-place scale of standard-normal noise by the base std-dev.
        return nu_0.mul_(self.q_nu_0.scale)

    def sample_and_log_prob(self, num_samples):
        """Sample nu and return it with its total log-density.

        Returns the flow output reshaped onto the lattice together with
        log q(nu) = log q_0(nu_0) + log |det| terms from the flow.
        """
        nu_0 = self.sample_base_distribution(num_samples)
        log_q_nu_0 = self.q_nu_0.log_prob(nu_0).sum(-1)
        nu, log_q_nu = self.q_nu(nu_0)
        nu = reshape_lattice(nu)
        return nu, log_q_nu_0 + log_q_nu
class RealNVPPrior(nn.Module):
    r"""q(\nu; \theta) is the prior on the auxiliary latent variables \nu.

    Raw docstring so the TeX escapes \nu and \theta are kept verbatim
    (in a plain string literal \n and \t would become control characters).
    A stack of RealNVP coupling layers with checkerboard splitting, applied
    to a diagonal Gaussian base distribution.
    """

    def __init__(self,
                 latent_shape,
                 flow_depth,
                 hidden_size,
                 flow_std):
        # latent_shape: 2-D lattice shape of the latent variables.
        # flow_depth: number of RealNVP coupling layers.
        # flow_std: scale of the N(0, flow_std) base distribution.
        super().__init__()
        modules = [flow.CheckerSplit(latent_shape)]
        for flow_num in range(flow_depth):
            modules.append(flow.RealNVPPermuteSampleAndLogProb(
                in_channels=1,
                hidden_size=hidden_size,
                # invert mask opposite to prior
                parity=True if flow_num % 2 == 1 else False))
        modules.append(flow.CheckerConcat(latent_shape))
        self.q_nu = flow.RealNVPSequential(*modules)
        self.q_nu_0 = distributions.Normal(loc=0.0, scale=flow_std)
        self.latent_shape = latent_shape

    def sample_base_distribution(self, num_samples):
        """Draw num_samples lattices from the N(0, flow_std) base distribution."""
        nu_0 = torch.randn((num_samples,) + self.latent_shape,
                           device=next(self.parameters()).device)
        # In-place scale of standard-normal noise by the base std-dev.
        return nu_0.mul_(self.q_nu_0.scale)

    def sample_and_log_prob(self, num_samples):
        """Sample nu and return it with its total log-density.

        log q(nu) is the base log-density (summed over the two lattice
        dimensions) plus the flow's log-determinant terms.
        """
        nu_0 = self.sample_base_distribution(num_samples)
        log_q_nu_0 = self.q_nu_0.log_prob(nu_0).sum((1, 2))
        nu, log_q_nu = self.q_nu(nu_0)
        return nu, log_q_nu_0 + log_q_nu
|
{"hexsha": "25f83a5f76bda1e0317272f7bff5e681ca6a8d35", "size": 2758, "ext": "py", "lang": "Python", "max_stars_repo_path": "variational/prior.py", "max_stars_repo_name": "altosaar/hierarchical-variational-models-physics", "max_stars_repo_head_hexsha": "611d91e0281664d7d5ba1679bec7adfb3aac41e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-05-10T20:44:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T23:06:24.000Z", "max_issues_repo_path": "variational/prior.py", "max_issues_repo_name": "altosaar/hierarchical-variational-models-physics", "max_issues_repo_head_hexsha": "611d91e0281664d7d5ba1679bec7adfb3aac41e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "variational/prior.py", "max_forks_repo_name": "altosaar/hierarchical-variational-models-physics", "max_forks_repo_head_hexsha": "611d91e0281664d7d5ba1679bec7adfb3aac41e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2289156627, "max_line_length": 74, "alphanum_fraction": 0.663524293, "include": true, "reason": "import numpy", "num_tokens": 698}
|
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score, silhouette_score
from .base import BaseModel
from ..utils import get_array_counts
class BestKMeans(BaseModel):
    """Fit KMeans models over a range of cluster counts and score each fit.

    One model is fitted per k in [k_min, k_max]; every fit is scored with
    each metric in ``available_metrics`` so the best k can be chosen later.
    """

    sklearn_estimator = KMeans
    # Internal cluster-validation metrics computed for every fitted model.
    available_metrics = {
        "calinski_harabasz": calinski_harabasz_score,
        "davies_bouldin": davies_bouldin_score,
        "silhouette": silhouette_score
    }

    def __init__(self, k_min: int = 2, k_max: int = 6):
        """
        :param k_min: smallest number of clusters to try (inclusive).
        :param k_max: largest number of clusters to try (inclusive).
        """
        super().__init__()
        self._estimator = self.sklearn_estimator
        self._k_min = k_min
        self._k_max = k_max

    def fit(self, X: np.ndarray, y: None = None) -> None:
        """Fit one KMeans model per k and record its validation scores.

        :param X: feature matrix of shape (n_samples, n_features).
        :param y: ignored; present for scikit-learn API compatibility.
        """
        super().fit(X, y)
        self.models_ = {}
        self.scores_ = {}
        for k in range(self._k_min, self._k_max + 1):
            model = self._estimator(n_clusters=k)
            y_pred = model.fit_predict(X)
            self.models_[k] = model
            self.scores_[k] = {
                name: metric(X, y_pred) for name, metric in self.available_metrics.items()
            }

    def is_fitted(self) -> bool:
        """Return True once fit() has populated models_ and scores_.

        Uses getattr with a default: previously this dereferenced
        ``self.models_`` directly, which raises AttributeError when called
        before fit() (unless BaseModel pre-declares the attributes as None
        — TODO confirm against BaseModel).
        """
        if getattr(self, "models_", None) is None or getattr(self, "scores_", None) is None:
            return False
        return True

    def predict(self, X: np.ndarray, k: int, return_counts: bool = False):
        """Predict cluster labels with the model fitted for k clusters.

        :param X: feature matrix of shape (n_samples, n_features).
        :param k: which fitted model to use (a key of models_).
        :param return_counts: if True, return label counts via
            get_array_counts instead of the raw label array.
        :raises KeyError: if no model was fitted for ``k``.
        """
        super().predict(X)
        y_pred = self.models_[k].predict(X)
        if return_counts:
            return get_array_counts(y_pred)
        return y_pred
|
{"hexsha": "ea98a2388dd17fa25e8e0f4083a499a00f90a60b", "size": 1500, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/cluster.py", "max_stars_repo_name": "joshunrau/CognitiveSubtypes", "max_stars_repo_head_hexsha": "a23464c5e66e2f84f28fab5686011eb01f8bb548", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/cluster.py", "max_issues_repo_name": "joshunrau/CognitiveSubtypes", "max_issues_repo_head_hexsha": "a23464c5e66e2f84f28fab5686011eb01f8bb548", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/cluster.py", "max_forks_repo_name": "joshunrau/CognitiveSubtypes", "max_forks_repo_head_hexsha": "a23464c5e66e2f84f28fab5686011eb01f8bb548", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-14T21:37:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T21:37:10.000Z", "avg_line_length": 31.25, "max_line_length": 91, "alphanum_fraction": 0.6293333333, "include": true, "reason": "import numpy", "num_tokens": 381}
|
//
// Copyright (C) 2011 Danny Havenith
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
/**
* Generic instruction decoder classes.
*
* This file defines a template meta function:
* ~~~~{.cpp}
* find_decoder< Implementation, InstructionSet>::type
* ~~~~
*
* This meta function defines a decoder that can recognize uint16_t words as
* being one of the instructions out of the InstructionSet and that can call
* an overload of the `execute()` member function on an instance of the class
* `Implementation`.
*
* The class `Implementation` can be an actual AVR core implementation, but
* also a disassembler or an instruction pre-compiler.
*
* @see find_decoder
*
*/
#ifndef DECODER_HPP_
#define DECODER_HPP_
#include "mpl_with_large_vectors.h"
#include <boost/utility/enable_if.hpp>
#include <boost/cstdint.hpp>
#include "unpack_operand.hpp"
#include "instruction.hpp"
#include "avr_operand_types.hpp"
namespace avrsim
{
namespace mpl = boost::mpl;
typedef boost::uint16_t uint16_t;
/// Tag type used to dispatch to the implementation's handler for
/// instruction words that match no known instruction.
struct UNKNOWN_INSTRUCTION
{
};
/**
* Final instruction decoder that calls an `execute()` overload with the right
* arguments.
*
* This instruction decoder already knows the instruction,
* but now extracts operands from the instruction word and calls the correct
* member function on an implementation.
*
* The correct member function will be `execute( instruction{}, args...)`
* the `instruction` type is used as a tag to tag-dispatch to the right overload
* of `execute()`.
*/
template< typename instruction,
          typename operand_list = typename operands< instruction, instructions::avr_operands>::type,
          unsigned int operand_count = mpl::size< operand_list>::type::value>
struct operand_harvester
{
    // Primary template is intentionally empty: only the specializations for
    // 0, 1 and 2 operands below are usable. An instruction with more
    // operands would fail to compile when decode_and_execute is called.
};
/**
* Specialization of the operand_harvester class for instructions with zero
* operands.
*/
template<typename instruction_tag, typename operand_list>
struct operand_harvester<instruction_tag, operand_list, 0>
{
    /// No operands to extract: dispatch on the instruction tag alone.
    template< typename implementation>
    static void decode_and_execute( implementation &imp, uint16_t instruction)
    {
        imp.execute( instruction_tag());
    }
};
/**
* Specialization of the operand_harvester class for instructions with one
* operand.
*/
template<typename instruction_tag, typename operand_list>
struct operand_harvester<instruction_tag, operand_list, 1>
{
    /// Unpack the single operand (whose kind is the first element of
    /// operand_list) from the instruction word and dispatch on the tag.
    template< typename implementation>
    static void decode_and_execute( implementation &imp, uint16_t instruction)
    {
        imp.execute(
            instruction_tag(),
            unpack< instruction_tag, boost::mpl::at_c<operand_list, 0>::type::value>( instruction)
            );
    }
};
/**
* Specialization of the operand_harvester class for instructions with two
* operands.
*/
template<typename instruction_tag, typename operand_list>
struct operand_harvester<instruction_tag, operand_list, 2>
{
    /// Unpack both operands (kinds given by the first two elements of
    /// operand_list) from the instruction word and dispatch on the tag.
    template< typename implementation>
    static void decode_and_execute( implementation &imp, uint16_t instruction)
    {
        imp.execute(
            instruction_tag(),
            unpack< instruction_tag, boost::mpl::at_c<operand_list, 0>::type::value>( instruction),
            unpack< instruction_tag, boost::mpl::at_c<operand_list, 1>::type::value>( instruction)
            );
    }
};
/**
 * Special case of an instruction decoder that ignores the instruction word
 * and calls the 'unknown instruction' implementation.
 */
struct unknown_instruction_caller
{
    /// Forward the raw instruction word to the implementation's
    /// UNKNOWN_INSTRUCTION overload.
    ///
    /// Declared static: decoder<>::decode_and_execute invokes this through
    /// the qualified name next_decoder::decode_and_execute(imp, instruction)
    /// without an object, which is ill-formed for a non-static member
    /// function. Every other decoder type in this file already declares
    /// this member static.
    template< typename implementation>
    static void decode_and_execute( implementation &imp, uint16_t instruction)
    {
        imp.execute( UNKNOWN_INSTRUCTION(), instruction);
    }
};
/// Forward declaration of the single-bit decoder; it is defined after
/// find_decoder because the two templates are mutually recursive.
template<
    typename implementation,
    typename instructions_with_zero_at_position,
    typename instructions_with_one_at_position,
    int bit
>
struct decoder;
/// Meta function that finds a decoder given an instruction set (or subset)
template<
    typename implementation,
    typename instruction_set,
    int instruction_set_size = boost::mpl::size< instruction_set>::value
>
struct find_decoder
{
    /// This meta function determines if a bit position, given the instruction set, is a discriminating
    /// bit position.
    /// A discriminating bit position is one where all the instructions in the instruction set have a fixed
    /// zero or one value
    /// and where none of the instructions has an operand. Furthermore, it should not be the case that all
    /// the instructions have the same value (zero or one) at the given position.
    /// If a bit position is a discriminating position, then we can look at that bit in an instruction word
    /// and reduce the number of candidate instructions.
    template <int bit>
    struct is_discriminator
    {
        typedef typename mpl::count_if< instruction_set, has_at<bit, 0> >::type zero_count;
        typedef typename mpl::count_if< instruction_set, has_at<bit, 1> >::type one_count;
        typedef typename mpl::count_if< instruction_set, has_operand_at<bit> >::type operand_count;
        // Both values must occur (otherwise the bit cannot split the set)
        // and no instruction may carry an operand at this position.
        typedef mpl::bool_< zero_count::value != 0 && one_count::value != 0 && operand_count::value == 0> type;
    };

    /// meta function to find a discrimination bit position in the given instruction set.
    /// Scans downward from start_bit toward bit 0, returning the first
    /// discriminating position.
    /// the dummy template argument is there to satisfy g++s thing about fully specializing
    /// a template at 'non-namespace scope'.
    template< int start_bit, int dummy = 0>
    struct find_discriminator : mpl::if_<
        typename is_discriminator< start_bit>::type,
        mpl::int_< start_bit>,
        typename find_discriminator< start_bit - 1>::type
    >
    {};

    /// we should never reach this point, but we need it to end recursion.
    template< int dummy>
    struct find_discriminator<-1, dummy> : mpl::identity< mpl::int_<-1> >
    {};

    // Search the full 16-bit instruction word, most significant bit first.
    typedef typename find_discriminator<15>::type discriminator_bit;

    // Partition the instruction set by the value at the discriminating bit.
    typedef typename mpl::copy_if< instruction_set, has_at< discriminator_bit::value, 0>, mpl::front_inserter< mpl::list<> > >::type zeros;
    typedef typename mpl::copy_if< instruction_set, has_at< discriminator_bit::value, 1>, mpl::front_inserter< mpl::list<> > >::type ones;

    /// the final result of this metafunction, the decoder that examines the discriminating bit
    /// and then decides to look further in either the instructions that have a value of '1' at that
    /// position or in those that have a '0' there.
    typedef decoder< implementation, zeros, ones, discriminator_bit::value> type;
};
/// Specialization of the find_decoder meta function for when the instruction set contains one instruction only.
///
/// When the instruction set contains only one item, we don't really need to decode
/// anymore, so we return a class that will harvest the operands from the instruction word and
/// that calls the implementation.
template<
    typename implementation,
    typename instruction_set
>
struct find_decoder<implementation, instruction_set, 1>
{
    // Exactly one candidate remains: decoding is done, so hand over to the
    // operand harvester that extracts operands and calls execute().
    typedef typename boost::mpl::front<instruction_set>::type instruction_tag;
    typedef operand_harvester<instruction_tag> type;
};
/// Specialization of the find_decoder meta function for zero-sized instruction set.
///
/// Something's gone wrong if we end up with an empty instruction set, because now we don't know what function to call.
/// We can't do anything else but return a decoder that emits a call to the unknown instruction function.
template<
    typename implementation,
    typename instruction_set
>
struct find_decoder<implementation, instruction_set, 0>
{
    // No candidate matches the examined bits: emit the unknown-instruction
    // call instead of decoding further.
    typedef unknown_instruction_caller type;
};
/// Default single-bit decoder.
///
/// A decoder examines a single bit of an instruction and delegates further processing of the instruction
/// to the next decoder.
/// The template arguments instructions_with_zero_at_position and instructionw_with_one_at_position together
/// contain the complete instruction set that this decoder can decode.
template<
    typename implementation,
    typename instructions_with_zero_at_position,
    typename instructions_with_one_at_position,
    int bit
>
struct decoder
{
    /// Test the discriminating bit of the instruction word and recurse into
    /// the half of the instruction set whose fixed bit value matches.
    static void decode_and_execute( implementation &imp, uint16_t instruction)
    {
        if ( instruction & (1 << bit))
        {
            // Bit is set: continue with the instructions that have '1' here.
            typedef typename find_decoder< implementation, instructions_with_one_at_position>::type next_decoder;
            next_decoder::decode_and_execute( imp, instruction);
        }
        else
        {
            // Bit is clear: continue with the instructions that have '0' here.
            typedef typename find_decoder< implementation, instructions_with_zero_at_position>::type next_decoder;
            next_decoder::decode_and_execute( imp, instruction);
        }
    }
};
} // end namespace avrsim
#endif /* DECODER_HPP_ */
|
{"hexsha": "16401bd03331dd0fdccf44332836e51bd56caf75", "size": 8797, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "avrsim/decoder.hpp", "max_stars_repo_name": "DannyHavenith/avrgo", "max_stars_repo_head_hexsha": "c61002455968f918eeaad280b86906d76c4b65de", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-07-17T13:57:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-17T13:57:48.000Z", "max_issues_repo_path": "avrsim/decoder.hpp", "max_issues_repo_name": "DannyHavenith/avrgo", "max_issues_repo_head_hexsha": "c61002455968f918eeaad280b86906d76c4b65de", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "avrsim/decoder.hpp", "max_forks_repo_name": "DannyHavenith/avrgo", "max_forks_repo_head_hexsha": "c61002455968f918eeaad280b86906d76c4b65de", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7707509881, "max_line_length": 139, "alphanum_fraction": 0.7217233148, "num_tokens": 1877}
|
"""
Various utilities, running tools, img editing etc.
If you plan on using this implementation, please cite our work:
@INPROCEEDINGS{Grabowski2021IGARSS,
author={Grabowski, Bartosz and Ziaja, Maciej and Kawulok, Michal
and Nalepa, Jakub},
booktitle={IGARSS 2021 - 2021 IEEE International Geoscience
and Remote Sensing Symposium},
title={Towards Robust Cloud Detection in
Satellite Images Using U-Nets},
year={2021},
note={in press}}
"""
import math
from PIL import Image
Image.MAX_IMAGE_PIXELS = 310000000
from pathlib import Path
from scipy.stats import zscore
from tensorflow import keras
from typing import Dict, List, Tuple, Callable
from skimage import io, img_as_ubyte
from tensorflow.keras.preprocessing.image import load_img
import mlflow
import numpy as np
import spectral.io.envi as envi
import tensorflow.keras.backend as K
import cloud_detection.losses
def open_as_array(
    channel_files: Dict[str, Path],
    channel_names: Tuple[str] = ("red", "green", "blue", "nir"),
    size: Tuple[int] = None,
    normalize: bool = True,
    standardize: bool = False,
) -> np.ndarray:
    """
    Load the requested channels from disk and stack them into one array.

    :param channel_files: Dict mapping channel name -> path of the file
        holding that channel. Must contain every name in channel_names.
    :param channel_names: Tuple of channel names to load, in stacking order.
    :param size: target (height, width) of the image; None keeps the
        original size.
    :param normalize: whether to divide by the dtype maximum.
    :param standardize: whether to z-score each band independently.
        Should not be used for the 38-Cloud dataset: training standardizes
        per patch while evaluation standardizes per full image.
    :return: the image as a single (H, W, C) numpy array.
    """
    bands = []
    for name in channel_names:
        band = load_img(
            channel_files[name],
            color_mode="grayscale",
            target_size=size,
        )
        bands.append(np.array(band))
    img = np.stack(bands, axis=2)

    if normalize:
        # Scale integer pixel values into [0, 1].
        img = img / np.iinfo(img.dtype).max

    if standardize:
        # z-score each band separately, then restore the spatial layout.
        original_shape = img.shape
        flattened = img.reshape(-1, original_shape[-1])
        img = zscore(flattened, axis=0).reshape(original_shape)

    return img
def load_38cloud_gt(channel_files: Dict[str, Path]) -> np.ndarray:
    """
    Load a 38-Cloud ground-truth mask as a (H, W, 1) array in [0, 1].

    :param channel_files: Dict with paths to the files of an image; must
        contain the key 'gt'.
    :return: patch ground truth with a trailing channel axis.
    """
    gt = np.array(load_img(channel_files["gt"], color_mode="grayscale"))
    # Masks are stored as 0/255; rescale to 0/1 and add the channel axis.
    return (gt / 255)[..., np.newaxis]
def load_l8cca_gt(path: Path) -> np.ndarray:
    """
    Load an L8CCA Validation Data image ground truth.

    :param path: directory containing the image's *_fixedmask.hdr file.
    :return: binary (0/1) ground-truth array.
    """
    img = envi.open(list(path.glob("*_fixedmask.hdr"))[0])
    # np.int was removed in NumPy 1.24; the builtin int is the exact alias
    # it used to stand for.
    img = np.array(img.open_memmap(), dtype=int)
    # Mask values above 128 denote cloud; binarise to {0, 1}.
    img = np.where(img > 128, 1, 0)
    return img
def true_positives(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
    """
    Elementwise true-positive indicator.

    :param y_true: true labels (0/1).
    :param y_pred: predicted labels (0/1).
    :return: array that is 1 exactly where both label and prediction are 1.
    """
    return np.multiply(y_true, y_pred)
def false_positives(y_true: np.ndarray, y_pred: np.ndarray):
    """
    Elementwise false-positive indicator.

    :param y_true: true labels (0/1).
    :param y_pred: predicted labels (0/1).
    :return: array that is 1 exactly where the prediction is 1 but the
        label is 0.
    """
    return np.multiply(1 - y_true, y_pred)
def false_negatives(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
    """
    Mark false negatives in the given predictions.

    :param y_true: True labels.
    :param y_pred: Predicted labels.
    :return: Array with values indicating false negatives in predictions.
    """
    # A pixel is a false negative iff the ground truth is positive but
    # the prediction is negative. (Return annotation added for consistency
    # with true_positives.)
    y_pred_neg = 1 - y_pred
    return y_true * y_pred_neg
def overlay_mask(
    image: np.ndarray,
    mask: np.ndarray,
    rgb_color: Tuple[float, float, float],
    overlay_intensity: float = 0.5
) -> np.ndarray:
    """
    Overlay a mask on an image for visualization purposes.

    :param image: Image on which mask should be overlaid.
    :param mask: Mask which should be overlaid on the image,
                 shape (H, W, 1).
    :param rgb_color: Tuple of three floats containing intensity of RGB
        channels of created mask. RGB values can be in range 0 to 1 or
        0 to 255 depending on the image and mask values range. This will
        effectively set the color of the overlay mask.
    :param overlay_intensity: Intensity of the overlaid mask. Should be
        between 0 and 1.
    :return: mask overlaid on a copy of the image, clipped to [0, 1].
    """
    # Work on a copy so the caller's image is left untouched.
    result = np.copy(image)
    for channel_idx, channel_color in enumerate(rgb_color):
        result[:, :, channel_idx] += (
            overlay_intensity * channel_color * mask[:, :, 0]
        )
    return np.clip(result, 0, 1)
def setup_mlflow(run_name: str):
    """
    Configure MLFlow tracking and begin a run under the given name.

    :param run_name: name of the run.
    """
    tracking_uri = "http://beetle.mlflow.kplabs.pl"
    experiment_name = "cloud_detection"
    mlflow.set_tracking_uri(tracking_uri)
    mlflow.set_experiment(experiment_name)
    mlflow.start_run(run_name=run_name)
def pad(img: np.ndarray, patch_size: int = 384) -> np.ndarray:
    """
    Zero-pad an image so it divides evenly into patches.

    The padding on each spatial axis is split as evenly as possible
    between the two sides; any odd remainder goes to the trailing side.
    The channel axis is never padded.

    :param img: image to pad, shape (rows, cols, channels).
    :param patch_size: size of the patches.
    :return: padded image.
    """
    rows, cols = img.shape[0], img.shape[1]
    row_extra = (-rows) % patch_size
    col_extra = (-cols) % patch_size
    row_pads = (row_extra // 2, row_extra - row_extra // 2)
    col_pads = (col_extra // 2, col_extra - col_extra // 2)
    return np.pad(img, (row_pads, col_pads, (0, 0)))
def unpad(img: np.ndarray, gt_shape: Tuple) -> np.ndarray:
    """
    Crop a padded image back to its original spatial size.

    Inverse of :func:`pad`: removes the (centered) zero padding.

    :param img: image to unpad.
    :param gt_shape: shape of the original image, (rows, cols, channels).
    :return: unpadded image.
    """
    rows, cols, _ = img.shape
    target_rows, target_cols, _ = gt_shape
    row_off = (rows - target_rows) // 2
    col_off = (cols - target_cols) // 2
    return img[row_off:row_off + target_rows, col_off:col_off + target_cols]
def get_metrics_tf(
        gt: np.ndarray, pred: np.ndarray, metric_fns: List[Callable]) -> Dict:
    """
    Calculate TensorFlow evaluation metrics for given image predictions.

    :param gt: image ground truth.
    :param pred: image predictions.
    :param metric_fns: list of metric functions; entries may also be
        strings naming functions in ``cloud_detection.losses``.
    :return: evaluation metrics keyed by metric name.
    """
    gt_placeholder = K.placeholder(ndim=4)
    pred_placeholder = K.placeholder(ndim=4)
    feed = {gt_placeholder: gt, pred_placeholder: pred}
    metrics = {}
    for metric_fn in metric_fns:
        # String entries are resolved against cloud_detection.losses.
        if isinstance(metric_fn, str):
            metric_name = metric_fn
            metric_fn = getattr(cloud_detection.losses, metric_fn)
        else:
            metric_name = metric_fn.__name__
        mean_metric = K.mean(metric_fn(gt_placeholder, pred_placeholder))
        metrics[metric_name] = mean_metric.eval(
            session=K.get_session(), feed_dict=feed
        )
    return metrics
def save_vis(
    img_id: str,
    img_vis: np.ndarray,
    img_pred: np.ndarray,
    img_gt: np.ndarray,
    rpath: Path,
):
    """
    Save the visualisation set for the image with the given id.

    The set consists of:

    * an overlay marking uncertain regions of the segmentation,
    * the ground truth mask,
    * the (rounded) prediction mask,
    * an overlay highlighting TP, FP and FN pixels.

    :param img_id: Id of visualised img,
                   will be used for naming saved artifacts.
    :param img_vis: RGB image.
    :param img_pred: Prediction mask, result of segmentation.
    :param img_gt: Ground truth mask.
    :param rpath: Path where artifacts should be saved.
    """
    out_dir = rpath / img_id
    Path(out_dir).mkdir(parents=True, exist_ok=False)
    # A pixel is "uncertain" when its prediction lies strictly between the
    # near-0 and near-1 thresholds, i.e. in [0.001, 0.999].
    uncertain = (
        (img_pred >= 0.001) & (img_pred <= 0.999)
    ).astype(img_pred.dtype)
    unc_vis = overlay_mask(img_vis, uncertain, (1, 1, 0), 1.0)
    io.imsave(out_dir / "unc.png", img_as_ubyte(unc_vis))
    # Binarize predictions before computing the TP/FP/FN overlays.
    img_pred = np.round(img_pred)
    io.imsave(out_dir / "gt.png", img_gt[:, :, 0])
    io.imsave(out_dir / "pred.png", img_as_ubyte(img_pred[:, :, 0]))
    mask_vis = overlay_mask(
        img_vis, true_positives(img_gt, img_pred), (1., 1., 0.))
    mask_vis = overlay_mask(
        mask_vis, false_positives(img_gt, img_pred), (1., 0., 0.))
    mask_vis = overlay_mask(
        mask_vis, false_negatives(img_gt, img_pred), (1., 0., 1.))
    io.imsave(out_dir / "masks.png", img_as_ubyte(mask_vis))
def make_paths(*args: str) -> Tuple[Path]:
    """
    Convert a sequence of strings into ``Path`` objects.

    ``None`` entries are passed through unchanged.

    :params: strings to make into Paths.
    :return: Paths made out of input strings.
    """
    return tuple(None if arg is None else Path(arg) for arg in args)
class MLFlowCallback(keras.callbacks.Callback):
    """Keras callback which forwards per-epoch metrics to MLFlow."""

    def on_epoch_end(self, epoch: int, logs: Dict = None):
        """
        Triggered after each epoch, logging metrics to MLFlow.

        :param epoch: index of epoch.
        :param logs: logs for MLFlow; may be None or empty, in which
                     case nothing is logged.
        """
        # Keras may invoke callbacks with logs=None; guard so that
        # mlflow.log_metrics is never handed a non-dict.
        if logs:
            mlflow.log_metrics(logs, step=epoch)
def strip_nir(hyper_img: np.ndarray) -> np.ndarray:
    """
    Drop the NIR channel so the image can be displayed as RGB.

    :param hyper_img: image with shape (x, y, 4) where fourth channel is nir.
    :return: image with shape (x, y, 3) with standard RGB channels.
    """
    rgb_only = hyper_img[:, :, 0:3]
    return rgb_only
def load_image_paths(
    base_path: Path,
    patches_path: Path = None,
    split_ratios: List[float] = None,
    shuffle: bool = True,
    img_id: str = None,
    seed: int = 42
) -> List[List[Dict[str, Path]]]:
    """
    Build paths to all files containing image channels.

    :param base_path: root path containing directories with image channels.
    :param patches_path: path to images patches names to load
                         (if None, all patches will be used).
    :param split_ratios: list containing split ratios,
                         splits should add up to one; defaults to [1.0],
                         i.e. a single split containing all files.
    :param shuffle: whether to shuffle image paths.
    :param img_id: image ID; if specified, load paths for this image only.
    :param seed: random seed for shuffling; relevant only if shuffle=True.
    :return: list with paths to image files, separated into splits.
             Structured as: list_of_splits[list_of_files['file_channel', Path]]
    :raises ValueError: if no files were found.
    :raises RuntimeError: if the split ratios do not sum up to one.
    """
    # None sentinel instead of a mutable default argument.
    if split_ratios is None:
        split_ratios = [1.0]
    files = build_paths(base_path, patches_path, img_id)
    if len(files) == 0:
        raise ValueError("No files loaded")
    print(f"Loaded paths for images of { len(files) } samples")
    if shuffle:
        # Shuffle deterministically, then restore the global random state
        # so callers are not affected.
        saved_seed = np.random.get_state()
        np.random.seed(seed)
        np.random.shuffle(files)
        np.random.set_state(saved_seed)
    # Compare with a tolerance: an exact float equality test rejects valid
    # ratios such as [0.7, 0.3], whose float sum is not exactly 1.0.
    if not math.isclose(sum(split_ratios), 1.0):
        raise RuntimeError("Split ratios don't sum up to one.")
    split_beg = 0
    splits = []
    for ratio in split_ratios:
        split_end = split_beg + math.ceil(ratio * len(files))
        splits.append(files[split_beg:split_end])
        split_beg = split_end
    return splits
def combine_channel_files(red_file: Path) -> Dict[str, Path]:
    """
    Get paths to 'green', 'blue', 'nir' and 'gt' channel files
    based on the path to the 'red' channel of the given image.

    :param red_file: path to red channel file.
    :return: dictionary containing paths to files with each image channel.
    """
    # Sibling channel paths are derived by substituting the channel name
    # for every occurrence of "red" in the path (directory and file name).
    channels = {"red": red_file}
    for channel in ("green", "blue", "nir", "gt"):
        channels[channel] = Path(str(red_file).replace("red", channel))
    return channels
def build_paths(
    base_path: Path, patches_path: Path, img_id: str
) -> List[Dict[str, Path]]:
    """
    Build paths to all files containing image channels.

    :param base_path: root path containing directories with image channels.
    :param patches_path: path to images patches names to load
                         (if None, all patches will be used).
    :param img_id: image ID; if specified, load paths for this image only.
    :return: list of dicts containing paths to files with image channels.
    """
    # Red-channel files serve as the reference; the other channel paths
    # are derived from their names.
    glob_pattern = "*red/*.TIF" if img_id is None else f"*red/*{img_id}.TIF"
    red_files = list(base_path.glob(glob_pattern))
    if patches_path is not None:
        # Keep only the patches whose "patch..." name stem is listed in
        # the patches file (first row is a header).
        patches_names = set(
            np.genfromtxt(patches_path, dtype="str", skip_header=1)
        )
        red_files = [
            fname for fname in red_files
            if str(fname)[str(fname).find("patch"):str(fname).find(".TIF")]
            in patches_names
        ]
    red_files.sort()
    # Get other channels in accordance to the red channel filenames.
    return [combine_channel_files(red_file) for red_file in red_files]
|
{"hexsha": "0cfb52a7b7d5debfc5250e7c1b090ea665dbed09", "size": 13232, "ext": "py", "lang": "Python", "max_stars_repo_path": "beetles/cloud_detection/utils.py", "max_stars_repo_name": "ESA-PhiLab/hypernet", "max_stars_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-11-14T09:38:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T17:44:51.000Z", "max_issues_repo_path": "beetles/cloud_detection/utils.py", "max_issues_repo_name": "ESA-PhiLab/hypernet", "max_issues_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-09-11T14:52:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T09:32:01.000Z", "max_forks_repo_path": "beetles/cloud_detection/utils.py", "max_forks_repo_name": "ESA-PhiLab/hypernet", "max_forks_repo_head_hexsha": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-10-24T12:42:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T03:50:50.000Z", "avg_line_length": 32.9975062344, "max_line_length": 79, "alphanum_fraction": 0.6482769045, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3337}
|
\section{Linear SNR maximisation in practice}
\subsection{Spectral split before SNR maximisation}
The cutoff frequencies were chosen to introduce
|
{"hexsha": "d1bb4a76b6963786dc1fda775c9c74f226882555", "size": 153, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "modules/Scraps/GEVD_practice.tex", "max_stars_repo_name": "tfiers/master-thesis", "max_stars_repo_head_hexsha": "3e97128eeb18827b03da90817fe6f6985c84ad80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-23T01:39:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-23T01:39:24.000Z", "max_issues_repo_path": "modules/Scraps/GEVD_practice.tex", "max_issues_repo_name": "tfiers/master-thesis", "max_issues_repo_head_hexsha": "3e97128eeb18827b03da90817fe6f6985c84ad80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2018-09-18T16:38:12.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-10T22:37:35.000Z", "max_forks_repo_path": "modules/Scraps/GEVD_practice.tex", "max_forks_repo_name": "tfiers/master-thesis", "max_forks_repo_head_hexsha": "3e97128eeb18827b03da90817fe6f6985c84ad80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.3, "max_line_length": 51, "alphanum_fraction": 0.8039215686, "num_tokens": 31}
|
import numpy as np
from .ext.at_patch import get_nodes_at_patch as _get_nodes_at_patch
def get_nodes_at_patch(graph):
    """Set up data structure that describes node-patch connectivity.

    Parameters
    ----------
    graph : Graph
        Graph exposing ``links_at_patch`` (links that define each patch)
        and ``nodes_at_link`` (nodes that define each link), which are
        the two arrays actually consumed here.

    Returns
    -------
    ndarray
        Nodes that define each patch, same shape as
        ``graph.links_at_patch``; entries left unset by the extension
        routine remain -1.
    """
    # Pre-fill with -1 so missing/unused slots are explicit; the compiled
    # helper fills the array in place.
    nodes_at_patch = np.full(graph.links_at_patch.shape, -1, dtype=int)
    _get_nodes_at_patch(graph.links_at_patch, graph.nodes_at_link, nodes_at_patch)
    return nodes_at_patch
|
{"hexsha": "0d1d537fb64eebf9ff7372fae6a25dcdc447c75c", "size": 617, "ext": "py", "lang": "Python", "max_stars_repo_path": "landlab/graph/object/at_patch.py", "max_stars_repo_name": "amanaster2/landlab", "max_stars_repo_head_hexsha": "ea17f8314eb12e3fc76df66c9b6ff32078caa75c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 257, "max_stars_repo_stars_event_min_datetime": "2015-01-13T16:01:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:37:43.000Z", "max_issues_repo_path": "landlab/graph/object/at_patch.py", "max_issues_repo_name": "amanaster2/landlab", "max_issues_repo_head_hexsha": "ea17f8314eb12e3fc76df66c9b6ff32078caa75c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1222, "max_issues_repo_issues_event_min_datetime": "2015-02-05T21:36:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:53:49.000Z", "max_forks_repo_path": "landlab/graph/object/at_patch.py", "max_forks_repo_name": "amanaster2/landlab", "max_forks_repo_head_hexsha": "ea17f8314eb12e3fc76df66c9b6ff32078caa75c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 274, "max_forks_repo_forks_event_min_datetime": "2015-02-11T19:56:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T23:31:07.000Z", "avg_line_length": 23.7307692308, "max_line_length": 82, "alphanum_fraction": 0.7001620746, "include": true, "reason": "import numpy", "num_tokens": 137}
|
from keras.models import load_model
model = load_model("cnn-intel-image-model.h5")  # load model <- this has run on 3 epochs with ~85% accuracy
from keras.preprocessing import image
test_image = image.load_img("seg_pred/14.jpg", target_size=(64, 64))
# test_image  # since this format is PIL or pillow so it can be printed
test_image = image.img_to_array(test_image)  # convert PIL image to numpy array
import numpy as np
test_image = np.expand_dims(test_image, axis=0)
# since keras uses tensorflow and for tensorflow it needs 4d image so we converted 3d image to 4d image using above
result = model.predict(test_image)
# The output layer emits per-class scores; comparing each score with `== 1`
# is fragile (softmax outputs are rarely exactly 1, which made the original
# chain silently fall through to "Street"). Select the highest-scoring class
# instead; index order matches the original if/elif chain.
CLASS_NAMES = ["Buildings", "Forest", "Glacier", "Mountain", "Sea", "Street"]
print("\n\n\n\nPredictions : ", end="")
print(CLASS_NAMES[int(np.argmax(result[0]))])
|
{"hexsha": "96f450edf38a5ccab48f31ce2738d185c9cde28f", "size": 904, "ext": "py", "lang": "Python", "max_stars_repo_path": "Intel-Placeimage-Classification Testing.py", "max_stars_repo_name": "rajansh87/Intel-Image-Classification-using-CNN", "max_stars_repo_head_hexsha": "39ec1417316c12e14bdce3a37195d8328b7b7aa5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-19T18:53:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T18:53:51.000Z", "max_issues_repo_path": "Intel-Placeimage-Classification Testing.py", "max_issues_repo_name": "rajansh87/Intel-Image-Classification-using-CNN", "max_issues_repo_head_hexsha": "39ec1417316c12e14bdce3a37195d8328b7b7aa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Intel-Placeimage-Classification Testing.py", "max_forks_repo_name": "rajansh87/Intel-Image-Classification-using-CNN", "max_forks_repo_head_hexsha": "39ec1417316c12e14bdce3a37195d8328b7b7aa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6, "max_line_length": 115, "alphanum_fraction": 0.7112831858, "include": true, "reason": "import numpy", "num_tokens": 248}
|
[STATEMENT]
lemma support_preList: "support (preList upds C1) \<subseteq> lesvars upds"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds
[PROOF STEP]
proof (induct upds)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. support (preList [] C1) \<subseteq> lesvars []
2. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. support (preList [] C1) \<subseteq> lesvars []
2. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList [] C1) \<subseteq> lesvars []
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
support (preList [] C1) \<subseteq> lesvars []
goal (1 subgoal):
1. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
case (Cons a upds)
[PROOF STATE]
proof (state)
this:
support (preList upds C1) \<subseteq> lesvars upds
goal (1 subgoal):
1. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
obtain y e v where a: "a=(y,(e,v))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y e v. a = (y, e, v) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using prod_cases3
[PROOF STATE]
proof (prove)
using this:
(\<And>a b c. ?y = (a, b, c) \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>y e v. a = (y, e, v) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a = (y, e, v)
goal (1 subgoal):
1. \<And>a upds. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
from Cons
[PROOF STATE]
proof (chain)
picking this:
support (preList upds C1) \<subseteq> lesvars upds
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
support (preList upds C1) \<subseteq> lesvars upds
goal (1 subgoal):
1. support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
[PROOF STEP]
unfolding a
[PROOF STATE]
proof (prove)
using this:
support (preList upds C1) \<subseteq> lesvars upds
goal (1 subgoal):
1. support (preList ((y, e, v) # upds) C1) \<subseteq> lesvars ((y, e, v) # upds)
[PROOF STEP]
apply (simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (\<lambda>a b. a y = preT C1 e b \<and> preList upds C1 a b) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
apply(rule subset_trans[OF support_and])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (\<lambda>l s. l y = preT C1 e s) \<union> support (preList upds C1) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
apply(rule Un_least)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (\<lambda>l s. l y = preT C1 e s) \<subseteq> insert y (lesvars upds)
2. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList upds C1) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (\<lambda>l s. l y = preT C1 e s) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
apply(rule subset_trans[OF support_eq])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> supportE (\<lambda>l. preT C1 e) \<union> {y} \<subseteq> insert y (lesvars upds)
[PROOF STEP]
using supportE_twicepreT subset_trans supportE_single2
[PROOF STATE]
proof (prove)
using this:
supportE (\<lambda>l. preT ?C1.0 (preT ?C2.0 (?e l))) \<subseteq> supportE ?e
\<lbrakk>?A \<subseteq> ?B; ?B \<subseteq> ?C\<rbrakk> \<Longrightarrow> ?A \<subseteq> ?C
supportE (\<lambda>l. ?P) = {}
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> supportE (\<lambda>l. preT C1 e) \<union> {y} \<subseteq> insert y (lesvars upds)
[PROOF STEP]
by simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList upds C1) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support (preList upds C1) \<subseteq> lesvars upds \<Longrightarrow> support (preList upds C1) \<subseteq> insert y (lesvars upds)
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
support (preList (a # upds) C1) \<subseteq> lesvars (a # upds)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2101, "file": "Hoare_Time_Nielson_VCGi", "length": 24}
|
"""Contains the audio featurizer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from data_utils.utility import read_manifest
from data_utils.audio import AudioSegment
from python_speech_features import mfcc
from python_speech_features import delta
class AudioFeaturizer(object):
"""Audio featurizer, for extracting features from audio contents of
AudioSegment or SpeechSegment.
Currently, it supports feature types of linear spectrogram and mfcc.
:param specgram_type: Specgram feature type. Options: 'linear'.
:type specgram_type: str
:param stride_ms: Striding size (in milliseconds) for generating frames.
:type stride_ms: float
:param window_ms: Window size (in milliseconds) for generating frames.
:type window_ms: float
:param max_freq: When specgram_type is 'linear', only FFT bins
corresponding to frequencies between [0, max_freq] are
returned; when specgram_type is 'mfcc', max_feq is the
highest band edge of mel filters.
:types max_freq: None|float
:param target_sample_rate: Audio are resampled (if upsampling or
downsampling is allowed) to this before
extracting spectrogram features.
:type target_sample_rate: float
:param use_dB_normalization: Whether to normalize the audio to a certain
decibels before extracting the features.
:type use_dB_normalization: bool
:param target_dB: Target audio decibels for normalization.
:type target_dB: float
"""
def __init__(self,
specgram_type='linear',
stride_ms=10.0,
window_ms=20.0,
max_freq=None,
target_sample_rate=16000,
use_dB_normalization=True,
target_dB=-20):
self._specgram_type = specgram_type
self._stride_ms = stride_ms
self._window_ms = window_ms
self._max_freq = max_freq
self._target_sample_rate = target_sample_rate
self._use_dB_normalization = use_dB_normalization
self._target_dB = target_dB
def featurize(self,
audio_segment,
allow_downsampling=True,
allow_upsampling=True):
"""Extract audio features from AudioSegment or SpeechSegment.
:param audio_segment: Audio/speech segment to extract features from.
:type audio_segment: AudioSegment|SpeechSegment
:param allow_downsampling: Whether to allow audio downsampling before
featurizing.
:type allow_downsampling: bool
:param allow_upsampling: Whether to allow audio upsampling before
featurizing.
:type allow_upsampling: bool
:return: Spectrogram audio feature in 2darray.
:rtype: ndarray
:raises ValueError: If audio sample rate is not supported.
"""
# upsampling or downsampling
if ((audio_segment.sample_rate > self._target_sample_rate and
allow_downsampling) or
(audio_segment.sample_rate < self._target_sample_rate and
allow_upsampling)):
audio_segment.resample(self._target_sample_rate)
if audio_segment.sample_rate != self._target_sample_rate:
raise ValueError("Audio sample rate is not supported. "
"Turn allow_downsampling or allow up_sampling on.")
# decibel normalization
if self._use_dB_normalization:
audio_segment.normalize(target_db=self._target_dB)
# extract spectrogram
return self._compute_specgram(audio_segment.samples,
audio_segment.sample_rate)
def _compute_specgram(self, samples, sample_rate):
"""Extract various audio features."""
if self._specgram_type == 'linear':
return self._compute_linear_specgram(
samples, sample_rate, self._stride_ms, self._window_ms,
self._max_freq)
elif self._specgram_type == 'mfcc':
return self._compute_mfcc(samples, sample_rate, self._stride_ms,
self._window_ms, self._max_freq)
else:
raise ValueError("Unknown specgram_type %s. "
"Supported values: linear." % self._specgram_type)
def _compute_linear_specgram(self,
samples,
sample_rate,
stride_ms=10.0,
window_ms=20.0,
max_freq=None,
eps=1e-14):
"""Compute the linear spectrogram from FFT energy."""
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
"sample rate.")
if stride_ms > window_ms:
raise ValueError("Stride size must not be greater than "
"window size.")
stride_size = int(0.001 * sample_rate * stride_ms)
window_size = int(0.001 * sample_rate * window_ms)
specgram, freqs = self._specgram_real(
samples,
window_size=window_size,
stride_size=stride_size,
sample_rate=sample_rate)
ind = np.where(freqs <= max_freq)[0][-1] + 1
return np.log(specgram[:ind, :] + eps)
def _specgram_real(self, samples, window_size, stride_size, sample_rate):
"""Compute the spectrogram for samples from a real signal."""
# extract strided windows
truncate_size = (len(samples) - window_size) % stride_size
samples = samples[:len(samples) - truncate_size]
nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
nstrides = (samples.strides[0], samples.strides[0] * stride_size)
windows = np.lib.stride_tricks.as_strided(
samples, shape=nshape, strides=nstrides)
assert np.all(
windows[:, 1] == samples[stride_size:(stride_size + window_size)])
# window weighting, squared Fast Fourier Transform (fft), scaling
weighting = np.hanning(window_size)[:, None]
fft = np.fft.rfft(windows * weighting, axis=0)
fft = np.absolute(fft)
fft = fft**2
scale = np.sum(weighting**2) * sample_rate
fft[1:-1, :] *= (2.0 / scale)
fft[(0, -1), :] /= scale
# prepare fft frequency list
freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])
return fft, freqs
def _compute_mfcc(self,
samples,
sample_rate,
stride_ms=10.0,
window_ms=20.0,
max_freq=None):
"""Compute mfcc from samples."""
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
"sample rate.")
if stride_ms > window_ms:
raise ValueError("Stride size must not be greater than "
"window size.")
# compute the 13 cepstral coefficients, and the first one is replaced
# by log(frame energy)
mfcc_feat = mfcc(
signal=samples,
samplerate=sample_rate,
winlen=0.001 * window_ms,
winstep=0.001 * stride_ms,
highfreq=max_freq)
# Deltas
d_mfcc_feat = delta(mfcc_feat, 2)
# Deltas-Deltas
dd_mfcc_feat = delta(d_mfcc_feat, 2)
# transpose
mfcc_feat = np.transpose(mfcc_feat)
d_mfcc_feat = np.transpose(d_mfcc_feat)
dd_mfcc_feat = np.transpose(dd_mfcc_feat)
# concat above three features
concat_mfcc_feat = np.concatenate(
(mfcc_feat, d_mfcc_feat, dd_mfcc_feat))
return concat_mfcc_feat
|
{"hexsha": "0a54701bff7269ffe8b02df74ad5298986c14507", "size": 8253, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_utils/featurizer/audio_featurizer.py", "max_stars_repo_name": "limpidezza/DeepSpeech", "max_stars_repo_head_hexsha": "b3c728d46ff4eee68c45f20b0abb76e968008bcb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2020-02-24T14:34:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T08:33:56.000Z", "max_issues_repo_path": "data_utils/featurizer/audio_featurizer.py", "max_issues_repo_name": "limpidezza/DeepSpeech", "max_issues_repo_head_hexsha": "b3c728d46ff4eee68c45f20b0abb76e968008bcb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-20T07:17:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-27T08:33:30.000Z", "max_forks_repo_path": "data_utils/featurizer/audio_featurizer.py", "max_forks_repo_name": "limpidezza/DeepSpeech", "max_forks_repo_head_hexsha": "b3c728d46ff4eee68c45f20b0abb76e968008bcb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-03-09T13:23:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T05:45:25.000Z", "avg_line_length": 43.8989361702, "max_line_length": 80, "alphanum_fraction": 0.6006300739, "include": true, "reason": "import numpy", "num_tokens": 1759}
|
using LinearAlgebra
# Singleton marker type standing for the scalar one; `matfun_axpby!` checks
# for it with `isa` so multiplications/additions by one can be skipped.
struct ValueOne; end
ValueOne()  # constructs (and discards) an instance; kept for compatibility
# Compute X <- a X + b I.
function matfun_axpby!(X,a,b,Y::UniformScaling)
    cols = size(X, 2)
    # Skip the scaling entirely when `a` is the symbolic one.
    (a isa ValueOne) || rmul!(X, a)
    # Add b (or 1, when b is the symbolic one) to the diagonal.
    @inbounds for k = 1:cols
        X[k, k] += (b isa ValueOne) ? 1 : b
    end
end
# Compute X <- a X + b Y.
function matfun_axpby!(X,a,b,Y)
    rows, cols = size(X)
    # Skip the scaling entirely when `a` is the symbolic one.
    (a isa ValueOne) || rmul!(X, a)
    # Hoist the `b isa ValueOne` test out of the element loop; the two
    # branches perform the same traversal (row-major over (i, j)).
    if b isa ValueOne
        @inbounds for i = 1:rows, j = 1:cols
            X[i, j] += Y[i, j]
        end
    else
        @inbounds for i = 1:rows, j = 1:cols
            X[i, j] += b * Y[i, j]
        end
    end
end
# Out-of-place wrapper: copy A into a complex working array and run the
# in-place kernel on the copy, leaving A untouched.
@inline function exp_bbcs_m4(A)
    T = promote_type(eltype(A), ComplexF64)
    work = similar(A, T)
    copyto!(work, A)
    return exp_bbcs_m4!(work)
end
# NOTE(review): machine-generated evaluation schedule for (presumably) a
# degree-optimal polynomial approximation of the matrix exponential —
# TODO confirm against the generator. The coefficient values and the exact
# order of the multiply / linear-combination steps ARE the algorithm; do
# not reorder or "simplify" by hand. A is overwritten and returned.
@inline function exp_bbcs_m4!(A)
    T=promote_type(eltype(A),ComplexF64) # Make it work for many 'bigger' types (matrices and scalars)
    # max_memslots=6
    n=size(A,1)
    # The first slots are precomputed nodes [:A]
    memslots2 = similar(A,T)
    memslots3 = similar(A,T)
    memslots4 = similar(A,T)
    memslots5 = similar(A,T)
    memslots6 = similar(A,T)
    # Assign precomputed nodes memslots
    memslots1=A # overwrite A
    # Uniform scaling is exploited.
    # No matrix I explicitly allocated.
    value_one=ValueOne()
    # Computation order: B2 B3 Ba4 Bb4 B4 Ba5 Bb5 B5 T2k7
    # Computing B2 with operation: mult
    mul!(memslots2,memslots1,memslots1)
    # Computing B3 with operation: mult
    mul!(memslots3,memslots2,memslots1)
    # Computing Ba4 = x*A+x*B2+x*B3
    coeff1=0.0 + 0.13340427306445612im
    coeff2=0.020226020298183107 + 0.0im
    coeff3=-0.0 - 0.00674638241111651im
    memslots4 .= coeff1.*memslots1 .+ coeff2.*memslots2 .+ coeff3.*memslots3
    # Computing Bb4 = x*A+x*B2+x*B3
    coeff1=0.0 + 0.13340427306445612im
    coeff2=0.020226020298183107 + 0.0im
    coeff3=-0.0 - 0.00674638241111651im
    memslots5 .= coeff1.*memslots1 .+ coeff2.*memslots2 .+ coeff3.*memslots3
    # Computing B4 with operation: mult
    mul!(memslots6,memslots4,memslots5)
    # Deallocating Ba4 in slot 4
    # Deallocating Bb4 in slot 5
    # Computing Ba5 = x*I+x*A+x*B2+x*B3+x*B4
    coeff1=2.6958430691533257 + 0.0im
    coeff2=0.0 + 0.05272871327381115im
    coeff3=-0.09896214548845832 + 0.0im
    coeff4=0.0 + 0.007295441446830946im
    coeff5=1.0 + 0.0im
    memslots4 .= coeff2.*memslots1 .+ coeff3.*memslots2 .+ coeff4.*memslots3 .+ coeff5.*memslots6
    mul!(memslots4,true,I*coeff1,true,true)
    # Computing Bb5 = x*I+x*A+x*B2+x*B3+x*B4
    coeff1=2.6958430691533257 + 0.0im
    coeff2=0.0 - 1.3591092616886926im
    coeff3=-0.09896214548845832 + 0.0im
    coeff4=0.0 + 0.015964794632994668im
    coeff5=1.0 + 0.0im
    # Smart lincomb recycle B4
    memslots6 .= coeff2.*memslots1 .+ coeff3.*memslots2 .+ coeff4.*memslots3 .+ coeff5.*memslots6
    mul!(memslots6,true,I*coeff1,true,true)
    # Computing B5 with operation: mult
    mul!(memslots5,memslots4,memslots6)
    # Deallocating Ba5 in slot 4
    # Deallocating Bb5 in slot 6
    # Computing T2k7 = x*I+x*A+x*B2+x*B3+x*B5
    coeff1=-6.267569853502023 + 0.0im
    coeff2=0.0 + 2.521796947120981im
    coeff3=0.05786296656487002 + 0.0im
    coeff4=0.0 - 0.0776668640807187im
    coeff5=1.0 + 0.0im
    # Smart lincomb recycle A
    memslots1 .= coeff2.*memslots1 .+ coeff3.*memslots2 .+ coeff4.*memslots3 .+ coeff5.*memslots5
    mul!(memslots1,true,I*coeff1,true,true)
    # Deallocating B2 in slot 2
    # Deallocating B3 in slot 3
    # Deallocating B5 in slot 5
    return memslots1 # Returning T2k7
end
|
{"hexsha": "435e9e141b70f715b87524488d05ec6feac03b56", "size": 3564, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "data/generated/exp/exp_bbcs_m4.jl", "max_stars_repo_name": "matrixfunctions/GraphMatFunData", "max_stars_repo_head_hexsha": "e69413a9c6f297ef003179cb04e738137f775759", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-07T23:01:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T23:01:14.000Z", "max_issues_repo_path": "data/generated/exp/exp_bbcs_m4.jl", "max_issues_repo_name": "matrixfunctions/GraphMatFunData", "max_issues_repo_head_hexsha": "e69413a9c6f297ef003179cb04e738137f775759", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/generated/exp/exp_bbcs_m4.jl", "max_forks_repo_name": "matrixfunctions/GraphMatFunData", "max_forks_repo_head_hexsha": "e69413a9c6f297ef003179cb04e738137f775759", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1081081081, "max_line_length": 102, "alphanum_fraction": 0.6512345679, "num_tokens": 1335}
|
import json
import logging
import numpy as np
import requests
from commons.operations_utils.functions import serialize, deserialize
from commons.decorators.decorators import optimized_collection_response, normalize_optimized_collection_argument
from commons.utils.async_thread_pool_executor import AsyncThreadPoolExecutor
from federated_aggregator.utils.decorators import deserialize_encrypted_server_data, serialize_encrypted_server_gradient, deserialize_encrypted_server_data_2
class DataOwnerConnector:
    """REST client the federated aggregator uses to talk to data-owner nodes.

    Every collection-level method fans its HTTP calls out through an
    AsyncThreadPoolExecutor so all data owners are contacted concurrently;
    the commons/federated_aggregator decorators handle (de)serialization of
    encrypted payloads around the raw requests.
    """

    def __init__(self, data_owner_port, encryption_service, active_encryption):
        """
        :param data_owner_port: TCP port every data-owner service listens on.
        :param encryption_service: service used to (de)serialize encrypted data.
        :param active_encryption: when True, weights are serialized through
            the encryption service before being sent.
        """
        self.data_owner_port = data_owner_port
        self.async_thread_pool = AsyncThreadPoolExecutor()
        self.encryption_service = encryption_service
        self.active_encryption = active_encryption

    def send_gradient_to_data_owners(self, data_owners, gradient, model_id, public_key):
        """Push a gradient update to every data owner in parallel."""
        args = [self._build_data(data_owner, gradient, model_id, public_key) for data_owner in data_owners]
        self.async_thread_pool.run(executable=self._send_gradient, args=args)

    # @optimized_dict_collection_response(optimization=np.asarray, active=True)
    def get_gradient_from_data_owners(self, model_data):
        """Request a gradient from every local trainer for the given model."""
        args = [
            (trainer, model_data.model_type, model_data.model.weights, model_data.model_id, model_data.public_key)
            for trainer in model_data.local_trainers
        ]
        return self.async_thread_pool.run(executable=self._get_update_from_data_owner, args=args)

    @optimized_collection_response(optimization=np.asarray, active=True)
    def get_data_owners_model(self, model_data):
        """Fetch the current local model from every trainer."""
        args = [
            "http://{}:{}/model".format(trainer.host, self.data_owner_port)
            for trainer in model_data.local_trainers
        ]
        results = self.async_thread_pool.run(executable=self._send_get_request_to_data_owner, args=args)
        return list(results)

    def send_requirements_to_data_owners(self, data_owners, data):
        """POST the training requirements payload to every data owner."""
        args = [
            ("http://{}:{}/trainings".format(data_owner.host, self.data_owner_port), data)
            for data_owner in data_owners
        ]
        self.async_thread_pool.run(executable=self._send_post_request_to_data_owner, args=args)

    @optimized_collection_response(optimization=np.asarray, active=True)
    def get_linked_data_owners(self, data_owners, model_id):
        """Query each data owner's link status for the given training."""
        args = [
            "http://{}:{}/trainings/{}".format(data_owner.host, self.data_owner_port, model_id)
            for data_owner in data_owners
        ]
        results = self.async_thread_pool.run(executable=self._send_get_request_to_data_owner, args=args)
        return list(results)

    def send_mses(self, validators, model_data, mses, role):
        """Report one MSE metric (plus the reporter's role) to each validator.

        ``mses[i]`` is sent to ``validators[i]`` — both sequences must align.
        """
        args = [
            (
                "http://{}:{}/trainings/{}/metrics".format(validators[i].host, self.data_owner_port, model_data.model_id), {'mse': mses[i], 'role': role})
            for i in range(len(validators))
        ]
        self.async_thread_pool.run(executable=self._send_put_request_to_data_owner, args=args)

    def get_model_metrics_from_validators(self, validators, model_data, weights=None):
        """Send a model to the validators and collect their 'diff' metrics.

        :param weights: optional weights to evaluate instead of the model's
            own (defaults to ``model_data.model.weights``).
        :return: list of deserialized diff values, one per validator.
        """
        model = weights if weights is not None else model_data.model.weights
        data = {'model': serialize(model, self.encryption_service, model_data.public_key),
                'model_type': model_data.model_type,
                'model_id': model_data.model_id,
                'public_key': model_data.public_key
                }
        args = [
            ("http://{}:{}/trainings/{}/metrics".format(validator.host, self.data_owner_port, model_data.model_id), data)
            for validator in validators
        ]
        results = self.async_thread_pool.run(executable=self._send_post_request_to_data_owner, args=args)
        diffs = [result['diff'] for result in results]
        return [deserialize(diff, self.encryption_service, model_data.public_key) for diff in diffs]

    @deserialize_encrypted_server_data()
    def _get_update_from_data_owner(self, data):
        """POST the current weights to one trainer and return its JSON reply.

        :param data: (data_owner, model_type, weights, model_id, public_key)
        """
        data_owner, model_type, weights, model_id, public_key = data
        url = "http://{}:{}/trainings/{}".format(data_owner.host, self.data_owner_port, model_id)
        payload = {"model_type": model_type, "weights": self.encryption_service.get_serialized_collection(weights) if self.active_encryption else weights, "public_key": public_key}
        logging.info("Url: {}".format(url))
        response = requests.post(url, json=payload)
        response.raise_for_status()
        logging.info("response {}".format(response))
        return response.json()

    @serialize_encrypted_server_gradient(schema=json.dumps)
    def _send_gradient(self, data):
        """PUT one serialized gradient payload to a data owner.

        :param data: (url, payload) tuple built by ``_build_data``.
        """
        url, payload = data
        logging.info("Url: {} ".format(url))
        response = requests.put(url, json=payload)
        response.raise_for_status()
        logging.info("response {}".format(response))

    def send_result_to_data_owners(self, model_id, contribs, data_owners):
        """PATCH the final contribution shares of a training to every owner."""
        args = [
            ("http://{}:{}/trainings/{}".format(data_owner.host, self.data_owner_port, model_id), {'contribs': contribs})
            for data_owner in data_owners
        ]
        self.async_thread_pool.run(executable=self._send_patch_request_to_data_owner, args=args)

    @deserialize_encrypted_server_data_2()
    def _send_get_request_to_data_owner(self, url):
        """GET *url* and return the decoded JSON body.

        Bug fix: the original decoded the body first and then called
        ``raise_for_status()`` on the resulting dict/list, which raised
        AttributeError on every call.  Check the HTTP status on the
        Response object, then decode.
        """
        logging.info("Url: {} ".format(url))
        response = requests.get(url)
        response.raise_for_status()
        logging.info("Response {}".format(response))
        return response.json()

    @normalize_optimized_collection_argument(active=True)
    def _build_data(self, data_owner, gradient, model_id, public_key):
        """Build the (url, payload) pair for one gradient PUT."""
        return "http://{}:{}/trainings/{}".format(data_owner.host, self.data_owner_port, model_id), {"gradient": gradient, "public_key": public_key}

    @staticmethod
    def _send_post_request_to_data_owner(data):
        """POST (url, payload); raise on HTTP errors and return the JSON body."""
        url, payload = data
        logging.info("Url: {} ".format(url))
        response = requests.post(url, json=payload, timeout=None)
        response.raise_for_status()
        logging.info("Response: {} ".format(response))
        return response.json()

    @staticmethod
    def _send_put_request_to_data_owner(data):
        """PUT (url, payload); raise on HTTP errors and return the JSON body."""
        url, payload = data
        logging.info("Url: {} ".format(url))
        response = requests.put(url, json=payload, timeout=None)
        response.raise_for_status()
        logging.info("Response: {} ".format(response))
        return response.json()

    @staticmethod
    def _send_patch_request_to_data_owner(data):
        """PATCH (url, payload); raise on HTTP errors and return the JSON body."""
        url, payload = data
        logging.info("Url: {} ".format(url))
        response = requests.patch(url, json=payload, timeout=None)
        response.raise_for_status()
        logging.info("Response: {} ".format(response))
        return response.json()

    def send_encrypted_prediction(self, data_owner, encrypted_prediction):
        """PATCH an encrypted prediction back to the data owner that owns it.

        :param data_owner: target data owner (provides ``.host``).
        :param encrypted_prediction: dict with keys 'model_id',
            'prediction_id', 'encrypted_prediction' and 'public_key'.
        :return: the raw Response object.
        """
        url = "http://{}:{}/predictions/{}".format(data_owner.host, self.data_owner_port,
                                                   encrypted_prediction["prediction_id"])
        payload = encrypted_prediction
        logging.info("Url {} payload".format(url))
        response = requests.patch(url, json=payload)
        response.raise_for_status()
        logging.info("Response {}".format(response))
        return response
|
{"hexsha": "db830d05acd2d80125ea515516a2a9245f9b88b7", "size": 7866, "ext": "py", "lang": "Python", "max_stars_repo_path": "federated_aggregator/connectors/data_owner_connector.py", "max_stars_repo_name": "DeltaML/federated-aggregator", "max_stars_repo_head_hexsha": "89ce539b82f71f8151518f4578334ae7c6f684a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-24T02:19:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-24T02:19:25.000Z", "max_issues_repo_path": "federated_aggregator/connectors/data_owner_connector.py", "max_issues_repo_name": "DeltaML/federated-aggregator", "max_issues_repo_head_hexsha": "89ce539b82f71f8151518f4578334ae7c6f684a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-07-18T03:25:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-04T17:20:09.000Z", "max_forks_repo_path": "federated_aggregator/connectors/data_owner_connector.py", "max_forks_repo_name": "DeltaML/federated-aggregator", "max_forks_repo_head_hexsha": "89ce539b82f71f8151518f4578334ae7c6f684a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.6931818182, "max_line_length": 180, "alphanum_fraction": 0.6782354437, "include": true, "reason": "import numpy", "num_tokens": 1644}
|
from sets import Set
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.mixture import GMM
from sklearn import svm
import numpy as np
from scipy import sparse
import math
import re
import string
import codecs
import math
import pdb
from sklearn import datasets
from sklearn.decomposition import PCA
#data is in hw1 format, return list of list of words format for word2vec use
def getData(filename, vocab, intEntry=False):
    """Load a dataset file in the HW1 format.

    File layout: three header lines, then one song per line of the form
    ``label word1 count1 word2 count2 ...`` where each word is a 1-based
    index into *vocab*.

    :param filename: path to the dataset file.
    :param vocab: vocabulary list (index i holds the word with id i + 1).
    :param intEntry: when True return a dense count matrix (numpy array,
        one row per song); otherwise return a list of word lists with each
        word repeated ``count`` times (word2vec input format).
    :return: (matrix, labels) where labels is a list of ints.
    """
    matrix = []
    labels = []
    # close the file deterministically (the original leaked the handle)
    with open(filename, 'r') as f:
        lines = f.readlines()
    for song in lines[3:]:
        song = song.split()
        labels.append(int(song[0]))
        words = song[1::2]
        times = song[2::2]
        if intEntry:
            row = np.zeros(len(vocab))
            for i in range(len(words)):
                row[int(words[i]) - 1] = float(times[i])
            matrix.append(row)
        else:
            bag = []
            for i in range(len(words)):
                for j in range(int(times[i])):
                    bag.append(vocab[int(words[i]) - 1])
            matrix.append(bag)
    if intEntry:
        matrix = np.array(matrix)
    return matrix, labels
# input train_text, vali_text, test_text: each being a list of strings
# train_labels, vali_labels: each being a list of labels
def useWord2vec(train_text, train_labels, test_text, test_labels, get=False):
    """Embed songs with word2vec and evaluate linear classifiers on them.

    :param train_text: list of songs, each a list of word strings.
    :param train_labels: genre labels aligned with train_text.
    :param test_text: list of songs, each a list of word strings.
    :param test_labels: genre labels aligned with test_text.
    :param get: when True, skip classification and return the embedded
        (train, test) matrices as numpy arrays instead.
    """
    from gensim.models import Word2Vec
    # word2vec is trained on the union of both splits
    sentence = []
    sentence.extend([i for i in train_text])
    sentence.extend([i for i in test_text])
    model = Word2Vec(sentence, size = 100, window = 5, min_count = 1, workers = 4)
    # each song becomes the mean of its word vectors
    trainMatrix = []
    for song in train_text:
        vectors = [model[word] for word in song]
        trainMatrix.append(np.array(vectors).mean(0))
    testMatrix = []
    for song in test_text:
        vectors = [model[word] for word in song]
        testMatrix.append(np.array(vectors).mean(0))
    # dump the embeddings; `with` closes the handles (the original leaked
    # both file objects)
    with open('word2VecTrain', 'w') as ftrain:
        for song in trainMatrix:
            for entry in song:
                ftrain.write(str(entry))
                ftrain.write(' ')
            ftrain.write('\n')
    with open('word2VecTest', 'w') as ftest:
        for song in testMatrix:
            for entry in song:
                ftest.write(str(entry))
                ftest.write(' ')
            ftest.write('\n')
    if get:
        return np.array(trainMatrix), np.array(testMatrix)
    # sweep the regularization strength for both linear classifiers
    C = [0.001, 0.01, 0.1, 1, 10, 100]
    accuracySVM = []
    accuracyLR = []
    for i in C:
        # LinearSVC lives in sklearn.svm; the original referenced a bare
        # `LinearSVC` name that was never imported (NameError).
        modelSVM = svm.LinearSVC(C = i)
        modelLR = LogisticRegression(C = i)
        modelSVM.fit(trainMatrix, train_labels)
        modelLR.fit(trainMatrix, train_labels)
        predictionsSVM = modelSVM.predict(testMatrix)
        predictionsLR = modelLR.predict(testMatrix)
        accuracySVM.append(1.0 * sum(np.equal(predictionsSVM, test_labels)) / len(test_labels))
        accuracyLR.append(1.0 * sum(np.equal(predictionsLR, test_labels)) / len(test_labels))
    print(accuracySVM)
    print(accuracyLR)
def main():
f = open('revised_dataset_test_7Genres.txt')
vocab = f.readlines()[2].split()
#(test_matrix, test_labels) = getData('reviesd_dataset_test_4Genres.txt', vocab)
#(train_matrix, train_labels) = getData('reviesd_dataset_train_4Genres.txt', vocab)
#useWord2vec(train_matrix, train_labels, test_matrix, test_labels)
#integers matrix
print 'fetch data'
(test_matrix, test_labels) = getData('revised_dataset_test_7Genres.txt', vocab, True)
(train_matrix, train_labels) = getData('revised_dataset_train_7Genres.txt', vocab, True)
transformer = TfidfTransformer(norm = False)
total = np.concatenate((train_matrix, test_matrix), axis = 0)
print 'calculate tfidf\n'
total = transformer.fit_transform(total).toarray()
train_matrix = sparse.csr_matrix(total[0: train_matrix.shape[0], :])
test_matrix = sparse.csr_matrix(total[train_matrix.shape[0]: total.shape[0], :])
#multinomial naive bayes
print 'multinomial naive bayes'
modelNB = MultinomialNB()
modelNB.fit(train_matrix, train_labels)
predictionsNB = modelNB.predict(train_matrix)
print 'train accuracy: ', 1.0 * sum(np.equal(predictionsNB, train_labels)) / len(train_labels)
predictionsNB = modelNB.predict(test_matrix)
print 'test accuracy: ', 1.0 * sum(np.equal(predictionsNB, test_labels)) / len(test_labels)
total2 = [0, 0, 0, 0, 0, 0, 0]
correct = [0, 0, 0, 0, 0, 0, 0]
for i in range(len(predictionsNB)):
total2[test_labels[i] - 1] += 1
correct[test_labels[i] - 1] += (predictionsNB[i] == test_labels[i])
print total2
print correct
for j in range(7):
print 1.0 * correct[j] / total2[j]
#linear svm
print 'linear svm'
C = [10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
i = 1e-5
modelSVM = svm.SVC(kernel = 'linear', gamma = i)
print i
modelSVM.fit(train_matrix, train_labels)
predictionsSVM = modelSVM.predict(test_matrix)
accuracySVM = 1.0 * sum(np.equal(predictionsSVM, test_labels)) / len(test_labels)
total = [0, 0, 0, 0, 0, 0, 0]
correct = [0, 0, 0, 0, 0, 0, 0]
for i in range(len(prediction)):
total[test_labels[i] - 1] += 1
correct[test_labels[i] - 1] += (predictionsSVM[i] == test_labels[i])
for j in range(7):
print 1.0 * correct[j] / total[j]
print i, accuracySVM
#gaussian svm
print 'gaussian kernel svm'
i = 0.001
modelSVM = svm.SVC(kernel = 'rbf', C = i)
modelSVM.fit(train_matrix[0: 2500, :], train_labels[0: 2500])
predictionsSVM = modelSVM.predict(test_matrix)
accuracySVM = 1.0 * sum(np.equal(predictionsSVM, test_labels)) / len(test_labels)
print i, accuracySVM
#gaussian naive bayes
modelNB = GaussianNB()
modelNB.fit(train_matrix.toarray(), train_labels)
predictionsNB = modelNB.predict(test_matrix.toarray())
print 'test accuracy: ', 1.0 * sum(np.equal(predictionsNB, test_labels)) / len(test_labels)
predictionsNB = modelNB.predict(train_matrix.toarray())
print 'train accuracy: ', 1.0 * sum(np.equal(predictionsNB, train_labels)) / len(train_labels)
#logistic regression
print 'doing feature selection with PCA\nextract :',
featureNum = 3500
print featureNum, ' features'
pca = PCA(n_components = featureNum)
total = pca.fit_transform(total)
#get training data
train_matrix = total[0: train_matrix.shape[0], :]
test_matrix = total[train_matrix.shape[0]: total.shape[0], :]
#concatenate constant
train_matrix = np.concatenate((train_matrix, np.ones([train_matrix.shape[0], 1])), axis = 1)
test_matrix = np.concatenate((test_matrix, np.ones([test_matrix.shape[0], 1])), axis = 1)
print 'logistic regression'
C = [10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
for i in C:
print i,
modelLR = modelLR = LogisticRegression(C = i)
modelLR.fit(train_matrix, train_labels)
predictionsLR = modelLR.predict(test_matrix)
accuracyLR = (1.0 * sum(np.equal(predictionsLR, test_labels)) / len(test_labels))
print 'test accuracy: ', accuracyLR
predictionsLR = modelLR.predict(train_matrix)
accuracyLR = (1.0 * sum(np.equal(predictionsLR, train_labels)) / len(train_labels))
print 'train accuracy: ', accuracyLR
if __name__ == '__main__':
main()
|
{"hexsha": "2ce9ab846ad6f53aeb83351ea77c6ff261b1f60a", "size": 7083, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code Submission/Genre Classification/classifiers.py", "max_stars_repo_name": "bluechill/Gendered-Lyrical-Identification", "max_stars_repo_head_hexsha": "adecb7cf356b0ca1b6b6f3bca80fa4aadb125d97", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-03-15T15:08:02.000Z", "max_stars_repo_stars_event_max_datetime": "2016-03-15T15:08:02.000Z", "max_issues_repo_path": "Code Submission/Genre Classification/classifiers.py", "max_issues_repo_name": "bluechill/Gendered-Lyrical-Identification", "max_issues_repo_head_hexsha": "adecb7cf356b0ca1b6b6f3bca80fa4aadb125d97", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-10-31T16:44:00.000Z", "max_issues_repo_issues_event_max_datetime": "2015-10-31T16:44:00.000Z", "max_forks_repo_path": "Code Submission/Genre Classification/classifiers.py", "max_forks_repo_name": "bluechill/Music-Lyrics-Classification-and-Generation", "max_forks_repo_head_hexsha": "adecb7cf356b0ca1b6b6f3bca80fa4aadb125d97", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4103773585, "max_line_length": 95, "alphanum_fraction": 0.6963151207, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2038}
|
[STATEMENT]
lemma mag_zero [simp]: "mag 0 = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mag 0 = (0::'a)
[PROOF STEP]
by (simp add: zero_Quantity_ext_def)
|
{"llama_tokens": 79, "file": "Physical_Quantities_ISQ_Quantities", "length": 1}
|
#!/usr/bin/env python
""""
Take event file and create multiple new event files separated by CCD
command from CIAO: dmcopy filtered_event.fits[EVENTS][ccd_id=N] out.fits clobber=yes
Make sure CIAO is running before running this script
"""
import argparse
import os
import subprocess
import astropy.io.fits as pyfits
from chandra_suli.run_command import CommandRunner
from chandra_suli.logging_system import get_logger
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create new event files separated by CCD')
parser.add_argument('--evtfile', help="Event file name", type=str, required=True)
args = parser.parse_args()
logger = get_logger("separate_CCD.py")
runner = CommandRunner(logger)
print "Separating by CCD..."
for ccd_id in range(10):
ccd_file = "ccd_%s_%s" % (ccd_id, os.path.basename(args.evtfile))
cmd_line = "dmcopy %s[EVENTS][ccd_id=%s] %s clobber=yes" % (args.evtfile, ccd_id, ccd_file)
runner.run(cmd_line)
# check if certain CCD files are empty and then delete them if so
f = pyfits.open("%s" % (ccd_file))
ccd_data = f[1].data
if len(ccd_data) == 0:
os.remove(ccd_file)
f.close()
|
{"hexsha": "b44b89e1254f8a1fc5ef2fc7f946909408da49dc", "size": 1242, "ext": "py", "lang": "Python", "max_stars_repo_path": "chandra_suli/separate_CCD.py", "max_stars_repo_name": "nitikayad96/chandra_suli", "max_stars_repo_head_hexsha": "905ded69825f8b3d4fa29a84661697abdb827a87", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-08T19:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-08T19:57:19.000Z", "max_issues_repo_path": "chandra_suli/separate_CCD.py", "max_issues_repo_name": "nitikayad96/chandra_suli", "max_issues_repo_head_hexsha": "905ded69825f8b3d4fa29a84661697abdb827a87", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chandra_suli/separate_CCD.py", "max_forks_repo_name": "nitikayad96/chandra_suli", "max_forks_repo_head_hexsha": "905ded69825f8b3d4fa29a84661697abdb827a87", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3469387755, "max_line_length": 99, "alphanum_fraction": 0.6876006441, "include": true, "reason": "import astropy", "num_tokens": 318}
|
[STATEMENT]
lemma LIM_offset_zero_cancel: "(\<lambda>h. f (a + h)) \<midarrow>0\<rightarrow> L \<Longrightarrow> f \<midarrow>a\<rightarrow> L"
for a :: "'a::real_normed_vector"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>h. f (a + h)) \<midarrow>0::'a\<rightarrow> L \<Longrightarrow> f \<midarrow>a\<rightarrow> L
[PROOF STEP]
by (drule LIM_offset [where k = "- a"]) simp
|
{"llama_tokens": 149, "file": null, "length": 1}
|
from __future__ import division
from sympy import S, sqrt, Sum, symbols
from sympy.physics.quantum.cg import Wigner3j, Wigner6j, Wigner9j, CG, cg_simp
from sympy.functions.special.tensor_functions import KroneckerDelta
def test_cg_simp_add():
    """Check cg_simp on sums/products of Clebsch-Gordan coefficients.

    Exercises the identities from Varshalovich 8.7.1 (Eqs 1 and 2) and
    8.7.2 (Eq 9), both with numeric quantum numbers and with a symbolic
    total momentum j (where results are gated by KroneckerDelta(j, 0)),
    including partial/scaled sums that should only simplify in part.
    """
    j, m1, m1p, m2, m2p = symbols('j m1 m1p m2 m2p')
    # Test Varshalovich 8.7.1 Eq 1
    a = CG(S(1)/2,S(1)/2,0,0,S(1)/2,S(1)/2)
    b = CG(S(1)/2,-S(1)/2,0,0,S(1)/2,-S(1)/2)
    c = CG(1,1,0,0,1,1)
    d = CG(1,0,0,0,1,0)
    e = CG(1,-1,0,0,1,-1)
    assert cg_simp(a+b) == 2
    assert cg_simp(c+d+e) == 3
    assert cg_simp(a+b+c+d+e) == 5
    # partial sums keep the leftover term unsimplified
    assert cg_simp(a+b+c) == 2+c
    assert cg_simp(2*a+b) == 2+a
    assert cg_simp(2*c+d+e) == 3+c
    assert cg_simp(5*a+5*b) == 10
    assert cg_simp(5*c+5*d+5*e) == 15
    assert cg_simp(-a-b) == -2
    assert cg_simp(-c-d-e) == -3
    assert cg_simp(-6*a-6*b) == -12
    assert cg_simp(-4*c-4*d-4*e) == -12
    # same identity with symbolic j: result carries KroneckerDelta(j, 0)
    a = CG(S(1)/2,S(1)/2,j,0,S(1)/2,S(1)/2)
    b = CG(S(1)/2,-S(1)/2,j,0,S(1)/2,-S(1)/2)
    c = CG(1,1,j,0,1,1)
    d = CG(1,0,j,0,1,0)
    e = CG(1,-1,j,0,1,-1)
    assert cg_simp(a+b) == 2*KroneckerDelta(j,0)
    assert cg_simp(c+d+e) == 3*KroneckerDelta(j,0)
    assert cg_simp(a+b+c+d+e) == 5*KroneckerDelta(j,0)
    assert cg_simp(a+b+c) == 2*KroneckerDelta(j,0)+c
    assert cg_simp(2*a+b) == 2*KroneckerDelta(j,0)+a
    assert cg_simp(2*c+d+e) == 3*KroneckerDelta(j,0)+c
    assert cg_simp(5*a+5*b) == 10*KroneckerDelta(j,0)
    assert cg_simp(5*c+5*d+5*e) == 15*KroneckerDelta(j,0)
    assert cg_simp(-a-b) == -2*KroneckerDelta(j,0)
    assert cg_simp(-c-d-e) == -3*KroneckerDelta(j,0)
    assert cg_simp(-6*a-6*b) == -12*KroneckerDelta(j,0)
    assert cg_simp(-4*c-4*d-4*e) == -12*KroneckerDelta(j,0)
    # Test Varshalovich 8.7.1 Eq 2
    a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)
    b = CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,0,0)
    c = CG(1,1,1,-1,0,0)
    d = CG(1,0,1,0,0,0)
    e = CG(1,-1,1,1,0,0)
    assert cg_simp(a-b) == sqrt(2)
    assert cg_simp(c-d+e) == sqrt(3)
    assert cg_simp(a-b+c-d+e) == sqrt(2)+sqrt(3)
    assert cg_simp(a-b+c) == sqrt(2)+c
    assert cg_simp(2*a-b) == sqrt(2)+a
    assert cg_simp(2*c-d+e) == sqrt(3)+c
    assert cg_simp(5*a-5*b) == 5*sqrt(2)
    assert cg_simp(5*c-5*d+5*e) == 5*sqrt(3)
    assert cg_simp(-a+b) == -sqrt(2)
    assert cg_simp(-c+d-e) == -sqrt(3)
    assert cg_simp(-6*a+6*b) == -6*sqrt(2)
    assert cg_simp(-4*c+4*d-4*e) == -4*sqrt(3)
    # symbolic-j version of Eq 2
    a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,j,0)
    b = CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,j,0)
    c = CG(1,1,1,-1,j,0)
    d = CG(1,0,1,0,j,0)
    e = CG(1,-1,1,1,j,0)
    assert cg_simp(a-b) == sqrt(2)*KroneckerDelta(j,0)
    assert cg_simp(c-d+e) == sqrt(3)*KroneckerDelta(j,0)
    assert cg_simp(a-b+c-d+e) == sqrt(2)*KroneckerDelta(j,0)+sqrt(3)*KroneckerDelta(j,0)
    assert cg_simp(a-b+c) == sqrt(2)*KroneckerDelta(j,0)+c
    assert cg_simp(2*a-b) == sqrt(2)*KroneckerDelta(j,0)+a
    assert cg_simp(2*c-d+e) == sqrt(3)*KroneckerDelta(j,0)+c
    assert cg_simp(5*a-5*b) == 5*sqrt(2)*KroneckerDelta(j,0)
    assert cg_simp(5*c-5*d+5*e) == 5*sqrt(3)*KroneckerDelta(j,0)
    assert cg_simp(-a+b) == -sqrt(2)*KroneckerDelta(j,0)
    assert cg_simp(-c+d-e) == -sqrt(3)*KroneckerDelta(j,0)
    assert cg_simp(-6*a+6*b) == -6*sqrt(2)*KroneckerDelta(j,0)
    assert cg_simp(-4*c+4*d-4*e) == -4*sqrt(3)*KroneckerDelta(j,0)
    # Test Varshalovich 8.7.2 Eq 9
    # alpha=alphap,beta=betap case
    # numerical
    a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,1,0)**2
    b = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)**2
    c = CG(1,0,1,1,1,1)**2
    d = CG(1,0,1,1,2,1)**2
    assert cg_simp(a+b) == 1
    assert cg_simp(c+d) == 1
    assert cg_simp(a+b+c+d) == 2
    assert cg_simp(4*a+4*b) == 4
    assert cg_simp(4*c+4*d) == 4
    assert cg_simp(5*a+3*b) == 3+2*a
    assert cg_simp(5*c+3*d) == 3+2*c
    assert cg_simp(-a-b) == -1
    assert cg_simp(-c-d) == -1
    # symbolic
    a = CG(S(1)/2,m1,S(1)/2,m2,1,1)**2
    b = CG(S(1)/2,m1,S(1)/2,m2,1,0)**2
    c = CG(S(1)/2,m1,S(1)/2,m2,1,-1)**2
    d = CG(S(1)/2,m1,S(1)/2,m2,0,0)**2
    assert cg_simp(a+b+c+d) == 1
    assert cg_simp(4*a+4*b+4*c+4*d) == 4
    assert cg_simp(3*a+5*b+3*c+4*d) == 3+2*b+d
    assert cg_simp(-a-b-c-d) == -1
    a = CG(1,m1,1,m2,2,2)**2
    b = CG(1,m1,1,m2,2,1)**2
    c = CG(1,m1,1,m2,2,0)**2
    d = CG(1,m1,1,m2,2,-1)**2
    e = CG(1,m1,1,m2,2,-2)**2
    f = CG(1,m1,1,m2,1,1)**2
    g = CG(1,m1,1,m2,1,0)**2
    h = CG(1,m1,1,m2,1,-1)**2
    i = CG(1,m1,1,m2,0,0)**2
    assert cg_simp(a+b+c+d+e+f+g+h+i) == 1
    assert cg_simp(4*(a+b+c+d+e+f+g+h+i)) == 4
    assert cg_simp(a+b+2*c+d+4*e+f+g+h+i) == 1+c+3*e
    assert cg_simp(-a-b-c-d-e-f-g-h-i) == -1
    # alpha!=alphap or beta!=betap case
    # numerical
    a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,1,0)*CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,1,0)
    b = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)*CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,0,0)
    c = CG(1,1,1,0,2,1)*CG(1,0,1,1,2,1)
    d = CG(1,1,1,0,1,1)*CG(1,0,1,1,1,1)
    assert cg_simp(a+b) == 0
    assert cg_simp(c+d) == 0
    # symbolic: orthogonality yields a product of KroneckerDeltas
    a = CG(S(1)/2,m1,S(1)/2,m2,1,1)*CG(S(1)/2,m1p,S(1)/2,m2p,1,1)
    b = CG(S(1)/2,m1,S(1)/2,m2,1,0)*CG(S(1)/2,m1p,S(1)/2,m2p,1,0)
    c = CG(S(1)/2,m1,S(1)/2,m2,1,-1)*CG(S(1)/2,m1p,S(1)/2,m2p,1,-1)
    d = CG(S(1)/2,m1,S(1)/2,m2,0,0)*CG(S(1)/2,m1p,S(1)/2,m2p,0,0)
    assert cg_simp(a+b+c+d) == KroneckerDelta(m1,m1p)*KroneckerDelta(m2,m2p)
    a = CG(1,m1,1,m2,2,2)*CG(1,m1p,1,m2p,2,2)
    b = CG(1,m1,1,m2,2,1)*CG(1,m1p,1,m2p,2,1)
    c = CG(1,m1,1,m2,2,0)*CG(1,m1p,1,m2p,2,0)
    d = CG(1,m1,1,m2,2,-1)*CG(1,m1p,1,m2p,2,-1)
    e = CG(1,m1,1,m2,2,-2)*CG(1,m1p,1,m2p,2,-2)
    f = CG(1,m1,1,m2,1,1)*CG(1,m1p,1,m2p,1,1)
    g = CG(1,m1,1,m2,1,0)*CG(1,m1p,1,m2p,1,0)
    h = CG(1,m1,1,m2,1,-1)*CG(1,m1p,1,m2p,1,-1)
    i = CG(1,m1,1,m2,0,0)*CG(1,m1p,1,m2p,0,0)
    assert cg_simp(a+b+c+d+e+f+g+h+i) == KroneckerDelta(m1,m1p)*KroneckerDelta(m2,m2p)
def test_cg_simp_sum():
    """Check cg_simp on symbolic Sum() expressions over CG coefficients.

    Covers the summation identities of Varshalovich 8.7.1 Eqs 1-2 and the
    orthogonality relation 8.7.2 Eq 4, with symbolic and numeric bounds.
    """
    x, a, b, c, cp, alpha, beta, gamma, gammap = symbols('x a b c cp alpha beta gamma gammap')
    # Varshalovich 8.7.1 Eq 1
    assert cg_simp(x * Sum(CG(a,alpha,b,0,a,alpha), (alpha,-a,a))) == x*(2*a+1)*KroneckerDelta(b,0)
    assert cg_simp(x * Sum(CG(a,alpha,b,0,a,alpha), (alpha,-a,a))+CG(1,0,1,0,1,0)) == x*(2*a+1)*KroneckerDelta(b,0)+CG(1,0,1,0,1,0)
    assert cg_simp(2 * Sum(CG(1,alpha,0,0,1,alpha), (alpha,-1,1))) == 6
    # Varshalovich 8.7.1 Eq 2
    assert cg_simp(x*Sum((-1)**(a-alpha) * CG(a,alpha,a,-alpha,c,0), (alpha,-a,a))) == x*sqrt(2*a+1)*KroneckerDelta(c,0)
    assert cg_simp(3*Sum((-1)**(2-alpha) * CG(2,alpha,2,-alpha,0,0), (alpha,-2,2))) == 3*sqrt(5)
    # Varshalovich 8.7.2 Eq 4
    assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,cp,gammap),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(c,cp)*KroneckerDelta(gamma,gammap)
    assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,c,gammap),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(gamma,gammap)
    assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,cp,gamma),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(c,cp)
    assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)**2,(alpha,-a,a),(beta,-b,b))) == 1
    assert cg_simp(Sum(CG(2,alpha,1,beta,2,gamma)*CG(2,alpha,1,beta,2,gammap), (alpha,-2,2), (beta,-1,1))) == KroneckerDelta(gamma,gammap)
def test_doit():
    """Spot-check numeric evaluation (.doit()) of Wigner 3j/6j/9j symbols
    and a Clebsch-Gordan coefficient against known closed-form values.

    NOTE(review): the bare ``1/2`` arguments are Python floats (true
    division is in effect via ``from __future__ import division``), not
    exact S(1)/2 rationals — presumably equivalent for these cases, but
    worth confirming against sympy's handling of float quantum numbers.
    """
    assert Wigner3j(1/2,-1/2,1/2,1/2,0,0).doit() == -sqrt(2)/2
    assert Wigner6j(1,2,3,2,1,2).doit() == sqrt(21)/105
    assert Wigner9j(2,1,1,S(3)/2,S(1)/2,1,S(1)/2,S(1)/2,0).doit() == sqrt(2)/12
    assert CG(1/2,1/2,1/2,-1/2,1,0).doit() == sqrt(2)/2
|
{"hexsha": "ac853d7f24d933af914f8fead97759958ec15ae0", "size": 7510, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy/physics/quantum/tests/test_cg.py", "max_stars_repo_name": "sn6uv/sympy", "max_stars_repo_head_hexsha": "5b149c2f72847e4785c65358b09d99b29f101dd5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-05-11T12:26:38.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-19T00:11:03.000Z", "max_issues_repo_path": "sympy/physics/quantum/tests/test_cg.py", "max_issues_repo_name": "goodok/sympy", "max_issues_repo_head_hexsha": "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sympy/physics/quantum/tests/test_cg.py", "max_forks_repo_name": "goodok/sympy", "max_forks_repo_head_hexsha": "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0736196319, "max_line_length": 158, "alphanum_fraction": 0.5552596538, "include": true, "reason": "from sympy", "num_tokens": 3756}
|
import pandas as pd
import numpy as np
from klasy.RedisClient import RedisClient
from klasy.CassandraClient import CassandraClient
import json
from datetime import datetime
# INFO
#
# NOTE: using an aggregating function in Pandas via the `aggfunc` argument is
# reportedly rather unusual and worth covering in the report
class PandasMovies:
''' wczytanie danych z plików '''
    def __init__(self, datasetrows=None, useRedis=False, RedisHost=None, RedisPort=None, RedisDB=None,
                 useCassandra=False, CassandraHost=None, CassandraPort=None):
        """Load the ratings dataset, preferring Redis/Cassandra over files.

        If the chosen backend already holds data, it is read from there;
        otherwise the file dataset is loaded and pushed into the backend.
        When neither backend supplied data, the files are used directly.

        :param datasetrows: row limit for the file loader (None = all rows).
        :param useRedis: when True, try Redis first (host/port/db below).
        :param useCassandra: when True, try Cassandra (host/port below).
        """
        self._useRedis = useRedis
        self._useCasandra = useCassandra
        # only user_ratedmovies.dat.txt is mirrored into redis and cassandra
        usedDB = False
        if useRedis == True:
            self._RClient = RedisClient(RedisHost, RedisPort, RedisDB)
            # check whether Redis already holds the ratings list
            ''' detekcja, czy Redis jest pusty'''
            _rated_movies = [json.loads(el) for el in self._RClient.lrange("_rated_movies", 0, -1)]
            # decide where to load the data from
            ''' podjecie decyzji skad wczytac dane '''
            if (len(_rated_movies) == 0):
                # Redis was empty: load from files and seed Redis
                ''' nic nie bylo w redisie, wiec trzeba zaladowac dane z pliku'''
                print("Redis is empty")
                self.loadDatasetFromFiles(datasetrows)
                self.pushAllDataToRedis()
            else:
                # Redis had data: use it as the source of truth
                ''' cos jest w redisie, wiec korzystamy z redisa '''
                self._rated_movies = dictToDF(_rated_movies)
                usedDB = True
        if useCassandra == True:
            self._CClient = CassandraClient(CassandraHost, port=CassandraPort)
            if len(self._CClient.get_data_table()) == 0:
                # Cassandra was empty: load from files and seed it
                print("Cassandra empty")
                self.loadDatasetFromFiles(datasetrows)
                self.pushAllDataToCassandra()
            else:
                self._rated_movies = dictToDF(self._CClient.get_data_table())
                usedDB = True
        if usedDB == False:
            # no backend supplied data: fall back to the plain files
            self.loadDatasetFromFiles(datasetrows)
        # remaining (file-only) part of the data loading:
        # the movie-genres table is never stored in redis
        self._movie_genres = pd.read_csv("../dane/movie_genres.dat.txt", sep='\t', dtype={"movieID": int})
        self._movie_genres_dummy = self._movie_genres.copy()
        self._movie_genres_dummy['dummy_column'] = 1
        self._movie_genres_names = list({row["genre"] for index, row in self._movie_genres.iterrows()})
        # print(self._movie_genres_names)
        joined = self._rated_movies.join(self._movie_genres, rsuffix='genres')
        self._tables = joined
        print("Data loaded")
def loadDatasetFromFiles(self, datasetrows):
if datasetrows == None:
self._rated_movies = pd.read_csv("../dane/user_ratedmovies.dat.txt", sep='\t',
dtype={"userID": int, "rating": np.float64})
else:
self._rated_movies = pd.read_csv("../dane/user_ratedmovies.dat.txt", sep='\t',
dtype={"userID": int, "rating": np.float64}, nrows=datasetrows)
def pushAllDataToRedis(self):
for index, row in self._rated_movies.iterrows():
self._RClient.rpush("_rated_movies", row.to_json(orient='columns'))
    def pushAllDataToCassandra(self):
        """Copy every rating row into the Cassandra data table.

        Each DataFrame row is round-tripped through JSON to obtain plain
        Python scalars, then written column-by-column with the DataFrame
        index used as the row key.
        """
        for index, row in self._rated_movies.iterrows():
            jsoned = json.loads(row.to_json(orient='columns'))
            self._CClient.push_data_table(index, int(jsoned["userID"]), int(jsoned["movieID"]), float(jsoned["rating"]), int(jsoned["date_day"]),
                                          int(jsoned["date_month"]), int(jsoned["date_year"]), int(jsoned["date_hour"]),
                                          int(jsoned["date_minute"]), int(jsoned["date_second"]))
''' pobranie czystego joina '''
    def getJoined(self):
        """Return the cached join of ratings with movie genres built in __init__."""
        return self._tables
''' zrobienie Pivota na całą tabele '''
    def getPivotAllTable(self):
        """One-hot encode movie genres and attach them to every rating row.

        Pivots the dummy-column genre table into movieID x genre indicator
        columns (prefixed ``genre-``), merges them onto the ratings and
        drops the date columns.

        :return: int DataFrame with userID, movieID, rating and genre-* flags.
        """
        self._pivoted = self._movie_genres_dummy.pivot_table(index=['movieID'], columns='genre', values='dummy_column',
                                                             fill_value=0).add_prefix("genre-")
        self._joined = pd.merge(self._rated_movies, self._pivoted, on="movieID").drop(
            ["date_day", "date_minute", "date_month", "date_second", "date_year", "date_hour"], axis=1).astype(int)
        return self._joined
''' pobranie wszystkich gatunków na podstawie ID usera '''
    def getPivotUser(self, userID):
        """Compute the mean rating per genre for a single user.

        Genres the user never rated are added as 0 columns, so every user
        vector ends up with the same, alphabetically sorted set of
        ``genre-`` columns.

        :param userID: id of the user whose ratings are aggregated.
        :return: one-row DataFrame of per-genre mean ratings.
        """
        self._joined = pd.merge(self._rated_movies[self._rated_movies.userID == userID], self._movie_genres,
                                on="movieID")
        self._pivoted = self._joined.pivot_table(columns='genre', fill_value=0, aggfunc=np.mean, values="rating",
                                                 dropna=False).add_prefix("genre-")
        # print(list(self._pivoted.columns))
        # pad genres absent from this user's history with zero columns
        for colname in self._movie_genres_names:
            if "genre-" + colname not in self._pivoted.columns:
                # print(colname)
                self._pivoted.insert(len(self._pivoted), "genre-" + colname, 0)
        self._pivoted = self._pivoted.reindex(sorted(self._pivoted.columns), axis=1)
        return self._pivoted
''' pobranie czystego pivota '''
def getAvg(self):
self._joined = pd.merge(self._rated_movies, self._movie_genres, on="movieID")
# print(self._joined)
self._pivoted = self._joined.pivot_table(columns='genre', fill_value=0, aggfunc=np.mean,
values="rating").add_prefix("genre-")
for colname in self._movie_genres_names:
if "genre-" + colname not in self._pivoted.columns:
# print(colname)
self._pivoted.insert(len(self._pivoted), "genre-" + colname, 0)
self._pivoted = self._pivoted.reindex(sorted(self._pivoted.columns), axis=1)
return self._pivoted
''' wektory roznic dla kazdego gatunku na podstawie srednich ocen filmow wystawionych przez usera '''
def getDifferenceWithAvgUser(self, userID):
return self.getAvg().subtract(self.getPivotUser(userID)).fillna(0)
# return self.getAvg() - self.getPivotUser(userID)
''' przepisanie ratingu pod kolumne z konkretnym gatunkiem '''
def rewriteRatingToGenreColumn(self):
self._pivoted = self._tables.pivot_table(index=['movieID'], columns='genre', values='rating',
fill_value=0).add_prefix("genre-")
self._joined = pd.merge(self._rated_movies, self._pivoted, on="movieID").drop(
["date_day", "date_minute", "date_month", "date_second", "date_year", "date_hour"], axis=1).astype(int)
return self._joined
''' dodawanie wpisu '''
def appendRecord(self, userID, rating, movieID):
#
# rozkminioine na chlopski rozum
# najprosciej dodac wpis a'la user_ratedmovies.dat.txt
# reszta bedzie dostosowana do tego
#
dic = {}
dic["userID"] = userID
dic["movieID"] = movieID
dic["rating"] = rating
dic["date_day"] = datetime.today().strftime("%d")
dic["date_month"] = datetime.today().strftime("%m")
dic["date_year"] = datetime.today().strftime("%y")
dic["date_hour"] = datetime.today().strftime("%H")
dic["date_minute"] = datetime.today().strftime("%M")
dic["date_second"] = datetime.today().strftime("%S")
df = pd.DataFrame(dic, index=[0])
self._rated_movies = self._rated_movies.append(df, sort=True)
self.reloadRatedMovies()
if self._useRedis == True:
self._RClient.rpush("_rated_movies", json.dumps(dic))
if self._useCasandra == True:
self._CClient.push_data_table(self._CClient.lastindex+1, dic["userID"], dic["movieID"], dic["rating"], int(dic["date_day"]), int(dic["date_month"]), int(dic["date_year"]), int(dic["date_hour"]), int(dic["date_minute"]), int(dic["date_second"]))
''' nie używana, ale moze się kiedyś przyda '''
def dropRecord(self, userID, rating, movieID):
# dropuje konkretny wpis
self._rated_movies = self._rated_movies.drop(self._rated_movies[(self._rated_movies.userID == userID) & (
self._rated_movies.rating == rating) & (self._rated_movies.movieID == movieID)].index)
self.reloadRatedMovies()
''' pełen drop '''
def fullDrop(self):
self._rated_movies = self._rated_movies.iloc[0:0]
self.reloadRatedMovies()
if self._useRedis == True:
self._RClient.wyczysc_kolejke("_rated_movies")
if self._useCasandra == True:
self._CClient.clear_table()
''' funkcja reloadowania, po wiekszych zmianach '''
def reloadRatedMovies(self):
joined = self._rated_movies.join(self._movie_genres, rsuffix='genres')
self._tables = joined
''' Data frame na slownik '''
def dfToDict(df):
    """Convert a DataFrame into a list of one dictionary per row."""
    records = df.to_dict(orient='records')
    return records
''' Slownik na Data Frame '''
def dictToDF(records):
    """Build a DataFrame from a list of per-row dictionaries, with the
    columns sorted alphabetically.

    The parameter was renamed from ``dict``: shadowing the builtin hid the
    ``dict`` type inside this function. It is only called positionally in
    this module.
    """
    df = pd.DataFrame.from_dict(records)
    return df.sort_index(axis=1)
''' sprawdzenie bezstratnosci '''
def bezstratnosc(df):
    """Losslessness check: round-tripping the join through a list of dicts
    and back must reproduce the original frame (column-sorted compare)."""
    original = df.getJoined().sort_index(axis=1)
    round_tripped = dictToDF(dfToDict(df.getJoined()))
    return (original == round_tripped).all()
if __name__ == '__main__':
    # Manual smoke test: load only two ratings rows, skip Redis, mirror
    # writes to Cassandra on localhost:9043.
    pm = PandasMovies(datasetrows=2, useRedis=False, RedisHost="localhost", RedisPort=6379, RedisDB=0, useCassandra=True, CassandraHost='localhost', CassandraPort=9043)
    # pm = PandasMovies()
    # print(pm.getAvg())
    # pm.appendRecord(78, 5, 3)
    # pm.fullDrop()
    # print(pm.getAvg())
    print(pm.getPivotAllTable())
    print(pm.getAvg())
    #print(pm.getPivotAllTable())
    #print(pm.getAvg())
    #print(pm.getDifferenceWithAvgUser(75))
    #print(pm.getPivotUser(75))
    # task 4, lab 4
    # print("ZAD 4")
    # print(bezstratnosc(pm))
    # task 5, lab 4
    # print("ZAD 5")
    # print(dfToDict(pm.getAvg()))
    # task 5 (diagnostic), lab 4
    # print("ZAD 5 - DIAGNOSTYCZNE")
    # for item in dfToDict(pm.rewriteRatingToGenreColumn()):
    #     print(item)
    # task 6, lab 4
    # print("ZAD 6")
    # print(dfToDict(pm.getPivotUser(78)))
    # task 7, lab 4
    # print("ZAD 7")
    # print(dfToDict(pm.getDifferenceWithAvgUser(78)))
|
{"hexsha": "fdcba9215192d9d4630f84c29a41664e9112b486", "size": 10294, "ext": "py", "lang": "Python", "max_stars_repo_path": "klasy/PandasMovies.py", "max_stars_repo_name": "BMarcin/PP_sem6_WTI_lab", "max_stars_repo_head_hexsha": "19bd0ee88a0a4751b2683d8fd6694fbbce7ba698", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-09T09:44:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-21T11:57:46.000Z", "max_issues_repo_path": "klasy/PandasMovies.py", "max_issues_repo_name": "BMarcin/PP_sem6_WTI_lab", "max_issues_repo_head_hexsha": "19bd0ee88a0a4751b2683d8fd6694fbbce7ba698", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "klasy/PandasMovies.py", "max_forks_repo_name": "BMarcin/PP_sem6_WTI_lab", "max_forks_repo_head_hexsha": "19bd0ee88a0a4751b2683d8fd6694fbbce7ba698", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-03-17T13:46:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-12T13:48:10.000Z", "avg_line_length": 36.8960573477, "max_line_length": 256, "alphanum_fraction": 0.6156984651, "include": true, "reason": "import numpy", "num_tokens": 2595}
|
#Python3
#Creando un diseño de Baticircuito
###### IMPORTANTO PAQUETES ######
import numpy as np
###### CURRENT SOURCE VALUES (amperes) ######
i1 = 0.001
i2 = 0.002
i3 = 0.003
i4 = 0.004
i5 = 0.005
# Branch resistances (ohms).
r1 = 1000
r2 = 2000
r3 = 3000
r4 = 4000
r5 = 5000
# Right-hand side: net current injected into each of the 18 nodes
# (presumably Kirchhoff's current law per node -- confirm against the
# circuit schematic).
corriente = np.array([-i1, 0, i5, -i5, 0, i1, 0, 0, -i2, 0, i2, i3, 0, -i4, i4, 0, -i3, 0])
# n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15 n16 n17 n18
###### SYSTEM MATRIX ######
# NOTE(review): despite the name ("tension" = voltage), this matrix holds the
# coefficients of the linear system tension @ p = corriente. The -1/r
# off-diagonal terms look like a nodal conductance matrix -- confirm.
tension = np.array([[17/12000, -1/r3, 0, 0, 0, 0, 0, -1/r2, -1/r4, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [-1/r3, 9/4000, -1/750, -1/r3, 0, -1/r4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, -1/750, 7/2400, -1/r4, -1/r3, 0, -1/r1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, -1/r3, -1/r4, 107/60000, -1/r1, -1/r5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, -1/r3, -1/r1, 19/12000, 0, -1/r4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, -1/r4, 0, -1/r5, 0, 7/10000, 0, -1/r4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, -1/r1, 0, -1/r4, 0, 7/4000, -1/r2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [-1/r2, 0, 0, 0, 0, -1/r4, -1/r2, 37/12000, -1/r3, -1/r1, -1/r2, 0, 0, 0, 0, 0, 0, 0],
                    [-1/r4, 0, 0, 0, 0, 0, 0, -1/r3, 107/60000, -1/r1, 0, 0, 0, 0, 0, 0, -1/r5, 0],
                    [0, 0, 0, 0, 0, 0, 0, -1/r1, -1/r1, 69/20000, -1/r2, -1/r2, 0, 0, 0, 0, -1/r4, -1/r5],
                    [0, 0, 0, 0, 0, 0, 0, -1/r2, 0, -1/r2, 1/1000, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, -1/r2, 0, 13/12000, 0, 0, -1/r3, -1/r4, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19/12000, -1/r4, -1/r1, 0, 0, -1/r3],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1/r4, 89/60000, -1/r5, -1/1875, 0, -1/r2],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1/r3, -1/r1, -1/r5, 61/30000, -1/r2, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1/r4, 0, -1/1875, -1/r2, 97/60000, -1/r3, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, -1/r5, -1/r4, 0, 0, 0, 0, 0, -1/r3, 31/30000, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, -1/r5, 0, 0, -1/r3, -1/r2, 0, 0, 0, 31/30000]])
###### SOLVING THE SYSTEM ######
# Solve the 18x18 linear system for the node unknowns (presumably node
# voltages) and print them.
p = np.linalg.solve(tension, corriente)
print(p)
|
{"hexsha": "113550cdcea8da951438f28ce8848eef438b0781", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "DisenoBaticircuito.py", "max_stars_repo_name": "brown9804/Python_DiversosAlgortimos", "max_stars_repo_head_hexsha": "e9ff0fbe761f24a49a30a513d50824ca56cafaa3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-06-28T21:06:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-01T20:39:30.000Z", "max_issues_repo_path": "DisenoBaticircuito.py", "max_issues_repo_name": "brown9804/Python_DiversosAlgortimos", "max_issues_repo_head_hexsha": "e9ff0fbe761f24a49a30a513d50824ca56cafaa3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DisenoBaticircuito.py", "max_forks_repo_name": "brown9804/Python_DiversosAlgortimos", "max_forks_repo_head_hexsha": "e9ff0fbe761f24a49a30a513d50824ca56cafaa3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3962264151, "max_line_length": 132, "alphanum_fraction": 0.4080996885, "include": true, "reason": "import numpy", "num_tokens": 1445}
|
from enum import Enum
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image
class FaceEmotionEstimatorModels(Enum):
    """Available emotion-estimation back-ends."""
    KERAS = 0
    # Alias member: the default back-end is the Keras model.
    DEFAULT = KERAS
class FaceEmotionEstimator:
    """Facade that dispatches emotion estimation to a concrete back-end."""

    def __init__(self, model=FaceEmotionEstimatorModels.DEFAULT, path=None):
        """Select and construct the back-end; ``path`` is forwarded to it."""
        if model == FaceEmotionEstimatorModels.KERAS:
            self._base = FaceEmotionEstimator_KERAS(path)
        else:
            # No other back-ends exist yet.
            self._base = None

    def estimate(self, frame, face_image):
        """Delegate estimation of ``face_image`` to the selected back-end."""
        return self._base.estimate(frame, face_image)
class FaceEmotionEstimator_KERAS:
    """Keras-based emotion classifier over 48x48 grayscale face crops."""

    def __init__(self, path):
        """Load the model architecture and weights from ``path``.

        Fix: the original leaked the file handle returned by ``open``; the
        context manager closes it deterministically.
        """
        with open(path + 'emotion_deploy.json', "r") as model_file:
            self._classifier = model_from_json(model_file.read())
        self._classifier.load_weights(path + 'emotion_net.h5')
        self._selection = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

    def estimate(self, frame, face_image):
        """Return the emotion label predicted for ``face_image``.

        ``frame`` is accepted for interface parity but unused here.
        """
        face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        face_image = cv2.resize(face_image, (48, 48))
        # Scale pixels to [0, 1] and add the batch dimension expected by Keras.
        img_pixels = np.expand_dims(image.img_to_array(face_image), axis = 0) / 255
        predictions = self._classifier.predict(img_pixels)
        max_index = np.argmax(predictions[0])
        return self._selection[max_index]
|
{"hexsha": "ab97fac72e0110c4c6d21bc44ad4ff45bd904ad3", "size": 1266, "ext": "py", "lang": "Python", "max_stars_repo_path": "libfaceid/emotion.py", "max_stars_repo_name": "anhlbt/faceidsys", "max_stars_repo_head_hexsha": "630efe78830360565958621c80d247a6055c7cb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 421, "max_stars_repo_stars_event_min_datetime": "2018-12-01T01:20:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T04:56:12.000Z", "max_issues_repo_path": "libfaceid/emotion.py", "max_issues_repo_name": "anhlbt/faceidsys", "max_issues_repo_head_hexsha": "630efe78830360565958621c80d247a6055c7cb4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 38, "max_issues_repo_issues_event_min_datetime": "2018-12-18T05:38:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:06:47.000Z", "max_forks_repo_path": "libfaceid/emotion.py", "max_forks_repo_name": "anhlbt/faceidsys", "max_forks_repo_head_hexsha": "630efe78830360565958621c80d247a6055c7cb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 148, "max_forks_repo_forks_event_min_datetime": "2018-12-06T10:12:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T08:22:31.000Z", "avg_line_length": 29.4418604651, "max_line_length": 93, "alphanum_fraction": 0.6998420221, "include": true, "reason": "import numpy", "num_tokens": 315}
|
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/point_xy.hpp>
#include <boost/geometry/geometries/polygon.hpp>
#include <gridmap/operations/rasterize.h>
#include <navigation_interface/params.h>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <pluginlib/class_list_macros.h>
#include <pure_pursuit_controller/plugin.h>
#include <visualization_msgs/MarkerArray.h>
PLUGINLIB_EXPORT_CLASS(pure_pursuit_controller::PurePursuitController, navigation_interface::Controller)
namespace pure_pursuit_controller
{
namespace
{
typedef boost::geometry::model::d2::point_xy<double> Point; // Point type for boost polygon library
typedef boost::geometry::model::polygon<Point, // Point type
                                        true, // Orientation is clockwise
                                        false, // Closure is implied (last point != first point)
                                        std::vector, // Store for points (efficient if all points known in advance)
                                        std::vector, // Store for rings (efficient if all holes known in advance)
                                        std::allocator, // Point allocator
                                        std::allocator // Ring allocator
                                        >
    Polygon;
// Result of a swept-footprint collision query (see robotInCollision).
struct CollisionCheck
{
    // True when any grid cell covered by the swept footprint is occupied.
    bool in_collision;
    // Minimum of the distance transform along the footprint outline, in map
    // units (cells * resolution), to the nearest occupied cell.
    double min_distance_to_collision;
    // Debug marker colouring every covered cell (red = occupied, green = free).
    visualization_msgs::Marker marker;
};
// Sweep the robot footprint from robot_pose to future_pose and test the swept
// area against the occupancy grid.
//
// The motion is interpolated (~1 cm / ~0.04 rad per step, clamped to [2, 20]
// steps), the footprint polygon at every interpolated pose is unioned into a
// single polygon, and that polygon is rasterized onto the grid. `alpha` only
// affects the debug marker's transparency.
CollisionCheck robotInCollision(const gridmap::OccupancyGrid& grid, const Eigen::Isometry2d& robot_pose,
                                const Eigen::Isometry2d& future_pose, const std::vector<Eigen::Vector2d>& footprint,
                                const float alpha)
{
    // Want to interpolate footprint between current robot pose and future robot pose
    const double linear_step = 0.01;
    const double angular_step = 0.04;
    const size_t max_steps = 20;
    const size_t min_steps = 2;
    // First create interpolated poses
    std::vector<Eigen::Isometry2d> interpolated;
    interpolated.push_back(robot_pose);
    const Eigen::Vector2d linear_dir = future_pose.translation() - robot_pose.translation();
    const double linear_dist = linear_dir.norm();
    const Eigen::Rotation2Dd rot(robot_pose.linear());
    const Eigen::Rotation2Dd future_rot(future_pose.linear());
    const Eigen::Rotation2Dd rot_dir(future_rot.inverse() * rot);
    const double rot_dist = std::abs(rot_dir.smallestAngle());
    // Step count driven by whichever of translation/rotation needs more steps.
    const std::size_t steps =
        std::min(max_steps, std::max(min_steps, static_cast<std::size_t>(
                                                    std::max((linear_dist / linear_step), (rot_dist / angular_step)))));
    for (std::size_t j = 1; j < steps; j++)
    {
        const double fraction = static_cast<double>(j + 1) / static_cast<double>(steps);
        const Eigen::Isometry2d pose =
            Eigen::Translation2d(robot_pose.translation() + fraction * linear_dir) * rot.slerp(fraction, future_rot);
        interpolated.push_back(pose);
    }
    // Create vector of polygons. Polygon at each pose starting from robot pose
    std::vector<Polygon> polygons;
    for (const Eigen::Isometry2d& pose : interpolated)
    {
        Polygon polygon;
        for (const Eigen::Vector2d& p : footprint)
        {
            const Eigen::Vector2d world_point = pose.translation() + pose.rotation() * p;
            Point point(world_point[0], world_point[1]);
            boost::geometry::append(polygon.outer(), point);
        }
        polygons.push_back(polygon);
    }
    // Union the footprints
    Polygon union_output = polygons.front();
    for (std::size_t i = 1; i < polygons.size(); i++)
    {
        std::vector<Polygon> temp;
        boost::geometry::union_(union_output, polygons[i], temp);
        ROS_ASSERT_MSG(temp.size() == 1, "Footprint union failed. More than 1 resulting polygons");
        union_output = temp[0];
    }
    // Bounding box (in cells) of the unioned footprint, for the raster fill.
    int min_x = std::numeric_limits<int>::max();
    int max_x = 0;
    int min_y = std::numeric_limits<int>::max();
    int max_y = 0;
    std::vector<Eigen::Array2i> map_footprint;
    for (const Point& p : union_output.outer())
    {
        const auto map_point = grid.dimensions().getCellIndex(Eigen::Vector2d(p.x(), p.y()));
        map_footprint.push_back(map_point);
        min_x = std::min(map_point.x(), min_x);
        max_x = std::max(map_point.x(), max_x);
        min_y = std::min(map_point.y(), min_y);
        max_y = std::max(map_point.y(), max_y);
    }
    // Close the polygon before rasterizing its outline.
    map_footprint.push_back(map_footprint.front());
    const auto connected_poly = gridmap::connectPolygon(map_footprint);
    double min_distance_to_collision = std::numeric_limits<double>::max();
    {
        // Wrap the raw grid bytes in a cv::Mat without copying.
        const cv::Mat cv_im = cv::Mat(grid.dimensions().size().y(), grid.dimensions().size().x(), CV_8U,
                                      reinterpret_cast<void*>(const_cast<uint8_t*>(grid.cells().data())));
        // Invert the costmap data such that all objects are considered zeros
        cv::Mat inv_cv_im;
        cv::bitwise_not(cv_im, inv_cv_im);
        // Calculate the distance transform to all objects (zero pixels)
        cv::Mat distance;
        cv::distanceTransform(inv_cv_im, distance, cv::DIST_L2, cv::DIST_MASK_PRECISE, CV_32F);
        // Minimum distance over the footprint outline cells, converted from
        // cells to map units via the grid resolution.
        for (const auto& p : connected_poly)
        {
            const double f = static_cast<double>(distance.at<float>(p.y(), p.x()));
            min_distance_to_collision = std::min(min_distance_to_collision, f);
        }
        min_distance_to_collision *= grid.dimensions().resolution();
    }
    // Debug marker: one point per rasterized cell.
    visualization_msgs::Marker marker;
    marker.ns = "points";
    marker.id = 0;
    marker.type = visualization_msgs::Marker::POINTS;
    marker.header.stamp = ros::Time::now();
    marker.header.frame_id = "map";
    marker.frame_locked = true;
    marker.scale.x = 0.02;
    marker.scale.y = 0.02;
    marker.scale.z = 0.00;
    marker.color.a = 1.f;
    marker.pose.orientation.w = 1.0;
    bool in_collision = false;
    // Per-cell callback: records the cell in the marker and flags collisions.
    auto append_raster = [&grid, &marker, &in_collision, alpha](const int x, const int y) {
        const Eigen::Array2i p{x, y};
        const Eigen::Vector2d w = grid.dimensions().getCellCenter(p);
        geometry_msgs::Point mp;
        mp.x = w.x();
        mp.y = w.y();
        mp.z = 0.0;
        marker.points.push_back(mp);
        std_msgs::ColorRGBA c;
        c.a = alpha;
        if (grid.dimensions().contains(p) && grid.occupied(p))
        {
            in_collision = true;
            c.r = 1.0;
        }
        else
        {
            c.g = 1.0;
        }
        marker.colors.push_back(c);
    };
    gridmap::rasterPolygonFill(append_raster, connected_poly, min_x, max_x, min_y, max_y);
    // rasterPolygonFill is not properly including all edges
    for (const auto& p : connected_poly)
        append_raster(p.x(), p.y());
    return {in_collision, min_distance_to_collision, marker};
}
// Build a debug ARROW marker (in the "odom" frame) pointing from the robot's
// current pose to the look-ahead target pose.
visualization_msgs::Marker buildMarker(const navigation_interface::KinodynamicState& robot_state,
                                       const navigation_interface::KinodynamicState& target_state)
{
    visualization_msgs::Marker arrow;
    arrow.ns = "target";
    arrow.id = 0;
    arrow.type = visualization_msgs::Marker::ARROW;
    arrow.header.stamp = ros::Time::now();
    arrow.header.frame_id = "odom";
    arrow.frame_locked = true;
    arrow.scale.x = 0.02;
    arrow.scale.y = 0.04;
    arrow.scale.z = 0.04;
    arrow.color.a = 1.f;
    arrow.pose.orientation.w = 1.0;
    // Tail at the robot, head at the target.
    const auto to_point = [](const navigation_interface::KinodynamicState& state) {
        geometry_msgs::Point p;
        p.x = state.pose.translation().x();
        p.y = state.pose.translation().y();
        p.z = 0.0;
        return p;
    };
    arrow.points.push_back(to_point(robot_state));
    arrow.points.push_back(to_point(target_state));
    return arrow;
}
// Weighted squared distance between two planar poses: rotation is treated as
// a third axis, scaled by a_weight. Note this returns the SQUARED norm.
double dist(const Eigen::Isometry2d& pose_1, const Eigen::Isometry2d& pose_2, const double a_weight)
{
    const Eigen::Vector2d d_xy = pose_2.translation() - pose_1.translation();
    const double d_a = Eigen::Rotation2Dd(pose_1.linear().inverse() * pose_2.linear()).smallestAngle();
    return d_xy.squaredNorm() + a_weight * (d_a * d_a);
}
// Locate the trajectory node nearest to `pose`.
// When the robot sits between two nodes, the tie is broken by probing poses
// just before and just after the nearest node, so the node *ahead* of the
// robot is preferred and back-tracking is avoided. Because nodes are not
// evenly spaced this is not a trivial nearest-index problem.
std::size_t findClosest(const std::vector<navigation_interface::KinodynamicState>& nodes, const Eigen::Isometry2d& pose)
{
    if (nodes.size() <= 2)
        return nodes.size() - 1;
    // Linear scan keeping the FIRST index with minimal weighted distance.
    std::size_t closest_i = 0;
    double best_dist = dist(nodes[0].pose, pose, 0.1);
    for (std::size_t i = 1; i < nodes.size(); ++i)
    {
        const double candidate = dist(nodes[i].pose, pose, 0.1);
        if (candidate < best_dist)
        {
            best_dist = candidate;
            closest_i = i;
        }
    }
    if (closest_i == 0 || closest_i == nodes.size() - 1)
    {
        return closest_i;
    }
    // Resolve ambiguity with two probe poses:  -----A------M1---closest---M2-------B----->
    const auto interpolate = [](const Eigen::Isometry2d& from, const Eigen::Isometry2d& to, const double fraction) {
        const Eigen::Vector2d translation_vec = to.translation() - from.translation();
        const Eigen::Rotation2Dd rot(from.linear());
        const Eigen::Rotation2Dd target_rot(to.linear());
        return Eigen::Isometry2d(Eigen::Translation2d(from.translation() + fraction * translation_vec) *
                                 rot.slerp(fraction, target_rot));
    };
    // M1: 99% of the way from the previous node towards the closest node.
    const Eigen::Isometry2d m_1 = interpolate(nodes[closest_i - 1].pose, nodes[closest_i].pose, 0.99);
    // M2: 1% of the way from the closest node towards the next node.
    const Eigen::Isometry2d m_2 = interpolate(nodes[closest_i].pose, nodes[closest_i + 1].pose, 0.01);
    // If M1 is nearer, the robot is still approaching closest_i; otherwise it
    // has passed it and the next node should be tracked.
    return dist(m_1, pose, 0.1) < dist(m_2, pose, 0.1) ? closest_i : closest_i + 1;
}
// Performs a lookahead to give the robot a "carrot" to chase.
// The carrot is the first pose along the trajectory whose weighted squared
// distance from the robot exceeds the distance the robot would cover in
// look_ahead_time at the current velocity (never less than 0.0025, i.e. 5 cm
// squared). Between nodes the pose is refined in `interpolation_steps`
// sub-steps. Returns the final node when the whole tail is within range.
navigation_interface::KinodynamicState lookAhead(const std::vector<navigation_interface::KinodynamicState>& nodes,
                                                 const Eigen::Isometry2d& robot_pose, const Eigen::Vector3d& velocity,
                                                 const std::size_t path_index, const double look_ahead_time,
                                                 const unsigned int interpolation_steps)
{
    // Extrapolate where the robot will be after look_ahead_time.
    Eigen::Isometry2d future_pose = robot_pose;
    future_pose.pretranslate(look_ahead_time * Eigen::Vector2d(velocity.topRows(2)));
    future_pose.rotate(Eigen::Rotation2Dd(look_ahead_time * velocity[2]));
    // To prevent the robot from getting stuck with 0 lookahead, set a minimum of 5cm
    // Note: these are squared distances!
    const double required_distance = std::max(0.0025, dist(robot_pose, future_pose, 0.1));
    for (std::size_t i = path_index; i < nodes.size(); ++i)
    {
        const double dist_to_node = dist(robot_pose, nodes[i].pose, 0.1);
        // Found first node that is far enough. Now interpolate between previous node and this node.
        if (dist_to_node > required_distance)
        {
            const Eigen::Isometry2d start_pose = (i == 0) ? robot_pose : nodes[i - 1].pose;
            const Eigen::Vector2d translation_vec = nodes[i].pose.translation() - start_pose.translation();
            const Eigen::Rotation2Dd rot(start_pose.linear());
            const Eigen::Rotation2Dd target_rot(nodes[i].pose.linear());
            Eigen::Isometry2d prev_interpolated_pose = nodes[i].pose;
            for (int j = interpolation_steps; j >= 0; j--)
            {
                // Step from front to back to avoid finding a point behind the robot.
                const double fraction = static_cast<double>(j) / static_cast<double>(interpolation_steps);
                const Eigen::Isometry2d interpolated_pose =
                    Eigen::Translation2d(start_pose.translation() + fraction * translation_vec) *
                    rot.slerp(fraction, target_rot);
                // Found pose just within range. Return the previous one (just outside)
                if (dist(interpolated_pose, robot_pose, 0.1) < required_distance)
                {
                    navigation_interface::KinodynamicState target_state = nodes[i];
                    target_state.pose = prev_interpolated_pose;
                    return target_state;
                }
                prev_interpolated_pose = interpolated_pose;
            }
            // None of the interpolated poses are within required_distance. Just use the node itself.
            return nodes[i];
        }
    }
    // Every remaining node is within range: chase the final pose.
    return nodes.back();
}
} // namespace
// The controller carries no construction-time logic; defaulting the special
// members documents that and lets the compiler generate optimal code.
PurePursuitController::PurePursuitController() = default;

PurePursuitController::~PurePursuitController() = default;
// Install a new trajectory and reset all tracking/PID state.
// Returns false (and installs nothing) for an empty trajectory.
// cppcheck-suppress unusedFunction
bool PurePursuitController::setTrajectory(const navigation_interface::Trajectory& trajectory)
{
    if (trajectory.states.empty())
        return false;
    trajectory_.reset(new navigation_interface::Trajectory(trajectory));
    path_index_ = 1;  // the first pose sits under the robot
    control_error_ = Eigen::Vector3d::Zero();
    control_integral_ = Eigen::Vector3d::Zero();
    last_update_ = ros::SteadyTime::now();
    return true;
}
// Drop the current trajectory and reset all tracking/PID state.
// cppcheck-suppress unusedFunction
void PurePursuitController::clearTrajectory()
{
    trajectory_.reset();
    path_index_ = 0;
    control_error_ = Eigen::Vector3d::Zero();
    control_integral_ = Eigen::Vector3d::Zero();
    last_update_ = ros::SteadyTime::now();
}
// Id of the installed trajectory, or an empty optional when none is set.
// cppcheck-suppress unusedFunction
boost::optional<std::string> PurePursuitController::trajectoryId() const
{
    if (!trajectory_)
        return {};
    return boost::optional<std::string>(trajectory_->id);
}
// Copy of the installed trajectory, or an empty optional when none is set.
boost::optional<navigation_interface::Trajectory> PurePursuitController::trajectory() const
{
    if (!trajectory_)
        return {};
    return boost::optional<navigation_interface::Trajectory>(*trajectory_);
}
// Compute one velocity command that tracks the current trajectory.
//
// Pipeline: goal-tolerance check -> nearest trajectory node -> look-ahead
// "carrot" -> swept-footprint collision check -> PD control (PID on the final
// node) -> per-cycle acceleration clamp -> obstacle-distance-scaled velocity
// clamp. Outcome is FAILED when there is no trajectory, tracking error is
// exceeded, or the swept footprint collides.
navigation_interface::Controller::Result
// cppcheck-suppress unusedFunction
PurePursuitController::control(const ros::SteadyTime& time, const gridmap::AABB& local_region,
                               const navigation_interface::KinodynamicState& robot_state,
                               const Eigen::Isometry2d& map_to_odom)
{
    navigation_interface::Controller::Result result;
    result.command = Eigen::Vector3d::Zero();
    // No trajectory installed -> nothing to track.
    if (!trajectory_)
    {
        result.outcome = navigation_interface::Controller::Outcome::FAILED;
        return result;
    }
    double dt = time.toSec() - last_update_.toSec();
    ROS_ASSERT_MSG(dt > 0.0, "Pure Pursuit - Negative time step!");
    last_update_ = time;
    const double dist_to_goal = (trajectory_->states.back().pose.translation() - robot_state.pose.translation()).norm();
    const double angle_to_goal =
        std::abs(Eigen::Rotation2Dd(robot_state.pose.linear().inverse() * trajectory_->states.back().pose.linear())
                     .smallestAngle());
    //
    // Goal condition
    //
    if (angle_to_goal < yaw_goal_tolerance_ && dist_to_goal < xy_goal_tolerance_)
    {
        ROS_INFO_STREAM("Control Complete! angle_to_goal: " << angle_to_goal << " dist_to_goal: " << dist_to_goal);
        result.outcome = navigation_interface::Controller::Outcome::COMPLETE;
        last_update_ = ros::SteadyTime::now();
        return result;
    }
    //
    // Find target
    //
    // path_index is the index closest to where the robot is currently
    // target_state is the pose the robot is aiming for
    navigation_interface::KinodynamicState target_state;
    path_index_ = findClosest(trajectory_->states, robot_state.pose);
    const double dist_to_closest = dist(robot_state.pose, trajectory_->states[path_index_].pose, 0.1);
    // Abort when the robot has drifted too far from the trajectory.
    if (dist_to_closest > tracking_error_)
    {
        ROS_WARN_STREAM("Tracking error. Distance to trajectory: " << dist_to_closest << " > " << tracking_error_);
        result.outcome = navigation_interface::Controller::Outcome::FAILED;
        last_update_ = ros::SteadyTime::now();
        return result;
    }
    target_state = lookAhead(trajectory_->states, robot_state.pose, robot_state.velocity, path_index_, look_ahead_time_,
                             interpolation_steps_);
    //
    // Check immediate collisions
    //
    double min_distance_to_collision;
    {
        // TODO need a more intelligent way to lock read access without waiting for path planning
        // auto lock = map_data_->grid.getLock();
        gridmap::OccupancyGrid local_grid(map_data_->grid, local_region);
        // lock.unlock();
        // Collision checking runs in the map frame; poses are tracked in odom.
        const Eigen::Isometry2d map_robot_pose = map_to_odom * robot_state.pose;
        const Eigen::Isometry2d map_goal_pose = map_to_odom * target_state.pose;
        const CollisionCheck cc = robotInCollision(local_grid, map_robot_pose, map_goal_pose, robot_footprint_, 1.f);
        min_distance_to_collision = cc.min_distance_to_collision;
        if (debug_viz_)
            footprint_pub_.publish(cc.marker);
        if (cc.in_collision)
        {
            ROS_WARN_STREAM("Robot is in collision!");
            result.outcome = navigation_interface::Controller::Outcome::FAILED;
            last_update_ = ros::SteadyTime::now();
            return result;
        }
    }
    if (debug_viz_)
        target_state_pub_.publish(buildMarker(robot_state, target_state));
    ROS_ASSERT(robot_state.pose.linear().allFinite());
    ROS_ASSERT(robot_state.pose.translation().allFinite());
    //
    // PD control (PID when close to goal)
    //
    // Error is expressed in the robot frame: (x, y, heading).
    const Eigen::Vector2d target_vec_wrt_robot =
        robot_state.pose.linear().inverse() * (target_state.pose.translation() - robot_state.pose.translation());
    const Eigen::Vector3d control_error = {
        target_vec_wrt_robot[0], target_vec_wrt_robot[1],
        Eigen::Rotation2Dd(robot_state.pose.linear().inverse() * target_state.pose.linear()).smallestAngle()};
    ROS_ASSERT(control_error.allFinite());
    Eigen::Vector3d target_velocity;
    // Error derivative uses the previous cycle's error before overwriting it.
    const Eigen::Vector3d control_dot_ = (control_error - control_error_) / dt;
    control_error_ = control_error;
    ROS_ASSERT(control_error_.allFinite());
    // The integral term is only engaged while tracking the final node, to
    // remove steady-state error when homing in on the goal.
    const bool final_pid_control = path_index_ == trajectory_->states.size() - 1;
    if (final_pid_control)
    {
        control_integral_ += control_error;
        // Integrator wind-up prevention clamp
        for (unsigned int i = 0; i < 3; ++i)
        {
            control_integral_[i] =
                std::min(std::max(-control_integral_max_[i], control_integral_[i]), control_integral_max_[i]);
        }
        target_velocity = p_gain_.cwiseProduct(control_error) + i_gain_.cwiseProduct(control_integral_) +
                          d_gain_.cwiseProduct(control_dot_);
    }
    else
    {
        target_velocity = p_gain_.cwiseProduct(control_error) + d_gain_.cwiseProduct(control_dot_);
    }
    ROS_ASSERT_MSG(target_velocity.allFinite(), "%f %f %f", target_velocity[0], target_velocity[1], target_velocity[2]);
    //
    // Max acceleration check
    //
    Eigen::Vector3d vel_command = target_velocity;
    const Eigen::Vector2d translation_dv(target_velocity[0] - prev_cmd_vel[0], target_velocity[1] - prev_cmd_vel[1]);
    const double rotation_dv = target_velocity[2] - prev_cmd_vel[2];
    // We limit the X and Y accelerations together to preserve the direction
    const double translation_accel = translation_dv.norm() / dt;
    if (translation_accel > max_translation_accel_)
    {
        // ROS_INFO_STREAM("Excessive translation acceleration: " << translation_accel << " > " <<
        // max_translation_accel_);
        vel_command[0] = prev_cmd_vel[0] + translation_dv[0] * (max_translation_accel_ / translation_accel);
        vel_command[1] = prev_cmd_vel[1] + translation_dv[1] * (max_translation_accel_ / translation_accel);
    }
    const double rotation_accel = std::abs(rotation_dv) / dt;
    if (rotation_accel > max_rotation_accel_)
    {
        // ROS_INFO_STREAM("Excessive rotation acceleration: " << rotation_accel << " > " << max_rotation_accel_);
        vel_command[2] = prev_cmd_vel[2] + rotation_dv * (max_rotation_accel_ / rotation_accel);
    }
    ROS_ASSERT(vel_command.allFinite());
    prev_cmd_vel = vel_command;
    //
    // Max velocity check
    //
    // Limit max velocity based on distance to nearest obstacle
    const double d = std::max(min_avoid_distance_, std::min(max_avoid_distance_, min_distance_to_collision));
    const double velocity_scale = d / max_avoid_distance_;
    Eigen::Vector3d augmented_max_vel = {std::min(max_velocity_[0], max_velocity_[0] * velocity_scale),
                                         std::min(max_velocity_[1], max_velocity_[1] * velocity_scale),
                                         std::min(max_velocity_[2], max_velocity_[2] * velocity_scale)};
    {
        // Scale all three axes by one factor so the commanded direction is
        // preserved while respecting every per-axis limit.
        double vel_factor = 1.0;
        for (long i = 0; i < 3; ++i)
        {
            if (std::abs(vel_command[i]) > augmented_max_vel[i])
            {
                vel_factor = std::max(vel_factor, std::abs(vel_command[i] / augmented_max_vel[i]));
            }
        }
        vel_command /= vel_factor;
        ROS_ASSERT(vel_command.allFinite());
    }
    result.command = vel_command;
    result.outcome = navigation_interface::Controller::Outcome::SUCCESSFUL;
    return result;
}
// Load all tunables from the YAML parameter tree; each key falls back to the
// member's current (compiled-in) value when absent.
// cppcheck-suppress unusedFunction
void PurePursuitController::onInitialize(const YAML::Node& parameters)
{
    look_ahead_time_ = parameters["look_ahead_time"].as<double>(look_ahead_time_);
    // Per-axis velocity limits (x, y, yaw).
    max_velocity_[0] = parameters["max_velocity_x"].as<double>(max_velocity_[0]);
    max_velocity_[1] = parameters["max_velocity_y"].as<double>(max_velocity_[1]);
    max_velocity_[2] = parameters["max_velocity_w"].as<double>(max_velocity_[2]);
    max_translation_accel_ = parameters["max_translation_accel"].as<double>(max_translation_accel_);
    max_rotation_accel_ = parameters["max_rotation_accel"].as<double>(max_rotation_accel_);
    xy_goal_tolerance_ = parameters["xy_goal_tolerance"].as<double>(xy_goal_tolerance_);
    yaw_goal_tolerance_ = parameters["yaw_goal_tolerance"].as<double>(yaw_goal_tolerance_);
    // PID gains per axis (x, y, yaw) -- see control().
    p_gain_[0] = parameters["p_gain_x"].as<double>(p_gain_[0]);
    p_gain_[1] = parameters["p_gain_y"].as<double>(p_gain_[1]);
    p_gain_[2] = parameters["p_gain_w"].as<double>(p_gain_[2]);
    i_gain_[0] = parameters["i_gain_x"].as<double>(i_gain_[0]);
    i_gain_[1] = parameters["i_gain_y"].as<double>(i_gain_[1]);
    i_gain_[2] = parameters["i_gain_w"].as<double>(i_gain_[2]);
    d_gain_[0] = parameters["d_gain_x"].as<double>(d_gain_[0]);
    d_gain_[1] = parameters["d_gain_y"].as<double>(d_gain_[1]);
    d_gain_[2] = parameters["d_gain_w"].as<double>(d_gain_[2]);
    // Anti-windup clamps for the integral term.
    control_integral_max_[0] = parameters["control_integral_max_x"].as<double>(control_integral_max_[0]);
    control_integral_max_[1] = parameters["control_integral_max_y"].as<double>(control_integral_max_[1]);
    control_integral_max_[2] = parameters["control_integral_max_w"].as<double>(control_integral_max_[2]);
    max_avoid_distance_ = parameters["max_avoid_distance"].as<double>(max_avoid_distance_);
    min_avoid_distance_ = parameters["min_avoid_distance"].as<double>(min_avoid_distance_);
    // Default footprint: six-vertex hull in the robot frame (presumably
    // metres -- confirm against the robot description).
    robot_footprint_ = navigation_interface::get_point_list(
        parameters, "footprint",
        {{0.480, 0.000}, {0.380, -0.395}, {-0.380, -0.395}, {-0.480, 0.000}, {-0.380, 0.395}, {0.380, 0.395}});
    debug_viz_ = parameters["debug_viz"].as<bool>(debug_viz_);
    if (debug_viz_)
    {
        ros::NodeHandle nh("~");
        target_state_pub_ = nh.advertise<visualization_msgs::Marker>("target_state", 100);
        footprint_pub_ = nh.advertise<visualization_msgs::Marker>("footprint", 100);
    }
}
// cppcheck-suppress unusedFunction
// Intentionally empty: this controller keeps no map-derived caches in the code
// visible here, so there is nothing to refresh when the map changes. The
// override presumably satisfies the navigation plugin interface — TODO confirm.
void PurePursuitController::onMapDataChanged()
{
}
} // namespace pure_pursuit_controller
|
{"hexsha": "3015e27b715ce9d51ddb52038c79869de607c8a8", "size": 24900, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "pure_pursuit_controller/src/plugin.cpp", "max_stars_repo_name": "Boeing/modular_navigation", "max_stars_repo_head_hexsha": "1489fdf94079fd6b1d3a41d0fc18924f43805a52", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2020-11-24T03:53:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T08:13:59.000Z", "max_issues_repo_path": "pure_pursuit_controller/src/plugin.cpp", "max_issues_repo_name": "Boeing/modular_navigation", "max_issues_repo_head_hexsha": "1489fdf94079fd6b1d3a41d0fc18924f43805a52", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pure_pursuit_controller/src/plugin.cpp", "max_forks_repo_name": "Boeing/modular_navigation", "max_forks_repo_head_hexsha": "1489fdf94079fd6b1d3a41d0fc18924f43805a52", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2020-11-27T12:24:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T08:14:02.000Z", "avg_line_length": 39.586645469, "max_line_length": 120, "alphanum_fraction": 0.6492369478, "num_tokens": 5883}
|
"""
file: database.py
language:python 3
extracts information from the unicode consortium web database
"""
__DB = "https://www.unicode.org/Public/UCD/latest/ucd/"
from typing import *
import urllib.request
import requests
import pickle
import numpy as np
import random
# inclusive decimal range of a unicode subset
__UnicodeRange = Tuple[int, int]
# matches subsets names with codepoint ranges
__UnicodeBlocks = Dict[str, __UnicodeRange]
# maps potentially implemented characters to unicode
# blocks with implemented characters
__UnicodeMap = List[str]
UNDEFINED_BLOCK = "undefined" # for indicating that a character is not defined
def generate_positive_pairs_consortium(unicode_clusters_codepoints_map: dict, num_pairs: int):
    """
    Randomly sample `num_pairs` positive pairs with replacement: each pair is
    two distinct codepoints drawn from the same homoglyph cluster.

    :param unicode_clusters_codepoints_map: mapping of cluster ids to lists of codepoints
    :param num_pairs: number of pairs to draw
    :return: list of (codepoint_a, codepoint_b) tuples
    NOTE: assumes every cluster holds at least two codepoints; a singleton
    cluster would make the resampling loop spin forever.
    """
    cluster_of = {cp: cid
                  for cid, members in unicode_clusters_codepoints_map.items()
                  for cp in members}
    codepoints = list(cluster_of)
    pairs = []
    for _ in range(num_pairs):
        first = codepoints[random.randint(0, len(codepoints) - 1)]
        siblings = unicode_clusters_codepoints_map[cluster_of[first]]
        # rejection-sample a distinct member of the same cluster
        second = first
        while second == first:
            second = siblings[random.randint(0, len(siblings) - 1)]
        pairs.append((first, second))
    return pairs
def generate_negative_pairs_consortium(unicode_clusters_codepoints_map: dict, num_pairs: int):
    """
    Randomly sample `num_pairs` negative pairs with replacement: each pair is
    two codepoints drawn from *different* homoglyph clusters.

    :param unicode_clusters_codepoints_map: mapping of cluster ids to lists of codepoints
    :param num_pairs: number of pairs to draw
    :return: list of (codepoint_a, codepoint_b) tuples
    :raises ValueError: if pairs are requested but fewer than two clusters
        exist (the rejection loop below could never terminate)
    """
    reverse_mapping = {codepoint: cluster_id
                       for cluster_id, members in unicode_clusters_codepoints_map.items()
                       for codepoint in members}
    if num_pairs > 0 and len(unicode_clusters_codepoints_map) < 2:
        raise ValueError("need at least two clusters to sample negative pairs")
    codepoints = list(reverse_mapping.keys())
    pairs = [None] * num_pairs
    for i in range(num_pairs):
        code_a = codepoints[random.randint(0, len(codepoints) - 1)]
        code_b = code_a
        # rejection-sample until the partner falls in a different cluster
        while reverse_mapping[code_b] == reverse_mapping[code_a]:
            code_b = codepoints[random.randint(0, len(codepoints) - 1)]
        pairs[i] = (code_a, code_b)
    return pairs
def get_consortium_clusters_dict():
    """
    Download the Unicode consortium's confusables table and group it into
    clusters: a dict mapping each target (prototype) codepoint to the list of
    source codepoints confusable with it. Codepoints are decimal strings.

    Requires network access.
    """
    url = 'https://www.unicode.org/Public/security/12.0.0/confusables.txt'
    # Load Text File
    r = requests.get(url, allow_redirects=True)
    # skip the file header; the 385-byte offset is tied to this exact file
    # revision (12.0.0) — do not bump the URL without rechecking it
    punycodes_list = r.text[385:].split("\n")
    punycodes_list = punycodes_list[:len(punycodes_list) - 3]  # drop trailer lines
    # 4248 pairs
    # 3353 we support :(
    consortium_clusters_dict = {}
    for i in punycodes_list:
        # fixed: was `i is not ''` — identity comparison against a string
        # literal is unreliable and raises SyntaxWarning on CPython >= 3.8
        if i != '':
            punycode_pair = i.split(";")[:2]
            # presumably keeps only single-codepoint targets (raw field
            # shorter than 8 chars) — TODO confirm against the file format
            if len(punycode_pair[1]) < 8:
                source = str(int(punycode_pair[0].replace(' ', ''), 16))
                target = str(int(punycode_pair[1].replace(' ', '').replace('\t', ''), 16))
                consortium_clusters_dict.setdefault(target, []).append(source)
    return consortium_clusters_dict
def generate_supported_consortium_feature_vectors_and_clusters_dict(n_clusters: int, features_dict_file_path: str):
    """
    Build up to `n_clusters` homoglyph clusters restricted to codepoints that
    have a precomputed feature vector.

    :param n_clusters: stop once this many non-empty clusters were collected
    :param features_dict_file_path: pickle file mapping codepoint -> feature vector
    :return: (feature_vectors, clusters) where feature_vectors maps every kept
        codepoint to its vector, and clusters maps sequential integer ids to
        lists of codepoints (a cluster's supported members plus its prototype
        codepoint, appended last)
    """
    consortium_clusters = get_consortium_clusters_dict()
    # print(len(consortium_clusters))
    # fixed: pickle.load(open(...)) leaked the file handle
    with open(features_dict_file_path, 'rb') as fh:
        features_dict = pickle.load(fh)
    feature_vectors = {}
    clusters = {}
    for prototype in consortium_clusters:
        if prototype not in features_dict:
            continue
        supported_members = []
        for member in consortium_clusters[prototype]:
            if member not in features_dict:
                continue
            supported_members.append(member)
            # the prototype's vector is only stored once a supported member exists
            feature_vectors.setdefault(prototype, features_dict[prototype])
            feature_vectors.setdefault(member, features_dict[member])
        if supported_members:
            clusters[prototype] = supported_members
        if len(clusters) == n_clusters:
            break
    # re-key with sequential integer ids and fold the prototype codepoint
    # into its own member list (it is appended last, as callers expect)
    renumbered = {}
    for idx, (prototype, members) in enumerate(clusters.items()):
        members.append(prototype)
        renumbered[idx] = members
    return feature_vectors, renumbered
def generate_data_for_experiment(num_random_additions: int = 0):
    """
    Assemble the experiment data: feature vectors for all consortium-supported
    codepoints plus, optionally, `num_random_additions` randomly drawn
    distractor codepoints that are not part of any cluster.

    :param num_random_additions: how many extra codepoints to sample without
        replacement from the full feature dictionary ('features_dict_file.pkl')
    :return: (feature_vectors, clusters_dict); the extra codepoints are merged
        into feature_vectors only
    """
    feature_vectors, clusters_dict = generate_supported_consortium_feature_vectors_and_clusters_dict(
        9999, 'features_dict_file.pkl')
    # snapshot of the cluster codepoints; a set makes the membership test O(1)
    cluster_codepoints = set(feature_vectors.keys())
    # fixed: pickle.load(open(...)) leaked the file handle
    with open('features_dict_file.pkl', 'rb') as fh:
        all_features = pickle.load(fh)
    candidates = list(all_features.keys())
    for _ in range(num_random_additions):
        # pop => sampling without replacement
        picked = candidates.pop(random.randint(0, len(candidates) - 1))
        if picked not in cluster_codepoints:
            feature_vectors[picked] = all_features[picked]
    return feature_vectors, clusters_dict
def _generate_statistics(converted_dict, features_dict_file_path):
    """
    Print the mean and standard deviation of within-cluster pairwise cosine
    similarities, averaged over all clusters of size > 2.

    :param converted_dict: mapping of cluster id -> list of codepoints
    :param features_dict_file_path: pickle file mapping codepoint -> feature vector
    Requires a CUDA-capable GPU (cupy).
    """
    import cupy as cp

    def _generate_adjacency_matrix(features):
        """Pairwise cosine-similarity matrix of the [n, k] feature rows, on GPU."""
        ordered_features_gpu = cp.array(features)
        n, k = ordered_features_gpu.shape
        a = ordered_features_gpu.reshape((n, 1, 1, k))
        b = ordered_features_gpu.reshape((1, n, k, 1))
        # [n, n] matrix of dot products between every pair of rows
        dot_products = cp.matmul(a, b).reshape((n, n))
        # [n] row norms
        norms = cp.linalg.norm(ordered_features_gpu, axis=1)
        norms_a = norms.reshape((n, 1))
        norms_b = norms.reshape((1, n))  # same as the above but transposed
        # [n, n] outer product of norms; epsilon guards against zero vectors
        norms_prod = cp.multiply(norms_a, norms_b)
        cosine_similarity = dot_products / (norms_prod + 1e-7)
        return cp.asnumpy(cosine_similarity)

    # fixed: pickle.load(open(...)) leaked the file handle
    with open(features_dict_file_path, 'rb') as fh:
        features_dict = pickle.load(fh)
    mean = 0
    count = 0
    std_dev = 0
    for cluster in converted_dict.values():
        ordered_features = np.empty([len(cluster), len(features_dict[cluster[0]])], dtype=np.float32)
        for i in range(len(cluster)):
            ordered_features[i] = features_dict[cluster[i]]
        # clusters of size <= 2 are skipped (too few pairs for meaningful stats)
        if len(cluster) > 2:
            cos_matrix = _generate_adjacency_matrix(ordered_features)
            # strict lower triangle keeps each unordered pair exactly once;
            # NOTE(review): the != 0 filter also drops pairs whose similarity
            # is exactly zero — confirm this is acceptable
            stats = np.tril(cos_matrix, -1)
            stats = stats[stats != 0]
            mean += np.mean(stats)
            std_dev += np.std(stats)
            count += 1
    if count == 0:
        # fixed: previously crashed with ZeroDivisionError in this case
        print("no clusters of size > 2; statistics undefined")
        return
    print(mean / count)
    print(std_dev / count)
# def normalize_rows(vector):
# return vector / (np.linalg.norm(vector, axis=1).reshape((vector.shape[0], 1)) + 1e-8)
#
#
# def calculate_centroid(feature_vectors):
# normalized_vectors = normalize_rows(feature_vectors)
# centroid_ = np.sum(normalized_vectors, axis=0)
# return normalize_rows(centroid_.reshape((1, -1)))
def _is_character_block(block_name: str) -> bool:
    """
    Decide whether a unicode block contains actual characters, as opposed to
    surrogate or private-use ranges.

    :param block_name: name of the block
    :return: True when the block implements actual characters
    """
    return not any(marker in block_name for marker in ("Surrogate", "Private"))
def map_blocks() -> (__UnicodeBlocks, __UnicodeMap, int):
    """
    In some browsers and email clients, every character of a domain name must
    come from the same unicode subset ("block") for the name to be displayed
    in unicode form rather than its ascii ("xe--*") encoding.

    This helper downloads the block definitions of the latest unicode standard
    and builds both a name->range table and a per-codepoint block lookup.

    :return: tuple of UnicodeSets, UnicodeMap, and total # of chars
    """
    blocks = {}
    block_map = []
    with urllib.request.urlopen(__DB + "Blocks.txt") as response:
        raw_lines = response.read().decode('utf-8').split("\n")
    for raw in raw_lines:
        # skip empty lines and comment lines
        if len(raw) == 0 or raw[0] == '\n' or raw[0] == '#':
            continue
        hex_range, block_name = raw.strip().split("; ")
        if not _is_character_block(block_name):
            continue
        start_hex, end_hex = hex_range.split("..")
        start = int(start_hex, 16)
        end = int(end_hex, 16)
        blocks[block_name] = (start, end)
        # pad the map with "undefined" entries up to this block's end
        if len(block_map) < end + 1:
            block_map.extend([UNDEFINED_BLOCK] * (end + 1 - len(block_map)))
        for cp in range(start, end + 1):
            block_map[cp] = block_name
    # as of unicode 12, block_map covers the first ~900k code points,
    # around 140k of which belong to blocks with defined code points
    n = _prune_block_map(block_map)
    return blocks, block_map, n
def _is_code_range(description: str) -> int:
    """
    Classify a UnicodeData.txt name-field entry.

    :param description: entry description, second field in line
    :return: 0 when the entry opens a range ("<..., First>"), 1 when it closes
        one ("<..., Last>"), -1 for an ordinary code point
    """
    if len(description) <= 4:
        return -1
    suffix = description[-4:]
    if suffix == "rst>":  # "<..., First>" — first in range, inclusive
        return 0
    if suffix == "ast>":  # "<..., Last>" — last in range, inclusive
        return 1
    return -1
def _prune_block_map(block_map: __UnicodeMap):
    """
    Go through the block map and "un-define" the entries for characters that
    are not actually implemented, according to UnicodeData.txt.

    Control characters and entries in non-character blocks are treated as
    unimplemented.

    :param block_map: unicode map of characters and blocks (modified in place)
    :return: total number of implemented characters
    """
    n = 0
    implemented = [False] * len(block_map)
    with urllib.request.urlopen(__DB + "UnicodeData.txt") as response:
        lines = response.read().decode('utf-8').split("\n")
        i = 0
        while i < len(lines):
            line = lines[i].strip()
            fields = line.split(";")
            # fields[0] is the hex codepoint, fields[1] the character name
            if len(line) > 0 and fields[1] != "<control>" \
                    and _is_character_block(fields[1]):
                index = int(fields[0], 16)
                retval = _is_code_range(fields[1])
                if retval == -1:
                    # ordinary single code point
                    implemented[index] = True
                    n += 1
                elif retval == 0:
                    # "<..., First>" entry: the next line is the matching
                    # "<..., Last>" entry; mark the whole inclusive range
                    i += 1
                    line = lines[i].strip()
                    fields = line.split(";")
                    end = int(fields[0], 16)
                    for k in range(index, end + 1):
                        implemented[k] = True
                        n += 1
            i += 1
    # un-define every codepoint that UnicodeData.txt did not list
    for i in range(len(implemented)):
        if not implemented[i]:
            block_map[i] = UNDEFINED_BLOCK
    # 137929 as of unicode 12
    return n
|
{"hexsha": "913ba10a55920a1c828f5f08ea57a734b595ef09", "size": 10882, "ext": "py", "lang": "Python", "max_stars_repo_path": "unicode_info/database.py", "max_stars_repo_name": "PerryXDeng/project_punyslayer", "max_stars_repo_head_hexsha": "79529b020ca56a5473dbb85ac7155bc03dc5023a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-25T04:57:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-16T00:34:18.000Z", "max_issues_repo_path": "unicode_info/database.py", "max_issues_repo_name": "PerryXDeng/project_punyslayer", "max_issues_repo_head_hexsha": "79529b020ca56a5473dbb85ac7155bc03dc5023a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unicode_info/database.py", "max_forks_repo_name": "PerryXDeng/project_punyslayer", "max_forks_repo_head_hexsha": "79529b020ca56a5473dbb85ac7155bc03dc5023a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-25T14:54:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-25T14:54:24.000Z", "avg_line_length": 37.9163763066, "max_line_length": 141, "alphanum_fraction": 0.707222937, "include": true, "reason": "import numpy,import cupy", "num_tokens": 2702}
|
# Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from . import SyncSmokeTest
from . import SmokeTest
import carla
import time
import math
import numpy as np
from enum import Enum
def list_equal_tol(objs, tol = 1e-5):
    """Return True when every element of `objs` equals the first one within `tol`.

    An empty or single-element list is trivially equal.
    """
    return all(equal_tol(objs[0], other, tol) for other in objs[1:])
def equal_tol(obj_a, obj_b, tol = 1e-5):
    """
    Compare two values within tolerance `tol`.

    Lists are compared exactly; carla Vector3D values component-wise; anything
    else by absolute difference.
    """
    if isinstance(obj_a, list):
        return obj_a == obj_b
    if not isinstance(obj_a, carla.libcarla.Vector3D):
        return abs(obj_a - obj_b) < tol
    delta = abs(obj_a - obj_b)
    return max(delta.x, delta.y, delta.z) < tol
def equal_physics_control(pc_a, pc_b):
    """
    Compare two carla VehiclePhysicsControl objects field by field (and their
    wheels, wheel by wheel) within a tolerance of 1e-3.

    :return: (equal, error_msg) — error_msg describes the first mismatch, or
        is "" when the controls are equal
    """
    error_msg = ""
    for key in dir(pc_a):
        # skip dunders; wheels are compared separately below
        if key.startswith('__') or key == "wheels":
            continue
        if not equal_tol(getattr(pc_a, key), getattr(pc_b, key), 1e-3):
            # %s instead of %.4f: some properties are not floats (the list
            # branch in equal_tol shows list-valued properties exist), and
            # %.4f would raise TypeError on them instead of reporting
            error_msg = "Car property: '%s' in VehiclePhysicsControl does not match: %s %s" \
                % (key, getattr(pc_a, key), getattr(pc_b, key))
            return False, error_msg
    if len(pc_a.wheels) != len(pc_b.wheels):
        # fixed: the original formatted a single bool into two %d placeholders,
        # which raised TypeError instead of producing the message
        error_msg = "The number of wheels does not match %d, %d" \
            % (len(pc_a.wheels), len(pc_b.wheels))
        return False, error_msg
    for w in range(0, len(pc_a.wheels)):
        for key in dir(pc_a.wheels[w]):
            # "position" is excluded from comparison — presumably it depends
            # on where the vehicle was spawned; TODO confirm
            if key.startswith('__') or key == "position":
                continue
            if not equal_tol(getattr(pc_a.wheels[w], key), getattr(pc_b.wheels[w], key), 1e-3):
                error_msg = "Wheel property: '%s' in VehiclePhysicsControl does not match: %s %s" \
                    % (key, getattr(pc_a.wheels[w], key), getattr(pc_b.wheels[w], key))
                return False, error_msg
    return True, error_msg
def change_physics_control(vehicle, tire_friction = None, drag = None, wheel_sweep = None, long_stiff = None):
    """
    Fetch the vehicle's physics control and override selected parameters.

    Only parameters that are not None are changed. The (possibly modified)
    VehiclePhysicsControl is returned so the caller can apply it.
    Note: only the first four wheels are kept on the returned control.
    """
    physics_control = vehicle.get_physics_control()
    if drag is not None:
        physics_control.drag_coefficient = drag
    if wheel_sweep is not None:
        physics_control.use_sweep_wheel_collision = wheel_sweep
    # front-left, front-right, rear-left, rear-right
    wheels = [physics_control.wheels[i] for i in range(4)]
    if tire_friction is not None:
        for wheel in wheels:
            wheel.tire_friction = tire_friction
    if long_stiff is not None:
        for wheel in wheels:
            wheel.long_stiff_value = long_stiff
    physics_control.wheels = wheels
    return physics_control
# Short aliases for the carla.command classes used throughout these tests.
SpawnActor = carla.command.SpawnActor
FutureActor = carla.command.FutureActor
ApplyTargetVelocity = carla.command.ApplyTargetVelocity
SetEnableGravity = carla.command.SetEnableGravity
ApplyVehicleControl = carla.command.ApplyVehicleControl
ApplyVehiclePhysicsControl = carla.command.ApplyVehiclePhysicsControl
class TestApplyVehiclePhysics(SyncSmokeTest):
    """Smoke tests: applying a VehiclePhysicsControl and reading it back."""
    def wait(self, frames=100):
        # advance the synchronous simulation by `frames` ticks
        for _i in range(0, frames):
            self.world.tick()
    def check_single_physics_control(self, bp_vehicle):
        """Spawn one vehicle of `bp_vehicle`, apply two modified physics
        controls in sequence and verify each round-trips unchanged."""
        veh_tranf = self.world.get_map().get_spawn_points()[0]
        vehicle = self.world.spawn_actor(bp_vehicle, veh_tranf)
        # Checking the setting of car variables (drag coefficient)
        pc_a = change_physics_control(vehicle, drag=5)
        vehicle.apply_physics_control(pc_a)
        self.wait(2)
        pc_b = vehicle.get_physics_control()
        equal, msg = equal_physics_control(pc_a, pc_b)
        if not equal:
            self.fail("%s: %s" % (bp_vehicle.id, msg))
        self.wait(2)
        # Checking the setting of wheel variables (tire friction)
        pc_a = change_physics_control(vehicle, tire_friction=5, long_stiff=987)
        vehicle.apply_physics_control(pc_a)
        self.wait(2)
        pc_b = vehicle.get_physics_control()
        equal, msg = equal_physics_control(pc_a, pc_b)
        if not equal:
            self.fail("%s: %s" % (bp_vehicle.id, msg))
        vehicle.destroy()
    def check_multiple_physics_control(self, bp_vehicles, index_bp = None):
        """Spawn 10 vehicles (all of blueprint `bp_vehicles[index_bp]`, or one
        of each when index_bp is None), apply a distinct physics control to
        every vehicle and verify each round-trips unchanged.

        NOTE(review): the fail messages below report `bp_vehicle.id`, which
        after the spawn loop always refers to the *last* spawned blueprint —
        the message can misattribute the failing vehicle when index_bp is None.
        """
        num_veh = 10
        vehicles = []
        pc_a = []
        pc_b = []
        for i in range(0, num_veh):
            veh_tranf = self.world.get_map().get_spawn_points()[i]
            bp_vehicle = bp_vehicles[index_bp] if index_bp is not None else bp_vehicles[i]
            vehicles.append(self.world.spawn_actor(bp_vehicle, veh_tranf))
            # give each vehicle a distinguishable drag coefficient
            drag_coeff = 3.0 + 0.1*i
            pc_a.append(change_physics_control(vehicles[i], drag=drag_coeff))
            vehicles[i].apply_physics_control(pc_a[i])
        self.wait(2)
        for i in range(0, num_veh):
            pc_b.append(vehicles[i].get_physics_control())
        for i in range(0, num_veh):
            equal, msg = equal_physics_control(pc_a[i], pc_b[i])
            if not equal:
                self.fail("%s: %s" % (bp_vehicle.id, msg))
        # second pass: distinct per-wheel friction and stiffness values
        pc_a = []
        pc_b = []
        for i in range(0, num_veh):
            friction = 1.0 + 0.1*i
            lstiff = 500 + 100*i
            pc_a.append(change_physics_control(vehicles[i], tire_friction=friction, long_stiff=lstiff))
            vehicles[i].apply_physics_control(pc_a[i])
        self.wait(2)
        for i in range(0, num_veh):
            pc_b.append(vehicles[i].get_physics_control())
        for i in range(0, num_veh):
            equal, msg = equal_physics_control(pc_a[i], pc_b[i])
            if not equal:
                self.fail("%s: %s" % (bp_vehicle.id, msg))
        for i in range(0, num_veh):
            vehicles[i].destroy()
    def test_single_physics_control(self):
        """Run the single-vehicle check for every vehicle blueprint."""
        print("TestApplyVehiclePhysics.test_single_physics_control")
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        for bp_veh in bp_vehicles:
            self.check_single_physics_control(bp_veh)
    def test_multiple_physics_control(self):
        """Run the multi-vehicle check per blueprint, then once mixed."""
        print("TestApplyVehiclePhysics.test_multiple_physics_control")
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        for idx in range(0, len(bp_vehicles)):
            self.check_multiple_physics_control(bp_vehicles, idx)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        self.check_multiple_physics_control(bp_vehicles)
class TestVehicleFriction(SyncSmokeTest):
    """Smoke tests around tire friction: zero friction, friction trigger
    volumes, and the effect of friction values on braking distance."""
    def wait(self, frames=100):
        # advance the synchronous simulation by `frames` ticks
        for _i in range(0, frames):
            self.world.tick()
    def test_vehicle_zero_friction(self):
        """A frictionless, dragless vehicle must keep its initial velocity,
        with or without gravity."""
        print("TestVehicleFriction.test_vehicle_zero_friction")
        self.client.load_world("Town05_Opt", False)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        for bp_veh in bp_vehicles:
            # vehicle 0 on the road with gravity; vehicle 1 floating without it
            veh_transf_00 = carla.Transform(carla.Location(33, -200, 0.2), carla.Rotation(yaw=90))
            veh_transf_01 = carla.Transform(carla.Location(29, -200, 0.7), carla.Rotation(yaw=90))
            batch = [
                SpawnActor(bp_veh, veh_transf_00)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
                .then(SetEnableGravity(FutureActor, True)),
                SpawnActor(bp_veh, veh_transf_01)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
                .then(SetEnableGravity(FutureActor, False))
            ]
            responses = self.client.apply_batch_sync(batch)
            veh_ids = [x.actor_id for x in responses]
            veh_refs = [self.world.get_actor(x) for x in veh_ids]
            if (0 in veh_ids) or (None in veh_refs):
                self.fail("%s: The test cars could not be correctly spawned" % (bp_veh.id))
            self.wait(10)
            # vehicle 0: no friction and no drag; vehicle 1: only no drag
            self.client.apply_batch_sync([
                ApplyVehiclePhysicsControl(veh_refs[0], change_physics_control(veh_refs[0], tire_friction=0.0, drag=0.0)),
                ApplyVehiclePhysicsControl(veh_refs[1], change_physics_control(veh_refs[1], drag=0.0))])
            self.wait(1)
            vel_ref = 100.0 / 3.6
            self.client.apply_batch_sync([
                ApplyTargetVelocity(veh_refs[0], carla.Vector3D(0, vel_ref, 0)),
                ApplyTargetVelocity(veh_refs[1], carla.Vector3D(0, vel_ref, 0)),
            ])
            self.wait(1)
            vel_veh_00 = veh_refs[0].get_velocity().y
            vel_veh_01 = veh_refs[1].get_velocity().y
            if not list_equal_tol([vel_ref, vel_veh_00, vel_veh_01], 1e-3):
                self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
                self.fail("%s: Velocities are not equal after initialization. Ref: %.3f -> [%.3f, %.3f]"
                    % (bp_veh.id, vel_ref, vel_veh_00, vel_veh_01))
            self.wait(100)
            # with zero friction/drag, velocity must be preserved (loose tol)
            vel_veh_00 = veh_refs[0].get_velocity().y
            vel_veh_01 = veh_refs[1].get_velocity().y
            if not list_equal_tol([vel_ref, vel_veh_00, vel_veh_01], 1e-1):
                self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
                self.fail("%s: Velocities are not equal after simulation. Ref: %.3f -> [%.3f, %.3f]"
                    % (bp_veh.id, vel_ref, vel_veh_00, vel_veh_01))
            self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
    def test_vehicle_friction_volume(self):
        """A friction trigger volume must raise the tire friction of the
        vehicle driving through it and restore it on exit, while a reference
        vehicle outside the volume stays unaffected."""
        print("TestVehicleFriction.test_vehicle_friction_volume")
        self.client.load_world("Town05_Opt", False)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        bp_vehicles = [x for x in bp_vehicles if int(x.get_attribute('number_of_wheels')) == 4]
        value_vol_friction = 5.0
        friction_bp = self.world.get_blueprint_library().find('static.trigger.friction')
        friction_bp.set_attribute('friction', str(value_vol_friction))
        extent = carla.Location(300.0, 4500.0, 700.0)
        friction_bp.set_attribute('extent_x', str(extent.x))
        friction_bp.set_attribute('extent_y', str(extent.y))
        friction_bp.set_attribute('extent_z', str(extent.z))
        vol_transf = carla.Transform(carla.Location(27, -100, 1))
        self.world.debug.draw_box(box=carla.BoundingBox(vol_transf.location, extent * 1e-2), rotation=vol_transf.rotation, life_time=1000, thickness=0.5, color=carla.Color(r=0,g=255,b=0))
        friction_trigger = self.world.spawn_actor(friction_bp, vol_transf)
        for bp_veh in bp_vehicles:
            # vehicle 0 is the reference (misses the volume); vehicle 1 drives through it
            veh_transf_00 = carla.Transform(carla.Location(36, -200, 0.2), carla.Rotation(yaw=90))
            veh_transf_01 = carla.Transform(carla.Location(28, -200, 0.7), carla.Rotation(yaw=90))
            batch = [
                SpawnActor(bp_veh, veh_transf_00)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0))),
                SpawnActor(bp_veh, veh_transf_01)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
            ]
            responses = self.client.apply_batch_sync(batch)
            veh_ids = [x.actor_id for x in responses]
            veh_refs = [self.world.get_actor(x) for x in veh_ids]
            if (0 in veh_ids) or (None in veh_refs):
                self.fail("%s: The test cars could not be correctly spawned" % (bp_veh.id))
            self.wait(10)
            vel_ref = 50.0 / 3.6
            friction_ref = 0.0
            self.client.apply_batch_sync([
                ApplyVehiclePhysicsControl(veh_refs[0], change_physics_control(veh_refs[0], tire_friction=friction_ref, drag=0.0)),
                ApplyVehiclePhysicsControl(veh_refs[1], change_physics_control(veh_refs[1], tire_friction=friction_ref, drag=0.0))])
            self.wait(1)
            self.client.apply_batch_sync([
                ApplyTargetVelocity(veh_refs[0], carla.Vector3D(0, vel_ref, 0)),
                ApplyTargetVelocity(veh_refs[1], carla.Vector3D(0, vel_ref, 0)),
            ])
            self.wait(4)
            # Before trigger: both vehicles still at friction_ref / vel_ref
            bef_vel_veh_00 = veh_refs[0].get_velocity().y
            bef_vel_veh_01 = veh_refs[1].get_velocity().y
            bef_tire_fr_00 = veh_refs[0].get_physics_control().wheels[0].tire_friction
            bef_tire_fr_01 = veh_refs[1].get_physics_control().wheels[0].tire_friction
            extent = carla.Location(100.0, 100.0, 200.0)
            self.world.debug.draw_box(box=carla.BoundingBox(veh_refs[1].get_location(), extent * 1e-2), rotation=vol_transf.rotation, life_time=8, thickness=0.5, color=carla.Color(r=255,g=0,b=0))
            if not equal_tol(bef_vel_veh_00, vel_ref, 1e-3) or not equal_tol(bef_tire_fr_00, friction_ref, 1e-3):
                self.fail("%s: Reference vehicle has changed before trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, bef_vel_veh_00, vel_ref, bef_tire_fr_00, friction_ref))
            if not equal_tol(bef_vel_veh_01, vel_ref, 1e-3) or not equal_tol(bef_tire_fr_01, friction_ref, 1e-3):
                self.fail("%s: Test vehicle has changed before trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, bef_vel_veh_01, vel_ref, bef_tire_fr_01, friction_ref))
            self.wait(100)
            # Inside trigger: the test vehicle must pick up the volume friction
            ins_vel_veh_00 = veh_refs[0].get_velocity().y
            ins_vel_veh_01 = veh_refs[1].get_velocity().y
            ins_tire_fr_00 = veh_refs[0].get_physics_control().wheels[0].tire_friction
            ins_tire_fr_01 = veh_refs[1].get_physics_control().wheels[0].tire_friction
            extent = carla.Location(100.0, 100.0, 200.0)
            self.world.debug.draw_box(box=carla.BoundingBox(veh_refs[1].get_location(), extent * 1e-2), rotation=vol_transf.rotation, life_time=8, thickness=0.5, color=carla.Color(r=255,g=0,b=0))
            if not equal_tol(ins_vel_veh_00, vel_ref, 1e-3) or not equal_tol(ins_tire_fr_00, friction_ref, 1e-3):
                self.fail("%s: Reference vehicle has changed inside trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, ins_vel_veh_00, vel_ref, ins_tire_fr_00, friction_ref))
            if ins_vel_veh_01 > vel_ref or not equal_tol(ins_tire_fr_01, value_vol_friction, 1e-3):
                self.fail("%s: Test vehicle is not correct inside trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, ins_vel_veh_01, vel_ref, ins_tire_fr_01, value_vol_friction))
            self.wait(200)
            # Outside trigger: friction must be restored to friction_ref
            out_vel_veh_00 = veh_refs[0].get_velocity().y
            out_vel_veh_01 = veh_refs[1].get_velocity().y
            out_tire_fr_00 = veh_refs[0].get_physics_control().wheels[0].tire_friction
            out_tire_fr_01 = veh_refs[1].get_physics_control().wheels[0].tire_friction
            extent = carla.Location(100.0, 100.0, 200.0)
            self.world.debug.draw_box(box=carla.BoundingBox(veh_refs[1].get_location(), extent * 1e-2), rotation=vol_transf.rotation, life_time=8, thickness=0.5, color=carla.Color(r=255,g=0,b=0))
            if not equal_tol(out_vel_veh_00, vel_ref, 1e-3) or not equal_tol(out_tire_fr_00, friction_ref, 1e-3):
                # fixed: this message previously reported ins_vel_veh_00
                self.fail("%s: Reference vehicle has changed after trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, out_vel_veh_00, vel_ref, out_tire_fr_00, friction_ref))
            if out_vel_veh_01 > vel_ref or not equal_tol(out_tire_fr_01, friction_ref, 1e-3):
                self.fail("%s: Test vehicle is not correct after trigger. Vel: %.3f [%.3f]. Fric: %.3f [%.3f]"
                    % (bp_veh.id, out_vel_veh_01, vel_ref, out_tire_fr_01, friction_ref))
            self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
        friction_trigger.destroy()
    def test_vehicle_friction_values(self):
        """Higher tire friction must yield a shorter braking distance:
        vehicle 0 (friction 0.0) travels furthest, vehicle 2 (3.0) shortest."""
        print("TestVehicleFriction.test_vehicle_friction_values")
        self.client.load_world("Town05_Opt", False)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        for bp_veh in bp_vehicles:
            veh_transf_00 = carla.Transform(carla.Location(36, -200, 0.2), carla.Rotation(yaw=90))
            veh_transf_01 = carla.Transform(carla.Location(32, -200, 0.7), carla.Rotation(yaw=90))
            veh_transf_02 = carla.Transform(carla.Location(28, -200, 0.7), carla.Rotation(yaw=90))
            batch = [
                SpawnActor(bp_veh, veh_transf_00)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0))),
                SpawnActor(bp_veh, veh_transf_01)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0))),
                SpawnActor(bp_veh, veh_transf_02)
                .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
            ]
            responses = self.client.apply_batch_sync(batch)
            veh_ids = [x.actor_id for x in responses]
            veh_refs = [self.world.get_actor(x) for x in veh_ids]
            if (0 in veh_ids) or (None in veh_refs):
                self.fail("%s: The test cars could not be correctly spawned" % (bp_veh.id))
            self.wait(10)
            self.client.apply_batch_sync([
                ApplyVehiclePhysicsControl(veh_refs[0], change_physics_control(veh_refs[0], tire_friction=0.0, drag=0.0)),
                ApplyVehiclePhysicsControl(veh_refs[1], change_physics_control(veh_refs[1], tire_friction=0.5, drag=0.0)),
                # fixed: vehicle 2's control was derived from veh_refs[1]
                # (copy-paste error); the base must come from veh_refs[2]
                ApplyVehiclePhysicsControl(veh_refs[2], change_physics_control(veh_refs[2], tire_friction=3.0, drag=0.0))])
            self.wait(1)
            vel_ref = 100.0 / 3.6
            self.wait(1)
            self.client.apply_batch_sync([
                ApplyTargetVelocity(veh_refs[0], carla.Vector3D(0, vel_ref, 0)),
                ApplyTargetVelocity(veh_refs[1], carla.Vector3D(0, vel_ref, 0)),
                ApplyTargetVelocity(veh_refs[2], carla.Vector3D(0, vel_ref, 0))
            ])
            self.wait(50)
            # record start positions, then brake hard for 150 ticks
            loc_veh_00 = veh_refs[0].get_location().y
            loc_veh_01 = veh_refs[1].get_location().y
            loc_veh_02 = veh_refs[2].get_location().y
            for _i in range(0, 150):
                self.world.tick()
                self.client.apply_batch_sync([
                    ApplyVehicleControl(veh_refs[0], carla.VehicleControl(brake=1.0)),
                    ApplyVehicleControl(veh_refs[1], carla.VehicleControl(brake=1.0)),
                    ApplyVehicleControl(veh_refs[2], carla.VehicleControl(brake=1.0))
                ])
            dist_veh_00 = veh_refs[0].get_location().y - loc_veh_00
            dist_veh_01 = veh_refs[1].get_location().y - loc_veh_01
            dist_veh_02 = veh_refs[2].get_location().y - loc_veh_02
            # more friction -> shorter stopping distance (with 25% slack)
            err_veh_01 = dist_veh_01 > 0.75 * dist_veh_00
            err_veh_02 = dist_veh_02 > 0.75 * dist_veh_01
            if err_veh_01 or err_veh_02:
                self.fail("%s: Friction test failed: ErrVeh01: %r ErrVeh02: %r."
                    % (bp_veh.id, err_veh_01, err_veh_02))
            self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
class TestVehicleTireConfig(SyncSmokeTest):
def wait(self, frames=100):
for _i in range(0, frames):
self.world.tick()
def test_vehicle_wheel_collision(self):
print("TestVehicleTireConfig.test_vehicle_wheel_collision")
self.client.load_world("Town05_Opt", False)
bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
bp_vehicles = [x for x in bp_vehicles if int(x.get_attribute('number_of_wheels')) == 4]
for bp_veh in bp_vehicles:
veh_transf_00 = carla.Transform(carla.Location(36, -200, 0.2), carla.Rotation(yaw=91))
veh_transf_01 = carla.Transform(carla.Location(31, -200, 0.7), carla.Rotation(yaw=91))
batch = [
SpawnActor(bp_veh, veh_transf_00)
.then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0))),
SpawnActor(bp_veh, veh_transf_01)
.then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
]
responses = self.client.apply_batch_sync(batch)
veh_ids = [x.actor_id for x in responses]
veh_refs = [self.world.get_actor(x) for x in veh_ids]
if (0 in veh_ids) or (None in veh_refs):
self.fail("%s: The test cars could not be correctly spawned" % (bp_veh.id))
self.wait(10)
vel_ref = 100.0 / 3.6
self.client.apply_batch_sync([
ApplyVehiclePhysicsControl(veh_refs[0], change_physics_control(veh_refs[0], wheel_sweep = False)),
ApplyVehiclePhysicsControl(veh_refs[1], change_physics_control(veh_refs[1], wheel_sweep = True))])
self.wait(1)
self.client.apply_batch_sync([
ApplyTargetVelocity(veh_refs[0], carla.Vector3D(0, vel_ref, 0)),
ApplyTargetVelocity(veh_refs[1], carla.Vector3D(0, vel_ref, 0))
])
self.wait(150)
loc_veh_00 = veh_refs[0].get_location().y
loc_veh_01 = veh_refs[1].get_location().y
vel_veh_00 = veh_refs[0].get_velocity().y
vel_veh_01 = veh_refs[1].get_velocity().y
if not list_equal_tol([vel_veh_00, vel_veh_01], 0.5):
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
self.fail("%s: Velocities are not equal after simulation. [%.3f, %.3f]"
% (bp_veh.id, vel_veh_00, vel_veh_01))
if not list_equal_tol([loc_veh_00, loc_veh_01], 0.5):
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
self.fail("%s: Locations are not equal after simulation. [%.3f, %.3f]"
% (bp_veh.id, loc_veh_00, loc_veh_01))
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
def test_vehicle_tire_long_stiff(self):
    """Check that higher longitudinal tire stiffness yields more traction.

    For every four-wheeled blueprint, spawns two identical vehicles, gives
    one a low (100) and one a high (2000) longitudinal stiffness with drag
    disabled, applies full throttle to both, and asserts the stiffer
    vehicle travels at least as far along +Y. Spawned actors are destroyed
    before any failure is reported so later blueprint iterations start
    from a clean world.
    """
    print("TestVehicleTireConfig.test_vehicle_tire_long_stiff")
    self.client.load_world("Town05_Opt", False)
    bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
    bp_vehicles = [x for x in bp_vehicles if int(x.get_attribute('number_of_wheels')) == 4]
    for bp_veh in bp_vehicles:
        ref_pos = -200
        veh_transf_00 = carla.Transform(carla.Location(36 - 0, ref_pos, 0.2), carla.Rotation(yaw=90))
        veh_transf_01 = carla.Transform(carla.Location(36 - 5, ref_pos, 0.2), carla.Rotation(yaw=90))
        batch = [
            SpawnActor(bp_veh, veh_transf_00)
            .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0))),
            SpawnActor(bp_veh, veh_transf_01)
            .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))
        ]
        responses = self.client.apply_batch_sync(batch)
        veh_ids = [x.actor_id for x in responses]
        veh_refs = [self.world.get_actor(x) for x in veh_ids]
        # actor_id == 0 or a None reference means the spawn request failed.
        if (0 in veh_ids) or (None in veh_refs):
            self.fail("%s: The test cars could not be correctly spawned" % (bp_veh.id))
        self.wait(10)
        # Drag is zeroed so tire stiffness is the only difference between the cars.
        self.client.apply_batch_sync([
            ApplyVehiclePhysicsControl(veh_refs[0], change_physics_control(veh_refs[0], drag=0.0, long_stiff = 100)),
            ApplyVehiclePhysicsControl(veh_refs[1], change_physics_control(veh_refs[1], drag=0.0, long_stiff = 2000))])
        self.wait(1)
        self.client.apply_batch_sync([
            ApplyVehicleControl(veh_refs[0], carla.VehicleControl(throttle=1.0)),
            ApplyVehicleControl(veh_refs[1], carla.VehicleControl(throttle=1.0))])
        self.wait(100)
        loc_veh_00 = veh_refs[0].get_location().y
        loc_veh_01 = veh_refs[1].get_location().y
        dist_veh_00 = loc_veh_00 - ref_pos
        dist_veh_01 = loc_veh_01 - ref_pos
        if dist_veh_01 < dist_veh_00:
            # FIX: destroy the actors before failing (the original leaked
            # them here, unlike the other tire tests, leaving stale cars in
            # the world for the remaining blueprint iterations).
            self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
            self.fail("%s: Longitudinal stiffness test failed, check that please. Veh00: [%f] Veh01: [%f]"
                      % (bp_veh.id, dist_veh_00, dist_veh_01))
        self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in veh_ids])
class TestStickyControl(SyncSmokeTest):
    """Smoke tests for the vehicle 'sticky_control' blueprint attribute.

    A control input is 'sticky' when it keeps acting on the vehicle after a
    single apply; with sticky control disabled, the input must be re-sent
    every frame to achieve the same effect. Each test compares the distance
    travelled and final velocity of runs with different sticky settings.
    """

    def wait(self, frames=100):
        """Advance the synchronous simulation by `frames` ticks."""
        for _i in range(0, frames):
            self.world.tick()

    def run_scenario(self, bp_veh, veh_control, continous = False, reset_after_first = False, sticky = None):
        """Spawn one vehicle, drive it with `veh_control`, and measure the outcome.

        :param bp_veh: vehicle blueprint to spawn.
        :param veh_control: carla.VehicleControl applied after the settle phase.
        :param continous: re-apply `veh_control` on every simulation tick.
        :param reset_after_first: apply an empty control each tick after the
            first one, cancelling any sticky input.
        :param sticky: value for the blueprint 'sticky_control' attribute
            ("True"/"False"), or None to leave the blueprint untouched.
        :return: tuple (distance travelled along y, final y velocity).
        """
        # FIX: the original also computed veh_transf.rotation.get_forward_vector()
        # into an unused local; removed as dead code.
        ref_pos = -1
        veh_transf = carla.Transform(carla.Location(235, ref_pos, 0.2), carla.Rotation(yaw=90))
        if sticky is not None:
            bp_veh.set_attribute("sticky_control", sticky)
        batch = [SpawnActor(bp_veh, veh_transf)
                 .then(ApplyTargetVelocity(FutureActor, carla.Vector3D(0, 0, 0)))]
        responses = self.client.apply_batch_sync(batch)
        if len(responses) != 1 or responses[0].error:
            self.fail("%s: The test car could not be correctly spawned" % (bp_veh.id))
        vehicle_id = responses[0].actor_id
        vehicle_00 = self.world.get_actor(vehicle_id)
        # Let the vehicle settle on the ground before applying any input.
        for _i in range(0, 10):
            self.world.tick()
        self.client.apply_batch_sync([ApplyVehicleControl(vehicle_00, veh_control)])
        self.world.tick()
        for _i in range(0, 150):
            if continous:
                self.client.apply_batch_sync([ApplyVehicleControl(vehicle_00, veh_control)])
            if reset_after_first:
                self.client.apply_batch_sync([ApplyVehicleControl(vehicle_00, carla.VehicleControl())])
            self.world.tick()
        loc_veh_00 = vehicle_00.get_location().y
        vel_veh_00 = vehicle_00.get_velocity().y
        dist_veh_00 = loc_veh_00 - ref_pos
        self.client.apply_batch([carla.command.DestroyActor(vehicle_id)])
        self.world.tick()
        return dist_veh_00, vel_veh_00

    def test_default(self):
        """A single apply must behave like continuous re-apply (sticky by default)."""
        print("TestStickyControl.test_default")
        inp_control = carla.VehicleControl(throttle=1.0)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        bp_veh = bp_vehicles[0]
        d0, v0 = self.run_scenario(bp_veh, inp_control)
        d1, v1 = self.run_scenario(bp_veh, inp_control, continous=True)
        d2, v2 = self.run_scenario(bp_veh, inp_control, continous=True, sticky="False")
        if not equal_tol(d0, d1, 1e-3) or not equal_tol(v0, v1, 1e-3):
            self.fail("%s: The default input is not sticky: Default: [%f, %f] ContinousThrottle: [%f, %f]"
                      % (bp_veh.id, d0, v0, d1, v1))
        if not equal_tol(d0, d2, 1e-3) or not equal_tol(v0, v2, 1e-3):
            self.fail("%s: The default input is not sticky: Default: [%f, %f] ContinousThrottle: [%f, %f]"
                      % (bp_veh.id, d0, v0, d2, v2))

    def test_true(self):
        """Explicit sticky_control=True matches continuous re-apply."""
        print("TestStickyControl.test_true")
        inp_control = carla.VehicleControl(throttle=1.0)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        bp_veh = bp_vehicles[0]
        d0, v0 = self.run_scenario(bp_veh, inp_control, sticky="True")
        d1, v1 = self.run_scenario(bp_veh, inp_control, continous=True)
        d2, v2 = self.run_scenario(bp_veh, inp_control, continous=True, sticky="False")
        if not equal_tol(d0, d1, 1e-3) or not equal_tol(v0, v1, 1e-3):
            self.fail("%s: The input is not sticky: StickyTrue: [%f, %f] ContinousThrottle: [%f, %f]"
                      % (bp_veh.id, d0, v0, d1, v1))
        if not equal_tol(d0, d2, 1e-3) or not equal_tol(v0, v2, 1e-3):
            self.fail("%s: The input is not sticky: StickyTrue: [%f, %f] ContinousThrottle: [%f, %f]"
                      % (bp_veh.id, d0, v0, d2, v2))

    def test_false(self):
        """sticky_control=False matches a sticky run that is reset every tick."""
        print("TestStickyControl.test_false")
        inp_control = carla.VehicleControl(throttle=1.0)
        bp_vehicles = self.world.get_blueprint_library().filter("vehicle.*")
        bp_veh = bp_vehicles[0]
        d0, v0 = self.run_scenario(bp_veh, inp_control, sticky="False")
        d1, v1 = self.run_scenario(bp_veh, inp_control, reset_after_first=True, sticky="True")
        d2, v2 = self.run_scenario(bp_veh, inp_control, reset_after_first=True, sticky="False")
        if not equal_tol(d0, d1, 1e-5) or not equal_tol(v0, v1, 1e-5):
            self.fail("%s: The input is sticky: StickyFalse: [%f, %f] Reset: [%f, %f]"
                      % (bp_veh.id, d0, v0, d1, v1))
        if not equal_tol(d0, d2, 1e-5) or not equal_tol(v0, v2, 1e-5):
            self.fail("%s: The input is sticky: StickyFalse: [%f, %f] Reset: [%f, %f]"
                      % (bp_veh.id, d0, v0, d2, v2))
|
{"hexsha": "b8d8a502e5ea83dab39ba2bd4a92fa2bc00a7b1a", "size": 29718, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonAPI/test/smoke/test_vehicle_physics.py", "max_stars_repo_name": "Sid1057/carla_sport", "max_stars_repo_head_hexsha": "76323ce68f7093278b2f47aa3d37ec90fa19038a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonAPI/test/smoke/test_vehicle_physics.py", "max_issues_repo_name": "Sid1057/carla_sport", "max_issues_repo_head_hexsha": "76323ce68f7093278b2f47aa3d37ec90fa19038a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonAPI/test/smoke/test_vehicle_physics.py", "max_forks_repo_name": "Sid1057/carla_sport", "max_forks_repo_head_hexsha": "76323ce68f7093278b2f47aa3d37ec90fa19038a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5109809663, "max_line_length": 195, "alphanum_fraction": 0.6248065146, "include": true, "reason": "import numpy", "num_tokens": 8100}
|
import cv2
import numpy as np

# Skin-color segmentation demo: shows the live webcam feed, an HSV-range
# mask roughly covering skin tones, and the masked result, until Esc is
# pressed or the camera stops delivering frames.
cap = cv2.VideoCapture(0)
# HSV skin-tone bounds (hue 0-20, moderate saturation) — loop invariants,
# so they are built once instead of on every frame.
lower = np.array([0, 10, 60], dtype="uint8")
upper = np.array([20, 150, 255], dtype="uint8")
while True:
    ret, frame = cap.read()
    # FIX: the original ignored the success flag; a failed grab (camera
    # unplugged or busy) returned frame=None and crashed cvtColor.
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # Esc
        break
cv2.destroyAllWindows()
cap.release()
|
{"hexsha": "e13e956164f7746a05f342d2b4e834bd030b57de", "size": 563, "ext": "py", "lang": "Python", "max_stars_repo_path": "gsv_skin_color.py", "max_stars_repo_name": "bhargavyagnik/AutoMouse", "max_stars_repo_head_hexsha": "717e92e1d9af006650641b9e234c95a4a86d277f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gsv_skin_color.py", "max_issues_repo_name": "bhargavyagnik/AutoMouse", "max_issues_repo_head_hexsha": "717e92e1d9af006650641b9e234c95a4a86d277f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gsv_skin_color.py", "max_forks_repo_name": "bhargavyagnik/AutoMouse", "max_forks_repo_head_hexsha": "717e92e1d9af006650641b9e234c95a4a86d277f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6538461538, "max_line_length": 54, "alphanum_fraction": 0.5737122558, "include": true, "reason": "import numpy", "num_tokens": 176}
|
import struct
import numpy as np
from ._header import header_size
from ._protocol import protocol_version
class Writer(object):
    """Incrementally builds a binary message via struct format codes.

    Each write_* call records a native-order ('=') format code together
    with its value; tobytes() packs everything accumulated so far in one
    struct.pack call.
    """

    def __init__(self):
        self.format_ = '='
        self.args_ = []

    def _push(self, code, value):
        # Record one format fragment together with the value it packs.
        self.format_ += code
        self.args_.append(value)

    def tobytes(self):
        """Return the packed bytes of everything written so far."""
        return struct.pack(self.format_, *self.args_)

    def write_uint8(self, c):
        """Append an unsigned 8-bit integer."""
        self._push('B', int(c))

    def write_uint16(self, s):
        """Append an unsigned 16-bit integer."""
        self._push('H', int(s))

    def write_uint64(self, n):
        """Append an unsigned 64-bit integer."""
        self._push('Q', int(n))

    def prepend_header(self):
        """Prefix the message with (protocol_version, payload size)."""
        request_size = struct.calcsize(self.format_)
        # Splice the 'HQ' header codes right after the '=' byte-order mark.
        self.format_ = '=HQ' + self.format_[1:]
        self.args_ = [protocol_version, request_size] + self.args_

    def write_bytes(self, array):
        """Append the raw bytes of a numpy array in Fortran (column-major) order."""
        nbytes = array.dtype.itemsize * array.size
        self._push('%ds' % nbytes, array.tobytes(order='F'))

    def write_vector(self, v):
        """Append a vector as its length followed by its data."""
        self.write_uint64(v.size)
        self.write_bytes(v)

    def write_matrix(self, m):
        """Append a 2-D array as (rows, cols, data)."""
        assert len(m.shape) == 2
        rows, cols = m.shape
        self.write_uint64(rows)
        self.write_uint64(cols)
        self.write_bytes(m)
|
{"hexsha": "f19b8c3a4a5421300a79df1aae77879bd293cf37", "size": 1181, "ext": "py", "lang": "Python", "max_stars_repo_path": "bbai/_computation/_writer.py", "max_stars_repo_name": "rnburn/bbai", "max_stars_repo_head_hexsha": "403f84b4937f4bce4fad8d10ee887330d1a322be", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-05-20T13:51:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T17:09:51.000Z", "max_issues_repo_path": "bbai/_computation/_writer.py", "max_issues_repo_name": "rnburn/bbai", "max_issues_repo_head_hexsha": "403f84b4937f4bce4fad8d10ee887330d1a322be", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bbai/_computation/_writer.py", "max_forks_repo_name": "rnburn/bbai", "max_forks_repo_head_hexsha": "403f84b4937f4bce4fad8d10ee887330d1a322be", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T02:02:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T02:02:31.000Z", "avg_line_length": 26.2444444444, "max_line_length": 67, "alphanum_fraction": 0.6130397968, "include": true, "reason": "import numpy", "num_tokens": 290}
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Kiri Choi
pySME is a Python script to run R SME package
(https://cran.r-project.org/web/packages/sme/index.html). SME package generates
smoothing-splines mixed-effects models from metabolomics data. This script
follows methodology given by Berk et al. (2011) and utilizes bootstrapping to
approximate p-values. Running this script requires R with SME package installed.
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
# Directory produced by the SME run; holds fulldata, pval, the group mean
# curves, and one fitted-curve CSV per subject under ys{0,1}vHat/.
path = './output_leukemia'
# Parameters
N = 12 # Number of subjects
t_n = 5 # Number of time points
iplN = 121 # Number of interpolated time points
showFig = False # Flag to plot figures
figSize = (20,16) # Size of figures
plotLegend = False # Flag to plot legend
colorMap = 'viridis' # kwarg for colormap
plotSMEMeanOnly = False # Only plot SME mean trace
mergePlot = True # Merge multiple plots
plotHeatmap = False # Plot heatmap comparing two data groups
# Observed time points and the dense grid the splines were evaluated on.
t = np.array([1,2,4,6,7])
iplT = np.linspace(1, 7, iplN)
# Full long-format dataset; first CSV column is a pandas index, dropped here.
# Columns after the first three are the measured features (data_fid).
fulldata = pd.read_csv(os.path.join(path, 'fulldata.csv')).iloc[:, 1:]
data_fid = np.array(fulldata.columns[3:])
# Bootstrap p-values, flattened to one value per feature.
pval = pd.read_csv(os.path.join(path, 'pval.csv')).iloc[:, 1:]
pval = pval.values.flatten()
# Subject ids per row (grp0_f/grp1_f) and the unique subjects per group.
grp0_f = fulldata[(fulldata.grp == 0)]['ind']
grp1_f = fulldata[(fulldata.grp == 1)]['ind']
grp0 = np.unique(fulldata[(fulldata.grp == 0)]['ind'])
grp1 = np.unique(fulldata[(fulldata.grp == 1)]['ind'])
# Group mean curves: one row per feature, one column per interpolated time.
ys0mu = np.array(pd.read_csv(os.path.join(path, 'ys0mu.csv')).iloc[:, 1:])
ys1mu = np.array(pd.read_csv(os.path.join(path, 'ys1mu.csv')).iloc[:, 1:])
# Per-subject fitted curves, shape (feature, subject, time). The files are
# sorted by the integer embedded in their name so index i matches feature i.
ys0vHat = np.empty((len(data_fid), len(grp0), iplN))
ys0vHatDir = os.path.join(path, 'ys0vHat')
ys0vHatF = [f for f in os.listdir(ys0vHatDir) if os.path.isfile(os.path.join(ys0vHatDir, f))]
ys0vHatF.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))
ys1vHat = np.empty((len(data_fid), len(grp1), iplN))
ys1vHatDir = os.path.join(path, 'ys1vHat')
ys1vHatF = [f for f in os.listdir(ys1vHatDir) if os.path.isfile(os.path.join(ys1vHatDir, f))]
ys1vHatF.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))
for i in range(len(ys0vHatF)):
    ys0vHat[i] = pd.read_csv(os.path.join(ys0vHatDir, ys0vHatF[i])).iloc[:, 1:]
for i in range(len(ys1vHatF)):
    ys1vHat[i] = pd.read_csv(os.path.join(ys1vHatDir, ys1vHatF[i])).iloc[:, 1:]
# Colormaps: cmap1 is the 2-entry control/treatment palette; cmap2/cmap3 are
# defined per subject / per feature (unused in the functions visible here).
cmap1 = cm.get_cmap(colorMap, 2)
cmap2 = cm.get_cmap(colorMap, N)
cmap3 = cm.get_cmap(colorMap, len(data_fid))
cmap_grp0 = cm.get_cmap('viridis', len(grp0))
cmap_grp1 = cm.get_cmap('viridis', len(grp1))
def plotC(idx):
    """
    Plots data points, individual, and mean curves of the control group

    :param idx: index of the selection list
    """
    times = np.array(fulldata[fulldata.grp == 0]["tme"])
    values = np.array(fulldata[fulldata.grp == 0][data_fid])
    plt.figure(figsize=figSize)
    if not plotSMEMeanOnly:
        # One marker series + one dashed fitted curve per control subject.
        for g0, subject in enumerate(grp0):
            sel = np.where(grp0_f == subject)
            plt.plot(times[sel], values[:, idx][sel],
                     color=cmap_grp0(g0), marker='o', linestyle='')
            plt.plot(iplT, ys0vHat[idx][g0],
                     color=cmap_grp0(g0), linestyle='dashed')
    # Thick line: SME mean curve of the control group.
    plt.plot(iplT, ys0mu[idx], lw=3, color=cmap1(0))
    plt.show()
def plotT(idx):
    """
    Plots data points, individual, and mean curves of the treatment group

    :param idx: index of the selection list
    """
    times = np.array(fulldata[fulldata.grp == 1]["tme"])
    values = np.array(fulldata[fulldata.grp == 1][data_fid])
    plt.figure(figsize=figSize)
    if not plotSMEMeanOnly:
        # One marker series + one dashed fitted curve per treatment subject.
        for g1, subject in enumerate(grp1):
            sel = np.where(grp1_f == subject)
            plt.plot(times[sel], values[:, idx][sel],
                     color=cmap_grp1(g1), marker='o', linestyle='')
            plt.plot(iplT, ys1vHat[idx][g1],
                     color=cmap_grp1(g1), linestyle='dashed')
    # Thick line: SME mean curve of the treatment group.
    plt.plot(iplT, ys1mu[idx], lw=3, color=cmap1(1))
    plt.show()
def plotCT(idx):
    """
    Plots data points, individual, and mean curves of both the control and the treatment group

    :param idx: index of the selection list
    """
    fdgrp0tme_arr = np.array(fulldata[fulldata.grp == 0]["tme"])
    fdgrp0sel_arr = np.array(fulldata[fulldata.grp == 0][data_fid])
    fdgrp1tme_arr = np.array(fulldata[fulldata.grp == 1]["tme"])
    fdgrp1sel_arr = np.array(fulldata[fulldata.grp == 1][data_fid])
    plt.figure(figsize=figSize)
    if not plotSMEMeanOnly:
        # Control group: markers + dashed per-subject curves in cmap1(0).
        for g0 in range(len(grp0)):
            tmeIdx = np.where(grp0_f == grp0[g0])
            plt.plot(fdgrp0tme_arr[tmeIdx], fdgrp0sel_arr[:,idx][tmeIdx], color=cmap1(0), marker='o', linestyle='')
            plt.plot(iplT, ys0vHat[idx][g0], color=cmap1(0), linestyle='dashed')
        # Treatment group: markers + dashed per-subject curves in cmap1(1).
        for g1 in range(len(grp1)):
            tmeIdx = np.where(grp1_f == grp1[g1])
            plt.plot(fdgrp1tme_arr[tmeIdx], fdgrp1sel_arr[:,idx][tmeIdx], color=cmap1(1), marker='o', linestyle='')
            # BUGFIX: this line used cmap1(len(data_fid)), but cmap1 is a
            # 2-entry colormap — the treatment dashed curves must use the
            # treatment color cmap1(1), matching the markers, the mean
            # curve below, and plotT.
            plt.plot(iplT, ys1vHat[idx][g1], color=cmap1(1), linestyle='dashed')
    # Thick lines: SME mean curves of both groups.
    plt.plot(iplT, ys0mu[idx], lw=3, color=cmap1(0))
    plt.plot(iplT, ys1mu[idx], lw=3, color=cmap1(1))
    plt.show()
|
{"hexsha": "281d2dedfd8e110140a1fc12fde7c20621977144", "size": 5398, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotting.py", "max_stars_repo_name": "kirichoi/pySME", "max_stars_repo_head_hexsha": "4879a80cefe131568f8c4d91b52f97fe0c79d315", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plotting.py", "max_issues_repo_name": "kirichoi/pySME", "max_issues_repo_head_hexsha": "4879a80cefe131568f8c4d91b52f97fe0c79d315", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotting.py", "max_forks_repo_name": "kirichoi/pySME", "max_forks_repo_head_hexsha": "4879a80cefe131568f8c4d91b52f97fe0c79d315", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4861111111, "max_line_length": 121, "alphanum_fraction": 0.6435716932, "include": true, "reason": "import numpy", "num_tokens": 1690}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 12:16:24 2019
@author: kyle
"""
import numpy as np
import os
import time
import sys
if ".." not in sys.path:
sys.path.append("..")
from utilities import create_directories
from state import State
from recombination_methods import structure, structure_dr, postprocessing_rates
from lambda_variation import lambda_distribution
from graphing import graph_rates_from_file
def get_them_rates_boi(atom):
    """Sample DR rate coefficients for `atom` over the lambda posterior.

    Draws n_samples lambda vectors (from a cached file or a fresh
    `lambda_distribution` run), recomputes the atomic structure and DR rates
    for each sample with random energy shifts, saves the rates, and plots them.

    NOTE(review): reads module globals `seq`, `shell` and `nist_cutoff`,
    which are only assigned under the __main__ guard — calling this after a
    plain import would raise NameError; confirm intended usage.
    """
    ion = State(atom, seq, shell)
    # Lambda search box ([0.8, 1.2] per parameter) and grid resolution used
    # when the posterior has to be generated from scratch.
    x_bnd = np.array([[0.8, 1.2], [0.8,1.2]])
    x_res = np.array([5,5])
    prior_shape = "gaussian"
    likelihood_shape = "gaussian"
    create_directories(ion, method="combined")
    direc = f"results/isoelectronic/{seq}/{atom}{ion.ion_charge}/combined_method/"
    # Copy the experimental coefficients next to the combined-method output.
    os.system("cp " + f"results/isoelectronic/{seq}/{atom}{ion.ion_charge}/experimental_coefficients.dat " +
              direc+"experimental_coefficients.dat")
    lambdas_file = direc + "lambdas.npy"
    n_samples = 50
    # Reuse a previously computed lambda posterior if one is cached on disk.
    if "lambdas.npy" in os.listdir(direc):
        lambda_samples = np.load(lambdas_file)
    else:
        lambda_samples = lambda_distribution(ion, x_bnd=x_bnd, x_res=x_res, nist_cutoff=nist_cutoff, prior_shape=prior_shape,
                                             likelihood_shape=likelihood_shape, outfile=lambdas_file)
    # Subsample n_samples rows (with replacement) from the posterior draws.
    lambda_samples = lambda_samples[np.random.randint(0, lambda_samples.shape[0], size=n_samples), :]
    # Maximum random energy perturbation, in eV (divided by 13.6 below to
    # convert to Rydberg — TODO confirm units against postprocessing_rates).
    max_shift = 0.2
    # Baseline run: fixes the temperature grid T for all samples.
    E, E_nist, delta_E = structure(ion, method="combined")
    structure_dr(ion, method="combined")
    T = postprocessing_rates(ion, E, E_nist, method="combined")[0]
    n_rates = T.size
    rates = np.zeros((n_samples, n_rates))
    for i in range(n_samples):
        # Random sign of the potential and one lambda draw per sample.
        potential = np.random.choice([-1,1])
        x = lambda_samples[i,:]
        if seq == "he":
            x = np.r_[1.0,x]
        E, E_nist, delta_E = structure(ion, method="combined", lambdas=x, potential=potential)
        structure_dr(ion, method="combined", lambdas=x, potential=potential)
        # Uniform random shifts in [-max_shift, max_shift] per energy level.
        shifts = (np.random.rand(*E.shape) * 2 - 1)*max_shift / 13.6
        """
        shifts = np.zeros(*E.shape)
        for i in range(len(shifts)-5):
            shifts[i] = (np.random.random()*2 - 1) * 0.2 / 13.6
        for i in range(len(shifts)-5, len(shifts)):
            shifts[i] = (np.random.random()*2 - 1) * 1.5 / 13.6
        """
        # Ground level is never shifted.
        shifts[0] = 0.0
        rates[i,:] = postprocessing_rates(ion, E, E_nist, method="combined", shift=shifts)[1]
    rates_file = direc + "rates.npy"
    # NOTE(review): T (n_rates,) and rates (n_samples, n_rates) have
    # different shapes, so np.array([T, rates]) builds a ragged object
    # array — newer NumPy requires dtype=object for this; verify.
    np.save(rates_file, np.array([T,rates]))
    graphs = direc + f"graphs_{n_samples}_samples.png"
    graph_rates_from_file(ion, rates_file, graphs, graph_every=1)
if __name__ == "__main__":
    start = time.time()  # NOTE(review): captured but never reported/used
    # These module globals are read by get_them_rates_boi.
    shell = "2-2" #core excitation shells
    atom = "o"
    seq = "li" #isoelectronic sequence
    nist_cutoff = 0.01
    get_them_rates_boi(atom)
|
{"hexsha": "3460eccf3f2bc31060cb142758eba294ab830a33", "size": 3047, "ext": "py", "lang": "Python", "max_stars_repo_path": "recombination/combined_method.py", "max_stars_repo_name": "hvanwyk/atomic_data_uncertainties", "max_stars_repo_head_hexsha": "e6b376d600090203b20810c730a21021ea62ab44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-04T18:27:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-04T18:27:18.000Z", "max_issues_repo_path": "recombination/combined_method.py", "max_issues_repo_name": "hvanwyk/atomic_data_uncertainties", "max_issues_repo_head_hexsha": "e6b376d600090203b20810c730a21021ea62ab44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recombination/combined_method.py", "max_forks_repo_name": "hvanwyk/atomic_data_uncertainties", "max_forks_repo_head_hexsha": "e6b376d600090203b20810c730a21021ea62ab44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1683168317, "max_line_length": 125, "alphanum_fraction": 0.6140466032, "include": true, "reason": "import numpy", "num_tokens": 841}
|
[STATEMENT]
lemma ProjInd_mem_eq1:"\<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B;
bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S;
h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f);
\<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> h = ag_idmap (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> h = aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub>
[PROOF STEP]
apply (rule funcset_eq[of _ "carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> h \<in> extensional (carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f))
2. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> \<in> extensional (carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f))
3. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> \<forall>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). h x = aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> x
[PROOF STEP]
apply (simp add:aHom_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> \<in> extensional (carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f))
2. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> \<forall>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). h x = aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> x
[PROOF STEP]
apply (simp add:ag_idmap_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> \<forall>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). h x = aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> x
[PROOF STEP]
apply (rule ballI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j; x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)\<rbrakk> \<Longrightarrow> h x = aI\<^bsub>Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f\<^esub> x
[PROOF STEP]
apply (simp add:ag_idmap_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j; x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)\<rbrakk> \<Longrightarrow> h x = x
[PROOF STEP]
apply (frule prodag_aGroup[of "I" "A"],
frule aGroup.Ag_ind_aGroup[of "a\<Pi>\<^bsub>I\<^esub> A" "f" "B"], assumption+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j; x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)\<rbrakk> \<Longrightarrow> h x = x
[PROOF STEP]
apply (frule_tac a = x in aHom_mem[of "Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f" "Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f"
"h"], assumption+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j; x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)\<rbrakk> \<Longrightarrow> h x = x
[PROOF STEP]
apply (rule_tac x = "h x" and y = x in ProjInd_mem_eq[of "I" "A" "f" "B" "S"],
assumption+)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>\<forall>j\<in>I. aGroup (A j); f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j; x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f)\<rbrakk> \<Longrightarrow> \<forall>j\<in>I. ProjInd I A f j (h x) = ProjInd I A f j x
[PROOF STEP]
apply (rotate_tac 1,
rule ballI,
frule_tac x = j in bspec, assumption,
thin_tac "\<forall>j\<in>I. compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h =
ProjInd I A f j")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x j. \<lbrakk>f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. aGroup (A j); j \<in> I; compos (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (ProjInd I A f j) h = ProjInd I A f j\<rbrakk> \<Longrightarrow> ProjInd I A f j (h x) = ProjInd I A f j x
[PROOF STEP]
apply (simp add:compos_def compose_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x j. \<lbrakk>f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. aGroup (A j); j \<in> I; (\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) = ProjInd I A f j\<rbrakk> \<Longrightarrow> ProjInd I A f j (h x) = ProjInd I A f j x
[PROOF STEP]
apply (subgoal_tac "(\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) x
= ProjInd I A f j x",
thin_tac "(\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) =
ProjInd I A f j")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x j. \<lbrakk>f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. aGroup (A j); j \<in> I; (\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) x = ProjInd I A f j x\<rbrakk> \<Longrightarrow> ProjInd I A f j (h x) = ProjInd I A f j x
2. \<And>x j. \<lbrakk>f \<in> carrier (a\<Pi>\<^bsub>I\<^esub> A) \<rightarrow> B; bij_to f (carrier (a\<Pi>\<^bsub>I\<^esub> A)) B; aGroup S; h \<in> aHom (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f) (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); aGroup (a\<Pi>\<^bsub>I\<^esub> A); aGroup (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); h x \<in> carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f); \<forall>j\<in>I. aGroup (A j); j \<in> I; (\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) = ProjInd I A f j\<rbrakk> \<Longrightarrow> (\<lambda>x\<in>carrier (Ag_ind (a\<Pi>\<^bsub>I\<^esub> A) f). ProjInd I A f j (h x)) x = ProjInd I A f j x
[PROOF STEP]
apply simp+
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 5301, "file": "Group-Ring-Module_Algebra4", "length": 13}
|
[STATEMENT]
lemma frontier_ball [simp]:
fixes a :: "'a::real_normed_vector"
shows "0 < e \<Longrightarrow> frontier (ball a e) = sphere a e"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < e \<Longrightarrow> frontier (ball a e) = sphere a e
[PROOF STEP]
by (force simp: frontier_def)
|
{"llama_tokens": 111, "file": null, "length": 1}
|
[STATEMENT]
lemma tensor_eqI[intro]:
assumes "dims A = dims B" and "vec A = vec B"
shows "A=B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A = B
[PROOF STEP]
by (metis assms tensor_from_vec_simp)
|
{"llama_tokens": 91, "file": "Deep_Learning_Tensor", "length": 1}
|
/*!
@file
Includes all the adaptors for the standard library.
@copyright Louis Dionne 2013-2016
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_EXT_STD_HPP
#define BOOST_HANA_EXT_STD_HPP
//! @ingroup group-ext
//! @defgroup group-ext-std Standard library adapters
//! Adapters for components in the standard library.
#include <boost/hana/config.hpp>
#ifndef BOOST_HANA_CONFIG_HAS_NO_STD_TUPLE_ADAPTER
# include <boost/hana/ext/std/tuple.hpp>
#endif
#include <boost/hana/ext/std/array.hpp>
#include <boost/hana/ext/std/integer_sequence.hpp>
#include <boost/hana/ext/std/integral_constant.hpp>
#include <boost/hana/ext/std/pair.hpp>
#include <boost/hana/ext/std/ratio.hpp>
#include <boost/hana/ext/std/vector.hpp>
#endif // !BOOST_HANA_EXT_STD_HPP
|
{"hexsha": "449415a7eadab0f23ff899953dc13195f971254b", "size": 898, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ios/Pods/boost-for-react-native/boost/hana/ext/std.hpp", "max_stars_repo_name": "rudylee/expo", "max_stars_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 8805.0, "max_stars_repo_stars_event_min_datetime": "2015-11-03T00:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:30:03.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/hana/ext/std.hpp", "max_issues_repo_name": "rudylee/expo", "max_issues_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 14694.0, "max_issues_repo_issues_event_min_datetime": "2015-02-24T15:13:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:16:45.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/hana/ext/std.hpp", "max_forks_repo_name": "rudylee/expo", "max_forks_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1329.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T20:25:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:10:38.000Z", "avg_line_length": 28.9677419355, "max_line_length": 79, "alphanum_fraction": 0.7538975501, "num_tokens": 216}
|
import cv2
import numpy as np
import PIL, PIL.Image
def imrectify(img, K, D, balance=0.0):
    """Undistort a fisheye image given camera matrix K and distortion coeffs D.

    Based on:
    https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-part-2-13990f1b157f

    `balance` trades off between cropping (0.0) and keeping all source
    pixels (1.0) in the rectified view.
    """
    # OpenCV wants the image size as (width, height)
    size_wh = img.shape[:2][::-1]
    # estimate the camera matrix for the undistorted view
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        K, D, size_wh, np.eye(3), balance=balance)
    # build the per-pixel remapping and apply it
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        K, D, np.eye(3), new_K, size_wh, cv2.CV_16SC2)
    return cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_CONSTANT)
def imresize(image, shape, resize_method=PIL.Image.LANCZOS):
    """Resize `image` to `shape` = (height, width, channels) via PIL.

    The target shape must have 1 or 3 channels and the same aspect ratio as
    the input. Grayscale inputs/outputs keep a trailing singleton channel
    axis. Returns a numpy array of exactly `tuple(shape)`.
    """
    assert (len(shape) == 3)
    assert (shape[-1] == 1 or shape[-1] == 3)
    # maintain aspect ratio -- compare with exact integer cross-multiplication
    # instead of `a/b == c/d`, which floor-divides (and so passes incorrectly)
    # under Python 2 and needlessly round-trips through floats under Python 3
    assert (image.shape[0] * shape[1] == image.shape[1] * shape[0])

    height, width, channels = shape

    # drop a trailing singleton channel axis so PIL sees a 2D grayscale array
    if len(image.shape) > 2 and image.shape[2] == 1:
        image = image[:, :, 0]

    im = PIL.Image.fromarray(image)
    im = im.resize((width, height), resize_method)  # PIL uses (width, height)
    im = np.array(im)

    # restore the channel axis for grayscale output
    if len(im.shape) == 2:
        im = np.expand_dims(im, 2)

    assert (im.shape == tuple(shape))
    return im
def yaw_rotmat(yaw):
    """Return the 3x3 rotation matrix about the z-axis by `yaw` radians."""
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, 0.],
                     [s,  c, 0.],
                     [0., 0., 1.]])
|
{"hexsha": "934f73593d68ec75dfc011bf789dc06b6d469281", "size": 1302, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/badgr/utils/np_utils.py", "max_stars_repo_name": "KaiW-53/badgr", "max_stars_repo_head_hexsha": "6184302f156a7bc624af57b2521b1e89ffd6523d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 110, "max_stars_repo_stars_event_min_datetime": "2020-02-14T05:25:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T07:40:52.000Z", "max_issues_repo_path": "src/badgr/utils/np_utils.py", "max_issues_repo_name": "KaiW-53/badgr", "max_issues_repo_head_hexsha": "6184302f156a7bc624af57b2521b1e89ffd6523d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-03-12T12:49:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:14:51.000Z", "max_forks_repo_path": "src/badgr/utils/np_utils.py", "max_forks_repo_name": "KaiW-53/badgr", "max_forks_repo_head_hexsha": "6184302f156a7bc624af57b2521b1e89ffd6523d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2020-02-21T02:32:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T17:54:56.000Z", "avg_line_length": 32.55, "max_line_length": 112, "alphanum_fraction": 0.6321044547, "include": true, "reason": "import numpy", "num_tokens": 420}
|
r"""## CMS Open Data and the MOD HDF5 Format
Starting in 2014, the CMS Collaboration began to release research-grade
recorded and simulated datasets on the [CERN Open Data Portal](http://opendata.
cern.ch/). These fantastic resources provide a unique opportunity for
researchers with diverse connections to the experimental particle physics world to
engage with cutting edge particle physics by developing tools and testing novel
strategies on actual LHC data. Our goal in making portions of the CMS Open
Data available in a reprocessed format is to ease as best as possible the
technical complications that have thus far been present when attempting to use
Open Data (see also [recent efforts by the CMS Collaboration](http://opendata.
cern.ch/docs/cms-releases-open-data-for-machine-learning) to make the data more
accessible).
To facilitate access to Open Data, we have developed a format utilizing the
widespread [HDF5 file format](https://www.hdfgroup.org/solutions/hdf5/) that
stores essential information for some particle physics analyses. This "MOD HDF5
Format" is currently optimized for studies based on jets, but may be updated in
the future to support other types of analyses.
To further the goals of Open Data, we have made our reprocessed samples
available on the [Zenodo platform](https://zenodo.org/). Currently, the only
"collection" of datasets that is available is `CMS2011AJets`, which was used in
[Exploring the Space of Jets with CMS Open Data](https://arxiv.org/abs/
1908.08542) for [EMD](/docs/emd)-based studies. More collections may be added
in the future as our research group completes more studies with the Open Data.
For now, this page focuses on the `CMS2011AJets` collection. This collection
includes datasets of jets that are CMS-recorded (CMS), Pythia-generated (GEN),
and detector-simulated (SIM), or in code `'cms'`, `'gen'`, `'sim'`,
respectively. The datasets include all available jets above 375 GeV, which is
where the HLT_Jet300 trigger was found to be fully efficient in both data and
simulation. Note that the pT values referenced in the name of the SIM/GEN
datasets are those of the generator-level hard parton. The DOIs of
`CMS2011AJets` MOD HDF5 datasets are:
[](https://
doi.org/10.5281/zenodo.3340205) - CMS 2011A Jets, pT > 375 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341500) - SIM/GEN QCD Jets 170-300 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341498) - SIM/GEN QCD Jets 300-470 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341419) - SIM/GEN QCD Jets 470-600 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3364139) - SIM/GEN QCD Jets 600-800 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341413) - SIM/GEN QCD Jets 800-1000 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341502) - SIM/GEN QCD Jets 1000-1400 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341770) - SIM/GEN QCD Jets 1400-1800 GeV
<br>
[](https://
doi.org/10.5281/zenodo.3341772) - SIM/GEN QCD Jets 1800-$\infty$ GeV
For more details regarding the creation of these samples, as well as for the
DOIs of the original CMS Open Datasets, see [Exploring the Space of Jets with
CMS Open Data](https://arxiv.org/abs/1908.08542). To get started using the
samples, see the [MOD Jet Demo](/demos/#mod-jet-demo) which makes use of the
[`load`](#load) function.
"""
from __future__ import absolute_import, division, print_function
import gc
import json
import math
import os
import re
import sys
import time
import warnings
import h5py
import numpy as np
import six
from energyflow.utils.data_utils import _get_filepath
from energyflow.utils import (COMP_MAP, EF_DATA_DIR, REVERSE_COMPS, ZENODO_URL_PATTERN,
create_pool, explicit_comp, ischrgd)
__all__ = ['MODDataset', 'load', 'filter_particles', 'kfactors']
# Registry of MOD HDF5 dataset collections hosted on Zenodo. Each dataset
# ('cms', 'sim', 'gen') maps to a list of subdatasets, where each subdataset
# entry is a tuple of (subdataset_name, number_of_files, zenodo_record_id).
# The record id and a per-file name are substituted into ZENODO_URL_PATTERN
# by load() to form the download URLs.
COLLECTIONS = {
    'CMS2011AJets': {
        'cms': {
            'subdatasets': [('CMS_Jet300_pT375-infGeV', 18, '3340205')],
        },

        'sim': {
            'subdatasets': [
                ('SIM170_Jet300_pT375-infGeV', 1, '3341500'),
                ('SIM300_Jet300_pT375-infGeV', 24, '3341498'),
                ('SIM470_Jet300_pT375-infGeV', 73, '3341419'),
                ('SIM600_Jet300_pT375-infGeV', 78, '3364139'),
                ('SIM800_Jet300_pT375-infGeV', 79, '3341413'),
                ('SIM1000_Jet300_pT375-infGeV', 40, '3341502'),
                ('SIM1400_Jet300_pT375-infGeV', 40, '3341770'),
                ('SIM1800_Jet300_pT375-infGeV', 20, '3341772'),
            ]
        },

        'gen': {
            'subdatasets': [
                ('GEN170_pT375-infGeV', 1, '3341500'),
                ('GEN300_pT375-infGeV', 24, '3341498'),
                ('GEN470_pT375-infGeV', 74, '3341419'),
                ('GEN600_pT375-infGeV', 79, '3364139'),
                ('GEN800_pT375-infGeV', 79, '3341413'),
                ('GEN1000_pT375-infGeV', 40, '3341502'),
                ('GEN1400_pT375-infGeV', 40, '3341770'),
                ('GEN1800_pT375-infGeV', 20, '3341772'),
            ]
        }
    }
}
###############################################################################
# PUBLIC FUNCTIONS
###############################################################################
# load(*args, amount=1, cache_dir='~/.energyflow', collection='CMS2011AJets',
# dataset='cms', subdatasets=None, validate_files=False,
# store_pfcs=True, store_gens=True, verbose=0)
def load(*args, **kwargs):
    r"""Loads samples from the specified MOD dataset. Any file that is needed
    that has not been cached will be automatically downloaded from Zenodo.
    Downloaded files are cached for later use. File checksums are optionally
    validated to ensure dataset fidelity.

    **Arguments**

    - ***args** : _arbitrary positional arguments_
        - Used to specify cuts to be made to the dataset while loading; see
        the detailed description of the positional arguments accepted by
        [`MODDataset`](#moddataset).
    - **amount** : _int_ or _float_
        - Approximate amount of the dataset to load. If an integer, this is the
        number of files to load (a warning is issued if more files are
        requested than are available). If a float, this is the fraction of the
        number of available files to load, rounded up to the nearest whole
        number. Note that since ints and floats are treated different, a value
        of `1` loads one file whereas `1.0` loads the entire dataset. A value
        of `-1` also loads the entire dataset.
    - **cache_dir** : _str_
        - The directory where to store/look for the files. Note that
        `'datasets'` is automatically appended to the end of this path, as well
        as the collection name. For example, the default is to download/look
        for files in the directory `'~/.energyflow/datasets/CMS2011AJets'`.
    - **collection** : _str_
        - Name of the collection of datasets to consider. Currently the only
        collection is `'CMS2011AJets'`, though more may be added in the future.
    - **dataset** : _str_
        - Which dataset in the collection to load. Currently the
        `'CMS2011AJets'` collection has `'cms'`, `'sim'`, and `'gen'` datasets.
    - **subdatasets** : {_tuple_, _list_} of _str_ or `None`
        - The names of subdatasets to use. A value of `None` uses all available
        subdatasets. Currently, for the `'CMS2011AJets'` collection, the
        `'cms'` dataset has one subdataset, `'CMS_Jet300_pT375-infGeV'`, the
        `'sim'` dataset has eight subdatasets arranged according to generator
        $\hat p_T$, e.g. `'SIM470_Jet300_pT375-infGeV'`, and the `'gen'`
        dataset also has eight subdatasets arranged similarly, e.g.
        `'GEN470_pT375-infGeV'`.
    - **validate_files** : _bool_
        - Whether or not to validate files according to their MD5 hashes. It
        is a good idea to set this to `True` when first downloading the files
        from Zenodo in order to ensure they downloaded properly.
    - **store_pfcs** : _bool_
        - Whether or not to store PFCs if they are present in the dataset.
    - **store_gens** : _bool_
        - Whether or not to store gen-level particles (referred to as
        "gens") if they are present in the dataset.
    - **verbose** : _int_
        - Verbosity level to use when loading. `0` is the least verbose, `1`
        is more verbose, and `2` is the most verbose.

    **Returns**

    - _MODDataset_
        - A `MODDataset` object containing the selected events or jets from the
        specified collection, dataset, and subdatasets.
    """

    default_kwargs = {
        'amount': 1,
        'cache_dir': '~/.energyflow',
        'collection': 'CMS2011AJets',
        'dataset': 'cms',
        'subdatasets': None,
        'validate_files': False,
        'store_gens': True,
        'store_pfcs': True,
        'verbose': 0,
    }

    # fill in defaults for options that were not explicitly provided
    for k,v in default_kwargs.items():
        if k not in kwargs:
            kwargs[k] = v

    # store arguments
    amount = kwargs['amount']
    cache_dir = kwargs['cache_dir']
    validate_files = kwargs['validate_files']
    verbose = kwargs['verbose']
    moddset_kwargs = {kw: kwargs[kw] for kw in ['store_gens', 'store_pfcs']}

    # verify collection
    cname = kwargs['collection']
    collection = _get_collection(cname)

    # verify dataset
    dataset = _get_dataset(collection, kwargs['dataset'])

    # determine subdatasets
    if kwargs['subdatasets'] is None:
        subdatasets = dataset['subdatasets']
    else:
        # filter subdatasets according to specified values
        subdatasets = [sdset for sdset in dataset['subdatasets']
                             if sdset[0] in kwargs['subdatasets']]

        # check that no unrecognized subdatasets were passed in
        allowed_sds = set([sdset[0] for sdset in dataset['subdatasets']])
        remaining_sds = set(kwargs['subdatasets']) - allowed_sds
        if len(remaining_sds):
            raise ValueError('Did not understand the following subdatasets: {}'.format(remaining_sds)
                             + ', acceptable values are {}'.format(allowed_sds))

    # get file info (md5 hashes for validation, total cross-section weights)
    info = _get_dataset_info(cname)
    hashes, total_weights = info['md5_hashes'], info['total_weights']

    # iterate over subdatasets
    moddsets = []
    for subdataset in subdatasets:
        name, nfiles, record = subdataset

        # get path to dataset files (relative to cache_dir)
        subdir = os.path.join('datasets', cname, name)

        # determine number of files to read in
        if amount == -1:
            nfiles_load = nfiles
        elif isinstance(amount, float):
            # wrap in int(): under Python 2, math.ceil returns a float,
            # which would break the range() below
            nfiles_load = int(math.ceil(amount * nfiles))
        elif isinstance(amount, int):
            if amount > nfiles:
                warnings.warn('Requested {} files but only have {}'.format(amount, nfiles))
                nfiles_load = nfiles
            else:
                nfiles_load = amount
        else:
            raise ValueError('Amount {} not understood'.format(amount))

        # iterate over files, downloading any that are missing from the cache
        modsubdsets = []
        start_subdset = time.time()
        for i in range(nfiles_load):
            start = time.time()

            filename = '{}_{}_compressed.h5'.format(name, i)
            file_hash = hashes[filename] if validate_files else None
            url = ZENODO_URL_PATTERN.format(record, filename)
            filepath = _get_filepath(filename, url, cache_dir, cache_subdir=subdir,
                                     file_hash=file_hash)

            # apply the selections in args while loading each file
            moddset_args = (filepath,) + args
            modsubdsets.append(MODDataset(*moddset_args, **moddset_kwargs))

            if verbose >= 2:
                print('  Loaded {} in {:.3f}s'.format(name + '_{}'.format(i), time.time() - start))

        if verbose >= 1:
            print('Loaded {} in {:.3f}s'.format(name, time.time() - start_subdset))

        # rescale weights so the subdataset's total cross section is preserved
        # even when only a subset of its files was loaded
        subdset_total_weight = sum([dset._orig_total_weight for dset in modsubdsets])
        for dset in modsubdsets:
            dset.jets_f[:,dset.weight] *= total_weights[name]/subdset_total_weight

        moddsets.extend(modsubdsets)

    # return concatenated MODDataset
    return MODDataset(datasets=moddsets)
def filter_particles(particles, which='all', pt_cut=None, chs=False, pt_i=0, pid_i=4, vertex_i=5):
    """Constructs a mask that will select particles according to specified
    properties. Currently supported are selecting particles according to their
    charge, removing particles associated to a pileup vertex, and implementing
    a minimum particle-level pT cut

    **Arguments**

    - **particles** : _numpy.ndarray_
        - Two-dimensional array of particles.
    - **which** : {`'all'`, `'charged'`, `'neutral'`}
        - Selects particles according to their charge.
    - **pt_cut** : _float_ or `None`
        - If not `None`, the minimum transverse momentum a particle can have to
        be selected.
    - **chs** : _bool_
        - Whether or not to implement charged hadron subtraction (CHS), which
        removes particles associated to a non-leading vertex (i.e. with vertex
        ids greater than or equal to 1).
    - **pt_i** : _int_
        - Column index of the transverse momentum values of the particles.
    - **pid_i** : _int_
        - Column index of the particle IDs (used to select by charge).
    - **vertex_i** : _int_
        - Column index of the vertex IDs (used to implement CHS).

    **Returns**

    - _numpy.ndarray_
        - A boolean mask which selects the specified particles, i.e.
        `particles[filter_particles(particles, ...)]` will be an array of only
        those particles passing the specified cuts.

    **Raises**

    - ValueError if `which` is not one of the accepted values.
    """

    mask = np.ones(len(particles), dtype=bool)

    # minimum pT cut
    if pt_cut is not None:
        mask &= (particles[:,pt_i] >= pt_cut)

    # select specified particles; validate `which` up front so that an invalid
    # value fails fast instead of after computing the charge mask
    # (also fixes the previously malformed error message, which was missing
    # the closing quote on 'neutral')
    if which != 'all':
        if which not in ('charged', 'neutral'):
            raise ValueError("'which' must be one of {'all', 'charged', 'neutral'}")

        chrg_mask = ischrgd(particles[:,pid_i])
        mask &= chrg_mask if which == 'charged' else ~chrg_mask

    # apply chs: keep only leading-vertex (0) or unknown (-1) particles
    if chs:
        mask &= (particles[:,vertex_i] <= 0)

    return mask
def kfactors(dataset, pts, npvs=None, collection='CMS2011AJets', apply_residual_correction=True):
    """Evaluates k-factors used by a particular collection. Currently, since
    CMS2011AJets is the only supported collection, some of the arguments are
    specific to the details of this collection (such as the use of jet pTs) and
    may change in future versions of this function.

    **Arguments**

    - **dataset** : {`'sim'`, `'gen'`}
        - Which type of k-factor to compute. `'sim'` additionally reweights
        by the number of primary vertices to match the pileup distribution
        of the CMS data; `'gen'` does not.
    - **pts** : _numpy.ndarray_
        - Jet transverse momenta, used for the pT-dependent k-factor that
        corrects for the leading-order matrix elements used in generation.
        For CMS2011AJets these are derived from Figure 5 of
        https://doi.org/10.1016/j.physletb.2014.01.034.
    - **npvs** : _numpy.ndarray_ of integer type or `None`
        - Number of primary vertices per simulated event; same length as
        `pts`. Ignored when `dataset` is `'gen'`.
    - **collection** : _str_
        - Name of the collection of datasets to consider. Currently only
        `'CMS2011AJets'` is available.
    - **apply_residual_correction** : _bool_
        - Whether to apply a residual correction, derived from the first bin
        of the pT spectrum, for the remaining data/simulation difference.

    **Returns**

    - _numpy.ndarray_
        - k-factors for the events specified by `pts` (and optionally
        `npvs`); multiply these into any existing event weights.
    """

    # only 'sim' and 'gen' have k-factors
    if dataset not in {'sim', 'gen'}:
        raise ValueError("dataset must be one of 'sim' or 'gen'")

    info = _get_dataset_info(collection)

    # pT-dependent base k-factors from https://arxiv.org/abs/1309.5311
    ks = np.interp(pts, info['kfactor_x'], info['kfactor_y'])

    # pileup (npv) reweighting applies to sim only
    if dataset == 'sim':
        if npvs is None:
            raise ValueError("npvs cannot be None when dataset is 'sim'")
        ks = ks * info['npv_hist_ratios'][np.asarray(npvs, dtype=int)]

    if apply_residual_correction:
        ks = ks * info['residual_factor']

    return ks
###############################################################################
# PRIVATE FUNCTIONS for MODDataset
###############################################################################
def _get_collection(cname):
    """Return the collection dict for `cname`, raising ValueError if unknown."""
    try:
        return COLLECTIONS[cname]
    except KeyError:
        raise ValueError("Collection '{}' not recognized".format(cname))
def _get_dataset(collection, dname):
# check for valid dname (special case info since we add that to collection)
if dname == 'info' or dname not in collection:
raise ValueError('dataset {} not recognized'.format(dname))
return collection[dname]
def _get_dataset_info(cname):
    """Return the info metadata for collection `cname`, caching it on the
    collection dict under the reserved 'info' key.

    The metadata is read from the bundled '<cname>.json' file on first
    access; numeric entries are converted to numpy arrays once, up front.
    """
    collection = _get_collection(cname)

    if 'info' not in collection:
        json_path = os.path.join(EF_DATA_DIR, '{}.json'.format(cname))
        with open(json_path, 'r') as jf:
            info = json.load(jf)

        # these are consumed numerically (np.interp, integer fancy indexing)
        for key in ('kfactor_x', 'kfactor_y', 'npv_hist_ratios'):
            info[key] = np.asarray(info[key])

        collection['info'] = info

    return collection['info']
def _cols_str(cols, nspaces=4):
return str(cols).replace('\n', '\n' + nspaces*' ')
def _separate_particle_arrays(particles, particles_index, mask, copy=True):
# array to hold particles
particles_array = np.zeros(np.count_nonzero(mask), dtype='O')
# iterate over indices
n = 0
for start, end, m in zip(particles_index[:-1], particles_index[1:], mask):
if m:
particles_array[n] = np.array(particles[start:end], copy=copy)
n += 1
return particles_array
def _process_selections(sel_list):
    """Normalize a list of selections into a single '&'-joined string.

    Each element may already be a string, or a tuple/list whose parts are
    stringified and concatenated (e.g. ('corr_jet_pts >', 400)).
    """
    pieces = []
    for sel in sel_list:
        if isinstance(sel, six.string_types):
            pieces.append(sel)
        else:
            pieces.append(''.join(map(str, sel)))

    return '&'.join(pieces)
def _moddset_save(arg):
    """Worker function: save the i-th dataset to `filepath` with `compression`.

    NOTE(review): relies on a global `moddsets` sequence that is not defined
    in this portion of the file -- presumably it is set up before worker
    processes/threads run (see create_pool usage); confirm against the
    save-related code elsewhere in the module.
    """
    i, filepath, compression = arg
    moddsets[i].save(filepath, compression=compression, verbose=0)
def _make_particles_index(particle_arrays):
# list of indices
index = [0]
# iterate over all particles
for particles in particle_arrays:
index.append(index[-1] + len(particles))
# convert to numpy array with proper dtype
return np.asarray(index, dtype=np.uint32)
def _write_large_object_array_to_h5(hf, name, arr, dtype=None, ncols=None,
                                    chunksize=10**5, **compression):
    """Concatenate an object array of 2D arrays into one HDF5 dataset.

    Members of `arr` are stacked vertically and written `chunksize` members
    at a time to bound peak memory usage.

    - hf: open h5py File/Group in which to create the dataset.
    - name: name of the dataset to create.
    - arr: sequence of 2D arrays; assumed non-empty with a consistent
      column count across members -- TODO confirm callers guarantee this.
    - dtype/ncols: inferred from arr[0] when None.
    - compression: keyword options forwarded to create_dataset.

    Returns the created h5py dataset.
    """
    # total rows across all member arrays
    nrows = sum([len(x) for x in arr])
    # infer layout from the first member when not explicitly given
    ncols = arr[0].shape[1] if ncols is None else ncols
    dtype = arr[0].dtype if dtype is None else dtype
    dataset = hf.create_dataset(name, (nrows, ncols), dtype=dtype, **compression)

    # write in chunks of `chunksize` member arrays
    begin = end = ind = 0
    while end < len(arr):
        end = min(len(arr), end + chunksize)
        arr_chunk = np.concatenate(arr[begin:end], axis=0)
        dataset[ind:ind+len(arr_chunk)] = arr_chunk
        begin = end
        ind += len(arr_chunk)
        # release the chunk before allocating the next one
        del arr_chunk

    return dataset
###############################################################################
# MODDataset
###############################################################################
class MODDataset(object):
"""Loads and provides access to datasets in MOD HDF5 format. Jets can be
selected when loading from file according to a number of kinematic
attributes. MOD HDF5 datasets are created via the [`save`](#save) method.
Currently, the MOD HDF5 format consists of an HDF5 file with the following
arrays, each of which are stored as properties of the `MODDataset`:
- `/jets_i` - _int64_
- An array of integer jet attributes, which are currently:
- `fn` : The file number of the jet, used to index the
[`filenames`](#filenames) array.
- `rn` : The run number of the jet.
- `lbn` : The lumiblock number (or lumisection) of the jet.
- `evn` : The event number of the jet.
- `npv` (CMS/SIM only) : The number of primary vertices of the
event containing the jet.
- `quality` (CMS/SIM only) : The quality of the jet, where `0`
means no quality, `1` is "loose", `2` is "medium", and `3` is
"tight".
- `hard_pid` (SIM/GEN only) : The particle ID of the hard parton
associated to the jet (`0` if not associated).
- `/jets_f` - _float64_
- An array of floating point jet attributes, which are currently:
- `jet_pt` : Transverse momentum of the jet.
- `jet_y` : Rapidity of the jet.
- `jet_phi` : Azimuthal angle of the jet.
- `jet_m` : Mass of the jet.
- `jet_eta` : Pseudorapidity of the jet.
- `jec` (CMS/SIM only) : Jet energy correction.
- `jet_area` (CMS/SIM only) : Area of the jet.
- `jet_max_nef` (CMS/SIM only) : Maximum of the hadronic and
electromagnetic energy fractions of the jet.
- `gen_jet_pt` (SIM only) : Transverse momentum of an associated
GEN jet. `-1` if not associated.
- `gen_jet_y` (SIM only) : Rapidity of an associated GEN jet. `-1`
if not associated.
- `gen_jet_phi` (SIM only) : Azimuthal angle of an associated GEN
jet. `-1` if not associated.
- `gen_jet_m` (SIM only) : Mass of an associated GEN jet. `-1` if
not associated.
- `gen_jet_eta` (SIM only) : Pseudorapidity of an associated GEN
jet. `-1` if not associated.
- `hard_pt` (SIM/GEN only) : Transverse momentum of an associated
hard parton. `-1` if not associated.
- `hard_y` (SIM/GEN only) : Rapidity of an associated hard parton.
`-1` if not associated.
- `hard_phi` (SIM/GEN only) : Azimuthal angle of an associated hard
parton. `-1` if not associated.
- `weight` : Contribution of this jet to the cross-section, in
nanobarns.
- `/pfcs` - _float64_ (CMS/SIM only)
- An array of all particle flow candidates, with attributes listed
below. There is a separate `/pfcs_index` array in the file which
contains information for `MODDataset` to separate these particles into
separate jets. The columns of the array are currently:
- `pt` : Transverse momentum of the PFC.
- `y` : Rapidity of the PFC.
- `phi` : Azimuthal angle of the PFC.
- `m` : Mass of the PFC.
- `pid` : PDG ID of the PFC.
- `vertex` : Vertex ID of the PFC. `0` is leading vertex, `>0` is
a pileup vertex, and `-1` is unknown. Neutral particles are
assigned to the leading vertex.
- `/gens` - _float64_ (SIM/GEN only)
- An array of all generator-level particles, currently with the same
columns as the `pfcs` array (the vertex column contains all `0`s). For
the SIM dataset, these are the particles of jets associated to the SIM
jets which are described in the `jets_i` and `jets_f` arrays. As with
`pfcs`, there is a separate `gens_index` array which tells `MODDataset`
how to separate these gen particles into distinct jets.
- `/filenames` - _str_
- An array of strings indexed by the `fn` attribute of each jet. For
CMS, this array is one dimensional and contains the CMS-provided
filenames. For SIM/GEN, this array is two dimensional where the first
column is the pT value that appears in the name of the dataset and the
second column is the CMS-provided filename. In all cases, indexing this
array with the `fn` attribute of a jet gives the file information in
which the event containing that jet is to be found.
Note that the column names of the `jets_i`, `jets_f`, `pfcs`, and `gens`
arrays are stored as lists of strings in the attributes `jets_i_cols`,
`jets_f_cols`, `pfcs_cols`, and `gens_cols`.
For each of the above arrays, `MODDataset` stores the index of the column
as an attribute with the same name as the column. For example, for an
instance called `modds`, `modds.fn` has a value of `0` since it is the
first column in the `jets_i` array, `modds.jet_phi` has a value of `2`,
`modds.m` has a value of `3`, etc.
Even more helpfully, a view of each column of the jets arrays is stored
as an attribute as well, so that `modds.jet_pts` is the same as
`modds.jets_f[:,modds.jet_pt]`, `modds.evns` is the same as
`modds.jets_i[:,modds.evn]`, etc. Additionally, one special view is stored,
`corr_jet_pts`, which is equal to the product of the jet pTs and the JECs,
i.e. `modds.jet_pts*modds.jecs`.
`MODDataset` supports the builtin `len()` method, which returns the number
of jets currently stored in the dataset, as well as the `print()` method,
which prints a summary of the dataset.
"""
# MODDataset(*args, datasets=None, path=None, num=-1, shuffle=True,
# store_pfcs=True, store_gens=True)
def __init__(self, *args, **kwargs):
"""`MODDataset` can be initialized from a MOD HDF5 file or from a list
of existing `MODDataset`s. In the first case, the filename should be
given as the first positional argument. In the second case, the
`datasets` keyword argument should be set to a list of `MODDataset`
objects.
**Arguments**
- ***args** : _arbitrary positional arguments_
- Each argument specifies a requirement for an event to be selected
and kept in the `MODDataset`. All requirements are ANDed together.
Each specification can be a string or a tuple/list of objects that
will be converted to strings and concatenated together. Each string
specifies the name of one of the columns of one of the jets arrays
(`'corr_jet_pts'` is also accepted, see above, as well as
`'abs_jet_eta'`, `'abs_gen_jet_y'`, etc, which use the absolute
values of the [pseudo]rapidities of the jets) as well as one or
more comparisons to be performed using the values of that column
and the given values in the string. For example,
`('corr_jet_pts >', 400)`, which is the same as
`'corr_jet_pts>400'`, will select jets with a corrected pT above
400 GeV. Ampersands may be used within one string to indicated
multiple requirements, e.g. `'corr_jet_pts > 400 & abs_jet_eta'`,
which has the same effect as using multiple arguements each with a
single requirement.
- **datasets** : {_tuple_, _list_} of `MODDataset` instances or `None`
- `MODDataset`s from which to initialize this dataset. Effectively
what this does is to concatenate the arrays held by the datasets.
Should always be `None` when initializing from an existing file.
- **path** : _str_ or `None`
- If not `None`, then `path` is prepended to the filename when
initializing from file. Has no effect when initializing from
existing datasets.
- **num** : _int_
- The number of events or jets to keep after subselections are
applied. A value of `-1` keeps the entire dataset. The weights
are properly rescaled to preserve the total cross section of the
selection.
- **shuffle** : _bool_
- When subselecting a fraction of the dataset (i.e. `num!=-1`),
if `False` the first `num` events passing cuts will be kept, if
`True` then a random subset of `num` events will be kept. Note that
this has no effect when `num` is `-1`, and also that this flag only
affects which events are selected and does not randomize the order
of the events that are ultimately stored by the `MODDataset` object.
- **store_pfcs** : _bool_
- Whether or not to store PFCs if they are present in the dataset.
- **store_gens** : _bool_
- Whether or not to store gen-level particles (referred to as
"gens") if they are present in the dataset.
"""
default_kwargs = {
'datasets': None,
'num': -1,
'path': None,
'shuffle': True,
'store_gens': True,
'store_pfcs': True,
}
# process kwargs
for k,v in default_kwargs.items():
if k not in kwargs:
kwargs[k] = v
# store options
self.num = kwargs.pop('num')
self.shuffle = kwargs.pop('shuffle')
self.store_pfcs = kwargs.pop('store_pfcs')
self.store_gens = kwargs.pop('store_gens')
datasets = kwargs.pop('datasets')
# check for disallowed kwargs
other_allowed_kwargs = {'_arrays', '_dataset', 'path'}
for kw in kwargs:
if kw not in other_allowed_kwargs:
raise TypeError("__init__() got an unexpected keyword argument '{}'".format(kw))
# initialize from explicit arrays (used only when making files initially)
if len(args) == 0 and '_arrays' in kwargs and '_dataset' in kwargs:
self._init_from_arrays(kwargs['_dataset'], kwargs['_arrays'])
# initialize from list of datasets
elif datasets is not None:
self.selection = _process_selections(args)
self._init_from_datasets(datasets)
# initialize from file
elif len(args) and isinstance(args[0], six.string_types):
self.selection = _process_selections(args[1:])
self._init_from_filename(args[0], kwargs['path'])
else:
raise RuntimeError('Initialization of MODDataset not understood')
#################
# PRIVATE METHODS
#################
# close any HDF5 files and try to garbage collect arrays to free memory
def __del__(self):
# close file
self.close()
# delete arrays
if hasattr(self, '_jets_i'):
del self._jets_i
if hasattr(self, '_jets_f'):
del self._jets_f
if hasattr(self, '_filenames'):
del self._filenames
# delete pfcs if they exist
if hasattr(self, '_pfcs'):
del self._pfcs
# delete gens if they exist
if hasattr(self, '_gens'):
del self._gens
# delete particles if the exist
if hasattr(self, '_particles'):
del self._particles
# force garbage collection
gc.collect()
# length of this object is the number of jets it's holding
def __len__(self):
return len(self.jets_i)
# makes MODDataset printable
def __repr__(self):
s = ('{} MODDataset\n'.format(self.dataset.upper()) +
' Jet Integers - {}\n {}\n'.format(self.jets_i.shape, _cols_str(self.jets_i_cols)) +
' Jet Floats - {}\n {}\n'.format(self.jets_f.shape, _cols_str(self.jets_f_cols)))
if self.store_pfcs:
s += ' PFCs - {}\n {}\n'.format(self.pfcs.shape, _cols_str(self.pfcs_cols))
if self.store_gens:
s += ' GENs - {}\n {}\n'.format(self.gens.shape, _cols_str(self.gens_cols))
s += ' Filenames - {}\n'.format(self.filenames.shape)
return s
# determine which type of dataset this object is holding
def _store_dataset_info(self, dataset):
assert dataset in ['cms', 'sim', 'gen'], "Dataset must be one of ['cms', 'sim', 'gen']"
self.dataset = dataset
self.cms = (self.dataset == 'cms')
self.sim = (self.dataset == 'sim')
self.gen = (self.dataset == 'gen')
# update options based on dataset type
self.store_pfcs &= not self.gen
self.store_gens &= not self.cms
# store column names of the given array
def _store_cols(self, arr, cols=None, allow_multiple=False):
# get cols from file
if cols is None:
cols = self.hf[arr].attrs['cols']
cols = np.asarray(cols, dtype='U')
setattr(self, '_' + arr + '_cols', cols)
for i,col in enumerate(cols):
# ensure cols are unique
if not allow_multiple:
m = "Repeat instances of col '{}', check file validity".format(col)
assert not hasattr(self, col), m
# store column index
setattr(self, col, i)
# store views of the columns of the jets_i and jets_f arrays as attributes
def _store_views_of_jets(self):
for jets in ['jets_i', 'jets_f']:
# retrieve array, cols
arr, cols = getattr(self, jets), getattr(self, jets + '_cols')
for i,col in enumerate(cols):
# set attribute + s as view of this column of array
setattr(self, col + 's', arr[:,i])
# calculate corrected pts
self.corr_jet_pts = self.jet_pts*self.jecs if hasattr(self, 'jecs') else self.jet_pts
# ensure that the particles attribute is set appropriately
def _set_particles(self):
if self.store_pfcs and not self.gen:
self._particles = self.pfcs
self._particles_cols = self.pfcs_cols
if self.store_gens and self.gen:
self._particles = self.gens
self._particles_cols = self.gens_cols
    def _init_from_filename(self, filename, path):
        """Loads this MODDataset from an HDF5 file on disk.

        The dataset type is inferred from the filename (it must contain
        'cms', 'sim', or 'gen'). Jets are read eagerly, the stored
        selections are applied, and if `self.num` is not -1 at most that
        many jets are kept, with the surviving weights rescaled so the
        total weight of the selection is preserved.

        - filename : file name, with or without the '.h5' suffix.
        - path : directory to prepend to `filename`, or None if
          `filename` is already a complete filepath.
        """
        # handle suffix
        if not filename.endswith('.h5'):
            filename += '.h5'
        # get filepath
        self.filepath = filename if path is None else os.path.join(path, filename)
        # determine type of dataset from the lowercased basename; dataset is
        # None if no keyword matches, and _store_dataset_info then asserts
        filename_lower = os.path.basename(self.filepath).lower()
        dataset = ('cms' if 'cms' in filename_lower else
                   ('sim' if 'sim' in filename_lower else
                    ('gen' if 'gen' in filename_lower else None)))
        # store dataset info
        self._store_dataset_info(dataset)
        # open h5 file (kept open; closed by close()/__del__)
        self._hf = h5py.File(self.filepath, 'r')
        # load selected jets
        self._jets_i = self.hf['jets_i'][:]
        self._jets_f = self.hf['jets_f'][:]
        # update store particles based on availability in the file
        self.store_pfcs &= ('pfcs' in self.hf)
        self.store_gens &= ('gens' in self.hf)
        # store jets cols
        self._store_cols('jets_i')
        self._store_cols('jets_f')
        # store views of jets cols
        self._store_views_of_jets()
        # sum all weights
        self._orig_total_weight = np.sum(self.weights)
        # process selections
        self._mask, self.specs = self.sel(_selection=self.selection)
        # determine weight factor caused by subselecting
        total_weight_after_selections = np.sum(self.weights[self._mask])
        # select the requested number of jets
        if self.num != -1:
            # indices of jets passing the selection; shuffle if requested
            arange = np.arange(len(self._mask))[self._mask]
            if self.shuffle:
                np.random.shuffle(arange)
            # mask out jets beyond what was requested
            self._mask[arange[self.num:]] = False
            # weight factor compensating for the jets that were dropped
            weight_factor = total_weight_after_selections/np.sum(self.weights[self._mask])
        else:
            weight_factor = 1.0
        # apply mask to jets
        self._jets_i = self.jets_i[self._mask]
        self._jets_f = self.jets_f[self._mask]
        # alter weights due to subselection
        self._jets_f[:,self.weight] *= weight_factor
        # store views of jets cols again, now that the jets are masked
        self._store_views_of_jets()
        if self.store_pfcs:
            # read in pfcs_index
            self.pfcs_index = self.hf['pfcs_index'][:]
            # store pfcs as separate arrays
            self._pfcs = _separate_particle_arrays(self.hf['pfcs'][:], self.pfcs_index, self._mask)
            # store pfcs cols
            self._store_cols('pfcs')
        if self.store_gens:
            # read in gens_index
            self.gens_index = self.hf['gens_index'][:]
            # store gens as separate arrays
            self._gens = _separate_particle_arrays(self.hf['gens'][:], self.gens_index, self._mask)
            # store gens cols; gen columns may repeat pfc column names
            self._store_cols('gens', allow_multiple=self.store_pfcs)
        # store filenames
        self._filenames = self.hf['filenames'][:].astype('U')
        # set particles
        self._set_particles()
    def _init_from_datasets(self, datasets):
        """Builds this MODDataset by concatenating a list of existing
        MODDataset objects of the same type.

        The first dataset provides the dataset type, filenames, column
        names, and specs; every subsequent dataset is asserted to be
        consistent with it. Jet (and particle, where stored) arrays are
        concatenated along the jet axis.
        """
        # lists to hold arrays to concatenate
        jets_i, jets_f = [], []
        pfcs, gens = [], []
        # iterate over arguments
        for i,dataset in enumerate(datasets):
            # ensure they are a MODDataset
            td = type(dataset)
            if td != MODDataset:
                m = "Incorrect type '{}' encountered when initializing from list".format(td)
                raise TypeError(m)
            # get info from first dataset
            if i == 0:
                # extract dataset info
                self._store_dataset_info(dataset.dataset)
                # array info
                self._filenames = dataset.filenames
                # restrict particle storage to what the first dataset stores
                self.store_pfcs &= dataset.store_pfcs
                self.store_gens &= dataset.store_gens
                # store jets cols
                self._store_cols('jets_i', dataset.jets_i_cols)
                self._store_cols('jets_f', dataset.jets_f_cols)
                # store specs
                self.specs = dataset.specs
                # pfcs cols
                if self.store_pfcs:
                    self._store_cols('pfcs', dataset.pfcs_cols)
                # gens cols (may repeat pfc column names)
                if self.store_gens:
                    self._store_cols('gens', dataset.gens_cols, allow_multiple=self.store_pfcs)
            # check for consistency with the first dataset
            else:
                m = "Datasets must all be of the same type ('cms', 'sim', 'gen')"
                assert dataset.dataset == self.dataset, m
                assert np.all(dataset.filenames == self.filenames), 'filenames must match'
                assert np.all(dataset.jets_i_cols == self.jets_i_cols), 'jets_i_cols must match'
                assert np.all(dataset.jets_f_cols == self.jets_f_cols), 'jets_f_cols must match'
                if self.store_pfcs:
                    assert np.all(dataset.pfcs_cols == self.pfcs_cols), 'pfcs_cols must match'
                if self.store_gens:
                    assert np.all(dataset.gens_cols == self.gens_cols), 'gen_cols must match'
            # store jets
            jets_i.append(dataset.jets_i)
            jets_f.append(dataset.jets_f)
            # store pfcs
            if self.store_pfcs:
                pfcs.append(dataset.pfcs)
            # store gens
            if self.store_gens:
                gens.append(dataset.gens)
        # concatenate jets
        self._jets_i = np.concatenate(jets_i, axis=0)
        self._jets_f = np.concatenate(jets_f, axis=0)
        # store views of jets cols
        self._store_views_of_jets()
        # sum all weights
        self._orig_total_weight = np.sum(self.weights)
        if self.store_pfcs:
            self._pfcs = np.concatenate(pfcs, axis=0)
        if self.store_gens:
            self._gens = np.concatenate(gens, axis=0)
        # set particles
        self._set_particles()
    # note that this method of initialization is not publicly supported
    def _init_from_arrays(self, dataset, arrays):
        """Initializes this MODDataset directly from a dict of arrays.

        Used internally (e.g. by `save` when splitting into multiple
        files); not part of the public API.

        - dataset : one of 'cms', 'sim', 'gen'.
        - arrays : dict with keys 'jets_i', 'jets_f', their '*_cols'
          entries, 'filenames', and optionally 'pfcs'/'gens' plus their
          '*_cols' entries.
        """
        # update options based on which particle arrays were provided
        self.store_pfcs &= 'pfcs' in arrays
        self.store_gens &= 'gens' in arrays
        # store dataset info by hand
        self._store_dataset_info(dataset)
        # jets arrays
        self._jets_i, self._jets_f = arrays['jets_i'], arrays['jets_f']
        # jets cols
        self._store_cols('jets_i', arrays['jets_i_cols'])
        self._store_cols('jets_f', arrays['jets_f_cols'])
        # store views of jets cols
        self._store_views_of_jets()
        # sum all weights
        self._orig_total_weight = np.sum(self.weights)
        # pfcs
        if self.store_pfcs:
            self._pfcs = arrays['pfcs']
            self._store_cols('pfcs', arrays['pfcs_cols'])
        # gens (columns may repeat those of pfcs)
        if self.store_gens:
            self._gens = arrays['gens']
            self._store_cols('gens', arrays['gens_cols'], allow_multiple=self.store_pfcs)
        # filenames
        self._filenames = np.asarray(arrays['filenames'], dtype='U')
        # set particles
        self._set_particles()
################
# PUBLIC METHODS
################
    def apply_mask(self, mask, preserve_total_weight=False):
        """Subselects jets held by the `MODDataset` according to a boolean
        mask.
        **Arguments**
        - **mask** : _numpy.ndarray_ or type _bool_
            - A boolean mask used to select which jets are to be kept. Should
            be the same length as the `MODDataset` object.
        - **preserve_total_weight** : _bool_
            - Whether or not to keep the cross section of the `MODDataset`
            fixed after the selection.
        """
        if len(mask) != len(self):
            raise IndexError('Incorrectly sized mask')
        # remember the total weight before masking so it can be restored
        if preserve_total_weight:
            total_weight_before_mask = np.sum(self.weights)
        # keep only the selected jets
        self._jets_i = self.jets_i[mask]
        self._jets_f = self.jets_f[mask]
        # rescale the surviving weights so their sum is unchanged
        if preserve_total_weight:
            weight_factor = total_weight_before_mask/np.sum(self.jets_f[:,self.weight])
            self._jets_f[:,self.weight] *= weight_factor
        # refresh the per-column attribute views (they point at the old arrays)
        self._store_views_of_jets()
        if self.store_pfcs:
            self._pfcs = self.pfcs[mask]
        if self.store_gens:
            self._gens = self.gens[mask]
        # set particles
        self._set_particles()
def sel(self, *args, **kwargs):
"""Returns a boolean mask that selects jets according to the specified
requirements.
**Arguments**
- ***args** : _arbitrary positional arguments_
- Used to specify cuts to be made to the dataset while loading; see
the detailed description of the positional arguments accepted by
[`MODDataset`](#moddataset).
**Returns**
- _numpy.ndarray_ of type _bool_
- A boolean mask that will select jets that pass all of the
specified requirements.
"""
selection = kwargs.pop('_selection', None)
for kw in kwargs:
raise ValueError("Unknown keyword argument '{}'".format(kw))
return_specs = False
if selection is None:
selection = _process_selections(args)
elif len(args) == 0:
return_specs = True
else:
raise ValueError("args cannot be set when using '_selection'")
# make mask which is all true
mask = np.ones(self.jets_i.shape[0], dtype=bool)
# valid columns to select from
if not hasattr(self, 'selection_cols'):
self.selection_cols = self.jets_f_cols.tolist() + self.jets_i_cols.tolist()
self.selection_cols += ['corr_jet_pt', 'abs_jet_eta', 'abs_jet_y']
# handle special cases for sim
if 'get_jet_eta' in self.selection_cols:
self.selection_cols += ['abs_gen_jet_eta', 'abs_gen_jet_y']
# special cases for gen
if self.gen:
self.selection_cols += ['quality']
if not len(selection):
return (mask, []) if return_specs else mask
# regular expression for selection
if not hasattr(self, '_sel_re'):
cols_re = '|'.join(self.selection_cols)
comps_re = '|'.join(COMP_MAP.keys())
expr = (r'\s*(-?(?:\d*\.\d*|inf)\s*({0})|-?\d+\s*({0}))?'
r'\s*({1})s?'
r'\s*(({0})\s*-?(?:\d*\.\d*|inf)|({0})\s*-?\d+)?\s*(&\s*|$)').format(comps_re, cols_re)
self._sel_re = re.compile(expr)
self._sel_re_check = re.compile('(?:{})+'.format(expr))
# check that we overall have a valid selection
if self._sel_re_check.fullmatch(selection) is None:
raise ValueError("Selection '{}' not understood".format(selection))
# iterate over selections
specs = []
for groups in self._sel_re.findall(selection):
name = groups[3] + 's'
nspecs = 0
for i in [0,4]:
val, cf, ci = groups[i:i+3]
if val != '':
if cf != '':
c = cf
val = float(val.replace(cf, ''))
elif ci != '':
c = ci
val = int(val.replace(ci, ''))
else:
raise ValueError('Invalid groups from selection: {}'.format(groups))
# handle reversals
if i == 0:
c = REVERSE_COMPS.get(c, c)
specs.append((name, c, val))
nspecs += 1
if nspecs == 0:
raise ValueError('Invalid groups from selection: {}'.format(groups))
# apply specs
for spec in specs:
name = spec[0]
if 'abs_' in name:
arr = np.abs(getattr(self, name.replace('abs_', '')))
elif 'quality' in name and self.gen:
continue
else:
arr = getattr(self, name)
mask &= explicit_comp(arr, spec[1], spec[2])
return (mask, specs) if return_specs else mask
    def save(self, filepath, npf=-1, compression=None, verbose=1, n_jobs=1):
        """Saves a `MODDataset` in the MOD HDF5 format.
        **Arguments**
        - **filepath** : _str_
            - The filepath (with or without the `'.h5'` suffix) where the saved
            file will be located.
        - **npf** : _int_
            - The number of jets per file. If not `-1`, multiple files will be
            created with `npf` jets as the maximum number stored in each file,
            in which case `'_INDEX'`, where `INDEX` is the index of that file,
            will be appended to the filename.
        - **compression** : _int_ or `None`
            - If not `None`, the gzip compression level to use when saving the
            arrays in the HDF5 file. If not `None`, `'_compressed'` will be
            appended to the end of the filename.
        - **verbose** : _int_
            - Verbosity level to use when saving the files.
        - **n_jobs** : _int_
            - The number of processes to use when saving the files; only
            relevant when `npf!=-1`.
        """
        path, name = os.path.split(filepath)
        # strip the '.h5' suffix, if present
        if name.endswith('.h5'):
            name = '.'.join(name.split('.')[:-1])
        start = time.time()
        # multi-file mode: carve the jets into chunks of at most npf and save
        # each chunk as its own MODDataset
        if npf != -1:
            # module-level global so that worker processes (via _moddset_save,
            # presumably reading this global — defined elsewhere in this
            # module) can access the temporary datasets
            global moddsets
            i = begin = end = 0
            args, moddsets = [], []
            while end < len(self.jets_i):
                end = min(end + npf, len(self.jets_i))
                arrays = {'jets_i': self.jets_i[begin:end], 'jets_i_cols': self.jets_i_cols,
                          'jets_f': self.jets_f[begin:end], 'jets_f_cols': self.jets_f_cols,
                          'filenames': self.filenames}
                if self.store_pfcs:
                    arrays['pfcs'] = self.pfcs[begin:end]
                    arrays['pfcs_cols'] = self.pfcs_cols
                if self.store_gens:
                    arrays['gens'] = self.gens[begin:end]
                    arrays['gens_cols'] = self.gens_cols
                # per-chunk filename gets the chunk index appended
                filepath = os.path.join(path, '{}_{}'.format(name, i))
                moddset = self.__class__(_dataset=self.dataset, _arrays=arrays)
                if n_jobs == 1:
                    # save serially, right away
                    moddset.save(filepath, compression=compression, verbose=verbose)
                else:
                    # defer saving to the process pool below
                    moddsets.append(moddset)
                    args.append((i, filepath, compression))
                begin = end
                i += 1
            if n_jobs != 1:
                if verbose >= 1:
                    l = len(args)
                    pf = (l, 's' if l > 1 else '', time.time() - start)
                    print('Constructed {} temporary MODDataset{} in {:.3f}s'.format(*pf))
                # determine the number of worker processes
                if n_jobs == -1:
                    n_jobs = os.cpu_count()
                if n_jobs is None:
                    n_jobs = 4
                start = time.time()
                with create_pool(processes=min(n_jobs, len(args))) as pool:
                    for i,_ in enumerate(pool.imap_unordered(_moddset_save, args, chunksize=1)):
                        if verbose >= 1 and ((i+1) % 5 == 0 or i+1 == len(args)):
                            pf = (i+1, (i+1)/len(args)*100, time.time() - start)
                            print(' Saved {} files, {:.2f}% done in {:.3f}s'.format(*pf))
                del moddsets
            return
        # single-file mode below
        # compression opts
        compression = ({'compression': 'gzip', 'compression_opts': compression}
                       if compression is not None else {})
        comp_str = '_compressed' if len(compression) else ''
        # ensure directory exists
        if not os.path.exists(path):
            if verbose >= 2:
                print('Creating', path)
            os.mkdir(path)
        if verbose >= 2:
            print('Saving to', path)
        filename = name + comp_str
        hf = h5py.File(os.path.join(path, filename + '.h5'), 'w')
        # jets_i, with the column names attached as a dataset attribute
        jets_i = hf.create_dataset('jets_i', data=self.jets_i, **compression)
        jets_i.attrs.create('cols', np.asarray(self.jets_i_cols, dtype='S'))
        # jets_f
        jets_f = hf.create_dataset('jets_f', data=self.jets_f, **compression)
        jets_f.attrs.create('cols', np.asarray(self.jets_f_cols, dtype='S'))
        # pfcs: ragged per-jet arrays, written with an accompanying index
        if self.store_pfcs:
            pfcs = _write_large_object_array_to_h5(hf, 'pfcs', self.pfcs,
                                                   ncols=len(self.pfcs_cols), **compression)
            pfcs.attrs.create('cols', np.asarray(self.pfcs_cols, dtype='S'))
            hf.create_dataset('pfcs_index', data=_make_particles_index(self.pfcs), **compression)
        # gens
        if self.store_gens:
            gens = _write_large_object_array_to_h5(hf, 'gens', self.gens,
                                                   ncols=len(self.gens_cols), **compression)
            gens.attrs.create('cols', np.asarray(self.gens_cols, dtype='S'))
            hf.create_dataset('gens_index', data=_make_particles_index(self.gens), **compression)
        # filenames
        hf.create_dataset('filenames', data=self.filenames.astype('S'), **compression)
        # close
        hf.close()
        if verbose >= 1:
            args = (filename, len(self.jets_i), time.time() - start)
            print(' Saved {} with {} jets in {:.3f}s'.format(*args))
def close(self):
"""Closes the underlying HDF5 file, if one is associated with the
`MODDataset` object. Note that associated HDF5 files are closed by
default when the `MODDataset` object is deleted.
"""
if hasattr(self, '_hf'):
self._hf.close()
    ############
    # PROPERTIES
    ############
    @property
    def jets_i(self):
        """The `jets_i` array, described under [`MODDataset`](#moddataset)."""
        return self._jets_i
    @property
    def jets_f(self):
        """The `jets_f` array, described under [`MODDataset`](#moddataset)."""
        return self._jets_f
    @property
    def pfcs(self):
        """The `pfcs` array, described under [`MODDataset`](#moddataset)."""
        return self._pfcs if hasattr(self, '_pfcs') else None
    @property
    def gens(self):
        """The `gens` array, described under [`MODDataset`](#moddataset)."""
        return self._gens if hasattr(self, '_gens') else None
    @property
    def particles(self):
        """If this is a CMS or SIM dataset, `particles` is the same as `pfcs`;
        for GEN it is the same as `gens`.
        """
        return self._particles if hasattr(self, '_particles') else None
    @property
    def filenames(self):
        """The `filenames` array, described under [`MODDataset`](#moddataset)."""
        return self._filenames
    @property
    def hf(self):
        """The underlying HDF5 file, if one is associated to the `MODDataset`."""
        return self._hf if hasattr(self, '_hf') else None
    @property
    def jets_i_cols(self):
        """Column names of the `jets_i` array."""
        return self._jets_i_cols
    @property
    def jets_f_cols(self):
        """Column names of the `jets_f` array."""
        return self._jets_f_cols
    @property
    def pfcs_cols(self):
        """Column names of the `pfcs` array, or None if PFCs are not stored."""
        return self._pfcs_cols if hasattr(self, '_pfcs_cols') else None
    @property
    def gens_cols(self):
        """Column names of the `gens` array, or None if gens are not stored."""
        return self._gens_cols if hasattr(self, '_gens_cols') else None
    @property
    def particles_cols(self):
        """Column names of the `particles` array, or None if not set."""
        return self._particles_cols if hasattr(self, '_particles_cols') else None
|
{"hexsha": "ce3d4428bf405c6c671c72135b0f0d3c845ac236", "size": 55113, "ext": "py", "lang": "Python", "max_stars_repo_path": "env/lib/python3.7/site-packages/energyflow/datasets/mod.py", "max_stars_repo_name": "nickchak21/particledist", "max_stars_repo_head_hexsha": "59b788a894655273ec177a3a6bb4cf9526f8c402", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-01T19:47:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-01T19:47:13.000Z", "max_issues_repo_path": "env/lib/python3.7/site-packages/energyflow/datasets/mod.py", "max_issues_repo_name": "nickchak21/particledist", "max_issues_repo_head_hexsha": "59b788a894655273ec177a3a6bb4cf9526f8c402", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "env/lib/python3.7/site-packages/energyflow/datasets/mod.py", "max_forks_repo_name": "nickchak21/particledist", "max_forks_repo_head_hexsha": "59b788a894655273ec177a3a6bb4cf9526f8c402", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6215837421, "max_line_length": 107, "alphanum_fraction": 0.5964291546, "include": true, "reason": "import numpy", "num_tokens": 13259}
|
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import sys
import logging
import time
import tensorflow as tf
import numpy as np
tf.compat.v1.enable_v2_behavior()
# BARK imports
from bark.runtime.commons.parameters import ParameterServer
# tf agent imports
from tf_agents.drivers import dynamic_step_driver
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.environments import tf_py_environment
from tf_agents.metrics import tf_metrics
from tf_agents.eval import metric_utils
from tf_agents.utils import common
from tf_agents.trajectories import time_step as ts
# BARK-ML imports
from bark_ml.library_wrappers.lib_tf_agents.tfa_wrapper import TFAWrapper
from bark_ml.commons.tracer import Tracer
class TFARunner:
  """Base runner for TF-Agents-based agents in a BARK-ML environment.

  Wraps the BARK-ML environment in a tf-agents `TFPyEnvironment`, builds
  the dynamic episode drivers that fill the agent's replay buffer, and
  provides the training (`Train`) and evaluation (`Run`/`RunEpisode`)
  entry points. Subclasses implement the agent-specific `_train` method.
  """

  def __init__(self,
               environment=None,
               agent=None,
               tracer=None,
               params=None):
    """Initializes the runner and its collection drivers.

    Args:
      environment: BARK-ML environment the agent interacts with.
      agent: agent wrapper exposing `_agent` (the tf-agents agent),
        `_replay_buffer`, and `_eval_policy`.
      tracer: optional Tracer used to record per-step environment data;
        a fresh one is created if None.
      params: optional ParameterServer with runner configuration.
    """
    self._params = params or ParameterServer()
    self._eval_metrics = [
      tf_metrics.AverageReturnMetric(
        buffer_size=self._params["ML"]["TFARunner"]["EvaluationSteps", "", 25]),
      tf_metrics.AverageEpisodeLengthMetric(
        buffer_size=self._params["ML"]["TFARunner"]["EvaluationSteps", "", 25])
    ]
    self._agent = agent
    # NOTE(review): this assigns an attribute named 'set_action_externally'
    # rather than calling a method of that name; confirm this is how the
    # agent expects the flag to be set.
    self._agent.set_action_externally = True
    self._summary_writer = None
    self._environment = environment
    self._wrapped_env = tf_py_environment.TFPyEnvironment(
      TFAWrapper(self._environment))
    self.GetInitialCollectionDriver()
    self.GetCollectionDriver()
    self._logger = logging.getLogger()
    self._tracer = tracer or Tracer()

  def SetupSummaryWriter(self):
    """Creates a TensorBoard summary writer if a summary path is configured.

    Failure to create the writer is logged and otherwise ignored so that
    training can still proceed without summaries.
    """
    if self._params["ML"]["TFARunner"]["SummaryPath"] is not None:
      try:
        self._summary_writer = tf.summary.create_file_writer(
          self._params["ML"]["TFARunner"]["SummaryPath"])
      except Exception:
        # was a bare `except: pass`, which also swallowed SystemExit and
        # KeyboardInterrupt and hid configuration errors; log the reason
        # but keep running without summaries
        logging.exception("Could not create summary writer.")
    self.GetInitialCollectionDriver()
    self.GetCollectionDriver()

  def GetInitialCollectionDriver(self):
    """Builds the driver that pre-fills the replay buffer before training."""
    self._initial_collection_driver = \
      dynamic_episode_driver.DynamicEpisodeDriver(
        env=self._wrapped_env,
        policy=self._agent._agent.collect_policy,
        observers=[self._agent._replay_buffer.add_batch],
        num_episodes=self._params["ML"]["TFARunner"]["InitialCollectionEpisodes", "", 50])

  def GetCollectionDriver(self):
    """Builds the driver that collects episodes during each training step."""
    self._collection_driver = dynamic_episode_driver.DynamicEpisodeDriver(
      env=self._wrapped_env,
      policy=self._agent._agent.collect_policy,
      observers=[self._agent._replay_buffer.add_batch],
      num_episodes=self._params["ML"]["TFARunner"]["CollectionEpisodesPerStep", "", 1])

  def CollectInitialEpisodes(self):
    """Runs the initial collection driver to pre-fill the replay buffer."""
    self._initial_collection_driver.run()

  def Train(self):
    """Collects initial episodes, then runs the agent-specific training
    loop, inside the summary-writer context when one is configured."""
    self.CollectInitialEpisodes()
    if self._summary_writer is not None:
      with self._summary_writer.as_default():
        self._train()
    else:
      self._train()

  def _train(self):
    """Agent specific training loop; implemented by subclasses."""
    pass

  def ReshapeActionIfRequired(self, action_step):
    """Returns the action as a numpy array, reshaped to the shape the
    evaluation policy's action spec expects when it does not match."""
    action_shape = action_step.action.shape
    expected_shape = self._agent._eval_policy.action_spec.shape
    action = action_step.action.numpy()
    if action_shape != expected_shape:
      action = np.reshape(action, expected_shape)
    return action

  def RunEpisode(self, render=True, **kwargs):
    """Runs a single episode with the evaluation policy, tracing every
    step with the runner's tracer.

    Args:
      render: whether to render the environment after each step.
      **kwargs: forwarded to the tracer (e.g. `num_episode`).
    """
    state = self._environment.reset()
    is_terminal = False
    while not is_terminal:
      action_step = self._agent._eval_policy.action(
        ts.transition(state, reward=0.0, discount=1.0))
      action = self.ReshapeActionIfRequired(action_step)
      env_data = self._environment.step(action)
      self._tracer.Trace(env_data, **kwargs)
      # env_data is (state, reward, done, ...)-shaped; keep state and done
      state, is_terminal = env_data[0], env_data[2]
      if render:
        self._environment.render()

  def Run(self, num_episodes=10, render=False, mode="not_training", **kwargs):
    """Evaluates the agent over `num_episodes` episodes and logs summary
    statistics (collision rate, goal-reached rate, reward, step count).

    In mode "training" the statistics are also written as TF summaries at
    the agent's current global training step.
    """
    for i in range(0, num_episodes):
      # RunEpisode returns None; results accumulate in the tracer
      self.RunEpisode(render=render, **kwargs, num_episode=i)
    # average collision, reward, and step count
    mean_col_rate = self._tracer.Query(
      key="collision", group_by="num_episode", agg_type="ANY_TRUE").mean()
    mean_col_rate += self._tracer.Query(
      key="drivable_area", group_by="num_episode", agg_type="ANY_TRUE").mean()
    goal_reached = self._tracer.Query(
      key="goal_reached", group_by="num_episode", agg_type="ANY_TRUE").mean()
    mean_reward = self._tracer.Query(
      key="reward", group_by="num_episode", agg_type="SUM").mean()
    mean_steps = self._tracer.Query(
      key="step_count", group_by="num_episode", agg_type="LAST_VALUE").mean()
    if mode == "training":
      global_iteration = self._agent._agent._train_step_counter.numpy()
      tf.summary.scalar("mean_reward", mean_reward, step=global_iteration)
      tf.summary.scalar("mean_steps", mean_steps, step=global_iteration)
      tf.summary.scalar(
        "mean_collision_rate", mean_col_rate, step=global_iteration)
    self._logger.info(
      f"The agent achieved an average reward of {mean_reward:.3f}," +
      f" collision-rate of {mean_col_rate:.5f}, took on average" +
      f" {mean_steps:.3f} steps, and reached the goal " +
      f" {goal_reached:.3f} (evaluated over {num_episodes} episodes).")
    # reset tracer
    self._tracer.Reset()
|
{"hexsha": "0fdef245afde13ed611999bf57b4fd07c1621406", "size": 5563, "ext": "py", "lang": "Python", "max_stars_repo_path": "bark_ml/library_wrappers/lib_tf_agents/runners/tfa_runner.py", "max_stars_repo_name": "mansoorcheema/bark-ml", "max_stars_repo_head_hexsha": "349c0039a5f54778d6b7aea7fd18e3e979efc3a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bark_ml/library_wrappers/lib_tf_agents/runners/tfa_runner.py", "max_issues_repo_name": "mansoorcheema/bark-ml", "max_issues_repo_head_hexsha": "349c0039a5f54778d6b7aea7fd18e3e979efc3a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bark_ml/library_wrappers/lib_tf_agents/runners/tfa_runner.py", "max_forks_repo_name": "mansoorcheema/bark-ml", "max_forks_repo_head_hexsha": "349c0039a5f54778d6b7aea7fd18e3e979efc3a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5878378378, "max_line_length": 90, "alphanum_fraction": 0.7068128708, "include": true, "reason": "import numpy", "num_tokens": 1295}
|
#include <leatherman/windows/registry.hpp>
#include <leatherman/windows/system_error.hpp>
#include <leatherman/windows/windows.hpp>
#include <leatherman/locale/locale.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/nowide/convert.hpp>
// Mark string for translation (alias for leatherman::locale::format)
using leatherman::locale::_;
using namespace std;
namespace leatherman { namespace windows {
    // Constructs a registry error carrying a human-readable message;
    // simply forwards the message to the std::runtime_error base.
    registry_exception::registry_exception(string const& message) :
        runtime_error(message)
    {
    }
static HKEY get_hkey(registry::HKEY hkey)
{
switch (hkey) {
case registry::HKEY::CLASSES_ROOT: return HKEY_CLASSES_ROOT;
case registry::HKEY::CURRENT_CONFIG: return HKEY_CURRENT_CONFIG;
case registry::HKEY::CURRENT_USER: return HKEY_CURRENT_USER;
case registry::HKEY::LOCAL_MACHINE: return HKEY_LOCAL_MACHINE;
case registry::HKEY::PERFORMANCE_DATA: return HKEY_PERFORMANCE_DATA;
case registry::HKEY::PERFORMANCE_NLSTEXT: return HKEY_PERFORMANCE_NLSTEXT;
case registry::HKEY::PERFORMANCE_TEXT: return HKEY_PERFORMANCE_TEXT;
case registry::HKEY::USERS: return HKEY_USERS;
default:
throw registry_exception(_("invalid HKEY specified"));
}
}
    // Returns the registry value as a wstring buffer. It's up to the caller to interpret it.
    // This only really works for RRF_RT_REG_EXPAND_SZ, RRF_RT_REG_MULTI_SZ, RRF_RT_REG_SZ,
    // and RRF_RT_REG_DWORD.
    static wstring get_regvalue(registry::HKEY hkey, string const& lpSubKey, string const& lpValue, DWORD flags)
    {
        auto hk = get_hkey(hkey);
        // RegGetValueW takes UTF-16 strings, so widen the UTF-8 arguments.
        auto lpSubKeyW = boost::nowide::widen(lpSubKey);
        auto lpValueW = boost::nowide::widen(lpValue);
        // If we're getting a DWORD we don't care about buffer size and other stuff, so we only need
        // to call RegGetValueW once.
        if (flags == RRF_RT_REG_DWORD) {
            DWORD dwValue;
            DWORD dwSize = sizeof(dwValue);
            auto err = RegGetValueW(hk, lpSubKeyW.c_str(), lpValueW.c_str(), flags, nullptr, (LPBYTE)&dwValue, &dwSize);
            if (err != ERROR_SUCCESS) {
                throw registry_exception(_("error reading registry key {1} {2}: {3}",
                                           lpSubKey, lpValue, windows::system_error(err)));
            }
            // Render the numeric value as a decimal wide string; callers parse it back.
            return std::to_wstring(dwValue);
        }
        // First call with a null buffer asks for the required size (in bytes).
        DWORD size = 0u;
        auto err = RegGetValueW(hk, lpSubKeyW.c_str(), lpValueW.c_str(), flags, nullptr, nullptr, &size);
        if (err != ERROR_SUCCESS) {
            throw registry_exception(_("error reading registry key {1} {2}: {3}",
                                       lpSubKey, lpValue, windows::system_error(err)));
        }
        // Size is the number of bytes needed; allocate that many wchar_t's worth.
        wstring buffer((size*sizeof(char))/sizeof(wchar_t), '\0');
        // Second call actually copies the data into the buffer.
        err = RegGetValueW(hk, lpSubKeyW.c_str(), lpValueW.c_str(), flags, nullptr, &buffer[0], &size);
        if (err != ERROR_SUCCESS) {
            throw registry_exception(_("error reading registry key {1} {2}: {3}",
                                       lpSubKey, lpValue, windows::system_error(err)));
        }
        // Size now represents bytes copied (which can be less than we allocated). Resize, and also remove the
        // extraneous null-terminator from RegGetValueW (wstring handles termination internally).
        auto numwchars = (size*sizeof(char))/sizeof(wchar_t);
        buffer.resize(numwchars > 0u ? numwchars - 1u : 0u);
        return buffer;
    }
unsigned long registry::get_registry_dword(registry::HKEY hkey, string const& subkey, string const& value)
{
return std::stoi(get_regvalue(hkey, subkey, value, RRF_RT_REG_DWORD));
}
string registry::get_registry_string(registry::HKEY hkey, string const& subkey, string const& value)
{
// From http://msdn.microsoft.com/en-us/library/windows/desktop/ms724868(v=vs.85).aspx
// "RRF_RT_REG_SZV automatically converts REG_EXPAND_SZ to REG_SZ unless RRF_NOEXPAND is specified."
// This seems like the desired behavior most of the time.
return boost::nowide::narrow(get_regvalue(hkey, subkey, value, RRF_RT_REG_SZ));
}
vector<string> registry::get_registry_strings(registry::HKEY hkey, string const& subkey, string const& value)
{
auto buffer = get_regvalue(hkey, subkey, value, RRF_RT_REG_MULTI_SZ);
vector<string> strings;
wstring accum;
for (auto c : buffer) {
if (c == L'\0') {
string val = boost::trim_copy(boost::nowide::narrow(accum));
if (!val.empty()) {
strings.emplace_back(move(val));
}
accum.clear();
} else {
accum += c;
}
}
return strings;
}
}} // namespace leatherman::windows
|
{"hexsha": "a7e313ac7114c9c6fd1e4a1a64ff623745519097", "size": 4937, "ext": "cc", "lang": "C++", "max_stars_repo_path": "windows/src/registry.cc", "max_stars_repo_name": "gimmyxd/leatherman", "max_stars_repo_head_hexsha": "1215b70591c9386a34e2ca6f640dd4db40f942a6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 55.0, "max_stars_repo_stars_event_min_datetime": "2015-08-27T13:17:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T15:19:59.000Z", "max_issues_repo_path": "windows/src/registry.cc", "max_issues_repo_name": "gimmyxd/leatherman", "max_issues_repo_head_hexsha": "1215b70591c9386a34e2ca6f640dd4db40f942a6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 236.0, "max_issues_repo_issues_event_min_datetime": "2015-02-23T23:50:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-01T18:09:12.000Z", "max_forks_repo_path": "windows/src/registry.cc", "max_forks_repo_name": "gimmyxd/leatherman", "max_forks_repo_head_hexsha": "1215b70591c9386a34e2ca6f640dd4db40f942a6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 88.0, "max_forks_repo_forks_event_min_datetime": "2015-02-23T22:40:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T15:19:59.000Z", "avg_line_length": 42.1965811966, "max_line_length": 120, "alphanum_fraction": 0.6311525218, "num_tokens": 1188}
|
# Marker types recording, for each dimension of an InterleavedImage,
# whether that dimension alternates between the two child arrays.
abstract type InterleaveMarker end
struct Iyes <: InterleaveMarker end  # dimension is interleaved
struct Ino <: InterleaveMarker end   # dimension is not interleaved
# Lazy array presenting two same-sized arrays as a single array that is
# twice as long along the interleaved dimension: odd indices along that
# dimension read from `oddA`, even indices from `evenA`.
struct InterleavedImage{T,N,AA1<:AbstractArray{T,N}, AA2<:AbstractArray{T,N}, IMS<:NTuple{N,InterleaveMarker}} <: AbstractArray{T,N}
    oddA::AA1
    evenA::AA2
    imarkers::IMS  # one marker per dimension; the public constructor sets exactly one Iyes
end
# Public constructor: interleave `oddA` and `evenA` along dimension `idim`
# (the last dimension by default). Both arrays must have the same size.
function InterleavedImage(oddA::AbstractArray{T,N}, evenA::AbstractArray{T,N}, idim::Int=N) where {T,N}
    size(oddA) == size(evenA) || error("The two image arrays must be of the same size")
    1 <= idim <= N || error("Interleaved dim out of range")
    # build one marker per dimension, with Iyes only at the interleaved one
    markers = ntuple(d -> d == idim ? Iyes() : Ino(), N)
    return InterleavedImage(oddA, evenA, markers)
end
# Accessors for the marker tuple and the two child arrays.
imarkers(img::InterleavedImage) = img.imarkers
# Index of the interleaved dimension (position of the Iyes marker).
interleaved_dim(A::InterleavedImage) = findfirst(isa.(imarkers(A), Iyes))
oddchild(A::InterleavedImage) = A.oddA
evenchild(A::InterleavedImage) = A.evenA
Base.IndexStyle(::Type{<:InterleavedImage}) = IndexCartesian()
# Recursively compute the apparent size: the interleaved dimension is
# reported as twice the child size; other dimensions match the children.
_size(szs::Tuple, curm::Iyes, ims...) = (first(szs)*2, _size(Base.tail(szs), ims...)...)
_size(szs::Tuple, curm::Ino, ims...) = (first(szs), _size(Base.tail(szs), ims...)...)
_size(szs::Tuple{}, ims...) = ()
# NOTE(review): extends `size`; presumably `Base.size` is imported earlier
# in this file — confirm against the module's import statements.
size(B::InterleavedImage) = _size(size(B.oddA), B.imarkers...)
#returns the appropriate child array and translates the query I
#into an index for that array
function arr_idx(img::InterleavedImage{T,N}, I::Tuple) where {T,N}
    markers = imarkers(img)
    child = _chooseimage(img, I, markers...)
    return child, _arr_idx(markers, I)
end
# Walk the markers alongside the index tuple: at the Iyes position, an odd
# index selects the odd child and an even index selects the even child.
@inline _chooseimage(img, I, ::Ino, rest...) = _chooseimage(img, Base.tail(I), rest...)
@inline _chooseimage(img, I, ::Iyes, rest...) = return isodd(first(I)) ? oddchild(img) : evenchild(img)
@inline _chooseimage(img, I) = error("no yes markers found")
# Translate each outer index into a child-array index, marker by marker.
_arr_idx(::Tuple{}, ::Tuple{}) = ()
_arr_idx(markers, I) = (_arr_idx1(first(markers), first(I)), _arr_idx(Base.tail(markers), Base.tail(I))...)
# Interleaved dimension: outer indices (1,2)->1, (3,4)->2, ... i.e. ceil(i/2).
_arr_idx1(::Iyes, i) = i>>1 + isodd(i)
# Non-interleaved dimensions pass through unchanged.
_arr_idx1(::Ino, i) = i
# Elementwise read: route the Cartesian index to the odd or even child array.
# (`I` is already a tuple inside the body, so no re-splatting is needed.)
function getindex(img::InterleavedImage{T,N}, I::Vararg{Int, N}) where {T,N}
    child, childidx = arr_idx(img, I)
    return child[childidx...]
end
# Elementwise write: same routing as `getindex`.
function setindex!(img::InterleavedImage{T,N}, v, I::Vararg{Int, N}) where {T,N}
    child, childidx = arr_idx(img, I)
    child[childidx...] = v
end
#utilities for ImageMeta and AxisArray types
#Note: this ditches img2's properties in favor of img1's. Will fix if it turns out to be a problem.
InterleavedImage(img1::ImageMeta{T,N}, img2::ImageMeta{T,N}, idim::Int=N) where {T,N} =
    ImageMeta(InterleavedImage(arraydata(img1), arraydata(img2), idim), properties(img1))
# Wrap the interleaved result so it carries img1's axis spacing information.
InterleavedImage(img1::AxisArray{T,N}, img2::AxisArray{T,N}, idim::Int=N) where {T,N} =
    match_axisspacing(InterleavedImage(arraydata(img1), arraydata(img2), idim), img1)
|
{"hexsha": "ff5a53633ae27e87545b5d50d244e25757de6e56", "size": 2773, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/interleaved.jl", "max_stars_repo_name": "HolyLab/InterleavedImages.jl", "max_stars_repo_head_hexsha": "fb15e99d6bcc55af603e2316c769350737464c62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-01T19:17:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-01T19:17:24.000Z", "max_issues_repo_path": "src/interleaved.jl", "max_issues_repo_name": "HolyLab/InterleavedImages.jl", "max_issues_repo_head_hexsha": "fb15e99d6bcc55af603e2316c769350737464c62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-22T16:03:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-24T02:26:15.000Z", "max_forks_repo_path": "src/interleaved.jl", "max_forks_repo_name": "HolyLab/InterleavedImages.jl", "max_forks_repo_head_hexsha": "fb15e99d6bcc55af603e2316c769350737464c62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1884057971, "max_line_length": 132, "alphanum_fraction": 0.6822935449, "num_tokens": 882}
|
def us_counties_Data2Dict(RemoveEmptyFips=False, RemoveUnknownCounties=False):
    """Import data/us/covid/nyt_us_counties_daily.csv into a dictionary.

    Parameters
    ----------
    RemoveEmptyFips : bool
        If True, drop rows whose fips field is empty; otherwise empty fips
        entries are replaced with '0'.
    RemoveUnknownCounties : bool
        If True, drop rows whose county field is 'Unknown'.

    Returns
    -------
    dict
        One numpy array per CSV column, plus derived keys:
        'coords' (lat/lon looked up from GeoDict by fips, NaN if unknown)
        and 'day' (days since January 1st, via DateProcess).
    """
    import numpy as np
    import sys
    import pickle
    import os
    import git

    # Work from the repository root so the relative data paths below resolve.
    repo = git.Repo('.', search_parent_directories=True)
    cwd = repo.working_dir
    os.chdir(cwd)
    sys.path.append('Josh/Processing/')
    from dateProcess import DateProcess

    # Load the fips -> coordinate lookup table.  A context manager guarantees
    # the file handle is closed even if unpickling raises.
    with open('Josh/Processing/Processed Data/GeoDict.pkl', 'rb') as File:
        GeoDict = pickle.load(File)

    data = np.loadtxt('data/us/covid/nyt_us_counties_daily.csv', dtype=str, delimiter=',')

    # Remove (or zero-fill) rows with no fips code.
    FipsCol = np.nonzero(data[0] == 'fips')[0][0]
    if RemoveEmptyFips:
        data = data[data[:, FipsCol] != '']
    else:
        data[data[:, FipsCol] == '', FipsCol] = '0'
    if RemoveUnknownCounties:
        CountyCol = np.nonzero(data[0] == 'county')[0][0]
        data = data[data[:, CountyCol] != 'Unknown']

    # First row is the header; build one array per column.
    DataDict = {data[0][i]: data[1:, i] for i in range(data.shape[1])}

    # Keys for variables. If changed an error is thrown and must be updated manually
    Keys = ['date', 'county', 'state', 'fips', 'cases', 'deaths']
    for key in Keys:
        if key not in DataDict:
            raise ValueError("Column Headers changed; update keys in Josh/Processing/nyt_us_counties_Import.py")

    # Convert fips data from str to int, then map each fips to a coordinate
    # pair; fips codes missing from GeoDict get NaN coordinates.
    DataDict['fips'] = DataDict['fips'].astype(int)
    for fip in DataDict['fips']:
        if fip not in GeoDict:
            GeoDict[fip] = np.array([np.nan, np.nan])
    DataDict['coords'] = np.array([GeoDict[fip] for fip in DataDict['fips']])

    # Convert Dates into day since January 1st
    DataDict['day'] = np.array([[DateProcess(d)] for d in DataDict['date']])

    # Counts arrive as strings like '12.0'; go via float before casting to int.
    DataDict['cases'] = DataDict['cases'].astype(float).astype(int)
    DataDict['deaths'] = DataDict['deaths'].astype(float).astype(int)
    return DataDict
|
{"hexsha": "0333949b2644b4fd9d027f9f8aaf2712da0debe5", "size": 2198, "ext": "py", "lang": "Python", "max_stars_repo_path": "Josh/Processing/nyt_us_counties_Import2.py", "max_stars_repo_name": "aco8ogren/Tentin-Quarantino", "max_stars_repo_head_hexsha": "08b494f5deb2c33e3bb5981135c780b0a34d5557", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Josh/Processing/nyt_us_counties_Import2.py", "max_issues_repo_name": "aco8ogren/Tentin-Quarantino", "max_issues_repo_head_hexsha": "08b494f5deb2c33e3bb5981135c780b0a34d5557", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Josh/Processing/nyt_us_counties_Import2.py", "max_forks_repo_name": "aco8ogren/Tentin-Quarantino", "max_forks_repo_head_hexsha": "08b494f5deb2c33e3bb5981135c780b0a34d5557", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5614035088, "max_line_length": 112, "alphanum_fraction": 0.6656050955, "include": true, "reason": "import numpy", "num_tokens": 592}
|
/*****************************************************************************
* Licensed to Qualys, Inc. (QUALYS) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* QUALYS licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
****************************************************************************/
/**
* @file
* @brief IronBee --- Constant Module
*
* This module adds constants to IronBee. Constants can be set at
* configuration time and used via a variety of APIs:
*
* - Rules can access constants via the `CONSTANT` var. E.g., `CONSTANT:foo`.
* - Configuration files can set constants via the `ConstantSet` directive.
* - Other modules can access constants via ib_constant_get() and
* ib_constant_set().
*
* The `ConstantSet` directive can be called in two ways:
* - `ConstantSet key` sets the constant `key` to the empty string. This is
* useful for sending "boolean" constants that are either true (defined) or
* false (not defined).
* - `ConstantSet key value` sets the constant `key` to the string `value`.
*
* @author Christopher Alfeld <calfeld@qualys.com>
*/
#include <ironbee/module/constant.h>
#include <ironbeepp/all.hpp>
#include <ironbee/string.h>
#include <boost/bind.hpp>
using namespace std;
using namespace IronBee;
namespace {
//! Name of oracle variable, as seen by rules (e.g. `CONSTANT:foo`).
const char* c_oracle_var = "CONSTANT";
/**
 * Map of constant key to constant value.
 *
 * We store lists of fields because that's what the oracle is required
 * to return and it can't allocate them on the fly because it doesn't
 * know an appropriate lifetime.  Each list holds exactly one field; see
 * Delegate::set().
 */
typedef map<string, List<ConstField> > map_t;
class Delegate;
//! Per context data.
// NOTE(review): the framework copy-constructs this when creating child
// contexts, so children presumably start with the parent's constants — the
// "Copied from parent" note below relies on that; confirm.
struct per_context_t
{
    //! Constructor.
    explicit
    per_context_t(Delegate* delegate_) : delegate(delegate_) {}
    //! Constructor (default; delegate filled in later).
    per_context_t() : delegate(NULL) {}
    //! Constants. Note: Copied from parent by copy constructor.
    map_t constants;
    //! Delegate. Used by external API (ib_module_constant_get/set).
    Delegate* delegate;
};
//! Delegate: module implementation.  Owns the oracle var source, the shared
//! empty list, and the ConstantSet directive handler; constants themselves
//! live in per-context data (per_context_t).
class Delegate :
    public IronBee::ModuleDelegate
{
public:
    //! Constructor.  Registers directives, hooks, and the CONSTANT var.
    explicit
    Delegate(IronBee::Module module);
    /**
     * Set a constant.
     *
     * @param[in] context Context of constant.
     * @param[in] value Value; name will be used as key.
     *
     * @throw einval if constant with given key already exists.
     * @throw bad_alloc on allocation failure.
     **/
    void set(Context context, ConstField value);
    /**
     * Get a constant.
     *
     * @note Returns a singular Field if no such constant rather than throwing
     * enoent.
     *
     * @param[in] context Context of constant.
     * @param[in] key Key.
     * @param[in] key_length Length of @a key.
     * @returns Value if constant with given key exists and Field() if not.
     **/
    ConstField get(ConstContext context, const char* key, size_t key_length) const;
private:
    //! Get per-context data for context.
    per_context_t& get_per_context(Context context);
    //! Get per-context data for context (const overload).
    const per_context_t& get_per_context(ConstContext context) const;
    //! Hook for context transaction event. Setup Oracle.
    void on_context_transaction(IronBee::Transaction tx) const;
    /**
     * Get a dynamic field for accessing constants.
     *
     * @param[in] context Context of constants.
     * @param[in] mm Memory manager to determine lifetime of field.
     * @returns Dynamic field for accessing constants of this context.
     **/
    Field oracle(Context context, MemoryManager mm) const;
    //! Oracle getter function; forwards to oracle_get().
    ConstList<ConstField> oracle_get(ConstContext context, const char* key, size_t key_length) const;
    //! Oracle setter function; throws IronBee::einval.
    void oracle_set() const;
    /**
     * Handle `ConstantSet` directive.
     *
     * @param[in] cp Configuration parser.
     * @param[in] directive_name Name of directive.
     * @param[in] params Parameters of directive.
     * @throw IronBee::einval on too few parameters or already existent set.
     **/
    void dir_set(
        IronBee::ConfigurationParser cp,
        const char* directive_name,
        IronBee::List<const char*> params
    );
    //! Var source for oracle.
    VarSource m_oracle_source;
    //! An empty list to return for no-such-constant.
    ConstList<ConstField> m_empty_list;
};
} // Anonymous
IBPP_BOOTSTRAP_MODULE_DELEGATE("constant", Delegate)
// Implementation
// Reopen for doxygen; not needed by C++.
namespace {
Delegate::Delegate(IronBee::Module module) :
    IronBee::ModuleDelegate(module),
    // Shared empty list for no-such-constant results; engine lifetime.
    m_empty_list(
        List<ConstField>::create(module.engine().main_memory_mm())
    )
{
    // Attach per-context storage; every context's copy points back here.
    module.set_configuration_data<per_context_t>(per_context_t(this));
    // `ConstantSet key [value]` — see dir_set().
    module.engine().register_configuration_directives()
        .list(
            "ConstantSet",
            boost::bind(&Delegate::dir_set, this, _1, _2, _3)
        )
        ;
    // Populate the CONSTANT var for each transaction.
    module.engine().register_hooks()
        .handle_context_transaction(
            boost::bind(&Delegate::on_context_transaction, this, _2)
        )
        ;
    // Register the CONSTANT var source.
    // NOTE(review): both initial and final phase are REQUEST_HEADER — confirm
    // this is the intended availability window.
    m_oracle_source = VarSource::register_(
        module.engine().var_config(),
        IB_S2SL(c_oracle_var),
        IB_PHASE_REQUEST_HEADER, IB_PHASE_REQUEST_HEADER
    );
}
void Delegate::on_context_transaction(IronBee::Transaction tx) const
{
    // Expose this context's constants to the transaction by attaching a
    // dynamic oracle field to its var store.
    Field oracle_field = oracle(tx.context(), tx.memory_manager());
    m_oracle_source.set(tx.var_store(), oracle_field);
}
void Delegate::set(Context context, ConstField value)
{
    map_t& constants = get_per_context(context).constants;
    const string key_s(value.name(), value.name_length());

    // Constants are write-once: reject duplicate keys.
    if (constants.find(key_s) != constants.end()) {
        BOOST_THROW_EXCEPTION(einval() << errinfo_what(
            "Constant " + key_s + " already exists."
        ));
    }

    // The oracle must return a list, so wrap the value in a one-element list
    // allocated from the engine-lifetime memory manager.
    MemoryManager mm = module().engine().main_memory_mm();
    List<ConstField> wrapped = List<ConstField>::create(mm);
    wrapped.push_back(value);
    constants.insert(map_t::value_type(key_s, wrapped));
}
ConstField Delegate::get(ConstContext context, const char* key, size_t key_length) const
{
    // The oracle yields an empty list for unknown keys; translate that into a
    // singular field rather than throwing enoent.
    ConstList<ConstField> found = oracle_get(context, key, key_length);
    if (! found.empty()) {
        assert(found.size() == 1);
        return found.front();
    }
    return ConstField();
}
Field Delegate::oracle(Context context, MemoryManager mm) const
{
    // Dynamic list field named CONSTANT: reads forward to oracle_get() for
    // this context; any write attempt is rejected by oracle_set().
    return Field::create_dynamic_list<ConstField>(
        mm,
        IB_S2SL(c_oracle_var),
        boost::bind(&Delegate::oracle_get, this, context, _2, _3),
        boost::bind(&Delegate::oracle_set, this)
    );
}
ConstList<ConstField> Delegate::oracle_get(ConstContext context, const char* key, size_t key_length) const
{
    // Look the key up in this context's constant table; unknown keys yield
    // the shared empty list rather than an error.
    const map_t& table = get_per_context(context).constants;
    map_t::const_iterator found = table.find(string(key, key_length));
    if (found == table.end()) {
        return m_empty_list;
    }
    return found->second;
}
void Delegate::oracle_set() const
{
    // Constants are read-only through the var; any write attempt is an error.
    BOOST_THROW_EXCEPTION(
        einval() << errinfo_what("Can not set constants through oracle.")
    );
}
void Delegate::dir_set(
    IronBee::ConfigurationParser cp,
    const char* directive_name,
    List<const char*> params
)
{
    const char* key;
    const char* value_string;
    MemoryManager mm = module().engine().main_memory_mm();
    if (params.size() < 1 || params.size() > 2) {
        // Fix: params.size() is a size_t, so the correct length modifier is
        // %zu; %zd is for the signed ssize_t and is undefined behavior here.
        ib_cfg_log_error(
            cp.ib(),
            "%s takes 1 or 2 arguments; has %zu.",
            directive_name,
            params.size()
        );
        BOOST_THROW_EXCEPTION(einval());
    }
    // First parameter is the key; the optional second is the value
    // (defaults to the empty string for "boolean" constants).
    List<const char*>::const_iterator i = params.begin();
    key = *i;
    ++i;
    if (i != params.end()) {
        value_string = *i;
    }
    else {
        value_string = "";
    }
    // Store as a byte-string field named after the key; set() enforces
    // write-once semantics and throws einval on duplicates.
    set(
        cp.current_context(),
        Field::create_byte_string(
            mm,
            IB_S2SL(key),
            ByteString::create(mm, value_string)
        )
    );
}
const per_context_t& Delegate::get_per_context(ConstContext context) const
{
    // Immediately adding appropriate const to result.
    // configuration_data() requires a mutable Context even for read access,
    // so strip const here; the returned reference is const-qualified.
    return module().configuration_data<per_context_t>(
        Context::remove_const(context)
    );
}
// Mutable overload: direct per-context data access.
per_context_t& Delegate::get_per_context(Context context)
{
    return module().configuration_data<per_context_t>(context);
}
} // Anonymous
extern "C" {
ib_status_t ib_module_constant_get(
    const ib_field_t** value,
    const ib_context_t *ctx,
    const char *key,
    size_t key_length
)
{
    assert(value);
    assert(ctx);
    assert(key);

    ConstContext context(ctx);
    Module constant_module = Module::with_name(context.engine(), "constant");
    if (! constant_module) {
        return IB_EOTHER;
    }
    try {
        // We promise not to modify the data, but configuration data access
        // needs a non-const context.
        per_context_t& pc = constant_module.configuration_data<per_context_t>(
            Context::remove_const(context)
        );
        ConstField result = pc.delegate->get(context, key, key_length);
        *value = result.ib();
    }
    catch (...) {
        return convert_exception(constant_module.engine());
    }
    return IB_OK;
}
ib_status_t ib_module_constant_set(
    ib_context_t* ctx,
    const ib_field_t *value
)
{
    assert(ctx);
    assert(value);

    Context context(ctx);
    Module constant_module = Module::with_name(context.engine(), "constant");
    if (! constant_module) {
        return IB_EOTHER;
    }
    try {
        per_context_t& pc =
            constant_module.configuration_data<per_context_t>(context);
        pc.delegate->set(context, ConstField(value));
    }
    catch (...) {
        return convert_exception(constant_module.engine());
    }
    return IB_OK;
}
}
namespace IronBee {
namespace Constant {
//! Set constant @a value (named by its field name) in @a ctx; throws on error.
void set(Context ctx, ConstField value)
{
    throw_if_error(ib_module_constant_set(ctx.ib(), value.ib()));
}
//! Get constant @a key; returns a singular field if no such constant.
ConstField get(ConstContext ctx, const char* key, size_t key_length)
{
    const ib_field_t* result;
    throw_if_error(ib_module_constant_get(&result, ctx.ib(), key, key_length));
    return ConstField(result);
}
//! Overload: NUL-terminated key.
ConstField get(ConstContext ctx, const char* key)
{
    return get(ctx, IB_S2SL(key));
}
//! Overload: byte-string key.
ConstField get(ConstContext ctx, ConstByteString key)
{
    return get(ctx, key.const_data(), key.size());
}
//! Overload: std::string key.
ConstField get(ConstContext ctx, const std::string& key)
{
    return get(ctx, key.data(), key.length());
}
}
}
|
{"hexsha": "1b269110bf3e2912ecae039da0aeee797c0fc879", "size": 11086, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "modules/constant.cpp", "max_stars_repo_name": "b1v1r/ironbee", "max_stars_repo_head_hexsha": "97b453afd9c3dc70342c6183a875bde22c9c4a76", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 148.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T01:53:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T20:48:12.000Z", "max_issues_repo_path": "modules/constant.cpp", "max_issues_repo_name": "ErikHendriks/ironbee", "max_issues_repo_head_hexsha": "97b453afd9c3dc70342c6183a875bde22c9c4a76", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2015-03-09T15:50:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-10T19:23:06.000Z", "max_forks_repo_path": "modules/constant.cpp", "max_forks_repo_name": "ErikHendriks/ironbee", "max_forks_repo_head_hexsha": "97b453afd9c3dc70342c6183a875bde22c9c4a76", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 46.0, "max_forks_repo_forks_event_min_datetime": "2015-03-08T22:45:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T13:47:59.000Z", "avg_line_length": 26.713253012, "max_line_length": 106, "alphanum_fraction": 0.6442359733, "num_tokens": 2549}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
# This file contains two subroutines
# - constructmodel(riperes, kwargs) : constructs a steady-state pyomo reactor model for use in error maximization sampling
# - ems(riperes, sim, lb, ub, nspec, kwargs) : performs error maximization sampling
import numpy as np
import rbfopt
import os
# import sys
# import random
import datetime
import time
# pkg
from idaes.apps import ripe
def constructmodel(riperes, **kwargs):
    """Build a steady-state pyomo reactor simulator from ripemodel() results.

    Returns a closure `ripesim(data)` that constructs and solves a pyomo
    model for one input point and returns the solved concentrations.
    """
    # Inputs:
    # riperes - output of ripemodel()
    # kwargs - kwargs supplied to ripemodel()
    # Outputs:
    # ripesim - pyomo model
    # sharedata must be supplied explicitly
    if "sharedata" in kwargs.keys():
        sharedata = kwargs["sharedata"]
    else:
        sharedata = ripe.sharedata
    # Define gas constant for clarity later
    gc = sharedata["gasconst"]
    # determine type of kinetic form used:
    # 1 = no activation energy, 2 = Arrhenius, 3 = reformulated Arrhenius (Tref)
    if "E" in riperes.keys():
        acte = riperes["E"]
        if "Tref" in kwargs.keys():
            Tref = sharedata["Tref"]
            ptype = 3
        else:
            ptype = 2
    else:
        ptype = 1
    prek = riperes["k"]
    mechs = riperes["mechanisms"]
    stoichs = riperes["stoichiometry"]
    ns = len(stoichs[0])  # number of species
    nr = len(mechs)       # number of reactions
    # replace 'massact' with species specific mechanisms (required of rbfopt)
    for i in range(nr):
        if mechs[i] == "massact":
            mechs[i] = ripe.mechs.mechperstoich(mechs[i], stoichs[i])
    # Define a steady-state reactor model in pyomo using ripe results
    def ripesim(data):
        import pyomo.environ as pyo
        # ripesim expects input in a particular order:
        # data[0:ns] = inlet concentrations, data[ns] = temperature (if used),
        # data[ns+1:2*ns+1] = flows, data[2*ns+1] = volume.
        # NOTE(review): the layout above is inferred from the indexing below;
        # when ptype == 1 data[ns] appears unused — confirm with callers.
        model = pyo.ConcreteModel()
        # s is index over species
        model.s = pyo.RangeSet(ns)
        # r over reactions
        model.r = pyo.RangeSet(nr)
        # initialize variable from data
        model.conc0 = pyo.Param(
            model.s, initialize=dict(((s), data[s - 1]) for s in model.s)
        )
        if ptype > 1:
            model.T = pyo.Param(initialize=data[ns])
            model.E = pyo.Param(
                model.r, initialize=dict(((r), acte[r - 1]) for r in model.r)
            )
        # Stoichiometric coefficients nu[r, s].
        model.nu = pyo.Param(
            model.r,
            model.s,
            initialize=dict(
                ((r, s), stoichs[r - 1][s - 1]) for r in model.r for s in model.s
            ),
        )
        model.conc = pyo.Var(model.s, domain=pyo.NonNegativeReals, initialize=1.0)
        # Best-effort: fall back to unit flows/volume when data is too short.
        try:
            model.flow = pyo.Param(
                model.s, initialize=dict(((s), data[ns + s]) for s in model.s)
            )
        except Exception:
            model.flow = pyo.Param(model.s, initialize=1.0)
        try:
            model.vol = pyo.Param(initialize=float(data[2 * ns + 1]))
        except Exception:
            model.vol = pyo.Param(initialize=1.0)
        model.k = pyo.Param(
            model.r, initialize=dict(((r), prek[r - 1]) for r in model.r)
        )
        model.rate = pyo.Var(model.r, initialize=0.0)
        # dum is a slack on the balance; the objective drives it to zero.
        model.dum = pyo.Var(model.s, initialize=0.0)
        # define predicted rates of generation
        # different problem types require different rate forms
        def prates_1(model, r):
            # No temperature dependence.
            return model.rate[r] == model.k[r] * mechs[r - 1](*model.conc[:])
        def prates_2(model, r):
            # Arrhenius form: k * exp(-E / (R*T)).
            indata = [model.conc[i] for i in model.s] + [data[ns]]
            return model.rate[r] == model.k[r] * pyo.exp(
                -(model.E[r] / (gc * model.T))
            ) * mechs[r - 1](*indata)
        def prates_3(model, r):
            # Reformulated Arrhenius around reference temperature Tref.
            indata = [model.conc[i] for i in model.s] + [data[ns]]
            return model.rate[r] == model.k[r] * pyo.exp(
                -(model.E[r] / (gc)) * (1 / (model.T) - 1 / (Tref))
            ) * mechs[r - 1](*indata)
        if ptype == 1:
            trule = prates_1
        elif ptype == 2:
            trule = prates_2
        else:
            trule = prates_3
        model.rc = pyo.Constraint(model.r, rule=trule)
        # Steady-state species balance: inflow - outflow + generation = slack.
        def balance(model, s):
            return 100.0 * model.dum[s] == model.flow[s] * (1.0 / model.vol) * (
                model.conc0[s] - model.conc[s]
            ) + sum([model.nu[i, s] * model.rate[i] for i in model.r])
        model.bal = pyo.Constraint(model.s, rule=balance)
        # Minimize squared slacks so the balances are satisfied at the optimum.
        def obj(model):
            return sum([model.dum[i] ** 2 for i in model.s])
        model.OBJ = pyo.Objective(rule=obj)
        opt = pyo.SolverFactory(sharedata["minlp_path"])
        results = opt.solve(model, tee=sharedata["showpyomo"])
        model.solutions.store_to(results)
        # Extract solved concentrations in species order.
        conres = []
        for i in range(ns):
            conres.append(
                results.Solution.Variable["conc[" + str(i + 1) + "]"]["Value"]
            )
        return conres
    return ripesim
def ems(riperes, sim, lb, ub, nspec, **kwargs):
    """Error maximization sampling: propose the next experiment point.

    Uses RBFopt to maximize the discrepancy between the black-box simulator
    `sim` and the RIPE surrogate over the box [lb, ub].
    """
    # This subroutine performs error maximization sampling
    # Inputs:
    # riperes - results from ripemodel()
    # sim - Original black-box simulator (callable in python)
    # lb/ub - bounds for each independent variable
    # nspec - number of species (can be different than #lb/ub)
    # Outputs:
    # x - next best input point
    # errs - absolute errors obtained on predicted point
    # Non-default options can be provided explicitly through kwargs
    if "sharedata" in kwargs.keys():
        sharedata = kwargs["sharedata"]
    else:
        sharedata = ripe.sharedata
    # poskeys = sharedata["ivars"] + ["x"]
    inkeys = list(set(kwargs.keys()) - set(["sharedata"]))
    ndim = len(lb)
    ns = nspec
    # Call atermconstruct.formatinputs to get input data
    # in a convenient form; dflag records whether the caller supplied data.
    if "x" in inkeys:
        conc = kwargs["x"]
        dflag = True
    else:
        conc = [1] * ns
        dflag = False
    if "frac" in kwargs.keys():
        dofrac = True
        # conc[i] = kwargs["frac"](conc[i]) # i not defined?
    else:
        dofrac = False
    data, kwargs, fdata, pc, alldata = ripe.atermconstruct.formatinputs(conc, kwargs)
    # Handle inkeys exceptions and (possibly) generate a pyomo model
    if "Tref" in inkeys:
        Tref = sharedata["Tref"]
    if "res_sim" in inkeys:
        res_sim = kwargs["res_sim"]
    else:
        if "Tref" in inkeys:
            res_sim = constructmodel(riperes, sharedata=sharedata, Tref=Tref)
        else:
            res_sim = constructmodel(riperes, sharedata=sharedata)
    ndim = len(lb)
    nd, ns = np.shape(fdata)
    if "T" in inkeys:
        params = [[], []]
        params[0] = riperes["k"]
        params[1] = riperes["E"]
    else:
        params = riperes["k"]
    # Check for multiple requested points
    # if "nreq" in inkeys:
    #     nreq = kwargs["nreq"]
    # else:
    #     nreq = 1
    # subroutine for using fractional arguments (mole fracs or partial pressure)
    def apply_frac(x, doit=True):
        if doit:
            return kwargs["frac"](x[:])
        else:
            return x
    # NOTE(review): the three closures below mutate their argument in place
    # via `x[:] = ...`; callers see the fractional transform applied.  Confirm
    # this side effect is intended.
    def check_fun(x):
        # Check fun is used to evaluate errors of the current model on provided data
        x[:] = apply_frac(x[:], dofrac)
        return -1.0 * np.sum(
            np.divide(
                np.power(np.subtract(x[:ns], res_sim(x[ns:])), 2), riperes["sigma"]
            )
        )
    def max_err(x):
        # max_err determines the absolute errors (max called later)
        x[:] = apply_frac(x[:], dofrac)
        errs = np.absolute(np.subtract(sim(x), res_sim(x)))[0]
        return errs
    def error_fun(x):
        # error fun is the black box objective (negated relative squared error,
        # since RBFopt minimizes)
        x[:] = apply_frac(x[:], dofrac)
        sim_conc = sim(x[:])
        res_conc = res_sim(x[:])
        return -1.0 * np.sum(
            np.power(np.divide(np.subtract(sim_conc, res_conc), sim_conc), 2)
        )
    t_targets = np.zeros([nd, 1])
    if dflag:
        for i in range(nd):
            t_targets[i] = check_fun(alldata[i][:])
    bb = rbfopt.RbfoptUserBlackBox(
        ndim, np.array(lb), np.array(ub), np.array(["R"] * ndim), error_fun
    )
    # Request that any point returned be no closer than the other closest point
    distance = 10.0
    for i in range(nd):
        for j in range(nd):
            if i != j:
                distance = max(
                    distance,
                    np.linalg.norm(
                        alldata[i, ns : ns + ndim] - alldata[j, ns : ns + ndim]
                    ),
                )
    settings = []  # NOTE(review): dead assignment, immediately overwritten below
    t = datetime.datetime.now()
    d_mult = 1.0
    settings = rbfopt.RbfoptSettings(
        nlp_solver_path=sharedata["nlp_path"],
        minlp_solver_path=sharedata["minlp_path"],
        print_solver_output=False,
        min_dist=d_mult * distance,
        algorithm="Gutmann",
        rand_seed=int(time.mktime(t.timetuple())),
    )  # random seed required for RBFopt functionality
    # Alternative settings used in testing saved for posterity
    # settings = rbfopt.RbfoptSettings( do_infstep=True,nlp_solver_path='/usr/local/ipopt/3.10.2/ipopt/ipopt.pc', minlp_solver_path='~/baron/baron',print_solver_output=False, algorithm='MSRM',global_search_method = 'solver',rand_seed = int(time.mktime(t.timetuple())), min_dist = d_mult*distance)
    # ),#init_strategy='all_corners',
    # num_global_searches = 0,)
    # provide initialization data if flag is tripped
    # NOTE(review): init nodes are supplied when dflag is False, but t_targets
    # are only computed when dflag is True (zeros otherwise) — the condition
    # looks inverted; confirm against intended behavior.
    if not dflag:
        # print 'check conc : ',alldata[:,ns:ns+ndim],t_targets
        alg = rbfopt.RbfoptAlgorithm(
            settings,
            bb,
            do_init_strategy=False,
            init_node_pos=alldata[:, ns : ns + ndim],
            init_node_val=t_targets,
        )  # ,num_nodes_at_restart=nd)
    else:
        alg = rbfopt.RbfoptAlgorithm(settings, bb)
    # Call to rbfopt in a manner that terminal is not polluted
    f = open(os.devnull, "w")
    alg.set_output_stream(f)
    val, new_x, itercount, evalcount, fast_evalcount = alg.optimize(pause_after_iters=1)
    f.close()
    # Pad the proposed point with zeros if RBFopt returned fewer coordinates.
    if len(new_x) < ndim:
        x = list(new_x) + [0] * (ndim - len(new_x))
    else:
        x = list(new_x)
    # Calculate relevant metrics and identify output responsible for error violation
    errs = max_err(x)
    # maxv = np.max(errs)
    # loc = np.argmax(errs)
    if "T" in inkeys:
        # prop_x = x[:-1]
        prop_t = x[-1]
    # else:
    #     prop_x = x
    # print 'Maximum error on new proposed point : ', maxv,' on species # ',loc+1
    # print 'Proposed initial concentrations', prop_x
    if "T" in inkeys:
        print("Proposed Temperature", prop_t)
    return [x, errs]
|
{"hexsha": "6a22105958ba669a41c6d114a7414adfad663cd4", "size": 11266, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/apps/ripe/emsampling.py", "max_stars_repo_name": "OOAmusat/idaes-pse", "max_stars_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/apps/ripe/emsampling.py", "max_issues_repo_name": "OOAmusat/idaes-pse", "max_issues_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idaes/apps/ripe/emsampling.py", "max_forks_repo_name": "OOAmusat/idaes-pse", "max_forks_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-17T11:08:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T11:08:43.000Z", "avg_line_length": 34.5582822086, "max_line_length": 299, "alphanum_fraction": 0.5715426948, "include": true, "reason": "import numpy,import pyomo", "num_tokens": 2998}
|
import numpy as np
import tensorflow as tf
import gym
import time
import spinup.algos.sppox.core as core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_tf import MpiAdamOptimizer, sync_all_params
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
# Create a TF1 session with incremental GPU memory allocation instead of
# grabbing all GPU memory up front.
# NOTE(review): this runs at import time (module-level side effect) and the
# `session` object is never used below in this file — confirm it is needed.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
class PPOBuffer:
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        # Preallocated storage; `size` is the fixed epoch length in timesteps.
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        # ptr: next write slot; path_start_idx: start of the current trajectory.
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(self, obs, act, rew, val, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size  # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1

    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        # Append the bootstrap value so the TD residual below is defined at
        # the final step of the trajectory.
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # the next two lines implement GAE-Lambda advantage calculation
        # (deltas are one-step TD residuals: r_t + gamma*V(s_{t+1}) - V(s_t))
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)  # + vals[:-1] # Adv + V = Q
        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
        self.path_start_idx = self.ptr

    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.

        NOTE(review): the advantage-normalization lines are commented out
        below, so advantages are currently returned unnormalized despite the
        docstring above — confirm which behavior is intended.
        """
        assert self.ptr == self.max_size  # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick
        # adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
        # self.adv_buf = (self.adv_buf - adv_mean) / adv_std
        return [self.obs_buf, self.act_buf, self.adv_buf,
                self.ret_buf, self.logp_buf]
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
"""
def sppo(args, env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2,
train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=200,
target_kl=0.01, logger_kwargs=dict(), save_freq=10):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp`` (batch,) | Gives log probability, according to
| the policy, of taking actions ``a_ph``
| in states ``x_ph``.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``.
``v`` (batch,) | Gives the value estimate for states
| in ``x_ph``. (Critical: make sure
| to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.)
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
seed += 10000 * proc_id()
tf.set_random_seed(seed)
np.random.seed(seed)
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph = core.placeholders_from_spaces(env.observation_space, env.action_space)
adv_ph, ret_ph, logp_old_ph = core.placeholders(None, None, None)
# Main outputs from computation graph
pi, logp, logp_pi, h, v = actor_critic(x_ph, a_ph, **ac_kwargs)
# Need all placeholders in *this* order later (to zip with data from buffer)
all_phs = [x_ph, a_ph, adv_ph, ret_ph, logp_old_ph]
# Every step, get: action, value, and logprob
get_action_ops = [pi, v, logp_pi, h]
# Experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['pi', 'v'])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)
# PPO objectives
ratio = tf.exp(logp - logp_old_ph) # pi(a|s) / pi_old(a|s)
# For PPO
# min_adv = tf.where(adv_ph > 0, (1 + clip_ratio) * adv_ph, (1 - clip_ratio) * adv_ph)
# pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_ph, min_adv))
# SPPO NO.2: add entropy
adv_logp = adv_ph - args.alpha * tf.stop_gradient(logp)
min_adv = tf.where(adv_logp>0, (1+clip_ratio)*adv_logp, (1-clip_ratio)*adv_logp)
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_logp, min_adv))
v_loss = tf.reduce_mean((ret_ph - v)**2)
# Info (useful to watch during learning)
approx_kl = tf.reduce_mean(logp_old_ph - logp) # a sample estimate for KL-divergence, easy to compute
# approx_ent = tf.reduce_mean(-logp) # a sample estimate for entropy, also easy to compute
approx_ent = tf.reduce_mean(-h) # exact entropy
clipped = tf.logical_or(ratio > (1+clip_ratio), ratio < (1-clip_ratio))
clipfrac = tf.reduce_mean(tf.cast(clipped, tf.float32))
# Optimizers
train_pi = MpiAdamOptimizer(learning_rate=args.pi_lr).minimize(pi_loss)
train_v = MpiAdamOptimizer(learning_rate=args.vf_lr).minimize(v_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Sync params across processes
sess.run(sync_all_params())
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph}, outputs={'pi': pi, 'v': v})
def update():
inputs = {k:v for k,v in zip(all_phs, buf.get())}
pi_l_old, v_l_old, ent = sess.run([pi_loss, v_loss, approx_ent], feed_dict=inputs)
# Training
for i in range(train_pi_iters):
_, kl = sess.run([train_pi, approx_kl], feed_dict=inputs)
kl = mpi_avg(kl)
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.'%i)
break
logger.store(StopIter=i)
for _ in range(train_v_iters):
sess.run(train_v, feed_dict=inputs)
# Log changes from update
pi_l_new, v_l_new, kl, cf = sess.run([pi_loss, v_loss, approx_kl, clipfrac], feed_dict=inputs)
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(pi_l_new - pi_l_old),
DeltaLossV=(v_l_new - v_l_old))
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v_t, logp_t, h_t = sess.run(get_action_ops, feed_dict={x_ph: o.reshape(1,-1)})
# SPPO NO.1: add entropy
rh = r - args.alpha * logp_t
# rh = r - args.alpha * h_t # exact entropy
# save and log
buf.store(o, a, rh, v_t, logp_t)
logger.store(VVals=v_t)
o, r, d, _ = env.step(a[0])
ep_ret += r
ep_len += 1
# d = False if ep_len == max_ep_len else d
terminal = d or (ep_len == max_ep_len)
if terminal or (t==local_steps_per_epoch-1):
if not(terminal):
print('Warning: trajectory cut off by epoch at %d steps.'%ep_len)
# if trajectory didn't reach terminal state, bootstrap value target
last_val = r if d else sess.run(v, feed_dict={x_ph: o.reshape(1,-1)})
buf.finish_path(last_val)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# # Save model
# if (epoch % save_freq == 0) or (epoch == epochs-1):
# logger.save_state({'env': env}, None)
# Perform PPO update!
update()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
# Script entry point: parse hyperparameters, fork MPI workers, and run SPPO
# on a (reward-clipped) Gym environment.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='LunarLander-v2') # CartPole-v0 Acrobot-v1 Breakout-ram-v4 # 'LunarLanderContinuous-v2' 0.02 # LunarLander-v2 0.05
    parser.add_argument('--max_ep_len', type=int, default=1000)
    parser.add_argument('--hid', type=int, default=300)
    parser.add_argument('--l', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.99)
    # Entropy temperature alpha used by sppo() for reward/advantage shaping.
    parser.add_argument('--alpha', type=float, default=0.1)
    parser.add_argument('--pi_lr', type=float, default=3e-4)
    parser.add_argument('--vf_lr', type=float, default=1e-3)
    parser.add_argument('--seed', '-s', type=int, default=3)
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--steps', type=int, default=4000)
    parser.add_argument('--epochs', type=int, default=30000)
    parser.add_argument('--exp_name', type=str, default='LunarLander-v2_apple_0.1_logp')
    args = parser.parse_args()
    mpi_fork(args.cpu) # run parallel code with mpi
    from spinup.utils.run_utils import setup_logger_kwargs
    logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
    class Wrapper(object):
        # Thin env wrapper: repeats each action `action_repeat` times,
        # sums the (clipped) rewards, and delegates everything else to
        # the wrapped environment via __getattr__.
        def __init__(self, env, action_repeat=1):
            self._env = env
            self.action_repeat = action_repeat
        def __getattr__(self, name):
            return getattr(self._env, name)
        def step(self, action):
            r = 0.0
            for _ in range(self.action_repeat):
                obs_, reward_, done_, info_ = self._env.step(action)
                # Zero out rewards <= -99 — presumably the LunarLander
                # crash penalty of -100. NOTE(review): confirm this is the
                # intended clipping for other envs passed via --env.
                reward_ = reward_ if reward_ > -99.0 else 0.0
                r = r + reward_
                if done_:
                    # Stop repeating as soon as the episode ends.
                    return obs_, r, done_, info_
            return obs_, r, done_, info_
    sppo(args, lambda : Wrapper(gym.make(args.env),1), actor_critic=core.mlp_actor_critic,
         ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma, max_ep_len=args.max_ep_len,
         seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,
         logger_kwargs=logger_kwargs)
|
{"hexsha": "6a5e8ae8ed5f447d0baa725df3e6b5316fb5facb", "size": 15930, "ext": "py", "lang": "Python", "max_stars_repo_path": "spinup/algos/sppox/sppox.py", "max_stars_repo_name": "JingbinLiu/DRL", "max_stars_repo_head_hexsha": "90578c2447d47da661269cb6c981fd04fe2977f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 93, "max_stars_repo_stars_event_min_datetime": "2019-01-03T09:49:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T08:51:50.000Z", "max_issues_repo_path": "spinup/algos/sppox/sppox.py", "max_issues_repo_name": "JingbinLiu/DRL", "max_issues_repo_head_hexsha": "90578c2447d47da661269cb6c981fd04fe2977f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-07-01T01:42:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-17T18:21:10.000Z", "max_forks_repo_path": "spinup/algos/sppox/sppox.py", "max_forks_repo_name": "JingbinLiu/DRL", "max_forks_repo_head_hexsha": "90578c2447d47da661269cb6c981fd04fe2977f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2019-01-18T01:34:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T06:39:53.000Z", "avg_line_length": 43.5245901639, "max_line_length": 166, "alphanum_fraction": 0.6188323917, "include": true, "reason": "import numpy", "num_tokens": 3888}
|
[STATEMENT]
lemma of_bl_length2:
"length xs + c < LENGTH('a) \<Longrightarrow> of_bl xs * 2^c < (2::'a::len word) ^ (length xs + c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length xs + c < LENGTH('a) \<Longrightarrow> of_bl xs * 2 ^ c < 2 ^ (length xs + c)
[PROOF STEP]
by (simp add: of_bl_length word_less_power_trans2)
|
{"llama_tokens": 143, "file": "Word_Lib_Reversed_Bit_Lists", "length": 1}
|
# # Estimate Binomial draw probability with DynamicHMC.
using DynamicHMCModels
Random.seed!(1356779)
# Define a structure to hold the data.
Base.@kwdef struct BernoulliProblem
    "Total number of draws in the data."
    n::Int
    "Number of draws ' == 1' "
    obs::Vector{Int}
end;
# Write a function to return properly dimensioned transformation.
# as𝕀 constrains the success probability p to the unit interval (0, 1).
make_transformation(model::BernoulliProblem) =
    as((p = as𝕀, ))
# Add data: 3 observed counts drawn from Binomial(9, 2/3).
model = BernoulliProblem(; n = 9, obs = rand(Binomial(9, 2/3), 3))
# Make the type callable with the parameters *as a single argument*.
# Returns the log-likelihood of the observed counts under Binomial(n, p).
function (model::BernoulliProblem)(θ)
    @unpack n, obs = model   # extract the data
    @unpack p = θ
    loglikelihood(Binomial(n, p), obs)
end
# Use a flat prior (the default, omitted) for p.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Sample chain (1000 draws, NUTS with warmup).
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000;
    reporter = NoProgressReport()
)
# Map draws back to the constrained parameter space.
posterior = P.transformation.(results.chain)
# Create Particles NamedTuple object
println()
p = as_particles(posterior)
p |> display
println()
# Sampler diagnostics: energy Bayesian fraction of missing information
# and tree-statistics summary.
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics) |> display
println()
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics) |> display
|
{"hexsha": "61ba91c5c9ea342df6bd1c9e67930c6468e60dbf", "size": 1274, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/02/m2.1d.jl", "max_stars_repo_name": "StatisticalRethinkingJulia/SRDynamicHMC.jl", "max_stars_repo_head_hexsha": "fba06cfe037b98d5a9a64d367f15bec8dcecb9ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-03-01T23:51:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T10:02:40.000Z", "max_issues_repo_path": "scripts/02/m2.1d.jl", "max_issues_repo_name": "StatisticalRethinkingJulia/SRDynamicHMC.jl", "max_issues_repo_head_hexsha": "fba06cfe037b98d5a9a64d367f15bec8dcecb9ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-07-22T23:28:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-03T11:55:47.000Z", "max_forks_repo_path": "scripts/02/m2.1d.jl", "max_forks_repo_name": "StatisticalRethinkingJulia/SRDynamicHMC.jl", "max_forks_repo_head_hexsha": "fba06cfe037b98d5a9a64d367f15bec8dcecb9ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-28T09:34:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:52:10.000Z", "avg_line_length": 22.350877193, "max_line_length": 84, "alphanum_fraction": 0.726844584, "num_tokens": 344}
|
"""Tests for spatio_temporal."""
import numpy as np
from vizier import pyvizier
from vizier.pyvizier.converters import core
from vizier.pyvizier.converters import spatio_temporal as st
from absl.testing import absltest
# Shared fixtures: two metric converters (y1 maximized, y2 minimized) and
# two trials with three measurements each. Trial 1 measures at steps 1-3,
# trial 2 at steps 1, 5, 6; elapsed_secs is always 10x the step count.
_metric_converters = [
    core.DefaultModelOutputConverter(
        pyvizier.MetricInformation(
            name='y1', goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE)),
    core.DefaultModelOutputConverter(
        pyvizier.MetricInformation(
            name='y2', goal=pyvizier.ObjectiveMetricGoal.MINIMIZE))
]
# In both trials, y2 == -y1 at every measurement.
_trials = [
    pyvizier.Trial(
        id=1,
        parameters={'x1': pyvizier.ParameterValue(1)},
        measurements=[
            pyvizier.Measurement(
                steps=1, elapsed_secs=10, metrics={
                    'y1': 1,
                    'y2': -1
                }),
            pyvizier.Measurement(
                steps=2, elapsed_secs=20, metrics={
                    'y1': 3,
                    'y2': -3
                }),
            pyvizier.Measurement(
                steps=3, elapsed_secs=30, metrics={
                    'y1': 2,
                    'y2': -2
                })
        ]),
    pyvizier.Trial(
        id=2,
        parameters={'x1': pyvizier.ParameterValue(2)},
        measurements=[
            pyvizier.Measurement(
                steps=1, elapsed_secs=10, metrics={
                    'y1': -4,
                    'y2': 4
                }),
            pyvizier.Measurement(
                steps=5, elapsed_secs=50, metrics={
                    'y1': -6,
                    'y2': 6
                }),
            pyvizier.Measurement(
                steps=6, elapsed_secs=60, metrics={
                    'y1': -5,
                    'y2': 5
                })
        ])
]
class TimedLabelsExtractorTest(absltest.TestCase):
  """Tests TimedLabelsExtractor time axes and value_extraction modes."""
  # Each test converts the shared _trials fixture and checks the resulting
  # per-trial (times, labels) arrays, which are all column vectors.
  def test_steps(self):
    # 'steps' time axis: times come straight from Measurement.steps.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'steps', value_extraction='raw')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].times,
        np.asarray([1, 2, 3], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].times,
        np.asarray([1, 5, 6], dtype=np.float32)[:, np.newaxis])
  def test_elapsed_secs(self):
    # 'elapsed_secs' time axis: times come from Measurement.elapsed_secs.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'elapsed_secs', value_extraction='raw')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].times,
        np.asarray([10, 20, 30], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].times,
        np.asarray([10, 50, 60], dtype=np.float32)[:, np.newaxis])
  def test_index(self):
    # 'index' time axis: times are the 0-based measurement indices,
    # regardless of the actual step/seconds values.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'index', value_extraction='raw')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].times,
        np.asarray([0, 1, 2], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].times,
        np.asarray([0, 1, 2], dtype=np.float32)[:, np.newaxis])
  def test_labels_raw(self):
    # 'raw' extraction: labels are the measurement values unchanged.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'elapsed_secs', value_extraction='raw')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y1'],
        np.asarray([1, 3, 2], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y2'],
        np.asarray([-1, -3, -2], dtype=np.float32)[:, np.newaxis])
  def test_labels_cummax(self):
    # 'cummax' extraction: running best-so-far per metric. Note the
    # MINIMIZE metric y2 also yields [-1, -3, -3], i.e. "best" follows the
    # converter's internal sign convention, not a literal maximum.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'elapsed_secs', value_extraction='cummax')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y1'],
        np.asarray([1, 3, 3], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y2'],
        np.asarray([-1, -3, -3], dtype=np.float32)[:, np.newaxis])
  def test_labels_strict_cummax_firstonly(self):
    # 'cummax_firstonly': keeps only the first occurrence of each new
    # best plus the final timestamp (trial 2 collapses to times [10, 60]).
    converter = st.TimedLabelsExtractor([_metric_converters[0]],
                                        'elapsed_secs',
                                        value_extraction='cummax_firstonly')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].times,
        np.asarray([10, 20, 30], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y1'],
        np.asarray([1, 3, 3], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].times,
        np.asarray([10, 60], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].labels['y1'],
        np.asarray([-4, -4], dtype=np.float32)[:, np.newaxis])
  def test_labels_cummax_lastonly(self):
    # 'cummax_lastonly': keeps only the last occurrence of each plateau
    # (trial 1 collapses to times [10, 30], trial 2 to [60]).
    converter = st.TimedLabelsExtractor([_metric_converters[0]],
                                        'elapsed_secs',
                                        value_extraction='cummax_lastonly')
    timed_labels = converter.convert(_trials)
    np.testing.assert_almost_equal(
        timed_labels[0].times,
        np.asarray([10, 30], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[0].labels['y1'],
        np.asarray([1, 3], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].times,
        np.asarray([60], dtype=np.float32)[:, np.newaxis])
    np.testing.assert_almost_equal(
        timed_labels[1].labels['y1'],
        np.asarray([-4], dtype=np.float32)[:, np.newaxis])
  def test_extract_all_timestamps(self):
    # Union of timestamps across all trials, sorted and deduplicated.
    converter = st.TimedLabelsExtractor(
        _metric_converters, 'steps', value_extraction='cummax')
    all_ts = converter.extract_all_timestamps(_trials)
    np.testing.assert_almost_equal(
        all_ts, np.asarray([1, 2, 3, 5, 6], dtype=np.float32))
class SparseSpatioTemporalConverterTest(absltest.TestCase):
  """Tests st.SparseSpatioTemporalConverter with no spatial features."""

  def test_all(self):
    """Trials flatten to one (time, label) row per measurement."""
    extractor = st.TimedLabelsExtractor(
        _metric_converters, 'steps', value_extraction='raw')
    converter = st.SparseSpatioTemporalConverter([], extractor)
    features, labels = converter.to_xy(_trials)
    # Rows are the six measurements of the two fixture trials, in order.
    np.testing.assert_equal(features, {
        'steps':
            np.array([[1.], [2.], [3.], [1.], [5.], [6.]], dtype=np.float32)
    })
    np.testing.assert_equal(
        labels, {
            'y1': [[1.], [3.], [2.], [-4.], [-6.], [-5.]],
            'y2': [[-1.], [-3.], [-2.], [4.], [6.], [5.]],
        })
    self.assertEqual(converter.features_shape, {'steps': (None, 1)})
    self.assertEqual(converter.labels_shape, {'y1': (None, 1), 'y2': (None, 1)})
    # BUGFIX: `np.float` (deprecated in NumPy 1.20, removed in 1.24) is
    # replaced with the builtin `float`; np.finfo(float) is identical to
    # np.finfo(np.float64), so the expected bounds are unchanged.
    self.assertEqual(
        converter.output_specs, {
            'steps':
                core.NumpyArraySpec.from_parameter_config(
                    pyvizier.ParameterConfig.factory(
                        name='steps', bounds=(0.0, np.finfo(float).max)),
                    core.NumpyArraySpecType.default_factory)
        })
class DenseSpatioTemporalConverterTest(absltest.TestCase):
  """Tests st.DenseSpatioTemporalConverter (dense time grid with NaN gaps)."""
  def test_all(self):
    # With an explicit temporal grid [1,2,3,5,6], each trial becomes one
    # row per metric with NaN at the grid points it did not measure.
    extractor = st.TimedLabelsExtractor(
        _metric_converters, 'steps', value_extraction='raw')
    converter = st.DenseSpatioTemporalConverter([],
                                                extractor,
                                                temporal_index_points=np.array(
                                                    [1., 2., 3., 5., 6.]))
    features, labels = converter.to_xy(_trials)
    # No spatial parameter converters were given, so features are empty.
    np.testing.assert_equal(features, {})
    self.assertEqual(converter.features_shape, {})
    np.testing.assert_equal(
        labels, {
            'y1':
                np.array([[1., 3., 2., np.nan, np.nan],
                          [-4., np.nan, np.nan, -6., -5.]]),
            'y2':
                np.array([[-1., -3., -2., np.nan, np.nan],
                          [4., np.nan, np.nan, 6., 5.]])
        },
        err_msg=f'{labels}')
    self.assertEqual(converter.labels_shape, {'y1': (None, 5), 'y2': (None, 5)})
  def test_xty(self):
    # to_xty(..., 'infer') returns (features, index points, labels).
    # NOTE(review): the converter is built with grid [1,5,6] but 'infer'
    # apparently recomputes the union grid [1,2,3,5,6] from the trials —
    # the returned index points and 5-column labels reflect that.
    extractor = st.TimedLabelsExtractor(
        _metric_converters, 'steps', value_extraction='raw')
    parameter = core.DefaultModelInputConverter(
        pyvizier.ParameterConfig.factory(name='x1', bounds=(0, 5)))
    converter = st.DenseSpatioTemporalConverter([parameter],
                                                extractor,
                                                temporal_index_points=np.array(
                                                    [1., 5., 6.]))
    features, temporal_index_points, labels = converter.to_xty(_trials, 'infer')
    np.testing.assert_equal(features, {'x1': [[1], [2]]})
    np.testing.assert_equal(temporal_index_points, [
        1.,
        2.,
        3.,
        5.,
        6.,
    ])
    np.testing.assert_equal(
        labels, {
            'y1':
                np.array([[1., 3., 2., np.nan, np.nan],
                          [-4., np.nan, np.nan, -6., -5.]]),
            'y2':
                np.array([[-1., -3., -2., np.nan, np.nan],
                          [4., np.nan, np.nan, 6., 5.]])
        },
        err_msg=f'{labels}')
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
|
{"hexsha": "6253c56931e0e821b69fbf533f929d5d7b6d6e1a", "size": 9346, "ext": "py", "lang": "Python", "max_stars_repo_path": "vizier/pyvizier/converters/spatio_temporal_test.py", "max_stars_repo_name": "google/vizier", "max_stars_repo_head_hexsha": "12b64ce191410e1c3a79a98472a1b17811290ed3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2022-03-03T21:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:17:51.000Z", "max_issues_repo_path": "vizier/pyvizier/converters/spatio_temporal_test.py", "max_issues_repo_name": "google/vizier", "max_issues_repo_head_hexsha": "12b64ce191410e1c3a79a98472a1b17811290ed3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vizier/pyvizier/converters/spatio_temporal_test.py", "max_forks_repo_name": "google/vizier", "max_forks_repo_head_hexsha": "12b64ce191410e1c3a79a98472a1b17811290ed3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3657587549, "max_line_length": 80, "alphanum_fraction": 0.5600256794, "include": true, "reason": "import numpy", "num_tokens": 2270}
|
"""
Example oneD_discrete_control.py
Author: Joshua A. Marshall <joshua.marshall@queensu.ca>
GitHub: https://github.com/botprof/agv-examples
"""
# %% SIMULATION SETUP
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
from mobotpy.models import Cart
# %% PARAMETERS
# Set some parameters that describe the desired behaviour
ZETA = 1.1
OMEGA_N = np.sqrt(0.3)
# Define the sample time [s]
T = 0.5
# Compute the pole locations
lambda_s = np.roots([1, 2 * ZETA * OMEGA_N, OMEGA_N ** 2])
lambda_z = np.exp(lambda_s * T)
# Define the vehicle mass [kg]
M = 10.0
# Define the system matrices
F = np.array([[1, T], [0, 1]])
G = np.array([[T ** 2 / (2 * M)], [T / M]])
# Find gain matrix K that places the poles at lambda_z
K = signal.place_poles(F, G, lambda_z)
# %% FUNCTION DEFINTIONS
def vehicle(x, u, F, G):
    """Advance the discrete-time 1D vehicle model by one step.

    Parameters
    ----------
    x : ndarray, shape (2,)
        Current state [position, velocity].
    u : float
        Scalar force input [N].
    F, G : ndarray
        Discrete-time system matrices, shapes (2, 2) and (2, 1).

    Returns
    -------
    ndarray, shape (2,)
        Next state, x[k+1] = F x[k] + G u[k].
    """
    # Linear state update; wrapping u in a list makes G @ [u] a 1-D product.
    state_next = F.dot(x) + G.dot([u])
    return state_next
def controller(x, K):
    """Full-state proportional feedback controller.

    Returns the control input u = -K x for state ``x`` and gain matrix ``K``.
    """
    return -np.dot(K, x)
# %% RUN SIMULATION
# Create an array of time values [s]
SIM_TIME = 30.0
t = np.arange(0, SIM_TIME, T)
N = np.size(t)
# Initialize arrays that will be populated with our inputs and states
x = np.zeros((2, N))
u = np.zeros(N)
# Set the initial position [m], velocity [m/s], and force input [N]
x[0, 0] = 1.0
x[1, 0] = 0.0
u[0] = 0.0
# Run the simulation: propagate the model one step, then compute the
# feedback input for the next step from the placed-pole gain matrix.
for k in range(1, N):
    x[:, k] = vehicle(x[:, k - 1], u[k - 1], F, G)
    u[k] = controller(x[:, k], K.gain_matrix)
# %% MAKE A PLOT
# Change some plot settings (optional; requires a LaTeX installation)
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{cmbright,amsmath}")
plt.rc("savefig", format="pdf")
plt.rc("savefig", bbox="tight")
# Plot the states (x) and input (u) vs time (t) in three stacked subplots
fig1 = plt.figure(1)
ax1a = plt.subplot(311)
plt.plot(t, x[0, :], "C0")
plt.grid(color="0.95")
plt.ylabel(r"$x_1$ [m]")
plt.setp(ax1a, xticklabels=[])
ax1b = plt.subplot(312)
plt.plot(t, x[1, :], "C0")
plt.grid(color="0.95")
plt.ylabel(r"$x_2$ [m/s]")
plt.setp(ax1b, xticklabels=[])
ax1c = plt.subplot(313)
# step() with where="post": input is piecewise-constant between samples
plt.step(t, u, "C1", where="post")
plt.grid(color="0.95")
plt.ylabel(r"$u$ [N]")
plt.xlabel(r"$t$ [s]")
# Save the plot
plt.savefig("../agv-book/figs/ch2/oneD_discrete_control_fig1.pdf")
# %% MAKE AN ANIMATION
# Set the side length of the vehicle [m]
LENGTH = 1.0
# Let's use the Cart class to create an animation.
# NOTE(review): this rebinds the name `vehicle`, shadowing the model
# function defined above — harmless here because the simulation is done.
vehicle = Cart(LENGTH)
# Create and save the animation
ani = vehicle.animate(
    x[0, :], T, True, "../agv-book/gifs/ch2/oneD_discrete_control.gif"
)
# %%
# Show all the plots to the screen
plt.show()
|
{"hexsha": "d99f971f253a8dcaabb7ddd68ee978392f4598e3", "size": 2593, "ext": "py", "lang": "Python", "max_stars_repo_path": "oneD_discrete_control.py", "max_stars_repo_name": "botprof/agv-examples", "max_stars_repo_head_hexsha": "a21b0f65fa50ad023864e18c40a37353f2a37f84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-11-06T11:14:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:54:57.000Z", "max_issues_repo_path": "oneD_discrete_control.py", "max_issues_repo_name": "botprof/agv-examples", "max_issues_repo_head_hexsha": "a21b0f65fa50ad023864e18c40a37353f2a37f84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-02-13T17:32:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-13T22:50:30.000Z", "max_forks_repo_path": "oneD_discrete_control.py", "max_forks_repo_name": "botprof/agv-examples", "max_forks_repo_head_hexsha": "a21b0f65fa50ad023864e18c40a37353f2a37f84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-21T10:48:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T10:48:22.000Z", "avg_line_length": 21.7899159664, "max_line_length": 70, "alphanum_fraction": 0.6490551485, "include": true, "reason": "import numpy,from scipy", "num_tokens": 866}
|
# Script to plot Figures 4 (A, B and C)
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
# Prepare the dataframe containing all variation data. MERGED_prio1_prio2.csv is a dataframe with all germline variation found in actionable genes (known and novel)
df = pd.read_csv('/path/to/MERGED_prio1_prio2.csv',sep='\t') # '/mnt/64716603-5b56-4f9a-b195-c11560647a3a/Projects/PHARMACOGENETICS/PGx_project/CNIO_jLanillos/Tier1_PharmGKB/samples'
#Filtering the "consequence" column for those variants of interest. I have previously checked all possible consequences annotated and chosen the following ones:
df = df.loc[df['annonimous_ANNOTATION'].str.contains('missense|frameshift|stop|start_lost|splice')] # len(df) = 3448
# Variants overlapping multiple genes are comma-separated in annonimous_GENE.
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
df['SYMBOL'] = df['annonimous_GENE']
# Some variants affect overlapping genes, and I just want to get the info related to the genes of interest. Create an auxiliary "'aux'" column which contains the index location of the gene of interest. E.g. UGT1A8,UGT1A4,UGT1A1
# aux = 2, to retrieve latter the gene UGT1A1 from that list (which is in "annonimous_GENE" col)
genesdf = pd.read_csv('/path/to/bioMart_transcripts_length.csv',sep='\t')
genes = list(genesdf['Gene name'])
df.loc[mask, 'aux'] = df_valid.apply(lambda x: str([i for i, j in enumerate(x['annonimous_GENE'].split(',')) if j == [z for z in genes if z in x['annonimous_GENE']][0]]).replace('[','').replace(']',''), axis=1)
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
# Keep only the gene of interest in SYMBOL, using the 'aux' index.
df.loc[mask, 'SYMBOL'] = df_valid.apply(lambda x: x['annonimous_GENE'].split(',')[int(x['aux'])],axis = 1)
# Since we know the 'aux' column, we can apply the same principle to columns with multiple annotations and get the right term into "ANNOTATION" column
df['ANNOTATION'] = df['annonimous_ANNOTATION']
df.loc[mask, 'ANNOTATION'] = df_valid.apply(lambda x: x['annonimous_ANNOTATION'].split(',')[int(x['aux'])],axis = 1)
#Filter by consequence again on the newly created "ANNOTATION" column. That column may contain a consequence we did not want
df = df.loc[df['ANNOTATION'].str.contains('missense|frameshift|stop|start_lost|splice')] # len(df) = 3387
df = df.loc[~(df['ANNOTATION'].str.contains('synonymous SNV'))] # len(df) = 3352
# More filtering criteria? No, by the moment
# Splice variants. I have summarized all variants containing "splice" and checked for their distance to NeasrestSS. df.loc[df['ANNOTATION'].str.contains('splice')].groupby(['ANNOTATION','distNearestSS'])['ID'].count()
# I will filter out variants labeled as "splice_acceptor_variant&splice_region_variant&intron_variant" and "splice_donor_variant&splice_region_variant&intron_variant"
# Variants in those two categories are located further to the SS (as far as -11 and 13 bp)
df['distNearestSS_aux'] = df['distNearestSS']
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
df.loc[mask, 'distNearestSS_aux'] = df_valid.apply(lambda x: x['distNearestSS_aux'].split(',')[int(x['aux'])],axis = 1)
# Filtering splice_acceptor_variant&splice_region_variant&intron_variant in the splicing canonical regions (-2 and -1 bp from the start of the exon)
dfaux = df.loc[df['ANNOTATION'] == 'splice_acceptor_variant&splice_region_variant&intron_variant'] #len(dfaux) = 114
dfaux = dfaux.loc[df['distNearestSS_aux'].astype(float)>-3] #len(dfaux) = 1
df = df.loc[~(df['ANNOTATION'] == 'splice_acceptor_variant&splice_region_variant&intron_variant')] # len(df) = 3238; these variants are further than 3 bp from the canonical splice site
df = pd.concat([df,dfaux]) #len(df) = 3239
# Filtering splice_donor_variant&splice_region_variant&intron_variant in the splicing canonical regions (+2 and +1 bp from the end of the exon)
dfaux = df.loc[df['ANNOTATION'] == 'splice_donor_variant&splice_region_variant&intron_variant'] #len(dfaux) = 114
dfaux = dfaux.loc[df['distNearestSS_aux'].astype(float)<3] #len(dfaux) = 1
df = df.loc[~(df['ANNOTATION'] == 'splice_donor_variant&splice_region_variant&intron_variant')] # len(df) = 3238; these variants are further than 3 bp from the canonical splice site
df = pd.concat([df,dfaux]) #len(df) = 3127
# Filtering out all variants which are spliceACCEPTOR/DONOR&intron_variant with close distance to NeasrestSS (-3,3)
dfaux = df.loc[~df['ANNOTATION'].str.contains('splice_acceptor_variant&intron_variant')]
dfaux = dfaux.loc[~dfaux['ANNOTATION'].str.contains('splice_donor_variant&intron_variant')]
dff = df.loc[(df['ANNOTATION'].str.contains('splice_acceptor_variant&intron_variant')) & (df['distNearestSS_aux'].astype(float)>-3) ]
dff2 = df.loc[(df['ANNOTATION'].str.contains('splice_donor_variant&intron_variant')) & (df['distNearestSS_aux'].astype(float)<3) ]
df = pd.concat([dfaux,dff, dff2]) #len(df) = 2481
# Create a new column with simplified protein consequence terms
df['ANNOTATION_simple'] = df['ANNOTATION'].apply(lambda x: x.split(' ')[0].split('&')[0].replace('start_lost','start/stop gained/lost').replace('stopgain','start/stop gained/lost').replace('stoploss','start/stop gained/lost').replace('nonframeshift','inframe').split('_')[0])
# Annotate restricted data: per individual NGS location
procdf = pd.read_csv('/path/to/dictionary_proc_pais.csv',sep='|',header = None) # Restricted
procdf_previous = pd.read_csv('/path/to/dictionary_proc.csv',sep='\t') # Restricted
sampledf = pd.read_csv('/path/to/sample_dictionary_20210105.csv',sep='\t',header = None) # Restricted
# Map sample IDs to anonymized IDs and to sequencing origin (country).
sample_dict = dict(zip(list(sampledf[1]),list(sampledf[0].astype(str))))
proc_dict = dict(zip(list(procdf[0].astype(str)),list(procdf[1])))
proc_dict_previous = dict(zip(list(procdf_previous['sample'].astype(str)),list(procdf_previous['from'])))
df['annonimous_samples'] = df['samples'].apply(lambda x: ','.join([sample_dict[i] for i in x.split(';')]))
df['annonimous_proc_previous'] = df['annonimous_samples'].apply(lambda x: ','.join([proc_dict_previous[i] for i in x.split(',')]))
df['annonimous_proc'] = df['annonimous_samples'].apply(lambda x: ','.join([proc_dict[i] for i in x.split(',')]))
############## Figure 4A
# Stacked bar chart of known vs novel variant counts, colored by country.
plt.style.use('default')
# Collapse the per-sample country list to a deduplicated combination label.
df['annonimous_proc_simple'] = df.apply(lambda x: ','.join(list(set(x['annonimous_proc'].split(',')))),axis=1)
df.loc[(df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.')].groupby('annonimous_proc_simple')['ID'].count()
# All country combinations other than the three single-country labels.
othercombs = [x for x in list(set(list(df['annonimous_proc_simple']))) if (x != 'Spain') and (x != 'Colombia') and (x != 'Brazil')]
GENES = ['CACNA1S','CYP2B6','CYP2C9','CYP4F2','DPYD','G6PD','NUDT15','RYR1','SLCO1B1','TPMT','UGT1A1'] # Only ClinAcc PGx genes
df = df.loc[df['SYMBOL'].str.contains('|'.join(GENES))]
f, ax1 = plt.subplots(figsize=(2,3))
# Known = in gnomAD or dbSNP; novel = absent from both.
dff = df.loc[(df['gnomad_AF'] != 0) | (df['dbSNP_id'] != '.')]
dff_novel = df.loc[(df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.')]
colordict = dict(zip(['Spain','Colombia','Brazil', '|'.join(othercombs)],['tab:blue','tab:red','tab:green', 'tab:olive']))
bottom = (0,0)
for i in ['Spain','Colombia','Brazil', '|'.join(othercombs)]:
    if i == '|'.join(othercombs):
        labels = 'Mixed'
        countriescount = (len(dff.loc[dff['annonimous_proc_simple'].str.contains(i)]), len(dff_novel.loc[dff_novel['annonimous_proc_simple'].str.contains(i)]))
    else:
        labels = i
        countriescount = (len(dff.loc[dff['annonimous_proc_simple'] == i])),(len(dff_novel.loc[dff_novel['annonimous_proc_simple'] == i]))
    # Stack this country's counts on top of the running totals.
    ax1.bar(['Known\nvariants','Novel\nvariants'], countriescount, color = [colordict[i]],label = labels, edgecolor = 'black', bottom = bottom,width = 0.5)#align = 'edge',
    bottom = ( bottom[0] + countriescount[0], bottom[1] + countriescount[1])
ax1.legend(title='Countries', handletextpad=0.5,labelspacing=0.2, fontsize=8,facecolor='white', loc='upper right', bbox_to_anchor=(1.3, 0.8))
plt.ylabel('Count')
plt.style.use('ggplot')
ax1.grid(b=None)#, linestyle='-', linewidth=2)
ax1.set_facecolor('white')
ax1.spines['bottom'].set_color('k')
ax1.spines['top'].set_color('w')
ax1.spines['right'].set_color('w')
ax1.spines['left'].set_color('k')
plt.subplots_adjust(left=0.295, bottom=0.140, right=0.805, top=0.985, wspace=0.17, hspace=0.42)
plt.savefig('/path/to/Figures/Figure_4A.png',format = 'png', dpi = 500)
plt.show()
# Figure 4B (piechart): novel variants broken down by simplified consequence.
f, ax2 = plt.subplots(figsize=(3,2.5))
dfplot = df.loc[(df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.')].groupby('ANNOTATION_simple')['ID'].count().reset_index();
# NOTE(review): labeldistance=np.nan appears intended to hide the slice
# labels (the legend below identifies the slices) — confirm on the target
# matplotlib version.
dfplot.plot(kind='pie',ax=ax2, y='ID', autopct='%1.1f%%',startangle=25, shadow=False, labels = dfplot['ANNOTATION_simple'], labeldistance=np.nan, pctdistance=1.24, textprops={'family':'sans-serif', 'fontsize':9})
ax2.legend(fontsize=8, loc='lower left', bbox_to_anchor=(0.3, 0.1),handletextpad=0.1,labelspacing=0.2)
plt.ylabel('')
nrnovel= len(df.loc[(df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.')])
ax2.set_title('Novel variants\n' + '(n=' + str(nrnovel) + ')', fontsize=10, pad=-8)
plt.subplots_adjust(right=0.865, bottom=0.0)#, left=0.805, top=0.985, wspace=0.17, hspace=0.42)
plt.savefig('/path/to/Figure_4B.pdf',format = 'pdf', dpi = 500)
plt.show()
#### Figure 4C ####
# NOTE(review): numpy is already imported at the top of the file; this
# re-import is redundant (kept as-is).
import numpy as np
# Split into known variants, novel LoF, and novel VUS (missense/inframe).
df_known = df.loc[~((df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.'))]
dff_novel = df.loc[(df['gnomad_AF'] == 0) & (df['dbSNP_id'] == '.')]
dff_novel_lof = dff_novel.loc[~dff_novel['ANNOTATION_simple'].str.contains('missense|inframe')]
dff_novel_VUS = dff_novel.loc[dff_novel['ANNOTATION_simple'].str.contains('missense|inframe')]
# NOTE(review): relative path here vs '/path/to/...' used for the same
# file earlier — verify both point at the same bioMart table.
genesdf = pd.read_csv('../bioMart_transcripts_length.csv',sep='\t')
genesdf = genesdf.loc[genesdf['Gene name'].str.contains('|'.join(GENES))]
genesdf['Transcript length (kb)'] = genesdf['Transcript length (including UTRs and CDS)']/1000
# Per-gene novel LoF / VUS carrier counts, normalized by transcript length.
d = dict(dff_novel_lof.loc[dff_novel_lof['SYMBOL'].str.contains('|'.join(GENES))].groupby('SYMBOL')['N_samples'].sum())
genesdf['Nr.Novel LoF variants'] = genesdf['Gene name'].map(d)
genesdf['Nr.Novel Lof vars per kb'] = genesdf['Nr.Novel LoF variants'] / genesdf['Transcript length (kb)']
d = dict(dff_novel_VUS.loc[dff_novel_VUS['SYMBOL'].str.contains('|'.join(GENES))].groupby('SYMBOL')['N_samples'].sum())
genesdf['Nr.Novel VUS variants'] = genesdf['Gene name'].map(d)
genesdf['Nr.Novel VUS vars per kb'] = genesdf['Nr.Novel VUS variants'] / genesdf['Transcript length (kb)']
plotdf = genesdf[['Gene name','Transcript length (kb)','Nr.Novel Lof vars per kb','Nr.Novel LoF variants','Nr.Novel VUS variants','Nr.Novel VUS vars per kb']]
haplo = pd.read_csv('/path/to/haplotypes_20210107.csv',sep='\t')
pheno = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
genes = list()
values = list()
values_pheno = list()
for gene in GENES:
if gene == 'CACNA1S':
genes.append(gene)
values.append(0)
values_pheno.append(0)
elif gene == 'CYP2B6':
genes.append(gene)
values.append(len(haplo.loc[~(haplo[gene].isnull())].loc[~haplo[gene].astype(str).str.contains('4|22')]))
values_pheno.append(len(pheno.loc[~(pheno['Phenotype_' + gene].isnull())]))
elif gene == 'G6PD':
genes.append(gene)
values.append(len(haplo.loc[~(haplo[gene].isnull())]))
values_pheno.append(len(pheno.loc[pheno['Phenotype_' + gene].astype(str).str.contains('G6PD_Deficient')]))
else:
genes.append(gene)
values.append(len(haplo.loc[~(haplo[gene].isnull())]))
values_pheno.append(len(pheno.loc[~(pheno['Phenotype_' + gene].isnull())]))
gf = pd.DataFrame()
gf['Gene'] = genes
gf['Nr.ind'] = values_pheno # Considering individuals with actionable phenotypes
gf = gf.sort_values(by='Nr.ind',ascending=False)
d = dict(zip(list(gf['Gene']), list(gf['Nr.ind'])))
plotdf['Nr.actionable alleles'] = plotdf['Gene name'].map(d)
plotdf['PCT Novel Variants'] = [(str(x)+'%').replace('inf%','+36').replace('nan%','0.0%') for x in list((100*plotdf['Nr.Novel LoF variants'] / plotdf['Nr.actionable alleles']).round(decimals=1))]
plotdf.loc[(plotdf['Gene name'].str.contains('CACNA1S|RYR1')),'PCT Novel Variants'] ='NA'
UGT1A1 = np.round(100*(plotdf['Nr.Novel LoF variants'].loc[plotdf['Gene name'] == 'UGT1A1'].values[0] / plotdf['Nr.actionable alleles'].loc[plotdf['Gene name'] == 'UGT1A1'].values[0]), 2)
plotdf.loc[(plotdf['Gene name'].str.contains('UGT1A1')),'PCT Novel Variants'] = str(UGT1A1) + '%'
plt.style.use('ggplot')
plotdf.set_index('Gene name', inplace=True)
f, ax1 = plt.subplots(figsize=(5,3))
plotdf['Nr.Novel LoF variants'].plot(ax=ax1,kind='bar', width=0.75, edgecolor = 'black', label = 'Novel LOF')
# Text on the top of each barplot
labels = list(plotdf['PCT Novel Variants'])
values = list(plotdf['Nr.Novel LoF variants'] + 0.8) #list(plotdf['Nr.Novel LoF variants'])
for i in range(len(plotdf)):
plt.text(x = i-0.27 , y = values[i]+0.3, s = labels[i], size = 7)
plt.ylim([0,10])
plt.ylabel('nr. novel LOF variants', fontsize = 10)
ax1.grid(b=None)#, linestyle='-', linewidth=2)
ax1.set_facecolor('white')
ax1.spines['bottom'].set_color('k')
ax1.spines['top'].set_color('w')
ax1.spines['right'].set_color('w')
ax1.spines['left'].set_color('k')
# Adjust the margins
plt.subplots_adjust(bottom= 0.27, top = 0.95, left=0.135, right=0.945)
plt.savefig('/path/to/Figures/Figure_4C.png',format = 'png', dpi = 500)
plt.show()
################################# STATS
# NOTE(review): these bare expressions only display results in an interactive
# session (notebook/REPL); wrap in print(...) if running as a script.
# Calculate the mean MAFs of known and novel variants
df_known['MAF'].mean()
dff_novel['MAF'].mean()
# Two-sample t-test (scipy ttest_ind) comparing MAFs of known vs novel variants
# (the original comment said "chi-square", but the code runs a t-test).
from scipy.stats import ttest_ind
ttest_ind(df_known['MAF'],dff_novel['MAF'])
# % Known variants found in only one country
# (no ',' in the country field means a single country).
100*(len(df_known.loc[~df_known['annonimous_proc_simple'].str.contains(',')]) / len(df_known))
# % Novel variants found in only one country
100*len(dff_novel.loc[~dff_novel['annonimous_proc_simple'].str.contains(',')]) / len(dff_novel)
# Average % contribution novel LoF variants to actionable alleles:
plotdf['PCT Novel Variants'].str.replace('%','').str.replace('NA','0').astype(float).mean()
# % novel "VUS" (i.e., missense or inframe) in our results
100*len(dff_novel.loc[dff_novel['ANNOTATION_simple'].str.contains('missense|inframe')]) / len(dff_novel)
|
{"hexsha": "09c36d8d8de47aa328b40d7efa6b47561ada7eea", "size": 14158, "ext": "py", "lang": "Python", "max_stars_repo_path": "Figures_tables/7_Fig4A_B_C.py", "max_stars_repo_name": "jlanillos/clinAcc_PGx_WES", "max_stars_repo_head_hexsha": "cc9a5dc89520b05793b5e7fda1aa7cb953d22ff9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-24T22:25:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T21:24:05.000Z", "max_issues_repo_path": "Figures_tables/7_Fig4A_B_C.py", "max_issues_repo_name": "jlanillos/clinAcc_PGx_WES", "max_issues_repo_head_hexsha": "cc9a5dc89520b05793b5e7fda1aa7cb953d22ff9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Figures_tables/7_Fig4A_B_C.py", "max_forks_repo_name": "jlanillos/clinAcc_PGx_WES", "max_forks_repo_head_hexsha": "cc9a5dc89520b05793b5e7fda1aa7cb953d22ff9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.5042735043, "max_line_length": 275, "alphanum_fraction": 0.7020765645, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4379}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import json
import os
import numpy as np
import ray
import ray.services
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
def _try_import_strategy():
    """Late import for TensorFlow.

    Deferred so that TF is only imported after TF_CONFIG has been set in
    the environment (see TFRunner.setup_distributed).
    """
    import tensorflow as tf
    return tf.distribute.experimental.MultiWorkerMirroredStrategy
class TFRunner:
    """Manages a TensorFlow model for training.

    Holds the Keras model and train/test datasets built from the supplied
    creator functions, and exposes step/validate/get_state/set_state so a
    trainer can drive it locally or as a distributed Ray actor.
    """

    def __init__(self, model_creator, data_creator, config=None,
                 verbose=False):
        """Initializes the runner.

        Args:
            model_creator (dict -> Model): see tf_trainer.py.
            data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
            config (dict): see tf_trainer.py.
            verbose (bool): Outputs training data if true.
        """
        self.model_creator = model_creator
        self.data_creator = data_creator
        self.config = {} if config is None else config
        self.epoch = 0
        self.verbose = verbose

    def setup(self):
        """Initializes the model (single-process, non-distributed path)."""
        logger.debug("Creating dataset")
        self.train_dataset, self.test_dataset = self.data_creator(self.config)
        logger.debug("Creating model")
        self.model = self.model_creator(self.config)

    def setup_distributed(self, urls, world_rank, world_size):
        """Sets up TensorFlow distributed environment and initializes the model.

        Args:
            urls (list[str]): the URLs that each node uses to connect.
            world_rank (int): the index of the runner.
            world_size (int): the total number of runners.
        """
        assert len(urls) == world_size
        # TF_CONFIG tells tf.distribute the cluster topology; it must be in
        # the environment BEFORE the strategy (and TF itself) is imported.
        tf_config = {
            "cluster": {
                "worker": urls
            },
            "task": {
                "index": world_rank,
                "type": "worker"
            }
        }
        os.environ["TF_CONFIG"] = json.dumps(tf_config)
        MultiWorkerMirroredStrategy = _try_import_strategy()
        self.strategy = MultiWorkerMirroredStrategy()
        self.train_dataset, self.test_dataset = self.data_creator(self.config)
        logger.debug("Creating model with MultiWorkerMirroredStrategy")
        # Variables created inside the strategy scope are mirrored across
        # the workers.
        with self.strategy.scope():
            self.model = self.model_creator(self.config)
        # For use in model.evaluate()
        self.local_model = None

    def step(self):
        """Runs a training epoch and updates the model parameters.

        Returns:
            dict: last-epoch value of each Keras history metric, keyed as
            "train_<metric>"; empty if fit() returned no history.
        """
        fit_default_config = {"verbose": self.verbose}
        fit_default_config.update(self.config.get("fit_config", {}))
        history = self.model.fit(self.train_dataset, **fit_default_config)
        if history is None:
            stats = {}
        else:
            # Keep only the most recent epoch's value of each metric.
            stats = {"train_" + k: v[-1] for k, v in history.history.items()}
        self.epoch += 1
        return stats

    def validate(self):
        """Evaluates the model on the validation data set.

        Returns:
            dict: metric values keyed as "validation_<metric>", or
            {"loss": value} when evaluate() returns a scalar.
        """
        stats = {}
        evaluate_config = {"verbose": self.verbose}
        evaluate_config.update(self.config.get("evaluate_config", {}))
        results = self.model.evaluate(self.test_dataset, **evaluate_config)
        if results is None:
            # Using local Model since model.evaluate() returns None
            # for MultiWorkerMirroredStrategy
            logger.warning("Running a local model to get validation score.")
            self.local_model = self.model_creator(self.config)
            self.local_model.set_weights(self.model.get_weights())
            results = self.local_model.evaluate(self.test_dataset,
                                                **evaluate_config)
        if isinstance(results, list):
            stats = {
                "validation_" + k: v
                for k, v in zip(self.model.metrics_names, results)
            }
        else:
            stats = {"loss": results}
        return stats

    def get_state(self):
        """Returns the state of the runner (epoch + model/optimizer weights)."""
        return {
            "epoch": self.epoch,
            "weights": self.model.get_weights(),
            "optimizer_weights": self.model.optimizer.get_weights()
        }

    def set_state(self, state):
        """Sets the state of the model.

        Rebuilds the model from the creator, then restores weights and
        optimizer state produced by get_state().
        """
        self.model = self.model_creator(self.config)
        self.epoch = state["epoch"]
        self.model.set_weights(state["weights"])
        # This part is due to ray.get() changing scalar np.int64 object to int
        state["optimizer_weights"][0] = np.array(
            state["optimizer_weights"][0], dtype=np.int64)
        # Optimizer slots only exist after the train function is built.
        if self.model.optimizer.weights == []:
            self.model._make_train_function()
        self.model.optimizer.set_weights(state["optimizer_weights"])

    def shutdown(self):
        """Attempts to shut down the worker (drops model and datasets)."""
        del self.model
        del self.train_dataset
        del self.test_dataset

    def get_node_ip(self):
        """Returns the IP address of the current node."""
        return ray.services.get_node_ip_address()

    def find_free_port(self):
        """Finds a free port on the current node."""
        return utils.find_free_port()
|
{"hexsha": "13903c073fdce24ef4acd7ac573c3c58b7e7cd88", "size": 5218, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/ray/experimental/sgd/tf/tf_runner.py", "max_stars_repo_name": "sunho/ray", "max_stars_repo_head_hexsha": "0ac8138b26cc66978df150c89ef291263f23c9a1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-06-17T12:38:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-11T07:52:26.000Z", "max_issues_repo_path": "python/ray/experimental/sgd/tf/tf_runner.py", "max_issues_repo_name": "sunho/ray", "max_issues_repo_head_hexsha": "0ac8138b26cc66978df150c89ef291263f23c9a1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-08-15T19:19:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-30T01:54:46.000Z", "max_forks_repo_path": "python/ray/experimental/sgd/tf/tf_runner.py", "max_forks_repo_name": "sunho/ray", "max_forks_repo_head_hexsha": "0ac8138b26cc66978df150c89ef291263f23c9a1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-31T23:20:07.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-13T20:16:03.000Z", "avg_line_length": 33.0253164557, "max_line_length": 80, "alphanum_fraction": 0.6101954772, "include": true, "reason": "import numpy", "num_tokens": 1038}
|
import numpy as np
from pyqtgraph import ImageView, PlotItem
from qtpy import QtCore
from qtpy.QtWidgets import *
class ImageViewModule(QFrame):
    """
    Wraps pyqtgraph's ImageView, takes care of configuring it, and adds
    a setImage convenience method with percentile-based level scaling.
    """
    def __init__(self, main_widget, histogram=True, crop_selector=False):
        """Build the widget.

        Args:
            main_widget: owning widget; provides data_handler and the
                eventFilterCustom installed on the sub-widgets below.
            histogram (bool): show the histogram/levels side panel.
            crop_selector (bool): show the ROI button.
        """
        super().__init__()
        self.main_widget = main_widget
        # self.setMinimumWidth(600)
        # self.setMinimumHeight(300)
        # self.setStyleSheet("ImageViewModule {margin:5px; border:1px solid rgb(50, 65, "
        #                    "75);} ")
        self.setStyleSheet("ImageViewModule {margin:0px; border:0px solid rgb(50, 65, "
                           "75); padding: 0px;} ")
        self.layout = QVBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        # self.layout.setAlignment(Qt.AlignHCenter)
        self.image_label = QLabel()
        self.layout.addWidget(self.image_label)
        self.setLayout(self.layout)
        # self.already_loaded = True
        # self.no_image_message = QPushButton("Please open a dataset first")
        # self.no_image_message.clicked.connect(main_widget.open_file_dialog)
        # self.no_image_message.setStyleSheet("QPushButton {font-size:80;}")
        self.plot_item = PlotItem()
        self.image_view = ImageView(view=self.plot_item)
        # Route the view's key events through this module's handler below.
        self.image_view.keyPressEvent = self.keyPressEvent
        self.image_view.ui.layoutWidget.setContentsMargins(0, 0, 0, 0)
        # self.image_view.ui.roiBtn.hide()
        self.image_view.ui.menuBtn.hide()
        if not histogram:
            self.image_view.ui.histogram.hide()
        if not crop_selector:
            self.image_view.ui.roiBtn.hide()
        # self.image_view.getRoiPlot().hide()
        self.image_item = self.image_view.getImageItem()
        # Forward events from the inner pyqtgraph widgets to the app-wide
        # custom event filter.
        self.image_view.installEventFilter(self.main_widget.eventFilterCustom)
        self.image_item.installEventFilter(self.main_widget.eventFilterCustom)
        self.plot_item.installEventFilter(self.main_widget.eventFilterCustom)
        self.layout.addWidget(self.image_view)
    @property
    def data_handler(self):
        # Convenience accessor for the main widget's data handler.
        return self.main_widget.data_handler
    def keyPressEvent(self, ev):
        """Keyboard navigation for the image stack.

        Space toggles playback (currently disabled via `and False`),
        Home/End jump to the first/last frame, other navigation keys are
        delegated to pyqtgraph's key-state machinery.
        """
        # NOTE: the `and False` deliberately disables space-bar playback.
        if ev.key() == QtCore.Qt.Key_Space and False:
            if self.image_view.playRate == 0:
                fps = (self.image_view.getProcessedImage().shape[0] - 1) / (
                        self.image_view.tVals[-1] - self.image_view.tVals[0])
                self.image_view.play(fps)
                # print fps
            else:
                self.image_view.play(0)
            ev.accept()
        elif ev.key() == QtCore.Qt.Key_Home:
            self.image_view.setCurrentIndex(0)
            self.image_view.play(0)
            ev.accept()
        elif ev.key() == QtCore.Qt.Key_End:
            self.image_view.setCurrentIndex(
                self.image_view.getProcessedImage().shape[0] - 1)
            self.image_view.play(0)
            ev.accept()
        elif ev.key() in self.image_view.noRepeatKeys:
            ev.accept()
            if ev.isAutoRepeat():
                return
            self.image_view.keysPressed[ev.key()] = 1
            self.image_view.evalKeyState()
        else:
            QWidget.keyPressEvent(self.image_view, ev)
    def setImage(self, data):
        """Display `data` and auto-scale display levels.

        Levels are set to the 5th-95th percentile of the data; the
        histogram range is the slightly wider 2nd-98th percentile.
        """
        # if self.already_loaded == False:
        #     print("changed image")
        #     self.already_loaded = True
        #     self.layout.removeWidget(self.no_image_message)
        #     self.no_image_message.deleteLater()
        #     # self.layout.setAlignment(Qt.AlignLeft)
        #     self.image_view = ImageView()
        #
        #     self.layout.addWidget(self.image_view)
        self.image_view.setImage(data, levelMode='mono', autoRange=True,
                                 autoLevels=True, autoHistogramRange=True)
        bottom_5 = np.percentile(data, 5)
        top_5 = np.percentile(data, 95)
        bottom_2 = np.percentile(data, 2)
        top_2 = np.percentile(data, 98)
        self.image_view.setLevels(bottom_5, top_5)
        self.image_view.setHistogramRange(bottom_2, top_2)
|
{"hexsha": "69f434abfae875dfb1d2ce72b648618e9df6f49b", "size": 4182, "ext": "py", "lang": "Python", "max_stars_repo_path": "cidan/GUI/ImageView/ImageViewModule.py", "max_stars_repo_name": "Mishne-Lab/cidan", "max_stars_repo_head_hexsha": "3f579b6d5a49e17690e9aa07dfb60d3e8c05e681", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-24T17:47:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-20T16:19:53.000Z", "max_issues_repo_path": "cidan/GUI/ImageView/ImageViewModule.py", "max_issues_repo_name": "Mishne-Lab/cidan", "max_issues_repo_head_hexsha": "3f579b6d5a49e17690e9aa07dfb60d3e8c05e681", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-10-03T21:48:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T23:56:42.000Z", "max_forks_repo_path": "cidan/GUI/ImageView/ImageViewModule.py", "max_forks_repo_name": "Mishne-Lab/cidan", "max_forks_repo_head_hexsha": "3f579b6d5a49e17690e9aa07dfb60d3e8c05e681", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-12T18:47:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-12T18:47:22.000Z", "avg_line_length": 41.82, "max_line_length": 89, "alphanum_fraction": 0.6174079388, "include": true, "reason": "import numpy", "num_tokens": 920}
|
[STATEMENT]
lemma suffix_eval: "(\<sigma> |\<^sub>s i) j = \<sigma> (j + i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<sigma> |\<^sub>s i) j = \<sigma> (j + i)
[PROOF STEP]
unfolding suffix_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<sigma> (j + i) = \<sigma> (j + i)
[PROOF STEP]
by simp
|
{"llama_tokens": 143, "file": "ConcurrentIMP_Infinite_Sequences", "length": 2}
|
import pandas as pd
import numpy as np
def test(start_date, end_date, ticker_list, data_source, time_interval,
            technical_indicator_list, drl_lib, env, model_name, if_vix = True,
            **kwargs):
    """Run a trained DRL agent over a test period and return its equity curve.

    Downloads and prepares market data, builds a non-training trading
    environment and dispatches prediction to the requested DRL back-end
    ('elegantrl', 'rllib' or 'stable_baselines3').

    Returns:
        Episode total assets as produced by the chosen back-end's
        DRL_prediction call.

    Raises:
        ValueError: if `drl_lib` is not one of the supported back-ends.
    """
    from finrl.apps import config
    # DRL agent wrappers for each supported back-end.
    from finrl.drl_agents.stablebaselines3.models import DRLAgent as DRLAgent_sb3
    from finrl.drl_agents.rllib.models import DRLAgent as DRLAgent_rllib
    from finrl.drl_agents.elegantrl.models import DRLAgent as DRLAgent_erl
    # Data processor abstracts the market-data source.
    from finrl.neo_finrl.data_processor import DataProcessor

    # Fetch and prepare market data.
    processor = DataProcessor(data_source, **kwargs)
    market_data = processor.download_data(ticker_list, start_date, end_date,
                                          time_interval)
    market_data = processor.clean_data(market_data)
    market_data = processor.add_technical_indicator(market_data,
                                                    technical_indicator_list)
    if if_vix:
        market_data = processor.add_vix(market_data)
    price_array, tech_array, turbulence_array = processor.df_to_array(
        market_data, if_vix)

    # Build the environment in evaluation mode.
    env_instance = env(config={
        'price_array': price_array,
        'tech_array': tech_array,
        'turbulence_array': turbulence_array,
        'if_train': False,
    })

    # elegantrl needs the net dimension; all back-ends need the model dir.
    net_dimension = kwargs.get('net_dimension', 2 ** 7)
    cwd = kwargs.get('cwd', './' + str(model_name))
    print("price_array: ", len(price_array))

    if drl_lib == 'elegantrl':
        return DRLAgent_erl.DRL_prediction(
            model_name=model_name,
            cwd=cwd,
            net_dimension=net_dimension,
            environment=env_instance)
    if drl_lib == 'rllib':
        # rllib loads the agent from a checkpoint path.
        return DRLAgent_rllib.DRL_prediction(
            model_name=model_name,
            env=env,
            price_array=price_array,
            tech_array=tech_array,
            turbulence_array=turbulence_array,
            agent_path=cwd)
    if drl_lib == 'stable_baselines3':
        return DRLAgent_sb3.DRL_prediction_load_from_file(
            model_name=model_name,
            environment=env_instance,
            cwd=cwd)
    raise ValueError('DRL library input is NOT supported. Please check.')
if __name__ == '__main__':
    # Fix: `ray` is used below (ray.shutdown()) but was never imported,
    # which raised a NameError before the rllib demo could run.
    import ray

    # Fix: the config module lives under finrl.apps (the same package the
    # test() function above imports from); 'finrl.app.config' was a typo.
    # Unused ERL_PARAMS / SAC_PARAMS imports were removed.
    from finrl.apps.config import DOW_30_TICKER
    from finrl.apps.config import TECHNICAL_INDICATORS_LIST
    from finrl.apps.config import TEST_START_DATE
    from finrl.apps.config import TEST_END_DATE
    from finrl.apps.config import RLlib_PARAMS

    # construct environment
    from finrl.neo_finrl.env_stock_trading.env_stock_trading import StockTradingEnv
    env = StockTradingEnv

    # demo for elegantrl
    account_value_erl = test(start_date=TEST_START_DATE,
                             end_date=TEST_END_DATE,
                             ticker_list=DOW_30_TICKER,
                             data_source='yahoofinance',
                             time_interval='1D',
                             technical_indicator_list=TECHNICAL_INDICATORS_LIST,
                             drl_lib='elegantrl',
                             env=env,
                             model_name='ppo',
                             cwd='./test_ppo',
                             net_dimension=512)

    # demo for rllib
    ray.shutdown()  # always shutdown previous session if any
    account_value_rllib = test(start_date=TEST_START_DATE,
                               end_date=TEST_END_DATE,
                               ticker_list=DOW_30_TICKER,
                               data_source='yahoofinance',
                               time_interval='1D',
                               technical_indicator_list=TECHNICAL_INDICATORS_LIST,
                               drl_lib='rllib',
                               env=env,
                               model_name='ppo',
                               cwd='./test_ppo/checkpoint_000030/checkpoint-30',
                               rllib_params=RLlib_PARAMS)

    # demo for stable baselines3
    account_value_sb3 = test(start_date=TEST_START_DATE,
                             end_date=TEST_END_DATE,
                             ticker_list=DOW_30_TICKER,
                             data_source='yahoofinance',
                             time_interval='1D',
                             technical_indicator_list=TECHNICAL_INDICATORS_LIST,
                             drl_lib='stable_baselines3',
                             env=env,
                             model_name='sac',
                             cwd='./test_sac.zip')
|
{"hexsha": "8620bdbdbcb033ce51b7551fd691ae284474ca7b", "size": 5080, "ext": "py", "lang": "Python", "max_stars_repo_path": "finrl/test.py", "max_stars_repo_name": "puneeth714/FinRL", "max_stars_repo_head_hexsha": "ec71c84342f7b78cf91d5c32e16e5fc88f24bc56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1949, "max_stars_repo_stars_event_min_datetime": "2020-09-23T03:02:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T18:43:35.000Z", "max_issues_repo_path": "finrl/test.py", "max_issues_repo_name": "puneeth714/FinRL", "max_issues_repo_head_hexsha": "ec71c84342f7b78cf91d5c32e16e5fc88f24bc56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 154, "max_issues_repo_issues_event_min_datetime": "2020-09-23T03:05:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-11T23:57:16.000Z", "max_forks_repo_path": "finrl/test.py", "max_forks_repo_name": "puneeth714/FinRL", "max_forks_repo_head_hexsha": "ec71c84342f7b78cf91d5c32e16e5fc88f24bc56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 478, "max_forks_repo_forks_event_min_datetime": "2020-10-14T19:01:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T17:36:02.000Z", "avg_line_length": 42.6890756303, "max_line_length": 83, "alphanum_fraction": 0.5559055118, "include": true, "reason": "import numpy", "num_tokens": 1038}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import argparse
import pprint
import tqdm
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from datasets import get_dataloader
from transforms import get_transform
from tasks import get_task
import utils.config
import utils.checkpoint
def get_center(vectors):
    """Return the L2-normalized mean of a set of feature vectors.

    Accepts either a batch averaging to a single 1-D vector, or a batch
    averaging to a 2-D (n, 512) matrix, in which case each row is
    normalized independently.
    """
    mean_vec = np.mean(vectors, axis=0)
    if mean_vec.ndim == 1:
        return mean_vec / np.linalg.norm(mean_vec)
    if mean_vec.ndim == 2:
        # Row-wise normalization; feature dimension is fixed at 512.
        assert mean_vec.shape[1] == 512
        return mean_vec / np.linalg.norm(mean_vec, axis=1, keepdims=True)
    assert False, mean_vec.shape
def get_nearest_k(center, features, k, threshold):
    """Select the features most similar to `center` (dot-product similarity).

    When there are more than 10 candidates and more than `k` of them exceed
    `threshold`, that whole high-similarity cluster is returned (original
    order); otherwise the `k` most similar features are returned, sorted by
    similarity descending.

    Fixes: the `threshold` parameter was previously ignored (0.5 was
    hard-coded — the only call site passes 0.5, so behavior is unchanged),
    and two dead `distances` assignments were removed.
    """
    scored = [(feature, np.dot(center, feature)) for feature in features]
    if len(scored) > 10:
        close = [feature for feature, sim in scored if sim > threshold]
        if len(close) > k:
            return close
    # Fall back to the plain top-k by similarity.
    ranked = [feature for feature, sim in
              sorted(scored, key=lambda v: v[1], reverse=True)]
    return ranked[:k]
def get_image_center(features):
    """Robust center of a feature set.

    For four or more features, up to two rounds of outlier trimming are
    applied: compute the center, keep the 3/4 of features nearest to it,
    and stop early if fewer than four remain. The final center of the
    surviving features is returned.
    """
    remaining = features
    if len(remaining) >= 4:
        for _round in range(2):
            center = get_center(remaining)
            keep_count = int(len(remaining) * 3 / 4)
            remaining = get_nearest_k(center, remaining, keep_count, 0.5)
            if len(remaining) < 4:
                break
    return get_center(remaining)
def average_features(features, id_list):
    """Collapse per-image features into one feature per identity.

    Identities are processed in sorted order. An id with a single feature
    keeps it as-is; an id with several features is represented by their
    normalized center (get_center).

    Returns:
        (np.ndarray, list): stacked per-id features and the matching
        sorted id list.
    """
    per_id_features = []
    kept_ids = []
    for whale_id in sorted(set(id_list)):
        # 'new_whale' must have been filtered out upstream.
        assert whale_id != 'new_whale'
        matched = np.stack(
            [feat for feat, fid in zip(features, id_list) if fid == whale_id],
            axis=0)
        if matched.shape[0] == 1:
            per_id_features.append(matched[0])
        else:
            per_id_features.append(get_center(matched))
        kept_ids.append(whale_id)
    stacked = np.stack(per_id_features, axis=0)
    assert stacked.shape[0] == len(kept_ids)
    return stacked, kept_ids
def inference(config, task, dataloader, ret_dict):
    """Extract an embedding feature for every image in `dataloader`.

    Appends each image's feature vector to ``ret_dict[key]`` (mutated in
    place, so repeated calls accumulate TTA variants per image key).

    Fix: removed two dead locals (`column_values_dict`, `metric_list_dict`)
    that were created but never used.

    Args:
        config: experiment config; only ``config.eval.batch_size`` is read
            here (to size the progress bar).
        task: model wrapper providing get_model()/forward()/inference().
        dataloader: yields dicts with 'image', 'key' and 'id' entries.
        ret_dict: defaultdict(list) mapping image key -> list of features.

    Returns:
        dict: image key -> id label.
    """
    task.get_model().eval()
    id_dict = {}
    with torch.no_grad():
        batch_size = config.eval.batch_size
        total_size = len(dataloader.dataset)
        total_step = math.ceil(total_size / batch_size)
        tbar = tqdm.tqdm(enumerate(dataloader), total=total_step)
        for i, data in tbar:
            images = data['image'].cuda()
            key_list = data['key']
            id_list = data['id']
            outputs = task.forward(images)
            predicts = task.inference(outputs=outputs)
            for key, Id, feature in zip(key_list, id_list,
                                        predicts['features'].cpu().numpy()):
                ret_dict[key].append(feature)
                id_dict[key] = Id
    return id_dict
def inference_single_tta(config, task, preprocess_opt, split, fold, flip, align, ret_dict):
    """Run one TTA (test-time augmentation) inference pass.

    Mutates `config` in place (alignment flag and landmark fold version)
    before building the dataloader; features are accumulated into
    `ret_dict` by `inference`. Returns the key -> id mapping.
    """
    config.transform.params.align = align
    # The 'test' split has a dedicated transform; every other split uses 'all'.
    transform = 'test' if split == 'test' else 'all'
    config.data.params.landmark_ver = fold
    dataloader = get_dataloader(config, split,
                                get_transform(config, transform, flip=flip, **preprocess_opt))
    id_dict = inference(config, task, dataloader, ret_dict)
    return id_dict
def inference_single_setting(config, task, preprocess_opt, split, flip, align, landmark_folds, ret_dict):
    """Run TTA inference once per landmark fold, accumulating into ret_dict.

    Returns the key -> id mapping from the final fold (the mapping is the
    same for every fold; only the accumulated features differ).
    """
    for fold in landmark_folds:
        last_id_dict = inference_single_tta(
            config, task, preprocess_opt, split, fold,
            flip=flip, align=align, ret_dict=ret_dict)
    return last_id_dict
def run(config, tta_flip, tta_landmark, checkpoint_name, output_path):
    """Compute the (test image x known id) similarity matrix and save as CSV.

    Restores the model from `checkpoint_name`, extracts TTA-averaged
    features for training ('known_whale') identities and for the test
    images, computes their dot-product similarities and writes one CSV row
    per test image with one column per known id.

    Args:
        config: experiment configuration (mutated in place by the TTA helpers).
        tta_flip (bool): additionally average over horizontally flipped inputs.
        tta_landmark (bool): average over landmark folds 0-5 (else fold 5 only).
        checkpoint_name (str): checkpoint file to restore.
        output_path (str): destination CSV path.
    """
    # NOTE(review): train_dir is assigned but never used in this function.
    train_dir = config.train.dir
    task = get_task(config)
    checkpoint = utils.checkpoint.get_checkpoint(config, checkpoint_name)
    last_epoch, step = utils.checkpoint.load_checkpoint(task.get_model(),
                                                        None,
                                                        checkpoint)
    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    preprocess_opt = task.get_preprocess_opt()
    config.data.params.train_csv = 'train.csv'
    landmark_folds = range(6) if tta_landmark else [5]
    ###########################################################################
    # train features
    # non-flip
    # default (unaligned) pass; features accumulate per image key in ret_dict
    ret_dict = defaultdict(list)
    id_dict = inference_single_setting(config, task, preprocess_opt,
            'known_whale', flip=False, align=False, landmark_folds=landmark_folds, ret_dict=ret_dict)
    # align
    id_dict = inference_single_setting(config, task, preprocess_opt,
            'known_whale', flip=False, align=True, landmark_folds=landmark_folds, ret_dict=ret_dict)
    # Group per-image features by identity, then reduce each identity to a
    # single robust center vector.
    id_features_dict = defaultdict(list)
    for key, features in ret_dict.items():
        id_features_dict[id_dict[key]].extend(features)
    features_dict_ori = {key:get_image_center(features) for key, features in id_features_dict.items()}
    id_list_ori = list(sorted(features_dict_ori.keys()))
    features_ori = np.stack([features_dict_ori[Id] for Id in id_list_ori], axis=0)
    # flip
    # default — same pipeline as above, but with horizontally flipped inputs
    if tta_flip:
        ret_dict = defaultdict(list)
        id_dict = inference_single_setting(config, task, preprocess_opt,
                'known_whale', flip=True, align=False, landmark_folds=landmark_folds, ret_dict=ret_dict)
        # align
        id_dict = inference_single_setting(config, task, preprocess_opt,
                'known_whale', flip=True, align=True, landmark_folds=landmark_folds, ret_dict=ret_dict)
        id_features_dict = defaultdict(list)
        for key, features in ret_dict.items():
            id_features_dict[id_dict[key]].extend(features)
        features_dict_flip = {key:get_image_center(features) for key, features in id_features_dict.items()}
        id_list_flip = list(sorted(features_dict_flip.keys()))
        features_flip = np.stack([features_dict_flip[Id] for Id in id_list_flip], axis=0)
        # Both passes must cover the same identities in the same order.
        assert id_list_ori == id_list_flip
    id_list = id_list_ori
    ###########################################################################
    # test features
    # non-flip
    # default — here features are reduced per image key (not per identity)
    ret_dict = defaultdict(list)
    id_dict = inference_single_setting(config, task, preprocess_opt,
            'test', flip=False, align=False, landmark_folds=landmark_folds, ret_dict=ret_dict)
    id_dict = inference_single_setting(config, task, preprocess_opt,
            'test', flip=False, align=True, landmark_folds=landmark_folds, ret_dict=ret_dict)
    features_dict = {key:get_image_center(features) for key, features in ret_dict.items()}
    test_key_list_ori = list(sorted(id_dict.keys()))
    test_features_ori = np.stack([features_dict[key] for key in test_key_list_ori], axis=0)
    # flip
    # default
    if tta_flip:
        ret_dict = defaultdict(list)
        id_dict = inference_single_setting(config, task, preprocess_opt,
                'test', flip=True, align=False, landmark_folds=landmark_folds, ret_dict=ret_dict)
        id_dict = inference_single_setting(config, task, preprocess_opt,
                'test', flip=True, align=True, landmark_folds=landmark_folds, ret_dict=ret_dict)
        features_dict = {key:get_image_center(features) for key, features in ret_dict.items()}
        test_key_list_flip = list(sorted(id_dict.keys()))
        test_features_flip = np.stack([features_dict[key] for key in test_key_list_flip], axis=0)
        assert test_key_list_flip == test_key_list_ori
    key_list = test_key_list_ori
    # calculate distance: dot products of (unit-normalized) feature vectors;
    # with flip TTA the two similarity matrices are averaged.
    m_ori = np.matmul(test_features_ori, features_ori.transpose())
    if tta_flip:
        m_flip = np.matmul(test_features_flip, features_flip.transpose())
        m = np.mean(np.stack([m_ori, m_flip],axis=0), axis=0)
    else:
        m = m_ori
    # One CSV row per test image; similarity values serialized at 8 decimals.
    records = []
    for i, scores in enumerate(m):
        records.append(tuple([key_list[i]] + ['{:08f}'.format(v) for v in scores]))
    columns = ['Image'] + id_list
    assert len(records[0]) == len(columns)
    df_distance = pd.DataFrame.from_records(records, columns=columns)
    df_distance.to_csv(output_path, index=False)
def parse_args():
    """Parse command-line options for the similarity-inference script.

    Options: --config (required in practice, checked by main()),
    --checkpoint_name, --output_path, and the integer TTA toggles
    --tta_flip / --tta_landmark (1 = enabled).
    """
    parser = argparse.ArgumentParser(
        description='inference similarities of whales')
    # (flag, dest, help, default, type) for each option.
    option_specs = [
        ('--config', 'config_file', 'configuration filename', None, str),
        ('--checkpoint_name', 'checkpoint_name', 'checkpoint name',
         'best.score.pth', str),
        ('--output_path', 'output_path', 'output path', 'output.csv', str),
        ('--tta_flip', 'tta_flip', 'tta flip', 1, int),
        ('--tta_landmark', 'tta_landmark', 'tta landmark', 1, int),
    ]
    for flag, dest, help_text, default, value_type in option_specs:
        parser.add_argument(flag, dest=dest, help=help_text,
                            default=default, type=value_type)
    return parser.parse_args()
def main():
    """Entry point: load config from CLI args and run similarity inference.

    Raises:
        Exception: when no --config argument was supplied.
    """
    import warnings
    warnings.filterwarnings("ignore")
    print('inference similarities of whales')
    cli_args = parse_args()
    if cli_args.config_file is None:
        raise Exception('no configuration file')
    config = utils.config.load(cli_args.config_file)
    pprint.PrettyPrinter(indent=2).pprint(config)
    # Make sure the output directory exists before run() writes the CSV.
    os.makedirs(os.path.dirname(cli_args.output_path), exist_ok=True)
    run(config,
        tta_flip=(cli_args.tta_flip == 1),
        tta_landmark=(cli_args.tta_landmark == 1),
        checkpoint_name=cli_args.checkpoint_name,
        output_path=cli_args.output_path)
    print('success!')
# Script entry point.
if __name__ == '__main__':
    main()
|
{"hexsha": "59e241022d5f1c82b9fb386882f0dcc1b7864e77", "size": 10642, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaggle_humpback/inference_similarity.py", "max_stars_repo_name": "maxjeblick/kaggle-humpback", "max_stars_repo_head_hexsha": "78674fc8761490fafc2db825ccbebcec89508ca2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 167, "max_stars_repo_stars_event_min_datetime": "2019-03-08T03:34:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:01:17.000Z", "max_issues_repo_path": "inference_similarity.py", "max_issues_repo_name": "zhenlan0426/kaggle-humpback", "max_issues_repo_head_hexsha": "c975332a99bec9c2485fea17c831f52f9a77736f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-17T07:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-24T11:20:46.000Z", "max_forks_repo_path": "inference_similarity.py", "max_forks_repo_name": "zhenlan0426/kaggle-humpback", "max_forks_repo_head_hexsha": "c975332a99bec9c2485fea17c831f52f9a77736f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2019-03-09T00:16:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T11:38:48.000Z", "avg_line_length": 37.6042402827, "max_line_length": 131, "alphanum_fraction": 0.6386017666, "include": true, "reason": "import numpy", "num_tokens": 2318}
|
"""Test the PetsKSP linear solver class."""
import unittest
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.misc_components import Comp4LinearCacheTest
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
from openmdao.test_suite.groups.implicit_group import TestImplicitGroup
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
@unittest.skipUnless(PETScVector is not None, "PETSc is required.")
class TestPETScKrylov(unittest.TestCase):
    """Unit tests for the PETScKrylov linear solver."""

    def test_options(self):
        """Verify that the PETScKrylov specific options are declared."""
        group = om.Group()
        group.linear_solver = om.PETScKrylov()

        # Use a unittest assertion rather than a bare parenthesized
        # `assert(...)` so a failure reports both actual and expected values.
        self.assertEqual(group.linear_solver.options['ksp_type'], 'fgmres')
def test_solve_linear_ksp_default(self):
"""Solve implicit system with PETScKrylov using default method."""
group = TestImplicitGroup(lnSolverClass=om.PETScKrylov)
p = om.Problem(group)
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
def test_solve_linear_ksp_gmres(self):
"""Solve implicit system with PETScKrylov using 'gmres' method."""
group = TestImplicitGroup(lnSolverClass=om.PETScKrylov)
group.linear_solver.options['ksp_type'] = 'gmres'
p = om.Problem(group)
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
def test_solve_linear_ksp_maxiter(self):
"""Verify that PETScKrylov abides by the 'maxiter' option."""
group = TestImplicitGroup(lnSolverClass=om.PETScKrylov)
group.linear_solver.options['maxiter'] = 2
p = om.Problem(group)
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.run_solve_linear(['linear'], 'fwd')
self.assertTrue(group.linear_solver._iter_count == 3)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.run_solve_linear(['linear'], 'rev')
self.assertTrue(group.linear_solver._iter_count == 3)
def test_solve_linear_ksp_precon(self):
"""Solve implicit system with PETScKrylov using a preconditioner."""
group = TestImplicitGroup(lnSolverClass=om.PETScKrylov)
precon = group.linear_solver.precon = om.LinearBlockGS()
p = om.Problem(group)
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
self.assertTrue(precon._iter_count > 0)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 3e-15)
self.assertTrue(precon._iter_count > 0)
# test the direct solver and make sure KSP correctly recurses for _linearize
precon = group.linear_solver.precon = om.DirectSolver(assemble_jac=False)
p.setup()
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.linear_solver._linearize()
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.linear_solver._linearize()
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 3e-15)
def test_solve_linear_ksp_precon_left(self):
"""Solve implicit system with PETScKrylov using a preconditioner."""
group = TestImplicitGroup(lnSolverClass=om.PETScKrylov)
precon = group.linear_solver.precon = om.DirectSolver(assemble_jac=False)
group.linear_solver.options['precon_side'] = 'left'
group.linear_solver.options['ksp_type'] = 'richardson'
p = om.Problem(group)
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.run_linearize()
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.run_linearize()
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 3e-15)
# test the direct solver and make sure KSP correctly recurses for _linearize
precon = group.linear_solver.precon = om.DirectSolver(assemble_jac=False)
group.linear_solver.options['precon_side'] = 'left'
group.linear_solver.options['ksp_type'] = 'richardson'
p.setup()
# Conclude setup but don't run model.
p.final_setup()
d_inputs, d_outputs, d_residuals = group.get_linear_vectors()
# forward
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
group.linear_solver._linearize()
group.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, group.expected_solution, 1e-15)
# reverse
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
group.linear_solver._linearize()
group.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, group.expected_solution, 3e-15)
def test_solve_on_subsystem(self):
"""solve an implicit system with KSP attached anywhere but the root"""
p = om.Problem()
model = p.model
dv = model.add_subsystem('des_vars', om.IndepVarComp())
# just need a dummy variable so the sizes don't match between root and g1
dv.add_output('dummy', val=1.0, shape=10)
g1 = model.add_subsystem('g1', TestImplicitGroup(lnSolverClass=om.PETScKrylov))
p.setup()
p.set_solver_print(level=0)
# Conclude setup but don't run model.
p.final_setup()
# forward
d_inputs, d_outputs, d_residuals = g1.get_linear_vectors()
d_residuals.set_val(1.0)
d_outputs.set_val(0.0)
g1.run_solve_linear(['linear'], 'fwd')
output = d_outputs.asarray()
assert_near_equal(output, g1.expected_solution, 1e-15)
# reverse
d_inputs, d_outputs, d_residuals = g1.get_linear_vectors()
d_outputs.set_val(1.0)
d_residuals.set_val(0.0)
g1.linear_solver._linearize()
g1.run_solve_linear(['linear'], 'rev')
output = d_residuals.asarray()
assert_near_equal(output, g1.expected_solution, 3e-15)
def test_linear_solution_cache(self):
# Test derivatives across a converged Sellar model. When caching
# is performed, the second solve takes less iterations than the
# first one.
# Forward mode
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('d1', Comp4LinearCacheTest(), promotes=['x', 'y'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.add_design_var('x', cache_linear_solution=True)
model.add_objective('y', cache_linear_solution=True)
prob.setup(mode='fwd')
prob.set_solver_print(level=0)
prob.run_model()
J = prob.driver._compute_totals(of=['y'], wrt=['x'], use_abs_names=False,
return_format='flat_dict')
icount1 = prob.model.linear_solver._iter_count
J = prob.driver._compute_totals(of=['y'], wrt=['x'], use_abs_names=False,
return_format='flat_dict')
icount2 = prob.model.linear_solver._iter_count
# Should take less iterations when starting from previous solution.
self.assertTrue(icount2 < icount1)
# Reverse mode
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('d1', Comp4LinearCacheTest(), promotes=['x', 'y'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.add_design_var('x', cache_linear_solution=True)
model.add_objective('y', cache_linear_solution=True)
prob.setup(mode='rev')
prob.set_solver_print(level=0)
prob.run_model()
J = prob.driver._compute_totals(of=['y'], wrt=['x'], use_abs_names=False,
return_format='flat_dict')
icount1 = prob.model.linear_solver._iter_count
J = prob.driver._compute_totals(of=['y'], wrt=['x'], use_abs_names=False,
return_format='flat_dict')
icount2 = prob.model.linear_solver._iter_count
# Should take less iterations when starting from previous solution.
self.assertTrue(icount2 < icount1)
def test_error_under_cs(self):
"""Verify that PETScKrylov abides by the 'maxiter' option."""
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
model.linear_solver = om.PETScKrylov()
model.approx_totals(method='cs')
prob.setup(mode='fwd')
prob.set_solver_print(level=0)
prob.run_model()
with self.assertRaises(RuntimeError) as cm:
J = prob.compute_totals(of=['obj'], wrt=['z'])
msg = 'PETScKrylov in <model> <class Group>: PETScKrylov solver is not supported under complex step.'
self.assertEqual(str(cm.exception), msg)
@unittest.skipUnless(PETScVector, "PETSc is required.")
class TestPETScKrylovSolverFeature(unittest.TestCase):
    """Feature-documentation tests for the PETScKrylov solver."""

    def test_specify_solver(self):
        import numpy as np
        import openmdao.api as om
        from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives

        prob = om.Problem()
        model = prob.model

        model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
        model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
        model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                                   z=np.array([0.0, 0.0]), x=0.0),
                            promotes=['obj', 'x', 'z', 'y1', 'y2'])
        model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
        model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])

        model.nonlinear_solver = om.NonlinearBlockGS()

        # select PETScKrylov as the model's linear solver
        model.linear_solver = om.PETScKrylov()

        prob.setup()
        prob.set_val('x', 1.)
        prob.set_val('z', np.array([5.0, 2.0]))
        prob.run_model()

        totals = prob.compute_totals(of=['obj'], wrt=['z'], return_format='flat_dict')
        dobj_dz = totals['obj', 'z'][0]
        assert_near_equal(dobj_dz[0], 9.61001056, .00001)
        assert_near_equal(dobj_dz[1], 1.78448534, .00001)
def test_specify_ksp_type(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, \
SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.linear_solver.options['ksp_type'] = 'gmres'
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
wrt = ['z']
of = ['obj']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['obj', 'z'][0][0], 9.61001056, .00001)
assert_near_equal(J['obj', 'z'][0][1], 1.78448534, .00001)
def test_feature_maxiter(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.linear_solver.options['maxiter'] = 3
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
wrt = ['z']
of = ['obj']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['obj', 'z'][0][0], 4.93218027, .00001)
assert_near_equal(J['obj', 'z'][0][1], 1.73406455, .00001)
def test_feature_atol(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.linear_solver.options['atol'] = 1.0e-20
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
wrt = ['z']
of = ['obj']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['obj', 'z'][0][0], 9.61001055699, .00001)
assert_near_equal(J['obj', 'z'][0][1], 1.78448533563, .00001)
def test_feature_rtol(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.PETScKrylov()
model.linear_solver.options['rtol'] = 1.0e-20
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
wrt = ['z']
of = ['obj']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_near_equal(J['obj', 'z'][0][0], 9.61001055699, .00001)
assert_near_equal(J['obj', 'z'][0][1], 1.78448533563, .00001)
def test_specify_precon(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, \
SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
model.linear_solver = om.PETScKrylov()
model.linear_solver.precon = om.LinearBlockGS()
model.linear_solver.precon.options['maxiter'] = 2
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
assert_near_equal(prob.get_val('y1'), 25.58830273, .00001)
assert_near_equal(prob.get_val('y2'), 12.05848819, .00001)
def test_specify_precon_left(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, \
SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
model.linear_solver = om.PETScKrylov()
model.linear_solver.precon = om.DirectSolver()
model.linear_solver.options['precon_side'] = 'left'
model.linear_solver.options['ksp_type'] = 'richardson'
prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()
assert_near_equal(prob.get_val('y1'), 25.58830273, .00001)
assert_near_equal(prob.get_val('y2'), 12.05848819, .00001)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
{"hexsha": "92c08026435c217211f4b6a80ac03269b72f6e64", "size": 22938, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmdao/solvers/linear/tests/test_petsc_ksp.py", "max_stars_repo_name": "anilyil/OpenMDAO", "max_stars_repo_head_hexsha": "97c6e589ccb00318093d7d17f0e853fba74ec1f9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openmdao/solvers/linear/tests/test_petsc_ksp.py", "max_issues_repo_name": "anilyil/OpenMDAO", "max_issues_repo_head_hexsha": "97c6e589ccb00318093d7d17f0e853fba74ec1f9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openmdao/solvers/linear/tests/test_petsc_ksp.py", "max_forks_repo_name": "anilyil/OpenMDAO", "max_forks_repo_head_hexsha": "97c6e589ccb00318093d7d17f0e853fba74ec1f9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1809815951, "max_line_length": 110, "alphanum_fraction": 0.6062429157, "include": true, "reason": "import numpy", "num_tokens": 6162}
|
'''
amplicon experiment (:mod:`calour.amplicon_experiment`)
=======================================================
.. currentmodule:: calour.amplicon_experiment
Classes
^^^^^^^
.. autosummary::
:toctree: generated
AmpliconExperiment
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
import numpy as np
import skbio
from .experiment import Experiment
from .util import _get_taxonomy_string, _to_list
logger = getLogger(__name__)
class AmpliconExperiment(Experiment):
    '''This class stores amplicon data and associated metadata.

    This is a child class of :class:`.Experiment`

    Parameters
    ----------
    data : numpy.ndarray or scipy.sparse.csr_matrix
        The abundance table for OTUs, metabolites, genes, etc. Samples
        are in row and features in column
    sample_metadata : pandas.DataFrame
        The metadata on the samples
    feature_metadata : pandas.DataFrame
        The metadata on the features
    description : str
        name of experiment
    sparse : bool
        store the data array in :class:`scipy.sparse.csr_matrix`
        or :class:`numpy.ndarray`

    Attributes
    ----------
    data : numpy.ndarray or scipy.sparse.csr_matrix
        The abundance table for OTUs, metabolites, genes, etc. Samples
        are in row and features in column
    sample_metadata : pandas.DataFrame
        The metadata on the samples
    feature_metadata : pandas.DataFrame
        The metadata on the features
    exp_metadata : dict
        metadata about the experiment (data md5, filenames, etc.)
    shape : tuple of (int, int)
        the dimension of data
    sparse : bool
        store the data as sparse matrix (scipy.sparse.csr_matrix) or dense numpy array.
    description : str
        name of the experiment
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # default annotation databases offered in the interactive heatmap
        self.heatmap_databases = ('dbbact',)
def filter_taxonomy(exp: Experiment, values, negate=False, inplace=False, substring=True):
'''filter keeping only observations with taxonomy string matching taxonomy
if substring=True, look for partial match instead of identity.
Matching is case insensitive
Parameters
----------
values : str or list of str
the taxonomy string/strings to filter (can be partial if substring is True)
negate : bool, optional
False (default) to keep matching taxonomies, True to remove matching taxonomies
inplace : bool, optional
do the filtering on the original :class:`.Experiment` object or a copied one.
substring : bool, optional
True (default) to do partial (substring) matching for the taxonomy string,
False to do exact matching
Returns
-------
AmpliconExperiment
With only features with matching taxonomy
'''
if 'taxonomy' not in exp.feature_metadata.columns:
logger.warning('No taxonomy field in experiment')
return None
values = _to_list(values)
taxstr = exp.feature_metadata['taxonomy'].str.lower()
select = np.zeros(len(taxstr), dtype=bool)
for cval in values:
if substring:
select += [cval.lower() in ctax for ctax in taxstr]
else:
select += [cval.lower() == ctax for ctax in taxstr]
if negate is True:
select = ~ select
logger.info('%s remaining' % np.sum(select))
return exp.reorder(select, axis=1, inplace=inplace)
def filter_fasta(exp: Experiment, filename, negate=False, inplace=False):
'''Filter features from experiment based on fasta file
Parameters
----------
filename : str
the fasta filename containing the sequences to use for filtering
negate : bool, optional
False (default) to keep only sequences matching the fasta file;
True to remove sequences in the fasta file.
inplace : bool, optional
False (default) to create a copy of the experiment, True to filter inplace
Returns
-------
newexp : Experiment
filtered so contains only sequence present in exp and in the fasta file
'''
logger.debug('Filter by sequence using fasta file %s' % filename)
okpos = []
tot_seqs = 0
for cseq in skbio.read(filename, format='fasta'):
tot_seqs += 1
cseq = str(cseq).upper()
if cseq in exp.feature_metadata.index:
pos = exp.feature_metadata.index.get_loc(cseq)
okpos.append(pos)
logger.debug('loaded %d sequences. found %d sequences in experiment' % (tot_seqs, len(okpos)))
if negate:
okpos = np.setdiff1d(np.arange(len(exp.feature_metadata.index)), okpos, assume_unique=True)
newexp = exp.reorder(okpos, axis=1, inplace=inplace)
return newexp
@Experiment._record_sig
def sort_taxonomy(exp: Experiment, inplace=False):
'''Sort the features based on the taxonomy
Sort features based on the taxonomy (alphabetical)
Parameters
----------
inplace : bool, optional
False (default) to create a copy
True to Replace data in exp
Returns
-------
Experiment
sorted by taxonomy
'''
logger.debug('sort features by taxonomies')
taxonomy = _get_taxonomy_string(exp, remove_underscore=True)
sort_pos = np.argsort(taxonomy, kind='mergesort')
exp = exp.reorder(sort_pos, axis=1, inplace=inplace)
return exp
@Experiment._record_sig
def filter_orig_reads(exp, minreads, **kwargs):
'''Filter keeping only samples with >= minreads in the original reads column
Note this function uses the _calour_original_abundance field rather than the current sum of sequences per sample.
So if you start with a sample with 100 reads, normalizing and filtering with other functions with not change the original reads column
(which will remain 100).
If you want to filter based on current total reads, use ``filter_by_data()`` instead
Parameters
----------
minreads : numeric
Keep only samples with >= minreads reads (when loaded - not affected by normalization)
Returns
-------
AmpliconExperiment - with only samples with enough original reads
'''
origread_field = '_calour_original_abundance'
if origread_field not in exp.sample_metadata.columns:
raise ValueError('%s field not initialzed. Did you load the data with calour.read_amplicon() ?' % origread_field)
good_pos = (exp.sample_metadata[origread_field] >= minreads).values
newexp = exp.reorder(good_pos, axis=0, **kwargs)
return newexp
def collapse_taxonomy(exp: Experiment, level='genus', inplace=False):
'''Collapse all features sharing the same taxonomy up to level into a single feature
Sums abundances of all features sharing the same taxonomy up to level.
Parameters
----------
level: str or int, optional
the level to bin the taxonmies. can be int (0=kingdom, 1=phylum,...6=species)
or a string ('kingdom' or 'k' etc.)
inplace : bool, optional
False (default) to create a copy
True to Replace data in exp
'''
level_dict = {'kingdom': 0, 'k': 0,
'phylum': 1, 'p': 1,
'class': 2, 'c': 2,
'order': 3, 'o': 3,
'family': 4, 'f': 4,
'genus': 5, 'g': 5,
'species': 6, 's': 6}
if not isinstance(level, int):
if level not in level_dict:
raise ValueError('Unsupported taxonomy level %s. Please use out of %s' % (level, list(level_dict.keys())))
level = level_dict[level]
if inplace:
newexp = exp
else:
newexp = exp.copy()
def _tax_level(tax_str, level):
# local function to get taxonomy up to given level
ctax = tax_str.split(';')
level += 1
if len(ctax) < level:
ctax.extend(['other'] * (level - len(ctax)))
return ';'.join(ctax[:level])
newexp.feature_metadata['_calour_tax_group'] = newexp.feature_metadata['taxonomy'].apply(_tax_level, level=level)
newexp.aggregate_by_metadata('_calour_tax_group', agg='sum', axis=1, inplace=True)
newexp.feature_metadata['taxonomy'] = newexp.feature_metadata['_calour_tax_group']
return newexp
def split_taxonomy(self, field='taxonomy', sep=';',
names=['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']):
'''Split taxonomy column into individual column per level.
Assume the taxonomy string is in QIIME style:
"k__Bacteria;p__Firmicutes;c__Bacilli;o__Bacillales;f__Staphylococcaceae;g__Staphylococcus;s__"
Parameters
----------
sep : str
the separator between taxa levels
names : list
the column names for the new columns split from ``field``
'''
self.feature_metadata[names] = self.feature_metadata[field].str.split(sep, expand=True)
# return so you can chain the functions
return self
def find_lowest_taxonomy(self, field='taxonomy', new_field='taxa'):
'''Create a new column that contains the taxonomy of lowest possible level.
For example, 'k__Bacteria; p__Firmicutes; c__Bacilli,
o__Lactobacillales; f__Enterococcaceae; g__Enterococcus,
s__' will return 'g__Enterococcus'
Parameters
----------
field : str
column name that contains all levels of taxonomy
new_field : str
new column name
Returns
-------
AmpliconExperiment
'''
def find_highest(s):
levels = s.split(';')
b = [len(i) > 3 for i in levels]
return np.array(levels)[b][-1]
self.feature_metadata[new_field] = self.feature_metadata[field].apply(find_highest)
return self
|
{"hexsha": "92db8baa8fcf1f4df1d776af6f4c25804dafed06", "size": 10704, "ext": "py", "lang": "Python", "max_stars_repo_path": "calour/amplicon_experiment.py", "max_stars_repo_name": "pennyneve/calour", "max_stars_repo_head_hexsha": "f255fa822d82bdbffa604e14126603c48b0daff4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "calour/amplicon_experiment.py", "max_issues_repo_name": "pennyneve/calour", "max_issues_repo_head_hexsha": "f255fa822d82bdbffa604e14126603c48b0daff4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calour/amplicon_experiment.py", "max_forks_repo_name": "pennyneve/calour", "max_forks_repo_head_hexsha": "f255fa822d82bdbffa604e14126603c48b0daff4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9103448276, "max_line_length": 142, "alphanum_fraction": 0.6044469357, "include": true, "reason": "import numpy", "num_tokens": 2330}
|
import math
import numpy as np
import pypact as pp
from tests.testerbase import Tester
DECIMAL_PLACE_ACC = 6
class GroupConvertUnitTest(Tester):
def _test_imp(self, in_group, in_values, out_group, expected_values, almost=False):
if almost:
np.testing.assert_almost_equal(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), err_msg="Assert group convert")
else:
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_overlap(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.5, 1.0]
expected_values = [0.5]
self._test_imp(in_group, in_values, out_group, expected_values)
def test_byenergy_simple_overlap2(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.0, 0.5]
expected_values = [0.5]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_adjacent(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [1.0, 1.5]
expected_values = [0.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_adjacent2(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [-1.0, 0.0]
expected_values = [0.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_same(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.0, 1.0]
expected_values = [1.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_same2(self):
in_group = [0.0, 1.0, 2.0]
in_values = [1.0, 0.7]
out_group = [0.0, 1.0, 2.0]
expected_values = [1.0, 0.7]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_negative1(self):
in_group = [-1.0, 0.0, 1.0]
in_values = [5.0, 8.0]
out_group = [0.0, 0.5, 0.75, 1.0]
expected_values = [4.0, 2.0, 2.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_negative2(self):
in_group = [-1.0, 0.0, 1.0]
in_values = [5.0, 8.0]
out_group = [-10.0, 0.5, 0.75, 1.0]
expected_values = [9.0, 2.0, 2.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_case1(self):
self._test_imp([0.2, 0.5], [8], [0., 0.4, 0.5],
[16./3., 8./3.], almost=True)
def test_byenergy_case2(self):
self._test_imp([0, 0.1, 2], [2, 3], [0.1, 0.25, 0.5, 0.75, 0.9],
[0.23684210526315788, 0.39473684210526316, 0.39473684210526305, 0.23684210526315788], almost=True)
def test_byenergy_case3(self):
self._test_imp([0, 0.2, 2], [2, 3], [0.1, 0.25, 0.5, 0.75, 0.9],
[1.0833333333333333, 0.41666666666666663, 0.41666666666666663, 0.25], almost=True)
def test_byenergy_case4(self):
self._test_imp([0, 0.2, 0.3, 0.4, 0.55], [2, 3, 1, 8], [0.1, 0.25, 0.5, 0.75, 0.9],
[2.5, 7.833333333333331, 2.6666666666666687, 0.0], almost=True)
def test_byenergy_709_to_single(self):
g_709 = list(reversed(pp.ALL_GROUPS[709]))
self._test_imp(g_709, [1.0]*709, [1e6, 2e6],
[15.050386030584683], almost=True)
|
{"hexsha": "7916c7c4b8efd48f69d33eaf8b6dd51ab98bbf54", "size": 4038, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/input/groupconverttest.py", "max_stars_repo_name": "zxkjack123/pypact", "max_stars_repo_head_hexsha": "8b37f42007e0accabc9fb31d4ab76935b559d817", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2018-01-22T14:00:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T06:29:22.000Z", "max_issues_repo_path": "tests/input/groupconverttest.py", "max_issues_repo_name": "listato/pypact", "max_issues_repo_head_hexsha": "a418ba218cdf4a25ae3e7d72e0919905d027d2ba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2018-12-07T14:30:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T20:33:06.000Z", "max_forks_repo_path": "tests/input/groupconverttest.py", "max_forks_repo_name": "listato/pypact", "max_forks_repo_head_hexsha": "a418ba218cdf4a25ae3e7d72e0919905d027d2ba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-05-29T13:41:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-21T01:33:41.000Z", "avg_line_length": 38.8269230769, "max_line_length": 121, "alphanum_fraction": 0.6067360079, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/feassemble/data/ElasticityImplicit.py
## @brief Python application for generating C++ data files for testing
## C++ ElasticityImplicit object.
from pyre.components.Component import Component
import numpy
# ----------------------------------------------------------------------
# ElasticityImplicit class
class ElasticityImplicit(Component):
    """
    Python application for generating C++ data files for testing C++
    ElasticityImplicit object.
    """

    # PUBLIC METHODS /////////////////////////////////////////////////////

    def __init__(self, name="elasticityimplicit"):
        """Constructor: register this component under the 'formulation' facility."""
        Component.__init__(self, name, facility="formulation")
        return

    # PRIVATE METHODS ////////////////////////////////////////////////////

    def calculateResidual(self, integrator):
        """
        Calculate contribution to residual of operator for integrator.

        {r} = -[K]{u(t)}
        """
        stiffness = integrator._calculateStiffnessMat()
        displacement = integrator.fieldT + integrator.fieldTIncr
        return (-numpy.dot(stiffness, displacement)).flatten()

    def calculateJacobian(self, integrator):
        """
        Calculate contribution to Jacobian matrix of operator for integrator.

        [A] = [K]
        """
        return integrator._calculateStiffnessMat()
# FACTORY //////////////////////////////////////////////////////////////
def formulation():
    """Factory: return a new ElasticityImplicit formulation component."""
    component = ElasticityImplicit()
    return component
# End of file
|
{"hexsha": "0f5ed13d1b5cf5703bfbbec2d307213084b9c5c8", "size": 1980, "ext": "py", "lang": "Python", "max_stars_repo_path": "unittests/libtests/feassemble/data/ElasticityImplicit.py", "max_stars_repo_name": "joegeisz/pylith", "max_stars_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-09T06:24:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T06:24:11.000Z", "max_issues_repo_path": "unittests/libtests/feassemble/data/ElasticityImplicit.py", "max_issues_repo_name": "joegeisz/pylith", "max_issues_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unittests/libtests/feassemble/data/ElasticityImplicit.py", "max_forks_repo_name": "joegeisz/pylith", "max_forks_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0632911392, "max_line_length": 73, "alphanum_fraction": 0.5651515152, "include": true, "reason": "import numpy", "num_tokens": 397}
|
import os
import json
import math
import numpy as np
from collections import OrderedDict
from sklearn.mixture import GaussianMixture
from mp.utils.feature_extractor import Feature_extractor # pylint: disable=import-error
class histogramm_based_warning():
    """Base class for histogram-based warning generators.

    Provides the common persistent storage path and the loading of a single
    segmentation feature across the preprocessed training data.
    """

    def __init__(self) -> None:
        # Root directory under which all histogram-based warning data lives.
        self.path_to_warnings = os.path.join(
            os.environ['OPERATOR_PERSISTENT_DIR'], 'hist_based_warnings')

    def load_seg_feature(self, feature):
        '''Loads all feature values for a given feature in order to use them for a histogram

        Args:
            feature(str): The name of the feature, compare Feature_extractor

        Returns(ndarray): An array filled with the values of the single feature '''
        feat_extr = Feature_extractor([feature])
        work_path = os.path.join(
            os.environ["PREPROCESSED_WORKFLOW_DIR"],
            os.environ["PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN"])
        collected = []
        for entry in os.listdir(work_path):
            features_path = os.path.join(work_path, entry, 'seg', 'features.json')
            feat_vec = feat_extr.read_feature_vector(features_path)
            # Skip segmentations whose feature vector contains NaNs.
            if not np.isnan(np.sum(np.array(feat_vec))):
                collected.append(feat_vec)
        return np.array(collected)
class hist_based_warning_slice_dice(histogramm_based_warning):
    """Histogram-based warnings for the slice-dice (segmentation smoothness)
    feature. Higher slice-dice values are better, so a single lower
    threshold per percentile suffices."""

    def __init__(self) -> None:
        super().__init__()
        self.path = os.path.join(self.path_to_warnings, 'slice_dice')
        if not os.path.isdir(self.path):
            os.makedirs(self.path)
        self.path_to_threshholds = os.path.join(self.path, 'threshholds.json')
        # Load previously computed thresholds if they exist.
        if os.path.exists(self.path_to_threshholds):
            with open(self.path_to_threshholds, 'r') as file:
                self.thresh_dict = json.load(file)

    def compute_threshholds(self, percentiles):
        '''Computes the threshholds for given percentages. Eg The intervalls
        where 90 percent of the values are contained. Saves the values as a dict
        Eg dict[0.9] = 0.6 can be interpreted that 90 percent of the data lies above 0.6.

        Args:
            percentiles(list(floats)): The percentiles. For 30 percent use 0.3 and so on. '''
        data = self.load_seg_feature('dice_scores')
        thresh_dict = OrderedDict()
        for percent in percentiles:
            threshhold = self.get_threshhold(data, percent)
            thresh_dict[percent] = threshhold
        self.save_threshholds(thresh_dict)

    def get_threshhold(self, data, percent):
        '''computes a single threshhold, where x percent of the data are contained.
        Since the slice dice (segmentation smoothness) is the better the higher,
        only one theshhold needs to be computed

        Args:
            data(ndarray): The array containing the feature values
            percent(float): How much percent should be contained in the intervall

        Returns(float): The threshhold'''
        bins = np.arange(np.min(data), 1, step=0.001)
        hist, bin_edges = np.histogram(data, bins=bins)
        total_points = np.sum(hist)
        dens = np.array(hist) / total_points
        len_hist = len(hist)
        # Walk downwards from the top bin until the accumulated mass reaches
        # the requested fraction.
        for i in range(len(hist)):
            weight = np.sum(dens[len_hist - i:len_hist])
            if weight >= percent:
                return bin_edges[len_hist - i]

    def save_threshholds(self, thresh_dict):
        'saves the dictionary of the threshholds at self.path_to_threshholds'
        # BUG FIX: the file must be opened for writing; the previous
        # default read-mode open made json.dump fail (and raised if the
        # file did not exist yet).
        with open(self.path_to_threshholds, 'w') as save_file:
            json.dump(thresh_dict, save_file)

    def label_seg(self, seg_feature):
        '''Returns in which "percentile" the given segmentation feature value lies.
        A return value of 0.6 means, that the data point would be contained in a range, where
        60 percent of the training data were. So the higher the return value, the less likely the
        occurence.'''
        num_threshholds = len(self.thresh_dict.items()) + 1  # because we divide by it
        for i, (_, threshhold) in enumerate(self.thresh_dict.items()):
            if seg_feature > threshhold:
                return (num_threshholds - i) / num_threshholds
        return 0
class hist_based_warning_conn_comp(histogramm_based_warning):
    """Histogram-based warnings for the number of connected components.
    Fewer connected components is better, so thresholds are upper bounds."""

    def __init__(self) -> None:
        super().__init__()
        self.path = os.path.join(self.path_to_warnings, 'conn_comp')
        if not os.path.isdir(self.path):
            os.makedirs(self.path)
        self.path_to_threshholds = os.path.join(self.path, 'threshholds.json')
        # Load previously computed thresholds if they exist.
        if os.path.exists(self.path_to_threshholds):
            with open(self.path_to_threshholds, 'r') as file:
                self.thresh_dict = json.load(file)

    def compute_threshholds(self, percentiles):
        '''Computes the threshholds for given percentages. Eg The intervalls
        where 90 percent of the values are contained. Saves the values as a dict
        Eg dict[0.9] = 25 can be interpreted that 90 percent of the data lies have less then
        25 connected components. Contrary to slice dice, the threshholds go higher, because most seg
        had a low nuber of conn comp

        Args:
            percentiles(list(floats)): The percentiles. For 30 percent use 0.3 and so on. '''
        data = self.load_seg_feature('conn_comp')
        thresh_dict = OrderedDict()
        for percent in percentiles:
            threshhold = self.get_threshhold(data, percent)
            thresh_dict[percent] = threshhold
        self.save_threshholds(thresh_dict)

    def get_threshhold(self, data, percent):
        '''computes a list of threshholds, where x percent of the data are contained.
        Since the number of conncted comp cannot be lower then 1, one threshhold suffices

        Args:
            data(ndarray): The array containing the feature values
            percent(float): How much percent should be contained in the intervall

        Returns(float): The threshhold'''
        hist, bin_edges = np.histogram(
            data, bins=np.arange(0, np.max(data), step=1), density=True)
        cum_hist = np.cumsum(hist)
        # First bin edge whose cumulative mass exceeds the requested fraction.
        for i in range(len(cum_hist)):
            if cum_hist[i] > percent:
                return math.ceil(bin_edges[i])

    def save_threshholds(self, thresh_dict):
        'saves the dictionary of the threshholds at self.path_to_threshholds'
        # BUG FIX: the file must be opened for writing; the previous
        # default read-mode open made json.dump fail (and raised if the
        # file did not exist yet).
        with open(self.path_to_threshholds, 'w') as save_file:
            json.dump(thresh_dict, save_file)

    def label_seg(self, seg_feature):
        '''Returns in which "percentile" the given number of connected components
        lies; higher return values mean less likely occurrences.'''
        num_threshholds = len(self.thresh_dict.items()) + 1
        for i, (_, threshhold) in enumerate(self.thresh_dict.items()):
            if seg_feature < threshhold:
                return (num_threshholds - i) / num_threshholds
        return 0
class hist_based_warning_int_mode(histogramm_based_warning):
    """Histogram-based warnings for the bimodal 'gauss_params' intensity
    feature. Thresholds describe one or two intervals around the two modes
    of a fitted two-component Gaussian mixture."""

    def __init__(self) -> None:
        super().__init__()
        self.path = os.path.join(self.path_to_warnings, 'gauss_params')
        if not os.path.isdir(self.path):
            os.makedirs(self.path)
        self.path_to_threshholds = os.path.join(self.path, 'threshholds.json')
        # Load previously computed thresholds if they exist.
        if os.path.exists(self.path_to_threshholds):
            with open(self.path_to_threshholds, 'r') as file:
                self.thresh_dict = json.load(file)

    def compute_threshholds(self, percentiles):
        '''Computes the threshholds for given percentages. Eg The intervalls
        where 90 percent of the values are contained. Saves the values as a dict
        Eg dict[0.9] = [0.1,0.2,0.5,0.6] can be interpreted that 90 percent of the data lies
        within [0.1,0.2] and [0.5,0.6].

        Args:
            percentiles(list(floats)): The percentiles. For 30 percent use 0.3 and so on. '''
        data = self.load_seg_feature('gauss_params')
        thresh_dict = OrderedDict()
        for percent in percentiles:
            threshholds = self.get_threshhold(data, percent)
            thresh_dict[percent] = threshholds
        self.save_threshholds(thresh_dict)

    def get_threshhold(self, data, percent):
        '''computes a list of threshhold, that correspond to intervalls, where x percent of the data are contained.
        Threshholds can be a list of 4 or 2 values.
        This is because the modes of the values are somewhere in the intervall [0,1], so we do that in order to be more precise.
        When the intervalls overlap we have only 2 values (the absolute upper and lower bound) otherwise we have 4 values per percent

        Args:
            data(ndarray): The array containing the feature values
            percent(float): How much percent should be contained in the intervall

        Returns(float): The list of threshholds'''
        # first fit a mixture with 2 components to find 2 modes
        gm = GaussianMixture(n_components=2).fit(data)
        if gm.means_[0][0] < gm.means_[1][0]:
            means = [gm.means_[0][0], gm.means_[1][0]]
            vars = [gm.covariances_[0][0][0], gm.covariances_[1][0][0]]
            weights = [gm.weights_[0], gm.weights_[1]]
            # try to balance the steplengths, according to weights and cov
            # NOTE(review): this branch scales by variance while the other
            # branch scales by std/20 -- looks inconsistent, confirm intent.
            step_0 = vars[0]*weights[0]
            step_1 = vars[1]*weights[1]
        else:
            means = [gm.means_[1][0], gm.means_[0][0]]
            vars = [gm.covariances_[1][0][0], gm.covariances_[0][0][0]]
            weights = [gm.weights_[1], gm.weights_[0]]
            # try to balance the steplengths, according to weights and std
            step_0 = (vars[0]**(1/2))*(1/20)*weights[0]
            step_1 = (vars[1]**(1/2))*(1/20)*weights[1]
        # find the threshholds: one histogram per mode, with mode-specific
        # bin widths, both normalised by the total point count
        hist_0, bins_0 = np.histogram(data, np.arange(0, 1, step_0))
        hist_1, bins_1 = np.histogram(data, np.arange(0, 1, step_1))
        number_points = np.sum(hist_0)
        hist_0 = np.array(hist_0)/number_points
        hist_1 = np.array(hist_1)/number_points
        hist = [hist_0, hist_1]
        bins = [bins_0, bins_1]
        # index of the bin containing each mode
        mode_0_bin = np.argmax(bins[0] > means[0])
        mode_1_bin = np.argmax(bins[1] > means[1])
        mode_bins = [mode_0_bin, mode_1_bin]
        # if the intervalls are overlapping, inner intervalls are not increased in this case
        overlapping = False
        complete_0 = False
        complete_1 = False
        i = 0
        mass = 0
        # grow the interval(s) symmetrically around both modes until they
        # contain the requested fraction of the data
        while mass < percent:
            # check whether intervalls are overlapping
            if bins[1][mode_bins[1]-i] < bins[0][mode_bins[0]+i+1] and not overlapping:
                # add the bigger bin to the mass
                overlapping = True
                if weights[0] > weights[1]:
                    mass = mass + hist[0][mode_bins[0]+i]
                else:
                    mass = mass + hist[1][mode_bins[1]-i]
            if mode_bins[0]-i < 0 or mode_bins[0]+i > len(hist[0]):
                complete_0 = True
            if mode_bins[1]-i < 0 or mode_bins[1]+i > len(hist[1]):
                complete_1 = True
            # if both ditributions have reached their end break the loop
            if complete_1 or complete_0:
                break
            # add masses
            if i == 0:
                mass = hist[0][mode_bins[0]]+hist[1][mode_bins[1]]
            if overlapping:
                mass = mass + hist[0][mode_bins[0]-i]+hist[1][mode_bins[1]+1]
            else:
                mass0 = hist[0][mode_bins[0]-i]+hist[0][mode_bins[0]+i]
                mass1 = hist[1][mode_bins[1]+i]+hist[1][mode_bins[1]-i]
                mass = mass + mass0 + mass1
            i = i + 1
        if overlapping:
            # one merged interval: absolute lower and upper bound
            return [bins[0][mode_bins[0]-i+1], bins[1][mode_bins[1]+i]]
        else:
            # two disjoint intervals, one around each mode
            return [bins[0][mode_bins[0]-i+1], bins[0][mode_bins[0]+i],
                    bins[1][mode_bins[1]-i+1], bins[1][mode_bins[1]+i]]

    def save_threshholds(self, thresh_dict):
        'saves the dictionary of the threshholds at self.path_to_threshholds'
        # BUG FIX: the file must be opened for writing; the previous
        # default read-mode open made json.dump fail (and raised if the
        # file did not exist yet).
        with open(self.path_to_threshholds, 'w') as save_file:
            json.dump(thresh_dict, save_file)

    def label_seg(self, seg_feature):
        '''Returns in which "percentile" the given feature value lies;
        higher return values mean less likely occurrences.'''
        num_threshholds = len(self.thresh_dict.items()) + 1
        # BUG FIX: the loop previously bound the whole (percent, threshholds)
        # item tuple, so feature_in_threshholds always received a 2-tuple of
        # (percentile key, threshold list) instead of the threshold list.
        for i, (_, threshholds) in enumerate(self.thresh_dict.items()):
            if self.feature_in_threshholds(seg_feature, threshholds):
                return (num_threshholds - i) / num_threshholds
        return 0

    def feature_in_threshholds(self, feature, threshholds):
        '''Return True if feature lies inside the interval(s) described by
        threshholds (either [lo, hi] or [lo1, hi1, lo2, hi2]).'''
        if len(threshholds) == 2:
            return ((feature >= threshholds[0]) and (feature <= threshholds[1]))
        # BUG FIX: was `len(threshholds == 4)`, which evaluated
        # `threshholds == 4` to False and then raised TypeError on len(False).
        if len(threshholds) == 4:
            in_first = ((feature >= threshholds[0]) and (feature <= threshholds[1]))
            in_second = ((feature >= threshholds[2]) and (feature <= threshholds[3]))
            return in_first or in_second
|
{"hexsha": "8333297614b3ff11612ce0bfaf34227dde22b166", "size": 13016, "ext": "py", "lang": "Python", "max_stars_repo_path": "mp/models/statistical/histogramm_based_warnings.py", "max_stars_repo_name": "MECLabTUDA/QA_Seg", "max_stars_repo_head_hexsha": "72a961e081ac814243ae65b46e0276079af5680f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mp/models/statistical/histogramm_based_warnings.py", "max_issues_repo_name": "MECLabTUDA/QA_Seg", "max_issues_repo_head_hexsha": "72a961e081ac814243ae65b46e0276079af5680f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mp/models/statistical/histogramm_based_warnings.py", "max_forks_repo_name": "MECLabTUDA/QA_Seg", "max_forks_repo_head_hexsha": "72a961e081ac814243ae65b46e0276079af5680f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1594202899, "max_line_length": 138, "alphanum_fraction": 0.6100184388, "include": true, "reason": "import numpy", "num_tokens": 3064}
|
/*
* Copyright (C) 2005 National Association of REALTORS(R)
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, and/or sell copies of the
* Software, and to permit persons to whom the Software is furnished
* to do so, provided that the above copyright notice(s) and this
* permission notice appear in all copies of the Software and that
* both the above copyright notice(s) and this permission notice
* appear in supporting documentation.
*/
#include <boost/regex.hpp>
#include <iostream>
#include <string>
#include "DBHelper.h"
using std::cout;
using std::endl;
using std::string;
using namespace odbcrets::test;
int main(int argc, char *argv[])
{
DBHelper db;
if (argc != 3) {
std::string program_name = boost::regex_replace(std::string(argv[0]), boost::regex(".*/"), "");
std::cerr << "Usage: " << program_name << " dsn query" << std::endl;
return 1;
}
std::string dsn(argv[1]);
std::string query(argv[2]);
try
{
db.connect(dsn);
cout << db.executeQuery(query) << endl;
int num = db.numResultCols();
cout << "Search Result has " << num << " columns" << endl;
num = db.rowCount();
cout << "Search Result has " << num << " rows" << endl;
cout << db.describeColumn(1) << endl;
ResultColumnPtr col1(new CharResultColumn(1024));
cout << db.describeColumn(2) << endl;
ResultColumnPtr col2(new CharResultColumn(1024));
db.setStmtAttr(SQL_ROWSET_SIZE, (SQLPOINTER) 2, -6);
cout << "pre fetch" << endl;
while (db.fetch())
{
db.getData(1, col1);
db.getData(2, col2);
cout << col1 << " ";
cout << col2 << endl;
}
db.disconnect();
}
catch (std::exception& e)
{
cout << e.what() << endl;
}
}
|
{"hexsha": "a8c6c4defac4fd9faae88eab1b2fcdd1d95b3bc0", "size": 2134, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "project/examples/cpp/src/ExecuteQuery.cpp", "max_stars_repo_name": "mkhon/ezRETS", "max_stars_repo_head_hexsha": "7040e80061da719b5a2d56a80431198962f57893", "max_stars_repo_licenses": ["ICU"], "max_stars_count": 20.0, "max_stars_repo_stars_event_min_datetime": "2015-07-11T15:54:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T04:55:24.000Z", "max_issues_repo_path": "project/examples/cpp/src/ExecuteQuery.cpp", "max_issues_repo_name": "mkhon/ezRETS", "max_issues_repo_head_hexsha": "7040e80061da719b5a2d56a80431198962f57893", "max_issues_repo_licenses": ["ICU"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2015-01-12T22:38:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-16T01:08:18.000Z", "max_forks_repo_path": "project/examples/cpp/src/ExecuteQuery.cpp", "max_forks_repo_name": "NationalAssociationOfRealtors/ezRETS", "max_forks_repo_head_hexsha": "7040e80061da719b5a2d56a80431198962f57893", "max_forks_repo_licenses": ["ICU"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2015-04-05T03:28:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T16:52:52.000Z", "avg_line_length": 28.8378378378, "max_line_length": 103, "alphanum_fraction": 0.6044985942, "num_tokens": 525}
|
// Boost.Bimap
//
// Copyright (c) 2006-2007 Matias Capeletto
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// VC++ 8.0 warns on usage of certain Standard Library and API functions that
// can be cause buffer overruns or other possible security issues if misused.
// See http://msdn.microsoft.com/msdnmag/issues/05/05/SafeCandC/default.aspx
// But the wording of the warning is misleading and unsettling, there are no
// portable alternative functions, and VC++ 8.0's own libraries use the
// functions in question. So turn off the warnings.
#define _CRT_SECURE_NO_DEPRECATE
#define _SCL_SECURE_NO_DEPRECATE
#include <boost/config.hpp>
#define BOOST_BIMAP_DISABLE_SERIALIZATION
// Boost.Test
#include <boost/test/minimal.hpp>
// std
#include <set>
#include <map>
#include <string>
#include <functional>
// Set type specifications
#include <boost/bimap/set_of.hpp>
#include <boost/bimap/multiset_of.hpp>
// bimap container
#include <boost/bimap/bimap.hpp>
#include <libs/bimap/test/test_bimap.hpp>
// Empty tag types used to name the left/right views in the tagged-bimap test.
struct left_tag {};
struct right_tag {};
// Exercise ordered bimap variants (set_of / multiset_of, tagged views,
// right_based indexing) against shared reference data.
void test_bimap()
{
    using namespace boost::bimaps;

    // Reference data mirrored in both directions: int <-> double.
    typedef std::map<int,double> left_data_type;
    left_data_type left_data;
    left_data.insert( left_data_type::value_type(1,0.1) );
    left_data.insert( left_data_type::value_type(2,0.2) );
    left_data.insert( left_data_type::value_type(3,0.3) );
    left_data.insert( left_data_type::value_type(4,0.4) );

    typedef std::map<double,int> right_data_type;
    right_data_type right_data;
    right_data.insert( right_data_type::value_type(0.1,1) );
    right_data.insert( right_data_type::value_type(0.2,2) );
    right_data.insert( right_data_type::value_type(0.3,3) );
    right_data.insert( right_data_type::value_type(0.4,4) );

    //--------------------------------------------------------------------
    // Default bimap: set_of on both sides.
    {
        typedef bimap< int, double > bm_type;

        std::set< bm_type::value_type > data;
        data.insert( bm_type::value_type(1,0.1) );
        data.insert( bm_type::value_type(2,0.2) );
        data.insert( bm_type::value_type(3,0.3) );
        data.insert( bm_type::value_type(4,0.4) );

        bm_type bm;
        test_set_set_bimap(bm,data,left_data,right_data);
    }
    //--------------------------------------------------------------------


    //--------------------------------------------------------------------
    // Tagged multiset_of on both sides with a custom relation ordering.
    {
        typedef bimap
        <
            multiset_of< tagged<int, left_tag > >,
            multiset_of< tagged<double, right_tag > >,
            multiset_of_relation< std::less< _relation > >

        > bm_type;

        std::set< bm_type::value_type > data;
        data.insert( bm_type::value_type(1,0.1) );
        data.insert( bm_type::value_type(2,0.2) );
        data.insert( bm_type::value_type(3,0.3) );
        data.insert( bm_type::value_type(4,0.4) );

        bm_type bm;
        test_multiset_multiset_bimap(bm,data,left_data,right_data);
        test_tagged_bimap<left_tag,right_tag>(bm,data);
    }
    //--------------------------------------------------------------------


    //--------------------------------------------------------------------
    // right_based: the relation set is ordered by the right view.
    {
        typedef bimap<int,double,right_based> bm_type;

        std::set< bm_type::value_type > data;
        data.insert( bm_type::value_type(1,0.1) );
        data.insert( bm_type::value_type(2,0.2) );
        data.insert( bm_type::value_type(3,0.3) );
        data.insert( bm_type::value_type(4,0.4) );

        bm_type bm;
        test_set_set_bimap(bm,data,left_data,right_data);
    }
    //--------------------------------------------------------------------


    //--------------------------------------------------------------------
    // Copy/assign/swap semantics on the whole bimap and on each view.
    {
        typedef bimap
        <
            multiset_of< int, std::greater<int> >, set_of<std::string> ,
            multiset_of_relation< std::greater< _relation > >

        > bimap_type;

        bimap_type b1;

        b1.insert( bimap_type::value_type(1,"one") );

        bimap_type b2( b1 );

        // Copy-constructed bimap compares equal in every ordering relation.
        BOOST_CHECK(     b1 == b2   );
        BOOST_CHECK( ! ( b1 != b2 ) );
        BOOST_CHECK(     b1 <= b2   );
        BOOST_CHECK(     b1 >= b2   );
        BOOST_CHECK( ! ( b1 <  b2 ) );
        BOOST_CHECK( ! ( b1 >  b2 ) );

        b1.insert( bimap_type::value_type(2,"two") );

        b2 = b1;
        BOOST_CHECK( b2 == b1 );

        // Assignment through the left view must copy the whole bimap.
        b1.insert( bimap_type::value_type(3,"three") );

        b2.left = b1.left;
        BOOST_CHECK( b2 == b1 );

        // Assignment through the right view must copy the whole bimap.
        b1.insert( bimap_type::value_type(4,"four") );

        b2.right = b1.right;
        BOOST_CHECK( b2 == b1 );

        b1.clear();

        // swap on the bimap and on each view exchanges full contents.
        b2.swap(b1);
        BOOST_CHECK( b2.empty() && !b1.empty() );

        b1.left.swap( b2.left );
        BOOST_CHECK( b1.empty() && !b2.empty() );

        b1.right.swap( b2.right );
        BOOST_CHECK( b2.empty() && !b1.empty() );
    }
    //--------------------------------------------------------------------
}
// Boost.Test (minimal) entry point: run the whole bimap test suite.
int test_main( int, char* [] )
{
    test_bimap();
    return 0;
}
|
{"hexsha": "e747cb7c44863a95b94bba7d22891e19a967153e", "size": 5109, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "deps/src/boost_1_65_1/libs/bimap/test/test_bimap_ordered.cpp", "max_stars_repo_name": "shreyasvj25/turicreate", "max_stars_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11356.0, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "deps/src/boost_1_65_1/libs/bimap/test/test_bimap_ordered.cpp", "max_issues_repo_name": "shreyasvj25/turicreate", "max_issues_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2402.0, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "deps/src/boost_1_65_1/libs/bimap/test/test_bimap_ordered.cpp", "max_forks_repo_name": "shreyasvj25/turicreate", "max_forks_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 29.0284090909, "max_line_length": 78, "alphanum_fraction": 0.5480524564, "num_tokens": 1307}
|
import numpy as np
import pytest
import quanguru.QuantumToolbox.evolution as evo#pylint: disable=import-error
# Names of the reference sigma operators (-, +, and Z) used in all tests below.
sigmaOpers = ["sigmaMinusReference", "sigmaPlusReference", "sigmaZReference"]

# Expected pre-superoperators for sigma-, sigma+, and sigma-z respectively.
preExpects = [np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]]),
              np.array([[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]),
              np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])]

@pytest.mark.parametrize("op, expect", [[name, mat] for (name, mat) in zip(sigmaOpers, preExpects)])
def test_preSO(op, expect, referenceValues):
    # test the preSO for sigma -, +, and Z operators by comparing expected results
    computed = evo._preSO(referenceValues[op])  #pylint:disable=protected-access
    assert np.allclose(computed, expect)
# Expected post-superoperators for sigma-, sigma+, and sigma-z respectively.
posExpects = [np.array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]),
              np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0]]),
              np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]])]

@pytest.mark.parametrize("op, expect", [[name, mat] for (name, mat) in zip(sigmaOpers, posExpects)])
def test_postSO(op, expect, referenceValues):
    # test the posSO for sigma -, +, and Z operators by comparing expected results
    computed = evo._postSO(referenceValues[op])  #pylint:disable=protected-access
    assert np.allclose(computed, expect)
# Expected pre-post superoperators for sigma-, sigma+, and sigma-z respectively.
preposExpects = [np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0]]),
                 np.array([[0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]),
                 np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])]

@pytest.mark.parametrize("op, expect", [[name, mat] for (name, mat) in zip(sigmaOpers, preposExpects)])
def test_prepostSO(op, expect, referenceValues):
    # test the preposSO for sigma -, +, and Z operators by comparing expected results
    op_matrix = referenceValues[op]
    computed = evo._prepostSO(op_matrix, op_matrix.T)  #pylint:disable=protected-access
    assert np.allclose(computed, expect)
# Expected Lindblad dissipators for sigma-, sigma+, and sigma-z respectively.
dissipatorExpects = [np.array([[-1, 0, 0, 0], [0, -0.5, 0, 0], [0, 0, -0.5, 0], [1, 0, 0, 0]]),
                     np.array([[0, 0, 0, 1], [0, -0.5, 0, 0], [0, 0, -0.5, 0], [0, 0, 0, -1]]),
                     np.array([[0, 0, 0, 0], [0, -2, 0, 0], [0, 0, -2, 0], [0, 0, 0, 0]])]

@pytest.mark.parametrize("op, expect", [[name, mat] for (name, mat) in zip(sigmaOpers, dissipatorExpects)])
def test_dissipator(op, expect, referenceValues):
    # test the dissipator for sigma -, +, and Z operators by comparing expected results
    computed = evo.dissipator(referenceValues[op])
    assert np.allclose(computed, expect)
|
{"hexsha": "96be66a1e5b53d451c4ff7217447376e80d0bce1", "size": 2520, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_evolution.py", "max_stars_repo_name": "AngsarM/QuanGuru", "max_stars_repo_head_hexsha": "5db6105f843bbc78c2d5b1547e32d494fbe10b8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-05-23T06:30:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T13:33:54.000Z", "max_issues_repo_path": "tests/test_evolution.py", "max_issues_repo_name": "cahitkargi/QuanGuru", "max_issues_repo_head_hexsha": "9b5c94465cd58bc32f6ff845f29dfdec7e0f9075", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2022-03-18T02:40:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T07:00:25.000Z", "max_forks_repo_path": "tests/test_evolution.py", "max_forks_repo_name": "cahitkargi/QuanGuru", "max_forks_repo_head_hexsha": "9b5c94465cd58bc32f6ff845f29dfdec7e0f9075", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-23T06:30:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T02:40:08.000Z", "avg_line_length": 66.3157894737, "max_line_length": 123, "alphanum_fraction": 0.5714285714, "include": true, "reason": "import numpy", "num_tokens": 1041}
|
using JuLIP
using Base.Test
using JuLIP.Testing

verbose=true

# check whether on CI
isCI = haskey(ENV, "CI")
notCI = !isCI
eam_W4 = nothing

# check whether ASE is available
hasase = true
try
    import ASE
catch
    hasase = false
end

# Test files to run, as (filename, human-readable testset name) pairs.
julip_tests = [
    ("testaux.jl", "Miscellaneous"),
    ("test_atoms.jl", "Atoms"),
    ("test_build.jl", "Build"),
    ("testanalyticpotential.jl", "Analytic Potential"),
    ("testpotentials.jl", "Potentials"),
    # ("test_ad.jl", "AD Potentials"),
    ("testvarcell.jl", "Variable Cell"),
    ("testhessian.jl", "Hessian"),
    ("testsolve.jl", "Solve"),
]
# remove testsolve if on Travis
if isCI
    julip_tests = julip_tests[1:end-1]
end

# "testexpvarcell.jl";  # USE THIS TO WORK ON EXPCELL IMPLEMENTATION

# ===== some prototype potentials ======
# Load EAM/Finnis-Sinclair potential data files shipped with the package.
print("Loading some interatomic potentials . .")
data = joinpath(dirname(@__FILE__), "..", "data") * "/"
eam_Fe = JuLIP.Potentials.EAM(data * "pfe.plt", data * "ffe.plt", data * "F_fe.plt")
print(" .")
eam_W = JuLIP.Potentials.FinnisSinclair(data*"W-pair-Wang-2014.plt", data*"W-e-dens-Wang-2014.plt")
print(" .")
# eam_W4 stays `nothing` if its data file cannot be loaded.
try
    eam_W4 = JuLIP.Potentials.EAM(data * "w_eam4.fs")
catch
    eam_W4 = nothing
end
println(" done.")

println("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡")
println("    Starting JuLIP Tests")
println("≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡")

# Run every configured test file inside one top-level testset.
@testset "JuLIP" begin
    for (testfile, testid) in julip_tests
        println("=======================")
        println("Testset $(testid)")
        println("=======================")
        @testset "$(testid)" begin include(testfile); end
    end
end
|
{"hexsha": "4ae70cb90664d32a70be51ee296a84110eaafe21", "size": 1560, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "hlwang0717/JuLIP.jl", "max_stars_repo_head_hexsha": "c8d325191b99be5c545a0fdb2b8fe11581c125fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "hlwang0717/JuLIP.jl", "max_issues_repo_head_hexsha": "c8d325191b99be5c545a0fdb2b8fe11581c125fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "hlwang0717/JuLIP.jl", "max_forks_repo_head_hexsha": "c8d325191b99be5c545a0fdb2b8fe11581c125fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6363636364, "max_line_length": 99, "alphanum_fraction": 0.6173076923, "num_tokens": 572}
|
""" Unit tests for testing support
"""
import logging
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.memory_data_models import BlockVisibility
from wrappers.arlexecute.execution_support.arlexecutebase import ARLExecuteBase
from wrappers.arlexecute.execution_support.dask_init import get_dask_Client
from workflows.arlexecute.simulation.simulation_arlexecute import simulate_list_arlexecute_workflow
log = logging.getLogger(__name__)
class TestSimulationArlexecuteSupport(unittest.TestCase):
    """Unit tests for the arlexecute simulation workflow.

    Each test runs against a module-level global ``arlexecute`` executor
    backed by a Dask client; the executor is created in :meth:`setUp` and
    torn down in :meth:`tearDown`.
    """

    def setUp(self):
        """Create the Dask-backed executor and the simulation parameters."""
        # 4 GiB memory limit per worker; no dashboard needed for unit tests.
        client = get_dask_Client(memory_limit=4 * 1024 * 1024 * 1024, n_workers=4, dashboard_address=None)
        # NOTE(review): the executor is published as a module-level global —
        # presumably simulate_list_arlexecute_workflow reads it implicitly;
        # confirm before refactoring away the `global` statement.
        global arlexecute
        arlexecute = ARLExecuteBase(use_dask=True)
        arlexecute.set_client(client)
        from data_models.parameters import arl_path
        self.dir = arl_path('test_results')
        # Three channels between 100 MHz and 150 MHz, each 25 MHz wide.
        self.frequency = numpy.linspace(1e8, 1.5e8, 3)
        self.channel_bandwidth = numpy.array([2.5e7, 2.5e7, 2.5e7])
        self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
        # Hour angles: +/- 300 s converted to radians of Earth rotation
        # (43200 s = half a sidereal-ish day here — TODO confirm the intent).
        self.times = numpy.linspace(-300.0, 300.0, 3) * numpy.pi / 43200.0

    def tearDown(self):
        """Close the executor (and its Dask client) and drop the global."""
        global arlexecute
        arlexecute.close()
        del arlexecute

    def test_create_simulate_vis_list(self):
        """Simulated visibility list has one entry per frequency channel
        and each computed entry is a non-empty BlockVisibility."""
        vis_list = simulate_list_arlexecute_workflow(frequency=self.frequency, channel_bandwidth=self.channel_bandwidth)
        assert len(vis_list) == len(self.frequency)
        # Force evaluation of the first (lazy) graph node synchronously.
        vt = arlexecute.compute(vis_list[0], sync=True)
        assert isinstance(vt, BlockVisibility)
        assert vt.nvis > 0
|
{"hexsha": "6d8a7386f259e536a462fc77558535f2a28b1c0f", "size": 1672, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/workflows/test_simulation_arlexecute.py", "max_stars_repo_name": "ska-telescope/algorithm-reference-library", "max_stars_repo_head_hexsha": "1b2c8d6079249202864abf8c60cdea40f0f123cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2016-12-14T11:20:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T15:23:41.000Z", "max_issues_repo_path": "tests/workflows/test_simulation_arlexecute.py", "max_issues_repo_name": "ska-telescope/algorithm-reference-library", "max_issues_repo_head_hexsha": "1b2c8d6079249202864abf8c60cdea40f0f123cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2017-06-27T09:15:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-11T18:16:37.000Z", "max_forks_repo_path": "tests/workflows/test_simulation_arlexecute.py", "max_forks_repo_name": "SKA-ScienceDataProcessor/algorithm-reference-library", "max_forks_repo_head_hexsha": "1b2c8d6079249202864abf8c60cdea40f0f123cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2017-07-02T03:45:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-11T17:19:01.000Z", "avg_line_length": 35.5744680851, "max_line_length": 120, "alphanum_fraction": 0.7302631579, "include": true, "reason": "import numpy,from astropy", "num_tokens": 418}
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
⊢ Injective toFun
[PROOFSTEP]
rintro ⟨s, f, hf⟩ ⟨t, g, hg⟩ (rfl : f = g)
[GOAL]
case mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), a ∈ s ↔ f a ≠ 0
t : Finset α
hg : ∀ (a : α), a ∈ t ↔ f a ≠ 0
⊢ { support := s, toFun := f, mem_support_toFun := hf } = { support := t, toFun := f, mem_support_toFun := hg }
[PROOFSTEP]
congr
[GOAL]
case mk.mk.e_support
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), a ∈ s ↔ f a ≠ 0
t : Finset α
hg : ∀ (a : α), a ∈ t ↔ f a ≠ 0
⊢ s = t
[PROOFSTEP]
ext a
[GOAL]
case mk.mk.e_support.a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), a ∈ s ↔ f a ≠ 0
t : Finset α
hg : ∀ (a : α), a ∈ t ↔ f a ≠ 0
a : α
⊢ a ∈ s ↔ a ∈ t
[PROOFSTEP]
exact (hf _).trans (hg _).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
⊢ ↑f = 0 ↔ f = 0
[PROOFSTEP]
rw [← coe_zero, FunLike.coe_fn_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
⊢ ↑f a = ↑g a
[PROOFSTEP]
classical exact
if h : a ∈ f.support then h₂ a h
else by
have hf : f a = 0 := not_mem_support_iff.1 h
have hg : g a = 0 := by rwa [h₁, not_mem_support_iff] at h
rw [hf, hg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
⊢ ↑f a = ↑g a
[PROOFSTEP]
exact
if h : a ∈ f.support then h₂ a h
else by
have hf : f a = 0 := not_mem_support_iff.1 h
have hg : g a = 0 := by rwa [h₁, not_mem_support_iff] at h
rw [hf, hg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
h : ¬a ∈ f.support
⊢ ↑f a = ↑g a
[PROOFSTEP]
have hf : f a = 0 := not_mem_support_iff.1 h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
h : ¬a ∈ f.support
hf : ↑f a = 0
⊢ ↑f a = ↑g a
[PROOFSTEP]
have hg : g a = 0 := by rwa [h₁, not_mem_support_iff] at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
h : ¬a ∈ f.support
hf : ↑f a = 0
⊢ ↑g a = 0
[PROOFSTEP]
rwa [h₁, not_mem_support_iff] at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f g : α →₀ M
x✝ : f.support = g.support ∧ ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
h₁ : f.support = g.support
h₂ : ∀ (x : α), x ∈ f.support → ↑f x = ↑g x
a : α
h : ¬a ∈ f.support
hf : ↑f a = 0
hg : ↑g a = 0
⊢ ↑f a = ↑g a
[PROOFSTEP]
rw [hf, hg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
⊢ f.support = ∅ ↔ f = 0
[PROOFSTEP]
exact_mod_cast @Function.support_eq_empty_iff _ _ _ f
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
⊢ Finset.Nonempty f.support ↔ f ≠ 0
[PROOFSTEP]
simp only [Finsupp.support_eq_empty, Finset.nonempty_iff_ne_empty, Ne.def]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
⊢ f ≠ 0 ↔ ∃ a, ↑f a ≠ 0
[PROOFSTEP]
simp [← Finsupp.support_eq_empty, Finset.eq_empty_iff_forall_not_mem]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
⊢ card f.support = 0 ↔ f = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Set α
f : α →₀ M
⊢ ↑f.support ⊆ s ↔ ∀ (a : α), ¬a ∈ s → ↑f a = 0
[PROOFSTEP]
simp only [Set.subset_def, mem_coe, mem_support_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Set α
f : α →₀ M
⊢ (∀ (x : α), ↑f x ≠ 0 → x ∈ s) ↔ ∀ (a : α), ¬a ∈ s → ↑f a = 0
[PROOFSTEP]
exact forall_congr' fun a => not_imp_comm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Unique α
f g : α →₀ M
h : ↑f default = ↑g default
a : α
⊢ ↑f a = ↑g a
[PROOFSTEP]
rwa [Unique.eq_default a]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a : α
b : M
a' : α
⊢ (a' ∈ if b = 0 then ∅ else {a}) ↔ Pi.single a b a' ≠ 0
[PROOFSTEP]
classical
obtain rfl | hb := eq_or_ne b 0
· simp [Pi.single, update]
rw [if_neg hb, mem_singleton]
obtain rfl | ha := eq_or_ne a' a
· simp [hb, Pi.single, update]
simp [Pi.single_eq_of_ne' ha.symm, ha]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a : α
b : M
a' : α
⊢ (a' ∈ if b = 0 then ∅ else {a}) ↔ Pi.single a b a' ≠ 0
[PROOFSTEP]
obtain rfl | hb := eq_or_ne b 0
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b : M
a a' : α
⊢ (a' ∈ if 0 = 0 then ∅ else {a}) ↔ Pi.single a 0 a' ≠ 0
[PROOFSTEP]
simp [Pi.single, update]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a : α
b : M
a' : α
hb : b ≠ 0
⊢ (a' ∈ if b = 0 then ∅ else {a}) ↔ Pi.single a b a' ≠ 0
[PROOFSTEP]
rw [if_neg hb, mem_singleton]
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a : α
b : M
a' : α
hb : b ≠ 0
⊢ a' = a ↔ Pi.single a b a' ≠ 0
[PROOFSTEP]
obtain rfl | ha := eq_or_ne a' a
[GOAL]
case inr.inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a'✝ : α
b✝ b : M
a' : α
hb : b ≠ 0
⊢ a' = a' ↔ Pi.single a' b a' ≠ 0
[PROOFSTEP]
simp [hb, Pi.single, update]
[GOAL]
case inr.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a : α
b : M
a' : α
hb : b ≠ 0
ha : a' ≠ a
⊢ a' = a ↔ Pi.single a b a' ≠ 0
[PROOFSTEP]
simp [Pi.single_eq_of_ne' ha.symm, ha]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Decidable (a = a')
⊢ ↑(single a b) a' = if a = a' then b else 0
[PROOFSTEP]
classical
simp_rw [@eq_comm _ a a']
convert Pi.single_apply a b a'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Decidable (a = a')
⊢ ↑(single a b) a' = if a = a' then b else 0
[PROOFSTEP]
simp_rw [@eq_comm _ a a']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Decidable (a = a')
⊢ ↑(single a b) a' = if a' = a then b else 0
[PROOFSTEP]
convert Pi.single_apply a b a'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
f : α → β
hf : Injective f
x z : α
y : M
⊢ ↑(single (f x) y) (f z) = ↑(single x y) z
[PROOFSTEP]
classical simp only [single_apply, hf.eq_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
f : α → β
hf : Injective f
x z : α
y : M
⊢ ↑(single (f x) y) (f z) = ↑(single x y) z
[PROOFSTEP]
simp only [single_apply, hf.eq_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ ↑(single a b) = Set.indicator {a} fun x => b
[PROOFSTEP]
classical
ext
simp [single_apply, Set.indicator, @eq_comm _ a]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ ↑(single a b) = Set.indicator {a} fun x => b
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
x✝ : α
⊢ ↑(single a b) x✝ = Set.indicator {a} (fun x => b) x✝
[PROOFSTEP]
simp [single_apply, Set.indicator, @eq_comm _ a]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ ↑(single a b) a = b
[PROOFSTEP]
classical exact Pi.single_eq_same (f := λ _ => M) a b
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ ↑(single a b) a = b
[PROOFSTEP]
exact Pi.single_eq_same (f := λ _ => M) a b
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
⊢ ↑(single a b) a' = 0
[PROOFSTEP]
classical exact Pi.single_eq_of_ne' h _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
⊢ ↑(single a b) a' = 0
[PROOFSTEP]
exact Pi.single_eq_of_ne' h _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a✝ a' : α
b✝ : M
inst✝ : DecidableEq α
a : α
b : M
⊢ ↑(single a b) = update 0 a b
[PROOFSTEP]
classical rw [single_eq_set_indicator, ← Set.piecewise_eq_indicator, Set.piecewise_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a✝ a' : α
b✝ : M
inst✝ : DecidableEq α
a : α
b : M
⊢ ↑(single a b) = update 0 a b
[PROOFSTEP]
rw [single_eq_set_indicator, ← Set.piecewise_eq_indicator, Set.piecewise_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
a : α
⊢ (fun f => ↑f) (single a 0) = (fun f => ↑f) 0
[PROOFSTEP]
classical simpa only [single_eq_update, coe_zero] using Function.update_eq_self a (0 : α → M)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
a : α
⊢ (fun f => ↑f) (single a 0) = (fun f => ↑f) 0
[PROOFSTEP]
simpa only [single_eq_update, coe_zero] using Function.update_eq_self a (0 : α → M)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a a' : α
b : M
⊢ single a (↑(single a' b) a) = ↑(single a' (single a' b)) a
[PROOFSTEP]
classical
rw [single_apply, single_apply]
ext
split_ifs with h
· rw [h]
· rw [zero_apply, single_apply, ite_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a a' : α
b : M
⊢ single a (↑(single a' b) a) = ↑(single a' (single a' b)) a
[PROOFSTEP]
rw [single_apply, single_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a a' : α
b : M
⊢ single a (if a' = a then b else 0) = if a' = a then single a' b else 0
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝¹ a'✝ : α
b✝ : M
a a' : α
b : M
a✝ : α
⊢ ↑(single a (if a' = a then b else 0)) a✝ = ↑(if a' = a then single a' b else 0) a✝
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝¹ a'✝ : α
b✝ : M
a a' : α
b : M
a✝ : α
h : a' = a
⊢ ↑(single a b) a✝ = ↑(single a' b) a✝
[PROOFSTEP]
rw [h]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝¹ a'✝ : α
b✝ : M
a a' : α
b : M
a✝ : α
h : ¬a' = a
⊢ ↑(single a 0) a✝ = ↑0 a✝
[PROOFSTEP]
rw [zero_apply, single_apply, ite_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ (single a b).support ⊆ {a}
[PROOFSTEP]
classical show ite _ _ _ ⊆ _; split_ifs <;> [exact empty_subset _; exact Subset.refl _]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ (single a b).support ⊆ {a}
[PROOFSTEP]
show ite _ _ _ ⊆ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ (if b = 0 then ∅ else {a}) ⊆ {a}
[PROOFSTEP]
split_ifs <;> [exact empty_subset _; exact Subset.refl _]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ (if b = 0 then ∅ else {a}) ⊆ {a}
[PROOFSTEP]
split_ifs
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h✝ : b = 0
⊢ ∅ ⊆ {a}
[PROOFSTEP]
exact empty_subset _
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h✝ : ¬b = 0
⊢ {a} ⊆ {a}
[PROOFSTEP]
exact Subset.refl _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
x : α
⊢ ↑(single a b) x ∈ {0, b}
[PROOFSTEP]
rcases em (a = x) with (rfl | hx) <;> [simp; simp [single_eq_of_ne hx]]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
x : α
⊢ ↑(single a b) x ∈ {0, b}
[PROOFSTEP]
rcases em (a = x) with (rfl | hx)
[GOAL]
case inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ ↑(single a b) a ∈ {0, b}
[PROOFSTEP]
simp
[GOAL]
case inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
x : α
hx : ¬a = x
⊢ ↑(single a b) x ∈ {0, b}
[PROOFSTEP]
simp [single_eq_of_ne hx]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
a : α
b₁ b₂ : M
eq : single a b₁ = single a b₂
⊢ b₁ = b₂
[PROOFSTEP]
have : (single a b₁ : α →₀ M) a = (single a b₂ : α →₀ M) a := by rw [eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
a : α
b₁ b₂ : M
eq : single a b₁ = single a b₂
⊢ ↑(single a b₁) a = ↑(single a b₂) a
[PROOFSTEP]
rw [eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
a : α
b₁ b₂ : M
eq : single a b₁ = single a b₂
this : ↑(single a b₁) a = ↑(single a b₂) a
⊢ b₁ = b₂
[PROOFSTEP]
rwa [single_eq_same, single_eq_same] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b✝ : M
a x : α
b : M
⊢ ↑(single a b) x = 0 ↔ x = a → b = 0
[PROOFSTEP]
simp [single_eq_set_indicator]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b✝ : M
a x : α
b : M
⊢ ↑(single a b) x ≠ 0 ↔ x = a ∧ b ≠ 0
[PROOFSTEP]
simp [single_apply_eq_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a'✝ : α
b✝ : M
a a' : α
b : M
⊢ a ∈ (single a' b).support ↔ a = a' ∧ b ≠ 0
[PROOFSTEP]
simp [single_apply_eq_zero, not_or]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b✝ : M
f : α →₀ M
a : α
b : M
⊢ f = single a b ↔ f.support ⊆ {a} ∧ ↑f a = b
[PROOFSTEP]
refine' ⟨fun h => h.symm ▸ ⟨support_single_subset, single_eq_same⟩, _⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b✝ : M
f : α →₀ M
a : α
b : M
⊢ f.support ⊆ {a} ∧ ↑f a = b → f = single a b
[PROOFSTEP]
rintro ⟨h, rfl⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
f : α →₀ M
a : α
h : f.support ⊆ {a}
⊢ f = single a (↑f a)
[PROOFSTEP]
ext x
[GOAL]
case intro.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
f : α →₀ M
a : α
h : f.support ⊆ {a}
x : α
⊢ ↑f x = ↑(single a (↑f a)) x
[PROOFSTEP]
by_cases hx : a = x
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
f : α →₀ M
a : α
h : f.support ⊆ {a}
x : α
hx : a = x
⊢ ↑f x = ↑(single a (↑f a)) x
[PROOFSTEP]
simp only [hx, single_eq_same, single_eq_of_ne, Ne.def, not_false_iff]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
f : α →₀ M
a : α
h : f.support ⊆ {a}
x : α
hx : ¬a = x
⊢ ↑f x = ↑(single a (↑f a)) x
[PROOFSTEP]
simp only [hx, single_eq_same, single_eq_of_ne, Ne.def, not_false_iff]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b : M
f : α →₀ M
a : α
h : f.support ⊆ {a}
x : α
hx : ¬a = x
⊢ ↑f x = 0
[PROOFSTEP]
exact not_mem_support_iff.1 (mt (fun hx => (mem_singleton.1 (h hx)).symm) hx)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
⊢ single a₁ b₁ = single a₂ b₂ ↔ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
⊢ single a₁ b₁ = single a₂ b₂ → a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
intro eq
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq : single a₁ b₁ = single a₂ b₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
by_cases h : a₁ = a₂
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq : single a₁ b₁ = single a₂ b₂
h : a₁ = a₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
refine' Or.inl ⟨h, _⟩
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq : single a₁ b₁ = single a₂ b₂
h : a₁ = a₂
⊢ b₁ = b₂
[PROOFSTEP]
rwa [h, (single_injective a₂).eq_iff] at eq
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq : single a₁ b₁ = single a₂ b₂
h : ¬a₁ = a₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
rw [FunLike.ext_iff] at eq
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq✝ : single a₁ b₁ = single a₂ b₂
eq : ∀ (x : α), ↑(single a₁ b₁) x = ↑(single a₂ b₂) x
h : ¬a₁ = a₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
have h₁ := eq a₁
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq✝ : single a₁ b₁ = single a₂ b₂
eq : ∀ (x : α), ↑(single a₁ b₁) x = ↑(single a₂ b₂) x
h : ¬a₁ = a₂
h₁ : ↑(single a₁ b₁) a₁ = ↑(single a₂ b₂) a₁
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
have h₂ := eq a₂
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq✝ : single a₁ b₁ = single a₂ b₂
eq : ∀ (x : α), ↑(single a₁ b₁) x = ↑(single a₂ b₂) x
h : ¬a₁ = a₂
h₁ : ↑(single a₁ b₁) a₁ = ↑(single a₂ b₂) a₁
h₂ : ↑(single a₁ b₁) a₂ = ↑(single a₂ b₂) a₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
simp only [single_eq_same, single_eq_of_ne h, single_eq_of_ne (Ne.symm h)] at h₁ h₂
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
eq✝ : single a₁ b₁ = single a₂ b₂
eq : ∀ (x : α), ↑(single a₁ b₁) x = ↑(single a₂ b₂) x
h : ¬a₁ = a₂
h₁ : b₁ = 0
h₂ : 0 = b₂
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0
[PROOFSTEP]
exact Or.inr ⟨h₁, h₂.symm⟩
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
b₁ b₂ : M
⊢ a₁ = a₂ ∧ b₁ = b₂ ∨ b₁ = 0 ∧ b₂ = 0 → single a₁ b₁ = single a₂ b₂
[PROOFSTEP]
rintro (⟨rfl, rfl⟩ | ⟨rfl, rfl⟩)
[GOAL]
case mpr.inl.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ : α
b₁ : M
⊢ single a₁ b₁ = single a₁ b₁
[PROOFSTEP]
rfl
[GOAL]
case mpr.inr.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
a₁ a₂ : α
⊢ single a₁ 0 = single a₂ 0
[PROOFSTEP]
rw [single_zero, single_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
i : α
h : b ≠ 0
⊢ (single i b).support ≠ ⊥
[PROOFSTEP]
simpa only [support_single_ne_zero _ h] using singleton_ne_empty _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b b' : M
hb : b ≠ 0
hb' : b' ≠ 0
i j : α
⊢ Disjoint (single i b).support (single j b').support ↔ i ≠ j
[PROOFSTEP]
rw [support_single_ne_zero _ hb, support_single_ne_zero _ hb', disjoint_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
⊢ single a b = 0 ↔ b = 0
[PROOFSTEP]
simp [FunLike.ext_iff, single_eq_set_indicator]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b✝ : M
a₁ a₂ : α
b : M
⊢ ↑(single a₁ b) a₂ = ↑(single a₂ b) a₁
[PROOFSTEP]
classical simp only [single_apply, eq_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b✝ : M
a₁ a₂ : α
b : M
⊢ ↑(single a₁ b) a₂ = ↑(single a₂ b) a₁
[PROOFSTEP]
simp only [single_apply, eq_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : Nonempty α
inst✝ : Nontrivial M
⊢ Nontrivial (α →₀ M)
[PROOFSTEP]
inhabit α
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : Nonempty α
inst✝ : Nontrivial M
inhabited_h : Inhabited α
⊢ Nontrivial (α →₀ M)
[PROOFSTEP]
rcases exists_ne (0 : M) with ⟨x, hx⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : Nonempty α
inst✝ : Nontrivial M
inhabited_h : Inhabited α
x : M
hx : x ≠ 0
⊢ Nontrivial (α →₀ M)
[PROOFSTEP]
exact nontrivial_of_ne (single default x) 0 (mt single_eq_zero.1 hx)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Unique α
b' : M
⊢ single a b = single a' b' ↔ b = b'
[PROOFSTEP]
rw [unique_ext_iff, Unique.eq_default a, Unique.eq_default a', single_eq_same, single_eq_same]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
f : α →₀ M
⊢ card f.support = 1 ↔ ∃ a, ↑f a ≠ 0 ∧ f = single a (↑f a)
[PROOFSTEP]
simp only [card_eq_one, support_eq_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
f : α →₀ M
⊢ card f.support = 1 ↔ ∃ a b x, f = single a b
[PROOFSTEP]
simp only [card_eq_one, support_eq_singleton']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a✝ a' : α
b✝ : M
f : α →₀ M
a : α
x✝ : ∃ b, f = single a b
b : M
hb : f = single a b
⊢ f.support ⊆ {a}
[PROOFSTEP]
rw [hb, support_subset_singleton, single_eq_same]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Nonempty α
f : α →₀ M
⊢ card f.support ≤ 1 ↔ ∃ a, f = single a (↑f a)
[PROOFSTEP]
simp only [card_le_one_iff_subset_singleton, support_subset_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
a a' : α
b : M
inst✝ : Nonempty α
f : α →₀ M
⊢ card f.support ≤ 1 ↔ ∃ a b, f = single a b
[PROOFSTEP]
simp only [card_le_one_iff_subset_singleton, support_subset_singleton']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : DecidableEq α
inst✝ : Finite α
x : α
m : M
⊢ ↑equivFunOnFinite (single x m) = Pi.single x m
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : DecidableEq α
inst✝ : Finite α
x : α
m : M
x✝ : α
⊢ ↑equivFunOnFinite (single x m) x✝ = Pi.single x m x✝
[PROOFSTEP]
simp [Finsupp.single_eq_pi_single, equivFunOnFinite]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
a a' : α
b : M
inst✝¹ : DecidableEq α
inst✝ : Finite α
x : α
m : M
⊢ ↑equivFunOnFinite.symm (Pi.single x m) = single x m
[PROOFSTEP]
rw [← equivFunOnFinite_single, Equiv.symm_apply_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i : α
f : α →₀ M
a : α
b : M
⊢ Finset α
[PROOFSTEP]
haveI := Classical.decEq α
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i : α
f : α →₀ M
a : α
b : M
this : DecidableEq α
⊢ Finset α
[PROOFSTEP]
haveI := Classical.decEq M
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i : α
f : α →₀ M
a : α
b : M
this✝ : DecidableEq α
this : DecidableEq M
⊢ Finset α
[PROOFSTEP]
exact if b = 0 then f.support.erase a else insert a f.support
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
⊢ (i ∈ if b = 0 then erase f.support a else insert a f.support) ↔ Function.update (↑f) a b i ≠ 0
[PROOFSTEP]
classical
simp [Function.update, Ne.def]
split_ifs with hb ha ha <;> try simp only [*, not_false_iff, iff_true, not_true, iff_false]
· rw [Finset.mem_erase]
simp
· rw [Finset.mem_erase]
simp [ha]
· rw [Finset.mem_insert]
simp [ha]
· rw [Finset.mem_insert]
simp [ha]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
⊢ (i ∈ if b = 0 then erase f.support a else insert a f.support) ↔ Function.update (↑f) a b i ≠ 0
[PROOFSTEP]
simp [Function.update, Ne.def]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
⊢ (i ∈ if b = 0 then erase f.support a else insert a f.support) ↔ ¬(if i = a then b else ↑f i) = 0
[PROOFSTEP]
split_ifs with hb ha ha
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : i = a
⊢ i ∈ erase f.support a ↔ ¬b = 0
[PROOFSTEP]
try simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : i = a
⊢ i ∈ erase f.support a ↔ ¬b = 0
[PROOFSTEP]
simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : ¬i = a
⊢ i ∈ erase f.support a ↔ ¬↑f i = 0
[PROOFSTEP]
try simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : ¬i = a
⊢ i ∈ erase f.support a ↔ ¬↑f i = 0
[PROOFSTEP]
simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : i = a
⊢ i ∈ insert a f.support ↔ ¬b = 0
[PROOFSTEP]
try simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : i = a
⊢ i ∈ insert a f.support ↔ ¬b = 0
[PROOFSTEP]
simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : ¬i = a
⊢ i ∈ insert a f.support ↔ ¬↑f i = 0
[PROOFSTEP]
try simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : ¬i = a
⊢ i ∈ insert a f.support ↔ ¬↑f i = 0
[PROOFSTEP]
simp only [*, not_false_iff, iff_true, not_true, iff_false]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : i = a
⊢ ¬a ∈ erase f.support a
[PROOFSTEP]
rw [Finset.mem_erase]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : i = a
⊢ ¬(a ≠ a ∧ a ∈ f.support)
[PROOFSTEP]
simp
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : ¬i = a
⊢ i ∈ erase f.support a ↔ ¬↑f i = 0
[PROOFSTEP]
rw [Finset.mem_erase]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : b = 0
ha : ¬i = a
⊢ i ≠ a ∧ i ∈ f.support ↔ ¬↑f i = 0
[PROOFSTEP]
simp [ha]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : i = a
⊢ a ∈ insert a f.support
[PROOFSTEP]
rw [Finset.mem_insert]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : i = a
⊢ a = a ∨ a ∈ f.support
[PROOFSTEP]
simp [ha]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : ¬i = a
⊢ i ∈ insert a f.support ↔ ¬↑f i = 0
[PROOFSTEP]
rw [Finset.mem_insert]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f✝ : α →₀ M
a✝ : α
b✝ : M
i✝ : α
f : α →₀ M
a : α
b : M
i : α
hb : ¬b = 0
ha : ¬i = a
⊢ i = a ∨ i ∈ f.support ↔ ¬↑f i = 0
[PROOFSTEP]
simp [ha]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ ↑(update f a b) = Function.update (↑f) a b
[PROOFSTEP]
delta update Function.update
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ ↑{ support := if b = 0 then erase f.support a else insert a f.support,
toFun := fun a_1 => if h : a_1 = a then (_ : a = a_1) ▸ b else ↑f a_1,
mem_support_toFun :=
(_ :
∀ (i : α),
(i ∈ if b = 0 then erase f.support a else insert a f.support) ↔ Function.update (↑f) a b i ≠ 0) } =
fun a_1 => if h : a_1 = a then (_ : a = a_1) ▸ b else ↑f a_1
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
x✝ : α
⊢ ↑{ support := if b = 0 then erase f.support a else insert a f.support,
toFun := fun a_1 => if h : a_1 = a then (_ : a = a_1) ▸ b else ↑f a_1,
mem_support_toFun :=
(_ :
∀ (i : α),
(i ∈ if b = 0 then erase f.support a else insert a f.support) ↔ Function.update (↑f) a b i ≠ 0) }
x✝ =
if h : x✝ = a then (_ : a = x✝) ▸ b else ↑f x✝
[PROOFSTEP]
dsimp
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
x✝ : α
⊢ (if h : x✝ = a then (_ : a = x✝) ▸ b else ↑f x✝) = if h : x✝ = a then (_ : a = x✝) ▸ b else ↑f x✝
[PROOFSTEP]
split_ifs
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
x✝ : α
h✝ : x✝ = a
⊢ (_ : a = x✝) ▸ b = (_ : a = x✝) ▸ b
[PROOFSTEP]
simp
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
x✝ : α
h✝ : ¬x✝ = a
⊢ ↑f x✝ = ↑f x✝
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i : α
⊢ update f a (↑f a) = f
[PROOFSTEP]
classical
ext
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i : α
⊢ update f a (↑f a) = f
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i a✝ : α
⊢ ↑(update f a (↑f a)) a✝ = ↑f a✝
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i : α
⊢ update 0 a b = single a b
[PROOFSTEP]
classical
ext
rw [single_eq_update]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i : α
⊢ update 0 a b = single a b
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i a✝ : α
⊢ ↑(update 0 a b) a✝ = ↑(single a b) a✝
[PROOFSTEP]
rw [single_eq_update]
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
b : M
i a✝ : α
⊢ ↑(update 0 a b) a✝ = Function.update 0 a b a✝
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝¹ : DecidableEq α
inst✝ : DecidableEq M
⊢ (update f a b).support = if b = 0 then erase f.support a else insert a f.support
[PROOFSTEP]
classical dsimp [update]; congr <;> apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝¹ : DecidableEq α
inst✝ : DecidableEq M
⊢ (update f a b).support = if b = 0 then erase f.support a else insert a f.support
[PROOFSTEP]
dsimp [update]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝¹ : DecidableEq α
inst✝ : DecidableEq M
⊢ (if b = 0 then erase f.support a else insert a f.support) = if b = 0 then erase f.support a else insert a f.support
[PROOFSTEP]
congr
[GOAL]
case e_t.h.e_2.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝¹ : DecidableEq α
inst✝ : DecidableEq M
⊢ (fun a b => Classical.decEq α a b) = fun a b => inst✝¹ a b
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
case e_e.h.e_3.h.h.e_2.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝¹ : DecidableEq α
inst✝ : DecidableEq M
⊢ (fun a b => Classical.decEq α a b) = fun a b => inst✝¹ a b
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ (update f a 0).support = erase f.support a
[PROOFSTEP]
classical
simp only [update, ite_true, mem_support_iff, ne_eq, not_not]
congr; apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ (update f a 0).support = erase f.support a
[PROOFSTEP]
simp only [update, ite_true, mem_support_iff, ne_eq, not_not]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ erase f.support a = erase f.support a
[PROOFSTEP]
congr
[GOAL]
case h.e_2.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
⊢ (fun a b => Classical.decEq α a b) = fun a b => inst✝ a b
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
h : b ≠ 0
⊢ (update f a b).support = insert a f.support
[PROOFSTEP]
classical
simp only [update, h, ite_false, mem_support_iff, ne_eq]
congr; apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
h : b ≠ 0
⊢ (update f a b).support = insert a f.support
[PROOFSTEP]
simp only [update, h, ite_false, mem_support_iff, ne_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
h : b ≠ 0
⊢ insert a f.support = insert a f.support
[PROOFSTEP]
congr
[GOAL]
case h.e_3.h.h.e_2.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
f : α →₀ M
a : α
b : M
i : α
inst✝ : DecidableEq α
h : b ≠ 0
⊢ (fun a b => Classical.decEq α a b) = fun a b => inst✝ a b
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
⊢ a' ∈ Finset.erase f.support a ↔ (fun a' => if a' = a then 0 else ↑f a') a' ≠ 0
[PROOFSTEP]
classical
rw [mem_erase, mem_support_iff]; dsimp
split_ifs with h
exact ⟨fun H _ => H.1 h, fun H => (H rfl).elim⟩
exact and_iff_right h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
⊢ a' ∈ Finset.erase f.support a ↔ (fun a' => if a' = a then 0 else ↑f a') a' ≠ 0
[PROOFSTEP]
rw [mem_erase, mem_support_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
⊢ a' ≠ a ∧ ↑f a' ≠ 0 ↔ (fun a' => if a' = a then 0 else ↑f a') a' ≠ 0
[PROOFSTEP]
dsimp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
⊢ ¬a' = a ∧ ¬↑f a' = 0 ↔ ¬(if a' = a then 0 else ↑f a') = 0
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
h : a' = a
⊢ ¬a' = a ∧ ¬↑f a' = 0 ↔ ¬0 = 0
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
h : ¬a' = a
⊢ ¬a' = a ∧ ¬↑f a' = 0 ↔ ¬↑f a' = 0
[PROOFSTEP]
exact ⟨fun H _ => H.1 h, fun H => (H rfl).elim⟩
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
a' : α
h : ¬a' = a
⊢ ¬a' = a ∧ ¬↑f a' = 0 ↔ ¬↑f a' = 0
[PROOFSTEP]
exact and_iff_right h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq α
a : α
f : α →₀ M
⊢ (erase a f).support = Finset.erase f.support a
[PROOFSTEP]
classical
dsimp [erase]
congr; apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq α
a : α
f : α →₀ M
⊢ (erase a f).support = Finset.erase f.support a
[PROOFSTEP]
dsimp [erase]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq α
a : α
f : α →₀ M
⊢ Finset.erase f.support a = Finset.erase f.support a
[PROOFSTEP]
congr
[GOAL]
case h.e_2.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq α
a : α
f : α →₀ M
⊢ (fun a b => Classical.decEq α a b) = fun a b => inst✝ a b
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
⊢ ↑(erase a f) a = 0
[PROOFSTEP]
classical simp only [erase, coe_mk, ite_true]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
f : α →₀ M
⊢ ↑(erase a f) a = 0
[PROOFSTEP]
simp only [erase, coe_mk, ite_true]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
f : α →₀ M
h : a' ≠ a
⊢ ↑(erase a f) a' = ↑f a'
[PROOFSTEP]
classical simp only [erase, coe_mk, h, ite_false]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
f : α →₀ M
h : a' ≠ a
⊢ ↑(erase a f) a' = ↑f a'
[PROOFSTEP]
simp only [erase, coe_mk, h, ite_false]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
⊢ erase a (single a b) = 0
[PROOFSTEP]
ext s
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
s : α
⊢ ↑(erase a (single a b)) s = ↑0 s
[PROOFSTEP]
by_cases hs : s = a
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
s : α
hs : s = a
⊢ ↑(erase a (single a b)) s = ↑0 s
[PROOFSTEP]
rw [hs, erase_same]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
s : α
hs : s = a
⊢ 0 = ↑0 a
[PROOFSTEP]
rfl
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
s : α
hs : ¬s = a
⊢ ↑(erase a (single a b)) s = ↑0 s
[PROOFSTEP]
rw [erase_ne hs]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
b : M
s : α
hs : ¬s = a
⊢ ↑(single a b) s = ↑0 s
[PROOFSTEP]
exact single_eq_of_ne (Ne.symm hs)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
⊢ erase a (single a' b) = single a' b
[PROOFSTEP]
ext s
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
s : α
⊢ ↑(erase a (single a' b)) s = ↑(single a' b) s
[PROOFSTEP]
by_cases hs : s = a
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
s : α
hs : s = a
⊢ ↑(erase a (single a' b)) s = ↑(single a' b) s
[PROOFSTEP]
rw [hs, erase_same, single_eq_of_ne h.symm]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a a' : α
b : M
h : a ≠ a'
s : α
hs : ¬s = a
⊢ ↑(erase a (single a' b)) s = ↑(single a' b) s
[PROOFSTEP]
rw [erase_ne hs]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
haf : ¬a ∈ f.support
⊢ erase a f = f
[PROOFSTEP]
ext b
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
haf : ¬a ∈ f.support
b : α
⊢ ↑(erase a f) b = ↑f b
[PROOFSTEP]
by_cases hab : b = a
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
haf : ¬a ∈ f.support
b : α
hab : b = a
⊢ ↑(erase a f) b = ↑f b
[PROOFSTEP]
rwa [hab, erase_same, eq_comm, ← not_mem_support_iff]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
f : α →₀ M
a : α
haf : ¬a ∈ f.support
b : α
hab : ¬b = a
⊢ ↑(erase a f) b = ↑f b
[PROOFSTEP]
rw [erase_ne hab]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
⊢ erase a 0 = 0
[PROOFSTEP]
classical rw [← support_eq_empty, support_erase, support_zero, erase_empty]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
a : α
⊢ erase a 0 = 0
[PROOFSTEP]
rw [← support_eq_empty, support_erase, support_zero, erase_empty]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ ∀ (a : α), a ∈ filter (fun x => f x ≠ 0) s ↔ f a ≠ 0
[PROOFSTEP]
classical simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ ∀ (a : α), a ∈ filter (fun x => f x ≠ 0) s ↔ f a ≠ 0
[PROOFSTEP]
simpa
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ (onFinset s f hf).support ⊆ s
[PROOFSTEP]
classical convert filter_subset (f · ≠ 0) s
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ (onFinset s f hf).support ⊆ s
[PROOFSTEP]
convert filter_subset (f · ≠ 0) s
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : Zero M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
a : α
⊢ a ∈ (onFinset s f hf).support ↔ f a ≠ 0
[PROOFSTEP]
rw [Finsupp.mem_support_iff, Finsupp.onFinset_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ (onFinset s f hf).support = filter (fun a => f a ≠ 0) s
[PROOFSTEP]
dsimp [onFinset]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : DecidableEq M
s : Finset α
f : α → M
hf : ∀ (a : α), f a ≠ 0 → a ∈ s
⊢ filter (fun x => ¬f x = 0) s = filter (fun a => ¬f a = 0) s
[PROOFSTEP]
congr
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N
hf : f 0 = 0
g : α →₀ M
a : α
⊢ (f ∘ ↑g) a ≠ 0 → a ∈ g.support
[PROOFSTEP]
rw [mem_support_iff, not_imp_not]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N
hf : f 0 = 0
g : α →₀ M
a : α
⊢ ↑g a = 0 → (f ∘ ↑g) a = 0
[PROOFSTEP]
exact fun H => (congr_arg f H).trans hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N
hf : f 0 = 0
a : α
⊢ ↑(mapRange f hf 0) a = ↑0 a
[PROOFSTEP]
simp only [hf, zero_apply, mapRange_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N
hf : f 0 = 0
a : α
b : M
a' : α
⊢ ↑(mapRange f hf (single a b)) a' = ↑(single a (f b)) a'
[PROOFSTEP]
classical simpa only [single_eq_pi_single] using Pi.apply_single _ (fun _ => hf) a _ a'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N
hf : f 0 = 0
a : α
b : M
a' : α
⊢ ↑(mapRange f hf (single a b)) a' = ↑(single a (f b)) a'
[PROOFSTEP]
simpa only [single_eq_pi_single] using Pi.apply_single _ (fun _ => hf) a _ a'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
e : M → N
he0 : e 0 = 0
f : ι →₀ M
he : Injective e
⊢ (mapRange e he0 f).support = f.support
[PROOFSTEP]
ext
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
e : M → N
he0 : e 0 = 0
f : ι →₀ M
he : Injective e
a✝ : ι
⊢ a✝ ∈ (mapRange e he0 f).support ↔ a✝ ∈ f.support
[PROOFSTEP]
simp only [Finsupp.mem_support_iff, Ne.def, Finsupp.mapRange_apply]
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
e : M → N
he0 : e 0 = 0
f : ι →₀ M
he : Injective e
a✝ : ι
⊢ ¬e (↑f a✝) = 0 ↔ ¬↑f a✝ = 0
[PROOFSTEP]
exact he.ne_iff' he0
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
this : DecidableEq β
h : a₂ ∈ map f v.support
⊢ ∃! a, a ∈ v.support ∧ (fun a₁ => ↑f a₁ = a₂) a
[PROOFSTEP]
rcases Finset.mem_map.1 h with ⟨a, ha, rfl⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
this : DecidableEq β
a : α
ha : a ∈ v.support
h : ↑f a ∈ map f v.support
⊢ ∃! a_1, a_1 ∈ v.support ∧ (fun a₁ => ↑f a₁ = ↑f a) a_1
[PROOFSTEP]
exact ExistsUnique.intro a ⟨ha, rfl⟩ fun b ⟨_, hb⟩ => f.injective hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
⊢ a₂ ∈ map f v.support ↔
(fun a₂ =>
if h : a₂ ∈ map f v.support then
↑v (choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ (fun a₁ => ↑f a₁ = a₂) a))
else 0)
a₂ ≠
0
[PROOFSTEP]
dsimp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
⊢ a₂ ∈ map f v.support ↔
¬(if h : a₂ ∈ map f v.support then
↑v (choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ ↑f a = a₂))
else 0) =
0
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
h : a₂ ∈ map f v.support
⊢ a₂ ∈ map f v.support ↔ ¬↑v (choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ ↑f a = a₂)) = 0
[PROOFSTEP]
simp only [h, true_iff_iff, Ne.def]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
h : a₂ ∈ map f v.support
⊢ ¬↑v (choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ ↑f a = a₂)) = 0
[PROOFSTEP]
rw [← not_mem_support_iff, not_not]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
h : a₂ ∈ map f v.support
⊢ choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ ↑f a = a₂) ∈ v.support
[PROOFSTEP]
classical apply Finset.choose_mem
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
h : a₂ ∈ map f v.support
⊢ choose (fun a₁ => ↑f a₁ = a₂) v.support (_ : ∃! a, a ∈ v.support ∧ ↑f a = a₂) ∈ v.support
[PROOFSTEP]
apply Finset.choose_mem
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a₂ : β
h : ¬a₂ ∈ map f v.support
⊢ a₂ ∈ map f v.support ↔ ¬0 = 0
[PROOFSTEP]
simp only [h, Ne.def, ne_self_iff_false]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
⊢ ↑(embDomain f v) (↑f a) = ↑v a
[PROOFSTEP]
classical
change dite _ _ _ = _
split_ifs with h <;> rw [Finset.mem_map' f] at h
· refine' congr_arg (v : α → M) (f.inj' _)
exact Finset.choose_property (fun a₁ => f a₁ = f a) _ _
· exact (not_mem_support_iff.1 h).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
⊢ ↑(embDomain f v) (↑f a) = ↑v a
[PROOFSTEP]
change dite _ _ _ = _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
⊢ (if h : ↑f a ∈ map f v.support then
(fun h =>
↑v (choose (fun a₁ => ↑f a₁ = ↑f a) v.support (_ : ∃! a_1, a_1 ∈ v.support ∧ (fun a₁ => ↑f a₁ = ↑f a) a_1)))
h
else (fun h => 0) h) =
↑v a
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
h : ↑f a ∈ map f v.support
⊢ (fun h => ↑v (choose (fun a₁ => ↑f a₁ = ↑f a) v.support (_ : ∃! a_1, a_1 ∈ v.support ∧ (fun a₁ => ↑f a₁ = ↑f a) a_1)))
h =
↑v a
[PROOFSTEP]
rw [Finset.mem_map' f] at h
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
h : ¬↑f a ∈ map f v.support
⊢ (fun h => 0) h = ↑v a
[PROOFSTEP]
rw [Finset.mem_map' f] at h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
h✝ : ↑f a ∈ map f v.support
h : a ∈ v.support
⊢ (fun h => ↑v (choose (fun a₁ => ↑f a₁ = ↑f a) v.support (_ : ∃! a_1, a_1 ∈ v.support ∧ (fun a₁ => ↑f a₁ = ↑f a) a_1)))
h✝ =
↑v a
[PROOFSTEP]
refine' congr_arg (v : α → M) (f.inj' _)
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
h✝ : ↑f a ∈ map f v.support
h : a ∈ v.support
⊢ Embedding.toFun f
(choose (fun a₁ => ↑f a₁ = ↑f a) v.support (_ : ∃! a_1, a_1 ∈ v.support ∧ (fun a₁ => ↑f a₁ = ↑f a) a_1)) =
Embedding.toFun f a
[PROOFSTEP]
exact Finset.choose_property (fun a₁ => f a₁ = f a) _ _
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
h✝ : ¬↑f a ∈ map f v.support
h : ¬a ∈ v.support
⊢ (fun h => 0) h✝ = ↑v a
[PROOFSTEP]
exact (not_mem_support_iff.1 h).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : β
h : ¬a ∈ Set.range ↑f
⊢ ↑(embDomain f v) a = 0
[PROOFSTEP]
classical
refine' dif_neg (mt (fun h => _) h)
rcases Finset.mem_map.1 h with ⟨a, _h, rfl⟩
exact Set.mem_range_self a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : β
h : ¬a ∈ Set.range ↑f
⊢ ↑(embDomain f v) a = 0
[PROOFSTEP]
refine' dif_neg (mt (fun h => _) h)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : β
h✝ : ¬a ∈ Set.range ↑f
h : a ∈ map f v.support
⊢ a ∈ Set.range ↑f
[PROOFSTEP]
rcases Finset.mem_map.1 h with ⟨a, _h, rfl⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
v : α →₀ M
a : α
_h : a ∈ v.support
h✝ : ¬↑f a ∈ Set.range ↑f
h : ↑f a ∈ map f v.support
⊢ ↑f a ∈ Set.range ↑f
[PROOFSTEP]
exact Set.mem_range_self a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
l₁ l₂ : α →₀ M
h : embDomain f l₁ = embDomain f l₂
a : α
⊢ ↑l₁ a = ↑l₂ a
[PROOFSTEP]
simpa only [embDomain_apply] using FunLike.ext_iff.1 h (f a)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
⊢ embDomain f (mapRange g hg p) = mapRange g hg (embDomain f p)
[PROOFSTEP]
ext a
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a : β
⊢ ↑(embDomain f (mapRange g hg p)) a = ↑(mapRange g hg (embDomain f p)) a
[PROOFSTEP]
by_cases h : a ∈ Set.range f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a : β
h : a ∈ Set.range ↑f
⊢ ↑(embDomain f (mapRange g hg p)) a = ↑(mapRange g hg (embDomain f p)) a
[PROOFSTEP]
rcases h with ⟨a', rfl⟩
[GOAL]
case pos.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a' : α
⊢ ↑(embDomain f (mapRange g hg p)) (↑f a') = ↑(mapRange g hg (embDomain f p)) (↑f a')
[PROOFSTEP]
rw [mapRange_apply, embDomain_apply, embDomain_apply, mapRange_apply]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a : β
h : ¬a ∈ Set.range ↑f
⊢ ↑(embDomain f (mapRange g hg p)) a = ↑(mapRange g hg (embDomain f p)) a
[PROOFSTEP]
rw [mapRange_apply, embDomain_notin_range, embDomain_notin_range, ← hg]
[GOAL]
case neg.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a : β
h : ¬a ∈ Set.range ↑f
⊢ ¬a ∈ Set.range ↑f
[PROOFSTEP]
assumption
[GOAL]
case neg.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
g : M → N
p : α →₀ M
hg : g 0 = 0
a : β
h : ¬a ∈ Set.range ↑f
⊢ ¬a ∈ Set.range ↑f
[PROOFSTEP]
assumption
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
⊢ ∃ x, l = single x b ∧ ↑f x = a
[PROOFSTEP]
classical
have h_map_support : Finset.map f l.support = { a } := by rw [← support_embDomain, h, support_single_ne_zero _ hb]
have ha : a ∈ Finset.map f l.support := by simp only [h_map_support, Finset.mem_singleton]
rcases Finset.mem_map.1 ha with ⟨c, _hc₁, hc₂⟩
use c
constructor
· ext d
rw [← embDomain_apply f l, h]
by_cases h_cases : c = d
· simp only [Eq.symm h_cases, hc₂, single_eq_same]
· rw [single_apply, single_apply, if_neg, if_neg h_cases]
by_contra hfd
exact h_cases (f.injective (hc₂.trans hfd))
· exact hc₂
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
⊢ ∃ x, l = single x b ∧ ↑f x = a
[PROOFSTEP]
have h_map_support : Finset.map f l.support = { a } := by rw [← support_embDomain, h, support_single_ne_zero _ hb]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
⊢ map f l.support = {a}
[PROOFSTEP]
rw [← support_embDomain, h, support_single_ne_zero _ hb]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
⊢ ∃ x, l = single x b ∧ ↑f x = a
[PROOFSTEP]
have ha : a ∈ Finset.map f l.support := by simp only [h_map_support, Finset.mem_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
⊢ a ∈ map f l.support
[PROOFSTEP]
simp only [h_map_support, Finset.mem_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
⊢ ∃ x, l = single x b ∧ ↑f x = a
[PROOFSTEP]
rcases Finset.mem_map.1 ha with ⟨c, _hc₁, hc₂⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
⊢ ∃ x, l = single x b ∧ ↑f x = a
[PROOFSTEP]
use c
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
⊢ l = single c b ∧ ↑f c = a
[PROOFSTEP]
constructor
[GOAL]
case h.left
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
⊢ l = single c b
[PROOFSTEP]
ext d
[GOAL]
case h.left.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
⊢ ↑l d = ↑(single c b) d
[PROOFSTEP]
rw [← embDomain_apply f l, h]
[GOAL]
case h.left.h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
⊢ ↑(single a b) (↑f d) = ↑(single c b) d
[PROOFSTEP]
by_cases h_cases : c = d
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
h_cases : c = d
⊢ ↑(single a b) (↑f d) = ↑(single c b) d
[PROOFSTEP]
simp only [Eq.symm h_cases, hc₂, single_eq_same]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
h_cases : ¬c = d
⊢ ↑(single a b) (↑f d) = ↑(single c b) d
[PROOFSTEP]
rw [single_apply, single_apply, if_neg, if_neg h_cases]
[GOAL]
case neg.hnc
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
h_cases : ¬c = d
⊢ ¬a = ↑f d
[PROOFSTEP]
by_contra hfd
[GOAL]
case neg.hnc
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
d : α
h_cases : ¬c = d
hfd : a = ↑f d
⊢ False
[PROOFSTEP]
exact h_cases (f.injective (hc₂.trans hfd))
[GOAL]
case h.right
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
l : α →₀ M
f : α ↪ β
a : β
b : M
hb : b ≠ 0
h : embDomain f l = single a b
h_map_support : map f l.support = {a}
ha : a ∈ map f l.support
c : α
_hc₁ : c ∈ l.support
hc₂ : ↑f c = a
⊢ ↑f c = a
[PROOFSTEP]
exact hc₂
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
⊢ embDomain f (single a m) = single (↑f a) m
[PROOFSTEP]
classical
ext b
by_cases h : b ∈ Set.range f
· rcases h with ⟨a', rfl⟩
simp [single_apply]
· simp only [embDomain_notin_range, h, single_apply, not_false_iff]
rw [if_neg]
rintro rfl
simp at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
⊢ embDomain f (single a m) = single (↑f a) m
[PROOFSTEP]
ext b
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
b : β
⊢ ↑(embDomain f (single a m)) b = ↑(single (↑f a) m) b
[PROOFSTEP]
by_cases h : b ∈ Set.range f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
b : β
h : b ∈ Set.range ↑f
⊢ ↑(embDomain f (single a m)) b = ↑(single (↑f a) m) b
[PROOFSTEP]
rcases h with ⟨a', rfl⟩
[GOAL]
case pos.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
a' : α
⊢ ↑(embDomain f (single a m)) (↑f a') = ↑(single (↑f a) m) (↑f a')
[PROOFSTEP]
simp [single_apply]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
b : β
h : ¬b ∈ Set.range ↑f
⊢ ↑(embDomain f (single a m)) b = ↑(single (↑f a) m) b
[PROOFSTEP]
simp only [embDomain_notin_range, h, single_apply, not_false_iff]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
b : β
h : ¬b ∈ Set.range ↑f
⊢ 0 = if ↑f a = b then m else 0
[PROOFSTEP]
rw [if_neg]
[GOAL]
case neg.hnc
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
b : β
h : ¬b ∈ Set.range ↑f
⊢ ¬↑f a = b
[PROOFSTEP]
rintro rfl
[GOAL]
case neg.hnc
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : Zero M
inst✝ : Zero N
f : α ↪ β
a : α
m : M
h : ¬↑f a ∈ Set.range ↑f
⊢ False
[PROOFSTEP]
simp at h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
a : α
H : f (↑g₁ a) (↑g₂ a) ≠ 0
⊢ a ∈ g₁.support ∪ g₂.support
[PROOFSTEP]
classical
rw [mem_union, mem_support_iff, mem_support_iff, ← not_and_or]
rintro ⟨h₁, h₂⟩; rw [h₁, h₂] at H ; exact H hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
a : α
H : f (↑g₁ a) (↑g₂ a) ≠ 0
⊢ a ∈ g₁.support ∪ g₂.support
[PROOFSTEP]
rw [mem_union, mem_support_iff, mem_support_iff, ← not_and_or]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
a : α
H : f (↑g₁ a) (↑g₂ a) ≠ 0
⊢ ¬(↑g₁ a = 0 ∧ ↑g₂ a = 0)
[PROOFSTEP]
rintro ⟨h₁, h₂⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
a : α
H : f (↑g₁ a) (↑g₂ a) ≠ 0
h₁ : ↑g₁ a = 0
h₂ : ↑g₂ a = 0
⊢ False
[PROOFSTEP]
rw [h₁, h₂] at H
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
a : α
H : f 0 0 ≠ 0
h₁ : ↑g₁ a = 0
h₂ : ↑g₂ a = 0
⊢ False
[PROOFSTEP]
exact H hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
D : DecidableEq α
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
⊢ (zipWith f hf g₁ g₂).support ⊆ g₁.support ∪ g₂.support
[PROOFSTEP]
rw [Subsingleton.elim D]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝² : Zero M
inst✝¹ : Zero N
inst✝ : Zero P
D : DecidableEq α
f : M → N → P
hf : f 0 0 = 0
g₁ : α →₀ M
g₂ : α →₀ N
⊢ (zipWith f hf g₁ g₂).support ⊆ g₁.support ∪ g₂.support
[PROOFSTEP]
exact support_onFinset_subset
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₁.support
⊢ a ∈ (g₁ + g₂).support
[PROOFSTEP]
have : a ∉ g₂.support := disjoint_left.1 h ha
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₁.support
this : ¬a ∈ g₂.support
⊢ a ∈ (g₁ + g₂).support
[PROOFSTEP]
simp only [mem_support_iff, not_not] at *
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₁ a ≠ 0
this : ↑g₂ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0
[PROOFSTEP]
simpa only [add_apply, this, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₂.support
⊢ a ∈ (g₁ + g₂).support
[PROOFSTEP]
have : a ∉ g₁.support := disjoint_right.1 h ha
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₂.support
this : ¬a ∈ g₁.support
⊢ a ∈ (g₁ + g₂).support
[PROOFSTEP]
simp only [mem_support_iff, not_not] at *
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₂ a ≠ 0
this : ↑g₁ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0
[PROOFSTEP]
simpa only [add_apply, this, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
b₁ b₂ : M
a' : α
⊢ ↑(single a (b₁ + b₂)) a' = ↑(single a b₁ + single a b₂) a'
[PROOFSTEP]
by_cases h : a = a'
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
b₁ b₂ : M
a' : α
h : a = a'
⊢ ↑(single a (b₁ + b₂)) a' = ↑(single a b₁ + single a b₂) a'
[PROOFSTEP]
rw [h, add_apply, single_eq_same, single_eq_same, single_eq_same]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
b₁ b₂ : M
a' : α
h : ¬a = a'
⊢ ↑(single a (b₁ + b₂)) a' = ↑(single a b₁ + single a b₂) a'
[PROOFSTEP]
rw [add_apply, single_eq_of_ne h, single_eq_of_ne h, single_eq_of_ne h, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ update f a b = single a b + erase a f
[PROOFSTEP]
classical
ext j
rcases eq_or_ne a j with (rfl | h)
· simp
· simp [Function.update_noteq h.symm, single_apply, h, erase_ne, h.symm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ update f a b = single a b + erase a f
[PROOFSTEP]
ext j
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
j : α
⊢ ↑(update f a b) j = ↑(single a b + erase a f) j
[PROOFSTEP]
rcases eq_or_ne a j with (rfl | h)
[GOAL]
case h.inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ ↑(update f a b) a = ↑(single a b + erase a f) a
[PROOFSTEP]
simp
[GOAL]
case h.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
j : α
h : a ≠ j
⊢ ↑(update f a b) j = ↑(single a b + erase a f) j
[PROOFSTEP]
simp [Function.update_noteq h.symm, single_apply, h, erase_ne, h.symm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ update f a b = erase a f + single a b
[PROOFSTEP]
classical
ext j
rcases eq_or_ne a j with (rfl | h)
· simp
· simp [Function.update_noteq h.symm, single_apply, h, erase_ne, h.symm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ update f a b = erase a f + single a b
[PROOFSTEP]
ext j
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
j : α
⊢ ↑(update f a b) j = ↑(erase a f + single a b) j
[PROOFSTEP]
rcases eq_or_ne a j with (rfl | h)
[GOAL]
case h.inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
⊢ ↑(update f a b) a = ↑(erase a f + single a b) a
[PROOFSTEP]
simp
[GOAL]
case h.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α →₀ M
a : α
b : M
j : α
h : a ≠ j
⊢ ↑(update f a b) j = ↑(erase a f + single a b) j
[PROOFSTEP]
simp [Function.update_noteq h.symm, single_apply, h, erase_ne, h.symm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f : α →₀ M
⊢ single a (↑f a) + erase a f = f
[PROOFSTEP]
rw [← update_eq_single_add_erase, update_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f : α →₀ M
⊢ erase a f + single a (↑f a) = f
[PROOFSTEP]
rw [← update_eq_erase_add_single, update_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f f' : α →₀ M
⊢ erase a (f + f') = erase a f + erase a f'
[PROOFSTEP]
ext s
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f f' : α →₀ M
s : α
⊢ ↑(erase a (f + f')) s = ↑(erase a f + erase a f') s
[PROOFSTEP]
by_cases hs : s = a
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f f' : α →₀ M
s : α
hs : s = a
⊢ ↑(erase a (f + f')) s = ↑(erase a f + erase a f') s
[PROOFSTEP]
rw [hs, add_apply, erase_same, erase_same, erase_same, add_zero]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
a : α
f f' : α →₀ M
s : α
hs : ¬s = a
⊢ ↑(erase a (f + f')) s = ↑(erase a f + erase a f') s
[PROOFSTEP]
rw [add_apply, erase_ne hs, erase_ne hs, erase_ne hs, add_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s : Finset α
f : α →₀ M
hf : f.support = ∅
⊢ p f
[PROOFSTEP]
rwa [support_eq_empty.1 hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p f
[PROOFSTEP]
suffices p (single a (f a) + f.erase a) by rwa [single_add_erase] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
this : p (single a (↑f a) + erase a f)
⊢ p f
[PROOFSTEP]
rwa [single_add_erase] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (single a (↑f a) + erase a f)
[PROOFSTEP]
classical
apply ha
· rw [support_erase, mem_erase]
exact fun H => H.1 rfl
· rw [← mem_support_iff, hf]
exact mem_cons_self _ _
· apply ih _ _
rw [support_erase, hf, Finset.erase_cons]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (single a (↑f a) + erase a f)
[PROOFSTEP]
apply ha
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ¬a ∈ (erase a f).support
[PROOFSTEP]
rw [support_erase, mem_erase]
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ¬(a ≠ a ∧ a ∈ f.support)
[PROOFSTEP]
exact fun H => H.1 rfl
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ↑f a ≠ 0
[PROOFSTEP]
rw [← mem_support_iff, hf]
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ a ∈ cons a s has
[PROOFSTEP]
exact mem_cons_self _ _
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (erase a f)
[PROOFSTEP]
apply ih _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (single a b + f)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ (erase a f).support = s
[PROOFSTEP]
rw [support_erase, hf, Finset.erase_cons]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s : Finset α
f : α →₀ M
hf : f.support = ∅
⊢ p f
[PROOFSTEP]
rwa [support_eq_empty.1 hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p f
[PROOFSTEP]
suffices p (f.erase a + single a (f a)) by rwa [erase_add_single] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
this : p (erase a f + single a (↑f a))
⊢ p f
[PROOFSTEP]
rwa [erase_add_single] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (erase a f + single a (↑f a))
[PROOFSTEP]
classical
apply ha
· rw [support_erase, mem_erase]
exact fun H => H.1 rfl
· rw [← mem_support_iff, hf]
exact mem_cons_self _ _
· apply ih _ _
rw [support_erase, hf, Finset.erase_cons]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (erase a f + single a (↑f a))
[PROOFSTEP]
apply ha
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ¬a ∈ (erase a f).support
[PROOFSTEP]
rw [support_erase, mem_erase]
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ¬(a ≠ a ∧ a ∈ f.support)
[PROOFSTEP]
exact fun H => H.1 rfl
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ ↑f a ≠ 0
[PROOFSTEP]
rw [← mem_support_iff, hf]
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ a ∈ cons a s has
[PROOFSTEP]
exact mem_cons_self _ _
[GOAL]
case a
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ p (erase a f)
[PROOFSTEP]
apply ih _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
p : (α →₀ M) → Prop
f✝ : α →₀ M
h0 : p 0
ha : ∀ (a : α) (b : M) (f : α →₀ M), ¬a ∈ f.support → b ≠ 0 → p f → p (f + single a b)
s✝ : Finset α
a : α
s : Finset α
has : ¬a ∈ s
ih : ∀ (f : α →₀ M), f.support = s → p f
f : α →₀ M
hf : f.support = cons a s has
⊢ (erase a f).support = s
[PROOFSTEP]
rw [support_erase, hf, Finset.erase_cons]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : AddZeroClass N
f g : (α →₀ M) →+ N
H : ∀ (x : α) (y : M), ↑f (single x y) = ↑g (single x y)
⊢ f = g
[PROOFSTEP]
refine' AddMonoidHom.eq_of_eqOn_denseM add_closure_setOf_eq_single _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : AddZeroClass N
f g : (α →₀ M) →+ N
H : ∀ (x : α) (y : M), ↑f (single x y) = ↑g (single x y)
⊢ Set.EqOn ↑f ↑g {f | ∃ a b, f = single a b}
[PROOFSTEP]
rintro _ ⟨x, y, rfl⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : AddZeroClass N
f g : (α →₀ M) →+ N
H : ∀ (x : α) (y : M), ↑f (single x y) = ↑g (single x y)
x : α
y : M
⊢ ↑f (single x y) = ↑g (single x y)
[PROOFSTEP]
apply H
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : MulOneClass N
f g : Multiplicative (α →₀ M) →* N
H : ∀ (x : α) (y : M), ↑f (↑Multiplicative.ofAdd (single x y)) = ↑g (↑Multiplicative.ofAdd (single x y))
⊢ f = g
[PROOFSTEP]
have := @addHom_ext α M (Additive N) _ _ (MonoidHom.toAdditive'' f) (MonoidHom.toAdditive'' g) H
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : MulOneClass N
f g : Multiplicative (α →₀ M) →* N
H : ∀ (x : α) (y : M), ↑f (↑Multiplicative.ofAdd (single x y)) = ↑g (↑Multiplicative.ofAdd (single x y))
this : ↑MonoidHom.toAdditive'' f = ↑MonoidHom.toAdditive'' g
⊢ f = g
[PROOFSTEP]
ext
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : MulOneClass N
f g : Multiplicative (α →₀ M) →* N
H : ∀ (x : α) (y : M), ↑f (↑Multiplicative.ofAdd (single x y)) = ↑g (↑Multiplicative.ofAdd (single x y))
this : ↑MonoidHom.toAdditive'' f = ↑MonoidHom.toAdditive'' g
x✝ : Multiplicative (α →₀ M)
⊢ ↑f x✝ = ↑g x✝
[PROOFSTEP]
rw [FunLike.ext_iff] at this
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H✝ : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : MulOneClass N
f g : Multiplicative (α →₀ M) →* N
H : ∀ (x : α) (y : M), ↑f (↑Multiplicative.ofAdd (single x y)) = ↑g (↑Multiplicative.ofAdd (single x y))
this✝ : ↑MonoidHom.toAdditive'' f = ↑MonoidHom.toAdditive'' g
this : ∀ (x : α →₀ M), ↑(↑MonoidHom.toAdditive'' f) x = ↑(↑MonoidHom.toAdditive'' g) x
x✝ : Multiplicative (α →₀ M)
⊢ ↑f x✝ = ↑g x✝
[PROOFSTEP]
apply this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : AddZeroClass M
inst✝ : AddZeroClass N
f : M → N
hf : f 0 = 0
hf' : ∀ (x y : M), f (x + y) = f x + f y
v₁ v₂ : α →₀ M
x✝ : α
⊢ ↑(mapRange f hf (v₁ + v₂)) x✝ = ↑(mapRange f hf v₁ + mapRange f hf v₂) x✝
[PROOFSTEP]
simp only [hf', add_apply, mapRange_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
⊢ (fun v => embDomain f v) 0 = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
v w : α →₀ M
⊢ ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } (v + w) =
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } v +
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } w
[PROOFSTEP]
ext b
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
v w : α →₀ M
b : β
⊢ ↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } (v + w)) b =
↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } v +
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } w)
b
[PROOFSTEP]
by_cases h : b ∈ Set.range f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
v w : α →₀ M
b : β
h : b ∈ Set.range ↑f
⊢ ↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } (v + w)) b =
↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } v +
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } w)
b
[PROOFSTEP]
rcases h with ⟨a, rfl⟩
[GOAL]
case pos.intro
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
v w : α →₀ M
a : α
⊢ ↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } (v + w)) (↑f a) =
↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } v +
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } w)
(↑f a)
[PROOFSTEP]
simp
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddZeroClass M
f : α ↪ β
v w : α →₀ M
b : β
h : ¬b ∈ Set.range ↑f
⊢ ↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } (v + w)) b =
↑(ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } v +
ZeroHom.toFun { toFun := fun v => embDomain f v, map_zero' := (_ : 0 = 0) } w)
b
[PROOFSTEP]
simp only [Set.mem_range, not_exists, coe_add, Pi.add_apply, embDomain_notin_range _ _ _ h, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : NegZeroClass G
inst✝ : NegZeroClass H
f : G → H
hf : f 0 = 0
hf' : ∀ (x : G), f (-x) = -f x
v : α →₀ G
x✝ : α
⊢ ↑(mapRange f hf (-v)) x✝ = ↑(-mapRange f hf v) x✝
[PROOFSTEP]
simp only [hf', neg_apply, mapRange_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : SubNegZeroMonoid G
inst✝ : SubNegZeroMonoid H
f : G → H
hf : f 0 = 0
hf' : ∀ (x y : G), f (x - y) = f x - f y
v₁ v₂ : α →₀ G
x✝ : α
⊢ ↑(mapRange f hf (v₁ - v₂)) x✝ = ↑(mapRange f hf v₁ - mapRange f hf v₂) x✝
[PROOFSTEP]
simp only [hf', sub_apply, mapRange_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddCommMonoid M
k l m n : α
u v : M
hu : u ≠ 0
hv : v ≠ 0
⊢ single k u + single l v = single m u + single n v ↔ k = m ∧ l = n ∨ u = v ∧ k = n ∧ l = m ∨ u + v = 0 ∧ k = l ∧ m = n
[PROOFSTEP]
classical
simp_rw [FunLike.ext_iff, coe_add, single_eq_pi_single, ← funext_iff]
exact Pi.single_add_single_eq_single_add_single hu hv
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddCommMonoid M
k l m n : α
u v : M
hu : u ≠ 0
hv : v ≠ 0
⊢ single k u + single l v = single m u + single n v ↔ k = m ∧ l = n ∨ u = v ∧ k = n ∧ l = m ∨ u + v = 0 ∧ k = l ∧ m = n
[PROOFSTEP]
simp_rw [FunLike.ext_iff, coe_add, single_eq_pi_single, ← funext_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddCommMonoid M
k l m n : α
u v : M
hu : u ≠ 0
hv : v ≠ 0
⊢ ((fun a => (Pi.single k u + Pi.single l v) a) = fun a => (Pi.single m u + Pi.single n v) a) ↔
k = m ∧ l = n ∨ u = v ∧ k = n ∧ l = m ∨ u + v = 0 ∧ k = l ∧ m = n
[PROOFSTEP]
exact Pi.single_add_single_eq_single_add_single hu hv
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : DecidableEq α
inst✝ : AddGroup G
f g : α →₀ G
⊢ (f - g).support ⊆ f.support ∪ g.support
[PROOFSTEP]
rw [sub_eq_add_neg, ← support_neg g]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝¹ : DecidableEq α
inst✝ : AddGroup G
f g : α →₀ G
⊢ (f + -g).support ⊆ f.support ∪ (-g).support
[PROOFSTEP]
exact support_add
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddGroup G
f : α →₀ G
a : α
⊢ erase a f = f - single a (↑f a)
[PROOFSTEP]
ext a'
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddGroup G
f : α →₀ G
a a' : α
⊢ ↑(erase a f) a' = ↑(f - single a (↑f a)) a'
[PROOFSTEP]
rcases eq_or_ne a a' with (rfl | h)
[GOAL]
case h.inl
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddGroup G
f : α →₀ G
a : α
⊢ ↑(erase a f) a = ↑(f - single a (↑f a)) a
[PROOFSTEP]
simp
[GOAL]
case h.inr
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddGroup G
f : α →₀ G
a a' : α
h : a ≠ a'
⊢ ↑(erase a f) a' = ↑(f - single a (↑f a)) a'
[PROOFSTEP]
simp [erase_ne h.symm, single_eq_of_ne h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
ι : Type u_4
M : Type u_5
M' : Type u_6
N : Type u_7
P : Type u_8
G : Type u_9
H : Type u_10
R : Type u_11
S : Type u_12
inst✝ : AddGroup G
f : α →₀ G
a : α
b : G
⊢ update f a b = f - single a (↑f a) + single a b
[PROOFSTEP]
rw [update_eq_erase_add_single, erase_eq_sub_single]
|
{"mathlib_filename": "Mathlib.Data.Finsupp.Defs", "llama_tokens": 66341}
|
# ------------------------------------------------------------------------------
# Portions of this code are from
# det3d (https://github.com/poodarchu/Det3D/tree/56402d4761a5b73acd23080f537599b0888cce07)
# Copyright (c) 2019 朱本金
# Licensed under the MIT License
# ------------------------------------------------------------------------------
import logging
from collections import defaultdict
from det3d.core import box_torch_ops
import torch
from det3d.torchie.cnn import kaiming_init
from torch import double, nn
from det3d.models.losses.centernet_loss import FastFocalLoss, RegLoss, WeightedFastFocalLoss
from det3d.models.utils import Sequential
from ..registry import BBOX_HEADS
from ..utils.norm import RSNorm
import copy
import numpy as np
try:
from det3d.ops.dcn import DeformConv
except:
print("Deformable Convolution not built!")
from .center_head import CenterHead
from torch.nn import functional as F
class RangeStratified(nn.Module):
    """
    Range stratified convolution and normalization.

    Splits the feature map into ``ngroups`` slices along the last (range)
    axis, applies a single grouped Conv2d + GroupNorm + activation so that
    each range slice gets its own filters, then stitches the slices back
    together along the range axis.

    :param kernel: tuple. (kernel_az, kernel_r) convolution kernel size.
    :param nheads: int. number of heads. 1 for single group.
    :param ngroups: int. number of range groups the last axis is split into.
    :param inchannels: int. input channels per group/head.
    :param outchannels: int. output channels per group/head.
    :param act: string. 'ReLU' or 'Mish'. 'Mish' does not work.
    """
    def __init__(self, kernel, nheads, ngroups, inchannels, outchannels, act='ReLU'):
        super(RangeStratified, self).__init__()
        # One grouped conv processes all range groups (and heads) at once:
        # each of the ngroups*nheads channel groups sees only its own slice
        # of the stacked input, i.e. per-range-group weights.
        self.conv = nn.Sequential(
            nn.Conv2d(inchannels * ngroups * nheads, outchannels * ngroups * nheads, kernel, groups=ngroups * nheads),
            nn.GroupNorm(ngroups * nheads, outchannels * ngroups * nheads),
            activation[act],  # shared module instance from the module-level `activation` dict
        )
        # "Same" padding amounts for the azimuth (dim -2) and range (dim -1)
        # axes; correct for odd kernel sizes.
        self.padding_az = kernel[0] // 2
        self.padding_r = kernel[1] // 2
        self.ngroups = ngroups

    def forward(self, x):
        # Pad the azimuth axis (dim -2) by hand; the conv itself is unpadded.
        x = F.pad(x, (0, 0, self.padding_az, self.padding_az))
        # Width of one range group along the last axis.
        # NOTE(review): assumes the range extent divides evenly by ngroups —
        # any remainder columns are dropped by the slicing below; confirm
        # upstream feature-map sizes.
        step = x.shape[-1] // self.ngroups
        if self.padding_r > 0:
            # Pad the range axis, then stack each (overlapping) range slice
            # into the channel dimension so the grouped conv sees one slice
            # per channel group. The 2*padding_r overlap is consumed by the
            # unpadded conv, so each group's output width is `step` again.
            x = F.pad(x, (self.padding_r, self.padding_r, 0, 0))
            x = torch.cat([x[:,:, :, (step*i):(step*(i+1)+2*self.padding_r)] for i in range(self.ngroups)], 1)
        else:
            # 1-wide range kernels need no overlap between slices.
            x = torch.cat([x[:,:, :, step*i:step*(i+1)] for i in range(self.ngroups)], 1)
        x = self.conv(x)
        # Undo the stacking: split the channel dim back into ngroups chunks
        # and re-concatenate them along the range axis (same order as above).
        step = x.shape[1] // self.ngroups
        x = torch.cat([x[:, step * i:step * (i + 1), :, :] for i in range(self.ngroups)], -1)
        return x
# NOTE: the Mish activation defined below does not work in this model; kept for reference.
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``.

    Stateless element-wise activation (Misra, 2019). Kept for
    experimentation; reported not to work in this model.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        gate = torch.tanh(F.softplus(input))
        return input * gate
# Name -> activation module lookup used by all heads. NOTE: these are shared
# singleton module instances (safe here because both activations are
# stateless); 'Mish' is reported not to work in this model.
activation = {'ReLU': nn.ReLU(inplace=True), 'Mish': Mish()}
@BBOX_HEADS.register_module
class CenterHeadSingle(CenterHead):
    """
    Centerpoint Single-group det heads.

    Unlike the multi-group parent, all task classes share ONE heatmap head
    ('hm') whose channel count is the total number of classes; the regression
    heads are built from ``common_heads`` (and may be merged, see ``__init__``).

    :param voxel_shape: string. 'cuboid' or 'cylinder'
    :param act: string. 'ReLU' or 'Mish'. 'ReLU' is better.
    """
    def __init__(
        self,
        in_channels=[128,],    # NOTE(review): mutable defaults ([], dict()) are shared across calls
        tasks=[],
        dataset='nuscenes',
        weight=0.25,           # weight between hm loss and loc loss
        code_weights=[],
        common_heads=dict(),   # head name -> (out_channels, num_conv)
        logger=None,
        init_bias=-2.19,
        share_conv_channel=64,
        num_hm_conv=2,
        dcn_head=False,
        voxel_shape='cuboid',
        act='ReLU',            # key into the module-level `activation` registry
    ):
        super(CenterHeadSingle, self).__init__(in_channels, tasks, dataset, weight, code_weights,
                                               common_heads, logger, init_bias, share_conv_channel,
                                               num_hm_conv, dcn_head, voxel_shape)
        # Re-derive per-task bookkeeping (overriding whatever the parent set up).
        num_classes = [len(t["class_names"]) for t in tasks]
        self.class_names = [t["class_names"] for t in tasks]
        self.code_weights = code_weights
        self.weight = weight  # weight between hm loss and loc loss
        self.dataset = dataset
        self.num_heads = len(num_classes)
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.voxel_shape = voxel_shape
        self.crit = FastFocalLoss()
        self.crit_reg = RegLoss()
        self.common_heads = common_heads
        # 9 box parameters when velocity is regressed, otherwise 7.
        self.box_n_dim = 9 if 'vel' in common_heads else 7
        self.use_direction_classifier = False
        self.heads = copy.deepcopy(common_heads)
        if not logger:
            logger = logging.getLogger("CenterHead")
        self.logger = logger
        logger.info(
            f"num_classes: {num_classes}"
        )
        # a shared convolution
        # NOTE(review): `in_channels` is passed straight to Conv2d, so callers
        # presumably supply an int here despite the list default — confirm.
        self.shared_conv = nn.Sequential(
            nn.Conv2d(in_channels, share_conv_channel,
                      kernel_size=3, padding=1, bias=True),
            RSNorm(1, 4, share_conv_channel),
            activation[act]
        )
        self.tasks = None
        # NOTE(review): init_bias is only printed here; it is not applied to the
        # `hm` head rebuilt below — confirm the bias init happens elsewhere.
        print("Use HM Bias: ", init_bias)
        head_conv = 64
        final_kernel = 3
        #improve runtime by merging heads
        for head in common_heads:
            classes, num_conv = common_heads[head]
            fc = Sequential()
            if 'reg' in head:
                # Offset head gets the range-stratified convolution.
                # NOTE(review): the trailing comma below turns the statement into
                # a 1-tuple expression — harmless at runtime but likely a typo.
                fc.add(RangeStratified((3,3), 1, 8, share_conv_channel, head_conv, act)),
                fc.add(nn.Conv2d(head_conv, classes,
                                 kernel_size=1, bias=True))
            elif '_' in head:
                # Merged head (e.g. 'height_dim'): one grouped-conv stack predicts
                # every sub-head at once; forward() slices the channels back apart.
                n = len(head.split('_'))
                for i in range(num_conv - 1):
                    fc.add(nn.Conv2d(share_conv_channel, head_conv,
                                     kernel_size=final_kernel, stride=1,
                                     padding=final_kernel // 2, bias=True, groups=n))
                    # num_groups == num_channels: per-channel normalization.
                    fc.add(nn.GroupNorm(head_conv, head_conv))
                    fc.add(activation[act])
                fc.add(nn.Conv2d(head_conv, classes * n,
                                 kernel_size=final_kernel, stride=1,
                                 padding=final_kernel // 2, bias=True, groups=n))
            else:
                # Plain per-head conv stack.
                for i in range(num_conv - 1):
                    fc.add(nn.Conv2d(share_conv_channel, head_conv,
                                     kernel_size=final_kernel, stride=1,
                                     padding=final_kernel // 2, bias=True))
                    fc.add(nn.GroupNorm(head_conv, head_conv))
                    fc.add(activation[act])
                fc.add(nn.Conv2d(head_conv, classes,
                                 kernel_size=final_kernel, stride=1,
                                 padding=final_kernel // 2, bias=True))
            self.__setattr__(head, fc)
        # Single shared heatmap head over the union of all task classes.
        self.hm = Sequential()
        self.heads.update(dict(hm=(sum(num_classes), num_hm_conv)))
        for _ in range(num_hm_conv - 1):
            self.hm.add(nn.Conv2d(share_conv_channel, head_conv,
                                  kernel_size=final_kernel, stride=1,
                                  padding=final_kernel // 2, bias=True))
            self.hm.add(nn.GroupNorm(head_conv, head_conv))
            self.hm.add(activation[act])
        self.hm.add(nn.Conv2d(head_conv, sum(num_classes),
                              kernel_size=final_kernel, stride=1,
                              padding=final_kernel // 2, bias=True))
        logger.info("Finish CenterHead Initialization")
    def forward(self, x):
        """Run the shared conv, then every head; return {'det_preds': [dict]}."""
        ret_dict = dict()
        x = self.shared_conv(x)
        for head in self.heads:
            if '_' in head:
                # Merged head: split the channels back into the component heads.
                names = head.split('_')
                tmp = self.__getattr__(head)(x)
                dim = tmp.shape[1] // len(names)
                for j, nm in enumerate(names):
                    ret_dict[nm] = tmp[:, j * dim: (j + 1) * dim, ...]
            elif 'heightdim' in head:
                # Fused height+dim head (no underscore): channel 0 is height,
                # the remaining channels are the box dimensions.
                tmp = self.__getattr__(head)(x)
                ret_dict['height'] = tmp[:, :1, ...]
                ret_dict['dim'] = tmp[:, 1:, ...]
            else:
                ret_dict[head] = self.__getattr__(head)(x)
        return {'det_preds': [ret_dict]}
@BBOX_HEADS.register_module
class CenterHeadSinglePos(CenterHeadSingle):
    """
    Centerpoint Single-group det heads with range stratified and feature undistortion.

    A fixed polar positional encoding is turned into a per-location affine
    (weight, bias) "calibration" that is applied to the shared features before
    the heatmap head only.

    :param voxel_shape: string. 'cuboid' or 'cylinder'
    :param voxel_generator: dict with keys 'range', 'voxel_size', 'nsectors'.
    :param out_size_factor: int. stride of RPN
    """
    def __init__(
        self,
        in_channels=[128, ],
        tasks=[],
        dataset='nuscenes',
        weight=0.25,
        code_weights=[],
        common_heads=dict(),
        logger=None,
        init_bias=-2.19,
        share_conv_channel=64,
        num_hm_conv=2,
        dcn_head=False,
        voxel_shape='cuboid',
        voxel_generator=None,
        out_size_factor=4,
    ):
        super(CenterHeadSinglePos, self).__init__(in_channels, tasks, dataset, weight, code_weights,
                                                  common_heads, logger, init_bias, share_conv_channel,
                                                  num_hm_conv, dcn_head, voxel_shape)
        head_conv = 64
        # position decoding
        with torch.no_grad():
            pc_range = voxel_generator['range']
            voxel_size = voxel_generator['voxel_size']
            nsectors = voxel_generator['nsectors']
            # Indices 1/4 of pc_range are azimuth bounds; one sector spans
            # 1/nsectors of the full azimuth range.
            min_az, max_az = pc_range[1], pc_range[4]
            interval = (max_az - min_az) / nsectors
            ref_pc_range = pc_range.copy()
            ref_pc_range[4] = min_az + interval
            # Feature-map size of one sector after the RPN stride.
            r_size = round((ref_pc_range[3] - ref_pc_range[0]) / voxel_size[0] / out_size_factor)
            a_size = round((ref_pc_range[4] - ref_pc_range[1]) / voxel_size[1] / out_size_factor)
            grid_a, grid_r = torch.meshgrid(torch.arange(a_size, device=torch.cuda.current_device()),
                                            torch.arange(r_size, device=torch.cuda.current_device()))
            # Cell indices -> physical azimuth / range coordinates.
            grid_a = grid_a * out_size_factor * voxel_size[1] + ref_pc_range[1]
            grid_r = grid_r * out_size_factor * voxel_size[0] + ref_pc_range[0]
            cos = torch.cos(grid_a)
            sin = torch.sin(grid_a)
            # Shape (1, 5, a_size, r_size): channels [x, y, r, cos(a), sin(a)].
            # NOTE(review): stored as a plain attribute (not a registered buffer),
            # so it stays on the CUDA device current at construction time and is
            # excluded from state_dict / .to() moves — confirm this is intended.
            self.pos_encoding = torch.cat(
                [(grid_r * cos).unsqueeze(0), (grid_r * sin).unsqueeze(0), grid_r.unsqueeze(0), cos.unsqueeze(0),
                 sin.unsqueeze(0)]).unsqueeze(0)
        # undistortion weight and bias
        # Tanh-bounded multiplicative weight; unbounded additive bias.
        self.calibration_weight = Sequential(
            nn.Conv2d(5, head_conv, kernel_size=3, padding=1),
            nn.Tanh(),
            nn.Conv2d(head_conv, head_conv, kernel_size=1),
            nn.Tanh(),
        )
        self.calibration_bias = Sequential(
            nn.Conv2d(5, head_conv, kernel_size=3, padding=1),
            nn.Tanh(),
            nn.Conv2d(head_conv, head_conv, kernel_size=1),
        )
    def forward(self, x, **kwargs):
        """Shared conv, position-conditioned calibration, then every head."""
        ret_dict = dict()
        x = self.shared_conv(x)
        # Per-location affine "undistortion" derived from the polar position.
        cal_weight = self.calibration_weight(self.pos_encoding)
        cal_bias = self.calibration_bias(self.pos_encoding)
        calibrated = x * cal_weight + cal_bias
        for head in self.heads:
            if '_' in head:
                names = head.split('_')
                tmp = self.__getattr__(head)(x)
                dim = tmp.shape[1] // len(names)
                for j, nm in enumerate(names):
                    ret_dict[nm] = tmp[:, j * dim: (j + 1) * dim, ...]
            elif 'heightdim' in head:
                tmp = self.__getattr__(head)(x)
                ret_dict['height'] = tmp[:, :1, ...]
                ret_dict['dim'] = tmp[:, 1:, ...]
            elif 'hm' in head:
                # Only the heatmap head consumes the calibrated features.
                ret_dict[head] = self.__getattr__(head)(calibrated)
            else:
                ret_dict[head] = self.__getattr__(head)(x)
        return {'det_preds': [ret_dict]}
|
{"hexsha": "ceee6b5e40e1ec681d1fd61776b4a0ced00e75e1", "size": 11675, "ext": "py", "lang": "Python", "max_stars_repo_path": "det3d/models/bbox_heads/center_head_parallel.py", "max_stars_repo_name": "motional/polarstream", "max_stars_repo_head_hexsha": "74af9548cad69a4f546b83dae7b87454bc590c9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2022-03-29T04:53:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:29:28.000Z", "max_issues_repo_path": "det3d/models/bbox_heads/center_head_parallel.py", "max_issues_repo_name": "motional/polarstream", "max_issues_repo_head_hexsha": "74af9548cad69a4f546b83dae7b87454bc590c9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "det3d/models/bbox_heads/center_head_parallel.py", "max_forks_repo_name": "motional/polarstream", "max_forks_repo_head_hexsha": "74af9548cad69a4f546b83dae7b87454bc590c9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-29T04:31:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T04:31:53.000Z", "avg_line_length": 40.9649122807, "max_line_length": 118, "alphanum_fraction": 0.5484368308, "include": true, "reason": "import numpy", "num_tokens": 2747}
|
# Copyright 2019 United Kingdom Research and Innovation
# Author: Evgueni Ovtchinnikov (evgueni.ovtchinnikov@stfc.ac.uk)
# -*- coding: utf-8 -*-
"""
Principal Components update demo.
Performs PCA on a chunk of data, then adds more data and updates principal
components.
Usage: pca_update <data_file> <tolerance> <q_first> <max_pcs> [gpu]
data_file : the name of the file containing data matrix X
tolerance : PCA approximation tolerance wanted
q_first : relative size of the first chunk
max_pcs : maximal number of principal components to compute (<1: no limit)
gpu : run on GPU if this argument is present
"""
import numpy
import sys
import timeit
from raleigh.interfaces.pca import pca, pca_error
# ---- command line -----------------------------------------------------------
argc = len(sys.argv)
if argc < 5:
    raise SystemExit(
        'Usage: pca_update <data_file> <tolerance> <q_first> <max_pcs> [gpu]')
data = numpy.load(sys.argv[1])
tol = float(sys.argv[2])
q_first = float(sys.argv[3])
max_pcs = int(sys.argv[4])
# 'gpu!' is raleigh's "GPU required" architecture spec — presumably intended
# (as opposed to plain 'gpu'); confirm against the raleigh documentation.
arch = 'gpu!' if argc > 5 else 'cpu'

numpy.random.seed(1) # make results reproducible

# ---- reshape multi-dimensional samples (e.g. images) into rows --------------
m_all = data.shape[0]
if data.ndim > 2:
    data = data.reshape(m_all, -1)
n = data.shape[1]

# ---- PCA on the first chunk -------------------------------------------------
m_first = min(m_all, max(1, int(q_first * m_all)))
print('computing PCs for %d data samples...' % m_first)
start = timeit.default_timer()
mean, trans, comps = pca(data[: m_first, :], tol=tol, mpc=max_pcs, arch=arch)
stop = timeit.default_timer()
ncomp = comps.shape[0]
print('%d principal components computed in %.2e sec' % (ncomp, stop - start))
em, ef = pca_error(data[: m_first, :], mean, trans, comps)
print('PCA error: max %.1e, Frobenius %.1e' % (em, ef))

# ---- incremental update with the remaining samples --------------------------
if m_first < m_all:
    print('\nmore data arrived, updating PCs for %d data samples...' % m_all)
    start = timeit.default_timer()
    mean, trans, comps = pca(data[m_first :, :], mpc=max_pcs, tol=tol, verb=0,
                             arch=arch, have=(mean, trans, comps))
    stop = timeit.default_timer()
    ncomp = comps.shape[0]
    print('%d principal components updated in %.2e sec' % (ncomp, stop - start))
    # Error is now measured over the FULL data set, not just the new chunk.
    em, ef = pca_error(data, mean, trans, comps)
    print('PCA error: max %.1e, Frobenius %.1e' % (em, ef))

print('done')
|
{"hexsha": "4b3d8c4581aff51f8fa67a8673cbff00060e5feb", "size": 2212, "ext": "py", "lang": "Python", "max_stars_repo_path": "raleigh/examples/pca/pca_update.py", "max_stars_repo_name": "evgueni-ovtchinnikov/raleigh", "max_stars_repo_head_hexsha": "620cff4a848cb98034671edc1ebdc6b108fe88b4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-09-25T13:45:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T15:16:51.000Z", "max_issues_repo_path": "raleigh/examples/pca/pca_update.py", "max_issues_repo_name": "evgueni-ovtchinnikov/raleigh", "max_issues_repo_head_hexsha": "620cff4a848cb98034671edc1ebdc6b108fe88b4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "raleigh/examples/pca/pca_update.py", "max_forks_repo_name": "evgueni-ovtchinnikov/raleigh", "max_forks_repo_head_hexsha": "620cff4a848cb98034671edc1ebdc6b108fe88b4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5294117647, "max_line_length": 82, "alphanum_fraction": 0.678119349, "include": true, "reason": "import numpy", "num_tokens": 657}
|
/**
* Copyright (C) 2016-2020 Xilinx, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
* License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include <boost/test/unit_test.hpp>
#include "setup.h"
#include "xocl/core/device.h"
#include "xocl/core/memory.h"
#include "xocl/core/time.h"
#include <vector>
#include <memory>
// Terminology
// - ubuf is user's buffer in host code
// - [hbuf,dbuf] is buffer object with host side and device side memory
// To run all tests in this suite use
// % em -env opt txocl --run_test=test_clEnqueueMapBuffer
// To run selective tests in this suite use
// % em -env opt txocl --run_test=test_clEnqueueMapBuffer/<test case name>
// test_clEnqueueMapBuffer1
// Test data consistency with map and unmap of resident memory object and
// unaligned ubuf. This creates [hbuf,dbuf] where hbuf is separate
// from ubuf.
// test_clEnqueueMapBuffer2
// Test data consistency with map and unmap of resident memory object and
// aligned ubuf. This creates [hbuf,dbuf] where hbuf is the same as ubuf.
// test_clEnqueueMapBuffer3
// Test data consistency with map and unmap of resident memory object and
// no ubuf. This creates [hbuf,dbuf], where hbuf is directly used by user.
BOOST_AUTO_TEST_SUITE ( test_clEnqueueMapBuffer )
// Test data consistency with map and unmap of resident memory object and
// likely unaligned user buffer.
// To run just this test use:
//  em -env opt --run-test=test_clEnqueueMapBuffer/test_clEnqueueMapBuffer1
BOOST_AUTO_TEST_CASE( test_clEnqueueMapBuffer1 )
{
  ocl_sw_emulation ocl;
  cl_int err = CL_SUCCESS;

  // Allocate unaligned (for xrt_xocl::device) buffer to force allocation
  // of separate host buffer in backing buffer object.
  // strncpy with count==sz copies exactly sz bytes and does NOT NUL-terminate;
  // safe here because every later access is bounded by sz.
  const size_t sz = 5;
  std::unique_ptr<char[]> storage(new char[sz]);
  auto ubuf = storage.get();
  std::strncpy(ubuf,"hello",sz);

  auto cq = clCreateCommandQueue(ocl.context,ocl.device,0,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Create a mem object and request that host ptr (ubuf) be used.
  auto mem = clCreateBuffer(ocl.context,CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR,sz,ubuf,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Migrate the mem object to the device.  The mem object becomes
  // resident on the device.  Under the hood a buffer object
  // [hbuf,dbuf] is created and ubuf is memcpy to hbuf before hbuf is
  // DMAed to the dbuf.
  cl_event migrate_event = nullptr;
  clEnqueueMigrateMemObjects(cq,1,&mem,0,0,nullptr,&migrate_event);
  clWaitForEvents(1,&migrate_event);
  clReleaseEvent(migrate_event);

  // Since ubuf is unaligned, the underlying [hbuf,dbuf] has allocated
  // its own host backing buffer (hbuf).  Verify that map buffer still
  // returns user's ptr (ubuf) not the backing ptr (hbuf).
  auto wptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_WRITE,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK_EQUAL(wptr,ubuf);

  // Since the mem object is mapped for writing, writing to the mapped
  // ptr (wptr) and unmapping should ensure that hbuf is updated when
  // the mem object is unmapped.  Further, since the mem object is
  // resident, unmapping should also update the device buffer (dbuf).
  std::strncpy(ubuf,"01234",sz); // remember ubuf is same as wptr
  cl_event unmap_event = nullptr;
  clEnqueueUnmapMemObject(cq,mem,wptr,0,nullptr,&unmap_event);
  clWaitForEvents(1,&unmap_event);
  clReleaseEvent(unmap_event);

  // Verify that dbuf was updated by now mapping the mem object for
  // read, which will sync dbuf to hbuf and use memcpy to mem object's
  // buffer (ubuf).
  auto rptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_READ,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK_EQUAL(rptr,ubuf);
  BOOST_CHECK_EQUAL(std::strncmp(ubuf,"01234",sz),0);

  clReleaseMemObject(mem);
  // BUG FIX: the command queue was leaked; release it like the other CL objects.
  clReleaseCommandQueue(cq);
}
// Test data consistency with map and unmap of resident memory object and
// aligned user buffer.
BOOST_AUTO_TEST_CASE( test_clEnqueueMapBuffer2 )
{
  ocl_sw_emulation ocl;
  cl_int err = CL_SUCCESS;

  // Allocate an aligned (for xrt_xocl::device) buffer so that the backing
  // buffer object can adopt it directly as hbuf instead of allocating a
  // separate host buffer (contrast with test 1).
  const size_t sz = 5;
  auto deleter = [](void* v) { free(v); };
  std::unique_ptr<void,decltype(deleter)> storage(nullptr,deleter);
  void* vbuf = nullptr;
  BOOST_CHECK_EQUAL(posix_memalign(&vbuf,128,sz),0);
  storage.reset(vbuf);
  auto ubuf = static_cast<char*>(vbuf);
  std::strncpy(ubuf,"hello",5);

  auto cq = clCreateCommandQueue(ocl.context,ocl.device,0,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Create a mem object and request that host ptr (ubuf) be used.
  auto mem = clCreateBuffer(ocl.context,CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR,sz,ubuf,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Migrate the mem object to the device.  The mem object becomes
  // resident on the device.  Under the hood a buffer object
  // [hbuf,dbuf] is created, where hbuf is the same as ubuf provided
  // alignment needs are met.  Finally hbuf is DMA'ed to dbuf.
  cl_event migrate_event = nullptr;
  clEnqueueMigrateMemObjects(cq,1,&mem,0,0,nullptr,&migrate_event);
  clWaitForEvents(1,&migrate_event);
  clReleaseEvent(migrate_event);

  // Since ubuf is aligned, the underlying [hbuf,dbuf] uses ubuf as hbuf.
  // This is invisible to user, there is no way to check.  Verify that map
  // buffer returns ubuf.
  auto wptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_WRITE,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK_EQUAL(wptr,ubuf);

  // Since the mem object is mapped for writing, writing to the mapped
  // ptr (wptr) and unmapping should ensure that hbuf is updated when
  // the mem object is unmapped.  Further, since the mem object is
  // resident, unmapping should also update the device buffer (dbuf).
  std::strncpy(ubuf,"01234",5); // remember ubuf is same as wptr
  cl_event unmap_event = nullptr;
  clEnqueueUnmapMemObject(cq,mem,wptr,0,nullptr,&unmap_event);
  clWaitForEvents(1,&unmap_event);
  clReleaseEvent(unmap_event);

  // Verify that dbuf was updated by now mapping the mem object for
  // read, which will sync dbuf to hbuf but since hbuf and ubuf are
  // the same the memcpy is skipped, which again is invisible to user.
  auto rptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_READ,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK_EQUAL(rptr,ubuf);
  BOOST_CHECK_EQUAL(std::strncmp(ubuf,"01234",5),0);

  clReleaseMemObject(mem);
  // BUG FIX: the command queue was leaked; release it like the other CL objects.
  clReleaseCommandQueue(cq);
}
// Test data consistency with map and unmap of resident memory object
// with no user buffer
BOOST_AUTO_TEST_CASE( test_clEnqueueMapBuffer3 )
{
  ocl_sw_emulation ocl;
  cl_int err = CL_SUCCESS;

  size_t sz = 5;

  auto cq = clCreateCommandQueue(ocl.context,ocl.device,0,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Create a mem object WITHOUT a user host pointer; the runtime allocates
  // the host-side buffer (hbuf) itself.
  auto mem = clCreateBuffer(ocl.context,CL_MEM_READ_WRITE,sz,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);

  // Migrate the mem object to the device.  The mem object becomes
  // resident on the device.  Under the hood a buffer object
  // [hbuf,dbuf] is created with an internally allocated hbuf, which is
  // then DMA'ed to dbuf.
  cl_event migrate_event = nullptr;
  clEnqueueMigrateMemObjects(cq,1,&mem,0,0,nullptr,&migrate_event);
  clWaitForEvents(1,&migrate_event);
  clReleaseEvent(migrate_event);

  // With no user pointer, mapping hands out the internally allocated hbuf.
  // There is no user pointer to compare against, so just verify non-null.
  auto wptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_WRITE,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK(wptr!=nullptr);

  // Since the mem object is mapped for writing, writing through the mapped
  // ptr (wptr) and unmapping should ensure that hbuf is updated when
  // the mem object is unmapped.  Further, since the mem object is
  // resident, unmapping should also update the device buffer (dbuf).
  std::strncpy(static_cast<char*>(wptr),"01234",5); // write through the mapped pointer
  cl_event unmap_event = nullptr;
  clEnqueueUnmapMemObject(cq,mem,wptr,0,nullptr,&unmap_event);
  clWaitForEvents(1,&unmap_event);
  clReleaseEvent(unmap_event);

  // Verify that dbuf was updated by now mapping the mem object for
  // read, which syncs dbuf back into hbuf; the same hbuf pointer should
  // be handed out again, which is invisible to the user otherwise.
  auto rptr = clEnqueueMapBuffer(cq,mem,CL_TRUE,CL_MAP_READ,0,sz,0,0,nullptr,&err);
  BOOST_CHECK_EQUAL(err,CL_SUCCESS);
  BOOST_CHECK_EQUAL(rptr,wptr);
  BOOST_CHECK_EQUAL(std::strncmp(static_cast<char*>(rptr),"01234",5),0);

  clReleaseMemObject(mem);
  // BUG FIX: the command queue was leaked; release it like the other CL objects.
  clReleaseCommandQueue(cq);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "d9f89ba77f3f6d7d9d368542359c9cd2ed8a27e6", "size": 9206, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/runtime_src/xocl/test/api/tclEnqueueMapBuffer.cpp", "max_stars_repo_name": "AlphaBu/XRT", "max_stars_repo_head_hexsha": "72d34d637d3292e56871f9384888e6aed73b5969", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 359.0, "max_stars_repo_stars_event_min_datetime": "2018-10-05T03:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:28:16.000Z", "max_issues_repo_path": "src/runtime_src/xocl/test/api/tclEnqueueMapBuffer.cpp", "max_issues_repo_name": "AlphaBu/XRT", "max_issues_repo_head_hexsha": "72d34d637d3292e56871f9384888e6aed73b5969", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5832.0, "max_issues_repo_issues_event_min_datetime": "2018-10-02T22:43:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:28:05.000Z", "max_forks_repo_path": "src/runtime_src/xocl/test/api/tclEnqueueMapBuffer.cpp", "max_forks_repo_name": "AlphaBu/XRT", "max_forks_repo_head_hexsha": "72d34d637d3292e56871f9384888e6aed73b5969", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 442.0, "max_forks_repo_forks_event_min_datetime": "2018-10-02T23:06:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T08:34:44.000Z", "avg_line_length": 40.3771929825, "max_line_length": 92, "alphanum_fraction": 0.7429936998, "num_tokens": 2600}
|
"""
Created on Mon Jun 8 15:57:44 2020
@author: prbpedro
"""
import pandas
import matplotlib.pyplot
import numpy
def executeKMeans():
    """
    K-means clustering demo.

    Partitions n observations into k groups, each observation belonging to
    the cluster with the nearest mean; this divides the data space into a
    Voronoi diagram.  Plots the points coloured by cluster label and the
    cluster centroids in red.
    """
    from random import seed
    from random import randint

    seed(1)  # reproducible random points
    data = {'x': [randint(0, 99) for _ in range(30)],
            'y': [randint(0, 99) for _ in range(30)]
            }
    print(data)
    df = pandas.DataFrame(data, columns=['x', 'y'])
    df.info()

    from sklearn.cluster import KMeans
    kmeans = KMeans(n_clusters=3)
    kmeans.fit(df)
    centroids = kmeans.cluster_centers_  # centroid coordinates
    print(centroids)

    matplotlib.pyplot.scatter(df['x'], df['y'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)
    matplotlib.pyplot.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
    matplotlib.pyplot.xlabel('X')
    # BUG FIX: the original called xlabel() twice, overwriting 'X' with 'Y'
    # and leaving the y-axis unlabeled; label the y-axis instead.
    matplotlib.pyplot.ylabel('Y')
def read_iris_dataset():
    """
    Load the iris data set, build a DataFrame, split it 80/20 into train and
    test sets and standardize the features (zero mean, unit variance).

    Returns (iris_ds, df_iris, X_train, X_test, y_train, y_test).
    """
    from sklearn import datasets
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler

    iris_ds = datasets.load_iris()
    df_iris = pandas.DataFrame(
        data=numpy.c_[iris_ds['data'], iris_ds['target']],
        columns=iris_ds['feature_names'] + ['target'])
    df_iris.info()

    features = df_iris.iloc[:, :-1]  # every column except 'target'
    target = df_iris.iloc[:, 4]      # the 'target' column
    X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2)

    # Standardize features by removing the mean and scaling to unit variance,
    # fitting the scaler on the training split only.
    scaler = StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    return iris_ds, df_iris, X_train, X_test, y_train, y_test
def executeKnn(iris_ds, df_iris, X_train, X_test, y_train, y_test):
    """
    Supervised k-nearest-neighbours classification: each sample's label is
    decided by the labels of its nearest training-set neighbours (k=5).
    """
    from sklearn.neighbors import KNeighborsClassifier

    # Fit on the training split (fit() returns the estimator itself).
    model = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)

    # Predict on the held-out split and report the results.
    predictions = model.predict(X_test)
    plot_confusion_matrix(y_test, predictions)
def plot_confusion_matrix(y_test, y_pred):
    """
    Print the confusion matrix and classification report for a prediction,
    then draw the matrix with mlxtend.
    """
    from sklearn.metrics import classification_report, confusion_matrix
    matrix = confusion_matrix(y_test, y_pred)
    print(matrix)
    print(classification_report(y_test, y_pred))

    # Aliased import so it does not shadow this function's own name locally.
    from mlxtend.plotting import plot_confusion_matrix as draw_matrix
    fig, ax = draw_matrix(conf_mat=matrix)
    matplotlib.pyplot.show()
def executeDecisionTree(iris_ds, df_iris, X_train, X_test, y_train, y_test):
    """
    Supervised classification with a decision tree.

    Trains on the standardized training split, evaluates on the test split
    (confusion matrix + classification report) and renders the fitted tree
    to 'iris.png'.
    """
    from sklearn.tree import DecisionTreeClassifier
    classifier = DecisionTreeClassifier()
    classifier.fit(X_train, y_train)

    # Prediction and evaluation.
    y_pred = classifier.predict(X_test)
    plot_confusion_matrix(y_test, y_pred)
    plot_arvore_decisao(classifier, iris_ds['feature_names'])
    # BUG FIX: a stray bare reference `plot_arvore_decisao` (a useless no-op
    # expression statement) followed this call in the original; removed.
def plot_arvore_decisao(classifier, f_names):
    """
    Export a fitted decision tree to graphviz DOT, write it to 'iris.png'
    and build an inline IPython image of it.
    """
    from sklearn.tree import export_graphviz
    from IPython.display import Image
    import pydotplus

    dot_source = export_graphviz(classifier, out_file=None, filled=True,
                                 rounded=True, special_characters=True,
                                 feature_names=f_names, class_names=['0','1','2'])
    tree_graph = pydotplus.graph_from_dot_data(dot_source)
    tree_graph.write_png('iris.png')
    Image(tree_graph.create_png())
def executeSVM(iris_ds, df_iris, X_train, X_test, y_train, y_test):
    """
    Supervised classification with a Support Vector Machine: finds a
    separating hyperplane between the classes.
    """
    # Model training.
    from sklearn.svm import SVC
    # NOTE: the original comment said "linear kernel", but SVC defaults to
    # kernel='rbf'; only gamma is set here.
    classifier = SVC(gamma='auto')
    classifier.fit(X_train, y_train)
    # Prediction and evaluation.
    y_pred = classifier.predict(X_test)
    plot_confusion_matrix(y_test, y_pred)
def executeMPL(iris_ds, df_iris, X_train, X_test, y_train, y_test):
    """
    Supervised classification with a multilayer perceptron (MLPClassifier).

    Note: despite the original docstring, this is a plain feed-forward
    network, not a convolutional one.
    """
    # Model training.
    from sklearn.neural_network import MLPClassifier
    # Network with two hidden layers of 5 neurons each; lbfgs solver suits
    # small data sets, random_state fixed for reproducibility.
    classifier = MLPClassifier(solver='lbfgs', alpha=1e-5,
                               hidden_layer_sizes=(5,5), random_state=1)
    classifier.fit(X_train, y_train)
    # Prediction and evaluation.
    y_pred = classifier.predict(X_test)
    plot_confusion_matrix(y_test, y_pred)
if __name__ == '__main__':
    # Load/split/standardize the iris data once, then run each demo on it.
    iris_ds, df_iris, X_train, X_test, y_train, y_test=read_iris_dataset()
    # Unsupervised demo; generates its own random 2-D points, not iris data.
    executeKMeans()
    executeKnn(iris_ds, df_iris, X_train, X_test, y_train, y_test)
    executeDecisionTree(iris_ds, df_iris, X_train, X_test, y_train, y_test)
    executeSVM(iris_ds, df_iris, X_train, X_test, y_train, y_test)
    executeMPL(iris_ds, df_iris, X_train, X_test, y_train, y_test)
|
{"hexsha": "09900b9b177e5421cd587384c79c233cb73c72a9", "size": 5442, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/modulo2/algoritimos_mineracao.py", "max_stars_repo_name": "prbpedro/bootcamp_machine_learning", "max_stars_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/modulo2/algoritimos_mineracao.py", "max_issues_repo_name": "prbpedro/bootcamp_machine_learning", "max_issues_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-13T17:46:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:31:46.000Z", "max_forks_repo_path": "src/modulo2/algoritimos_mineracao.py", "max_forks_repo_name": "prbpedro/bootcamp_machine_learning", "max_forks_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1096774194, "max_line_length": 96, "alphanum_fraction": 0.6936787946, "include": true, "reason": "import numpy", "num_tokens": 1413}
|
from __future__ import annotations
__all__ = [
"load_months",
]
from os import PathLike
import numpy as np
import pandas as pd
def load_months(fnames: PathLike | list[PathLike]):
    """
    Read one or more monthly CSV dumps and return a single DataFrame indexed
    by (route_id, direction_id, service_date, scheduled-chunked), where
    'scheduled-chunked' is the scheduled time bucketed into half-hour slots
    expressed as fractional hours.

    Note
    ----
    At the end of the month, for routes after midnight the service date
    will be the previous day and the scheduled time will be +1 day; that is
    where values up to 25.5 in the scheduled-chunked index come from.
    """
    frames = [pd.read_csv(path) for path in np.atleast_1d(fnames)]
    frame = pd.concat(frames)

    # Parse the timestamp columns up front.
    for column in ("actual", "scheduled", "service_date"):
        frame[column] = pd.to_datetime(frame[column])

    # Half-hour bins covering a (possibly >24h) service day; each scheduled
    # time is labelled with its bin's right edge, converted to fractional hours.
    bins = pd.date_range("1900-01-01 0:00:00", periods=52, freq="30min")
    bucketed = pd.cut(frame["scheduled"], bins=bins, labels=bins[1:], right=False)
    frame["scheduled-chunked"] = bucketed.apply(
        lambda ts: (ts.day - 1) * 24 + ts.hour + ts.minute / 60
    )

    key_columns = ["route_id", "direction_id", "service_date", "scheduled-chunked"]
    frame = frame.set_index(pd.MultiIndex.from_frame(frame[key_columns]))
    return frame.drop(key_columns, axis="columns").sort_index()
|
{"hexsha": "f0cc78778e9875fd95096dffab3251f35920a6ed", "size": 1243, "ext": "py", "lang": "Python", "max_stars_repo_path": "mbta_analysis/_loading.py", "max_stars_repo_name": "ianhi/mbta-analysis", "max_stars_repo_head_hexsha": "3701345989677516af14b3fb2beb7fccbe4b0bff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mbta_analysis/_loading.py", "max_issues_repo_name": "ianhi/mbta-analysis", "max_issues_repo_head_hexsha": "3701345989677516af14b3fb2beb7fccbe4b0bff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mbta_analysis/_loading.py", "max_forks_repo_name": "ianhi/mbta-analysis", "max_forks_repo_head_hexsha": "3701345989677516af14b3fb2beb7fccbe4b0bff", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5277777778, "max_line_length": 88, "alphanum_fraction": 0.6749798874, "include": true, "reason": "import numpy", "num_tokens": 345}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.