text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# Aula 5 - Regressão e Machine Learning
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plot
import math
import time
start = time.time()
# Formatação geral para apresentar os dados com 2 casas decimais
pd.options.display.float_format = '{:,.2f}'.format
sns.set_style("whitegrid")
# Bibliotecas para machine learning # procurar SQLearning
from sklearn.metrics import mean_squared_error # Analise de "qualidade" da Inteligencia Artificial (IA)
from sklearn.dummy import DummyRegressor # Modelo de regressão que faz a média simples (pior modelo possivel), serve para comparação de eficiencia
from sklearn.model_selection import train_test_split # Separar a amostra em treino e teste # SVR - regressão #SVC - classificação
from sklearn.svm import LinearSVR # Uma forma de inteligencia artificial para regressão Lienar
from sklearn.svm import SVR # Forma mais robusta e pesada de Inteligencia Artificial
from sklearn.tree import DecisionTreeRegressor # Arvore de Decisão, forma mais rápida de treinar uma IA
# Forma de tentar minimizar a aleatoriedade no processo
import numpy as np
np.random.seed(43267) # Fixa o inicio da geração dos termos aleatórios
enem = pd.read_csv('B:\Programação\Quarentena_Dados\dados\enem_sample_2018_43278.csv')
colunas_de_notas = enem[['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'NU_NOTA_REDACAO']] # Pegar as colunas de notas
notas = colunas_de_notas.dropna() # Remover quem não possui nota em alguma prova
notas.columns = ['cienc_naturais', 'cienc_humanas', 'linguagem_codigo', 'matematica','redacao'] # Renomear as colunas
# Tentar adivinhar a nota de Linguagem_codigo a partir das outras notas
# Funciona de forma similar a uma regressão (x, Y)
x = notas[['cienc_naturais', 'cienc_humanas', 'matematica', 'redacao']]
y = notas['linguagem_codigo']
# Treino da inteligencia
# Ele seleciona alguns elementos para "ensinar" e outros para "testar" a qualidade do teste
x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, random_state=326784) # Separa a amostra em elementos de treino e de teste
print(f'Dados para treino (x e y): x = {x_treino.shape} e y = {y_treino.shape}') # random_state é outra forma de fixar a escolha de termos aleatorios
#print(x_treino.shape) # não é muito eficiente, pois um método pode chamar outro que utilizam random, e este não seguira este padrão ...
print(f'Dados para teste (x e y): x = {x_teste.shape} e y = {y_teste.shape}')
# Criar o modelo de inteligencia artificial
print('Criação e treino da inteligencia artificial (IA)')
a = time.process_time()
# modelo = SVR() # Cria um modelo Não Linear (é muito "pesado")
print('Modelo - Linear SVR')
modelo_svrl = LinearSVR(max_iter=1000) # Máquina de Vetores de suporte (SVM, do inglês: support vector machine)
modelo_svrl = modelo_svrl.fit(x_treino, y_treino) # .fit - Realiza o treino (forma de aprender as regras, ou tentar)
predicoes_svrl = modelo_svrl.predict(x_teste) # .predict - Saida dos valores estimados pela IA
# plot.figure(figsize=(10,10))
# sns.scatterplot(x=y_teste, y=(predicoes_svr - y_teste)) # plotar diferença entre o projetado e o real
# plot.show()
#print(modelo_svr)
qualidade_svrl = mean_squared_error(y_teste, predicoes_svrl)
del modelo_svrl, predicoes_svrl
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - SVR') # Muito Pesado
a = time.process_time()
modelo_svr = SVR()
modelo_svr = modelo_svr.fit(x_treino, y_treino)
predicoes_svr = modelo_svr.predict(x_teste)
qualidade_svr = mean_squared_error(y_teste, predicoes_svr)
del modelo_svr, predicoes_svr
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Ávore de Decisão')
a = time.process_time()
modelo_dt = DecisionTreeRegressor() # Avore de decisão (é bem rápido)
modelo_dt = modelo_dt.fit(x_treino, y_treino)
predicoes_dt = modelo_dt.predict(x_teste) # Saida dos valores estimados pela IA
# plot.figure(figsize=(10,10))
# sns.scatterplot(x=y_teste, y=(predicoes_dt - y_teste)) # plotar diferença entre o projetado e o real
# plot.show()
#print(modelo_dt)
qualidade_dt = mean_squared_error(y_teste, predicoes_dt)
del modelo_dt, predicoes_dt
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Falso (média)')
a = time.process_time()
modelo_falso = DummyRegressor() # Teste utilizando média (falsa IA)
modelo_falso = modelo_falso.fit(x_treino, y_treino)
predicoes_falsas = modelo_falso.predict(x_teste)
# plot.figure(figsize=(10,10))
# sns.scatterplot(x = y_teste, y = (predicoes_falsas - y_teste)) # Erro da previsão utilizando apenas a média dos dados do "treino"
# plot.show()
#print(modelo_falso)
qualidade_falso = mean_squared_error(y_teste, predicoes_falsas)
del modelo_falso, predicoes_falsas
print(f'Tempo gasto: {time.process_time()- a} s')
###### Apagar as variáves para poder refazer o teste ########
del x, y, x_treino, y_treino, x_teste, y_teste
##### Remover as notas abaxido de 100 #########
f = 100 # filto do valor mínimo
notas_uteis = notas[ (notas.linguagem_codigo > f) & # Fazer em Apenas um comando
(notas.cienc_humanas > f) &
(notas.cienc_naturais > f) &
(notas.matematica > f) &
(notas.redacao > f) ]
x = notas_uteis[['cienc_naturais', 'cienc_humanas', 'matematica', 'redacao']]
y = notas_uteis['linguagem_codigo']
x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, random_state=326784) # Separa a amostra em elementos de treino e de teste
print('Dataframe sem as notas abaixo de 100')
print(f'Dados para treino (x e y): x = {x_treino.shape} e y = {y_treino.shape}')
print(f'Dados para teste (x e y): x = {x_teste.shape} e y = {y_teste.shape}')
# Criar o modelo de inteligencia artificial
print('Modelo - SVR') # Muito Pesado
a = time.process_time()
modelo_svr = SVR()
modelo_svr = modelo_svr.fit(x_treino, y_treino)
predicoes_svr = modelo_svr.predict(x_teste)
qualidade_svr1 = mean_squared_error(y_teste, predicoes_svr)
del modelo_svr, predicoes_svr
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Linear SVR')
a = time.process_time()
modelo_svrl = LinearSVR()
modelo_svrl = modelo_svrl.fit(x_treino, y_treino)
predicoes_svrl = modelo_svrl.predict(x_teste)
qualidade_svrl1 = mean_squared_error(y_teste, predicoes_svrl)
del modelo_svrl, predicoes_svrl
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Ávore de Decisão')
a = time.process_time()
modelo_dt = DecisionTreeRegressor()
modelo_dt = modelo_dt.fit(x_treino, y_treino)
predicoes_dt = modelo_dt.predict(x_teste)
qualidade_dt1 = mean_squared_error(y_teste, predicoes_dt)
del modelo_dt, predicoes_dt
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Falso (média)')
a = time.process_time()
modelo_falso = DummyRegressor(strategy="mean") # Teste utilizando média (falsa IA)
modelo_falso = modelo_falso.fit(x_treino, y_treino)
predicoes_falsas = modelo_falso.predict(x_teste)
qualidade_falso1 = mean_squared_error(y_teste, predicoes_falsas)
del modelo_falso, predicoes_falsas
print(f'Tempo gasto: {time.process_time()- a} s')
print('Modelo - Falso (mediana)')
a = time.process_time()
modelo_falso = DummyRegressor(strategy="median") # Teste utilizando médiana (falsa IA)
modelo_falso = modelo_falso.fit(x_treino, y_treino)
predicoes_falsas = modelo_falso.predict(x_teste)
qualidade_falso2 = mean_squared_error(y_teste, predicoes_falsas)
del modelo_falso, predicoes_falsas
print(f'Tempo gasto: {time.process_time()- a} s')
# Qualidade do teste # Seria o "erro quadrático"
### Avaliação dos métodos
print('Avaliação de desempenho dos métodos:')
# print(f"Método 1: Pontuação = {avaliacao_metodo:.2f}; Raiz = {math.sqrt(avaliacao_metodo):.2f}")
print(f'SRV0: \t\tPontuação = {qualidade_svr:.2f}, Raiz = {math.sqrt(qualidade_svr):.2f}')
print(f'SRV1: \t\tPontuação = {qualidade_svr1:.2f}, Raiz = {math.sqrt(qualidade_svr1):.2f}')
print(f'lin. SRV0: \tPontuação = {qualidade_svrl:.2f}, Raiz = {math.sqrt(qualidade_svrl):.2f}')
print(f'lin. SRV1: \tPontuação = {qualidade_svrl1:.2f}, Raiz = {math.sqrt(qualidade_svrl1):.2f}')
print(f'DT0: \t\tPontuação = {qualidade_dt:.2f}, Raiz = {math.sqrt(qualidade_dt):.2f}')
print(f'DT1: \t\tPontuação = {qualidade_dt1:.2f}, Raiz = {math.sqrt(qualidade_dt1):.2f}')
print(f'Falso0: \tPontuação = {qualidade_falso:.2f}, Raiz = {math.sqrt(qualidade_falso):.2f}')
print(f'Falso1: \tPontuação = {qualidade_falso1:.2f}, Raiz = {math.sqrt(qualidade_falso1):.2f}')
print(f'Falso2: \tPontuação = {qualidade_falso2:.2f}, Raiz = {math.sqrt(qualidade_falso2):.2f}')
### Gráficos Adicionais
# plotar/confrontar os resulados de um eixo com a previsão \o/
# sns.scatterplot(x=x_teste['matematica'].values, y=predicoe_notas_limguagem) # Previsões (fundo)
# sns.scatterplot(x=x_teste['matematica'].values, y=y_teste) # Valores Reais (parte de cima)
# plot.show()
# Vamos utilizar uma métrica para nos dizer como nosso modelo está indo
# Utilizaremos o Erro Quadrático Médio.
# Existem centenas de métricas de avaliação, tudo vai depender do que você precisa e o que você está prevendo.
## Desafio 1 da Tais Spadini
# Explore os parâmetros C e o max_iter do modelo LinesSVR. Não há garantias que o resultado será melhor.
## Desafio 2 do Thiago Gonçalves
# No gráfico em que plotamos a média com o valor previsto, plote a média das 4 notas ao invés de uma.
## Desafio 3 do Paulo Silveira
# Remover as notas zero e testar os mesmos modelos, comparando o resultado
## Desafio 4 do Guilherme Silveira
# Interpretar tudo que foi feito e compartilhar suas conclusões
## Desafio 5 do Thiago Gonçalves
# Calcule as métricas de erro que utilizamos (mean square root error) também no conjunto de treino, e veja o que acontece comparado com o conjunto de teste.
|
{"hexsha": "e75196acd4901dad75c51406a7d73661dc1b0fb7", "size": 10207, "ext": "py", "lang": "Python", "max_stars_repo_path": "Aula 05.py", "max_stars_repo_name": "brunoamaia94/Quarentena_Dados", "max_stars_repo_head_hexsha": "9402ec6223dc15146906f659db05eb1c0b41de97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Aula 05.py", "max_issues_repo_name": "brunoamaia94/Quarentena_Dados", "max_issues_repo_head_hexsha": "9402ec6223dc15146906f659db05eb1c0b41de97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Aula 05.py", "max_forks_repo_name": "brunoamaia94/Quarentena_Dados", "max_forks_repo_head_hexsha": "9402ec6223dc15146906f659db05eb1c0b41de97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.9201877934, "max_line_length": 203, "alphanum_fraction": 0.7170569217, "include": true, "reason": "import numpy", "num_tokens": 3073}
|
section \<open>General purpose definitions and lemmas\<close>
theory Misc imports
Main
begin
text \<open>A handy abbreviation when working with maps\<close>
abbreviation make_map :: "'a set \<Rightarrow> 'b \<Rightarrow> ('a \<rightharpoonup> 'b)" ("[ _ |=> _ ]")
where
"[ks |=> v] \<equiv> \<lambda>k. if k \<in> ks then Some v else None"
text \<open>Projecting the components of a triple\<close>
definition "fst3 \<equiv> fst"
definition "snd3 \<equiv> fst \<circ> snd"
definition "thd3 \<equiv> snd \<circ> snd"
lemma fst3_simp [simp]: "fst3 (a,b,c) = a" by (simp add: fst3_def)
lemma snd3_simp [simp]: "snd3 (a,b,c) = b" by (simp add: snd3_def)
lemma thd3_simp [simp]: "thd3 (a,b,c) = c" by (simp add: thd3_def)
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/GPU_Kernel_PL/Misc.thy"}
|
#redirect Big Blue
|
{"hexsha": "347676e280f3b8525c9f8d8fb5fd5ea690ca60d5", "size": 19, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/The_Yellow_House.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/The_Yellow_House.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/The_Yellow_House.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 9.5, "max_line_length": 18, "alphanum_fraction": 0.7894736842, "num_tokens": 5}
|
[STATEMENT]
lemma ereal_mult_less_0_iff:
fixes a b :: ereal
shows "a * b < 0 \<longleftrightarrow> (0 < a \<and> b < 0) \<or> (a < 0 \<and> 0 < b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a * b < 0) = (0 < a \<and> b < 0 \<or> a < 0 \<and> 0 < b)
[PROOF STEP]
by (cases rule: ereal2_cases[of a b]) (simp_all add: mult_less_0_iff)
|
{"llama_tokens": 165, "file": null, "length": 1}
|
#!/usr/bin/env python
"""
P2sGrad:
Zhang, X. et al. P2sgrad: Refined gradients for optimizing deep face models.
in Proc. CVPR 9906-9914, 2019
I think the grad defined in Eq.(11) is equivalent to define a MSE loss with 0
or 1 as target:
\mathcal{L}_i = \sum_{j=1}^{K} (\cos\theta_{i,j} - \delta(j == y_i))^2
The difference from a common MSE is that the network output is cos angle.
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "wangxin@nii.ac.jp"
__copyright__ = "Copyright 2021, Xin Wang"
###################
class P2SActivationLayer(torch_nn.Module):
""" Output layer that produces cos\theta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super(P2SActivationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
return
def forward(self, input_feat):
"""
Compute P2sgrad activation
input:
------
input_feat: tensor (batchsize, input_dim)
output:
-------
tensor (batchsize, output_dim)
"""
# normalize the weight (again)
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# normalize the input feature vector
# x_modulus (batchsize)
# sum input -> x_modules in shape (batchsize)
x_modulus = input_feat.pow(2).sum(1).pow(0.5)
# w_modules (output_dim)
# w_moduls should be 1, since w has been normalized
w_modulus = w.pow(2).sum(0).pow(0.5)
# W * x = ||W|| * ||x|| * cos())))))))
# inner_wx (batchsize, output_dim)
inner_wx = input_feat.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
# done
return cos_theta
class P2SGradLoss(torch_nn.Module):
"""P2SGradLoss() MSE loss between output and target one-hot vectors
See usage in __doc__ of P2SActivationLayer
"""
def __init__(self):
super(P2SGradLoss, self).__init__()
self.m_loss = torch_nn.MSELoss()
def forward(self, input_score, target):
"""
input
-----
input_score: tensor (batchsize, class_num)
cos\theta given by P2SActivationLayer(input_feat)
target: tensor (batchsize)
target[i] is the target class index of the i-th sample
output
------
loss: scaler
"""
# target (batchsize, 1)
target = target.long() #.view(-1, 1)
# filling in the target
# index (batchsize, class_num)
with torch.no_grad():
index = torch.zeros_like(input_score)
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
# MSE between \cos\theta and one-hot vectors
loss = self.m_loss(input_score, index)
return loss
if __name__ == "__main__":
print("Definition of P2SGrad Loss")
|
{"hexsha": "ce1ffd24a20223a913ba89c499de21f68e2a5cdc", "size": 3964, "ext": "py", "lang": "Python", "max_stars_repo_path": "core_modules/p2sgrad.py", "max_stars_repo_name": "Nijta/project-NN-Pytorch-scripts", "max_stars_repo_head_hexsha": "06a50ab072613fb60b8b8e1cea85c4aa8e75549d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 150, "max_stars_repo_stars_event_min_datetime": "2020-06-04T00:02:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:32:56.000Z", "max_issues_repo_path": "core_modules/p2sgrad.py", "max_issues_repo_name": "Nijta/project-NN-Pytorch-scripts", "max_issues_repo_head_hexsha": "06a50ab072613fb60b8b8e1cea85c4aa8e75549d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-06-17T04:08:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T03:42:25.000Z", "max_forks_repo_path": "core_modules/p2sgrad.py", "max_forks_repo_name": "Nijta/project-NN-Pytorch-scripts", "max_forks_repo_head_hexsha": "06a50ab072613fb60b8b8e1cea85c4aa8e75549d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-06-16T03:28:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T03:46:13.000Z", "avg_line_length": 28.5179856115, "max_line_length": 77, "alphanum_fraction": 0.5978809284, "include": true, "reason": "import numpy", "num_tokens": 1047}
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentoly.utils.weights import msra_fill
class Encoder(nn.Module):
def __init__(self, dim_input, dim_internal, num_layers):
super().__init__()
self.dim_input = dim_input
module_list = []
for i in range(num_layers):
module_list.extend([
nn.Conv2d(dim_input, dim_internal, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(dim_internal),
nn.ReLU(inplace=True)
])
dim_input = dim_internal
self.layers = nn.Sequential(*module_list)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
msra_fill(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, feature):
feature = self.layers(feature)
return feature
class DecoderAttention2d(nn.Module):
str_to_class = {
'GRU': nn.GRU,
'LSTM': nn.LSTM
}
def __init__(self, hidden_size, vocab_size, decoder_input_feature_size, rnn_type):
super().__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
assert len(decoder_input_feature_size) == 2
self.flatten_feature_size = decoder_input_feature_size[0] * decoder_input_feature_size[1]
self.embedding = nn.Embedding(vocab_size, self.hidden_size)
self.decoder = self.str_to_class[rnn_type](
input_size=self.hidden_size,
hidden_size=self.hidden_size,
num_layers=1, bidirectional=False)
self.encoder_outputs_w = nn.Linear(self.hidden_size, self.hidden_size)
self.hidden_state_w = nn.Linear(self.hidden_size, self.hidden_size)
self.v = nn.Parameter(torch.Tensor(self.hidden_size, 1)) # context vector
self.attn = nn.Linear(self.hidden_size * 2, self.flatten_feature_size)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.vocab_size)
nn.init.normal_(self.v, 0, 0.1)
def forward(self, input, hidden, encoder_outputs, cell=None):
'''
:param input: Shape is [1, BATCH_SIZE]
:param hidden: Shape is [1, BATCH_SIZE, HIDDEN_DIM]
:param cell: Shape is [1, BATCH_SIZE, HIDDEN_DIM], it is used in case of LSTM
:param encoder_outputs: [BATCH_SIZE, T, HIDDEN_DIM]
:return:
'''
BATCH_SIZE = hidden.shape[1]
assert tuple(hidden.shape) == (1, BATCH_SIZE, self.hidden_size), f'{hidden.shape}'
assert tuple(input.shape) == (BATCH_SIZE,), f'{input.shape} {input}'
assert tuple(encoder_outputs.shape) == (
BATCH_SIZE, self.flatten_feature_size, self.hidden_size), f'{encoder_outputs.shape}'
input = input.long()
input = self.embedding(input)
encoder_outputs_w = self.encoder_outputs_w(encoder_outputs)
hidden_state_w = self.hidden_state_w(hidden[0]).unsqueeze(1)
assert tuple(hidden_state_w.shape) == (BATCH_SIZE, 1, self.hidden_size)
hidden_state_w = hidden_state_w.expand(
(BATCH_SIZE, encoder_outputs_w.shape[1], self.hidden_size))
assert tuple(hidden_state_w.shape) == (
BATCH_SIZE, encoder_outputs_w.shape[1], self.hidden_size)
s = torch.tanh(encoder_outputs_w + hidden_state_w)
assert tuple(s.shape) == (BATCH_SIZE, self.flatten_feature_size, self.hidden_size)
s = s.reshape(-1, self.hidden_size)
s = torch.matmul(s, self.v)
s = s.reshape(-1, self.flatten_feature_size)
attn_weights = F.softmax(s, dim=1)
# print('attn_weights.shape', attn_weights.shape)
attn_applied = torch.bmm(attn_weights.unsqueeze(1), encoder_outputs)
# print('attn_applied.shape', attn_applied.shape)
attn_applied = attn_applied.permute(1, 0, 2)
attn_applied = attn_applied.squeeze(0)
output = torch.cat((input, attn_applied), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
if isinstance(self.decoder, nn.GRU):
output, hidden = self.decoder(output, hidden)
elif isinstance(self.decoder, nn.LSTM):
output, (hidden, cell) = self.decoder(output, (hidden, cell))
# to avoid removing/renaming output by mo.py
hidden = torch.reshape(hidden, hidden.shape)
output = self.out(output[0])
if self.training:
output = F.log_softmax(output, dim=1)
if isinstance(self.decoder, nn.LSTM):
return output, hidden, cell, attn_weights
if isinstance(self.decoder, nn.GRU):
return output, hidden, attn_weights
class TextRecognitionHeadAttention(nn.Module):
def __init__(self,
input_feature_size,
encoder_dim_input,
encoder_dim_internal,
encoder_num_layers,
decoder_input_feature_size,
decoder_max_seq_len,
decoder_vocab_size,
decoder_dim_hidden,
decoder_sos_index,
decoder_rnn_type,
visualize):
super().__init__()
self.input_feature_size = input_feature_size
self.encoder_dim_input = encoder_dim_input
self.encoder = Encoder(encoder_dim_input, encoder_dim_internal, encoder_num_layers)
self.dropout = nn.Dropout(0.5)
self.decoder = DecoderAttention2d(hidden_size=decoder_dim_hidden,
vocab_size=decoder_vocab_size,
decoder_input_feature_size=decoder_input_feature_size,
rnn_type=decoder_rnn_type)
self.decoder_input_feature_size = decoder_input_feature_size
self.decoder_max_seq_len = decoder_max_seq_len
self.decoder_sos_int = decoder_sos_index
self.decoder_dim_hidden = decoder_dim_hidden
self.visualize = visualize
self.criterion = nn.NLLLoss(reduction='none')
def forward(self, features, target=None, masks=None):
features = self.encoder(features)
if masks is not None:
masks = masks.expand(-1, features.shape[1], -1, -1)
features = features * masks
if self.training:
if not all([len(t) <= self.decoder_max_seq_len for t in target if t is not None]):
return torch.tensor(0.0, device=features.device), torch.tensor(0.0,
device=features.device)
valid_targets_indexes = torch.tensor(
[ind for ind, tgt in enumerate(target) if tgt], device=features.device)
if len(valid_targets_indexes) == 0:
return torch.tensor(0.0, device=features.device), torch.tensor(0.0,
device=features.device)
target = [np.array(t) for t in target if t]
target = [np.pad(t, (0, self.decoder_max_seq_len - len(t))) for t in target]
target = np.array(target)
batch_size = target.shape[0]
features = features[valid_targets_indexes]
features = features.view(features.shape[0], features.shape[1], -1) # B C H*W
features = features.permute(0, 2, 1) # T=H*W B C
features = self.dropout(features)
decoder_hidden = torch.zeros([1, batch_size, self.decoder_dim_hidden],
device=features.device)
decoder_cell = torch.zeros([1, batch_size, self.decoder_dim_hidden],
device=features.device)
loss = 0
positive_counter = 0
decoder_input = torch.ones([batch_size], device=features.device,
dtype=torch.long) * self.decoder_sos_int
target = torch.tensor(target, device=features.device, dtype=torch.long)
for di in range(self.decoder_max_seq_len):
if isinstance(self.decoder.decoder, nn.GRU):
decoder_output, decoder_hidden, decoder_attention = self.decoder(
decoder_input, decoder_hidden, features)
elif isinstance(self.decoder.decoder, nn.LSTM):
decoder_output, decoder_hidden, decoder_cell, decoder_attention = self.decoder(
decoder_input, decoder_hidden, features, decoder_cell)
mask = (target[:, di] != 0).float()
loss += self.criterion(decoder_output, target[:, di]) * mask
mask_sum = torch.sum(mask)
if mask_sum == 0:
break
positive_counter += mask_sum
decoder_input = target[:, di]
assert positive_counter > 0
loss = torch.sum(loss) / positive_counter
return loss.to(features.device), torch.tensor(0.0, device=features.device)
else:
batch_size = features.shape[0]
features = features.view(features.shape[0], features.shape[1], -1)
features = features.permute(0, 2, 1)
decoder_hidden = torch.zeros([1, batch_size, self.decoder_dim_hidden],
device=features.device)
decoder_cell = torch.zeros([1, batch_size, self.decoder_dim_hidden],
device=features.device)
decoder_input = torch.ones([batch_size], device=features.device,
dtype=torch.long) * self.decoder_sos_int
decoder_outputs = []
if self.visualize:
full_attention_mask = np.zeros([112, 112], dtype=np.uint8)
for di in range(self.decoder_max_seq_len):
if isinstance(self.decoder.decoder, nn.GRU):
decoder_output, decoder_hidden, decoder_attention = self.decoder(
decoder_input, decoder_hidden, features)
elif isinstance(self.decoder.decoder, nn.LSTM):
decoder_output, decoder_hidden, decoder_cell, decoder_attention = self.decoder(
decoder_input, decoder_hidden, features, decoder_cell)
if self.visualize:
attention = decoder_attention.cpu().detach().numpy()
attention = (np.reshape(attention, (-1, 28)) * 500).astype(np.uint8)
attention = cv2.resize(attention, (112, 112))
full_attention_mask += attention
cv2.imshow('attention', attention)
cv2.waitKey(30)
topv, topi = decoder_output.topk(1)
decoder_outputs.append(decoder_output)
decoder_input = topi.detach().view(batch_size)
decoder_outputs = torch.stack(decoder_outputs)
if self.visualize:
cv2.imshow('full', full_attention_mask)
return decoder_outputs
def dummy_forward(self):
return torch.zeros((1, self.decoder_max_seq_len, self.decoder.vocab_size),
dtype=torch.float32)
|
{"hexsha": "af7ad42a85803467a0ebed57db5118e2f68d224e", "size": 12033, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch_toolkit/text_spotting/text_spotting/models/text_recognition_heads/attention_based.py", "max_stars_repo_name": "morkovka1337/openvino_training_extensions", "max_stars_repo_head_hexsha": "846db45c264d6b061505213f51763520b9432ba9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-29T02:47:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T08:12:51.000Z", "max_issues_repo_path": "pytorch_toolkit/text_spotting/text_spotting/models/text_recognition_heads/attention_based.py", "max_issues_repo_name": "morkovka1337/openvino_training_extensions", "max_issues_repo_head_hexsha": "846db45c264d6b061505213f51763520b9432ba9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:40:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:37:36.000Z", "max_forks_repo_path": "pytorch_toolkit/text_spotting/text_spotting/models/text_recognition_heads/attention_based.py", "max_forks_repo_name": "morkovka1337/openvino_training_extensions", "max_forks_repo_head_hexsha": "846db45c264d6b061505213f51763520b9432ba9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-12T10:08:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-12T10:08:44.000Z", "avg_line_length": 39.976744186, "max_line_length": 102, "alphanum_fraction": 0.6027590792, "include": true, "reason": "import numpy", "num_tokens": 2463}
|
[STATEMENT]
lemma image_eq_to_f:
assumes "f1 ` S1 = f2 ` S2"
obtains f where "\<And> x. x \<in> S2 \<Longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>f. (\<And>x. x \<in> S2 \<Longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof (atomize_elim)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>f. \<forall>x. x \<in> S2 \<longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
f1 ` S1 = f2 ` S2
[PROOF STEP]
have "\<forall>x. x \<in> S2 \<longrightarrow> (\<exists> y. y \<in> S1 \<and> f1 y = f2 x)"
[PROOF STATE]
proof (prove)
using this:
f1 ` S1 = f2 ` S2
goal (1 subgoal):
1. \<forall>x. x \<in> S2 \<longrightarrow> (\<exists>y. y \<in> S1 \<and> f1 y = f2 x)
[PROOF STEP]
by (metis image_iff)
[PROOF STATE]
proof (state)
this:
\<forall>x. x \<in> S2 \<longrightarrow> (\<exists>y. y \<in> S1 \<and> f1 y = f2 x)
goal (1 subgoal):
1. \<exists>f. \<forall>x. x \<in> S2 \<longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x
[PROOF STEP]
thus "\<exists>f. \<forall>x. x \<in> S2 \<longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in> S2 \<longrightarrow> (\<exists>y. y \<in> S1 \<and> f1 y = f2 x)
goal (1 subgoal):
1. \<exists>f. \<forall>x. x \<in> S2 \<longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<exists>f. \<forall>x. x \<in> S2 \<longrightarrow> f x \<in> S1 \<and> f1 (f x) = f2 x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 754, "file": "Incredible_Proof_Machine_Build_Incredible_Tree", "length": 7}
|
# Monopoly odds
import numpy as np
from numba import njit
from collections import Counter
SIDES = 4
def solve():
iterations = 1_000_000
distribution = np.array([i + j + 2 for i, j in np.ndindex(SIDES, SIDES)])
positions = np.zeros(iterations, dtype=np.uint8)
rng = np.random.default_rng()
rints = rng.integers(0, 16, iterations, dtype=np.uint8)
kernel(positions, distribution, rints, iterations)
return "".join(
str(x[0]).zfill(2) for x in Counter(positions).most_common(3))
@njit
def kernel(positions, distribution, rints, iterations):
for i in range(iterations):
positions[i] = throw(positions[i - 1], i, distribution, rints)
@njit
def throw(current_square, iteration, distribution, rints):
n = rints[iteration]
new = (current_square + distribution[n]) % 40
if new == 30:
return 10
if new in [2, 17, 33]:
if n == 0:
return 0
elif n == 1:
return 10
elif new in [7, 22, 36]:
if n == 0:
return 0
elif n == 1:
return 10
elif n == 2:
return 11
elif n == 3:
return 24
elif n == 4:
return 39
elif n == 5:
return 5
elif n == 6 or n == 7:
return ((new + 5) // 10 * 10 + 5) % 40
elif n == 8:
if new == 7:
return 12
elif new == 22:
return 28
elif new == 36:
return 36
elif n == 9:
return (new - 3) % 40
return new
if __name__ == "__main__":
print(solve())
|
{"hexsha": "7efd09813b1513bffa845038cdffb5c966734dad", "size": 1637, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/p084.py", "max_stars_repo_name": "wephy/project-euler", "max_stars_repo_head_hexsha": "cc4824478282d3e1514a1bf7a1821b938db5bfcb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/p084.py", "max_issues_repo_name": "wephy/project-euler", "max_issues_repo_head_hexsha": "cc4824478282d3e1514a1bf7a1821b938db5bfcb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-07T19:03:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-07T19:03:35.000Z", "max_forks_repo_path": "python/p084.py", "max_forks_repo_name": "wephy/project-euler", "max_forks_repo_head_hexsha": "cc4824478282d3e1514a1bf7a1821b938db5bfcb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0563380282, "max_line_length": 77, "alphanum_fraction": 0.5180207697, "include": true, "reason": "import numpy,from numba", "num_tokens": 477}
|
[STATEMENT]
lemma fun_upds_append_drop [simp]:
"size xs = size ys \<Longrightarrow> m(xs@zs[\<mapsto>]ys) = m(xs[\<mapsto>]ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length xs = length ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ys) = m(xs [\<mapsto>] ys)
[PROOF STEP]
proof (induct xs arbitrary: ys)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>ys. length [] = length ys \<Longrightarrow> m([] @ zs [\<mapsto>] ys) = m([] [\<mapsto>] ys)
2. \<And>a xs ys. \<lbrakk>\<And>ys. length xs = length ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ys) = m(xs [\<mapsto>] ys); length (a # xs) = length ys\<rbrakk> \<Longrightarrow> m((a # xs) @ zs [\<mapsto>] ys) = m(a # xs [\<mapsto>] ys)
[PROOF STEP]
case (Cons a xs)
[PROOF STATE]
proof (state)
this:
length xs = length ?ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ?ys) = m(xs [\<mapsto>] ?ys)
length (a # xs) = length ys
goal (2 subgoals):
1. \<And>ys. length [] = length ys \<Longrightarrow> m([] @ zs [\<mapsto>] ys) = m([] [\<mapsto>] ys)
2. \<And>a xs ys. \<lbrakk>\<And>ys. length xs = length ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ys) = m(xs [\<mapsto>] ys); length (a # xs) = length ys\<rbrakk> \<Longrightarrow> m((a # xs) @ zs [\<mapsto>] ys) = m(a # xs [\<mapsto>] ys)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
length xs = length ?ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ?ys) = m(xs [\<mapsto>] ?ys)
length (a # xs) = length ys
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
length xs = length ?ys \<Longrightarrow> m(xs @ zs [\<mapsto>] ?ys) = m(xs [\<mapsto>] ?ys)
length (a # xs) = length ys
goal (1 subgoal):
1. m((a # xs) @ zs [\<mapsto>] ys) = m(a # xs [\<mapsto>] ys)
[PROOF STEP]
by (cases ys) (auto simp: map_upd_upds_conv_if)
[PROOF STATE]
proof (state)
this:
m((a # xs) @ zs [\<mapsto>] ys) = m(a # xs [\<mapsto>] ys)
goal (1 subgoal):
1. \<And>ys. length [] = length ys \<Longrightarrow> m([] @ zs [\<mapsto>] ys) = m([] [\<mapsto>] ys)
[PROOF STEP]
qed auto
|
{"llama_tokens": 869, "file": null, "length": 6}
|
# Augmenting Face Image Database via Transformations (Flip, Rotate, Crop & Scale)
# Jay Narhan
# UserID: JN721
#
# This class is designed to apply a series of image transformations to a set of images. A transformation is simply a
# function. It is a function that maps the image to another version of the image.
#
# G(x,y) = T( f(x,y) )
#
# G will be the new image based on the Transformation T. For every image, f(x,y) there will be 10 transformations
# generated.
#
# Images (including the original) may be saved to disk, along with a reference file that tracks the labelled emotion for
# each original image. Saving to disk requires invocation of the script with the argument "-s" at the cmd line.
# As a Class of type NN_Images, the object can be used for in-memory processing as required.
# Usage: Import this Class via "from NN_images import *" and call the methods needed.
import math, os, sys
import numpy as np
import pandas as pd
from skimage.io import imread, imsave
from skimage import transform as tf
from skimage import img_as_float
class NN_Images(object):
def __init__(self):
self.images = dict()
self.ROOT_DIR = os.getcwd()
self.IMAGE_DIR = './images'
self.DATA_DIR = './data'
self.TRANS_DIR = './trans_res'
self.IMG_WIDTH = 350
self.IMG_HEIGHT = 350
self.legends_file = 'legend.csv'
def get_imgs(self):
return self.images
def transform_img__(self, img, fn, emotion):
self.images[fn] = {'Image': img, 'Emotion': emotion} # Store original
counter = 0
self.images["Trans" + str(counter) + "_" + fn] = {'Image': np.fliplr(img), 'Emotion': emotion} # FLIP the image
counter += 1
for deg in range(-10, 15, 5): # ROTATE to be robust to camera orientation
if deg == 0:
continue
self.images["Trans" + str(counter) + "_" + fn] = {'Image': tf.rotate(img, deg), 'Emotion': emotion}
counter += 1
lenX, lenY = img.shape # CROP based on rough heuristic
for crop_size in range(8, 14, 2):
cropped = img[lenX / crop_size: - lenX / crop_size, lenY / crop_size: - lenY / crop_size]
self.images["Trans" + str(counter) + "_" + fn] = {'Image': cropped, 'Emotion': emotion}
counter += 1
for i in range(2): # SCALE down images (random factor btw 1.1 to 1.21)
scale_factor = math.sqrt(1.1) ** np.random.randint(2, 5)
scaled_img = tf.warp(img, tf.AffineTransform(scale=(scale_factor, scale_factor)))
self.images["Trans" + str(counter) + "_" + fn] = {'Image': scaled_img, 'Emotion': emotion}
counter += 1
def process_imgs(self):
# Read the file that tracks the emotions against the original images. Each new transformed image, will carry the
# same emotion label.
try:
os.chdir(self.DATA_DIR)
legend = pd.read_csv(self.legends_file)
except IOError as e:
print "I/O Error ({0}).".format(e.args[0])
sys.exit(2)
except OSError as e:
print "O/S Error({0}:{1})".format(e.args[1], self.DATA_DIR)
sys.exit(2)
finally:
os.chdir(self.ROOT_DIR)
os.chdir(self.IMAGE_DIR)
processed_imgs = 0
for filename in os.listdir(os.getcwd()):
try:
img = img_as_float(imread(filename)) # Read file as a float
# Pre-process:
rows, cols = img.shape
if cols != self.IMG_WIDTH or rows != self.IMG_HEIGHT:
print 'Resizing image ... '
img = tf.resize(img, output_shape=(self.IMG_WIDTH, self.IMG_HEIGHT))
emotion = legend.loc[legend['image'] == filename, 'emotion'].iloc[0] # Track the emotion of original
self.transform_img__(img, filename, emotion)
processed_imgs += 1
except IOError as e:
print "WARNING: {0} ... skipping this non-image file.".format(e.args[0])
print 'Processed {0} images'.format(processed_imgs)
os.chdir(self.ROOT_DIR)
def S2D(self, userid):
os.chdir(self.ROOT_DIR)
if not os.path.exists(self.TRANS_DIR):
os.makedirs('trans_res')
os.chdir(self.TRANS_DIR)
legend = pd.DataFrame(columns=['user.id', 'image', 'emotion'])
try:
for name, data in self.images.iteritems():
imsave(name, data['Image']) # Save image to disk
df = pd.DataFrame([[userid,
name, data['Emotion']]],
columns=['user.id', 'image', 'emotion'])
legend = legend.append(df)
legend = legend.sort_values(by='image')
legend.to_csv('01_legend.csv', index=False) # More efficient write to disk
except:
print 'Unknown Error in Saving to Disk'
pass
finally:
os.chdir(self.ROOT_DIR)
|
{"hexsha": "073764b7600dba5d7c658a14f837a080dba7bb5a", "size": 5512, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/JN721.py", "max_stars_repo_name": "shubham07kb/facial_expressions", "max_stars_repo_head_hexsha": "f620279b6729bd8697d9d98b4208bff074bec180", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/JN721.py", "max_issues_repo_name": "shubham07kb/facial_expressions", "max_issues_repo_head_hexsha": "f620279b6729bd8697d9d98b4208bff074bec180", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/JN721.py", "max_forks_repo_name": "shubham07kb/facial_expressions", "max_forks_repo_head_hexsha": "f620279b6729bd8697d9d98b4208bff074bec180", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8169014085, "max_line_length": 121, "alphanum_fraction": 0.5417271408, "include": true, "reason": "import numpy", "num_tokens": 1219}
|
import numpy as np
x = np.array([-1, 1, 2.569858113, 6]) # x coordinates in space
y = np.array([1, 3, 1, -2]) # f(x)
print("Column 0 ")
print(y)
n = len(y)
table = np.zeros([n, n]) # Create a square matrix to hold table
table[::, 0] = y # first column is y
results = {"table": [], "coefficient": []}
def newtonInterpolation(x, y):
""" Creates Newton table and extracts coefficients """
for j in range(1, n):
results["table"].append([])
for i in range(n - j):
# create table by updating other columns
table[i][j] = (table[i + 1][j - 1] - table[i][j - 1]) / (x[i + j] - x[i])
results["table"][len(results["table"]) - 1].append(table[i][j])
coeff = table[0] # return first row
for c in coeff:
results["coefficient"].append(c)
index = 1
for i in results["table"]:
print("Column ", index)
print(i)
index += 1
return table[0]
coeff_vector = newtonInterpolation(x, y)
print("The newton polynom is: ")
for i in range(n):
print(coeff_vector[i], end=" ")
for j in range(i):
print("( x -", x[j], ")", end=" ")
if (i != n - 1):
print("+", end=" ")
print()
|
{"hexsha": "65fde16b02f0a099129549c54caf5e10177f03ab", "size": 1255, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/newtonInterpolation.py", "max_stars_repo_name": "eechava6/NumericalAnalysisMethods", "max_stars_repo_head_hexsha": "3eeb06bdb20d97f13a09fd0ed71bce045173ffef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/newtonInterpolation.py", "max_issues_repo_name": "eechava6/NumericalAnalysisMethods", "max_issues_repo_head_hexsha": "3eeb06bdb20d97f13a09fd0ed71bce045173ffef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/newtonInterpolation.py", "max_forks_repo_name": "eechava6/NumericalAnalysisMethods", "max_forks_repo_head_hexsha": "3eeb06bdb20d97f13a09fd0ed71bce045173ffef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6078431373, "max_line_length": 86, "alphanum_fraction": 0.5155378486, "include": true, "reason": "import numpy", "num_tokens": 371}
|
using StatsPlots, Distributions
@testset "fa" begin
Random.seed!(1234)
a = rand(Uniform(0.5, 2.0), 30)
b = [sort(rand(Uniform(-3, 3), 4); rev = false) for i in 1:30]
θ = rand(Normal(0, 1), 3000)
resp = generate_response(θ, a, b)
fit1 = fa(resp; method = :em)
@test all(loadings(fit1) .≤ 1.0)
fit2 = fa(resp; method = :cm)
@test all(loadings(fit2) .≤ 1.0)
fit3 = fa(resp; cor_method = :Pearson, method = :em)
@test all(loadings(fit3) .≤ 1.0)
# @code_warntype fa(resp; method = :em)
# @profview fa(resp; method = :em)
end
@testset "parallel" begin
Random.seed!(1234)
a = rand(Uniform(0.5, 2.0), 30)
b = [sort(rand(Uniform(-3, 3), 4); rev = false) for i in 1:30]
θ = rand(Normal(0, 1), 3000)
resp = generate_response(θ, a, b)
par_fit1 = parallel(resp, 10, x -> fa(x; cor_method = :Polychoric))
@test ParallelAnalysis.findnfactors(par_fit1.FA.real, par_fit1.FA.resampled) == 1
@test ParallelAnalysis.findnfactors(par_fit1.PCA.real, par_fit1.PCA.resampled) == 1
par_fit2 = parallel(resp, 10, x -> fa(x; cor_method = :Pearson))
@test ParallelAnalysis.findnfactors(par_fit2.FA.real, par_fit2.FA.resampled) == 1
@test ParallelAnalysis.findnfactors(par_fit2.PCA.real, par_fit2.PCA.resampled) == 1
# @code_warntype parallel(resp, 10, x -> fa(x; cor_method = :Polychoric))
# @profview parallel(resp, 10, x -> fa(x; cor_method = :Polychoric))
# @code_warntype parallel(resp, 100, x -> fa(x; cor_method = :Pearson))
plot(par_fit1.PCA)
plot(par_fit1.FA)
plot(par_fit1)
plot(par_fit1, markershape = :none)
end
|
{"hexsha": "003fb3388d03a2e292e60fda3a2eb3cc91f352fe", "size": 1637, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Test2_parallel.jl", "max_stars_repo_name": "takuizum/ParallelAnalysis.jl", "max_stars_repo_head_hexsha": "56609f6f0822dba101b8720f16b20ea3bfc3ad72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Test2_parallel.jl", "max_issues_repo_name": "takuizum/ParallelAnalysis.jl", "max_issues_repo_head_hexsha": "56609f6f0822dba101b8720f16b20ea3bfc3ad72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-27T12:09:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-06T01:31:25.000Z", "max_forks_repo_path": "test/Test2_parallel.jl", "max_forks_repo_name": "takuizum/ParallelAnalysis.jl", "max_forks_repo_head_hexsha": "56609f6f0822dba101b8720f16b20ea3bfc3ad72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0697674419, "max_line_length": 87, "alphanum_fraction": 0.6353084911, "num_tokens": 586}
|
import cv2
import numpy as np
img = np.zeros((512, 512, 3), np.uint8)
# print(img.shape)
# img[200:300, 100:300] = 255, 0, 0
# draw a line
cv2.line(img, (0, 0), (300, 300), (0, 255, 0), 3)
# draw a rewctangle
cv2.rectangle(img, (0, 0), (250, 350), (0, 0, 255), 3)
# draw a circle
cv2.circle(img, (250, 250), 30, (255, 0, 0), 3)
# add text
cv2.putText(img, "OpenCV", (300, 100), cv2.FONT_HERSHEY_COMPLEX, 1, (150, 0, 150), 3)
cv2.imshow("Image", img)
cv2.waitKey(0)
|
{"hexsha": "0138baf49094f7172471cf4c98a93ff73615e21a", "size": 470, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/opencv/chapter_03.py", "max_stars_repo_name": "snandasena/udacity-dl", "max_stars_repo_head_hexsha": "7ea13ec7ebd992f1199f43bd5300782436ed71e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-04T11:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-21T14:42:32.000Z", "max_issues_repo_path": "src/opencv/chapter_03.py", "max_issues_repo_name": "snandasena/udacity-dl", "max_issues_repo_head_hexsha": "7ea13ec7ebd992f1199f43bd5300782436ed71e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/opencv/chapter_03.py", "max_forks_repo_name": "snandasena/udacity-dl", "max_forks_repo_head_hexsha": "7ea13ec7ebd992f1199f43bd5300782436ed71e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.380952381, "max_line_length": 85, "alphanum_fraction": 0.6106382979, "include": true, "reason": "import numpy", "num_tokens": 208}
|
[STATEMENT]
lemma almost_full_on_pp_iff:
"almost_full_on (adds) A \<longleftrightarrow> almost_full_on (adds) (mapping_of ` A)" (is "?l \<longleftrightarrow> ?r")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. almost_full_on (adds) A = almost_full_on (adds) (pp.mapping_of ` A)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. almost_full_on (adds) A \<Longrightarrow> almost_full_on (adds) (pp.mapping_of ` A)
2. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
assume ?l
[PROOF STATE]
proof (state)
this:
almost_full_on (adds) A
goal (2 subgoals):
1. almost_full_on (adds) A \<Longrightarrow> almost_full_on (adds) (pp.mapping_of ` A)
2. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
with _
[PROOF STATE]
proof (chain)
picking this:
PROP ?psi \<Longrightarrow> PROP ?psi
almost_full_on (adds) A
[PROOF STEP]
show ?r
[PROOF STATE]
proof (prove)
using this:
PROP ?psi \<Longrightarrow> PROP ?psi
almost_full_on (adds) A
goal (1 subgoal):
1. almost_full_on (adds) (pp.mapping_of ` A)
[PROOF STEP]
proof (rule almost_full_on_hom)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> A; y \<in> A; x adds y\<rbrakk> \<Longrightarrow> pp.mapping_of x adds pp.mapping_of y
[PROOF STEP]
fix x y :: "('a, 'b) pp"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> A; y \<in> A; x adds y\<rbrakk> \<Longrightarrow> pp.mapping_of x adds pp.mapping_of y
[PROOF STEP]
assume "x adds y"
[PROOF STATE]
proof (state)
this:
x adds y
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> A; y \<in> A; x adds y\<rbrakk> \<Longrightarrow> pp.mapping_of x adds pp.mapping_of y
[PROOF STEP]
thus "mapping_of x adds mapping_of y"
[PROOF STATE]
proof (prove)
using this:
x adds y
goal (1 subgoal):
1. pp.mapping_of x adds pp.mapping_of y
[PROOF STEP]
by (simp only: adds_pp_iff)
[PROOF STATE]
proof (state)
this:
pp.mapping_of x adds pp.mapping_of y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
almost_full_on (adds) (pp.mapping_of ` A)
goal (1 subgoal):
1. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
assume ?r
[PROOF STATE]
proof (state)
this:
almost_full_on (adds) (pp.mapping_of ` A)
goal (1 subgoal):
1. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
hence "almost_full_on (\<lambda>x y. mapping_of x adds mapping_of y) A"
[PROOF STATE]
proof (prove)
using this:
almost_full_on (adds) (pp.mapping_of ` A)
goal (1 subgoal):
1. almost_full_on (\<lambda>x y. pp.mapping_of x adds pp.mapping_of y) A
[PROOF STEP]
using subset_refl
[PROOF STATE]
proof (prove)
using this:
almost_full_on (adds) (pp.mapping_of ` A)
?A \<subseteq> ?A
goal (1 subgoal):
1. almost_full_on (\<lambda>x y. pp.mapping_of x adds pp.mapping_of y) A
[PROOF STEP]
by (rule almost_full_on_map)
[PROOF STATE]
proof (state)
this:
almost_full_on (\<lambda>x y. pp.mapping_of x adds pp.mapping_of y) A
goal (1 subgoal):
1. almost_full_on (adds) (pp.mapping_of ` A) \<Longrightarrow> almost_full_on (adds) A
[PROOF STEP]
thus ?l
[PROOF STATE]
proof (prove)
using this:
almost_full_on (\<lambda>x y. pp.mapping_of x adds pp.mapping_of y) A
goal (1 subgoal):
1. almost_full_on (adds) A
[PROOF STEP]
by (simp only: adds_pp_iff[symmetric])
[PROOF STATE]
proof (state)
this:
almost_full_on (adds) A
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1583, "file": "Polynomials_PP_Type", "length": 18}
|
[STATEMENT]
lemma partitionI: \<^marker>\<open>contributor \<open>Paulo Em?lio de Vilhena\<close>\<close>
fixes A :: "'a set" and B :: "('a set) set"
assumes "\<Union>B = A"
and "\<And>b1 b2. \<lbrakk> b1 \<in> B; b2 \<in> B \<rbrakk> \<Longrightarrow> b1 \<inter> b2 \<noteq> {} \<Longrightarrow> b1 = b2"
shows "partition A B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Congruence.partition A B
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> \<exists>!b. b \<in> B \<and> a \<in> b
2. \<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A
[PROOF STEP]
show "\<And>a. a \<in> A \<Longrightarrow> \<exists>!b. b \<in> B \<and> a \<in> b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. a \<in> A \<Longrightarrow> \<exists>!b. b \<in> B \<and> a \<in> b
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. \<lbrakk>a \<in> A; \<nexists>!b. b \<in> B \<and> a \<in> b\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
fix a
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. \<lbrakk>a \<in> A; \<nexists>!b. b \<in> B \<and> a \<in> b\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume "a \<in> A" "\<nexists>!b. b \<in> B \<and> a \<in> b"
[PROOF STATE]
proof (state)
this:
a \<in> A
\<nexists>!b. b \<in> B \<and> a \<in> b
goal (1 subgoal):
1. \<And>a. \<lbrakk>a \<in> A; \<nexists>!b. b \<in> B \<and> a \<in> b\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<in> A
\<nexists>!b. b \<in> B \<and> a \<in> b
[PROOF STEP]
obtain b1 b2 where "b1 \<in> B" "a \<in> b1"
and "b2 \<in> B" "a \<in> b2" "b1 \<noteq> b2"
[PROOF STATE]
proof (prove)
using this:
a \<in> A
\<nexists>!b. b \<in> B \<and> a \<in> b
goal (1 subgoal):
1. (\<And>b1 b2. \<lbrakk>b1 \<in> B; a \<in> b1; b2 \<in> B; a \<in> b2; b1 \<noteq> b2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
a \<in> A
\<nexists>!b. b \<in> B \<and> a \<in> b
\<Union> B = A
goal (1 subgoal):
1. (\<And>b1 b2. \<lbrakk>b1 \<in> B; a \<in> b1; b2 \<in> B; a \<in> b2; b1 \<noteq> b2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
b1 \<in> B
a \<in> b1
b2 \<in> B
a \<in> b2
b1 \<noteq> b2
goal (1 subgoal):
1. \<And>a. \<lbrakk>a \<in> A; \<nexists>!b. b \<in> B \<and> a \<in> b\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
b1 \<in> B
a \<in> b1
b2 \<in> B
a \<in> b2
b1 \<noteq> b2
goal (1 subgoal):
1. False
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
b1 \<in> B
a \<in> b1
b2 \<in> B
a \<in> b2
b1 \<noteq> b2
\<lbrakk>?b1.0 \<in> B; ?b2.0 \<in> B; ?b1.0 \<inter> ?b2.0 \<noteq> {}\<rbrakk> \<Longrightarrow> ?b1.0 = ?b2.0
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?a \<in> A \<Longrightarrow> \<exists>!b. b \<in> B \<and> ?a \<in> b
goal (1 subgoal):
1. \<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A
[PROOF STEP]
show "\<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
\<Union> B = A
goal (1 subgoal):
1. \<And>b. b \<in> B \<Longrightarrow> b \<subseteq> A
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?b \<in> B \<Longrightarrow> ?b \<subseteq> A
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1753, "file": null, "length": 18}
|
#include <boost/numeric/odeint/external/mpi/mpi_nested_algebra.hpp>
|
{"hexsha": "71be267b9324b5b7c70074cced878ead74d29bf3", "size": 68, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_numeric_odeint_external_mpi_mpi_nested_algebra.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_numeric_odeint_external_mpi_mpi_nested_algebra.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_numeric_odeint_external_mpi_mpi_nested_algebra.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 34.0, "max_line_length": 67, "alphanum_fraction": 0.8382352941, "num_tokens": 19}
|
import os
import streamlit as st
import streamlit.components.v1 as components
import time
from video import getvideo
from modelDownloader import downloader
import cv2
import numpy as np
# import matplotlib.pyplot as plt
from PIL import Image
import tempfile
import requests
from pathlib import Path
def detect_objects(our_image,score_threshold,nms_threshold):
# st.set_option('deprecation.showPyplotGlobalUse', False)
col1, col2 = st.columns(2)
new_img = np.array(our_image.convert('RGB'))
img = cv2.cvtColor(new_img,1)
height,width = img.shape[:2]
img_blob = cv2.dnn.blobFromImage(img, 0.003922, (416, 416), swapRB=True, crop=False)
# only single label
class_labels = ["corona_virus"]
#Declare only a single color
class_colors = ["0,255,0"]
class_colors = [np.array(every_color.split(",")).astype("int") for every_color in class_colors]
class_colors = np.array(class_colors)
class_colors = np.tile(class_colors,(1,1))
cfgpath=os.path.join(os.path.dirname( __file__ ),'model','cov_yolov4.cfg')
modelpath=os.path.join(os.path.dirname( __file__ ),'model','cov_yolov4_best.weights')
if not os.path.exists(modelpath):
loc=os.path.join(os.path.dirname( __file__ ),'model')
d=downloader()
with st.spinner('Downloading weights...'):
d.downloadFile("https://dl.dropbox.com/s/909wlai4r3y4uz1/cov_yolov4_best.weights?dl=1",loc)
# Loading the coronavirus custom model
# input preprocessed blob into model and pass through the model
# obtain the detection predictions by the model using forward() method
yolo_model = cv2.dnn.readNetFromDarknet(cfgpath,modelpath)
# Get all layers from the yolo network
# Loop and find the last layer (output layer) of the yolo network
yolo_layers = yolo_model.getLayerNames()
#yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in yolo_model.getUnconnectedOutLayers()]
yolo_output_layer=yolo_model.getUnconnectedOutLayersNames()
# input preprocessed blob into model and pass through the model
yolo_model.setInput(img_blob)
# obtain the detection layers by forwarding through till the output layer
obj_detection_layers = yolo_model.forward(yolo_output_layer)
############## NMS Change 1 ###############
# initialization for non-max suppression (NMS)
# declare list for [class id], [box center, width & height[], [confidences]
class_ids_list = []
boxes_list = []
confidences_list = []
pclass = []
count = 0
############## NMS Change 1 END ###########
# loop over each of the layer outputs
for object_detection_layer in obj_detection_layers:
# loop over the detections
for object_detection in object_detection_layer:
# obj_detections[1 to 4] => will have the two center points, box width and box height
# obj_detections[5] => will have scores for all objects within bounding box
all_scores = object_detection[5:]
predicted_class_id = np.argmax(all_scores)
prediction_confidence = all_scores[predicted_class_id]
# take only predictions with confidence more than 20%
if prediction_confidence > 0.20:
#get the predicted label
predicted_class_label = class_labels[predicted_class_id]
#obtain the bounding box co-oridnates for actual image from resized image size
bounding_box = object_detection[0:4] * np.array([width,height,width,height])
(box_center_x_pt, box_center_y_pt, box_width, box_height) = bounding_box.astype("int")
start_x_pt = int(box_center_x_pt - (box_width / 2))
start_y_pt = int(box_center_y_pt - (box_height / 2))
############## NMS Change 2 ###############
#save class id, start x, y, width & height, confidences in a list for nms processing
#make sure to pass confidence as float and width and height as integers
class_ids_list.append(predicted_class_id)
confidences_list.append(float(prediction_confidence))
boxes_list.append([start_x_pt, start_y_pt, int(box_width), int(box_height)])
############## NMS Change 2 END ###########
############## NMS Change 3 ###############
# Applying the NMS will return only the selected max value ids while suppressing the non maximum (weak) overlapping bounding boxes
# Non-Maxima Suppression confidence set as 0.5 & max_suppression threhold for NMS as 0.4 (adjust and try for better perfomance)
max_value_ids = cv2.dnn.NMSBoxes(boxes_list, confidences_list,score_threshold,nms_threshold )
# loop through the final set of detections remaining after NMS and draw bounding box and write text
for max_valueid in max_value_ids:
#max_class_id = max_valueid[0]
max_class_id = max_valueid
box = boxes_list[max_class_id]
start_x_pt = box[0]
start_y_pt = box[1]
box_width = box[2]
box_height = box[3]
#get the predicted class id and label
predicted_class_id = class_ids_list[max_class_id]
predicted_class_label = class_labels[predicted_class_id]
prediction_confidence = confidences_list[max_class_id]
############## NMS Change 3 END ###########
end_x_pt = start_x_pt + box_width
end_y_pt = start_y_pt + box_height
#get a random mask color from the numpy array of colors
box_color = class_colors[predicted_class_id]
#convert the color numpy array as a list and apply to text and box
box_color = [int(c) for c in box_color]
# print the prediction in console
predicted_class_label = "{}: {:.2f}%".format(predicted_class_label, prediction_confidence * 100)
pclass.append(predicted_class_label)
#print("predicted object {}".format(predicted_class_label))
count+=1
# draw rectangle and text in the image
cv2.rectangle(img, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), box_color, 1)
cv2.putText(img, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_COMPLEX, 0.7, box_color, 2)
with col1:
st.header("Original image")
st.image(our_image,use_column_width='always')
with col2:
st.header("Detected objects in the image")
st.image(img,use_column_width='always')
st.info("Zoom in the image to see the confidence scores of the objects detected")
if len(pclass)==1:
st.success("Coronovirus detected")
elif len(pclass)>=2:
st.success("Detected {} coronaviruses.".format(count))
else:
st.error("No coronavirus detected. Make sure you have uploaded the correct grayscale electron microscopic image of coronavirus. If you see this error even after uploading the correct image for coronavirus then the model requires further training")
def object_main():
"""OBJECT DETECTION APP"""
#Favicon
favpath=os.path.join(os.path.dirname( __file__ ),'images','icons8-coronavirus-16.png')
img1=Image.open(favpath)
#st.set_page_config(layout='wide')
st.set_page_config(layout='wide',page_title='Object detection',page_icon=img1,initial_sidebar_state = 'auto')
#components.iframe("https://docs.streamlit.io/en/latest")
hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
st.markdown("""
<style>
.css-18e3th9{
position: relative;
padding-bottom: 0px;
padding-top: 0px;
}
</style>""",unsafe_allow_html=True)
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.markdown("""
<style>
div.css-18e3th9{
position: relative;
padding-bottom: 0px;
padding-top: 0px;
}
</style>
""",unsafe_allow_html=True)
st.markdown("""
<style>
nav{
position: relative;
display: flex;
width: 640px;
margin: 4rem auto;
}
nav.navbar a{
display: block;
width: 20%;
padding: .65rem 0;
color:rgb(255, 75, 75);
text-decoration: none;
text-align: center;
text-transform: uppercase;
}
.nav-underline, .nav-underline2{
position: absolute;
left: 0;
bottom: -1px;
width: 18%;
height: 2px;
background: #fff;
transition: all .3s ease-in-out;
}
.nav-underline2{
top: -1px !important;
}
nav a:hover{
font-size: 20px;
font-weight: 900;
transition: font-size .1s linear,
font-weight .1s linear;
color: rgb(220,20,60);
}
nav a:nth-child(1).current ~ .nav-underline{
left: 0;
}
nav a:nth-child(2).current ~ .nav-underline{
left: 20%;
}
nav a:nth-child(3).current ~ .nav-underline{
left: 40%;
}
nav a:nth-child(4).current ~ .nav-underline{
left: 60%;
}
nav a:nth-child(5).current ~ .nav-underline{
left: 80%;
}
nav a:nth-child(1):hover ~ .nav-underline{
left: 0;
}
nav a:nth-child(2):hover ~ .nav-underline{
left: 20%;
}
nav a:nth-child(3):hover ~ .nav-underline{
left: 40%;
}
nav a:nth-child(4):hover ~ .nav-underline{
left: 60%;
}
nav a:nth-child(5):hover ~ .nav-underline{
left: 80%;
}
nav a:nth-child(1).current ~ .nav-underline2{
left: 0;
}
nav a:nth-child(2).current ~ .nav-underline2{
left: 20%;
}
nav a:nth-child(3).current ~ .nav-underline2{
left: 40%;
}
nav a:nth-child(4).current ~ .nav-underline2{
left: 60%;
}
nav a:nth-child(5).current ~ .nav-underline2{
left: 80%;
}
nav a:nth-child(1):hover ~ .nav-underline2{
left: 0;
}
nav a:nth-child(2):hover ~ .nav-underline2{
left: 20%;
}
nav a:nth-child(3):hover ~ .nav-underline2{
left: 40%;
}
nav a:nth-child(4):hover ~ .nav-underline2{
left: 60%;
}
nav a:nth-child(5):hover ~ .nav-underline2{
left: 80%;
}
</style>
""",unsafe_allow_html=True)
st.markdown("""
<nav class="navbar">
<a href="#">Home</a>
<a href="https://github.com/Kaushal000/Streamlit-coronavirus-detection-app/wiki" target="_blank">WIKI</a>
<a href="#" class="current">Thesis</a>
<a href="https://github.com/Kaushal000/Streamlit-coronavirus-detection-app/tree/main/src" target="_blank">Source</a>
<a href="https://colab.research.google.com/drive/1KszU9b3t-T_Ia5GNjiy_uuktOnydlEID#scrollTo=O2w9w1Ye_nk1" target="_blank">Train</a>
<div class="nav-underline"></div>
<div class="nav-underline2"></div>
</nav>""",unsafe_allow_html=True)
"""
[![GitHub][github_badge]][github_link]
[github_badge]: https://badgen.net/badge/icon/GitHub?icon=github&color=black&label
[github_link]: https://github.com/Kaushal000/Streamlit-coronavirus-detection-app
"""
st.title("Coronavirus detection app")
opt=st.sidebar.radio("Choose what to do",("Run the app","View documentation","View source code","Show mAP% score"))
if opt=="Run the app":
st.header("Object Detection")
st.write("Object detection is a central algorithm in computer vision. The algorithm implemented below is YOLO (You Only Look Once), a state-of-the-art algorithm trained to identify thousands of objects types. It extracts objects from images and identifies them using OpenCV and Yolo. This task involves Deep Neural Networks(DNN), yolo trained model, yolo configuration and a dataset to detect objects.")
score_threshold = st.sidebar.slider("Confidence Threshold", 0.00,1.00,0.5,0.01)
nms_threshold = st.sidebar.slider("NMS Threshold", 0.00, 1.00, 0.4, 0.01)
choice = st.radio("", ("Show Demo", "Upload image and detect coronaviruses from image" ,"Upload video and detect coronaviruses form video"))
st.write()
if choice == "Upload image and detect coronaviruses from image":
st.set_option('deprecation.showfileUploaderEncoding', False)
image_file = st.file_uploader("Upload Image", type=['jpg','png','jpeg'])
if image_file is not None:
our_image = Image.open(image_file)
st.info('Image uploaded')
with st.spinner('Detecting objects and generating confidence scores...'):
time.sleep(5)
detect_objects(our_image,score_threshold,nms_threshold)
elif choice== "Upload video and detect coronaviruses form video" :
st.write()
f=st.file_uploader("Upload Video",type='mp4')
col1, col2, col3 = st.columns([5,20,1])
if f is not None:
tfile = tempfile.NamedTemporaryFile(delete=False)
tfile.write(f.read())
nm=tfile.name
with col1:
st.write("")
with col2:
getvideo(nm,score_threshold,nms_threshold)
with col3:
st.write("")
else :
path=os.path.join(os.path.dirname( __file__ ),'images','coronavirus.jpg')
our_image = Image.open(path)
detect_objects(our_image,score_threshold,nms_threshold)
# embed streamlit docs in a streamlit app
elif opt=="View documentation":
with st.spinner('Fetching documentation from github..'):
time.sleep(5)
content=requests.get('https://raw.githubusercontent.com/Kaushal000/Streamlit-coronavirus-detection-app/main/README.md').text
st.markdown(content,unsafe_allow_html=True)
elif opt=="View source code":
pth=os.path.join(os.path.dirname( __file__ ),'app.py')
p=Path(pth).read_text()
st.code(p,language='python')
else:
col1,col2,col3=st.columns([11,20,10])
st.markdown("""
<style>
.Red{
color:red;
}
.Blue{
color:blue;
}
</style>
""",unsafe_allow_html=True)
with col1:
st.markdown("""<br><br><br><br><br><br><br><br><br><br><br><br><br><br><br><br>
<div>
<strong>The <span class="Blue">blue</span> line indicates average loss percentage</strong> 👉
</div>
""",unsafe_allow_html=True)
with col2:
p=os.path.join(os.path.dirname(__file__),'images','cov.png')
chart=Image.open(p)
st.image(chart,use_column_width='auto')
with col3:
st.markdown(
"""<br><br><br><br>
<div>
👈 <strong>The <span class="Red">red</span> line indicates mAP% score</strong>
</div>
<br><br><br><br>
""",unsafe_allow_html=True)
st.sidebar.markdown(
"""<br><br>
<style>
.center {
margin: auto;
width: 50%;
padding: 10px;
color: rgb(255, 75, 75);
}
</style>
<h3 class="center">Presentation</h3>
<iframe src="https://onedrive.live.com/embed?resid=CC721E0634E29AC8%2113676&authkey=%21AJqlhggJ3vIp8MA&em=2&wdAr=1.7777777777777777" width="300px" height="263px" frameborder="0">This is an embedded <a target="_blank" href="https://office.com">Microsoft Office</a></iframe>
""",unsafe_allow_html=True)
if __name__ == '__main__':
object_main()
|
{"hexsha": "841d164d52dbbc07a38c19c6bff9eb6b8a1f0546", "size": 16108, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/app.py", "max_stars_repo_name": "Kaushal000/Streamlit-coronavirus-detection-app", "max_stars_repo_head_hexsha": "3f6c646c4e7a268f27f2cb6b7d8d02d002e8679a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/app.py", "max_issues_repo_name": "Kaushal000/Streamlit-coronavirus-detection-app", "max_issues_repo_head_hexsha": "3f6c646c4e7a268f27f2cb6b7d8d02d002e8679a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/app.py", "max_forks_repo_name": "Kaushal000/Streamlit-coronavirus-detection-app", "max_forks_repo_head_hexsha": "3f6c646c4e7a268f27f2cb6b7d8d02d002e8679a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4885654886, "max_line_length": 411, "alphanum_fraction": 0.6028681401, "include": true, "reason": "import numpy", "num_tokens": 4006}
|
from torch.utils import data
import json
from PIL import Image
import os
import cv2
import numpy as np
class DataLoader(data.Dataset):
def __init__(self, data_list, transform=None):
with open(os.path.abspath(data_list)) as json_file:
data = json.load(json_file)
self.data = data
self.transform = transform
def __getitem__(self, index):
data = self.data[index]
image_dir = data['image_dir']
label = data['target']
img = Image.open(image_dir)
# img = np.array(img)
# img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
if self.transform is not None:
img = self.transform(img)
return img, label, image_dir
def __len__(self):
return len(self.data)
|
{"hexsha": "c9580a1d58328dc4d998d23f3431adb89dfe65cc", "size": 792, "ext": "py", "lang": "Python", "max_stars_repo_path": "classification/data_loader.py", "max_stars_repo_name": "hrlblab/Glo-In-One", "max_stars_repo_head_hexsha": "7daef49c557bccd6f5c956b88603357346dc78a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-31T02:04:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-31T02:04:07.000Z", "max_issues_repo_path": "classification/data_loader.py", "max_issues_repo_name": "hrlblab/Glo-In-One", "max_issues_repo_head_hexsha": "7daef49c557bccd6f5c956b88603357346dc78a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classification/data_loader.py", "max_forks_repo_name": "hrlblab/Glo-In-One", "max_forks_repo_head_hexsha": "7daef49c557bccd6f5c956b88603357346dc78a2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-15T04:48:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-15T04:48:40.000Z", "avg_line_length": 24.0, "max_line_length": 69, "alphanum_fraction": 0.6287878788, "include": true, "reason": "import numpy", "num_tokens": 182}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Basic ROS
import rospy
# ROS messages
from visualization_msgs.msg import Marker
from sensor_msgs.msg import LaserScan
# Maths
import numpy as np
# Custom libraries
from splitandmerge import splitandmerge
from probabilistic_lib.functions import publish_lines
#===============================================================================
class SplitAndMergeNode(object):
'''
Class to hold all ROS related transactions to use split and merge algorithm.
'''
#===========================================================================
def __init__(self):
'''
Initializes publishers and subscribers.
'''
# Publishers
self.pub_line = rospy.Publisher("lines", Marker,queue_size=0)
self.pub_laser = rospy.Publisher("scan_cut",LaserScan,queue_size=0)
self.id=0
# Subscribers
self.sub_scan = rospy.Subscriber("scan", LaserScan, self.laser_callback)
#===========================================================================
def laser_callback(self, msg):
'''
Function called each time a LaserScan message with topic "scan" arrives.
'''
# Project LaserScan to points in space
rng = np.array(msg.ranges)
ang = np.linspace(msg.angle_min, msg.angle_max, len(msg.ranges))
points = np.vstack((rng * np.cos(ang),
rng * np.sin(ang)))
msg.range_max = 3
self.pub_laser.publish(msg)
# Filter long ranges
points = points[:, rng < msg.range_max]
# Use split and merge to obtain lines and publish
lines = splitandmerge(points)
self.id=self.id+1
# Publish results
publish_lines(lines, self.pub_line, frame=msg.header.frame_id,
time=msg.header.stamp, ns='scan_line', color=(1,0,0),marker_id=self.id)
#===============================================================================
if __name__ == '__main__':
# ROS initializzation
rospy.init_node('splitandmerge')
node = SplitAndMergeNode()
# Continue forever
rospy.spin()
|
{"hexsha": "322264a06588b4d24fff91a396d9315ce1b2c7f7", "size": 2237, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/split_and_merge/src/splitandmergenode.py", "max_stars_repo_name": "karanchawla/ROS", "max_stars_repo_head_hexsha": "e005b8b488bcf493a6dd283d3179db597bfcb09d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/split_and_merge/src/splitandmergenode.py", "max_issues_repo_name": "karanchawla/ROS", "max_issues_repo_head_hexsha": "e005b8b488bcf493a6dd283d3179db597bfcb09d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/split_and_merge/src/splitandmergenode.py", "max_forks_repo_name": "karanchawla/ROS", "max_forks_repo_head_hexsha": "e005b8b488bcf493a6dd283d3179db597bfcb09d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0694444444, "max_line_length": 93, "alphanum_fraction": 0.5234689316, "include": true, "reason": "import numpy", "num_tokens": 440}
|
#!/usr/bin/env python
# coding: utf-8
import sys
import os
import time
import json
import numpy as np
import pandas as pd
import dill
import random
from os.path import join, dirname, abspath, pardir, basename
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from common.data import *
class NgramsExtractor:
def __init__(self, min_ngram_len = 1, max_ngram_len = 2):
self.positive_counter = CountVectorizer(analyzer='word',
tokenizer=lambda x: x.split(),
token_pattern=None,
stop_words=None,
ngram_range=(min_ngram_len, max_ngram_len),)
#self.negative_counter = CountVectorizer(analyzer='word',
# tokenizer=lambda x: x.split(),
# token_pattern=None,
# stop_words=None,
# ngram_range=(min_ngram_len, max_ngram_len),)
def fit(self, x, y = None):
positives = x.lengths.apply(get_positives)
#negatives = x.lengths.apply(get_negatives)
self.positive_counter.fit(positives.apply(join_str))
#self.negative_counter.fit(negatives.apply(join_str))
return self
def transform(self, data_list):
positives = data_list.lengths.apply(get_positives)
#negatives = data_list.lengths.apply(get_negatives)
positives_str = positives.apply(join_str)
#negatives_str = negatives.apply(join_str)
positive_ngrams = self.positive_counter.transform(positives_str)
#negative_ngrams = self.negative_counter.transform(negatives_str)
#return np.concatenate((positive_ngrams.todense(), negative_ngrams.todense()), axis=1)
return positive_ngrams.todense()
def classify(train, test):
# Ngrams feature extractor
combinedFeatures = FeatureUnion([('ngrams', NgramsExtractor(1, 2))])
# Training and validation data
X_train = combinedFeatures.fit_transform(train)
y_train = np.array(train.class_label)
X_test = combinedFeatures.transform(test)
y_test = np.array(test.class_label)
# Model training
batch_mode = False
if batch_mode:
rfc = RandomForestClassifier(n_estimators=0, warm_start=True)
skf = StratifiedKFold(n_splits=2)
for _, index in skf.split(X_train, y_train):
rfc.n_estimators += 50
rfc.fit(X_train[index], y_train[index])
else:
rfc = RandomForestClassifier(n_estimators=70)
rfc.fit(X_train, y_train)
# Model evaluation
yhat_test = rfc.predict(X_test)
yhat_prob = np.max(rfc.predict_proba(X_test), axis=1)
# Accuracy metric
acc = accuracy_score(y_test, yhat_test)
print("Accuracy Score:", acc)
return y_test, yhat_test, yhat_prob
num_classes = 10500 # Number of classes (sites)
num_samples = 20 # Number of samples for each class (site)
min_packets = 1 # Minimum of packets for each row (record)
max_packets = 50 # Maximun of packets for each row (record)
def classifier_train():
# Locate dataset
data_dir = join(abspath(join(dirname("__file__"), pardir, pardir)), 'dataset', 'closed-world')
print(data_dir)
# Load dataset
df = load_data(data_dir)
print("initial data", df.shape)
# Clean dataset
df_cleaned = clean_df_closed(df, min_packets, max_packets, num_classes, num_samples)
print("cleaned data", df_cleaned.shape)
# Split dataset: train 90%, test 10%
df_train, df_test, _, _ = train_test_split(df_cleaned, df_cleaned.class_label, test_size=0.1, stratify=df_cleaned.class_label)
# Perform k-fold cross classification
results = []
kf = StratifiedKFold(n_splits = 5)
for k, (train_k, val_k) in enumerate(kf.split(df_train, df_train.class_label)):
print("k-fold", k)
start_time = time.time()
result = classify(df_train.iloc[train_k], df_train.iloc[val_k])
print("--- %s seconds ---" % (time.time() - start_time))
results.append(result)
# Classification report
reports = pd.DataFrame(columns=['k-fold', 'label', 'precision', 'recall', 'f1-score', 'support'])
true_vectors, pred_vectors = [r[0] for r in results], [r[1] for r in results]
for i, (y_true, y_pred) in enumerate(zip(true_vectors, pred_vectors)):
# The precision, recall, F1 score for each class and averages in one k-fold
output = classification_report(y_true, y_pred, output_dict=True, zero_division=0)
report = pd.DataFrame(output).transpose()
report = report.reset_index()
report = report.rename(columns={'index': 'label'})
report['k-fold'] = i
reports = reports.append(report)
# Statistics report
statistics = reports.groupby('label').describe().loc['macro avg']
print("Mean")
print(statistics.xs('mean', level=1))
print("Standard deviation")
print(statistics.xs('std', level=1))
# Test dataset accuracy
start_time = time.time()
classify(df_train, df_test)
print("--- %s seconds ---" % (time.time() - start_time))
def classifier_build():
# Locate dataset
train_dir = join(abspath(join(dirname("__file__"), pardir, pardir)), 'dataset', 'closed-world')
print(train_dir)
test_dir = join(abspath(join(dirname("__file__"), pardir, pardir)), 'dataset', 'open-world')
print(test_dir)
# Load dataset
df_train = load_data(train_dir)
print("initial train data", df_train.shape)
df_test = load_data(test_dir)
print("initial test data", df_test.shape)
# Clean dataset
df_train_cleaned = clean_df_closed(df_train, min_packets, max_packets, num_classes, num_samples)
print("cleaned train data", df_train_cleaned.shape)
df_test_cleaned = clean_df_opened(df_test, min_packets, max_packets, 0, 1)
print("cleaned test data", df_test_cleaned.shape)
# Remove test labels which are not in training labels
train_list = list(set(df_train_cleaned.class_label.tolist()))
df_test_cleaned = df_test_cleaned[df_test_cleaned["class_label"].isin(train_list)]
print("cleaned cleaned test data", df_test_cleaned.shape)
start_time = time.time()
target, prediction, probability = classify(df_train_cleaned, df_test_cleaned)
print("--- %s seconds ---" % (time.time() - start_time))
# Save result into CSV
cw_result = np.column_stack((target.astype(int), prediction.astype(int), probability))
np.savetxt("cw_result.csv", cw_result, delimiter=",", fmt="%.2f")
def classifier_serve():
# Load pipeline
loaded_model = dill.load(open('doh_data_classify.pickle', 'rb'))
print("Model Loaded")
# Load websites
urls = get_url_list("../collection/websites.txt")
for line in sys.stdin:
# Locate file
data_file = join(abspath(dirname("__file__")), line)[:-1]
# Load file
df_new = load_data(data_file)
# Predict with pipeline
pred_new = loaded_model.predict(df_new)
pred_pro = loaded_model.predict_proba(df_new)
pred_url = [ urls[int(index) - 1] for index in pred_new ]
print("Prediction:", pred_url, np.max(pred_pro, axis=1))
if __name__ == '__main__':
if (len(sys.argv) == 2):
if (sys.argv[1] == 'train'):
print("Training...")
classifier_train()
print("Training done!!!")
exit(0)
elif (sys.argv[1] == 'build'):
print("Building...")
classifier_build()
print("Building done!!!")
exit(0)
elif (sys.argv[1] == 'serve'):
print("Serving...")
classifier_serve()
print("Serving done!!!")
exit(0)
print("usage: doh_data_classify.py { train | build | serve }")
exit(1)
classifier_train()
|
{"hexsha": "2547f520d972a5e26b0883d47d8f358df054b76e", "size": 8380, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/doh_data_classifier/classifier_rfc.py", "max_stars_repo_name": "yshao321/doh_website_fingerprinting", "max_stars_repo_head_hexsha": "2c44940f80ba6503075735302a1d8cd421571ada", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-31T04:40:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T04:40:12.000Z", "max_issues_repo_path": "code/doh_data_classifier/classifier_rfc.py", "max_issues_repo_name": "yshao321/doh_website_fingerprinting", "max_issues_repo_head_hexsha": "2c44940f80ba6503075735302a1d8cd421571ada", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/doh_data_classifier/classifier_rfc.py", "max_forks_repo_name": "yshao321/doh_website_fingerprinting", "max_forks_repo_head_hexsha": "2c44940f80ba6503075735302a1d8cd421571ada", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9162995595, "max_line_length": 130, "alphanum_fraction": 0.6423627685, "include": true, "reason": "import numpy", "num_tokens": 1941}
|
[STATEMENT]
lemma arctan_bounds:
assumes "0 \<le> x" "x < 1"
shows arctan_lower_bound:
"(\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x"
(is "(\<Sum>k<_. (- 1)^ k * ?a k) \<le> _")
and arctan_upper_bound:
"arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x &&& arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x
2. arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
have tendsto_zero: "?a \<longlonglongrightarrow> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0
[PROOF STEP]
proof (rule tendsto_eq_rhs)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> ?x
2. ?x = 0
[PROOF STEP]
show "(\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0 * 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0 * 0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
0 \<le> x
x < 1
goal (1 subgoal):
1. (\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0 * 0
[PROOF STEP]
by (intro tendsto_mult real_tendsto_divide_at_top)
(auto simp: filterlim_real_sequentially filterlim_sequentially_iff_filterlim_real
intro!: real_tendsto_divide_at_top tendsto_power_zero filterlim_real_sequentially
tendsto_eq_intros filterlim_at_top_mult_tendsto_pos filterlim_tendsto_add_at_top)
[PROOF STATE]
proof (state)
this:
(\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0 * 0
goal (1 subgoal):
1. 0 * 0 = 0
[PROOF STEP]
qed simp
[PROOF STATE]
proof (state)
this:
(\<lambda>k. 1 / real (k * 2 + 1) * x ^ (k * 2 + 1)) \<longlonglongrightarrow> 0
goal (2 subgoals):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x
2. arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
have nonneg: "0 \<le> ?a n" for n
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> 1 / real (n * 2 + 1) * x ^ (n * 2 + 1)
[PROOF STEP]
by (force intro!: divide_nonneg_nonneg mult_nonneg_nonneg zero_le_power assms)
[PROOF STATE]
proof (state)
this:
0 \<le> 1 / real (?n * 2 + 1) * x ^ (?n * 2 + 1)
goal (2 subgoals):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x
2. arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
have le: "?a (Suc n) \<le> ?a n" for n
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 / real (Suc n * 2 + 1) * x ^ (Suc n * 2 + 1) \<le> 1 / real (n * 2 + 1) * x ^ (n * 2 + 1)
[PROOF STEP]
by (rule mult_mono[OF _ power_decreasing]) (auto simp: field_split_simps assms less_imp_le)
[PROOF STATE]
proof (state)
this:
1 / real (Suc ?n * 2 + 1) * x ^ (Suc ?n * 2 + 1) \<le> 1 / real (?n * 2 + 1) * x ^ (?n * 2 + 1)
goal (2 subgoals):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x
2. arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
from summable_Leibniz'(4)[of ?a, OF tendsto_zero nonneg le, of n]
summable_Leibniz'(2)[of ?a, OF tendsto_zero nonneg le, of n]
assms
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>i. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1))) \<le> (\<Sum>i<2 * n + 1. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1)))
(\<Sum>i<2 * n. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1))) \<le> (\<Sum>i. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1)))
0 \<le> x
x < 1
[PROOF STEP]
show "(\<Sum>k<2*n. (- 1)^ k * ?a k) \<le> arctan x" "arctan x \<le> (\<Sum>k<2 * n + 1. (- 1)^ k * ?a k)"
[PROOF STATE]
proof (prove)
using this:
(\<Sum>i. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1))) \<le> (\<Sum>i<2 * n + 1. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1)))
(\<Sum>i<2 * n. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1))) \<le> (\<Sum>i. (- 1) ^ i * (1 / real (i * 2 + 1) * x ^ (i * 2 + 1)))
0 \<le> x
x < 1
goal (1 subgoal):
1. (\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x &&& arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
[PROOF STEP]
by (auto simp: arctan_series)
[PROOF STATE]
proof (state)
this:
(\<Sum>k<2 * n. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1))) \<le> arctan x
arctan x \<le> (\<Sum>k<2 * n + 1. (- 1) ^ k * (1 / real (k * 2 + 1) * x ^ (k * 2 + 1)))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2635, "file": null, "length": 15}
|
# This file is part of IntegerTriangles.
# Copyright Peter Luschny. License is MIT.
# Version of: UTC 2021-05-22 22:01:34
# 8228b27e-bb38-11eb-3e8e-73d8fe660356
# Do not edit this file, it is generated from the modules and will be overwritten!
# Edit the modules in the src directory and build this file with BuildTriangles.jl!
tstdir = realpath(joinpath(dirname(@__FILE__)))
srcdir = joinpath(dirname(tstdir), "src")
tstdir ∉ LOAD_PATH && push!(LOAD_PATH, tstdir)
srcdir ∉ LOAD_PATH && push!(LOAD_PATH, srcdir)
module perftests
using IntegerTriangles, Dates, InteractiveUtils
InteractiveUtils.versioninfo()
start = Dates.now()
# +++ TrianglesBase.jl +++
# +++ TrianglesExamples.jl +++
T = LaguerreTriangle(8)
Println.(PolyTriangle(T))
Println.(PolyArray(T))
Println.(Inverse(PolyTriangle(T)))
Println.(FubiniTriangle(8))
Println.(DArcaisTriangle(8))
# +++ TrianglesExplorer.jl +++
# +++ TrianglesPlot.jl +++
# +++ TrianglesTables.jl +++
# +++ TrianglesTraitCard.jl +++
# +++ TrianglesUtils.jl +++
GetSeqnum(ℤInt[1, 1, -2, 3, -3, 3, -5, 7, -6, 6, -10,
12, -11, 13, -17, 20, -21, 21, -27, 34, -33, 36, -46, 51,
-53, 58, -68, 78, -82, 89, -104]) |> println
GetSeqnum(ℤInt[0, 1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1,
2, 1, 14, 1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51,
1, 2, 1, 14, 1, 2, 2, 14, 1, 6, 1, 4, 2, 2, 1, 52]) |> println
GetSeqnum(ℤInt[1, 1, 7, 37, 241, 2101, 18271, 201097, 2270017,
29668681, 410815351, 6238931821, 101560835377, 1765092183037,
32838929702671, 644215775792401]) |> println
GetSeqnum(ℤInt[1, 1, 1, 7, 37, 241, 2101, 18271, 201097, 2270017,
29668681, 410815351, 6238931821, 101560835377, 1765092183037,
32838929702671, 644215775792401]) |> println
GetSeqnum(ℤInt[0, 1, 2, 7, 44, 361, 3654, 44207, 622552, 10005041,
180713290, 3624270839, 79914671748, 1921576392793, 50040900884366,
1403066801155039, 42142044935535536]) |> println
GetSeqnum(ℤInt[0, 70, 3783, 338475, 40565585, 6061961733,
1083852977811, 225615988054171, 53595807366038234, 14308700593468127485,
4241390625289880226714]) |> println
GetSeqnumUri(ℤInt[1, 1, 7, 37, 241, 2101, 18271, 201097, 2270017,
29668681, 410815351, 6238931821, 101560835377, 1765092183037,
32838929702671, 644215775792401]) |> println
stop = Dates.now()
tdiff = stop - start
println("\nJulia version: " * string(VERSION) )
println(start)
println("Total test time: ", tdiff)
end # module
|
{"hexsha": "a1c06463d72a8c54f421c2b4d2586a979d88337b", "size": 2372, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/perftests.jl", "max_stars_repo_name": "OpenLibMathSeq/IntegerTriangles.jl", "max_stars_repo_head_hexsha": "15cc716086e8d59309b30869f6a4ed59be2dc068", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-07T22:57:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T22:57:34.000Z", "max_issues_repo_path": "test/perftests.jl", "max_issues_repo_name": "OpenLibMathSeq/IntegerTriangles.jl", "max_issues_repo_head_hexsha": "15cc716086e8d59309b30869f6a4ed59be2dc068", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-03-05T21:19:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T10:52:38.000Z", "max_forks_repo_path": "test/perftests.jl", "max_forks_repo_name": "OpenLibMathSeq/IntegerTriangles.jl", "max_forks_repo_head_hexsha": "15cc716086e8d59309b30869f6a4ed59be2dc068", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8965517241, "max_line_length": 83, "alphanum_fraction": 0.7015177066, "num_tokens": 1034}
|
from scipy.spatial.distance import pdist, squareform
from numpy.random import np
from numpy import concatenate
import heapq
# Solves a facility location problem using the algorithm from Jain et al. "Greedy Facility Location Algorithms Analyzed
# using Dual Fitting with Factor-Revealing LP". A facility location algorithm takes as input a graph G=(V,E), where
# each edge is a cost for connecting two vertices (or "cities"). The goal is to choose a subset of vertices (or
# "facilities"), such that each city is connected to exactly one facility, while minimizing the total cost. The
# cost is the sum of selected edge connections, plus predefined cost for opening up each facility. The algorithm
# of Jain et al. is an O(E log E) algorithm with an approximation guarantee of within 1.61 times the optimal
# solution
#
# Let cityFacilityCosts be a list of 3-tuples, where each element (cost,facility,city) is the cost
# of connecting a city to a facility and openFacilityCost is the cost of opening a new facility.
# Returns a dict where retval[facility][city] is present for connected facility-city pairs, with value equal
# to the connection cost.
class FacilityLocation():
def __init__(self, points=None, cityFacilityCosts=None, cityDisallowedCityNeighbors=None):
self.points = points
self.cityFacilityCosts = cityFacilityCosts
self.costs_s = None
self.allFacilities = {}
self.allCities = {}
self.city_disallowed_city_neighbors = cityDisallowedCityNeighbors
if not self.points is None:
for i in range(0,len(self.points)):
self.allFacilities[i] = self.allCities[i] = 1
else:
for c in cityFacilityCosts:
if not c[1] in self.allFacilities: self.allFacilities[c[1]] = 1
if not c[2] in self.allCities: self.allCities[c[2]] = 1
def solve(self, openFacilityCost=None, openFacilityCosts=None, debug=0):
if openFacilityCosts is None:
openFacilityCosts = {}
for f in self.allFacilities:
openFacilityCosts[f] = openFacilityCost
self.openFacilityCosts = openFacilityCosts
if self.cityFacilityCosts is None:
self.compute_costs()
# A priority queue keyed by facility, with value set to the value of alpha when that facility should be opened
self.fac_open_candidates = priority_dict()
# A dict keyed by facility, where each value is a 4-tuple: (S1,S2,n1,n2), where S1 is the sum of offers from
# unconnected cities--excluding alpha terms--to this facility, n1 is the number of offers from unconnected
# cities, S2 is the sum of switching offers from connected cities, and n2 is the number of offers from
# connected cities. Keeping this structure allows us to compute at any given time the value of alpha
# at which a facility should be opened
self.facility_offers = {}
self.facility_cities = {} # Return value, a dict where the key is a facility and each value is a list of connected cities
self.city_facilities = {} # A dict keyed by city, with the value set to the facility that city is connected to
self.fac_city_offers = {} # For each facility f and city c, fac_city_offers[f][c] is an offer to connect c to f
self.city_fac_offers = {} # For each facility f and city c, city_fac_offers[c][f] is an offer to connect c to f
self.facility_disallowed_cities = {} # For each facility f and city c, facility_disallowed_cities[f][c] implies c cannot be connected to f
self.total_cost = 0
# Now sort the object-object pairs by increasing matching cost, and iterate through each one. This enables us
# to monotonically increase alpha, while keeping track of exactly the connection offers that are non-zero
if self.costs_s is None:
self.costs_s = sorted(self.cityFacilityCosts, key=lambda t: t[0])
self.big = 0
for k in range(0, len(self.costs_s)):
if len(self.costs_s[k]) > 0:
self.big = max(self.big, self.costs_s[k][0] + 1)
for k in range(0, len(self.costs_s)):
cost = self.costs_s[k][0]
# For some unopened facility, the total offers it receives reaches the cost of opening a facility before
# alpha=cost. Open that facility
while len(self.fac_open_candidates)>0 and (not self.costs_s[k][2] in self.city_facilities):
alpha = self.fac_open_candidates[self.fac_open_candidates.smallest()]
if alpha > cost:
break
self.open_facility(self.fac_open_candidates.pop_smallest(), alpha, debug)
if k < len(self.costs_s):
if debug > 1: self.debug_offers()
facility = self.costs_s[k][1]
city = self.costs_s[k][2]
self.offer(city, facility, cost, debug)
if debug > 1: self.debug_offers()
while len(self.city_facilities) < len(self.allCities):
if debug > 0: print "Opening facility because some cities were unconnected"
alpha = self.fac_open_candidates[self.fac_open_candidates.smallest()]
self.open_facility(self.fac_open_candidates.pop_smallest(), alpha, debug)
return (self.facility_cities, self.total_cost)
def offer(self, city, facility, cost, debug):
#if 3 in self.facility_disallowed_cities: print " facility_disallowed_cities[3]="+str(self.facility_disallowed_cities[3])
if (not city in self.city_facilities):
if (not facility in self.facility_disallowed_cities or
not city in self.facility_disallowed_cities[facility] or
self.facility_disallowed_cities[facility][city][0] <= 0):
# Offer city as a candidate connection to facility
if debug > 0: print "Offer city " + str(city) + " to facility " + str(facility) + " at cost " + str(cost)
if not facility in self.facility_offers:
self.facility_offers[facility] = [0,0,0,0]
t = self.facility_offers[facility]
t[0] -= cost
t[2] += 1
if not city in self.city_fac_offers: self.city_fac_offers[city]={}
if not facility in self.fac_city_offers: self.fac_city_offers[facility]={}
self.fac_city_offers[facility][city] = cost
self.city_fac_offers[city][facility] = (cost,None,None)
if not facility in self.facility_cities:
if t[2] != 0:
self.fac_open_candidates[facility] = (self.openFacilityCosts[facility]-t[0]-t[1])/t[2]
elif facility in self.fac_open_candidates:
del self.fac_open_candidates[facility]
if facility in self.facility_cities:
# Connect city to existing facility
if debug > 0: print "Connect city " + str(city) + " to existing facility " + str(facility) + " at cost " + str(cost)
self.connect(facility, city, debug)
else:
if (not self.city_disallowed_city_neighbors is None) and (city in self.city_disallowed_city_neighbors):
for c2 in self.city_disallowed_city_neighbors[city]:
if not isinstance(self.city_disallowed_city_neighbors[city],dict) or facility!=self.city_disallowed_city_neighbors[city][c2]:
# Connecting city to facility requires that c2 can't be connected to facility
if not facility in self.facility_disallowed_cities: self.facility_disallowed_cities[facility] = {}
if not c2 in self.facility_disallowed_cities[facility]:
self.facility_disallowed_cities[facility][c2] = [0,0]
self.facility_disallowed_cities[facility][c2][0] += 1
if debug > 0: print " offer " + str(city) + " disallow " + str(c2) + " from " + str(facility) + " count=" + str(self.facility_disallowed_cities[facility][c2][0])
elif (facility in self.facility_disallowed_cities and city in self.facility_disallowed_cities[facility] and
self.facility_disallowed_cities[facility][city][0] > 0):
if debug > 0: print "disallowed offer city " + str(city) + " to facility " + str(facility) + " at cost " + str(cost)
self.facility_disallowed_cities[facility][city][1] = cost
def open_facility(self, facility, alpha, debug):
# Open a new facility and connect all candidate cities
self.facility_cities[facility] = {}
if not facility in self.facility_disallowed_cities: self.facility_disallowed_cities[facility] = {}
self.total_cost += self.openFacilityCosts[facility]
if debug > 0: print "Open facility " + str(facility) + " at alpha " + str(alpha) + ":"
offers = [c for c in self.fac_city_offers[facility]]
for c in self.fac_city_offers[facility]:
# Connect each offering city c to facility
self.connect(facility, c, debug)
del self.fac_city_offers[facility]
def connect(self, facility, c, debug):
#if 3 in self.facility_disallowed_cities: print " facility_disallowed_cities[3]="+str(self.facility_disallowed_cities[3])
# Connect facility to city c. Assumes a connection between them has already been offered
c_ip_j = self.fac_city_offers[facility][c]
if debug > 0: print " connect city " + str(c) + " to facility " + str(facility) + " at cost " + str(c_ip_j)
if c in self.facility_disallowed_cities[facility] and self.facility_disallowed_cities[facility][c][0] > 0: raise Exception("huh?")
del self.city_fac_offers[c][facility]
self.facility_cities[facility][c] = c_ip_j
self.total_cost += c_ip_j
'''
if (not self.city_disallowed_city_neighbors is None) and (c in self.city_disallowed_city_neighbors):
for c2 in self.city_disallowed_city_neighbors[c]:
if not isinstance(self.city_disallowed_city_neighbors[c],dict) or facility!=self.city_disallowed_city_neighbors[c][c2]:
# Connecting city to facility requires that c2 can't be connected to facility
if not c2 in self.facility_disallowed_cities[facility]:
self.facility_disallowed_cities[facility][c2] = [0,0]
self.facility_disallowed_cities[facility][c2][0] += 1
if debug > 0: print " connect " + str(c) + " disallow " + str(c2) + " from " + str(facility) + " count=" + str(self.facility_disallowed_cities[facility][c2][0])
'''
if c in self.city_facilities:
# If c is already connected to a facility f, we must switch it
f=self.city_facilities[c]
self.total_cost -= self.facility_cities[f][c]
if debug > 0: print " requires switch from facility " + str(f) + " at cost " + str(self.facility_cities[f][c])
del self.facility_cities[f][c]
del self.city_facilities[c]
self.city_facilities[c] = facility
bad = []
for f in self.city_fac_offers[c]:
# Update the offers of city c to switch to other unopened facilities
c_i_j = self.fac_city_offers[f][c]
t = self.facility_offers[f]
if not self.city_fac_offers[c][f][1] is None:
t[1] += self.city_fac_offers[c][f][2] # remove c from sum offers to connect unconnected cities to f
t[3] -= 1 # number of offers to connect unconnected cities to f
if t[3] < 0 and debug > 1: raise Exception("huh?")
else:
t[0] += c_i_j # remove c from sum switch offers
t[2] -= 1 # number of switch
if t[2] < 0 and debug > 1: raise Exception("huh?")
if (not f in self.facility_cities) and (c_i_j < c_ip_j):
if (not f in self.facility_disallowed_cities or
not c in self.facility_disallowed_cities[f] or
self.facility_disallowed_cities[f][c][0] <= 0):
if debug > 0: print " offer switch from facility " + str(facility) + "("+str(c_ip_j)+ ") to " + str(f) + "(" + str(c_i_j)+") for city " + str(c)
t[1] += c_ip_j-c_i_j # sum offers to switch connections of already connected cities to f
t[3] += 1 # number of offers to switch connections of already connected cities to f
self.city_fac_offers[c][f] = (c_i_j,f,c_i_j-c_ip_j)
if (not self.city_disallowed_city_neighbors is None) and (c in self.city_disallowed_city_neighbors):
for c2 in self.city_disallowed_city_neighbors[c]:
if not isinstance(self.city_disallowed_city_neighbors[c],dict) or facility!=self.city_disallowed_city_neighbors[c][c2]:
# Connecting city to facility requires that c2 can't be connected to facility
if not f in self.facility_disallowed_cities: self.facility_disallowed_cities[f] = {}
if not c2 in self.facility_disallowed_cities[f]:
self.facility_disallowed_cities[f][c2] = [0,0]
self.facility_disallowed_cities[f][c2][0] += 1
if debug > 0: print " offer switch " + str(c) + " disallow " + str(c2) + " from " + str(f) + " count=" + str(self.facility_disallowed_cities[f][c2][0])
else:
bad.append(f)
if (not self.city_disallowed_city_neighbors is None) and (c in self.city_disallowed_city_neighbors) and f != facility:
for c2 in self.city_disallowed_city_neighbors[c]:
if c2 in self.facility_disallowed_cities[f]:
self.facility_disallowed_cities[f][c2][0] -= 1
if self.facility_disallowed_cities[f][c2][0] == 0 and self.facility_disallowed_cities[f][c2][1] != 0 and not c2 in self.city_facilities:
if debug > 0: print " undisallow " + str(c2) + " from " + str(f) + " ( "+str(c)+" was connected) cost=" + str(self.facility_disallowed_cities[f][c2][1])
self.offer(c2, f, self.facility_disallowed_cities[f][c2][1], debug)
if (not f in self.facility_cities):
self.fac_open_candidates[f] = (self.openFacilityCosts[f]-t[0]-t[1])/t[2] if t[2] > 0 else self.big
for f in bad:
del self.fac_city_offers[f][c]
del self.city_fac_offers[c][f]
def compute_costs(self, type='euclidean'):
C = squareform(pdist(self.points, type))
self.cityFacilityCosts = []
for i in range(0,C.shape[0]):
for j in range(0,C.shape[0]):
self.cityFacilityCosts.append((C[i,j], i,j))
self.costs_s = None
def debug_offers(self):
print ""
print "--facility_cities " + str(self.facility_cities)
print "--city_facilities " + str(self.city_facilities)
print "--fac_open_candidates " + str(self.fac_open_candidates)
print "--fac_city_offers " + str(self.fac_city_offers)
print "--city_fac_offers " + str(self.city_fac_offers)
print "--facility_offers " + str(self.facility_offers)
class priority_dict(dict):
"""Dictionary that can be used as a priority queue.
Keys of the dictionary are items to be put into the queue, and values
are their respective priorities. All dictionary methods work as expected.
The advantage over a standard heapq-based priority queue is
that priorities of items can be efficiently updated (amortized O(1))
using code as 'thedict[item] = new_priority.'
The 'smallest' method can be used to return the object with lowest
priority, and 'pop_smallest' also removes it.
The 'sorted_iter' method provides a destructive sorted iterator.
"""
def __init__(self, *args, **kwargs):
super(priority_dict, self).__init__(*args, **kwargs)
self._rebuild_heap()
def _rebuild_heap(self):
self._heap = [(v, k) for k, v in self.iteritems()]
heapq.heapify(self._heap)
def smallest(self):
"""Return the item with the lowest priority.
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heap[0]
while k not in self or self[k] != v:
heapq.heappop(heap)
v, k = heap[0]
return k
def pop_smallest(self):
"""Return the item with the lowest priority and remove it.
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heapq.heappop(heap)
while k not in self or self[k] != v:
v, k = heapq.heappop(heap)
del self[k]
return k
def __setitem__(self, key, val):
# We are not going to remove the previous value from the heap,
# since this would have a cost O(n).
super(priority_dict, self).__setitem__(key, val)
if len(self._heap) < 2 * len(self):
heapq.heappush(self._heap, (val, key))
else:
# When the heap grows larger than 2 * len(self), we rebuild it
# from scratch to avoid wasting too much memory.
self._rebuild_heap()
def setdefault(self, key, val):
if key not in self:
self[key] = val
return val
return self[key]
def update(self, *args, **kwargs):
# Reimplementing dict.update is tricky -- see e.g.
# http://mail.python.org/pipermail/python-ideas/2007-May/000744.html
# We just rebuild the heap from scratch after passing to super.
super(priority_dict, self).update(*args, **kwargs)
self._rebuild_heap()
def sorted_iter(self):
"""Sorted iterator of the priority dictionary items.
Beware: this will destroy elements as they are returned.
"""
while self:
yield self.pop_smallest()
def filter_by_perm(user, queryset, perm):
bad_ids = []
for a in queryset:
if not user.has_perm('read', a):
bad_ids.append(a.pk)
return queryset.exclude(id__in=bad_ids) if len(bad_ids) > 0 else queryset
'''
import matplotlib.pyplot as plt
import math
def test_facility_location(numPts=20, ptDim=2, openCosts=None, p1=None, p2=None):
if openCosts is None:
openCosts = [1,2,4,8,16,32,64,128,256]
for i in range(0,len(openCosts)): openCosts[i] *= numPts/20.0
w = math.ceil(math.sqrt(len(openCosts)))
fig = plt.figure()
if p1 is None: p1 = 2.5 * np.random.randn(numPts, ptDim) + 3
if p2 is None: p2 = 2.5 * np.random.randn(numPts, ptDim) + -3
p = np.concatenate((p1,p2))
fac = FacilityLocation(points=p)
for n in range(0,len(openCosts)):
[facilities,cost]=fac.solve(openFacilityCost=openCosts[n])
ax = fig.add_subplot(100*w + 10*w + n+1)
pt1 = ax.plot(p1[:,0], p1[:,1], 'b.')
pt2 = ax.plot(p2[:,0], p2[:,1], 'r.')
for i in facilities:
for j in facilities[i]:
ax.plot([p[i,0],p[j,0]], [p[i,1],p[j,1]], 'g')
ax.set_title('openCost='+str(openCosts[n]) + " totalCost="+str(cost))
fig.show()
return (p1,p2)
'''
|
{"hexsha": "189672695fc9b1e5493b5bbb1adb82f628b378a1", "size": 19977, "ext": "py", "lang": "Python", "max_stars_repo_path": "crowdsourcing/util/facility_location.py", "max_stars_repo_name": "sbranson/online_crowdsourcing", "max_stars_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-08-14T21:14:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T09:32:37.000Z", "max_issues_repo_path": "crowdsourcing/util/facility_location.py", "max_issues_repo_name": "sbranson/online_crowdsourcing", "max_issues_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crowdsourcing/util/facility_location.py", "max_forks_repo_name": "sbranson/online_crowdsourcing", "max_forks_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-09T08:20:27.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-09T08:20:27.000Z", "avg_line_length": 55.6462395543, "max_line_length": 198, "alphanum_fraction": 0.6084497172, "include": true, "reason": "from numpy,from scipy", "num_tokens": 4839}
|
%---------------------------Shape-----------------------------
\section{Edge Ratio\label{s:hex-edge-ratio}}
The edge ratio quality metric is the ratio of the longest to
shortest edge of a hexahedron:
\[
q = \frac{L_{\max}}{L_{\min}}.
\]
\hexmetrictable{edge ratio}%
{$1$}% Dimension
{--}% Acceptable range
{$[1,DBL\_MAX]$}% Normal range
{$[1,DBL\_MAX]$}% Full range
{$1$}% Cube
{--}% Citation
{v\_hex\_edge\_ratio}% Verdict function name
|
{"hexsha": "481350b32b9755dc940aa579a9a5af0786d9e925", "size": 677, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Utilities/verdict/docs/VerdictUserManual2007/HexEdgeRatio.tex", "max_stars_repo_name": "Lin1225/vtk_v5.10.0", "max_stars_repo_head_hexsha": "b54ac74f4716572862365fbff28cd0ecb8d08c3d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-06-01T00:15:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T10:49:02.000Z", "max_issues_repo_path": "Utilities/verdict/docs/VerdictUserManual2007/HexEdgeRatio.tex", "max_issues_repo_name": "Armand0s/homemade_vtk", "max_issues_repo_head_hexsha": "6bc7b595a4a7f86e8fa969d067360450fa4e0a6a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-17T11:40:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T21:23:25.000Z", "max_forks_repo_path": "Utilities/verdict/docs/VerdictUserManual2007/HexEdgeRatio.tex", "max_forks_repo_name": "Armand0s/homemade_vtk", "max_forks_repo_head_hexsha": "6bc7b595a4a7f86e8fa969d067360450fa4e0a6a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-03-23T21:13:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-03T11:15:39.000Z", "avg_line_length": 37.6111111111, "max_line_length": 67, "alphanum_fraction": 0.3840472674, "num_tokens": 144}
|
! ------------------------------------------------------------------------------
!
! Main Program mtclim
!
! author: Johannes Brenner
!
! created: 03.08.2016
! last update: 30.05.2017
!
! ------------------------------------------------------------------------------
!
program main
!
use mo_kind, only: i4, dp
use mo_ansi_colors, only: color, c_red, c_green
use mo_mtclim
!
!----------------------------------------
! variable declarations
!
integer(i4) :: i
real(dp), dimension(:), allocatable :: parameterset
! path namelists
character(len=*), parameter :: fileini = "ini"
character(len=*), parameter :: fileparameters = "parameters"
! ini namefile
! input, output file names
character(265) :: in_meteo, outprefix
namelist / io_ini / in_meteo, outprefix
!
! controls
integer(i4) :: nhead, ndays
integer(i4) :: indewpt, outhum, inyear, lwrad, netlwrad
!
namelist / control_ini / nhead, ndays, indewpt, outhum, inyear, lwrad, netlwrad
!
! site characteristics
real(dp) :: base_elev,base_isoh,site_lat,site_elev,site_slp,site_asp
real(dp) :: site_isoh,site_ehoriz,site_whoriz,tmax_lr,tmin_lr
!
namelist / parameters_ini / base_elev,base_isoh,site_lat,site_elev,site_slp,site_asp,&
site_isoh,site_ehoriz,site_whoriz,tmax_lr,tmin_lr
!
! parameters
!
real(dp) :: TBASE,ABASE,C,B0,B1,B2,RAIN_SCALAR,DIF_ALB,SC_INT,SC_SLOPE, &
SNOW_TCRIT,SNOW_TRATE,TDAYCOEF,LWAVE_COR
namelist / parameters / TBASE,ABASE,C,B0,B1,B2,RAIN_SCALAR,DIF_ALB,SC_INT,SC_SLOPE, &
SNOW_TCRIT,SNOW_TRATE,TDAYCOEF,LWAVE_COR
!
! read namelists in ini file
!
integer(i4), dimension(:), allocatable :: yday !/* array of yearday values */
real(dp), dimension(:), allocatable :: tmax !/* array of base maximum temperature values */
real(dp), dimension(:), allocatable :: tmin !/* array of base minimum temperature values */
real(dp), dimension(:), allocatable :: prcp !/* array of base daily precipitation values */
!real(dp), dimension(:), allocatable :: swrad_obs !/* array of observed incoming shortwave radiation values */
!real(dp), dimension(:), allocatable :: lwrad_obs !/* array of observed incoming longwave radiation values */
!real(dp), dimension(:), allocatable :: netlwrad_obs !/* array of observed net outgoing longtwave radiation values */
!real(dp), dimension(:), allocatable :: tdew !/* array of base dewpoint temperature values */
real(dp), dimension(:), allocatable :: s_tmax !/* array of site tmax values */
real(dp), dimension(:), allocatable :: s_tmin !/* array of site tmin values */
real(dp), dimension(:), allocatable :: s_tday !/* array of site daylight temperature values */
real(dp), dimension(:), allocatable :: s_prcp !/* array of site prcp values */
real(dp), dimension(:), allocatable :: s_hum !/* array of site humidity values (VPD or VP, Pa) */
real(dp), dimension(:), allocatable :: s_srad !/* array of site shortwave radiation values */
real(dp), dimension(:), allocatable :: s_srad_dif !/* array of site shortwave diffuse radiation values */
real(dp), dimension(:), allocatable :: s_lrad !/* array of site longwave radiation values */
real(dp), dimension(:), allocatable :: s_lradnet !/* array of site net longwave radiation values */
real(dp), dimension(:), allocatable :: s_dayl !/* array of site daylength values */
real(dp), dimension(:), allocatable :: s_swe !/* array of site snow water equivalent values (cm) */
!
logical :: isgood, allgood
allgood = .true.
!
Write(*,*) ''
Write(*,*) 'Test mo_mtclim.f90'
!
! read out namefiles
!
open(700, file=fileini)
read(700, nml=io_ini)
read(700, nml=control_ini)
read(700, nml=parameters_ini)
close(700)
!
open(700, file=fileparameters)
read(700, nml=parameters)
close(700)
!
allocate(parameterset(14))
!
parameterset(1) = TBASE
parameterset(2) = ABASE
parameterset(3) = C
parameterset(4) = B0
parameterset(5) = B1
parameterset(6) = B2
parameterset(7) = RAIN_SCALAR
parameterset(8) = DIF_ALB
parameterset(9) = SC_INT
parameterset(10) = SC_SLOPE
parameterset(11) = SNOW_TCRIT
parameterset(12) = SNOW_TRATE
parameterset(13) = TDAYCOEF
parameterset(14) = LWAVE_COR
!
!----------------------------------------
! inital progress indicator
print *, "Starting MTCLIM version 4.3\n"
!
!----------------------------------------
! allocate space in the data arrays for input and output data
!
!allocate(year(ndays))
allocate(yday(ndays))
allocate(tmax(ndays))
allocate(tmin(ndays))
allocate(prcp(ndays))
!
!allocate(swrad_obs(ndays))
!allocate(lwrad_obs(ndays))
!allocate(netlwrad_obs(ndays))
!
allocate(s_swe(ndays))
!
allocate(s_tmin(ndays))
allocate(s_tmax(ndays))
allocate(s_tday(ndays))
!
allocate(s_hum(ndays))
allocate(s_srad(ndays))
allocate(s_srad_dif(ndays))
allocate(s_lrad(ndays))
allocate(s_lradnet(ndays))
allocate(s_dayl(ndays))
!
!---------------------------------------
! read meteorological data from input file into data arrays
!---------------------------------------
! read meteorological data from input file into data arrays
open(unit=1234, file=in_meteo, status='old', action='read')
do i=1, ndays
read(1234, '(i3,f9.2,f9.2,f9.2)') yday(i), tmax(i), tmin(i), prcp(i)
enddo
close(1234)
!
!----------------------------------------
! estimate daily air temperatures
call calc_tair(tmin, tmax, ndays, site_elev, base_elev, TDAYCOEF, tmin_lr, tmax_lr, s_tmax, s_tmin, s_tday)
print *, "Completed calc_tair()"
!
!----------------------------------------
! estimate daily precipitation
s_prcp = calc_prcp(prcp, ndays, site_isoh, base_isoh)
print *, "Completed calc_prcp()"
!----------------------------------------
! estimate daily snowpack
s_swe = snowpack(s_tmin, s_prcp, yday, ndays, SNOW_TCRIT, SNOW_TRATE)
print *, "Completed snowpack()"
!
!----------------------------------------
! estimate srad and humidity with iterative algorithm
! without Tdew input data, an iterative estimation of shortwave radiation and humidity is required
call calc_srad_humidity_iterative( &
yday, s_tmax, s_tmin, s_tday, s_prcp, s_swe, &
s_hum, s_srad, s_srad_dif ,s_lrad, s_lradnet, s_dayl, &
ndays=ndays, outhum=outhum, lwrad=lwrad, netlwrad=netlwrad, &
site_lat=site_lat, site_elev=site_elev, site_asp=site_asp, site_slp=site_slp, &
site_ehoriz=site_ehoriz, site_whoriz=site_whoriz,&
TBASE=TBASE, ABASE=ABASE, C=C, B0=B0, B1=B1, B2=B2, &
RAIN_SCALAR=RAIN_SCALAR, DIF_ALB=DIF_ALB, LWAVE_COR=LWAVE_COR)
print *, "Completed radiation algorithm"
!----------------------------------------
! write output file
open(unit=1234, file=outprefix, status='unknown', action='write')
write(1234, '(A3,A9,A9,A9,A9,A9,A9,A9,A9,A9,A9)') "yd", "tmax", "tmin", "tday", "prcp", "VPD", &
"swrad", "swrad_dif", "lwrad", "lwradnet", "daylen"
write(1234, '(A3,A9,A9,A9,A9,A9,A9,A9,A9,A9,A9)') " ","(deg C)", "(deg C)", "(deg C)", "(cm)", "(Pa)", &
"(W m-2)", "(W m-2)", "(W m-2)", "(W m-2)", "(s)"
do i=1, ndays
write(1234, '(i3,f9.2,f9.2,f9.2,f9.2,f9.2,f9.2,f9.2,f9.2,f9.2,f9.2)') yday(i), s_tmax(i), s_tmin(i), s_tday(i),&
s_prcp(i), s_hum(i), s_srad(i), s_srad_dif(i), s_lrad(i), s_lradnet(i), s_dayl(i)
end do
close(unit=1234)
print *, "Results written in ", outprefix
!----------------------------------------
! test mtclim eval
!call eval_mtclim (parameterset, yday, tmin, tmax, prcp, ndays, &
! outhum, lwrad, netlwrad &
! s_tmax, s_tmin, s_tday, s_prcp, &
! s_hum, s_srad, s_srad_dif, s_lrad, s_lradnet, s_dayl, &
! site_elev, base_elev, tmin_lr, tmax_lr, site_isoh, base_isoh, &
! site_lat, site_asp, site_slp, site_ehoriz, site_whoriz)
!----------------------------------------
! test mtclim objective swrad
!if (observ .eq. 1_i4) then
! print *, "RMSE is ", objective_swrd(parameterset)
!end if
!
!----------------------------------------
! free previously allocated memory before returning
!
deallocate(yday)
deallocate(tmax)
deallocate(tmin)
deallocate(prcp)
!
deallocate(s_swe)
!
deallocate(s_tmin)
deallocate(s_tmax)
deallocate(s_tday)
!
deallocate(s_hum)
deallocate(s_srad)
deallocate(s_srad_dif)
deallocate(s_lrad)
deallocate(s_lradnet)
deallocate(s_dayl)
!
!------------------------------------------
isgood = .true.
! Finish
allgood = allgood .and. isgood
if (allgood) then
write(*,*) 'mo_mtclim ', color('o.k.', c_green)
else
write(*,*) 'mo_mtclim ', color('failed!', c_red)
endif
!
end program main
|
{"hexsha": "8d488e4b065f4d1f1602834ec3f7cb1eb149644f", "size": 8504, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/test_mo_mtclim/main.f90", "max_stars_repo_name": "mcuntz/jams_fortran", "max_stars_repo_head_hexsha": "a1dcab78eda21f930aa6c8c3633598a522f01659", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-02-28T00:14:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T23:32:41.000Z", "max_issues_repo_path": "test/test_mo_mtclim/main.f90", "max_issues_repo_name": "mcuntz/jams_fortran", "max_issues_repo_head_hexsha": "a1dcab78eda21f930aa6c8c3633598a522f01659", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-09T15:33:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T16:16:09.000Z", "max_forks_repo_path": "test/test_mo_mtclim/main.f90", "max_forks_repo_name": "mcuntz/jams_fortran", "max_forks_repo_head_hexsha": "a1dcab78eda21f930aa6c8c3633598a522f01659", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-09T08:08:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-09T08:08:56.000Z", "avg_line_length": 36.0338983051, "max_line_length": 121, "alphanum_fraction": 0.632173095, "num_tokens": 2588}
|
using OpenQuantumBase, Test
import LinearAlgebra: Diagonal
funcs = [(x) -> x, (x) -> 1 - x]
test_diag_operator = OpenQuantumBase.DiagonalOperator(funcs)
@test test_diag_operator(0.5) == OpenQuantumBase.Diagonal([0.5, 0.5])
test_geometric_operator = OpenQuantumBase.GeometricOperator(((x) -> -1.0im * x))
@test test_geometric_operator(0.5) == [0 0.5im; -0.5im 0]
@test_throws ArgumentError OpenQuantumBase.GeometricOperator((x) -> x, (x) -> 1 - x)
H = AdiabaticFrameHamiltonian(funcs, [])
@test H(2, 0.5) ≈ Diagonal([π, π])
H = AdiabaticFrameHamiltonian(funcs, nothing)
@test H(2, 0.0) ≈ Diagonal([0, 2π])
H = AdiabaticFrameHamiltonian((s)->[s, 1 - s], nothing)
@test H(2, 0.5) ≈ Diagonal([π, π])
# in_place update for matrices
du = [1.0 + 0.0im 0; 0 0]
u = PauliVec[2][1]
ρ = u * u'
hres = Diagonal([0, 2π])
H(du, ρ, 10, 0.0)
@test du ≈ -1.0im * (hres * ρ - ρ * hres)
dθ = (s) -> π / 2
gap = (s) -> (cos(2 * π * s) + 1) / 2
H = AdiabaticFrameHamiltonian([(x) -> -gap(x), (x) -> gap(x)], [dθ])
u = PauliVec[2][1]
ρ = u * u'
@test get_cache(H) == zeros(eltype(H), 2, 2)
@test size(H) == (2, 2)
@test H(10, 0.5) ≈ π * σx / 20
@test H(5, 0.0) ≈ π * σx / 10 - 2π * σz
# in_place update for vector
cache = get_cache(H)
update_cache!(cache, H, 10, 0.0)
@test cache ≈ -1.0im * (π * σx / 20 - 2π * σz)
update_cache!(cache, H, 10, 0.5)
@test cache ≈ -1.0im * (π * σx / 20)
# in_place update for matrices
du = [1.0 + 0.0im 0; 0 0]
hres = π * σx / 20 - 2π * σz
H(du, ρ, 10, 0.0)
@test du ≈ -1.0im * (hres * ρ - ρ * hres)
|
{"hexsha": "5b55dc303877f855568925ada599c1cb16293862", "size": 1517, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/hamiltonian/adiabatic_frame_hamiltonian.jl", "max_stars_repo_name": "neversakura/QTBase.jl", "max_stars_repo_head_hexsha": "937a3236f1b9578bc223b21817dec2e7a8512ee2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-02T18:11:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-27T03:57:26.000Z", "max_issues_repo_path": "test/hamiltonian/adiabatic_frame_hamiltonian.jl", "max_issues_repo_name": "neversakura/QTBase.jl", "max_issues_repo_head_hexsha": "937a3236f1b9578bc223b21817dec2e7a8512ee2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2020-12-01T20:10:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T02:33:37.000Z", "max_forks_repo_path": "test/hamiltonian/adiabatic_frame_hamiltonian.jl", "max_forks_repo_name": "USCqserver/QTBase.jl", "max_forks_repo_head_hexsha": "937a3236f1b9578bc223b21817dec2e7a8512ee2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-10T01:26:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T18:55:07.000Z", "avg_line_length": 30.34, "max_line_length": 84, "alphanum_fraction": 0.6058009229, "num_tokens": 676}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct IoK8sApimachineryPkgApisMetaV1ObjectMeta <: SwaggerModel
annotations::Any # spec type: Union{ Nothing, Dict{String, String} } # spec name: annotations
clusterName::Any # spec type: Union{ Nothing, String } # spec name: clusterName
creationTimestamp::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApisMetaV1Time } # spec name: creationTimestamp
deletionGracePeriodSeconds::Any # spec type: Union{ Nothing, Int64 } # spec name: deletionGracePeriodSeconds
deletionTimestamp::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApisMetaV1Time } # spec name: deletionTimestamp
finalizers::Any # spec type: Union{ Nothing, Vector{String} } # spec name: finalizers
generateName::Any # spec type: Union{ Nothing, String } # spec name: generateName
generation::Any # spec type: Union{ Nothing, Int64 } # spec name: generation
initializers::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApisMetaV1Initializers } # spec name: initializers
labels::Any # spec type: Union{ Nothing, Dict{String, String} } # spec name: labels
managedFields::Any # spec type: Union{ Nothing, Vector{IoK8sApimachineryPkgApisMetaV1ManagedFieldsEntry} } # spec name: managedFields
name::Any # spec type: Union{ Nothing, String } # spec name: name
namespace::Any # spec type: Union{ Nothing, String } # spec name: namespace
ownerReferences::Any # spec type: Union{ Nothing, Vector{IoK8sApimachineryPkgApisMetaV1OwnerReference} } # spec name: ownerReferences
resourceVersion::Any # spec type: Union{ Nothing, String } # spec name: resourceVersion
selfLink::Any # spec type: Union{ Nothing, String } # spec name: selfLink
uid::Any # spec type: Union{ Nothing, String } # spec name: uid
function IoK8sApimachineryPkgApisMetaV1ObjectMeta(;annotations=nothing, clusterName=nothing, creationTimestamp=nothing, deletionGracePeriodSeconds=nothing, deletionTimestamp=nothing, finalizers=nothing, generateName=nothing, generation=nothing, initializers=nothing, labels=nothing, managedFields=nothing, name=nothing, namespace=nothing, ownerReferences=nothing, resourceVersion=nothing, selfLink=nothing, uid=nothing)
o = new()
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("annotations"), annotations)
setfield!(o, Symbol("annotations"), annotations)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("clusterName"), clusterName)
setfield!(o, Symbol("clusterName"), clusterName)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("creationTimestamp"), creationTimestamp)
setfield!(o, Symbol("creationTimestamp"), creationTimestamp)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("deletionGracePeriodSeconds"), deletionGracePeriodSeconds)
setfield!(o, Symbol("deletionGracePeriodSeconds"), deletionGracePeriodSeconds)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("deletionTimestamp"), deletionTimestamp)
setfield!(o, Symbol("deletionTimestamp"), deletionTimestamp)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("finalizers"), finalizers)
setfield!(o, Symbol("finalizers"), finalizers)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("generateName"), generateName)
setfield!(o, Symbol("generateName"), generateName)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("generation"), generation)
setfield!(o, Symbol("generation"), generation)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("initializers"), initializers)
setfield!(o, Symbol("initializers"), initializers)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("labels"), labels)
setfield!(o, Symbol("labels"), labels)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("managedFields"), managedFields)
setfield!(o, Symbol("managedFields"), managedFields)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("name"), name)
setfield!(o, Symbol("name"), name)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("namespace"), namespace)
setfield!(o, Symbol("namespace"), namespace)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("ownerReferences"), ownerReferences)
setfield!(o, Symbol("ownerReferences"), ownerReferences)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("resourceVersion"), resourceVersion)
setfield!(o, Symbol("resourceVersion"), resourceVersion)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("selfLink"), selfLink)
setfield!(o, Symbol("selfLink"), selfLink)
validate_property(IoK8sApimachineryPkgApisMetaV1ObjectMeta, Symbol("uid"), uid)
setfield!(o, Symbol("uid"), uid)
o
end
end # type IoK8sApimachineryPkgApisMetaV1ObjectMeta
const _property_map_IoK8sApimachineryPkgApisMetaV1ObjectMeta = Dict{Symbol,Symbol}(Symbol("annotations")=>Symbol("annotations"), Symbol("clusterName")=>Symbol("clusterName"), Symbol("creationTimestamp")=>Symbol("creationTimestamp"), Symbol("deletionGracePeriodSeconds")=>Symbol("deletionGracePeriodSeconds"), Symbol("deletionTimestamp")=>Symbol("deletionTimestamp"), Symbol("finalizers")=>Symbol("finalizers"), Symbol("generateName")=>Symbol("generateName"), Symbol("generation")=>Symbol("generation"), Symbol("initializers")=>Symbol("initializers"), Symbol("labels")=>Symbol("labels"), Symbol("managedFields")=>Symbol("managedFields"), Symbol("name")=>Symbol("name"), Symbol("namespace")=>Symbol("namespace"), Symbol("ownerReferences")=>Symbol("ownerReferences"), Symbol("resourceVersion")=>Symbol("resourceVersion"), Symbol("selfLink")=>Symbol("selfLink"), Symbol("uid")=>Symbol("uid"))
const _property_types_IoK8sApimachineryPkgApisMetaV1ObjectMeta = Dict{Symbol,String}(Symbol("annotations")=>"Dict{String, String}", Symbol("clusterName")=>"String", Symbol("creationTimestamp")=>"IoK8sApimachineryPkgApisMetaV1Time", Symbol("deletionGracePeriodSeconds")=>"Int64", Symbol("deletionTimestamp")=>"IoK8sApimachineryPkgApisMetaV1Time", Symbol("finalizers")=>"Vector{String}", Symbol("generateName")=>"String", Symbol("generation")=>"Int64", Symbol("initializers")=>"IoK8sApimachineryPkgApisMetaV1Initializers", Symbol("labels")=>"Dict{String, String}", Symbol("managedFields")=>"Vector{IoK8sApimachineryPkgApisMetaV1ManagedFieldsEntry}", Symbol("name")=>"String", Symbol("namespace")=>"String", Symbol("ownerReferences")=>"Vector{IoK8sApimachineryPkgApisMetaV1OwnerReference}", Symbol("resourceVersion")=>"String", Symbol("selfLink")=>"String", Symbol("uid")=>"String")
Base.propertynames(::Type{ IoK8sApimachineryPkgApisMetaV1ObjectMeta }) = collect(keys(_property_map_IoK8sApimachineryPkgApisMetaV1ObjectMeta))
Swagger.property_type(::Type{ IoK8sApimachineryPkgApisMetaV1ObjectMeta }, name::Symbol) = Union{Nothing,eval(Meta.parse(_property_types_IoK8sApimachineryPkgApisMetaV1ObjectMeta[name]))}
Swagger.field_name(::Type{ IoK8sApimachineryPkgApisMetaV1ObjectMeta }, property_name::Symbol) = _property_map_IoK8sApimachineryPkgApisMetaV1ObjectMeta[property_name]
function check_required(o::IoK8sApimachineryPkgApisMetaV1ObjectMeta)
true
end
function validate_property(::Type{ IoK8sApimachineryPkgApisMetaV1ObjectMeta }, name::Symbol, val)
end
|
{"hexsha": "282c8d9f00f8759a67032368511a74b14883fa1b", "size": 7639, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/api/model_IoK8sApimachineryPkgApisMetaV1ObjectMeta.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Kuber.jl-87e52247-8a1b-5e01-9430-8fbcac83a23a", "max_stars_repo_head_hexsha": "dd6bdfe80b497256c3ffc86cd283785d613c24a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/api/model_IoK8sApimachineryPkgApisMetaV1ObjectMeta.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Kuber.jl-87e52247-8a1b-5e01-9430-8fbcac83a23a", "max_issues_repo_head_hexsha": "dd6bdfe80b497256c3ffc86cd283785d613c24a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/api/model_IoK8sApimachineryPkgApisMetaV1ObjectMeta.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Kuber.jl-87e52247-8a1b-5e01-9430-8fbcac83a23a", "max_forks_repo_head_hexsha": "dd6bdfe80b497256c3ffc86cd283785d613c24a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 99.2077922078, "max_line_length": 888, "alphanum_fraction": 0.7733996596, "num_tokens": 1958}
|
import numpy as np
from classification_model.config import core
from classification_model.processing import preprocessors as pp
def test_FeatureKeeper(pipeline_inputs):
"""Testing FeatureKeeper function"""
# Given
feature_keeper = pp.FeatureKeeper(
variables_to_keep=core.config.model_config.VARIABLES_TO_KEEP
)
X_train, X_test, y_train, y_test = pipeline_inputs
# When
subject = feature_keeper.fit_transform(X_train, y_train)
# Then
assert list(subject.columns) == core.config.model_config.VARIABLES_TO_KEEP
def test_CategoricalGrouping(pipeline_inputs):
"""Testing CategoricalGrouping function"""
# Given
categorical_grouping = pp.CategoricalGrouping(
config_dict=core.config.model_config.VARIABLES_TO_GROUP
)
X_train, X_test, y_train, y_test = pipeline_inputs
# When
subject = categorical_grouping.fit_transform(X_train, y_train)
cat_len = []
t_cat_len = []
for k in core.config.model_config.VARIABLES_TO_GROUP.keys():
for i, v in core.config.model_config.VARIABLES_TO_GROUP[k].items():
cat_len.append(sum([X_train.loc[X_train[k] == j].shape[0] for j in v]))
t_cat_len.append(subject.loc[subject[k] == i].shape[0])
assert_list = [cat_len[i] == t_cat_len[i] for i in range(len(cat_len))]
# Then
assert sum(assert_list) == len(cat_len)
def test_RareCategoriesGrouping(pipeline_inputs):
"""Testing RareCategoriesGrouping function"""
# Given
rare_categories_grouping = pp.RareCategoriesGrouping(
threshold=core.config.model_config.VARIABLES_THRESHOLD
)
X_train, X_test, y_train, y_test = pipeline_inputs
# When
subject = rare_categories_grouping.fit_transform(X_train, y_train)
cat_dict_lower = {}
cat_dict_subject = {}
print(rare_categories_grouping.threshold)
for k, v in rare_categories_grouping.threshold.items():
cat_list = X_train[k].value_counts(normalize=True)
cat_dict_lower[k] = cat_list[cat_list < float(v)].index
cat_dict_subject[k] = subject[k].value_counts(normalize=True).index
print(cat_dict_subject)
print("********")
print(cat_dict_lower)
# Then
for k in rare_categories_grouping.threshold.keys():
assert "Rare" in list(cat_dict_subject[k])
for i in cat_dict_lower[k]:
assert i not in cat_dict_subject[k]
def test_MissingImputer(pipeline_inputs):
"""Testing RareCategoriesGrouping function"""
# Given
missing_imputer = pp.MissingImputer(
numerical_variables=core.config.model_config.NUMERICAL_VARIABLES
)
X_train, X_test, y_train, y_test = pipeline_inputs
# When
subject = missing_imputer.fit_transform(X_train, y_train)
# Then
assert np.sum(subject.isnull().sum().values) == 0
|
{"hexsha": "517aa659f0b5f70e91210f192f7281a3944e0616", "size": 2822, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_processing/test_preprocessors.py", "max_stars_repo_name": "Windact/classification_model", "max_stars_repo_head_hexsha": "e42e8099069467eeb21f94f6777eb34e68906500", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_processing/test_preprocessors.py", "max_issues_repo_name": "Windact/classification_model", "max_issues_repo_head_hexsha": "e42e8099069467eeb21f94f6777eb34e68906500", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_processing/test_preprocessors.py", "max_forks_repo_name": "Windact/classification_model", "max_forks_repo_head_hexsha": "e42e8099069467eeb21f94f6777eb34e68906500", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.010989011, "max_line_length": 83, "alphanum_fraction": 0.7104890149, "include": true, "reason": "import numpy", "num_tokens": 669}
|
import pygame, sys
import numpy as np
import subprocess
import time
import json
WIDTH = 600
HEIGHT = 600+100
LINE_WIDTH = 15
WIN_LINE_WIDTH = 15
BOARD_ROWS = 3
BOARD_COLS = 3
SQUARE_SIZE = 200
CIRCLE_RADIUS = 60
CIRCLE_WIDTH = 15
CROSS_WIDTH = 25
SPACE = 55
RED = (255, 0, 0)
BG_COLOR = (20, 200, 160)
LINE_COLOR = (23, 145, 135)
CIRCLE_COLOR = (239, 231, 200)
CROSS_COLOR = (66, 66, 66)
PLAYER_LOOKUP = {
1 : 'ai',
2 : 'user'
}
CONTRACT_ADDRESS = '0x4e136383b01c19f1ba82a2bda668bf5b3a24f71ff9dab4ff1b129f3c74ceb20'
CONTRACT_VIEW_BOARD = f"starknet call --network=alpha --address {CONTRACT_ADDRESS} --abi tic_contract_abi.json --function view_board".split(" ")
CONTRACT_USER_MOVE = f"starknet invoke --network=alpha --address {CONTRACT_ADDRESS} --abi tic_contract_abi.json --function user_move --inputs".split(" ")
CONTRACT_RESET_BOARD = f"starknet invoke --network=alpha --address {CONTRACT_ADDRESS} --abi tic_contract_abi.json --function reset_board".split(" ")
STARKNET_TX_STATUS = f"starknet tx_status --network=alpha --id="
def draw_lines():
pygame.draw.line( screen, LINE_COLOR, (0, SQUARE_SIZE), (WIDTH, SQUARE_SIZE), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (0, 2 * SQUARE_SIZE), (WIDTH, 2 * SQUARE_SIZE), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (SQUARE_SIZE, 0), (SQUARE_SIZE, HEIGHT), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (2 * SQUARE_SIZE, 0), (2 * SQUARE_SIZE, HEIGHT), LINE_WIDTH )
def draw_figures():
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
if board[row][col] == 1:
pygame.draw.circle( screen, CIRCLE_COLOR, (int( col * SQUARE_SIZE + SQUARE_SIZE//2 ), int( row * SQUARE_SIZE + SQUARE_SIZE//2 )), CIRCLE_RADIUS, CIRCLE_WIDTH )
elif board[row][col] == 2:
pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SPACE), CROSS_WIDTH )
pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), CROSS_WIDTH )
def mark_square(row, col, player):
board[row][col] = player
def available_square(row, col):
return board[row][col] == 0
def is_board_full():
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
if board[row][col] == 0:
return False
return True
def check_win(player):
for col in range(BOARD_COLS):
if board[0][col] == player and board[1][col] == player and board[2][col] == player:
draw_vertical_winning_line(col, player)
return True
for row in range(BOARD_ROWS):
if board[row][0] == player and board[row][1] == player and board[row][2] == player:
draw_horizontal_winning_line(row, player)
return True
if board[2][0] == player and board[1][1] == player and board[0][2] == player:
draw_asc_diagonal(player)
return True
if board[0][0] == player and board[1][1] == player and board[2][2] == player:
draw_desc_diagonal(player)
return True
return False
def draw_vertical_winning_line(col, player):
posX = col * SQUARE_SIZE + SQUARE_SIZE//2
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (posX, 15), (posX, HEIGHT - 15), LINE_WIDTH )
def draw_horizontal_winning_line(row, player):
posY = row * SQUARE_SIZE + SQUARE_SIZE//2
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, posY), (WIDTH - 15, posY), WIN_LINE_WIDTH )
def draw_asc_diagonal(player):
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, HEIGHT - 15), (WIDTH - 15, 15), WIN_LINE_WIDTH )
def draw_desc_diagonal(player):
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, 15), (WIDTH - 15, HEIGHT - 15), WIN_LINE_WIDTH )
def restart():
screen.fill( BG_COLOR )
draw_lines()
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
board[row][col] = 0
# text box initialization
def update_message (message):
font = pygame.font.Font(None, 24)
text = font.render(message, 1, (255, 255, 255))
text_rect = text.get_rect(center =(WIDTH / 2, HEIGHT-50))
screen.fill ((0, 0, 0), (0, WIDTH, HEIGHT, 100))
screen.blit(text, text_rect)
pygame.display.update()
def command_check_tx_status(tx_id):
command = STARKNET_TX_STATUS+tx_id
return command.split(' ')
def subprocess_run (cmd):
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result = result.stdout.decode('utf-8')[:-1] # remove trailing newline
return result
# game param initialization
board = np.zeros( (BOARD_ROWS, BOARD_COLS) )
player = 2
game_over = False
# screen initialization
pygame.init()
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'TIC TAC TOE' )
screen.fill( BG_COLOR )
draw_lines()
# reset game board on StarkNet
result = subprocess_run(CONTRACT_RESET_BOARD)
update_message("Resetting the board on StarkNet...")
print(result)
reset_tx_id = result.split(' ')[-1]
while(True): # checking tx status every 10 seconds
cmd = command_check_tx_status(reset_tx_id)
result = subprocess_run(cmd)
result_json = json.loads(result)
tx_status = result_json['tx_status']
time.sleep(10)
if (tx_status == 'ACCEPTED_ONCHAIN'):
update_message(f"Reset successful. It's {PLAYER_LOOKUP[player]}'s turn.")
print(tx_status)
break
else:
print(tx_status)
# view_board() and update board visuals
result = subprocess_run(CONTRACT_VIEW_BOARD)
print(result)
result = result.split(' ')
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
board[row][col] = result[row*3+col]
draw_figures()
pygame.display.update()
# game loop
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
mouseX = event.pos[0]
mouseY = event.pos[1]
clicked_row = int(mouseY // SQUARE_SIZE)
clicked_col = int(mouseX // SQUARE_SIZE)
if available_square( clicked_row, clicked_col ):
mark_square( clicked_row, clicked_col, player )
if check_win( player ):
game_over = True
#player = player % 2 + 1
draw_figures()
pygame.display.update()
### starknet contract interaction
# user_move()
result = subprocess_run(CONTRACT_USER_MOVE+[f'{clicked_row}',f'{clicked_col}'])
user_move_tx_id = result.split(' ')[-1]
print(result)
print(user_move_tx_id)
# wait for confirmation of user_move accepted onchain
update_message("Waiting for AI's countermove from StarkNet...")
while(True): # checking tx status every 10 seconds
cmd = command_check_tx_status(user_move_tx_id)
result = subprocess_run(cmd)
result_json = json.loads(result)
tx_status = result_json['tx_status']
if (tx_status == 'ACCEPTED_ONCHAIN'):
update_message("AI has moved. It is your turn now.")
print(tx_status)
break
elif (tx_status == 'REJECTED'):
update_message("Your invalid move irritated the AI.")
print(tx_status)
break
time.sleep(10)
# view_board()
result = subprocess_run(CONTRACT_VIEW_BOARD)
print(result)
update_message(result)
# update board visuals
result = result.split(' ')
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
board[row][col] = result[row*3+col]
draw_figures()
pygame.display.update()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
restart()
player = 1
game_over = False
pygame.display.update()
|
{"hexsha": "e42cd4be592a6c62e4d5fb6fd0bfe7a32692c2af", "size": 7661, "ext": "py", "lang": "Python", "max_stars_repo_path": "gui/game.py", "max_stars_repo_name": "guiltygyoza/tictactoe-on-starknet", "max_stars_repo_head_hexsha": "9ff31e7813e0686fc509cb8471803f70b539d6f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-10-12T05:45:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T03:50:27.000Z", "max_issues_repo_path": "gui/game.py", "max_issues_repo_name": "guiltygyoza/tictactoe-on-starknet", "max_issues_repo_head_hexsha": "9ff31e7813e0686fc509cb8471803f70b539d6f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-11T16:44:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T15:59:45.000Z", "max_forks_repo_path": "gui/game.py", "max_forks_repo_name": "guiltygyoza/tictactoe-on-starknet", "max_forks_repo_head_hexsha": "9ff31e7813e0686fc509cb8471803f70b539d6f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-02T06:46:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T06:46:13.000Z", "avg_line_length": 29.8093385214, "max_line_length": 196, "alphanum_fraction": 0.7047382848, "include": true, "reason": "import numpy", "num_tokens": 2137}
|
from sklearn import linear_model
from stable_baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from stable_baselines.low_dim_analysis.eval_util import get_full_param_traj_file_path, get_aug_plot_dir, get_full_params_dir, get_save_dir
import minepy
from matplotlib import pyplot as plt
import numpy as np
import gym
from low_dim_update_stable.new_neuron_analysis.dir_tree_util import *
from stable_baselines import bench, logger
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env.vec_normalize import VecNormalize
from stable_baselines.ppo2 import PPO2
from stable_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
import os
import pandas as pd
from gym.envs.registration import register
from new_neuron_analysis.experiment_augment_input import read_all_data, get_experiment_path_for_this_run, AttributeDict
def safe_mean(arr):
"""
Compute the mean of an array if there is at least one element.
For empty array, return nan. It is used for logging only.
:param arr: (np.ndarray)
:return: (float)
"""
return np.nan if len(arr) == 0 else np.mean(arr)
# def fill_contacts_jac_dict(contacts, contact_dict, neuron_values):
# for contact in contacts:
# J_contact = contact.bodynode1.linear_jacobian(contact.bodynode1.to_local(contact.point))
# if contact.bodynode1.name in contact_dict:
# contact_dict[contact.bodynode1.name][contact.bodynode1.name].append(J_contact.reshape((-1, 1)))
# for i, layer in enumerate(neuron_values[1:-2]):
# contact_dict[contact.bodynode1.name][i].append(layer.reshape((-1, 1)))
# else:
# contact_dict[contact.bodynode1.name] = {}
# contact_dict[contact.bodynode1.name][contact.bodynode1.name] = [J_contact.reshape((-1, 1))]
# for i, layer in enumerate(neuron_values[1:-2]):
# contact_dict[contact.bodynode1.name][i] = [layer.reshape((-1, 1))]
def compute_alpha(npoints):
NPOINTS_BINS = [1, 25, 50, 250, 500, 1000, 2500, 5000, 10000, 40000]
ALPHAS = [0.85, 0.80, 0.75, 0.70, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4]
if npoints < 1:
raise ValueError("the number of points must be >=1")
return ALPHAS[np.digitize([npoints], NPOINTS_BINS)[0] - 1]
regr = linear_model.LinearRegression()
def get_normalized_SSE(lagrange_l, neuron_l, regr):
range_neuron = max(neuron_l) - min(neuron_l)
neuron_l = neuron_l/range_neuron
X = lagrange_l.reshape(-1,1)
y = neuron_l.reshape(-1,1)
regr.fit(X, y)
y_pred = regr.predict(X)
SSE = ((y - y_pred) ** 2).sum()
TV = ((y - np.average(y)) ** 2).sum()
return SSE,TV
def get_Reflective_correlation_coefficient(lagrange_l, neuron_l):
prod = np.dot(lagrange_l, neuron_l)
A = np.sum(lagrange_l**2)
B = np.sum(neuron_l**2)
return prod/np.sqrt(A*B)
def scatter_the_nonlinear_significant_but_not_linear_ones(lagrangian_values,
layer_values_list, linear_threshold, nonlinear_threshold, out_dir):
for key, nda in lagrangian_values.items():
for ind, lagrange_l in enumerate(nda):
for layer_ind, layer in enumerate(layer_values_list):
for neuron_ind, neuron_l in enumerate(layer):
linear_co = np.corrcoef(lagrange_l, neuron_l)[1, 0]
alpha_cl = compute_alpha(lagrange_l.shape[0])
mine = minepy.MINE(alpha=alpha_cl, c=5, est="mic_e")
mine.compute_score(lagrange_l, neuron_l)
mic = mine.mic()
if abs(linear_co) < linear_threshold and mic > nonlinear_threshold:
name = f"{out_dir}/{key}_index_{ind}_VS_layer_{layer_ind}_neuron_{neuron_ind}_" \
f"linear_correlation{linear_co}_nonlinear_correlation{mic}.jpg"
plt.figure()
plt.scatter(lagrange_l, neuron_l)
plt.xlabel("lagrange")
plt.ylabel("neuron")
plt.savefig(name)
plt.close()
def scatter_the_linear_significant_ones(lagrangian_values, layer_values_list, threshold, out_dir):
for key, nda in lagrangian_values.items():
for ind, lagrange_l in enumerate(nda):
for layer_ind, layer in enumerate(layer_values_list):
for neuron_ind, neuron_l in enumerate(layer):
co = np.corrcoef(lagrange_l, neuron_l)[1,0]
normalized_SSE, TV = get_normalized_SSE(lagrange_l, neuron_l, regr)
Reflective_correlation_coefficient = get_Reflective_correlation_coefficient(lagrange_l, neuron_l)
# if abs(co) > threshold:
if (abs(co) > threshold and normalized_SSE < 200):
name = f"{out_dir}/{key}_index_{ind}_VS_layer_{layer_ind}_neuron_{neuron_ind}_" \
f"linear_correlation{co}_normalized_SSE_{normalized_SSE}_Syy_{TV}" \
f"_Reflective_correlation_coefficient_{Reflective_correlation_coefficient}.jpg"
plt.figure()
plt.scatter(lagrange_l,neuron_l)
plt.xlabel("lagrange")
plt.ylabel("neuron")
plt.savefig(name)
plt.close()
def plot_everything(lagrangian_values, layer_values_list, out_dir, PLOT_CUTOFF):
for key, nda in lagrangian_values.items():
for ind, l in enumerate(nda):
name = f"{out_dir}/{key}_index_{ind}.jpg"
plt.figure()
plt.plot(l[:PLOT_CUTOFF])
plt.savefig(name)
plt.close()
for layer_ind, layer in enumerate(layer_values_list):
for neuron_ind, l in enumerate(layer):
name = f"{out_dir}/layer_{layer_ind}_neuron_{neuron_ind}.jpg"
plt.figure()
plt.plot(l[:PLOT_CUTOFF])
plt.savefig(name)
plt.close()
def visualize_policy_and_collect_COM(augment_num_timesteps, top_num_to_include_slice, augment_seed, augment_run_num, network_size,
policy_env, policy_num_timesteps, policy_run_num, policy_seed, eval_seed, eval_run_num, learning_rate,
additional_note, metric_param):
result_dir = get_result_dir(policy_env, policy_num_timesteps, policy_run_num,
policy_seed, eval_seed, eval_run_num, additional_note, metric_param)
args = AttributeDict()
args.normalize = True
args.num_timesteps = augment_num_timesteps
args.run_num = augment_run_num
args.alg = "ppo2"
args.seed = augment_seed
logger.log(f"#######VISUALIZE: {args}")
# non_linear_global_dict
linear_global_dict, non_linear_global_dict, lagrangian_values, input_values, layers_values, all_weights = read_all_data(
policy_env, policy_num_timesteps, policy_run_num, policy_seed, eval_seed, eval_run_num, additional_note=additional_note)
timestamp = get_time_stamp('%Y_%m_%d_%H_%M_%S')
experiment_label = f"learning_rate_{learning_rate}timestamp_{timestamp}_augment_num_timesteps{augment_num_timesteps}" \
f"_top_num_to_include{top_num_to_include_slice.start}_{top_num_to_include_slice.stop}" \
f"_augment_seed{augment_seed}_augment_run_num{augment_run_num}_network_size{network_size}" \
f"_policy_num_timesteps{policy_num_timesteps}_policy_run_num{policy_run_num}_policy_seed{policy_seed}" \
f"_eval_seed{eval_seed}_eval_run_num{eval_run_num}_additional_note_{additional_note}"
entry_point = 'gym.envs.dart:DartWalker2dEnv_aug_input'
this_run_dir = get_experiment_path_for_this_run(entry_point, args.num_timesteps, args.run_num,
args.seed, learning_rate=learning_rate, top_num_to_include=top_num_to_include_slice,
result_dir=result_dir, network_size=network_size, metric_param=metric_param)
traj_params_dir_name = get_full_params_dir(this_run_dir)
save_dir = get_save_dir( this_run_dir)
aug_plot_dir = get_aug_plot_dir(this_run_dir) + "_vis"
final_file = get_full_param_traj_file_path(traj_params_dir_name, "pi_final")
final_params = pd.read_csv(final_file, header=None).values[0]
args.env = f'{experiment_label}_{entry_point}-v1'
register(
id=args.env,
entry_point=entry_point,
max_episode_steps=1000,
kwargs={'linear_global_dict':linear_global_dict,
'non_linear_global_dict':non_linear_global_dict,
'top_to_include_slice':top_num_to_include_slice,
'aug_plot_dir': aug_plot_dir,
"lagrangian_values":lagrangian_values,
"layers_values":layers_values}
)
def make_env():
env_out = gym.make(args.env)
env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
return env_out
env = DummyVecEnv([make_env])
walker_env = env.envs[0].env.env
walker_env.disableViewer = False
if args.normalize:
env = VecNormalize(env)
set_global_seeds(args.seed)
walker_env.seed(args.seed)
model = PPO2.load(f"{save_dir}/ppo2", seed=augment_seed)
model.set_pi_from_flat(final_params)
if args.normalize:
env.load_running_average(save_dir)
sk = env.venv.envs[0].env.env.robot_skeleton
lagrangian_values = {}
obs = np.zeros((env.num_envs,) + env.observation_space.shape)
obs[:] = env.reset()
env = VecVideoRecorder(env, aug_plot_dir, record_video_trigger=lambda x: x == 0, video_length=3000, name_prefix="vis_this_policy")
lagrangian_values["M"] = [sk.M.reshape((-1,1))]
lagrangian_values["COM"] = [sk.C.reshape((-1,1))]
lagrangian_values["Coriolis"] = [sk.c.reshape((-1,1))]
lagrangian_values["q"] = [sk.q.reshape((-1, 1))]
lagrangian_values["dq"] = [sk.dq.reshape((-1, 1))]
contact_values = {}
neuron_values = model.give_neuron_values(obs)
raw_layer_values_list = [[neuron_value.reshape((-1,1))] for neuron_value in neuron_values]
env.render()
ep_infos = []
steps_to_first_done = 0
first_done = False
# epi_rew = 0
for _ in range(3000):
actions = model.step(obs)[0]
# yield neuron_values
obs, rew, done, infos = env.step(actions)
# epi_rew+= rew[0]
if done and not first_done:
first_done = True
if not first_done:
steps_to_first_done += 1
neuron_values = model.give_neuron_values(obs)
for i, layer in enumerate(neuron_values):
raw_layer_values_list[i].append(layer.reshape((-1,1)))
# fill_contacts_jac_dict(infos[0]["contacts"], contact_dict=contact_values, neuron_values=neuron_values)
lagrangian_values["M"].append(sk.M.reshape((-1, 1)))
lagrangian_values["q"].append(sk.q.reshape((-1, 1)))
lagrangian_values["dq"].append(sk.dq.reshape((-1, 1)))
lagrangian_values["COM"].append(sk.C.reshape((-1, 1)))
lagrangian_values["Coriolis"].append(sk.c.reshape((-1, 1)))
# env.render()
# time.sleep(1)
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
env.render()
done = done.any()
if done:
episode_rew = safe_mean([ep_info['r'] for ep_info in ep_infos])
print(f'episode_rew={episode_rew}')
# print(f'episode_rew={epi_rew}')
# epi_rew = 0
obs = env.reset()
#Hstack into a big matrix
lagrangian_values["M"] = np.hstack(lagrangian_values["M"])
lagrangian_values["COM"] = np.hstack(lagrangian_values["COM"])
lagrangian_values["Coriolis"] = np.hstack(lagrangian_values["Coriolis"])
lagrangian_values["q"] = np.hstack(lagrangian_values["q"])
lagrangian_values["dq"] = np.hstack(lagrangian_values["dq"])
# for contact_body_name, l in contact_values.items():
# body_contact_dict = contact_values[contact_body_name]
# for name, l in body_contact_dict.items():
# body_contact_dict[name] = np.hstack(body_contact_dict[name])
input_values = np.hstack(raw_layer_values_list[0])
layers_values = [np.hstack(layer_list) for layer_list in raw_layer_values_list][1:-2]# drop variance and inputs
for i, com in enumerate(lagrangian_values["COM"]):
plt.figure()
plt.plot(np.arange(len(com)), com)
plt.xlabel("time")
plt.ylabel(f"COM{i}")
plt.savefig(f"{aug_plot_dir}/COM{i}.jpg")
plt.close()
def visualize_augment_experiment(augment_num_timesteps, top_num_to_include_slice, augment_seed, augment_run_num, network_size,
policy_env, policy_num_timesteps, policy_run_num, policy_seed, eval_seed, eval_run_num, learning_rate,
additional_note, result_dir, lagrangian_inds_to_include=None):
args = AttributeDict()
args.normalize = True
args.num_timesteps = augment_num_timesteps
args.run_num = augment_run_num
args.alg = "ppo2"
args.seed = augment_seed
logger.log(f"#######TRAIN: {args}")
# non_linear_global_dict
timestamp = get_time_stamp('%Y_%m_%d_%H_%M_%S')
experiment_label = f"learning_rate_{learning_rate}timestamp_{timestamp}_augment_num_timesteps{augment_num_timesteps}" \
f"_top_num_to_include{top_num_to_include_slice.start}_{top_num_to_include_slice.stop}" \
f"_augment_seed{augment_seed}_augment_run_num{augment_run_num}_network_size{network_size}" \
f"_policy_num_timesteps{policy_num_timesteps}_policy_run_num{policy_run_num}_policy_seed{policy_seed}" \
f"_eval_seed{eval_seed}_eval_run_num{eval_run_num}_additional_note_{additional_note}"
if policy_env == "DartWalker2d-v1":
entry_point = 'gym.envs.dart:DartWalker2dEnv_aug_input'
elif policy_env == "DartHopper-v1":
entry_point = 'gym.envs.dart:DartHopperEnv_aug_input'
elif policy_env == "DartHalfCheetah-v1":
entry_point = 'gym.envs.dart:DartHalfCheetahEnv_aug_input'
elif policy_env == "DartSnake7Link-v1":
entry_point = 'gym.envs.dart:DartSnake7LinkEnv_aug_input'
else:
raise NotImplemented()
this_run_dir = get_experiment_path_for_this_run(entry_point, args.num_timesteps, args.run_num,
args.seed, learning_rate=learning_rate, top_num_to_include=top_num_to_include_slice,
result_dir=result_dir, network_size=network_size)
full_param_traj_dir_path = get_full_params_dir(this_run_dir)
log_dir = get_log_dir(this_run_dir)
save_dir = get_save_dir(this_run_dir)
create_dir_remove(this_run_dir)
create_dir_remove(full_param_traj_dir_path)
create_dir_remove(save_dir)
create_dir_remove(log_dir)
logger.configure(log_dir)
# note this is only linear
if lagrangian_inds_to_include is None:
linear_top_vars_list = read_linear_top_var(policy_env, policy_num_timesteps, policy_run_num, policy_seed, eval_seed,
eval_run_num, additional_note)
# keys_to_include = ["COM", "M", "Coriolis", "total_contact_forces_contact_bodynode",
# "com_jacobian", "contact_bodynode_jacobian"]
keys_to_include = ["COM", "M", "Coriolis", "com_jacobian"]
# lagrangian_inds_to_include = linear_top_vars_list[top_num_to_include_slice]
lagrangian_inds_to_include = get_wanted_lagrangians(keys_to_include, linear_top_vars_list, top_num_to_include_slice)
with open(f"{log_dir}/lagrangian_inds_to_include.json", 'w') as fp:
json.dump(lagrangian_inds_to_include, fp)
args.env = f'{experiment_label}_{entry_point}-v1'
register(
id=args.env,
entry_point=entry_point,
max_episode_steps=1000,
kwargs={"lagrangian_inds_to_include": lagrangian_inds_to_include}
)
def make_env():
env_out = gym.make(args.env)
env_out.env.visualize = False
env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
return env_out
env = DummyVecEnv([make_env])
walker_env = env.envs[0].env.env
walker_env.disableViewer = True
if args.normalize:
env = VecNormalize(env)
policy = MlpPolicy
# extra run info I added for my purposes
run_info = {"run_num": args.run_num,
"env_id": args.env,
"full_param_traj_dir_path": full_param_traj_dir_path}
layers = [network_size, network_size]
set_global_seeds(args.seed)
walker_env.seed(args.seed)
policy_kwargs = {"net_arch" : [dict(vf=layers, pi=layers)]}
model = PPO2(policy=policy, env=env, n_steps=4096, nminibatches=64, lam=0.95, gamma=0.99,
noptepochs=10,
ent_coef=0.0, learning_rate=learning_rate, cliprange=0.2, optimizer='adam', policy_kwargs=policy_kwargs,
seed=args.seed)
model.tell_run_info(run_info)
model.learn(total_timesteps=args.num_timesteps, seed=args.seed)
model.save(f"{save_dir}/ppo2")
if args.normalize:
env.save_running_average(save_dir)
return log_dir
if __name__ == '__main__':
# visualize_policy_and_collect_COM(seed=0, run_num=0, policy_num_timesteps=3000000, policy_run_num=0, policy_seed=0)
policy_env = "DartWalker2d-v1"
policy_seed = 3
policy_run_num = 1
policy_num_timesteps = 5000000
policy_env = "DartWalker2d-v1"
eval_seed = 4
eval_run_num = 4
augment_seed = 2
augment_run_num = 0
augment_num_timesteps = 1500000
top_num_to_include = slice(0,0)
network_size = 64
additional_note = ""
learning_rate = 64 / network_size * 3e-4
metric_param = None
visualize_policy_and_collect_COM(augment_num_timesteps, top_num_to_include_slice=top_num_to_include, augment_seed=augment_seed,
augment_run_num=augment_run_num, network_size=network_size,
policy_env=policy_env, policy_num_timesteps=policy_num_timesteps,
policy_run_num=policy_run_num, policy_seed=policy_seed, eval_seed=eval_seed,
eval_run_num=eval_run_num, learning_rate=learning_rate,
additional_note=additional_note, metric_param=metric_param)
# visualize_policy_and_collect_COM(seed=3, run_num=3, policy_env=policy_env, policy_num_timesteps=2000000,
# policy_seed=1, policy_run_num=0)
# seeds = [0, 1, 2]
# run_nums = [0, 1, 2]
# for seed in seeds:
# for run_num in run_nums:
# run_trained_policy(seed=seed, run_num=run_num)
#
# visualize_trained_policy(seed=3, run_num, policy_env, policy_num_timesteps, policy_seed, policy_run_num)
#TODO Give filenames more info to identify which hyperparameter is the data for
|
{"hexsha": "77fd6ade5dad5cfaf49bd297cbad2fa04022169f", "size": 19412, "ext": "py", "lang": "Python", "max_stars_repo_path": "new_neuron_analysis/visualize_augment_policy.py", "max_stars_repo_name": "hugerepo-tianhang/low_dim_update_stable", "max_stars_repo_head_hexsha": "565f6cbf886d266d0633bc112ccae28f1d116ee1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "new_neuron_analysis/visualize_augment_policy.py", "max_issues_repo_name": "hugerepo-tianhang/low_dim_update_stable", "max_issues_repo_head_hexsha": "565f6cbf886d266d0633bc112ccae28f1d116ee1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "new_neuron_analysis/visualize_augment_policy.py", "max_forks_repo_name": "hugerepo-tianhang/low_dim_update_stable", "max_forks_repo_head_hexsha": "565f6cbf886d266d0633bc112ccae28f1d116ee1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2738589212, "max_line_length": 138, "alphanum_fraction": 0.6600556357, "include": true, "reason": "import numpy", "num_tokens": 4737}
|
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import os
import numpy as np
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('Linear') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class FeatureExtraction(nn.Module):
def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(FeatureExtraction, self).__init__()
downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)
model = [downconv, nn.ReLU(True), norm_layer(ngf)]
for i in range(n_layers):
in_ngf = 2 ** i * ngf if 2 ** i * ngf < 512 else 512
out_ngf = 2 ** (i + 1) * ngf if 2 ** i * ngf < 512 else 512
downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)
model += [downconv, nn.ReLU(True)]
model += [norm_layer(out_ngf)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
model += [norm_layer(512)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
self.model = nn.Sequential(*model)
init_weights(self.model, init_type='normal')
def forward(self, x):
return self.model(x)
class FeatureL2Norm(torch.nn.Module):
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class FeatureCorrelation(nn.Module):
def __init__(self):
super(FeatureCorrelation, self).__init__()
def forward(self, feature_A, feature_B):
b, c, h, w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2)
# perform matrix mult.
feature_mul = torch.bmm(feature_B, feature_A)
correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
return correlation_tensor
class FeatureRegression(nn.Module):
def __init__(self, input_nc=512, output_dim=6, use_cuda=True):
super(FeatureRegression, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.linear = nn.Linear(64 * 4 * 3, output_dim)
self.tanh = nn.Tanh()
if use_cuda:
self.conv.cuda()
self.linear.cuda()
self.tanh.cuda()
def forward(self, x):
x = self.conv(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
x = self.tanh(x)
return x
class AffineGridGen(nn.Module):
def __init__(self, out_h=256, out_w=192, out_ch=3):
super(AffineGridGen, self).__init__()
self.out_h = out_h
self.out_w = out_w
self.out_ch = out_ch
def forward(self, theta):
theta = theta.contiguous()
batch_size = theta.size()[0]
out_size = torch.Size((batch_size, self.out_ch, self.out_h, self.out_w))
return F.affine_grid(theta, out_size)
class TpsGridGen(nn.Module):
def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0, use_cuda=True):
super(TpsGridGen, self).__init__()
self.out_h, self.out_w = out_h, out_w
self.reg_factor = reg_factor
self.use_cuda = use_cuda
# create grid in numpy
self.grid = np.zeros([self.out_h, self.out_w, 3], dtype=np.float32)
# sampling grid with dim-0 coords (Y)
self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, out_w), np.linspace(-1, 1, out_h))
# grid_X,grid_Y: size [1,H,W,1,1]
self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)
self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)
if use_cuda:
self.grid_X = self.grid_X.cuda()
self.grid_Y = self.grid_Y.cuda()
# initialize regular grid for control points P_i
if use_regular_grid:
axis_coords = np.linspace(-1, 1, grid_size)
self.N = grid_size * grid_size
P_Y, P_X = np.meshgrid(axis_coords, axis_coords)
P_X = np.reshape(P_X, (-1, 1)) # size (N,1)
P_Y = np.reshape(P_Y, (-1, 1)) # size (N,1)
P_X = torch.FloatTensor(P_X)
P_Y = torch.FloatTensor(P_Y)
self.P_X_base = P_X.clone()
self.P_Y_base = P_Y.clone()
self.Li = self.compute_L_inverse(P_X, P_Y).unsqueeze(0)
self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0, 4)
self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0, 4)
if use_cuda:
self.P_X = self.P_X.cuda()
self.P_Y = self.P_Y.cuda()
self.P_X_base = self.P_X_base.cuda()
self.P_Y_base = self.P_Y_base.cuda()
def forward(self, theta):
warped_grid = self.apply_transformation(theta, torch.cat((self.grid_X, self.grid_Y), 3))
return warped_grid
def compute_L_inverse(self, X, Y):
N = X.size()[0] # num of points (along dim 0)
# construct matrix K
Xmat = X.expand(N, N)
Ymat = Y.expand(N, N)
P_dist_squared = torch.pow(Xmat - Xmat.transpose(0, 1), 2) + torch.pow(Ymat - Ymat.transpose(0, 1), 2)
P_dist_squared[P_dist_squared == 0] = 1 # make diagonal 1 to avoid NaN in log computation
K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
# construct matrix L
O = torch.FloatTensor(N, 1).fill_(1)
Z = torch.FloatTensor(3, 3).fill_(0)
P = torch.cat((O, X, Y), 1)
L = torch.cat((torch.cat((K, P), 1), torch.cat((P.transpose(0, 1), Z), 1)), 0)
Li = torch.inverse(L)
if self.use_cuda:
Li = Li.cuda()
return Li
def apply_transformation(self, theta, points):
if theta.dim() == 2:
theta = theta.unsqueeze(2).unsqueeze(3)
# points should be in the [B,H,W,2] format,
# where points[:,:,:,0] are the X coords
# and points[:,:,:,1] are the Y coords
# input are the corresponding control points P_i
batch_size = theta.size()[0]
# split theta into point coordinates
Q_X = theta[:, :self.N, :, :].squeeze(3)
Q_Y = theta[:, self.N:, :, :].squeeze(3)
Q_X = Q_X + self.P_X_base.expand_as(Q_X)
Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)
# get spatial dimensions of points
points_b = points.size()[0]
points_h = points.size()[1]
points_w = points.size()[2]
# repeat pre-defined control points along spatial dimensions of points to be transformed
P_X = self.P_X.expand((1, points_h, points_w, 1, self.N))
P_Y = self.P_Y.expand((1, points_h, points_w, 1, self.N))
# compute weigths for non-linear part
W_X = torch.bmm(self.Li[:, :self.N, :self.N].expand((batch_size, self.N, self.N)), Q_X)
W_Y = torch.bmm(self.Li[:, :self.N, :self.N].expand((batch_size, self.N, self.N)), Q_Y)
# reshape
# W_X,W,Y: size [B,H,W,1,N]
W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1, 4).repeat(1, points_h, points_w, 1, 1)
W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1, 4).repeat(1, points_h, points_w, 1, 1)
# compute weights for affine part
A_X = torch.bmm(self.Li[:, self.N:, :self.N].expand((batch_size, 3, self.N)), Q_X)
A_Y = torch.bmm(self.Li[:, self.N:, :self.N].expand((batch_size, 3, self.N)), Q_Y)
# reshape
# A_X,A,Y: size [B,H,W,1,3]
A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1, 4).repeat(1, points_h, points_w, 1, 1)
A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1, 4).repeat(1, points_h, points_w, 1, 1)
# compute distance P_i - (grid_X,grid_Y)
# grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch
points_X_for_summation = points[:, :, :, 0].unsqueeze(3).unsqueeze(4).expand(
points[:, :, :, 0].size() + (1, self.N))
points_Y_for_summation = points[:, :, :, 1].unsqueeze(3).unsqueeze(4).expand(
points[:, :, :, 1].size() + (1, self.N))
if points_b == 1:
delta_X = points_X_for_summation - P_X
delta_Y = points_Y_for_summation - P_Y
else:
# use expanded P_X,P_Y in batch dimension
delta_X = points_X_for_summation - P_X.expand_as(points_X_for_summation)
delta_Y = points_Y_for_summation - P_Y.expand_as(points_Y_for_summation)
dist_squared = torch.pow(delta_X, 2) + torch.pow(delta_Y, 2)
# U: size [1,H,W,1,N]
dist_squared[dist_squared == 0] = 1 # avoid NaN in log computation
U = torch.mul(dist_squared, torch.log(dist_squared))
# expand grid in batch dimension if necessary
points_X_batch = points[:, :, :, 0].unsqueeze(3)
points_Y_batch = points[:, :, :, 1].unsqueeze(3)
if points_b == 1:
points_X_batch = points_X_batch.expand((batch_size,) + points_X_batch.size()[1:])
points_Y_batch = points_Y_batch.expand((batch_size,) + points_Y_batch.size()[1:])
points_X_prime = A_X[:, :, :, :, 0] + \
torch.mul(A_X[:, :, :, :, 1], points_X_batch) + \
torch.mul(A_X[:, :, :, :, 2], points_Y_batch) + \
torch.sum(torch.mul(W_X, U.expand_as(W_X)), 4)
points_Y_prime = A_Y[:, :, :, :, 0] + \
torch.mul(A_Y[:, :, :, :, 1], points_X_batch) + \
torch.mul(A_Y[:, :, :, :, 2], points_Y_batch) + \
torch.sum(torch.mul(W_Y, U.expand_as(W_Y)), 4)
return torch.cat((points_X_prime, points_Y_prime), 3)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
uprelu = nn.ReLU(True)
if norm_layer != None:
downnorm = norm_layer(inner_nc)
upnorm = norm_layer(outer_nc)
if outermost:
upsample = nn.Upsample(scale_factor=2, mode='bilinear')
upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
down = [downconv]
# up = [uprelu, upsample, upconv, upnorm]
up = [uprelu, upsample, upconv]
model = down + [submodule] + up
elif innermost:
upsample = nn.Upsample(scale_factor=2, mode='bilinear')
upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
down = [downrelu, downconv]
if norm_layer == None:
up = [uprelu, upsample, upconv]
else:
up = [uprelu, upsample, upconv, upnorm]
model = down + up
else:
upsample = nn.Upsample(scale_factor=2, mode='bilinear')
upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
if norm_layer == None:
down = [downrelu, downconv]
up = [uprelu, upsample, upconv]
else:
down = [downrelu, downconv, downnorm]
up = [uprelu, upsample, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
# UNet with residual blocks
class ResidualBlock(nn.Module):
def __init__(self, in_features=64, norm_layer=nn.BatchNorm2d):
super(ResidualBlock, self).__init__()
self.relu = nn.ReLU(True)
if norm_layer == None:
# hard to converge with out batch or instance norm
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1, bias=False),
)
else:
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1, bias=False),
norm_layer(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1, bias=False),
norm_layer(in_features)
)
def forward(self, x):
residual = x
out = self.block(x)
out += residual
out = self.relu(out)
return out
# return self.relu(x + self.block(x))
class ResUnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(ResUnetGenerator, self).__init__()
# construct unet structure
unet_block = ResUnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
innermost=True)
for i in range(num_downs - 5):
unet_block = ResUnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer, use_dropout=use_dropout)
unet_block = ResUnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_block = ResUnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_block = ResUnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block,
norm_layer=norm_layer)
unet_block = ResUnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
norm_layer=norm_layer)
self.model = unet_block
def forward(self, input):
output = self.model(input)
# print("\tIn Model: input size", input.size(),
# "output size", output.size())
return output
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class ResUnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(ResUnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=3,
stride=2, padding=1, bias=use_bias)
# add two resblock
res_downconv = [ResidualBlock(inner_nc, norm_layer), ResidualBlock(inner_nc, norm_layer)]
res_upconv = [ResidualBlock(outer_nc, norm_layer), ResidualBlock(outer_nc, norm_layer)]
# res_downconv = [ResidualBlock(inner_nc)]
# res_upconv = [ResidualBlock(outer_nc)]
downrelu = nn.ReLU(True)
uprelu = nn.ReLU(True)
if norm_layer != None:
downnorm = norm_layer(inner_nc)
upnorm = norm_layer(outer_nc)
if outermost:
upsample = nn.Upsample(scale_factor=2, mode='nearest')
upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
down = [downconv, downrelu] + res_downconv
# up = [uprelu, upsample, upconv, upnorm]
up = [upsample, upconv]
model = down + [submodule] + up
elif innermost:
upsample = nn.Upsample(scale_factor=2, mode='nearest')
upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
down = [downconv, downrelu] + res_downconv
if norm_layer == None:
up = [upsample, upconv, uprelu] + res_upconv
else:
up = [upsample, upconv, upnorm, uprelu] + res_upconv
model = down + up
else:
upsample = nn.Upsample(scale_factor=2, mode='nearest')
upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
if norm_layer == None:
down = [downconv, downrelu] + res_downconv
up = [upsample, upconv, uprelu] + res_upconv
else:
down = [downconv, downnorm, downrelu] + res_downconv
up = [upsample, upconv, upnorm, uprelu] + res_upconv
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
class Vgg19(nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = nn.Sequential()
self.slice2 = nn.Sequential()
self.slice3 = nn.Sequential()
self.slice4 = nn.Sequential()
self.slice5 = nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a * b, c * d) # resise F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
# by dividing by the number of element in each feature maps.
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
def __init__(self):
super(StyleLoss, self).__init__()
def forward(self, x, y):
Gx = gram_matrix(x)
Gy = gram_matrix(y)
return F.mse_loss(Gx, Gy) * 30000000
class VGGLoss(nn.Module):
def __init__(self, model=None):
super(VGGLoss, self).__init__()
if model is None:
self.vgg = Vgg19()
else:
self.vgg = model
self.vgg.cuda()
# self.vgg.eval()
self.criterion = nn.L1Loss()
self.style_criterion = StyleLoss()
self.weights = [1.0, 1.0, 1.0, 1.0, 1.0]
self.style_weights = [1.0, 1.0, 1.0, 1.0, 1.0]
# self.weights = [5.0, 1.0, 0.5, 0.4, 0.8]
# self.style_weights = [10e4, 1000, 50, 15, 50]
def forward(self, x, y, style=False):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
if style:
# return both perceptual loss and style loss.
style_loss = 0
for i in range(len(x_vgg)):
this_loss = (self.weights[i] *
self.criterion(x_vgg[i], y_vgg[i].detach()))
this_style_loss = (self.style_weights[i] *
self.style_criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
style_loss += this_style_loss
return loss, style_loss
for i in range(len(x_vgg)):
this_loss = (self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
return loss
class GMM(nn.Module):
""" Geometric Matching Module
"""
def __init__(self, opt, input_nc):
super(GMM, self).__init__()
self.extractionA = FeatureExtraction(input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)
self.extractionB = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)
self.l2norm = FeatureL2Norm()
self.correlation = FeatureCorrelation()
self.regression = FeatureRegression(input_nc=192, output_dim=2 * opt.grid_size ** 2, use_cuda=True)
self.gridGen = TpsGridGen(opt.fine_height, opt.fine_width, use_cuda=True, grid_size=opt.grid_size)
def forward(self, inputA, inputB):
featureA = self.extractionA(inputA)
featureB = self.extractionB(inputB)
featureA = self.l2norm(featureA)
featureB = self.l2norm(featureB)
correlation = self.correlation(featureA, featureB)
theta = self.regression(correlation)
grid = self.gridGen(theta)
return grid, theta
def save_checkpoint(model, save_path):
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
torch.save(model.state_dict(), save_path)
def load_checkpoint(model, checkpoint_path):
if not os.path.exists(checkpoint_path):
print('No checkpoint!')
return
model.load_state_dict(torch.load(checkpoint_path))
# try:
# model.load_state_dict(torch.load(checkpoint_path))
# except:
# model = nn.DataParallel(model)
# model.load_state_dict(torch.load(checkpoint_path))
|
{"hexsha": "8a1dae863d7924b8a818052fe998ef9263921fbd", "size": 26686, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tencent/Video_Generation/MakeItTalk/src/models/model_image_translation.py", "max_stars_repo_name": "orange-eng/internship", "max_stars_repo_head_hexsha": "c8c566df453d3a4bdf692338f74916ae15792fa1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-14T14:09:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:04:50.000Z", "max_issues_repo_path": "Tencent/Video_Generation/MakeItTalk/src/models/model_image_translation.py", "max_issues_repo_name": "orange-eng/internship", "max_issues_repo_head_hexsha": "c8c566df453d3a4bdf692338f74916ae15792fa1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tencent/Video_Generation/MakeItTalk/src/models/model_image_translation.py", "max_forks_repo_name": "orange-eng/internship", "max_forks_repo_head_hexsha": "c8c566df453d3a4bdf692338f74916ae15792fa1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5023328149, "max_line_length": 120, "alphanum_fraction": 0.5895975418, "include": true, "reason": "import numpy", "num_tokens": 7104}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import magnum as mn
import numpy as np
from fairmotion.core import motion
from fairmotion.data import amass
from fairmotion.ops import motion as motion_ops
from habitat_sim.logging import LoggingContext, logger
LoggingContext.reinitialize_from_env()
logger.setLevel("INFO")
#### Constants ###
ROOT, FIRST, LAST = 0, 0, -1
class MType(Enum):
"""
This emun class represents the two ways that motion data can be setup and used.
Transitive motion data is used to configure and make available cyclical motion data
that is needed for the character to traverse paths and displace the character between
Scenic actions. Scenic motion data is more performative in nature and is used to
configure motion capture data that usually plays out in a specific location.
"""
TRANSITIVE = 0
SCENIC = 1
class Motions:
"""
The Motions class is collection of stats that will hold the different movement motions
for the character to use when following a path. The character is left-footed so that
is our reference for with step the motions assume first.
"""
@dataclass
class MotionData:
"""
A class intended to handle precomputations of utilities of the motion we want to
load into the character.
"""
# transitive motion is necessary for scenic motion build
def __init__(
self,
motion_: motion.Motion,
type_: MType,
transitive_motion_=None,
scenic_direction_offset: int = 0,
) -> None:
logger.info("Loading Motion data...")
if type_ == MType.TRANSITIVE:
# primary
self.motion = motion_
self.type = MType.TRANSITIVE
self.poses = motion_.poses
self.fps = motion_.fps
self.num_of_frames: int = len(motion_.poses)
self.map_of_total_displacement: float = []
self.center_of_root_drift: mn.Vector3 = mn.Vector3()
self.time_length = self.num_of_frames * (1.0 / motion_.fps)
# intermediates
self.translation_drifts: List[mn.Vector3] = []
self.forward_displacements: List[mn.Vector3] = []
self.root_orientations: List[mn.Quaternion] = []
# first and last frame root position vectors
f = motion_.poses[0].get_transform(ROOT, local=False)[0:3, 3]
f = mn.Vector3(f)
l = motion_.poses[LAST].get_transform(ROOT, local=False)[0:3, 3]
l = mn.Vector3(l)
# axis that motion uses for up and forward
self.direction_up = mn.Vector3.z_axis()
forward_V = (l - f) * (mn.Vector3(1.0, 1.0, 1.0) - mn.Vector3.z_axis())
self.direction_forward = forward_V.normalized()
### Fill derived data structures ###
# fill translation_drifts and forward_displacements
for i in range(self.num_of_frames):
j = i + 1
if j == self.num_of_frames:
# interpolate forward and drift from nth vectors and 1st vectors and push front
self.forward_displacements.insert(
0,
(
(
self.forward_displacements[LAST]
+ self.forward_displacements[0]
)
* 0.5
),
)
self.translation_drifts.insert(
0,
(
(
self.translation_drifts[LAST]
+ self.translation_drifts[0]
)
* 0.5
),
)
break
# root translation
curr_root_t = motion_.poses[i].get_transform(ROOT, local=False)[
0:3, 3
]
next_root_t = motion_.poses[j].get_transform(ROOT, local=False)[
0:3, 3
]
delta_P_vector = mn.Vector3(next_root_t - curr_root_t)
forward_vector = delta_P_vector.projected(self.direction_forward)
drift_vector = delta_P_vector - forward_vector
self.forward_displacements.append(forward_vector)
self.translation_drifts.append(drift_vector)
j, summ = 0, 0
# fill translation_drifts and forward_displacements
for i in range(self.num_of_frames):
curr_root_t = motion_.poses[i].get_transform(ROOT, local=False)[
0:3, 3
]
prev_root_t = motion_.poses[j].get_transform(ROOT, local=False)[
0:3, 3
]
# fill map_of_total_displacement
summ += (
mn.Vector3(curr_root_t - prev_root_t)
.projected(self.direction_forward)
.length()
)
self.map_of_total_displacement.append(summ)
j = i
# fill root_orientations
for pose in motion_.poses:
root_T = pose.get_transform(ROOT, local=False)
root_rotation = mn.Quaternion.from_matrix(
mn.Matrix3x3(root_T[0:3, 0:3])
)
self.root_orientations.append(root_rotation)
# get center of drift
summ = mn.Vector3()
for pose in motion_.poses:
root_T = mn.Matrix4(pose.get_transform(ROOT, local=False))
root_T.translation *= (
mn.Vector3(1.0, 1.0, 1.0) - self.direction_forward
)
summ += root_T.translation
self.center_of_root_drift = summ / self.num_of_frames
if type_ == MType.SCENIC:
# must pass in a transitive motion
assert transitive_motion_ is not None
# primary
self.motion = motion_
self.type = MType.SCENIC
self.poses = motion_.poses
self.fps = motion_.fps
self.num_of_frames: int = len(motion_.poses)
self.time_length = self.num_of_frames * (1.0 / motion_.fps)
self.transitive_motion_: Motions.MotionData = transitive_motion_
# axis that motion uses for up and forward
self.direction_up = mn.Vector3.z_axis()
self.direction_forward = mn.Vector3.x_axis()
# add scenic_direction offset
angle = scenic_direction_offset
rotate = mn.Quaternion.rotation(mn.Deg(angle), self.direction_up)
self.direction_forward = rotate.transform_vector(self.direction_forward)
# edge cases
self.start_translation = motion_.poses[FIRST].get_transform(
ROOT, local=False
)[0:3, 3]
self.final_translation = motion_.poses[LAST].get_transform(
ROOT, local=False
)[0:3, 3]
logger.info("Loading Motion data...")
motion_files = [
"data/fairmotion/amass_test_data/CMU/CMU/10/10_04_poses.npz", # [0] cycle walk
"data/fairmotion/amass_test_data/CMU/CMU/09/09_01_poses.npz", # [1] cycle run
"data/fairmotion/amass_test_data/CMU/CMU/13/13_08_poses.npz", # [2] drink beverage
"data/fairmotion/amass_test_data/CMU/CMU/13/13_22_poses.npz", # [3] wash window
"data/fairmotion/amass_test_data/CMU/CMU/13/13_23_poses.npz", # [4] sweep floor
"data/fairmotion/amass_test_data/CMU/CMU/13/13_29_poses.npz", # [5] jumping jacks
"data/fairmotion/amass_test_data/CMU/CMU/13/13_10_poses.npz", # [6] reach for
]
bm_path = "data/fairmotion/amass_test_data/smplh/male/model.npz"
motion_data = amass.load_parallel(motion_files, bm_path=bm_path)
### TRANSITIVE ###
# all motions must have same fps for this implementation, so use first motion to set global
fps = motion_data[0].fps
# Standing pose that must be converted (amass -> habitat joint positions)
standing_pose = motion_data[0].poses[0]
# Walk-to-walk cycle
walk_to_walk = MotionData(
motion_ops.cut(motion_data[0], 300, 430), MType.TRANSITIVE
)
# Run-to-run cycle
run_to_run = MotionData(motion_ops.cut(motion_data[1], 3, 89), MType.TRANSITIVE)
### SCENIC ###
drink_beverage = MotionData(
motion_data[2], MType.SCENIC, transitive_motion_=walk_to_walk
)
wash_window = MotionData(
motion_ops.cut(motion_data[3], 3040, 4090),
MType.SCENIC,
transitive_motion_=walk_to_walk,
)
sweep_floor = MotionData(
motion_data[4], MType.SCENIC, transitive_motion_=walk_to_walk
)
jumping_jacks = MotionData(
motion_data[5],
MType.SCENIC,
transitive_motion_=run_to_run,
scenic_direction_offset=90,
)
reach_for = MotionData(
motion_data[6],
MType.SCENIC,
transitive_motion_=walk_to_walk,
scenic_direction_offset=-80,
)
@dataclass
class PathData:
"""
The PathData class is purposed to instantiate with given path points, and used
to manage the path data fro the current path following sequence.
"""
def __init__(self, path_points) -> None:
self.points = path_points
self.length = self.calc_path_length(path_points)
self.time = 0.0
# [REFACTOR] I would like to change this to a static method, once I figure out how
def calc_path_length(self, path_points: List[mn.Vector3]) -> float:
# get path length
i, j, summ = 0, 0, 0.0
while i < len(path_points):
summ += (mn.Vector3(path_points[i] - path_points[j])).length()
j = i
i += 1
return summ
def __str__(self) -> str:
return f"PathData(points={type(self.points)}, length={self.length}, time={self.time})"
@dataclass
class ActionOrder:
"""
The ActionOrder class holds the data necessary to command the pathfollower character
to perform a scenic motion at a specified location.
"""
def __init__(
self,
motion_data: Motions.MotionData,
location: Optional[Union[mn.Vector3, np.array]] = None,
facing: Optional[Union[mn.Vector3, np.array]] = None,
) -> None:
self.motion_data = motion_data
self.location = None
if location is not None:
self.location = mn.Vector3(location)
self.facing = None
if facing is not None:
self.facing = mn.Vector3(facing)
# Transitive motions must have a location
if motion_data.type == MType.TRANSITIVE:
assert location is not None
# keeps track of the motion key frame preview modes
class Preview(Enum):
OFF = 0
KEYFRAMES = 1
TRAJECTORY = 2
ALL = 3
# keeps track of the activity that intances model is participating in currently
class Activity(Enum):
NONE = 0
MOTION_STAGE = 1
PATH_FOLLOW_SEQ = 2
SEQUENCE = 3
class Timer:
"""
Timer class used to keep track of time.
"""
start_time = 0.0
@staticmethod
def start() -> None:
"""
Starts timer and resets previous frame time to the start time
"""
Timer.start_time = time.time()
@staticmethod
def check() -> float:
"""
Returns time since last call to `start()`. Only accurate if `start()`
has been called previously.
"""
return time.time() - Timer.start_time
|
{"hexsha": "e9ac508a47940cfc90069782327d8453e0335f2b", "size": 12500, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/fairmotion_interface_utils.py", "max_stars_repo_name": "narekvslife/habitat-sim", "max_stars_repo_head_hexsha": "69ae4848503d5dcc74d6b5920957c1a641ef0a9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-05T22:25:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-05T22:25:02.000Z", "max_issues_repo_path": "examples/fairmotion_interface_utils.py", "max_issues_repo_name": "narekvslife/habitat-sim", "max_issues_repo_head_hexsha": "69ae4848503d5dcc74d6b5920957c1a641ef0a9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/fairmotion_interface_utils.py", "max_forks_repo_name": "narekvslife/habitat-sim", "max_forks_repo_head_hexsha": "69ae4848503d5dcc74d6b5920957c1a641ef0a9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.443148688, "max_line_length": 103, "alphanum_fraction": 0.56456, "include": true, "reason": "import numpy", "num_tokens": 2783}
|
program crystal2d nk
!c use ieee_arithmetic
! implicit double precision (a-h,o-z) dp
!***********************************************************************
! *
! *
! *
! *
! copyright through North Carolina State University
! by M.A. Zikry (1994),all rights reserved
! *
!***********************************************************************
! *
! p l e a s e n o t e *
! *
! *
! recipients of crystal2d are asked not to distribute *
! their source to other facilities.
! *
! Permission must be obtained for use, contact M.A. Zikry *
! *
! *
! *
!***********************************************************************
! *
! l e g a l n o t i ! e *
! *
! *
! copyright and distribution held by North Carolina State University
! this work has been generated by M.A. Zikry and his students
!
!
! This is an implicit two dimensional quasi-static and dynamic code
! specialized for large crystalline deformation, it is tailored for
! inelastic deformation and failure.
!***********************************************************************
! The work for this code is based on the finite -strain high strain-rate
! work developed by M.A. Zikry from papers published in 1990-1994
! that work has been extended and modified in this code
! by M. Kao as part of his doctoral research
! M.A. Zikry and M. Kao do not hold any legal responsibilty
! for use of this code. All questions and need for documentation can be obtained
! from this research group.
!
! This code has been developed based on a constitutive formulation that accounts
! for mobile and immobile dislocation densities coupled to a multiple-slip
! crytalline framework. These formulations are given in modules matslip.
! Details are given in papers by M. A. Zikry and M. Kao
!
!
!
! This is the main program
! this code has been developed for the Sun but there are versions for the Cray
!
!
!
! [ P A R A M E T E R S]
! ......................
use CN_Objects_manager
use EC_Objects_manager
!!!!!!!!!!!! integer, parameter :: nume = 40000
!!!!!!!!!!!! integer, parameter :: nss = 24
real*8 hed vax750
!!!!!! common/WMLBC/BCflag
common/hourglass/fhg(40000,8),fhghis(40000,8)
common/hourglass2/hgsstore(40000),hgshis(40000)
common/hgenergy/hgenerstore(40000),hgenerhis(40000)
common/totalenergy/totenerstore(40000),totenerhis(40000),
1inertener(40000)
common/hgstress/hgstress1store(40000),hgstress2store(40000),
1 hgstress1his(40000),hgstress2his(40000)
common/irdmp1/lendr,lenhr,irt,trt,ityprs
common/bk00/ioff(96)
common/bk01/h4(4,5),p14(4,5),p24(4,5)
common/bk02/ioofc,iphase,imass,lpar(9)
common/bk03/numdc,imassn,idampn,irller,penstf
common/bk04/nprm(8)
common/bk05/ifil,iadd,maxsiz,head(12)
common/bk06/nprnt,mprint,itmpop,numelt,jprint,idump,locstr
common/bk07/mbfc,nelpg,hed(12)
common/bk08/kprint,nstep,ite,ilimit,newstf
common/bk09/maxref,rhsn,rhsvn,cvtl,iteref,ectl,tolls
common/bk10/npb,nodep(2,8)
common/bk11/cnwmk(2),iequit,iprint,isref
common/bk12/ntlen
common/bk14/lfna(15),lfnt(6)
common/bk15/cpuio(36),cpuip(36)
common/bk16/maxint,hgc
common/bk17/dn1,dn2,nwebuf,ntime,numnp,neq,ibar,mthsol
common/bk18/nummat,ityp2d,ako(31)
common/bk20/ntotal
common/bk23/itemp,itherm,irtin
character*4 mess
common/bk25/mess
common/bk26/dt,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10
common/bk27/nlcur,nptst,nthpy,nthpz,nthps,xmy,xmz,xms,nload,nptm
common/bk30/numlp,numpc,h22(2,2),pl2(2,2),h33(3,2),pl3(3,2)
common/bk32/nsref,nequit,time,timep,lprint,nprint
common/bk33/irfreq,krfreq,iress
common/bk49/bulkmx,ncon(30)
logical rezone
common/rezone/rezone,nrzn,nctr,irzcnt,nrezon
common/slar3/nsl,nsntl,nmntl,nslnmx,sltol,slhrd
common/riksw1/sp1,ds0,cs01,cs02,rrsp,linsch,igso,irco,idamp
common/riksw2/rlnew,alfa0,dsx,iteopt,idctrl,riksf,numspu,mthunl
common/automt/dtmin,dtmax,mxback,termtm
common/fissn0/maxneq,mwspac,ntpe0,ntpe1,nfissl(3)
common/fissn1/melemt,nnns,ntpe2,n2g,llls
common/fissn2/nwpblk,numblk,mwsusd,mxnepb,maxch,matpr,mench,ifa(2)
common/fissn3/ifissl,kfissl(3)
common/total/itrlas,irflas,irhlas,itrtot,irftot,irhtot
common/xcom0/imeth,interq,imess
common/cn0/iconv,lpb,nrcc,icon,iband,idirw,ftlst
common/cn1/numati,numnpi,numeli,nblk1,nslidi,ntslvi,ntmsri,
1 nnpbi,nepbi,ncnpi
common/cn2/nlcuri,nlcmxi,ncnldi,npresi,ndbci,nbfri,nbfzi,
1 nbfai,ncnmai,ncndpi,initci
common/cn3/ibbari,intgi,nmbfi,ithopi,ithcri,ithini,iengri,
1 ijinti
common/cn4/itypei,ianali,ibwmni,icorei,ipcori,isolti,gamai,
1 betai,raydmi
common/cn5/delti,nstepi,ioprti,ioplti,irstri,irezi,mthsli,
1 ditoli,entoli,nstrfi,nstiti,nitrfi,nrfsti
common/cn6/nunldi,mthsui,idcndi,idcdri,iarcni,iadmpi,arcszi
common/cn7/tollsi,tolsli,rftani,tolrzi,igeomi
common/taux1/itopaz,ithadd
common/newz1/ibase,iadd5,iadd6,icnt6,locst6
common/effort/number
common/fmeml/fl
character*8 names
character*80 lnkarg
common/filen/names(25)
common/array/maxa,maxadd,ifield
common/args/lnkarg,numargs
common/double/iprec,ncpw,unit
!!!!!!!!!!!!!! common/WMLthermal/thermalflag,thermalconstraint(40000),
!!!!!!!!!!!!!! 1 Tinit(40000),Rqold(40000)
!!!!!!!!!!!!!! common/WMLthermalSolve/Rqolder(40000),Tinitdolder(40000)
! 1 ,dummy(40000,1)
! common/ /b(1) cray1
common /main_block/ b(400000000)
character*8 nameh,namef,namei,namest
!
! define a 'heap' for the file buffers (minimum length = sum of
! buffer lengths + 2*(number of buffers) + 2).
! note: 'hloc=heap' must be included in the input to ldr.
!
common/heap/ bufrs(444000)
common/stack/ bozos(222000)
common/excute/execut(125)
common/vrsn/ vn,cdate,tx1(10),tx2(10)
! version number and compile date
!
*****************
common /wblock1/ iplotDirxy, x_area, yield_stress
common/wblock2/ g_source, g_immob, g_minter, g_recov, b_v,
1 b_vvec(87),nmo,nim
!!! common /wblock3/ density_ms, density_ims,etain(1000),ecin(1000)
common /wblock11/ pd_counter
common /wblock20/ mat_type(nume)
integer pd_counter(nume)!!!!!!!!!!!!, BCflag
character*8 vn,cdate
character*72 tx1,tx2
call open_files
!
pd_counter = 0
!
!.... set precision level
iprec = 1
! iprec = 2 dp
!.... set characters per word
ncpw = 4
! ncpw=8 unics
!.... define precision of constants
unit=1.0
!
call rdarg wkstn
call linky(names)
!!!!!!!! call enablc unix
!
imeth=0
call getnam(lfnt(3),namei)
call getnam(lfnt(4),nameh)
call getnam(lfna(1),namef)
write(*,*)' in crystal2d.f ',namei
write(*,*)' in crystal2d.f ',nameh
write(*,*)' in crystal2d.f ',namef
if ((namef.eq.'rstxyz'.and.nameh.eq.'newfle').or.
& (namei.eq.'convert'))then
!.... input phase
! call ovrlay('crystal2d',1,0,'recall')
call ovrlay(1,0)
!.... initialization phase
! call ovrlay('crystal2d',2,0,'recall')
call ovrlay(2,0)
!.... stress initialization
call getnam(lfna(11),namest)
if(namest.ne.'strxyz') call strset
endif
!.... solution phase
write(*,*) 'calling input_data'
call input_data
write(*,*) '---------after calling input_data'
close (7777)
if(imeth.eq.0.and.nameh.eq.'newfle') then
call solven
endif
!---- add by ismail
call ThermalLoadCleanMemory()
call dtimestepsCleanMemory()
call DiffCoeffTableCleanMemory()
!----------call the CN manager to clean the memory ismail2016-02-17
Call CNCleanMem()
CALL GBCleanMemory()
CALL EC_CleanMem()
end
|
{"hexsha": "18620ff7fea2f7fa5e94ed46ef990c2d7e141d2a", "size": 9640, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/crystal2d.f", "max_stars_repo_name": "imohame/LabCode", "max_stars_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/crystal2d.f", "max_issues_repo_name": "imohame/LabCode", "max_issues_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/crystal2d.f", "max_forks_repo_name": "imohame/LabCode", "max_forks_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-19T08:21:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T02:43:24.000Z", "avg_line_length": 40.3347280335, "max_line_length": 84, "alphanum_fraction": 0.52593361, "num_tokens": 2740}
|
"""Cross-validated training and prediction."""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin, clone
class CVModel(BaseEstimator, ClassifierMixin):
def __init__(self, base_estimator=None):
self.base_estimator = base_estimator
def fit(self, X_train, y_train, fold_num, train_overall=True, **kwargs):
"""Fits a cross-validated model - a model for each left-out fold plus one overall model.
X_train: the training predictors
y_train: the training outcome
fold_num: the indicator of which fold each row belongs to"""
self.model_dict = {}
self.fold_set = np.unique(fold_num)
self.num_unique_y_values = len(np.unique(y_train))
self.num_features = X_train.shape[1]
## If a DataFrame is given (rather than just an array) then make a note of the column names.
## This way we can match up column names when we use predict_proba.
if type(X_train) == pd.DataFrame:
self.fit_columns = np.array(X_train.columns)
else:
self.fit_columns = None
## Make copies of the estimator for each of the fold models
for fold in self.fold_set:
self.model_dict[fold] = clone(self.base_estimator)
## Train the separate models, each one leaving out a particular fold in training
for fold in self.fold_set:
print("Leave out fold {} and train on the rest".format(fold))
X_tr = X_train[fold_num != fold]
y_tr = y_train[fold_num != fold]
self.model_dict[fold].fit(X_tr, y_tr, **kwargs)
## Train the overall model on all of the data
if train_overall:
print("Train the overall model".format(fold))
self.model_dict['overall_model'] = clone(self.base_estimator)
self.model_dict['overall_model'].fit(X_train, y_train, **kwargs)
return self
def predict_proba(self, X_test, fold_num=None, **kwargs):
"""Predict probabilities in cross-validated fashion.
X_test: the data to predict on
fold_num: the indicator of which fold a row belongs to / which model variant to use.
If fold_num is not specified, it will default to use the overall_model
"""
## If we have column names and X_test is a DataFrame, then subset X_test to those columns
## in the correct order, and error if those columns are not present
if self.fit_columns is not None:
if type(X_test) == pd.DataFrame:
X_test = X_test.loc[:, self.fit_columns]
if fold_num is None:
#print("no folds specified, using overall_model")
if 'overall_model' not in self.model_dict.keys():
#print("Error: overall_model not trained and fold_num not specified")
return None
else:
results = self.model_dict['overall_model'].predict_proba(X_test, **kwargs)
return results
else:
results = np.zeros((X_test.shape[0], self.num_unique_y_values))
fold_set = np.unique(fold_num)
for fold in fold_set:
X_te = X_test[fold_num == fold]
fold_results = self.model_dict[fold].predict_proba(X_te, **kwargs)
results[fold_num==fold] = fold_results
return results
def predict(self, X_test, fold_num=None, **kwargs):
"""Predict final values in cross-validated fashion.
X_test: the data to predict on
fold_num: the indicator of which fold a row belongs to / which model variant to use.
If fold_num is not specified, it will default to use the overall_model
"""
## If we have column names and X_test is a DataFrame, then subset X_test to those columns
## in the correct order, and error if those columns are not present
if self.fit_columns is not None:
if type(X_test) == pd.DataFrame:
X_test = X_test.loc[:, self.fit_columns]
if fold_num is None:
#print("no folds specified, using overall_model")
if 'overall_model' not in self.model_dict.keys():
print("Error: overall_model not trained and fold_num not specified")
return None
else:
results = self.model_dict['overall_model']
return results
else:
results = np.zeros(X_test.shape[0])
fold_set = np.unique(fold_num)
for fold in fold_set:
X_te = X_test[fold_num == fold]
fold_results = self.model_dict[fold].predict(X_te, **kwargs)
results[fold_num==fold] = fold_results
return results
def grid_search(self, X, y, fold_ind, param_grid, score_fn, verbose=True):
param_arg_list = _get_param_settings_from_grid(param_grid)
num_settings = len(param_arg_list)
print("Size of grid to search = {} different settings".format(num_settings))
param_list_scores = np.zeros(num_settings)
old_self = clone(self.base_estimator)
for i in range(num_settings):
print("Fitting setting {} of {}".format(i+1,num_settings))
curr_param_dict = param_arg_list[i]
if verbose:
print(curr_param_dict)
self.base_estimator.set_params(**curr_param_dict)
self.fit(X, y, fold_ind, train_overall=False)
curr_preds = self.predict_proba(X, fold_ind)
if type(score_fn) == list:
for j, fn in enumerate(score_fn):
curr_score= fn(y, curr_preds)
param_arg_list[i]['score_'+str(j)] = curr_score
if verbose:
print(curr_param_dict,'score function '+str(j)+':',curr_score)
else:
curr_score= score_fn(y, curr_preds)
param_arg_list[i]['score'] = curr_score
if verbose:
print(curr_param_dict,'score function '+':',curr_score)
param_list_scores[i]=curr_score
self.base_estimator = old_self
return param_arg_list
def _get_param_settings_from_grid(param_grid):
num_settings = np.prod([len(i) for i in param_grid.values()])
pg_tuple = tuple(param_grid.items())
param_names = [k[0] for k in pg_tuple]
param_lists = [k[1] for k in pg_tuple]
param_list_lengths = [len(k) for k in param_lists]
param_dict_list = []
for i in range(num_settings):
indices = _int_to_indices(i,param_list_lengths)
curr_param_dict = {}
for k in range(len(param_names)):
curr_param_dict[param_names[k]]=param_lists[k][indices[k]]
param_dict_list.append(curr_param_dict)
return param_dict_list
def _int_to_indices(j,lengths):
out_list = []
for i in range(len(lengths)):
curr_ind = j % lengths[i]
out_list.append(curr_ind)
j = j//lengths[i]
return(out_list)
|
{"hexsha": "b4c3207a5e0c3591a483e6408b4aed5f8ab28c39", "size": 7080, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml_insights/CVModel.py", "max_stars_repo_name": "JustinKurland/introspective", "max_stars_repo_head_hexsha": "3626c5a176c70fb6d09071307949032b5ff4f0e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 126, "max_stars_repo_stars_event_min_datetime": "2016-11-11T22:41:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T07:42:48.000Z", "max_issues_repo_path": "ml_insights/CVModel.py", "max_issues_repo_name": "JustinKurland/introspective", "max_issues_repo_head_hexsha": "3626c5a176c70fb6d09071307949032b5ff4f0e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2016-10-28T21:43:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-27T14:46:04.000Z", "max_forks_repo_path": "ml_insights/CVModel.py", "max_forks_repo_name": "JustinKurland/introspective", "max_forks_repo_head_hexsha": "3626c5a176c70fb6d09071307949032b5ff4f0e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2016-11-12T23:25:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T19:22:48.000Z", "avg_line_length": 43.1707317073, "max_line_length": 102, "alphanum_fraction": 0.618079096, "include": true, "reason": "import numpy", "num_tokens": 1525}
|
"""
Script for performing a fit to a histogramm of recorded
time differences for the use with QNet
"""
import scipy.optimize as optimize
import numpy
import pylab
import sys
#import optimalbins
def main(bincontent=None,binning = (0,10,21), fitrange = None):
def decay(p,x):
return p[0]*numpy.exp(-x/p[1])+p[2]
def error(p,x,y):
return decay(p,x)-y
if bincontent is None:
nbins = 10
xmin = 1.0
xmax = 20.0
times = [float(l) for l in open(sys.argv[1]).readlines() if xmin<float(l)<xmax]
print len(times),"decay times"
#nbins = optimalbins.optbinsize(times,1,80)
#print nbins, 'Optimalbins selects nbins'
#nbins = optimalbins.optbinsize(times,1,30)
print "Nbins:",nbins
bin_edges = numpy.linspace(binning[0],binning[1],binning[2])
bin_centers = bin_edges[:-1] + 0.5*(bin_edges[1]-bin_edges[0])
hist,edges = numpy.histogram(times,bin_edges)
#hist=hist[:-1]
p0 = numpy.array([200,2.0,5])
output = optimize.leastsq(error,p0,args=(bin_centers,hist),full_output=1)
p = output[0]
covar = output[1]
print "Fit parameters:",p
print "Covariance matrix:",covar
chisquare=0.
deviations=error(p,bin_centers,hist)
for i,d in enumerate(deviations):
chisquare += d*d/decay(p,bin_centers[i])
params = {'legend.fontsize': 13}
pylab.rcParams.update(params)
fitx=numpy.linspace(xmin,xmax,100)
pylab.plot(bin_centers,hist,"b^",fitx,decay(p,fitx),"b-")
pylab.ylim(0,max(hist)+100)
pylab.xlabel("Decay time in microseconds")
pylab.ylabel("Events in time bin")
# pylab.legend(("Data","Fit: (%4.2f +- %4.2f) microsec,chisq/ndf=%4.2f"%(p[1],numpy.sqrt(covar[1][1]),chisquare/(nbins-len(p)))))
pylab.legend(("Data","Fit: (%4.2f) microsec,chisq/ndf=%4.2f"%(p[1]),chisquare/(nbins-len(p))))
pylab.grid()
pylab.savefig("fit.png")
else:
# this is then used for the mudecaywindow
# in muonic
# we have to adjust the bins
# to the values of the used histogram
if len(bincontent) == 0:
print 'WARNING: Empty bins.'
return None
bins = numpy.linspace(binning[0],binning[1],binning[2])
bin_centers = bins[:-1] + 0.5*(bins[1]-bins[0])
if fitrange is not None:
if fitrange[0] < binning[0]:
fitrange = (binning[0], fitrange[1])
if fitrange[1] > binning[1]:
fitrange = (fitrange[0],binning[1])
bin_mask = [(bin_centers <= fitrange[1]) & (bin_centers >= fitrange[0])]
bin_centers_ = numpy.asarray([x for x in bin_centers if (x <= fitrange[1] and x >= fitrange[0])])
if len(bin_centers_) < 3:
print 'WARNING: fit range too small. Skipping fitting. Try with larger fit range.'
return None
else:
bin_centers = bin_centers_
bincontent = bincontent[bin_mask]
# we cut the leading edge of the distribution away for the fit
glob_max = max(bincontent)
cut = 0
for i in enumerate(bincontent):
if i[1] == glob_max:
cut = i[0]
cut_bincontent = bincontent[cut:]
cut_bincenter = bin_centers[cut]
cut_bincenters = bin_centers[cut:]
# maybe something for the future..
#nbins = optimalbins.optbinsize(cut_bincontent,1,20)
#fit_bins = n.linspace(cut_bincenter,20,nbins)
#fit_bin_centers = fit_bins[:-1] + 0.5*(fit_bins[1]-fit_bins[0])
#fit_bincontent = n.zeros(len(fit_bin_centers))
## the bincontent must be redistributed to fit_bincontent
#for binindex_fit in xrange(len(fit_bincontent)):
# for binindex,content in enumerate(bincontent):
# if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
# fit_bincontent[binindex_fit] += content
p0 = numpy.array([200,2.0,5])
#output = optimize.leastsq(error,p0,args=(fit_bin_centers,fitbincontent),full_output=1)
output = optimize.leastsq(error,p0,args=(cut_bincenters,cut_bincontent),full_output=1)
p = output[0]
covar = output[1]
print "Fit parameters:",p
print "Covariance matrix:",covar
chisquare=0.
deviations=error(p,cut_bincenters,cut_bincontent)
for i,d in enumerate(deviations):
chisquare += d*d/decay(p,cut_bincenters[i])
params = {'legend.fontsize': 13}
pylab.rcParams.update(params)
#nbins = 84
nbins = len(bins)
xmin = cut_bincenters[0]
xmax = cut_bincenters[-1]
fitx=numpy.linspace(xmin,xmax,100)
#return (bin_centers,bincontent,fitx,decay,p,covar,chisquare,nbins)
return (cut_bincenters,cut_bincontent,fitx,decay,p,covar,chisquare,nbins)
def gaussian_fit(bincontent,binning = (0,2,10), fitrange = None):
def gauss(p,x):
return p[0]*(1/((p[1]*numpy.sqrt(2*numpy.pi))))*numpy.exp(-0.5*(((x - p[2])/p[1])**2))
def error(p,x,y):
return gauss(p,x)-y
if len(bincontent) == 0:
print 'WARNING: Empty bins.'
return None
# this is then used for the mudecaywindow
# in muonic
# we have to adjust the bins
# to the values of the used histogram
bins = numpy.linspace(binning[0],binning[1],binning[2])
bin_centers = bins[:-1] + 0.5*(bins[1]-bins[0])
if fitrange is not None:
if fitrange[0] < binning[0]:
fitrange = (binning[0], fitrange[1])
if fitrange[1] > binning[1]:
fitrange = (fitrange[0],binning[1])
bin_mask = [(bin_centers <= fitrange[1]) & (bin_centers >= fitrange[0])]
bin_centers_ = numpy.asarray([x for x in bin_centers if (x <= fitrange[1] and x >= fitrange[0])])
if len(bin_centers_) < 3:
print 'WARNING: fit range too small. Skipping fitting. Try with larger fit range.'
return None
else:
bin_centers = bin_centers_
bincontent = bincontent[bin_mask]
# we cut the leading edge of the distribution away for the fit
#glob_max = max(bincontent)
#cut = 0
#for i in enumerate(bincontent):
# if i[1] == glob_max:
# cut = i[0]
cut_bincontent = bincontent#[cut:]
cut_bincenter = bin_centers#[cut]
cut_bincenters = bin_centers#[cut:]
# maybe something for the future..
#nbins = optimalbins.optbinsize(cut_bincontent,1,20)
#fit_bins = n.linspace(cut_bincenter,20,nbins)
#fit_bin_centers = fit_bins[:-1] + 0.5*(fit_bins[1]-fit_bins[0])
#fit_bincontent = n.zeros(len(fit_bin_centers))
## the bincontent must be redistributed to fit_bincontent
#for binindex_fit in xrange(len(fit_bincontent)):
# for binindex,content in enumerate(bincontent):
# if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
# fit_bincontent[binindex_fit] += content
# p0 = numpy.array([20,1.0,5])
wsum = cut_bincontent.sum()
mean = (cut_bincontent * cut_bincenters ).sum() / wsum
meansquared = (cut_bincontent * cut_bincenters**2 ).sum() /wsum
var = meansquared - mean**2
p0 = numpy.array([max(cut_bincontent),var,mean])
#output = optimize.leastsq(error,p0,args=(fit_bin_centers,fitbincontent),full_output=1)
output = optimize.leastsq(error,p0,args=(cut_bincenters,cut_bincontent),full_output=1)
p = output[0]
covar = output[1]
print "Fit parameters:",p
print "Covariance matrix:",covar
chisquare=0.
deviations=error(p,cut_bincenters,cut_bincontent)
for i,d in enumerate(deviations):
chisquare += d*d/gauss(p,cut_bincenters[i])
params = {'legend.fontsize': 13}
pylab.rcParams.update(params)
#nbins = 84
nbins = len(bins)
xmin = cut_bincenters[0]
xmax = cut_bincenters[-1]
fitx=numpy.linspace(xmin,xmax,100)
#return (bin_centers,bincontent,fitx,decay,p,covar,chisquare,nbins)
return (cut_bincenters,cut_bincontent,fitx,gauss,p,covar,chisquare,nbins)
if __name__ == '__main__':
main()
|
{"hexsha": "5767384faba457308d0537d25d0cef4345e3b74e", "size": 8464, "ext": "py", "lang": "Python", "max_stars_repo_path": "muonic/analysis/fit.py", "max_stars_repo_name": "LambdaDigamma/muonic", "max_stars_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-05-12T14:11:08.000Z", "max_stars_repo_stars_event_max_datetime": "2015-08-09T17:57:59.000Z", "max_issues_repo_path": "muonic/analysis/fit.py", "max_issues_repo_name": "LambdaDigamma/muonic", "max_issues_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-12T14:07:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T10:17:39.000Z", "max_forks_repo_path": "muonic/analysis/fit.py", "max_forks_repo_name": "LambdaDigamma/muonic", "max_forks_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-06-10T08:34:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T21:41:08.000Z", "avg_line_length": 33.7211155378, "max_line_length": 137, "alphanum_fraction": 0.5972353497, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2361}
|
import kbmod
import numpy
import re
import pdb
# layered image functions
def science_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_science(), copy=copy_data )
def mask_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_mask(), copy=copy_data )
def variance_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_variance(), copy=copy_data )
def pool_science(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_science_pooled(), copy=copy_data )
def pool_variance(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_variance_pooled(), copy=copy_data )
kbmod.layered_image.science = science_to_numpy
kbmod.layered_image.mask = mask_to_numpy
kbmod.layered_image.variance = variance_to_numpy
kbmod.layered_image.pool_science = pool_science
kbmod.layered_image.pool_variance = pool_variance
# stack functions
def master_mask_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return numpy.array( self.get_master_mask(), copy=copy_data )
def sciences_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return [ numpy.array( img, copy=copy_data ) for img in self.get_sciences()]
def masks_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return [ numpy.array( img, copy=copy_data ) for img in self.get_masks()]
def variances_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return [ numpy.array( img, copy=copy_data ) for img in self.get_variances()]
kbmod.image_stack.master_mask = master_mask_to_numpy
kbmod.image_stack.sciences = sciences_to_numpy
kbmod.image_stack.masks = masks_to_numpy
kbmod.image_stack.variances = variances_to_numpy
# search functions
def psi_images_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return [ numpy.array( img, copy=copy_data ) for img in self.get_psi_images()]
def phi_images_to_numpy(self, copy_data=False):
if copy_data == None:
copy_data = False
return [numpy.array( img, copy=copy_data ) for img in self.get_phi_images()]
def lightcurve(self, t):
psi = numpy.array(self.psi_curves(t))
phi = numpy.array(self.phi_curves(t))
return (psi,phi)
kbmod.stack_search.get_psi = psi_images_to_numpy
kbmod.stack_search.get_phi = phi_images_to_numpy
kbmod.stack_search.lightcurve = lightcurve
# trajectory utilities
def compare_trajectory(a, b, v_thresh, pix_thresh):
# compare flux too?
if (b.obs_count == 0 and
abs(a.x-b.x)<=pix_thresh and
abs(a.y-b.y)<=pix_thresh and
abs(a.x_v-b.x_v)<v_thresh and
abs(a.y_v-b.y_v)<v_thresh):
b.obs_count += 1
return True
else:
return False
def match_trajectories(results_list, test_list, v_thresh, pix_thresh):
matches = []
unmatched = []
for r in results_list:
if any(compare_trajectory(r, test, v_thresh, pix_thresh)
for test in test_list):
matches.append(r)
for t in test_list:
if (t.obs_count == 0):
unmatched.append(t)
t.obs_count = 0
return matches, unmatched
def score_results(results, test, v_thresh, pix_thresh):
score = 0
for t in range(len(test)):
for i in range(len(results)):
if (compare_trajectory(results[i], test[t], v_thresh, pix_thresh)):
score += i
test[t].obs_count = 0
break
if (i==len(results)-1):
score += i
return score/len(test)
def save_trajectories(t_list, path):
if (len(t_list) == 0):
return
if (type(t_list[0]) == kbmod.traj_region):
t_list = region_to_grid(t_list)
with open(path, 'w+') as f:
for t in t_list:
f.write(str(t)+'\n')
def load_trajectories(path):
t_list = []
with open(path, 'r') as f:
for line in f.readlines():
nums = re.findall(r"[-+]?\d*\.\d+|\d+", line)
t = kbmod.trajectory()
t.lh = float(nums[0])
t.flux = float(nums[1])
t.x = int(float(nums[2]))
t.y = int(float(nums[3]))
t.x_v = float(nums[4])
t.y_v = float(nums[5])
t.obs_count = int(float(nums[6]))
t_list.append(t)
return t_list
def grid_to_region(t_list, duration):
r_list = []
for t in t_list:
r = kbmod.traj_region()
r.ix = t.x
r.iy = t.y
r.fx = t.x+t.x_v*duration
r.fy = t.y+t.y_v*duration
r.depth = 0
r.obs_count = t.obs_count
r.likelihood = t.lh
r.flux = t.flux
r_list.append(r)
return r_list
def region_to_grid(r_list, duration):
t_list = []
for r in r_list:
t = kbmod.trajectory()
t.x = int(r.ix)
t.y = int(r.iy)
t.x_v = (r.fx-r.ix)/duration
t.y_v = (r.fy-r.iy)/duration
t.lh = r.likelihood
t.flux = r.flux
t.obs_count = r.obs_count
t_list.append(t)
return t_list
kbmod.save_trajectories = save_trajectories
kbmod.load_trajectories = load_trajectories
kbmod.grid_to_region = grid_to_region
kbmod.region_to_grid = region_to_grid
kbmod.match_trajectories = match_trajectories
kbmod.score_results = score_results
# constants
kbmod.__version__ = "0.3.4"
kbmod.pool_max = 1
kbmod.pool_min = 0
kbmod.no_data = -9999.0
|
{"hexsha": "c313b1632d20734ccc854cd626d695878629d1f7", "size": 5629, "ext": "py", "lang": "Python", "max_stars_repo_path": "search/pybinds/kbmodpy.py", "max_stars_repo_name": "fraserw/kbmod", "max_stars_repo_head_hexsha": "65d69746d1dd8de867f8da147d73c09439d28b41", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-07-23T11:39:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T17:15:42.000Z", "max_issues_repo_path": "search/pybinds/kbmodpy.py", "max_issues_repo_name": "fraserw/kbmod", "max_issues_repo_head_hexsha": "65d69746d1dd8de867f8da147d73c09439d28b41", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2017-06-19T22:55:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-15T02:49:39.000Z", "max_forks_repo_path": "search/pybinds/kbmodpy.py", "max_forks_repo_name": "fraserw/kbmod", "max_forks_repo_head_hexsha": "65d69746d1dd8de867f8da147d73c09439d28b41", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-07-23T11:39:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T18:43:02.000Z", "avg_line_length": 29.4712041885, "max_line_length": 80, "alphanum_fraction": 0.6502042992, "include": true, "reason": "import numpy", "num_tokens": 1494}
|
import itertools
import numpy as np
from arqtic.program import Program, Gate
from arqtic.exceptions import Error
import scipy
from sklearn.linear_model import Lasso, LinearRegression
import qiskit as qk
from qiskit.aqua.operators.primitive_ops import MatrixOp
from qiskit import Aer, execute
import scipy.linalg as la
#define Pauli matrics
X = np.array([[0.0,1.0],[1.0,0.0]])
Y = np.array([[0,-1.0j],[1.0j,0.0]])
Z = np.array([[1.0,0.0],[0.0,-1.0]])
I = np.eye(2)
gate_matrix_dict = {
"X": X,
"Y": Y,
"Z": Z,
"I": I,
}
#Pauli I = 0
#Pauli X = 1
#Pauli Y = 2
#Pauli Z = 3
#PauliMultTable1q_ij gives the Pauli (w/o coeff) given by Pauli_i*Pauli_j
#PauliMultCoeffTable1q_ij gives this coefficient
#The 1q denotes that the Paulis along the row/columns are given by 1-qubit Paulis (i.e. I,X,Y,Z)
PauliMultTable1q = np.array([[0, 1, 2, 3],
[1, 0, 3, 2],
[2, 3, 0, 1],
[3, 2, 1, 0]])
PauliMultCoeffTable1q = np.array([[1, 1, 1, 1],
[1, 1, 1j, -1j],
[1, -1j, 1, 1j],
[1, 1j, -1j, 1]])
#When we move to Pauli operators that act on 2qubits (i.e. II,IX,IY,IZ,XI,...,ZZ) we need new tables
PauliMultTable2q=np.zeros((16,16))
for i in range(16):
for j in range(16):
PauliMultTable2q[i,j]=int(4*PauliMultTable1q[int(np.floor(i/4)),int(np.floor(j/4))]+PauliMultTable1q[i%4,j%4])
PauliMultCoeffTable2q=np.array(np.zeros((16,16)), dtype=complex)
for i in range(16):
for j in range(16):
PauliMultCoeffTable2q[i,j]=PauliMultCoeffTable1q[int(np.floor(i/4)),int(np.floor(j/4))]*PauliMultCoeffTable1q[i%4,j%4]
PauliMultTable3q=np.zeros((64,64))
for i in range(64):
for j in range(64):
PauliMultTable3q[i,j]=int(16*PauliMultTable2q[int(np.floor(i/16)),int(np.floor(j/16))]+PauliMultTable2q[i%16,j%16])
PauliMultCoeffTable3q=np.array(np.zeros((64,64)), dtype=complex)
for i in range(64):
for j in range(64):
PauliMultCoeffTable3q[i,j]=PauliMultCoeffTable2q[int(np.floor(i/16)),int(np.floor(j/16))]*PauliMultCoeffTable2q[i%16,j%16]
def make_Pauli_basis(domain_size):
pauli_basis_ops = []
for s in itertools.product(['I', 'X', 'Y', 'Z'], repeat=domain_size):
op = [1]
for d in range(domain_size):
op = np.kron(op,gate_matrix_dict[s[d]])
pauli_basis_ops.append(op)
return pauli_basis_ops
def pauli_basis_names(domain_size):
pauli_basis_names = []
for s in itertools.product(['I', 'X', 'Y', 'Z'], repeat=domain_size):
pauli_basis_names.append(s)
return pauli_basis_names
def get_PauliBasis_hamTFIM(Jz, mu_x, nspins, domain, pbc=False):
#check that domain=1 only when nspins=1
if (domain == 1):
if (nspins > 1):
raise Error('Domain can only be set to 1 with nspins=1')
#check that domain <= nspins
if (domain > nspins):
raise Error('Domain is larger than the number of spins')
#check that domain < 4
if (domain > 3):
raise Error('At this time, domain is limited to 3')
#define active qubit sets based on domain and nspins
active_qubit_sets = []
nsets = nspins-domain+1
if (pbc ==True): nsets+=1
for i in range(nsets):
qset = []
for j in range(domain):
qset.append((i+j)%nspins)
active_qubit_sets.append(qset)
#build up TFIM hamiltonian in Pauli basis
H = []
#create a ham_term for each set of active qubits
for i in range(nsets):
hterm = []
hterm.append(active_qubit_sets[i])
hm_array = [0]*4**domain
if (domain == 1):
#add transverse field term "X" to the one active qubit set
hm_array[1] = mu_x
if (domain == 2):
#add exchange correlation term "ZZ" to all active qubit sets
hm_array[15] = Jz
#add transverse field term "XI" to all active qubit sets
hm_array[4] = mu_x
#add transverse field term "IX" to last active qubit set if PBC is false
if (pbc == False):
if (i == nsets-1):
hm_array[1] = mu_x
if (domain == 3):
#add exchange correlation term "ZZI" to all active qubit sets
hm_array[60] = Jz
#add exchange correlation term "IZZ" to last active qubit set
if (i == nsets-1):
hm_array[15] = Jz
#add transverse field term "XII" to all active qubit sets
hm_array[16] = mu_x
#add transverse field term "IXI" to last active qubit set
if (i == nsets-1):
hm_array[4] = mu_x
#add transverse field term "IIX" to last active qubit set if PBC is false
if (pbc == False):
hm_array[1] = mu_x
hterm.append(hm_array)
H.append(hterm)
return H
def get_PauliBasis_Heisenberg_Ham(sim_obj, domain, pbc=False):
nspins = sim_obj.num_spins
#check that domain=1 only when nspins=1
if (domain == 1):
if (nspins > 1):
raise Error('Domain can only be set to 1 with nspins=1')
#check that domain <= nspins
if (domain > nspins):
raise Error('Domain is larger than the number of spins')
#check that domain < 4
if (domain > 3):
raise Error('At this time, domain is limited to 3')
#define active qubit sets based on domain and nspins
active_qubit_sets = []
nsets = nspins-domain+1
if (pbc ==True): nsets+=1
for i in range(nsets):
qset = []
for j in range(domain):
qset.append((i+j)%nspins)
active_qubit_sets.append(qset)
#build up TFIM hamiltonian in Pauli basis
H = []
#create a ham_term for each set of active qubits
for i in range(nsets):
hterm = []
hterm.append(active_qubit_sets[i])
hm_array = [0]*4**domain
if (domain == 1):
if len(sim_obj.hx) > 0:
#add transverse field term "X" to the one active qubit set
hm_array[1] = sim_obj.hx[0]
if len(sim_obj.hy) > 0:
#add transverse field term "Y" to the one active qubit set
hm_array[2] = sim_obj.hy[0]
if len(sim_obj.hz) > 0:
#add transverse field term "Z" to the one active qubit set
hm_array[3] = sim_obj.hz[0]
if (domain == 2):
if len(sim_obj.Jx) > 0:
#add exchange interaction term "XX" to all active qubit sets
hm_array[5] = sim_obj.Jx[0]
if len(sim_obj.Jy) > 0:
#add exchange interaction term "YY" to all active qubit sets
hm_array[10] = sim_obj.Jy[0]
if len(sim_obj.Jz) > 0:
#add exchange interaction term "ZZ" to all active qubit sets
hm_array[15] = sim_obj.Jz[0]
if len(sim_obj.hx) > 0:
#add transverse field term "XI" to all active qubit sets
hm_array[4] = sim_obj.hx[0]
#add transverse field term "IX" to last active qubit set if PBC is false
if (pbc == False):
if (i == nsets-1):
hm_array[1] = sim_obj.hx[0]
if len(sim_obj.hy) > 0:
#add transverse field term "YI" to all active qubit sets
hm_array[8] = sim_obj.hy[0]
#add transverse field term "IY" to last active qubit set if PBC is false
if (pbc == False):
if (i == nsets-1):
hm_array[2] = sim_obj.hy[0]
if len(sim_obj.hz) > 0:
#add transverse field term "ZI" to all active qubit sets
hm_array[12] = sim_obj.hz[0]
#add transverse field term "IZ" to last active qubit set if PBC is false
if (pbc == False):
if (i == nsets-1):
hm_array[3] = sim_obj.hz[0]
if (domain == 3):
if len(sim_obj.Jx) > 0:
#add exchange interaction term "XXI" to all active qubit sets
hm_array[20] = sim_obj.Jx[0]
#add exchange interaction term "IXX" to last active qubit set
if (i == nsets-1):
hm_array[5] = sim_obj.Jx[0]
if len(sim_obj.Jy) > 0:
#add exchange interaction term "YYI" to all active qubit sets
hm_array[40] = sim_obj.Jy[0]
#add exchange interaction term "IYY" to last active qubit set
if (i == nsets-1):
hm_array[10] = sim_obj.Jy[0]
if len(sim_obj.Jz) > 0:
#add exchange interaction term "ZZI" to all active qubit sets
hm_array[60] = sim_obj.Jz[0]
#add exchange interaction term "IZZ" to last active qubit set
if (i == nsets-1):
hm_array[15] = sim_obj.Jz[0]
if len(sim_obj.hx) > 0:
#add transverse field term "XII" to all active qubit sets
hm_array[16] = sim_obj.hx[0]
#add transverse field term "IXI" to last active qubit set
if (i == nsets-1):
hm_array[4] = sim_obj.hx[0]
#add transverse field term "IIX" to last active qubit set if PBC is false
if (pbc == False):
hm_array[1] = sim_obj.hx[0]
if len(sim_obj.hy) > 0:
#add transverse field term "YII" to all active qubit sets
hm_array[32] = sim_obj.hy[0]
#add transverse field term "IYI" to last active qubit set
if (i == nsets-1):
hm_array[8] = sim_obj.hy[0]
#add transverse field term "IIY" to last active qubit set if PBC is false
if (pbc == False):
hm_array[2] = sim_obj.hy[0]
if len(sim_obj.hz) > 0:
#add transverse field term "ZII" to all active qubit sets
hm_array[48] = sim_obj.hz[0]
#add transverse field term "IZI" to last active qubit set
if (i == nsets-1):
hm_array[12] = sim_obj.hz[0]
#add transverse field term "IIZ" to last active qubit set if PBC is false
if (pbc == False):
hm_array[3] = sim_obj.hz[0]
hterm.append(hm_array)
H.append(hterm)
return H
def get_exepctation_values_th(psi, Pauli_basis, active_qubits, nspins):
exp_values = []
for i in range(len(Pauli_basis)):
#enable pauli_basis operator to act on entire qubit system
full_op = [1]
pauli_op_not_applied = True
for k in range(nspins):
if (k in active_qubits):
if(pauli_op_not_applied):
full_op = np.kron(full_op, Pauli_basis[i])
pauli_op_not_applied = False
else:
full_op = np.kron(full_op,np.eye(2))
#get expectation value of full pauli operator in state psi
exp_values.append(np.real(np.dot(np.transpose(np.conj(psi)),np.dot(full_op,psi))))
return exp_values
def get_energy_from_exps(exp_vals, ham):
energy = 0
for i in range(len(exp_vals)):
energy += exp_vals[i]*ham[i]
return energy
def compute_norm(expectation_values, dbeta, ham, domain):
norm = 0
Pm_coeffs = -dbeta*ham
Pm_coeffs[0] += 1
for i, j in itertools.product(range(len(ham)), repeat=2):
if (domain == 1):
norm += np.conj(Pm_coeffs[i])*Pm_coeffs[j]*PauliMultCoeffTable1q[i,j]*expectation_values[int(PauliMultTable1q[i,j])]
if (domain == 2):
norm += np.conj(Pm_coeffs[i])*Pm_coeffs[j]*PauliMultCoeffTable2q[i,j]*expectation_values[int(PauliMultTable2q[i,j])]
if (domain == 3):
norm += np.conj(Pm_coeffs[i])*Pm_coeffs[j]*PauliMultCoeffTable3q[i,j]*expectation_values[int(PauliMultTable3q[i,j])]
return np.sqrt(norm)
def compute_Smatrix(expectation_values, ham, domain):
dim = len(expectation_values)
S=np.zeros((dim,dim))
for i, j in itertools.product(range(len(ham)), repeat=2):
if (domain == 1):
S[i,j] = 2*np.real(PauliMultCoeffTable1q[i,j]*expectation_values[int(PauliMultTable1q[i,j])])
if (domain == 2):
S[i,j] = 2*np.real(PauliMultCoeffTable2q[i,j]*expectation_values[int(PauliMultTable2q[i,j])])
if (domain == 3):
S[i,j] = 2*np.real(PauliMultCoeffTable3q[i,j]*expectation_values[int(PauliMultTable3q[i,j])])
return S
def compute_bvec(expectation_values, dbeta, ham, norm, domain):
dim = len(expectation_values)
b = np.zeros(dim)
Pm = -dbeta*ham
Pm[0] += 1
for i, j in itertools.product(range(len(ham)), repeat=2):
if (domain == 1):
b[i]+=2*np.imag((np.conj(Pm[j])/norm)*PauliMultCoeffTable1q[j,i]*expectation_values[int(PauliMultTable1q[i,j])])
if (domain == 2):
b[i]+=2*np.imag((np.conj(Pm[j])/norm)*PauliMultCoeffTable2q[j,i]*expectation_values[int(PauliMultTable2q[i,j])])
if (domain == 3):
b[i]+=2*np.imag((np.conj(Pm[j])/norm)*PauliMultCoeffTable3q[j,i]*expectation_values[int(PauliMultTable3q[i,j])])
return b
def qite_step(psi, pauli_basis, active_qubits, nspins, dbeta, ham, domain,regularizer):
#get expectation values of Pauli basis operators for state psi
exp_values = get_exepctation_values_th(psi, pauli_basis, active_qubits, nspins)
#get energy from exp_values
energy = get_energy_from_exps(exp_values, ham)
#compute S matrix
S_mat = compute_Smatrix(exp_values, ham, domain)
#compute norm of sum of Pauli basis ops on psi
norm = compute_norm(exp_values, dbeta, ham, domain)
#compute b-vector
b_vec = compute_bvec(exp_values, dbeta, ham, norm, domain)
#solve linear equation for x
#dalpha = np.eye(len(pauli_basis))*regularizer
#x = np.linalg.lstsq(S_mat + dalpha, -b_vec, rcond=-1)[0]
#x = np.linalg.lstsq(S_mat,-b_vec,rcond=-1)[0]
#clf = Lasso(alpha=regularizer)
reg = LinearRegression()
reg.fit(S_mat, b_vec)
x = reg.coef_
return x, energy
def get_new_psi(psi0, A_ops, pauli_basis, nspins, domain):
psi = psi0
for i in range(len(A_ops)):
active_qubits = A_ops[i][0]
op = np.zeros((2**domain,2**domain), dtype=complex)
for j in range(len(pauli_basis)):
op += A_ops[i][1][j]*pauli_basis[j]
#exponentiate op
exp_op = scipy.linalg.expm(1j*op)
#exp_op just acts on active qubits so convert to op that acts on whole system
exp_op_full = [1]
exp_op_not_applied = True
for k in range(nspins):
if (k in active_qubits):
if(exp_op_not_applied):
exp_op_full = np.kron(exp_op_full, exp_op)
exp_op_not_applied = False
else:
exp_op_full = np.kron(exp_op_full,np.eye(2))
psi = np.dot(exp_op_full, psi)
psi = psi/np.linalg.norm(psi)
return psi
def get_state_from_string(string):
psi = [1]
#rev_str = str(string)[::-1]
for q in string:
if (q == '0'):
psi = np.kron(psi,np.array([1,0]))
elif (q == '1'):
psi = np.kron(psi,np.array([0,1]))
else:
print("bad value for initial psi")
return psi
def Aop_to_Terms(A, domain):
nqubits = len(A[0])
nops = len(A[1])
names = pauli_basis_names(domain)
terms = []
for i in range(nops):
coeff = A[1][i]
if (abs(coeff) > 1e-12):
paulis = []
for j in range(domain):
if (names[i][j] != "I"):
paulis.append(prog.Pauli(names[i][j],A[0][j]))
term = prog.Term(paulis, coeff)
if (len(paulis) > 0):
terms.append(term)
return terms
def Aop_to_matrix(A, domain):
unitary_mat = []
qubits = A[0]
unitary_mat.append(qubits)
coeffs = A[1]
names = pauli_basis_names(domain)
total_mat = np.zeros((2**domain, 2**domain), dtype=complex)
for i in range(len(coeffs)):
coeff = coeffs[i]
pauli_mat = [1]
for j in range(domain):
pauli = gate_matrix_dict[names[i][j]]
pauli_mat = np.kron(pauli_mat, pauli)
pauli_mat *= coeff
total_mat += pauli_mat
unitary_mat.append(total_mat)
return unitary_mat
def make_QITE_program(sim_obj, regularizer=0.1):
beta = sim_obj.beta
dbeta = sim_obj.delta_beta
domain = sim_obj.domain
backend = sim_obj.backend
nbeta = int(beta/dbeta)
psi = get_state_from_string(sim_obj.initial_spins)
nspins = sim_obj.num_spins
#get Pauli basis
pauli_basis = make_Pauli_basis(domain)
#get hamiltonian in Pauli basis
H = get_PauliBasis_Heisenberg_Ham(sim_obj, domain, pbc=False)
#creat array of operators to be exponentiated for QITE
A_ops = []
energies = []
qite_prog = Program(nspins)
#add initial state preparation to program
spin_idx = 0
for spin in sim_obj.initial_spins:
if (spin == '1'):
qite_prog.add_gate(Gate([spin_idx], name='X'))
spin_idx += 1
#loop over nbeta steps
for ib in range(nbeta):
total_eng = 0
for hterm in H:
#get the list of qubits this term acts on
active_qubits = hterm[0]
A_ops.append([])
A_ops[-1].append(active_qubits)
#get the array of coeffs for Pauli basis ops that act on these qubits
ham = np.asarray(hterm[1])
#get coeffs for qite circuit
x, energy = qite_step(psi, pauli_basis, active_qubits, nspins, dbeta, ham, domain,regularizer)
total_eng += energy
op_coeffs = []
for i in range(len(x)):
if (np.abs(x[i]) > 1e-12):
op_coeffs.append(x[i])
else: op_coeffs.append(0.0)
x = op_coeffs
A_ops[-1].append(x)
Xmat=np.complex128(np.zeros([2**domain,2**domain]))
for p in range(4**domain):
Xmat+=np.complex128(x[p])*pauli_basis[p]
exp_op = scipy.linalg.expm(-1j*Xmat)
exp_op_full = [1]
exp_op_not_applied = True
for k in range(nspins):
if (k in active_qubits):
if(exp_op_not_applied):
exp_op_full = np.kron(exp_op_full, exp_op)
exp_op_not_applied = False
else:
exp_op_full = np.kron(exp_op_full,np.eye(2))
psi = np.dot(exp_op_full, psi)
#psi = psi/np.linalg.norm(psi)
#add unitary to qite program
#qite_prog.add_gate(Gate(active_qubits, unitary=exp_op_full))
qite_prog.add_gate(Gate(active_qubits, unitary=exp_op))
energies.append(total_eng)
final_eng = 0
for hterm in H:
active_qubits = hterm[0]
ham = np.asarray(hterm[1])
exp_vals = get_exepctation_values_th(psi, pauli_basis, active_qubits, nspins)
final_eng += get_energy_from_exps(exp_vals, ham)
energies.append(final_eng)
return qite_prog, energies
|
{"hexsha": "0a1bcc9a98536623f6039d6bb90ec3c040427cfc", "size": 19547, "ext": "py", "lang": "Python", "max_stars_repo_path": "arqtic/qite.py", "max_stars_repo_name": "lebassman/ArQTiC", "max_stars_repo_head_hexsha": "23d9877dfb0a8c5c10da2865abaf9ca830298199", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-10T01:23:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T06:29:45.000Z", "max_issues_repo_path": "arqtic/qite.py", "max_issues_repo_name": "lebassman/ArQTiC", "max_issues_repo_head_hexsha": "23d9877dfb0a8c5c10da2865abaf9ca830298199", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "arqtic/qite.py", "max_forks_repo_name": "lebassman/ArQTiC", "max_forks_repo_head_hexsha": "23d9877dfb0a8c5c10da2865abaf9ca830298199", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2510040161, "max_line_length": 130, "alphanum_fraction": 0.5700107433, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5581}
|
import numpy as np
def calculate_exploration_prob(loss_history, act_explor_prob,threshold):
mean = np.mean(loss_history)
variance = 0
for i in loss_history:
variance += np.square(i-mean)
if threshold >= variance:
act_explor_prob += 0.05
else:
act_explor_prob -= 0.05
if act_explor_prob < 0:
return 0
elif act_explor_prob > 1:
return 1
else:
return act_explor_prob
|
{"hexsha": "a654046012f0a0be8b5fe2ef5b6e639fae49832b", "size": 450, "ext": "py", "lang": "Python", "max_stars_repo_path": "Zadanie3/srcV2/Misc.py", "max_stars_repo_name": "letv3/NSIETE", "max_stars_repo_head_hexsha": "3e65c66ddb14cf11b4757bdc8a70d5438785dec2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Zadanie3/srcV2/Misc.py", "max_issues_repo_name": "letv3/NSIETE", "max_issues_repo_head_hexsha": "3e65c66ddb14cf11b4757bdc8a70d5438785dec2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Zadanie3/srcV2/Misc.py", "max_forks_repo_name": "letv3/NSIETE", "max_forks_repo_head_hexsha": "3e65c66ddb14cf11b4757bdc8a70d5438785dec2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.5652173913, "max_line_length": 72, "alphanum_fraction": 0.6355555556, "include": true, "reason": "import numpy", "num_tokens": 127}
|
[STATEMENT]
theorem cptn_iff_cptn_mod: "(c \<in> cptn) = (c \<in> cptn_mod)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (c \<in> cptn) = (c \<in> cptn_mod)
[PROOF STEP]
apply(rule iffI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. c \<in> cptn \<Longrightarrow> c \<in> cptn_mod
2. c \<in> cptn_mod \<Longrightarrow> c \<in> cptn
[PROOF STEP]
apply(erule cptn_onlyif_cptn_mod)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<in> cptn_mod \<Longrightarrow> c \<in> cptn
[PROOF STEP]
apply(erule cptn_if_cptn_mod)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 293, "file": null, "length": 4}
|
import numpy as np
for train_set in ['train_yyn', 'train_ynn']:
all_res = []
for nlayer in [1, 2, 3]:
for eunits in [50, 60, 70, 80, 90]:
average_list = []
for random_seed in [1, 3, 5, 7, 9]:
exp_dir = 'exp/%s_pytorch_train_delta_%d_%d_%d' % (train_set,
nlayer, eunits, random_seed)
result = np.loadtxt(exp_dir + '/result.sum')
result = np.expand_dims(result, axis=0)
average_list.append(result)
aver = np.concatenate(average_list, axis=0)
aver = np.mean(aver, axis=0, keepdims=True)
all_res.append(aver)
all_res = np.concatenate(all_res, axis=0)
np.savetxt('exp/%s_pytorch_train_delta.txt' %
(train_set), all_res, fmt='%.2f')
|
{"hexsha": "001a14568ca601c274e0a46bb8544a5308d3bce9", "size": 858, "ext": "py", "lang": "Python", "max_stars_repo_path": "egs/modified_yesno/asr_mfcc_char_ns/local/average_random_seed.py", "max_stars_repo_name": "ZhaoZeyu1995/espnet", "max_stars_repo_head_hexsha": "350e97da275e35a9495da8ee2f53d19f93f50241", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "egs/modified_yesno/asr_mfcc_char_ns/local/average_random_seed.py", "max_issues_repo_name": "ZhaoZeyu1995/espnet", "max_issues_repo_head_hexsha": "350e97da275e35a9495da8ee2f53d19f93f50241", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "egs/modified_yesno/asr_mfcc_char_ns/local/average_random_seed.py", "max_forks_repo_name": "ZhaoZeyu1995/espnet", "max_forks_repo_head_hexsha": "350e97da275e35a9495da8ee2f53d19f93f50241", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9, "max_line_length": 95, "alphanum_fraction": 0.520979021, "include": true, "reason": "import numpy", "num_tokens": 224}
|
# Importing Required Libraries
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import matplotlib.pyplot as plt
# helper function to find midpoint between two points
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
# function to find dimensions from a 2D image
def process_image(imagepath, width):
#read image using opencv
image = cv2.imread(imagepath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# Edge Detection using Canny() of opencv
edged = cv2.Canny(gray, 10, 100)
edged = cv2.dilate(edged, None, iterations=3)
edged = cv2.erode(edged, None, iterations=1)
# finding all contours from the image
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
#sorting contours from left to right
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None # metric for measuring objects
resA = 0 # area of resultant object
# looping over the contours
for c in cnts:
#ignoring small contours, coz they can be noise
if cv2.contourArea(c) < 1000:
continue
# compute the rotated bounding box of the contour
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear in top-left, top-right, bottom-right, and bottom-left order
box = perspective.order_points(box)
# finding midpoints on all four sides of the rectangle
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
# compute the Euclidean distance between the midpoints
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
# initialising metric with ref object's width
if pixelsPerMetric is None:
pixelsPerMetric = dB / width
# compute the size of the object
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
# finding the largest object in the image
# assuming luggage is biggest in the image
if (dimA*dimB > resA):
resA = dimA*dimB
resDim = (dimA,dimB)
return resDim
#main function to get all dimensions of any object
def find_dimensions(image1, image2, width1, width2):
# declaring resultant variables
res1, res2, res3 = 0, 0, 0
# getting dimensions from each image
dim1, dim2 = process_image(image1, width1)
dim3, dim4 = process_image(image2, width2)
# rounding dimensions till second decimal place
dim1, dim2, dim3, dim4 = round(dim1,2), round(dim2,2), round(dim3,2), round(dim4,2)
# finding overlapping dimension and eliminating it
# threshold 0.25cm (can be changed)
if(abs(dim1-dim3) > 0.25):
res1 = dim1; res2=dim2; res3=dim3
else:
res1 = dim1; res2=dim2; res3=dim4
return (res1,res2,res3)
if __name__ == '__main__':
d = find_dimensions('speaker1.jpeg', 'speaker2.jpeg', 7.2, 7.2)
print(d)
|
{"hexsha": "4d47db0f7e9b964e3996e9259c7351a1f78e95af", "size": 3562, "ext": "py", "lang": "Python", "max_stars_repo_path": "Baggage Fitment Index/final.py", "max_stars_repo_name": "sahilrider/Machine-Vision-Challenge", "max_stars_repo_head_hexsha": "d930a1dad0de05643a1245c3c243fb03a268ba4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-10-18T06:46:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-01T19:17:21.000Z", "max_issues_repo_path": "Baggage Fitment Index/final.py", "max_issues_repo_name": "sahilrider/Machine-Vision-Challenge", "max_issues_repo_head_hexsha": "d930a1dad0de05643a1245c3c243fb03a268ba4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Baggage Fitment Index/final.py", "max_forks_repo_name": "sahilrider/Machine-Vision-Challenge", "max_forks_repo_head_hexsha": "d930a1dad0de05643a1245c3c243fb03a268ba4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-05T18:11:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T18:11:25.000Z", "avg_line_length": 33.9238095238, "max_line_length": 123, "alphanum_fraction": 0.6313868613, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1015}
|
[STATEMENT]
lemma parCases'[consumes 5, case_names cPar1 cPar2 cComm1 cComm2]:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and \<alpha> :: "'a action"
and T :: "('a, 'b, 'c) psi"
and C :: "'d::fs_name"
assumes Trans: "\<Psi> \<rhd> P \<parallel> Q \<longmapsto>x\<alpha> \<prec> xT"
and "bn x\<alpha> \<sharp>* \<Psi>"
and "bn x\<alpha> \<sharp>* P"
and "bn x\<alpha> \<sharp>* Q"
and "bn x\<alpha> \<sharp>* subject x\<alpha>"
and rPar1: "\<And>P' A\<^sub>Q \<Psi>\<^sub>Q. \<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<longmapsto>x\<alpha> \<prec> P'; extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>; distinct A\<^sub>Q;
A\<^sub>Q \<sharp>* \<Psi>; A\<^sub>Q \<sharp>* P; A\<^sub>Q \<sharp>* Q; A\<^sub>Q \<sharp>* x\<alpha>; A\<^sub>Q \<sharp>* P'; A\<^sub>Q \<sharp>* C; xT = P' \<parallel> Q\<rbrakk> \<Longrightarrow> Prop"
and rPar2: "\<And>Q' A\<^sub>P \<Psi>\<^sub>P. \<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>P \<rhd> Q \<longmapsto>x\<alpha> \<prec> Q'; extractFrame P = \<langle>A\<^sub>P, \<Psi>\<^sub>P\<rangle>; distinct A\<^sub>P;
A\<^sub>P \<sharp>* \<Psi>; A\<^sub>P \<sharp>* P; A\<^sub>P \<sharp>* Q; A\<^sub>P \<sharp>* x\<alpha>; A\<^sub>P \<sharp>* Q'; A\<^sub>P \<sharp>* C; xT = P \<parallel> Q'\<rbrakk> \<Longrightarrow> Prop"
and rComm1: "\<And>\<Psi>\<^sub>Q M N P' A\<^sub>P \<Psi>\<^sub>P K xvec Q' A\<^sub>Q.
\<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<longmapsto>M\<lparr>N\<rparr> \<prec> P'; extractFrame P = \<langle>A\<^sub>P, \<Psi>\<^sub>P\<rangle>; distinct A\<^sub>P;
\<Psi> \<otimes> \<Psi>\<^sub>P \<rhd> Q \<longmapsto>K\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> Q'; extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>; distinct A\<^sub>Q;
\<Psi> \<otimes> \<Psi>\<^sub>P \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K; distinct xvec;
A\<^sub>P \<sharp>* \<Psi>; A\<^sub>P \<sharp>* \<Psi>\<^sub>Q; A\<^sub>P \<sharp>* P; A\<^sub>P \<sharp>* M; A\<^sub>P \<sharp>* N; A\<^sub>P \<sharp>* P'; A\<^sub>P \<sharp>* Q; A\<^sub>P \<sharp>* xvec; A\<^sub>P \<sharp>* Q'; A\<^sub>P \<sharp>* A\<^sub>Q; A\<^sub>P \<sharp>* C;
A\<^sub>Q \<sharp>* \<Psi>; A\<^sub>Q \<sharp>* \<Psi>\<^sub>P; A\<^sub>Q \<sharp>* P; A\<^sub>Q \<sharp>* K; A\<^sub>Q \<sharp>* N; A\<^sub>Q \<sharp>* P'; A\<^sub>Q \<sharp>* Q; A\<^sub>Q \<sharp>* xvec; A\<^sub>Q \<sharp>* Q'; A\<^sub>Q \<sharp>* C;
xvec \<sharp>* \<Psi>; xvec \<sharp>* \<Psi>\<^sub>P; xvec \<sharp>* P; xvec \<sharp>* M; xvec \<sharp>* K; xvec \<sharp>* Q; xvec \<sharp>* \<Psi>\<^sub>Q; xvec \<sharp>* C; x\<alpha>=\<tau>; xT = \<lparr>\<nu>*xvec\<rparr>(P' \<parallel> Q')\<rbrakk> \<Longrightarrow> Prop"
and rComm2: "\<And>\<Psi>\<^sub>Q M xvec N P' A\<^sub>P \<Psi>\<^sub>P K Q' A\<^sub>Q.
\<lbrakk>\<Psi> \<otimes> \<Psi>\<^sub>Q \<rhd> P \<longmapsto>M\<lparr>\<nu>*xvec\<rparr>\<langle>N\<rangle> \<prec> P'; extractFrame P = \<langle>A\<^sub>P, \<Psi>\<^sub>P\<rangle>; distinct A\<^sub>P;
\<Psi> \<otimes> \<Psi>\<^sub>P \<rhd> Q \<longmapsto>K\<lparr>N\<rparr> \<prec> Q'; extractFrame Q = \<langle>A\<^sub>Q, \<Psi>\<^sub>Q\<rangle>; distinct A\<^sub>Q;
\<Psi> \<otimes> \<Psi>\<^sub>P \<otimes> \<Psi>\<^sub>Q \<turnstile> M \<leftrightarrow> K; distinct xvec;
A\<^sub>P \<sharp>* \<Psi>; A\<^sub>P \<sharp>* \<Psi>\<^sub>Q; A\<^sub>P \<sharp>* P; A\<^sub>P \<sharp>* M; A\<^sub>P \<sharp>* N; A\<^sub>P \<sharp>* P'; A\<^sub>P \<sharp>* Q; A\<^sub>P \<sharp>* xvec; A\<^sub>P \<sharp>* Q'; A\<^sub>P \<sharp>* A\<^sub>Q; A\<^sub>P \<sharp>* C;
A\<^sub>Q \<sharp>* \<Psi>; A\<^sub>Q \<sharp>* \<Psi>\<^sub>P; A\<^sub>Q \<sharp>* P; A\<^sub>Q \<sharp>* K; A\<^sub>Q \<sharp>* N; A\<^sub>Q \<sharp>* P'; A\<^sub>Q \<sharp>* Q; A\<^sub>Q \<sharp>* xvec; A\<^sub>Q \<sharp>* Q'; A\<^sub>Q \<sharp>* C;
xvec \<sharp>* \<Psi>; xvec \<sharp>* \<Psi>\<^sub>P; xvec \<sharp>* P; xvec \<sharp>* M; xvec \<sharp>* K; xvec \<sharp>* Q; xvec \<sharp>* \<Psi>\<^sub>Q; xvec \<sharp>* C; x\<alpha>=\<tau>; xT = \<lparr>\<nu>*xvec\<rparr>(P' \<parallel> Q')\<rbrakk> \<Longrightarrow> Prop"
shows "Prop"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Prop
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Prop
[PROOF STEP]
from Trans
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
[PROOF STEP]
have "distinct(bn x\<alpha>)"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
goal (1 subgoal):
1. distinct (bn x\<alpha>)
[PROOF STEP]
by(auto dest: boundOutputDistinct)
[PROOF STATE]
proof (state)
this:
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
have "length(bn x\<alpha>) = residualLength(x\<alpha> \<prec> xT)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
goal (1 subgoal):
1. Prop
[PROOF STEP]
note Trans
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
goal (1 subgoal):
1. Prop
[PROOF STEP]
have "length [] = inputLength(P \<parallel> Q)" and "distinct []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length [] = inputLength (P \<parallel> Q) &&& distinct []
[PROOF STEP]
by(auto simp add: inputLength_inputLength'_inputLength''.simps)
[PROOF STATE]
proof (state)
this:
length [] = inputLength (P \<parallel> Q)
distinct []
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
length [] = inputLength (P \<parallel> Q)
distinct []
goal (1 subgoal):
1. Prop
[PROOF STEP]
note \<open>length(bn x\<alpha>) = residualLength(x\<alpha> \<prec> xT)\<close> \<open>distinct(bn x\<alpha>)\<close>
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
note \<open>length(bn x\<alpha>) = residualLength(x\<alpha> \<prec> xT)\<close> \<open>distinct(bn x\<alpha>)\<close>
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
note \<open>length(bn x\<alpha>) = residualLength(x\<alpha> \<prec> xT)\<close> \<open>distinct(bn x\<alpha>)\<close>
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
note \<open>length(bn x\<alpha>) = residualLength(x\<alpha> \<prec> xT)\<close> \<open>distinct(bn x\<alpha>)\<close>
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
goal (1 subgoal):
1. Prop
[PROOF STEP]
obtain x::name where "x \<sharp> \<Psi>" and "x \<sharp> P" and "x \<sharp> Q" and "x \<sharp> x\<alpha>" and "x \<sharp> xT"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<sharp> \<Psi>; x \<sharp> P; x \<sharp> Q; x \<sharp> x\<alpha>; x \<sharp> xT\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(generate_fresh "name") auto
[PROOF STATE]
proof (state)
this:
x \<sharp> \<Psi>
x \<sharp> P
x \<sharp> Q
x \<sharp> x\<alpha>
x \<sharp> xT
goal (1 subgoal):
1. Prop
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
length [] = inputLength (P \<parallel> Q)
distinct []
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
x \<sharp> \<Psi>
x \<sharp> P
x \<sharp> Q
x \<sharp> x\<alpha>
x \<sharp> xT
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
length [] = inputLength (P \<parallel> Q)
distinct []
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
x \<sharp> \<Psi>
x \<sharp> P
x \<sharp> Q
x \<sharp> x\<alpha>
x \<sharp> xT
goal (1 subgoal):
1. Prop
[PROOF STEP]
using \<open>bn x\<alpha> \<sharp>* \<Psi>\<close> \<open>bn x\<alpha> \<sharp>* P\<close> \<open>bn x\<alpha> \<sharp>* Q\<close> \<open>bn x\<alpha> \<sharp>* subject x\<alpha>\<close>
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
length [] = inputLength (P \<parallel> Q)
distinct []
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
x \<sharp> \<Psi>
x \<sharp> P
x \<sharp> Q
x \<sharp> x\<alpha>
x \<sharp> xT
bn x\<alpha> \<sharp>* \<Psi>
bn x\<alpha> \<sharp>* P
bn x\<alpha> \<sharp>* Q
bn x\<alpha> \<sharp>* subject x\<alpha>
goal (1 subgoal):
1. Prop
[PROOF STEP]
using rPar1 rPar2 rComm1 rComm2
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<parallel> Q \<longmapsto> x\<alpha> \<prec> xT
length [] = inputLength (P \<parallel> Q)
distinct []
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
length (bn x\<alpha>) = residualLength (x\<alpha> \<prec> xT)
distinct (bn x\<alpha>)
x \<sharp> \<Psi>
x \<sharp> P
x \<sharp> Q
x \<sharp> x\<alpha>
x \<sharp> xT
bn x\<alpha> \<sharp>* \<Psi>
bn x\<alpha> \<sharp>* P
bn x\<alpha> \<sharp>* Q
bn x\<alpha> \<sharp>* subject x\<alpha>
\<lbrakk>\<Psi> \<otimes> ?\<Psi>\<^sub>Q \<rhd> P \<longmapsto> x\<alpha> \<prec> ?P'; extractFrame Q = \<langle>?A\<^sub>Q, ?\<Psi>\<^sub>Q\<rangle>; distinct ?A\<^sub>Q; ?A\<^sub>Q \<sharp>* \<Psi>; ?A\<^sub>Q \<sharp>* P; ?A\<^sub>Q \<sharp>* Q; ?A\<^sub>Q \<sharp>* x\<alpha>; ?A\<^sub>Q \<sharp>* ?P'; ?A\<^sub>Q \<sharp>* C; xT = ?P' \<parallel> Q\<rbrakk> \<Longrightarrow> Prop
\<lbrakk>\<Psi> \<otimes> ?\<Psi>\<^sub>P \<rhd> Q \<longmapsto> x\<alpha> \<prec> ?Q'; extractFrame P = \<langle>?A\<^sub>P, ?\<Psi>\<^sub>P\<rangle>; distinct ?A\<^sub>P; ?A\<^sub>P \<sharp>* \<Psi>; ?A\<^sub>P \<sharp>* P; ?A\<^sub>P \<sharp>* Q; ?A\<^sub>P \<sharp>* x\<alpha>; ?A\<^sub>P \<sharp>* ?Q'; ?A\<^sub>P \<sharp>* C; xT = P \<parallel> ?Q'\<rbrakk> \<Longrightarrow> Prop
\<lbrakk>\<Psi> \<otimes> ?\<Psi>\<^sub>Q \<rhd> P \<longmapsto> ?M\<lparr>?N\<rparr> \<prec> ?P'; extractFrame P = \<langle>?A\<^sub>P, ?\<Psi>\<^sub>P\<rangle>; distinct ?A\<^sub>P; \<Psi> \<otimes> ?\<Psi>\<^sub>P \<rhd> Q \<longmapsto> ?K\<lparr>\<nu>*?xvec\<rparr>\<langle>?N\<rangle> \<prec> ?Q'; extractFrame Q = \<langle>?A\<^sub>Q, ?\<Psi>\<^sub>Q\<rangle>; distinct ?A\<^sub>Q; \<Psi> \<otimes> ?\<Psi>\<^sub>P \<otimes> ?\<Psi>\<^sub>Q \<turnstile> ?M \<leftrightarrow> ?K; distinct ?xvec; ?A\<^sub>P \<sharp>* \<Psi>; ?A\<^sub>P \<sharp>* ?\<Psi>\<^sub>Q; ?A\<^sub>P \<sharp>* P; ?A\<^sub>P \<sharp>* ?M; ?A\<^sub>P \<sharp>* ?N; ?A\<^sub>P \<sharp>* ?P'; ?A\<^sub>P \<sharp>* Q; ?A\<^sub>P \<sharp>* ?xvec; ?A\<^sub>P \<sharp>* ?Q'; ?A\<^sub>P \<sharp>* ?A\<^sub>Q; ?A\<^sub>P \<sharp>* C; ?A\<^sub>Q \<sharp>* \<Psi>; ?A\<^sub>Q \<sharp>* ?\<Psi>\<^sub>P; ?A\<^sub>Q \<sharp>* P; ?A\<^sub>Q \<sharp>* ?K; ?A\<^sub>Q \<sharp>* ?N; ?A\<^sub>Q \<sharp>* ?P'; ?A\<^sub>Q \<sharp>* Q; ?A\<^sub>Q \<sharp>* ?xvec; ?A\<^sub>Q \<sharp>* ?Q'; ?A\<^sub>Q \<sharp>* C; ?xvec \<sharp>* \<Psi>; ?xvec \<sharp>* ?\<Psi>\<^sub>P; ?xvec \<sharp>* P; ?xvec \<sharp>* ?M; ?xvec \<sharp>* ?K; ?xvec \<sharp>* Q; ?xvec \<sharp>* ?\<Psi>\<^sub>Q; ?xvec \<sharp>* C; x\<alpha> = \<tau>; xT = \<lparr>\<nu>*?xvec\<rparr>?P' \<parallel> ?Q'\<rbrakk> \<Longrightarrow> Prop
\<lbrakk>\<Psi> \<otimes> ?\<Psi>\<^sub>Q \<rhd> P \<longmapsto> ?M\<lparr>\<nu>*?xvec\<rparr>\<langle>?N\<rangle> \<prec> ?P'; extractFrame P = \<langle>?A\<^sub>P, ?\<Psi>\<^sub>P\<rangle>; distinct ?A\<^sub>P; \<Psi> \<otimes> ?\<Psi>\<^sub>P \<rhd> Q \<longmapsto> ?K\<lparr>?N\<rparr> \<prec> ?Q'; extractFrame Q = \<langle>?A\<^sub>Q, ?\<Psi>\<^sub>Q\<rangle>; distinct ?A\<^sub>Q; \<Psi> \<otimes> ?\<Psi>\<^sub>P \<otimes> ?\<Psi>\<^sub>Q \<turnstile> ?M \<leftrightarrow> ?K; distinct ?xvec; ?A\<^sub>P \<sharp>* \<Psi>; ?A\<^sub>P \<sharp>* ?\<Psi>\<^sub>Q; ?A\<^sub>P \<sharp>* P; ?A\<^sub>P \<sharp>* ?M; ?A\<^sub>P \<sharp>* ?N; ?A\<^sub>P \<sharp>* ?P'; ?A\<^sub>P \<sharp>* Q; ?A\<^sub>P \<sharp>* ?xvec; ?A\<^sub>P \<sharp>* ?Q'; ?A\<^sub>P \<sharp>* ?A\<^sub>Q; ?A\<^sub>P \<sharp>* C; ?A\<^sub>Q \<sharp>* \<Psi>; ?A\<^sub>Q \<sharp>* ?\<Psi>\<^sub>P; ?A\<^sub>Q \<sharp>* P; ?A\<^sub>Q \<sharp>* ?K; ?A\<^sub>Q \<sharp>* ?N; ?A\<^sub>Q \<sharp>* ?P'; ?A\<^sub>Q \<sharp>* Q; ?A\<^sub>Q \<sharp>* ?xvec; ?A\<^sub>Q \<sharp>* ?Q'; ?A\<^sub>Q \<sharp>* C; ?xvec \<sharp>* \<Psi>; ?xvec \<sharp>* ?\<Psi>\<^sub>P; ?xvec \<sharp>* P; ?xvec \<sharp>* ?M; ?xvec \<sharp>* ?K; ?xvec \<sharp>* Q; ?xvec \<sharp>* ?\<Psi>\<^sub>Q; ?xvec \<sharp>* C; x\<alpha> = \<tau>; xT = \<lparr>\<nu>*?xvec\<rparr>?P' \<parallel> ?Q'\<rbrakk> \<Longrightarrow> Prop
goal (1 subgoal):
1. Prop
[PROOF STEP]
by(cases rule: semanticsCases[of _ _ _ _ _ _ _ _ _ C x x]) (auto simp add: psi.inject residualInject residualInject')
[PROOF STATE]
proof (state)
this:
Prop
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6351, "file": "Psi_Calculi_Tau", "length": 27}
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from nn_meter.utils.utils import try_import_onnx
import networkx as nx
from .utils import get_tensor_shape
from .constants import SLICE_TYPE
from itertools import chain
import logging
class OnnxConverter:
def __init__(self, model):
onnx = try_import_onnx()
from onnx import shape_inference
inferred_model = shape_inference.infer_shapes(model)
self.graph = inferred_model.graph
self.tensors = {}
for tensor in chain(self.graph.input, self.graph.value_info, self.graph.output):
self.tensors[tensor.name] = {
"shape": get_tensor_shape(tensor),
"inputs": [],
"outputs": [],
}
for node in self.graph.node:
for input_name in node.input:
if input_name in self.tensors:
self.tensors[input_name]["outputs"].append(node)
for output_name in node.output:
if output_name in self.tensors:
self.tensors[output_name]["inputs"].append(node)
self.G = self.to_networkx()
def to_networkx(self):
G = nx.DiGraph()
sliced_tensors = set()
selected_slice = set()
for node in self.graph.node:
if node.op_type == SLICE_TYPE:
tensor = node.input[0]
if tensor in sliced_tensors:
continue
else:
sliced_tensors.add(tensor)
selected_slice.add(node.name)
G.add_node(node.name, **self.fetch_attrs(node))
for node in self.graph.node:
if node.op_type == SLICE_TYPE and node.name not in selected_slice:
continue
for input_name in node.input:
if input_name in self.tensors: # remove dummy ops
G.add_edge(input_name, node.name)
for output_name in node.output:
if output_name in self.tensors:
G.add_edge(node.name, output_name)
if node.op_type == SLICE_TYPE:
for tensor_name in self._get_sibling_slice_output_tensors(node):
G.add_edge(node.name, tensor_name)
return G
def fetch_attrs(self, node):
from onnx import AttributeProto
attrs = {}
input_tensors = []
for input_name in node.input:
if input_name in self.tensors:
input_tensors.append(self.tensors[input_name]["shape"])
output_tensors = []
for output_name in node.output:
if output_name in self.tensors:
output_tensors.append(self.tensors[output_name]["shape"])
if node.op_type == SLICE_TYPE:
for tensor_name in self._get_sibling_slice_output_tensors(node):
output_tensors.append(self.tensors[tensor_name]["shape"])
if (
len(input_tensors) == 0
or len(input_tensors[0]) <= 1
or len(output_tensors) == 0
or len(output_tensors[0]) <= 1
):
logging.warning(f"Empty shape information with {node.name}")
return attrs
attrs["attr"] = {}
attrs["type"] = node.op_type
attrs["input_shape"] = input_tensors
attrs["output_shape"] = output_tensors
for attr in node.attribute:
if attr.type == AttributeProto.FLOAT:
attrs["attr"][attr.name] = attr.f
elif attr.type == AttributeProto.INT:
attrs["attr"][attr.name] = attr.i
elif attr.type == AttributeProto.INTS:
attrs["attr"][attr.name] = list(attr.ints)
elif attr.type == AttributeProto.STRING:
attrs["attr"][attr.name] = str(attr.s)
else:
logging.warning(f"Unsupported attributes type: {attr.type}")
return attrs
def convert(self):
result = {}
for node in self.G.nodes:
node_attrs = self.G.nodes[node]
if node in self.tensors or not node_attrs:
continue
outbounds = []
inbounds = []
for succ in self.G.successors(node):
for succ_succ in self.G.successors(succ):
outbounds.append(succ_succ)
for pred in self.G.predecessors(node):
for pred_pred in self.G.predecessors(pred):
inbounds.append(pred_pred)
result[node] = {
"attr": node_attrs,
"outbounds": outbounds,
"inbounds": inbounds,
}
return result
def _get_sibling_slice_output_tensors(self, node):
output_tensors = []
for slice in self.tensors[node.input[0]]["outputs"]:
if slice.name != node.name and slice.op_type == SLICE_TYPE:
for output_name in slice.output:
if output_name in self.tensors:
output_tensors.append(output_name)
return output_tensors
|
{"hexsha": "d4d214f9065299fae8d0877722c3a467dd181c16", "size": 5134, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn_meter/ir_converters/onnx_converter/converter.py", "max_stars_repo_name": "kaleid-liner/nn-Meter", "max_stars_repo_head_hexsha": "526f8ddeeb33816a0b2b7f97964683e0510cd85d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nn_meter/ir_converters/onnx_converter/converter.py", "max_issues_repo_name": "kaleid-liner/nn-Meter", "max_issues_repo_head_hexsha": "526f8ddeeb33816a0b2b7f97964683e0510cd85d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn_meter/ir_converters/onnx_converter/converter.py", "max_forks_repo_name": "kaleid-liner/nn-Meter", "max_forks_repo_head_hexsha": "526f8ddeeb33816a0b2b7f97964683e0510cd85d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4113475177, "max_line_length": 88, "alphanum_fraction": 0.5613556681, "include": true, "reason": "import networkx", "num_tokens": 1022}
|
import numpy as np
#import nengolib
from nengo_ssp.spatial_semantic_pointer import SpatialSemanticPointer
from nengo_ssp.hrr_algebra import HrrAlgebra
def PlaneWaveBasis(K):
# Create the bases vectors X,Y as described in the paper with the wavevectors
# (k_i = (u_i,v_i)) given in a matrix K. To get hexganal patterns use 3 K vectors 120 degs apart
# To get mulit-scales/orientation, give many such sets of 3 K vectors
# K is _ by 2
d = K.shape[0]
n = K.shape[1]
Bases = []
for i in range(n):
F = np.ones((d*2 + 1,), dtype="complex")
F[0:d] = np.exp(1.j*K[:,i])
F[-d:] = np.flip(np.conj(F[0:d]))
F = np.fft.ifftshift(F)
Basis = SpatialSemanticPointer(data=np.fft.ifft(F), algebra=HrrAlgebra())
Bases.append(Basis)
return Bases
def WeightedPlaneWaveBasis(K,W):
# Create the bases vectors X,Y as described in the paper with the wavevectors
# (k_i = (u_i,v_i)) given in a matrix K. To get hexganal patterns use 3 K vectors 120 degs apart
# To get mulit-scales/orientation, give many such sets of 3 K vectors
# K is _ by 2
d = K.shape[0]
n = K.shape[1]
Bases = []
for i in range(n):
F = np.ones((d*2 + 1,), dtype="complex")
F[0:d] = W*np.exp(1.j*K[:,i])
F[-d:] = np.flip(W*np.conj(F[0:d]))
Basis = SpatialSemanticPointer(data=np.fft.ifft(F), algebra=HrrAlgebra())
Bases.append(Basis)
return Bases
def HexagonalBasis(n_rotates=8,n_scales=8,scale_min=0.8, scale_max=3):
# Create bases vectors X,Y consisting of mulitple sets of hexagonal bases
K_hex = np.array([[0,1], [np.sqrt(3)/2,-0.5], [-np.sqrt(3)/2,-0.5]])
scales = np.linspace(scale_min,scale_max,n_scales)
K_scales = np.vstack([K_hex*i for i in scales])
thetas = np.arange(0,n_rotates)*np.pi/(n_rotates) #***
R_mats = np.stack([np.stack([np.cos(thetas), -np.sin(thetas)],axis=1),
np.stack([np.sin(thetas), np.cos(thetas)], axis=1)], axis=1)
K_scale_rotates = (R_mats @ K_scales.T).transpose(1,2,0).T.reshape(-1,2)
X, Y = PlaneWaveBasis(K_scale_rotates)
return X, Y, K_scale_rotates
def RectangularBasis(n_rotates=8,n_scales=8,scale_min=0.8, scale_max=3):
# Create bases vectors X,Y consisting of mulitple sets of hexagonal bases
K_rec = np.array([[0,1], [1,0]])
scales = np.linspace(scale_min,scale_max,n_scales)
K_scales = np.vstack([K_rec*i for i in scales])
thetas = np.arange(0,n_rotates)*np.pi/(n_rotates) #***
R_mats = np.stack([np.stack([np.cos(thetas), -np.sin(thetas)],axis=1),
np.stack([np.sin(thetas), np.cos(thetas)], axis=1)], axis=1)
K_scale_rotates = (R_mats @ K_scales.T).transpose(1,2,0).T.reshape(-1,2)
X, Y = PlaneWaveBasis(K_scale_rotates)
return X, Y, K_scale_rotates
def RecursiveBasisFun(K):
def _recursive_fun(A,x,y):
plane_wave = np.exp(1.j*(K[:,0]*x + K[:,1]*y))
h = np.sum((plane_wave + np.conj(plane_wave)).real,axis=0)
#h = np.sum(plane_wave,axis=0)
return np.fft.ifft(np.fft.fft(A, axis=0)**(np.abs(h)/3 + 2), axis=0)
return _recursive_fun
def GridCellEncoders(n_G,X,Y, radius=10):
d = len(X.v)
N = (d-1)//6
#G_pos_dist = nengolib.stats.Rd()
#G_pos = G_pos_dist.sample(n_G,2)*2*radius - radius
G_pos = np.random.rand(n_G,2)*2*radius - radius
if N < n_G:
G_sorts = np.hstack([np.arange(N), np.random.randint(0, N - 1, size = n_G - N)])
else:
G_sorts = np.arange(n_G)
G_encoders = np.zeros((n_G,d))
for i in np.arange(n_G):
sub_mat = _get_sub_SSP(G_sorts[i],N)
proj_mat = _proj_sub_SSP(G_sorts[i],N)
Xi = SpatialSemanticPointer(data = sub_mat @ X.v)
Yi = SpatialSemanticPointer(data = sub_mat @ Y.v)
G_encoders[i,:] = N * proj_mat @ ((Xi**G_pos[i,0])*(Yi**G_pos[i,1])).v
return G_encoders, G_sorts
def UnitaryVectors(D, eps=1e-3, rng=np.random):
a = rng.rand((D - 1) // 2)
sign = rng.choice((-1, +1), len(a))
phi = sign * np.pi * (eps + a * (1 - 2 * eps))
assert np.all(np.abs(phi) >= np.pi * eps)
assert np.all(np.abs(phi) <= np.pi * (1 - eps))
fv = np.zeros(D, dtype='complex64')
fv[0] = 1
fv[1:(D + 1) // 2] = np.cos(phi) + 1j * np.sin(phi)
fv[-1:D // 2:-1] = np.conj(fv[1:(D + 1) // 2])
if D % 2 == 0:
fv[D // 2] = 1
assert np.allclose(np.abs(fv), 1)
v = np.fft.ifft(fv)
v = v.real
return SpatialSemanticPointer(v)
def _get_sub_FourierSSP(n, N, sublen=3):
# Return a matrix, \bar{A}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \bar{A}_n F{S_{total}} = F{S_n}
# i.e. pick out the sub vector in the Fourier domain
tot_len = 2*sublen*N + 1
FA = np.zeros((2*sublen + 1, tot_len))
FA[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FA[sublen, sublen*N] = 1
FA[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FA
def _get_sub_SSP(n,N,sublen=3):
# Return a matrix, A_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# A_n S_{total} = S_n
# i.e. pick out the sub vector in the time domain
tot_len = 2*sublen*N + 1
FA = _get_sub_FourierSSP(n,N,sublen=sublen)
W = np.fft.fft(np.eye(tot_len))
invW = np.fft.ifft(np.eye(2*sublen + 1))
A = invW @ np.fft.ifftshift(FA) @ W
return A.real
def _proj_sub_FourierSSP(n,N,sublen=3):
# Return a matrix, \bar{B}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n \bar{B}_n F{S_{n}} = F{S_{total}}
# i.e. project the sub vector in the Fourier domain such that summing all such projections gives the full vector in Fourier domain
tot_len = 2*sublen*N + 1
FB = np.zeros((2*sublen + 1, tot_len))
FB[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FB[sublen, sublen*N] = 1/N # all sub vectors have a "1" zero freq term so scale it so full vector will have 1
FB[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FB.T
def _proj_sub_SSP(n,N,sublen=3):
# Return a matrix, B_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n B_n S_{n} = S_{total}
# i.e. project the sub vector in the time domain such that summing all such projections gives the full vector
tot_len = 2*sublen*N + 1
FB = _proj_sub_FourierSSP(n,N,sublen=sublen)
invW = np.fft.ifft(np.eye(tot_len))
W = np.fft.fft(np.eye(2*sublen + 1))
B = invW @ np.fft.ifftshift(FB) @ W
return B.real
def _planewave_mat(K, xx, yy, x0=0, y0=0):
# Sum all plane waves to get inference pattern.
# If you make SSPs with basis vectors from ssp_plane_basis(K) and call
# sim_dots, _ = similarity_plot(X, Y, xs, ys, x0, y0)
# then sim_dots should be the same as whats returned here. This is a check/quicker way to try out patterns
mat = np.zeros(xx.shape)
for i in np.arange(K.shape[0]):
plane_wave = np.exp(1.j*(K[i,0]*(xx-x0) + K[i,1]*(yy-y0)))
mat += (plane_wave + np.conj(plane_wave)).real
return mat
|
{"hexsha": "89e2827ee3ea1adee47c56141def5bd19967ad6c", "size": 7364, "ext": "py", "lang": "Python", "max_stars_repo_path": "nengo_ssp/vector_generation.py", "max_stars_repo_name": "nsdumont/nengo_ssp", "max_stars_repo_head_hexsha": "9530a4618e213fb695b52887772c1309d0f07a0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nengo_ssp/vector_generation.py", "max_issues_repo_name": "nsdumont/nengo_ssp", "max_issues_repo_head_hexsha": "9530a4618e213fb695b52887772c1309d0f07a0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nengo_ssp/vector_generation.py", "max_forks_repo_name": "nsdumont/nengo_ssp", "max_forks_repo_head_hexsha": "9530a4618e213fb695b52887772c1309d0f07a0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6850828729, "max_line_length": 134, "alphanum_fraction": 0.6195002716, "include": true, "reason": "import numpy", "num_tokens": 2458}
|
// Implementation of all debugging command handlers.
#include <iostream>
#include <map>
#include <unordered_map>
#include <string>
#include <vector>
#include <boost/graph/breadth_first_search.hpp>
#include <boost/tokenizer.hpp>
#include "typedefs.h"
#include "debugger_commands.h"
#include "debugger_graph.h"
#include "debugger_print.h"
#include "debugger_prompt.h"
using namespace adb;
void dump_graph(Graph& graph, ScratchpadDatapath* acc, std::string graph_name) {
std::unordered_map<Vertex, const ExecNode*> vertexToNode;
BGL_FORALL_VERTICES(v, graph, Graph) {
const ExecNode* node =
acc->getProgram().nodes.at(get(boost::vertex_node_id, graph, v));
vertexToNode[v] = node;
}
std::ofstream out(graph_name + "_graph.dot", std::ofstream::out);
write_graphviz(out, graph, make_microop_label_writer(vertexToNode, graph));
}
void reconstruct_graph(Graph* new_graph,
ScratchpadDatapath* acc,
unsigned root_node_id,
unsigned num_nodes,
int max_node_id,
bool show_branch_children) {
const Graph &g = acc->getProgram().graph;
const ExecNode* root_node = acc->getProgram().nodes.at(root_node_id);
Vertex root_vertex = root_node->get_vertex();
unsigned num_nodes_visited = 0;
std::map<unsigned, Vertex> existing_nodes;
NodeVisitor visitor(new_graph,
&existing_nodes,
acc,
root_vertex,
num_nodes,
max_node_id,
show_branch_children,
&num_nodes_visited);
boost::breadth_first_search(g, root_vertex, boost::visitor(visitor));
}
HandlerRet adb::cmd_print_cycle(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cerr << "ERROR: Need to specify a cycle to print activity for!\n";
return HANDLER_ERROR;
}
int cycle = -1;
int max_nodes = 300; // Default.
try {
cycle = std::stoi(command_tokens[1], NULL, 10);
} catch (const std::invalid_argument& e) {
std::cerr << "ERROR: Invalid cycle! Must be a nonnegative integer.\n";
return HANDLER_ERROR;
}
CommandTokens args_tokens(++command_tokens.begin(), command_tokens.end());
CommandArgs args;
if (parse_command_args(args_tokens, args) != 0)
return HANDLER_ERROR;
if (args.find("max_nodes") != args.end())
max_nodes = args["max_nodes"];
if (max_nodes <= 0) {
std::cerr << "ERROR: Cannot specify max_nodes to be <= 0!\n";
return HANDLER_ERROR;
}
DebugCyclePrinter printer(cycle, (unsigned)max_nodes, acc, std::cout);
printer.printAll();
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_print_function(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cerr << "ERROR: Need to specify a function to print!\n";
return HANDLER_ERROR;
}
std::string function_name = command_tokens[1];
DebugFunctionPrinter printer(function_name, acc, std::cout);
printer.printAll();
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_print_loop(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cerr << "ERROR: Need to specify a loop to print!\n";
return HANDLER_ERROR;
}
std::string loop_name = command_tokens[1];
DebugLoopPrinter printer(acc, std::cout);
printer.printLoop(loop_name);
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_print_edge(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 3) {
std::cerr << "ERROR: Need to specify source and target node ids!\n";
return HANDLER_ERROR;
}
unsigned src_node_id = 0xdeadbeef;
unsigned tgt_node_id = 0xdeadbeef;
try {
src_node_id = std::stoi(command_tokens[1], NULL, 10);
tgt_node_id = std::stoi(command_tokens[2], NULL, 10);
} catch (const std::invalid_argument& e) {
std::cerr << "ERROR: Invalid node id! Must be a nonnegative integer.\n";
return HANDLER_ERROR;
}
if (!acc->getProgram().nodeExists(src_node_id)) {
std::cerr << "ERROR: Source node " << src_node_id << " does not exist!\n";
return HANDLER_ERROR;
}
if (!acc->getProgram().nodeExists(tgt_node_id)) {
std::cerr << "ERROR: Target node " << tgt_node_id << " does not exist!\n";
return HANDLER_ERROR;
}
const ExecNode* source = acc->getProgram().nodes.at(src_node_id);
const ExecNode* target = acc->getProgram().nodes.at(tgt_node_id);
DebugEdgePrinter printer(source, target, acc, std::cout);
printer.printAll();
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_print_node(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cerr << "ERROR: Need to specify a node id!\n";
return HANDLER_ERROR;
}
unsigned node_id = 0xdeadbeef;
try {
node_id = std::stoi(command_tokens[1], NULL, 10);
} catch (const std::invalid_argument& e) {
std::cerr << "ERROR: Invalid node id! Must be a nonnegative integer.\n";
return HANDLER_ERROR;
}
if (!acc->getProgram().nodeExists(node_id)) {
std::cerr << "ERROR: Node " << node_id << " does not exist!\n";
return HANDLER_ERROR;
}
const ExecNode* node = acc->getProgram().nodes.at(node_id);
DebugNodePrinter printer(node, acc, std::cout);
printer.printAll();
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_print(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cerr << "ERROR: Invalid arguments to 'print'.\n";
return HANDLER_ERROR;
}
CommandTokens subcommand_tokens(++command_tokens.begin(), command_tokens.end());
HandlerRet ret = dispatch_command(subcommand_tokens, subcmd_list, acc);
if (ret == HANDLER_NOT_FOUND) {
std::cerr << "ERROR: Unsupported object to print!\n";
return HANDLER_ERROR;
}
return ret;
}
// graph root=N [max_nodes=K] [show_branch_children=1/0]
HandlerRet adb::cmd_graph(const CommandTokens& command_tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
if (command_tokens.size() < 2) {
std::cout << "Invalid arguments to command 'graph'!\n";
return HANDLER_ERROR;
}
int root_node = -1;
bool show_branch_children = true; // Default
int num_nodes = 300; // Default.
int max_node_id = -1; // Default.
CommandTokens args_tokens(++command_tokens.begin(), command_tokens.end());
CommandArgs args;
if (parse_command_args(args_tokens, args) != 0)
return HANDLER_ERROR;
if (args.find("root") != args.end()) {
root_node = args["root"];
} else {
std::cerr << "ERROR: Must specify the root node!\n";
return HANDLER_ERROR;
}
if (args.find("num_nodes") != args.end())
num_nodes = args["num_nodes"];
if (args.find("max_node_id") != args.end())
max_node_id = args["max_node_id"];
if (args.find("show_branch_children") != args.end())
show_branch_children = args["show_branch_children"];
if (!acc->getProgram().nodeExists(root_node)) {
std::cerr << "ERROR: Node " << root_node << " does not exist!\n";
return HANDLER_ERROR;
}
Graph subgraph;
try {
reconstruct_graph(&subgraph,
acc,
root_node,
num_nodes,
max_node_id,
show_branch_children);
} catch (const bfs_finished &e) {
// Nothing to do.
}
dump_graph(subgraph, acc, "debug");
std::cout << "Graph has been written to debug_graph.dot.\n";
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_help(const CommandTokens& tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
std::cout << "\nAladdin debugger help\n"
<< "========================\n\n"
<< "The Aladdin debugger works just like Aladdin itself, except after Aladdin runs the\n"
<< "global optimization pass, it will pause and allow the user to query information\n"
<< "about the DDDG. The user can print details about individual nodes and see them in\n"
<< "a format that is more readable than the raw trace. Additionally, the user can\n"
<< "request a dump of a portion of the DDDG to visually inspect the dependencies\n"
<< "between nodes, as the entire DDDG is generally too large for any graph drawing\n"
<< "program to render.\n\n"
<< "The debugger will interrupt Aladdin's execution at two places: after the global\n"
<< "optimization pass, and after all the scheduling has taken place.\n\n"
<< "Note: the debugger cannot alter any Aladdin state. It can only report the state\n"
<< "in its current form.\n\n"
<< "Supported commands:\n"
<< " help : Print this message\n"
<< "\n"
<< " print node [id] : Print details about this node\n"
<< " print loop [label-name] : Print details about the loop labeled by label-name.\n"
<< " If multiple functions contain such a label, the user is prompted to select\n"
<< " the correct one. Details include the average latency of the loop and a list of\n"
<< " the branch nodes that correspond to this loop header. Technically, this will\n"
<< " work for any labeled statement, but the loop statistics would not be present.\n"
<< " print function [function-name] : Print details about this function\n"
<< " print edge [node-0] [node-1] : Print details about edges between the two nodes.\n"
<< " print cycle [cycle] : Print which nodes were executing at this cycle.\n"
<< " Optional arguments:\n"
<< " max_nodes=M : Print up to M nodes. Default: 300.\n"
<< "\n"
<< " graph root=[node-id] : Dump the DDDG in BFS fashion, with node-id as the root.\n"
<< " Optional arguments:\n"
<< " num_nodes=M : Graph up to M nodes. Default: 300.\n"
<< " max_node_id=N : Don't show any nodes greater than this ID. Default: unlimited.\n"
<< " show_branch_children=1/0 : Include edges to the children of all branch and call nodes.\n"
<< " By default, include (1). Set to 0 to exclude.\n"
<< " Branch and call nodes tend to have a lot of child dependent nodes that may\n"
<< " not be dependent on each other (e.g. different iterations of the same or\n"
<< " different loop), so you can exclude them to keep the output cleaner.\n"
<< "\n"
<< " continue : Continue executing Aladdin\n"
<< " quit : Quit the debugger.\n";
return HANDLER_SUCCESS;
}
HandlerRet adb::cmd_continue(const CommandTokens& tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
return CONTINUE;
}
HandlerRet adb::cmd_quit(const CommandTokens& tokens,
Command* subcmd_list,
ScratchpadDatapath* acc) {
return QUIT;
}
|
{"hexsha": "9852767131d2cd6e10298404db863bdb8afa2735", "size": 11949, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "common/debugger_commands.cpp", "max_stars_repo_name": "nitish2112/ALADDIN", "max_stars_repo_head_hexsha": "2cef8aa854f25483f9f50a8a07d185b56c53e150", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-13T15:59:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-13T15:59:40.000Z", "max_issues_repo_path": "common/debugger_commands.cpp", "max_issues_repo_name": "nitish2112/ALADDIN", "max_issues_repo_head_hexsha": "2cef8aa854f25483f9f50a8a07d185b56c53e150", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/debugger_commands.cpp", "max_forks_repo_name": "nitish2112/ALADDIN", "max_forks_repo_head_hexsha": "2cef8aa854f25483f9f50a8a07d185b56c53e150", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3059210526, "max_line_length": 114, "alphanum_fraction": 0.6049041761, "num_tokens": 2813}
|
/*
* Copyright 2011 Matthias Fuchs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stromx/runtime/XmlReader.h>
#include <stromx/runtime/AbstractFactory.h>
#include <stromx/runtime/FileInput.h>
#include <stromx/runtime/Stream.h>
#include <boost/python.hpp>
using namespace boost::python;
using namespace stromx::runtime;
namespace
{
Stream* (XmlReader::*readStreamFromFileWrap)(const std::string &, const AbstractFactory *) const = &XmlReader::readStream;
void (XmlReader::*readParametersFromFileWrap)(const std::string &, const AbstractFactory*, const std::vector<stromx::runtime::Operator*> &) const = &XmlReader::readParameters;
Stream* (XmlReader::*readStreamFromInputWrap)(FileInput&, const std::string &, const AbstractFactory *) const = &XmlReader::readStream;
void (XmlReader::*readParametersFromInputWrap)(FileInput&, const std::string &, const AbstractFactory*, const std::vector<stromx::runtime::Operator*> &) const = &XmlReader::readParameters;
}
void exportXmlReader()
{
class_<XmlReader>("XmlReader")
.def("readStream", readStreamFromFileWrap, return_value_policy<manage_new_object>())
.def("readParameters", readParametersFromFileWrap)
.def("readStream", readStreamFromInputWrap, return_value_policy<manage_new_object>())
.def("readParameters", readParametersFromInputWrap)
;
}
|
{"hexsha": "bf98c498d6fa946652d3af7c000e090b785dc24e", "size": 1883, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "python/stromx/runtime/XmlReader.cpp", "max_stars_repo_name": "roteroktober/stromx", "max_stars_repo_head_hexsha": "e081a35114f68a77e99a4761946b8b8c64eb591a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2015-08-16T09:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-15T14:39:20.000Z", "max_issues_repo_path": "python/stromx/runtime/XmlReader.cpp", "max_issues_repo_name": "roteroktober/stromx", "max_issues_repo_head_hexsha": "e081a35114f68a77e99a4761946b8b8c64eb591a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/stromx/runtime/XmlReader.cpp", "max_forks_repo_name": "roteroktober/stromx", "max_forks_repo_head_hexsha": "e081a35114f68a77e99a4761946b8b8c64eb591a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-05-10T02:25:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-28T13:06:01.000Z", "avg_line_length": 41.8444444444, "max_line_length": 192, "alphanum_fraction": 0.7445565587, "num_tokens": 440}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/*
* File: RpcServerModule.cpp
* Author: ubuntu
*
* Created on January 20, 2018, 2:46 PM
*/
#include <boost/beast.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/stream.hpp>
#include "keto/rpc_server/RpcServerModule.hpp"
#include "keto/common/MetaInfo.hpp"
namespace keto {
namespace rpc_server {
RpcServerModule::RpcServerModule() {
}
RpcServerModule::~RpcServerModule() {
}
// meta methods
const std::string RpcServerModule::getName() const {
return "RpcServerModule";
}
const std::string RpcServerModule::getDescription() const {
return "The RPC Server End point used by the RPC Client";
}
const std::string RpcServerModule::getVersion() const {
return keto::common::MetaInfo::VERSION;
}
}
}
|
{"hexsha": "2a9ef07e9cfdefc8041cfa09027a8e4f96644281", "size": 960, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/modules/rpc_server/RpcServerModule.cpp", "max_stars_repo_name": "burntjam/keto", "max_stars_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-03-04T10:38:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-04T10:38:00.000Z", "max_issues_repo_path": "src/modules/rpc_server/RpcServerModule.cpp", "max_issues_repo_name": "burntjam/keto", "max_issues_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/modules/rpc_server/RpcServerModule.cpp", "max_forks_repo_name": "burntjam/keto", "max_forks_repo_head_hexsha": "dbe32916a3bbc92fa0bbcb97d9de493d7ed63fd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-03-04T10:38:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-04T10:38:01.000Z", "avg_line_length": 20.4255319149, "max_line_length": 79, "alphanum_fraction": 0.7229166667, "num_tokens": 236}
|
using MedicalImagingUtils
using Documenter
DocMeta.setdocmeta!(MedicalImagingUtils, :DocTestSetup, :(using MedicalImagingUtils); recursive=true)
makedocs(;
modules=[MedicalImagingUtils],
authors="Dale <djblack@uci.edu> and contributors",
repo="https://github.com/Dale-Black/MedicalImagingUtils.jl/blob/{commit}{path}#{line}",
sitename="MedicalImagingUtils.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://Dale-Black.github.io/MedicalImagingUtils.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/Dale-Black/MedicalImagingUtils.jl",
)
|
{"hexsha": "8d0ad85d9d84c00df3fffee28c73f96a3fc09d40", "size": 691, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "Dale-Black/MedicalImagingUtils.jl", "max_stars_repo_head_hexsha": "3e58d48fd93c7608f8a557139bd902a96137b974", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "Dale-Black/MedicalImagingUtils.jl", "max_issues_repo_head_hexsha": "3e58d48fd93c7608f8a557139bd902a96137b974", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "Dale-Black/MedicalImagingUtils.jl", "max_forks_repo_head_hexsha": "3e58d48fd93c7608f8a557139bd902a96137b974", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7916666667, "max_line_length": 101, "alphanum_fraction": 0.6830680174, "num_tokens": 193}
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import unittest
import numpy
import scipy.special
from pyscf import lib
from pyscf import gto
from pyscf.dft import radi
from pyscf.symm import sph
libecp = gto.moleintor.libcgto
mol = gto.M(atom='''
Na 0.5 0.5 0.
H 0. 1. 1.
''',
basis={'Na':'lanl2dz',
'H':[[0,[1.21,1.],[.521,1.]],
[1,[3.12,1.],[.512,1.]],
[2,[2.54,1.],[.554,1.]],
[3,[0.98,1.],[.598,1.]],
[4,[0.79,1.],[.579,1.]]]},
ecp = {'Na': gto.basis.parse_ecp('''
Na nelec 10
Na ul
0 2.0000000 6.0000000
1 175.5502590 -10.0000000
2 2.3365719 -6.0637782
2 0.7799867 -0.7299393
Na S
0 243.3605846 3.0000000
#1 41.5764759 36.2847626
#2 13.2649167 72.9304880
#2 0.9764209 6.0123861
#Na P
#0 1257.2650682 5.0000000
#1 189.6248810 117.4495683
#2 54.5247759 423.3986704
#2 0.9461106 7.1241813
''')})
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
ATM_SLOTS = 6
# for _ecpbas
ATOM_OF = 0
ANG_OF = 1 # <0 means local function
NPRIM_OF = 2
RADI_POWER = 3
SO_TYPE_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
def type1_by_shell(mol, shls, ecpatm_id, ecpbas):
ish, jsh = shls
li = mol.bas_angular(ish)
npi = mol.bas_nprim(ish)
nci = mol.bas_nctr(ish)
ai = mol.bas_exp(ish)
ci = mol._libcint_ctr_coeff(ish)
icart = (li+1) * (li+2) // 2
lj = mol.bas_angular(jsh)
npj = mol.bas_nprim(jsh)
ncj = mol.bas_nctr(jsh)
aj = mol.bas_exp(jsh)
cj = mol._libcint_ctr_coeff(jsh)
jcart = (lj+1) * (lj+2) // 2
rc = mol.atom_coord(ecpatm_id)
rca = rc - mol.bas_coord(ish)
r2ca = numpy.dot(rca, rca)
rcb = rc - mol.bas_coord(jsh)
r2cb = numpy.dot(rcb, rcb)
# Note the Mole._libcint_ctr_coeff are normalized to radial part
cei = numpy.einsum('ij,i->ij', ci, numpy.exp(-ai * r2ca))
cej = numpy.einsum('ij,i->ij', cj, numpy.exp(-aj * r2cb))
#rs, ws = radi.treutler(99)
rs, ws = radi.gauss_chebyshev(99)
ur = rad_part(mol, ecpbas, rs) * ws
rad_ang_all = numpy.zeros((nci,ncj,li+lj+1,li+lj+1,li+lj+1))
for ip in range(npi):
for jp in range(npj):
rij = ai[ip] * rca + aj[jp] * rcb
aij = ai[ip] + aj[jp]
k = 2*numpy.linalg.norm(rij)
rad_all = type1_rad_part(li+lj, k, aij, ur, rs)
#ang_all = type1_ang_part(li+lj, -rij)
#rad_ang = numpy.einsum('pl,lijk->pijk', rad_all, ang_all)
rad_ang = type1_rad_ang(li+lj, rij, rad_all)
for ic in range(nci):
for jc in range(ncj):
rad_ang_all[ic,jc] += rad_ang * cei[ip,ic]*cej[jp,jc] * (4*numpy.pi)**2
ifac = type1_cache_fac(li, rca)
jfac = type1_cache_fac(lj, rcb)
g1 = numpy.zeros((nci,ncj,icart,jcart))
for ic in range(nci):
for jc in range(ncj):
for mi,(ix,iy,iz) in enumerate(loop_cart(li)):
for mj,(jx,jy,jz) in enumerate(loop_cart(lj)):
tmp = 0
for i1, i2, i3 in loop_xyz(ix, iy, iz):
for j1, j2, j3 in loop_xyz(jx, jy, jz):
fac = ifac[mi,i1,i2,i3] * jfac[mj,j1,j2,j3]
tmp += fac * rad_ang_all[ic,jc,i1+j1,i2+j2,i3+j3]
g1[ic,jc,mi,mj] = tmp
gsph = numpy.empty((nci,ncj,li*2+1,lj*2+1))
for ic in range(nci):
for jc in range(ncj):
tmp = c2s_bra(lj, g1[ic,jc].T.copy())
gsph[ic,jc] = c2s_bra(li, tmp.T.copy())
return gsph.transpose(0,2,1,3).reshape(nci*(li*2+1),-1)
def type1_cache_fac(li, ri):
facs = cache_fac(li, ri)
facs4 = numpy.zeros(((li+1)*(li+2)//2,li+1,li+1,li+1))
for mi,(ix,iy,iz) in enumerate(loop_cart(li)):
for i1, i2, i3 in loop_xyz(ix, iy, iz):
facs4[mi,i1,i2,i3] =(facs[0,ix,i1] * facs[1,iy,i2] * facs[2,iz,i3])
return facs4
def type1_rad_part(lmax, k, aij, ur, rs):
rad_all = numpy.empty((lmax+1,lmax+1))
bessel_val = sph_ine(lmax, k*rs)
ur_base = numpy.exp(k**2/(4*aij)) * ur * numpy.exp(-aij*(rs-k/(2*aij))**2)
idx = abs(ur_base) > 1e-80
for lab in range(lmax+1):
val = ur_base[idx] * rs[idx]**lab
for l in range(lmax+1):
if (lab+l) % 2 == 0:
val1 = val * bessel_val[l,idx]
rad_all[lab,l] = val1.sum()
else:
rad_all[lab,l] = 0
return rad_all
def type1_rad_ang(lmax, rij, rad_all):
norm_rij = numpy.linalg.norm(rij)
if norm_rij > 1e-18:
unitr = -rij/norm_rij
else:
unitr = -rij
omega_nuc = []
for lmb in range(lmax+1):
c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
omega_nuc.append(numpy.dot(ang_nuc_part(lmb, unitr), c2smat))
rad_ang = numpy.zeros((lmax+1,lmax+1,lmax+1))
for i in range(lmax+1):
for j in range(lmax+1-i):
for k in range(lmax+1-i-j):
for lmb in range(lmax+1):
if (i+j+k+lmb) % 2 == 0:
tmp = 0
for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
tmp += omega_nuc[lmb][n] * int_unit_xyz(i+i1, j+j1, k+k1)
rad_ang[i,j,k] += rad_all[i+j+k,lmb] * tmp
return rad_ang
def type2_by_shell(mol, shls, ecpatm_id, ecpbas):
ish, jsh = shls
li = mol.bas_angular(ish)
npi = mol.bas_nprim(ish)
nci = mol.bas_nctr(ish)
ai = mol.bas_exp(ish)
ci = mol._libcint_ctr_coeff(ish)
icart = (li+1) * (li+2) // 2
lj = mol.bas_angular(jsh)
npj = mol.bas_nprim(jsh)
ncj = mol.bas_nctr(jsh)
aj = mol.bas_exp(jsh)
cj = mol._libcint_ctr_coeff(jsh)
jcart = (lj+1) * (lj+2) // 2
rc = mol.atom_coord(ecpatm_id)
rcb = rc - mol.bas_coord(jsh)
r_cb = numpy.linalg.norm(rcb)
rca = rc - mol.bas_coord(ish)
r_ca = numpy.linalg.norm(rca)
#rs, ws = radi.treutler(99)
rs, ws = radi.gauss_chebyshev(99)
i_fac_cache = cache_fac(li, rca)
j_fac_cache = cache_fac(lj, rcb)
g1 = numpy.zeros((nci,ncj,icart,jcart))
for lc in range(5): # up to g function
ecpbasi = ecpbas[ecpbas[:,ANG_OF] == lc]
if len(ecpbasi) == 0:
continue
ur = rad_part(mol, ecpbasi, rs) * ws
idx = abs(ur) > 1e-80
rur = numpy.array([ur[idx] * rs[idx]**lab for lab in range(li+lj+1)])
fi = facs_rad(mol, ish, lc, r_ca, rs)[:,:,idx].copy()
fj = facs_rad(mol, jsh, lc, r_cb, rs)[:,:,idx].copy()
angi = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
angj = facs_ang(type2_ang_part(lj, lc, -rcb), lj, lc, j_fac_cache)
for ic in range(nci):
for jc in range(ncj):
rad_all = numpy.einsum('pr,ir,jr->pij', rur, fi[ic], fj[jc])
for i1 in range(li+1):
for j1 in range(lj+1):
g1[ic,jc] += numpy.einsum('pq,imp,jmq->ij', rad_all[i1+j1],
angi[i1], angj[j1])
g1 *= (numpy.pi*4)**2
gsph = numpy.empty((nci,ncj,li*2+1,lj*2+1))
for ic in range(nci):
for jc in range(ncj):
tmp = c2s_bra(lj, g1[ic,jc].T.copy())
gsph[ic,jc] = c2s_bra(li, tmp.T.copy())
return gsph.transpose(0,2,1,3).reshape(nci*(li*2+1),-1)
def so_by_shell(mol, shls, ecpatm_id, ecpbas):
'''SO-ECP
i/2 <Pauli_matrix dot l U(r)>
'''
ish, jsh = shls
li = mol.bas_angular(ish)
npi = mol.bas_nprim(ish)
nci = mol.bas_nctr(ish)
ai = mol.bas_exp(ish)
ci = mol._libcint_ctr_coeff(ish)
icart = (li+1) * (li+2) // 2
lj = mol.bas_angular(jsh)
npj = mol.bas_nprim(jsh)
ncj = mol.bas_nctr(jsh)
aj = mol.bas_exp(jsh)
cj = mol._libcint_ctr_coeff(jsh)
jcart = (lj+1) * (lj+2) // 2
rc = mol.atom_coord(ecpatm_id)
rcb = rc - mol.bas_coord(jsh)
r_cb = numpy.linalg.norm(rcb)
rca = rc - mol.bas_coord(ish)
r_ca = numpy.linalg.norm(rca)
#rs, ws = radi.treutler(99)
rs, ws = radi.gauss_chebyshev(99)
i_fac_cache = cache_fac(li, rca)
j_fac_cache = cache_fac(lj, rcb)
g1 = numpy.zeros((nci,ncj,3,icart,jcart), dtype=numpy.complex128)
for lc in range(5): # up to g function
ecpbasi = ecpbas[ecpbas[:,ANG_OF] == lc]
if len(ecpbasi) == 0:
continue
ur = rad_part(mol, ecpbasi, rs) * ws
idx = abs(ur) > 1e-80
rur = numpy.array([ur[idx] * rs[idx]**lab for lab in range(li+lj+1)])
fi = facs_rad(mol, ish, lc, r_ca, rs)[:,:,idx].copy()
fj = facs_rad(mol, jsh, lc, r_cb, rs)[:,:,idx].copy()
angi = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
angj = facs_ang(type2_ang_part(lj, lc, -rcb), lj, lc, j_fac_cache)
# Note the factor 2/(2l+1) in JCP 82, 2664 (1985); DOI:10.1063/1.448263 is not multiplied here
# because the ECP parameter has been scaled by 2/(2l+1) in CRENBL
jmm = angular_moment_matrix(lc)
for ic in range(nci):
for jc in range(ncj):
rad_all = numpy.einsum('pr,ir,jr->pij', rur, fi[ic], fj[jc])
for i1 in range(li+1):
for j1 in range(lj+1):
g1[ic,jc] += numpy.einsum('pq,imp,jnq,lmn->lij', rad_all[i1+j1],
angi[i1], angj[j1], jmm)
g1 *= (numpy.pi*4)**2
gspinor = numpy.empty((nci,ncj,li*4+2,lj*4+2), dtype=numpy.complex128)
for ic in range(nci):
for jc in range(ncj):
ui = numpy.asarray(gto.cart2spinor_l(li))
uj = numpy.asarray(gto.cart2spinor_l(lj))
s = lib.PauliMatrices * .5j
gspinor[ic,jc] = numpy.einsum('sxy,spq,xpi,yqj->ij', s,
g1[ic,jc], ui.conj(), uj)
return gspinor.transpose(0,2,1,3).reshape(nci*(li*4+2),-1)
def cache_fac(l, r):
facs = numpy.empty((3,l+1,l+1))
for i in range(l+1):
for j in range(i+1):
facs[0,i,j] = scipy.special.binom(i,j) * r[0]**(i-j)
facs[1,i,j] = scipy.special.binom(i,j) * r[1]**(i-j)
facs[2,i,j] = scipy.special.binom(i,j) * r[2]**(i-j)
return facs
def sph_in(l, xs):
'''Modified spherical Bessel function of the first kind'''
return numpy.asarray([scipy.special.spherical_in(numpy.arange(l+1), x) for x in xs]).T
def sph_ine(l, xs):
'''exponentially scaled modified spherical Bessel function'''
bval = sph_in(l, xs)
return numpy.einsum('ij,j->ij', bval, numpy.exp(-xs))
def loop_xyz(nx, ny, nz):
for ix in range(nx+1):
for iy in range(ny+1):
for iz in range(nz+1):
yield ix, iy, iz
def loop_cart(l):
for ix in reversed(range(l+1)):
for iy in reversed(range(l-ix+1)):
iz = l - ix - iy
yield ix, iy, iz
def rad_part(mol, ecpbas, rs):
ur = numpy.zeros_like(rs)
for ecpsh in ecpbas:
npk = ecpsh[NPRIM_OF]
r_order = ecpsh[RADI_POWER]
ak = mol._env[ecpsh[PTR_EXP]:ecpsh[PTR_EXP]+npk]
ck = mol._env[ecpsh[PTR_COEFF]:ecpsh[PTR_COEFF]+npk]
u1 = numpy.zeros_like(ur)
for kp, a1 in enumerate(ak):
u1 += ck[kp] * numpy.exp(-a1*rs**2)
u1 *= rs**r_order
ur += u1
return ur
def facs_rad(mol, ish, lc, r_ca, rs):
facs = []
li = mol.bas_angular(ish)
ai = mol.bas_exp(ish)
ci = mol._libcint_ctr_coeff(ish)
npi = mol.bas_nprim(ish)
for ip in range(npi):
ka = 2*ai[ip]*r_ca
facs.append(numpy.einsum('ij,j->ij', sph_ine(li+lc, ka*rs),
numpy.exp(-ai[ip]*(rs-r_ca)**2)))
facs = numpy.einsum('pk,pij->kij', ci, facs)
return facs
# x**n*y**n*z**n * c2s * c2s.T, to project out 3s, 4p, ...
def type1_ang_part(lmax, rij):
norm_rij = numpy.linalg.norm(rij)
if norm_rij > 1e-18:
unitr = rij/norm_rij
else:
unitr = rij
omega_nuc = []
for lmb in range(lmax+1):
c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
omega_nuc.append(4*numpy.pi * numpy.dot(ang_nuc_part(lmb, unitr), c2smat))
omega = numpy.empty((lmax+1,lmax+1,lmax+1,lmax+1))
for lmb in range(lmax+1):
omega_elec = numpy.empty((lmb+1)*(lmb+2)//2)
for i in range(lmax+1):
for j in range(lmax+1-i):
for k in range(lmax+1-i-j):
if (i+j+k+lmb) % 2 == 0:
for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
omega_elec[n] = int_unit_xyz(i+i1, j+j1, k+k1)
omega[lmb,i,j,k] = numpy.dot(omega_nuc[lmb], omega_elec)
else:
omega[lmb,i,j,k] = 0
return omega
def type2_ang_part(li, lc, ri):
# [lambda,m,a,b,c]
norm_ri = numpy.linalg.norm(ri)
if norm_ri > 1e-18:
unitr = ri/norm_ri
else:
unitr = ri
omega = numpy.empty((li+1,li+1,li+1,lc*2+1,li+lc+1))
lcart = (lc+1)*(lc+2)//2
omega_nuc = []
for lmb in range(li+lc+1):
c2smat = c2s_bra(lmb, numpy.eye((lmb+1)*(lmb+2)//2))
omega_nuc.append(4*numpy.pi * numpy.dot(ang_nuc_part(lmb, unitr), c2smat))
tmp = numpy.empty((lcart,li+lc+1))
for a in range(li+1):
for b in range(li+1-a):
for c in range(li+1-a-b):
for lmb in range(li+lc+1):
if (lc+a+b+c+lmb) % 2 == 0:
omega_xyz = numpy.empty((lcart, (lmb+1)*(lmb+2)//2))
for m,(u,v,w) in enumerate(loop_cart(lc)):
for n, (i1, j1, k1) in enumerate(loop_cart(lmb)):
omega_xyz[m,n] = int_unit_xyz(a+u+i1, b+v+j1, c+w+k1)
tmp[:,lmb] = numpy.dot(omega_xyz, omega_nuc[lmb])
else:
tmp[:,lmb] = 0
omega[a,b,c,:,:] = c2s_bra(lc, tmp)
return omega
def angular_moment_matrix(l):
'''Matrix of angular moment operator l*1j on the real spherical harmonic
basis'''
lz = numpy.diag(numpy.arange(-l, l+1, dtype=numpy.complex128))
lx = numpy.zeros_like(lz)
ly = numpy.zeros_like(lz)
for mi in range(-l, l+1):
mj = mi + 1
if mj <= l:
lx[l+mi,l+mj] = .5 * ((l+mj)*(l-mj+1))**.5
ly[l+mi,l+mj] = .5j * ((l+mj)*(l-mj+1))**.5
mj = mi - 1
if mj >= -l:
lx[l+mi,l+mj] = .5 * ((l-mj)*(l+mj+1))**.5
ly[l+mi,l+mj] =-.5j * ((l-mj)*(l+mj+1))**.5
u = sph.sph_pure2real(l)
lx = u.conj().T.dot(lx).dot(u)
ly = u.conj().T.dot(ly).dot(u)
lz = u.conj().T.dot(lz).dot(u)
return numpy.array((lx, ly, lz))
def facs_ang(omega, l, lc, fac_cache):
# (a+b+c,cart_nlm, m, lambda )
facs = numpy.zeros((l+1,(l+1)*(l+2)//2,lc*2+1,l+lc+1))
for mi,(ix,iy,iz) in enumerate(loop_cart(l)):
for i1, i2, i3 in loop_xyz(ix, iy, iz):
fac = fac_cache[0,ix,i1] * fac_cache[1,iy,i2] * fac_cache[2,iz,i3]
facs[i1+i2+i3,mi,:,:] += fac * omega[i1,i2,i3]
return facs
def ang_nuc_part(l, rij):
omega_xyz = numpy.empty((l+1)*(l+2)//2)
k = 0
for i1 in reversed(range(l+1)):
for j1 in reversed(range(l-i1+1)):
k1 = l - i1 - j1
omega_xyz[k] = rij[0]**i1 * rij[1]**j1 * rij[2]**k1
k += 1
if l == 0:
return omega_xyz * 0.282094791773878143
elif l == 1:
return omega_xyz * 0.488602511902919921
else:
omega = numpy.empty((2*l+1))
fc2s = libecp.CINTc2s_ket_sph
fc2s(omega.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(1),
omega_xyz.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
return omega
def int_unit_xyz(i, j, k):
if i % 2 or j % 2 or k % 2:
return 0
else:
return (_fac2[i-1] * _fac2[j-1] * _fac2[k-1] / _fac2[i+j+k+1])
_fac2 = scipy.special.factorial2(numpy.arange(80))
_fac2[-1] = 1
def c2s_bra(l, gcart):
if l == 0:
return gcart * 0.282094791773878143
elif l == 1:
return gcart * 0.488602511902919921
else:
m = gcart.shape[1]
gsph = numpy.empty((l*2+1,m))
fc2s = libecp.CINTc2s_ket_sph
fc2s(gsph.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(m),
gcart.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(l))
return gsph
class KnownValues(unittest.TestCase):
def test_bessel(self):
rs = radi.gauss_chebyshev(99)[0]
bessel1 = numpy.empty(8)
for i,x in enumerate(rs):
bessel0 = scipy.special.spherical_in(numpy.arange(7+1), x) * numpy.exp(-x)
libecp.ECPsph_ine(bessel1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(7), ctypes.c_double(x))
self.assertTrue(numpy.allclose(bessel0, bessel1))
def test_gauss_chebyshev(self):
rs0, ws0 = radi.gauss_chebyshev(99)
rs = numpy.empty_like(rs0)
ws = numpy.empty_like(ws0)
libecp.ECPgauss_chebyshev(rs.ctypes.data_as(ctypes.c_void_p),
ws.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(99))
self.assertTrue(numpy.allclose(rs0, rs))
self.assertTrue(numpy.allclose(ws0, ws))
def test_rad_part(self):
rs, ws = radi.gauss_chebyshev(99)
ur0 = rad_part(mol, mol._ecpbas, rs)
ur1 = numpy.empty_like(ur0)
cache = numpy.empty(100000)
libecp.ECPrad_part(ur1.ctypes.data_as(ctypes.c_void_p),
rs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(0), ctypes.c_int(len(rs)), ctypes.c_int(1),
(ctypes.c_int*2)(0, len(mol._ecpbas)),
mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p),
lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(ur0, ur1))
def test_type2_ang_part(self):
numpy.random.seed(3)
rca = numpy.random.random(3)
cache = numpy.empty(100000)
def type2_facs_ang(li, lc):
i_fac_cache = cache_fac(li, rca)
facs0 = facs_ang(type2_ang_part(li, lc, -rca), li, lc, i_fac_cache)
facs1 = numpy.empty_like(facs0)
libecp.type2_facs_ang(facs1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(li), ctypes.c_int(lc),
rca.ctypes.data_as(ctypes.c_void_p),
cache.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(facs0, facs1))
for li in range(6):
for lc in range(5):
type2_facs_ang(li, lc)
def test_type2_rad_part(self):
rc = .8712
rs, ws = radi.gauss_chebyshev(99)
cache = numpy.empty(100000)
def type2_facs_rad(ish, lc):
facs0 = facs_rad(mol, ish, lc, rc, rs).transpose(0,2,1).copy()
facs1 = numpy.empty_like(facs0)
libecp.type2_facs_rad(facs1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(ish), ctypes.c_int(lc),
ctypes.c_double(rc),
rs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(rs)), ctypes.c_int(1),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p),
cache.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(facs0, facs1))
for ish in range(mol.nbas):
for lc in range(5):
type2_facs_rad(ish, lc)
def test_type2(self):
cache = numpy.empty(100000)
def gen_type2(shls):
di = (mol.bas_angular(shls[0])*2+1) * mol.bas_nctr(shls[0])
dj = (mol.bas_angular(shls[1])*2+1) * mol.bas_nctr(shls[1])
mat0 = numpy.zeros((di,dj))
for ia in range(mol.natm):
ecpbas = mol._ecpbas[mol._ecpbas[:,ATOM_OF] == ia]
if len(ecpbas) == 0:
continue
mat0 += type2_by_shell(mol, shls, ia, ecpbas)
mat1 = numpy.empty(mat0.shape, order='F')
libecp.ECPtype2_sph(mat1.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*2)(*shls),
mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(mol._ecpbas)),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p),
lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
if not numpy.allclose(mat0, mat1, atol=1e-8):
print(i, j, 'error = ', numpy.linalg.norm(mat0-mat1))
self.assertTrue(numpy.allclose(mat0, mat1, atol=1e-6))
mat2 = gto.ecp.type2_by_shell(mol, shls)
self.assertTrue(numpy.allclose(mat0, mat2, atol=1e-6))
for i in range(mol.nbas):
for j in range(mol.nbas):
gen_type2((i,j))
def test_type1_state_fac(self):
numpy.random.seed(3)
ri = numpy.random.random(3) - .5
cache = numpy.empty(100000)
def tfacs(li):
facs0 = type1_cache_fac(li, ri)
facs1 = numpy.zeros_like(facs0)
libecp.type1_static_facs(facs1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(li),
ri.ctypes.data_as(ctypes.c_void_p),
cache.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(facs0, facs1))
for l in range(6):
tfacs(l)
def test_type1_rad_ang(self):
numpy.random.seed(4)
ri = numpy.random.random(3) - .5
def tfacs(lmax):
rad_all = numpy.random.random((lmax+1,lmax+1))
rad_ang0 = type1_rad_ang(lmax, ri, rad_all)
rad_ang1 = numpy.empty_like(rad_ang0)
libecp.type1_rad_ang(rad_ang1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(lmax),
ri.ctypes.data_as(ctypes.c_void_p),
rad_all.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(rad_ang0, rad_ang1))
for l in range(13):
tfacs(l)
def test_type1_rad(self):
k = 1.621
aij = .792
rs, ws = radi.gauss_chebyshev(99)
ur = rad_part(mol, mol._ecpbas, rs) * ws
cache = numpy.empty(100000)
def gen_type1_rad(li):
rad_all0 = type1_rad_part(li, k, aij, ur, rs)
rad_all1 = numpy.zeros_like(rad_all0)
libecp.type1_rad_part(rad_all1.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(li),
ctypes.c_double(k), ctypes.c_double(aij),
ur.ctypes.data_as(ctypes.c_void_p),
rs.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(rs)), ctypes.c_int(1),
cache.ctypes.data_as(ctypes.c_void_p))
self.assertTrue(numpy.allclose(rad_all0, rad_all1))
for l in range(13):
gen_type1_rad(l)
def test_type1(self):
def gen_type1(shls):
di = (mol.bas_angular(shls[0])*2+1) * mol.bas_nctr(shls[0])
dj = (mol.bas_angular(shls[1])*2+1) * mol.bas_nctr(shls[1])
mat0 = numpy.zeros((di,dj))
for ia in range(mol.natm):
ecpbas = mol._ecpbas[mol._ecpbas[:,ATOM_OF] == ia]
if len(ecpbas) == 0:
continue
ecpbas0 = ecpbas[ecpbas[:,ANG_OF] < 0]
if len(ecpbas0) == 0:
continue
mat0 += type1_by_shell(mol, shls, ia, ecpbas0)
mat1 = numpy.empty(mat0.shape, order='F')
cache = numpy.empty(100000)
libecp.ECPtype1_sph(mat1.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*2)(*shls),
mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(mol._ecpbas)),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p),
lib.c_null_ptr(), cache.ctypes.data_as(ctypes.c_void_p))
if not numpy.allclose(mat0, mat1, atol=1e-8):
print(i, j, numpy.linalg.norm(mat0-mat1))
self.assertTrue(numpy.allclose(mat0, mat1, atol=1e-6))
mat2 = gto.ecp.type1_by_shell(mol, shls)
self.assertTrue(numpy.allclose(mat0, mat2, atol=1e-6))
for i in range(mol.nbas):
for j in range(mol.nbas):
gen_type1((i,j))
def test_so_1atom(self):
mol = gto.M(atom='''
Na 0.5 0.5 0.
''',
charge=1,
basis={'Na': [(0, (1, 1)), (1, (4, 1)), (1, (1, 1)), (2, (1, 1))]},
ecp = {'Na': gto.basis.parse_ecp('''
Na nelec 8
Na ul
1 0. -3. -3.
Na S
1 0. -3. -3.
Na P
1 0. -3. -3.
Na D
1 0. -3. -3.
Na F
1 0. -3. -3.
''')})
def gen_so(shls):
mat0 = 0
for ia in range(mol.natm):
ecpbas = mol._ecpbas[(mol._ecpbas[:,ATOM_OF]==ia) &
(mol._ecpbas[:,SO_TYPE_OF]==1)]
if len(ecpbas) == 0:
continue
mat0 += so_by_shell(mol, shls, ia, ecpbas)
s = lib.PauliMatrices * .5
ui = numpy.asarray(gto.sph2spinor_l(mol.bas_angular(shls[0])))
uj = numpy.asarray(gto.sph2spinor_l(mol.bas_angular(shls[1])))
ref = numpy.einsum('sxy,spq,xpi,yqj->ij', s,
mol.intor_by_shell('int1e_inuc_rxp', shls),
ui.conj(), uj)
self.assertAlmostEqual(abs(ref-mat0).max(), 0, 12)
mat2 = .5 * gto.ecp.so_by_shell(mol, shls)
self.assertTrue(numpy.allclose(ref, mat2, atol=1e-6))
for i in range(mol.nbas):
for j in range(mol.nbas):
gen_so((i,j))
if __name__ == '__main__':
print('Full Tests for ecp')
unittest.main()
|
{"hexsha": "0ee8634fb2ddf829f231dc38f931da6f63b5af01", "size": 28209, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyscf/lib/gto/test/test_ecp.py", "max_stars_repo_name": "mfkasim1/pyscf", "max_stars_repo_head_hexsha": "7be5e015b2b40181755c71d888449db936604660", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyscf/lib/gto/test/test_ecp.py", "max_issues_repo_name": "mfkasim1/pyscf", "max_issues_repo_head_hexsha": "7be5e015b2b40181755c71d888449db936604660", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyscf/lib/gto/test/test_ecp.py", "max_forks_repo_name": "mfkasim1/pyscf", "max_forks_repo_head_hexsha": "7be5e015b2b40181755c71d888449db936604660", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8553719008, "max_line_length": 102, "alphanum_fraction": 0.5263568365, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8973}
|
! { dg-do compile }
!
! PR 42188: [OOP] F03:C612. The leftmost part-name shall be the name of a data object
!
! Contributed by Janus Weil <janus@gcc.gnu.org>
module grid_module
implicit none
type grid
contains
procedure :: new_grid
procedure :: new_int
end type
contains
subroutine new_grid(this)
class(grid) :: this
end subroutine
integer function new_int(this)
class(grid) :: this
new_int = 42
end function
end module
module field_module
use grid_module
implicit none
type field
type(grid) :: mesh
end type
contains
type(field) function new_field()
end function
subroutine test
integer :: i
type(grid) :: g
g = new_field()%mesh ! { dg-error "can not be a function reference" }
call new_field()%mesh%new_grid() ! { dg-error "Syntax error" }
i = new_field() % mesh%new_int() ! { dg-error "can not be a function reference" }
end subroutine
end module
|
{"hexsha": "51f5b8634945d3ea36da08db7777f9a9827d0af3", "size": 921, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/derived_result_2.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/derived_result_2.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/derived_result_2.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 20.0217391304, "max_line_length": 85, "alphanum_fraction": 0.6872964169, "num_tokens": 247}
|
from sklearn.svm import LinearSVR
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv("finalenc.csv")
y = df['price']
X = df.drop(columns=['price'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
regr = make_pipeline(StandardScaler(), LinearSVR(random_state=0, tol=1e-03))
reg = LinearRegression().fit(X_train, y_train)
regr.fit(X_train,y_train)
y_pred = regr.predict(X_test)
plt.figure()
plt.plot(range(100000))
plt.scatter(y_test ,y_pred, alpha=0.4, c='red', label='Ground Truth vs Predicted')
plt.savefig('SVR.png')
|
{"hexsha": "844a059771536f1bdead9e1a1dcd9d080ba67b15", "size": 916, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn2.py", "max_stars_repo_name": "WayneFerrao/autofocus", "max_stars_repo_head_hexsha": "80a5d2366639177dbd16708a79b88df17528054c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-11T22:51:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-11T22:51:52.000Z", "max_issues_repo_path": "nn2.py", "max_issues_repo_name": "WayneFerrao/autofocus", "max_issues_repo_head_hexsha": "80a5d2366639177dbd16708a79b88df17528054c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn2.py", "max_forks_repo_name": "WayneFerrao/autofocus", "max_forks_repo_head_hexsha": "80a5d2366639177dbd16708a79b88df17528054c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7142857143, "max_line_length": 90, "alphanum_fraction": 0.769650655, "include": true, "reason": "import numpy", "num_tokens": 223}
|
\setchapterpreamble[u]{\margintoc}
\chapter{Energy Systems}
\labch{tut1}
After completing this session successfully, you should be able
\begin{itemize}
\item to understand what an energy system is,
\item to know about issues connected with current energy systems, and
\item to explain the necessity of energy system transitions.
\end{itemize}
\section{What is an energy system?}
\paragraph*{Defining energy and energy system.}
\begin{kaobox}[frametitle=Task]
What do you think about when you hear the word \textit{energy}? What do you think an \textit{energy system} is?
\end{kaobox}
\begin{definition}
\labdef{energy}
Energy is the capacity for doing work. It can exist in potential, kinetic, thermal, chemical, and various other forms \sidecite{britannica_the_editors_of_encyclopaedia_energy_nodate}.
\end{definition}
\begin{definition}
\labdef{es}
The energy system encompasses all components involved in the production, conversion, delivery, and use of energy \sidecite{intergovernmental_panel_on_climate_change_climate_2014}.
\end{definition}
It is important to have in mind that the whole purpose of the energy system is to fulfil the demand for energy services to satisfy human needs. Thus, the energy system is also sometimes defined as all arrangements whereby humans make use of the Earth’s energy resources to enhance their lives \sidecite{smil_energy_2010}.
% TODO: add a side note on what energy resources are (including fossil, renewable examples, etc.)
\paragraph*{Understanding what is part of the energy system.}
\begin{kaobox}[frametitle=Task]
Look at the photos in \reffig{es_photos}, what do you think is part of the energy system? Can you identify different types of elements of the energy system?
\end{kaobox}
\begin{figure}[hb]
\includegraphics[height=0.34\textwidth]{files/ES_photo_1.jpg}
\includegraphics[height=0.34\textwidth]{files/ES_photo_2.jpg}\\
\includegraphics[height=0.34\textwidth]{files/ES_photo_3.jpg}
\includegraphics[height=0.34\textwidth]{files/ES_photo_4.jpg}
\caption[Photos showing different parts of the energy system.]{Photos showing different parts of the energy system. Bottom right photo, \copyright Leonhard Hofbauer, 2021, released under a CC-BY-4.0, the remaining photos, clockwise, are from Steve Wilson, VasenkaPhotography, and Stig Nygaard, respectively, and all licensed under a CC-BY-2.0.}
\labfig{es_photos}
\end{figure}
\section{Major issues of current energy systems}
\labsec{issues}
\paragraph*{Considering problems connected to current energy systems.}
\begin{kaobox}[frametitle= Excerpt from \sidecite{maciel_energy_2012} (reprinted under a CC-BY-4.0), backgroundcolor=Goldenrod!45!white,frametitlebackgroundcolor=Goldenrod!45!white]
Worldwide, transportation generates over 23\% of the total of greenhouse gas emissions arising from the use of energy and accounts for at least 26\% of the planet's energy consumption [1,2,3,4,5,6]. The transportation sector occupies between 15\% and 25\% of the land mass in major cities throughout the World [7,8,9,10,11], and the time lost in traffic congestion in several countries leads to an economic loss of approximately 1 to 3\% of GDP [12].\\
Furthermore, over a million people die and three million are injured every year in road traffic accidents worldwide [13,14,15]. These accidents result in economic costs of approximately 5\% of GDP in some countries [16]. Several countries considered to be `emerging' economically, such as Brazil, have adopted transportation systems that repeat the errors committed by more developed countries, including the encouragement of individual motorized transportation as the standard model. This has not proved to be the optimal solution [17].\\
Additionally, studies on the causes of persistent poverty in the peripheral areas of large cities, both in developed and emerging countries, point to a lack of transportation as one of the principal causes of social ills [18]. It is clear that an economy suffers significant losses in the absence of adequate transportation support. A sustainable transportation strategy has the potential to make a significant contribution to the environmental, economic and social development of cities and surrounding areas [19].\\
In Brazil, where the government regularly proclaims its overriding commitment to both the efficient use of public resources and the improvement of living standards for the population, there is an urgent and evident need for a drastic overhaul of the currently unsustainable transportation system. An accurate and reliable projection of the consumption of resources and the level of emissions produced by our transportation system in the coming decades is a key factor in ensuring the adequate redirection of public policies and the consequent benefits to the population.
\end{kaobox}
\begin{kaobox}[frametitle=Task]
Read through the box above, which talks about the transport sector, a part of the energy system. What issues related to the current transport system are mentioned? Are these issues also existing more widely in other parts of the energy system? Can you think of any other issues connected to the current transport and energy system not mentioned in the text?
\end{kaobox}
\section{The energy transition}
\paragraph*{The need for energy transitions.}~\\
% TODO: add a definition and/or more information on the energy transition in a side note
The issues discussed in the previous section -- climate change, pollution, health impacts, energy poverty, and others -- require that we change the way societies extract, transport, and use energy. This is often described as energy transition.
\begin{kaobox}[frametitle=Task]
Who do you think is responsible for the energy transition? Why?
\end{kaobox}
\section{Homework}
\labsec{hw1}
\begin{figure}[hb]
\includegraphics[width=0.95\textwidth]{files/india_transport.pdf}
\caption[Transport sector pathway for India.]{Graph showing the passenger transport performance in billion person kilometres (bpkm) for a potential pathway for the Indian road transport system (2W: motorcycle, 3W: 3-wheeler motorcycle, BEV: battery electric vehicle, CIEV: diesel vehicle, FCV: fuel cell hydrogen vehicle, HEV: hybrid electric vehicle, SIEGV: CNG/gas vehicle, SIEV: petrol vehicle). Adopted from \cite{hofbauer_shaping_2018}.}
\labfig{india_transport}
\end{figure}
\reffig{india_transport} above shows how the road transport sector in India might develop in future. It was part of a study that described several of those possible future pathways.
Write a text that covers the following three elements:
\begin{itemize}
\item Write a short description the development shown in the figure.
\item Research and discuss one or two social or environmental problems of the energy system in India at the moment and explain how this development could worsen or improve these problems.
\item Describe changes one could make to this development to solve these issues.
\end{itemize}
Please have the following points in mind when working on your assignment:
\begin{itemize}
\item Your assignment needs to be 500 $\pm$ 10\% words long.
\item You need to submit your assignment on the platform and before the deadline mentioned by your course leader.
\item If you have any questions while working on the assignment, do not hesitate to approach your course leader.
\end{itemize}
|
{"hexsha": "e8fdd6944784a2336e51aa96ad843806c6207127", "size": 7357, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "handbook/chapters/session_1.tex", "max_stars_repo_name": "lhofbauer/outreach_course", "max_stars_repo_head_hexsha": "2c15f6e33103f3a06be2e1228cc43ac2547632ef", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "handbook/chapters/session_1.tex", "max_issues_repo_name": "lhofbauer/outreach_course", "max_issues_repo_head_hexsha": "2c15f6e33103f3a06be2e1228cc43ac2547632ef", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "handbook/chapters/session_1.tex", "max_forks_repo_name": "lhofbauer/outreach_course", "max_forks_repo_head_hexsha": "2c15f6e33103f3a06be2e1228cc43ac2547632ef", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.6875, "max_line_length": 570, "alphanum_fraction": 0.8008699198, "num_tokens": 1695}
|
# -*- coding: utf-8 -*-
import numpy as np
import pycuda.autoinit # noqa
import pycuda.driver as cuda
class _CalibratorBuffer:
def __init__(self, bindings, batch_size):
self.bindings = bindings
self.allocations = {}
for binding in self.bindings.values():
elem_size = binding.type.size
elem_count = binding.dimensions.size
self.allocations[binding.name] = \
cuda.mem_alloc(batch_size * elem_size * elem_count)
def release(self):
for mem in self.allocations.values():
mem.free()
self.allocations = {}
def put(self, name, index, value):
binding = self.bindings[name]
if value.dtype != binding.type.nptype:
raise TypeError()
if value.shape != binding.dimensions.shape:
raise ValueError()
allocation = self.allocations[name]
elem_size = binding.type.size
elem_count = binding.dimensions.size
dstptr = int(allocation) + index * elem_size * elem_count
if value.flags["C_CONTIGUOUS"]:
cuda.memcpy_htod(dstptr, value)
else:
cuda.memcpy_htod(dstptr, np.ascontiguousarray(value))
class Int8Calibrator:
"""The object to use INT8 calibrator.
Args:
samples(object): The samples for INT8 calibrator.
batch_size(int): The batch size.
"""
def __init__(self, samples, batch_size):
self.batch_size = batch_size
self.iterator = iter(samples)
self.network = None
self.buffer = None
def get_batch(self, names):
"""Get the batch of input for calibration.
Args:
names(list): The names of the network input.
Returns:
batch(list): The batch of input for calibration.
"""
assert self.network is not None
if self.buffer is not None:
self.buffer.release()
self.buffer = None
self.buffer = _CalibratorBuffer(
self.network.input_bindings, self.batch_size)
for i in range(self.batch_size):
try:
sample = next(self.iterator)
except StopIteration:
self.buffer.release()
self.buffer = None
return None
if type(sample) is not dict:
if len(names) == 1:
sample = {names[0]: sample}
else:
raise ValueError()
for key in names:
self.buffer.put(key, i, sample[key])
return [self.buffer.allocations[key] for key in names]
def get_batch_size(self):
"""Get the batch size.
Returns:
batch_size(int): The batch size.
"""
return self.batch_size
|
{"hexsha": "005f7f255a02dc68f96e22301a5020a1c29e723f", "size": 2803, "ext": "py", "lang": "Python", "max_stars_repo_path": "turret/int8/int8_calibrator.py", "max_stars_repo_name": "hemantranvir/turret", "max_stars_repo_head_hexsha": "bc3df21541ce2f808c749c985db47a210149f22c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-03-14T18:27:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-05T05:34:30.000Z", "max_issues_repo_path": "turret/int8/int8_calibrator.py", "max_issues_repo_name": "hemantranvir/turret", "max_issues_repo_head_hexsha": "bc3df21541ce2f808c749c985db47a210149f22c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-07T06:03:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-07T06:03:04.000Z", "max_forks_repo_path": "turret/int8/int8_calibrator.py", "max_forks_repo_name": "hemantranvir/turret", "max_forks_repo_head_hexsha": "bc3df21541ce2f808c749c985db47a210149f22c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-10-30T10:30:47.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-30T11:15:40.000Z", "avg_line_length": 29.8191489362, "max_line_length": 67, "alphanum_fraction": 0.5704602212, "include": true, "reason": "import numpy,import pycuda", "num_tokens": 577}
|
import ITlib
import numpy
import matplotlib.pyplot as plt
print "\nExample 2"
print "Entropy example for a binary source\n"
step = 0.001
p1 = numpy.arange(0,1+step,step) # p1 defined in the [0,1] range
p2 = 1 - p1 # p2 = 1 - p1
H = numpy.zeros(p1.shape) # entropy initialization
for i in range(p1.shape[0]): # for each p value
Probs = numpy.array([p1[i], p2[i]]) # define probabiitlies matrix
H[i] = ITlib.computeEntropy(Probs) # compute entropy
plt.plot(p1, H)
plt.xlabel("p1")
plt.ylabel("Entropy")
plt.show()
|
{"hexsha": "b1c68e1078b2f77e8a79653434e0a75d65f76f85", "size": 703, "ext": "py", "lang": "Python", "max_stars_repo_path": "example2.py", "max_stars_repo_name": "tyiannak/inf_teiste_info_theory_lab", "max_stars_repo_head_hexsha": "94ac5469d850472e492afdc36785e3590760660d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-11-04T23:12:35.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-04T23:12:35.000Z", "max_issues_repo_path": "example2.py", "max_issues_repo_name": "tyiannak/inf_teiste_info_theory_lab", "max_issues_repo_head_hexsha": "94ac5469d850472e492afdc36785e3590760660d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example2.py", "max_forks_repo_name": "tyiannak/inf_teiste_info_theory_lab", "max_forks_repo_head_hexsha": "94ac5469d850472e492afdc36785e3590760660d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0, "max_line_length": 91, "alphanum_fraction": 0.5149359886, "include": true, "reason": "import numpy", "num_tokens": 177}
|
\documentclass[a4paper,12pt]{article}
% Margins
\usepackage{a4wide}
% Write english
\usepackage[english]{babel}
% Used for images
\usepackage{graphicx}
% Used to show eps on Windows
\usepackage{epstopdf}
\usepackage{float}
% Text encoding
% Needed to use headers
\usepackage{fancyhdr}
\usepackage{hyperref}
% Used for the euro symbol
\usepackage[gen]{eurosym}
% Used for optimal usage of gensymb4
\usepackage{textcomp}
% Used for the degree symbol
%\usepackage{gensymb}
% Used for captions and subcaptions on images
\usepackage{caption}
\usepackage{subcaption}
% Colors
\usepackage{color}
% Code fragments
\usepackage{listings}
% No tab at start of paragraph
\setlength{\parindent}{0em}
\setlength{\parskip}{1em}
% Defines the todo macro
\newcommand{\todo}[1]{\textcolor{red}{\textbf{TODO: #1}}\PackageWarning{TODO:}{#1!}}
\usepackage{booktabs}% http://ctan.org/pkg/booktabs
\usepackage{colortbl}% http://ctan.org/pkg/colortbl
\usepackage{amsmath}% http://ctan.org/pkg/amsmath
\usepackage{xcolor}% http://ctan.org/pkg/xcolor
\usepackage{graphicx}% http://ctan.org/pkg/graphicx
\colorlet{tableheadcolor}{gray!25} % Table header colour = 25% gray
\newcommand{\headcol}{\rowcolor{tableheadcolor}} %
\colorlet{tablerowcolor}{gray!10} % Table row separator colour = 10% gray
\newcommand{\rowcol}{\rowcolor{tablerowcolor}} %
% Command \topline consists of a (slightly modified) \toprule followed by a \heavyrule rule of colour tableheadcolor (hence, 2 separate rules)
\newcommand{\topline}{\arrayrulecolor{black}\specialrule{0.1em}{\abovetopsep}{0pt}%
\arrayrulecolor{tableheadcolor}\specialrule{\belowrulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \midline consists of 3 rules (top colour tableheadcolor, middle colour black, bottom colour white)
\newcommand{\midline}{\arrayrulecolor{tableheadcolor}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}\specialrule{\lightrulewidth}{0pt}{0pt}%
\arrayrulecolor{white}\specialrule{\belowrulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \rowmidlinecw consists of 3 rules (top colour tablerowcolor, middle colour black, bottom colour white)
\newcommand{\rowmidlinecw}{\arrayrulecolor{tablerowcolor}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}\specialrule{\lightrulewidth}{0pt}{0pt}%
\arrayrulecolor{white}\specialrule{\belowrulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \rowmidlinewc consists of 3 rules (top colour white, middle colour black, bottom colour tablerowcolor)
\newcommand{\rowmidlinewc}{\arrayrulecolor{white}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}\specialrule{\lightrulewidth}{0pt}{0pt}%
\arrayrulecolor{tablerowcolor}\specialrule{\belowrulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \rowmidlinew consists of 1 white rule
\newcommand{\rowmidlinew}{\arrayrulecolor{white}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \rowmidlinec consists of 1 tablerowcolor rule
\newcommand{\rowmidlinec}{\arrayrulecolor{tablerowcolor}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}}
% Command \bottomline consists of 2 rules (top colour
\newcommand{\bottomline}{\arrayrulecolor{white}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}\specialrule{\heavyrulewidth}{0pt}{\belowbottomsep}}%
\newcommand{\bottomlinec}{\arrayrulecolor{tablerowcolor}\specialrule{\aboverulesep}{0pt}{0pt}%
\arrayrulecolor{black}\specialrule{\heavyrulewidth}{0pt}{\belowbottomsep}}%
\begin{document}
\begin{titlepage}
\fontsize{12pt}{14pt}\selectfont
\begin{center}
% The logo of the University of Ghent
\includegraphics[height=4cm]{figures/logo}
\vspace{1cm}
\fontsize{14pt}{17pt}\selectfont
% The course:
\textsc{Advanced Multimedia Applications}
\fontsize{12pt}{14pt}\selectfont
\vspace{1.5cm}
% De auteur van de thesis:
Feliciaan De Palmenaer\\
Wouter Pinnoo\\
Stefaan Vermassen\\
Titouan Vervack
\vspace{2.8cm}
\fontsize{17.28pt}{21pt}\selectfont
% The title:
\textsc{Academic Data - Automatic Course Assembly}
\fontseries{m}
\fontsize{12pt}{14pt}\selectfont
\vspace{3cm}
2015-2016
\vspace{2cm}
\end{center}
\end{titlepage}
\thispagestyle{empty}
\tableofcontents
\newpage
% ---------------------------------------------------------------------------- %
% Titlepage %
% ---------------------------------------------------------------------------- %
% ---------------------------------------------------------------------------- %
% Body %
% ---------------------------------------------------------------------------- %
\fontsize{12pt}{16pt}\selectfont
\section{General Information}
Onsophic, a Silicon Valley based startup focused on transforming learning, offers an intuitive, data driven, online training platform. The platform continuously gathers and analyses learning data and can
therefore be seen as the equivalent of Google Analytics for learning by providing organizations with a toolset to measure, analyse and discover what works and, more importantly, what doesn't work in training. In this business process, integrating existing content into the e-learning
platform is crucial.
The goal in this project is to automate the course assembly process, by transforming raw learning materials into structured courses.
\section{General project decisions}
This section will outline all of the important decisions we have made during the development of the project.
\subsection{Decisions}
\textbf{Iteration 1 (Course introduction - 18/02/2016)}
\begin{itemize}
\item Stefaan will fulfil the role of team lead. (18/02)
\item Stefaan will be responsible for the communication with the other parties. (18/02)
\item Git will be used as distributed versioning control system for the code, with the UGent GitHub platform as tool. (20/02)
\item Titouan will be responsible for the progress report and will make sure it is always up to date. (22/02)
\item Wouter will be responsible for git and over the code quality. (22/02)
\item Feliciaan will be responsible for the planning of the project. (22/02)
\item For scalability purposes, a web service is chosen over a native application (22/02)
\end{itemize}
\textbf{Iteration 2 (Starting 24/02/2016)}
\begin{itemize}
\item Play web application framework is chosen. Play is written in Scala and Java and is a clean alternative to the legacy Enterprise Java stacks focusing on predictable, minimal resource consumption (25/2)
\item Akka is chosen to create the asynchronous task system. Akka is a toolkit and runtime for building highly concurrent, distributed, and resilient message-driven applications on the JVM (25/2)
\item For Named-Entity Recognition, the NERD API was chosen for its abstraction of other NER APIs. (25/2)
\item We chose to use jsoup for our HTML parser as it doesn't require us to create the syntax tree ourselves and because it implements the WHATWG HTML5 specification. This on it's own, using ANTR4, would take quite some time. The license used for jsoup is the MIT license, strengthening our choice. (25/2)
\end{itemize}
\textbf{Iteration 3 (Starting 09/03/2016)}
\begin{itemize}
\item To decrease the impact of the risk \textit{``NER processing is not fast enough for practical usage of our tool"}, use several NER services in parallel and only choose the fastest one (12/03)
\item Implement threading for parallel processing of the several documents that have to be recognised in one task (12/03)
\end{itemize}
\textbf{Iteration 4 (Starting 16/03/2016)}
\begin{itemize}
\item We have defined a set of supported input formats. We noticed that most user manuals come with a Toc. The web service will expect this Toc only, and will dynamically decide which formatting is used (nested unordered lists, nested tables, divs,..).
\item Error handling implementation
\item Get partial JSON before NER to avoid long wating times before any result
\item Chapters are meant to group modules, no other metadata is attached to a chapter. Chapters are optional. So if you are able to detect an extra level on top of modules, you can use chapters to reflect that extra level.
\item The only tags that matter for now are the section tags, so we only focus on these.
\end{itemize}
\subsection{Responsibilities}
\begin{tabular}{lp{12cm}}
\topline
\headcol Responsible & Task \\
\midline
\textbf{Iteration 2} \\\\
\rowcol Titouan & - Responsible for the initial architecture\\
\rowcol & - Digitalising the component-connector diagram \\
\rowcol & - Initial wireframe of classes \\
Feliciaan & - Library research for input parsing\\
& - Adding HTML parser to the solution \\
& - Handle multiple URLs \\
\rowcol Wouter & - Research on Named-Entity Recognition libraries\\
\rowcol & - Making a motivated decision on which NER tool or tools are most suitable for what you need, and which will be the easiest to integrate into the solution, and be most future-proof \\
\rowcol & - Implement the most optimal solution for NER.\\
Stefaan & - Setting up the initial Play project\\
& - Creating an asyncronous task system using Akka to come up with a scalable approach \\
& \\\hline
\textbf{Iteration 3} \\\\
\rowcol Titouan & - Implement threading (parallel functionality for different documents in the same task)\\
Feliciaan & - Use the parsed HTML document to fill in section names in the Onsophic JSON document\\
\rowcol Wouter & - Risk list, planning and presentation\\
Stefaan & - Research on Onsophic JSON format, progress report\\
& \\\hline
\textbf{Iteration 4} \\\\
\bottomlinec
\end{tabular}
\subsection{Requirements}
\subsubsection{Functional requirements}
\textbf{Must-haves}
\begin{itemize}
\item parse the input source (HTML)
\item detect the learning modules and their learning activities
\item tag these detected modules and activities based on named entity recognition
\item estimate the Bloom level for each of the detected activities
\end{itemize}
\textbf{Nice-to-haves}
\begin{itemize}
\item Detect chapters in the HTML document, as these are optional (property of a module).
\item Parse other types of documents than use manuals.
\end{itemize}
\subsubsection{Non-functional requirements}
\textbf{Must-haves}
\begin{itemize}
\item Interoperability. Our tool must be able to interchange information with third-party services correctly. See Quality-Attribute Scenario in Figure \ref{fig:qas-interoperability}.
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{figures/QAS-interoperability}
\caption{QAS: interoperability}
\label{fig:qas-interoperability}
\end{figure}
\item Modifiability. A developer must be able to change formats of input/output documents without it having an impact on other modules. See Quality-Attribute Scenario in Figure \ref{fig:qas-modifiability}.
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{figures/QAS-modifiability}
\caption{QAS: modifiability}
\label{fig:qas-modifiability}
\end{figure}
\end{itemize}
\subsection{Assumptions}
\subsubsection{Input}
After surveying multiple different helpsites we didn't find any similarities that worked for all sites.
Some sites can be parsed by counting the depth of the node in the parse tree.\\
Some sites have a first page which is the index, other sites have the index on every page, some even need use javascript to create the whole index of the site.\\
At the moment we have implemented our parser for sites that have a table of contents.\\
We assume that every html document contains exactly one document.
\subsection{Risk list}
\begin{tabular}{p{5.5cm}llp{5.5cm}}
\topline
\headcol Risk & Probability & Impact & Mitigation \\
\midline
\rowcol Not finding a generic line in the different input sources & M & H & Review the assumptions about the input data with the client\\
No suitable NER tool that is free to use and license to use in closed-source software & L & H & Looking into commercial tools\\
\rowcol Time-management issues due to other projects and master dissertation & M & M & Discuss planning and requirements with client\\
NER processing is not fast enough for practical usage of our tool & M & M & Use several services in parallel and only use the fastest one\\
\rowcol Not able to fill in all metadata (not fully compatible JSON) & H & L & Use simplified JSON\\
\bottomlinec
\end{tabular}
\subsection{APIs \& frameworks}
\begin{enumerate}
\item Play Framework: web application framework, written in Scala and Java, clean alternative to the legacy Enterprise Java stacks, focussing on predictable, minimal resource consumption
\item Akka: toolkit and runtime for building highly concurrent, distributed, and resilient message-driven applications on the JVM
\item For the parsing of HTML we use jsoup. It implements the WHATWG HTML5 specification and creates the same DOM (Document Object Model) as modern browsers create.
\item Named-Entity recognition will be done using the NERD API.
\end{enumerate}
\section{Prototype}
\subsection*{Iteration 2 (Starting 24/02/2016)}
\subsubsection*{Architecture}
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{design/components}
\caption{Initial high-level architecture of the Automatic Course Assembly service}
\label{fig:intelligence}
\end{figure}
Our initial architecture is modular and has as goal to easily allow to extends our application with more features later on. Adding a new type of input or output format for example should be as easy as possible.\\
First and foremost we have the REST API, this is the starting points of our application. A url will be passed to our REST API after which it will use the right DocumentGrabber to grab the document from the url. An example of a DocumentGrabber would be an HTMLGrabber that knows how to grab HTML documents and returns an unprocessed HTMLDocument.\\
Throughout the application we will work with Documents to pass around the (un)processed data.\\
The REST API then passes the Document it grabbed to the DocumentProcessor. The Processor calls the DocumentParser which will create a parse tree, analyse it and create a new Document, during analysis the learning modules and activities are created. In this Document only the useful parts of the input document are left over.\\
The DocumentProcessor than passes the parsed Document to the EntityRecogniser which will perform Named Entity Recognition (NER) on the Document. This module will add tags to the Document and pass it back to the DocumentProcessor.\\
Last but not least the DocumentProcessor passes the Document to a DocumentTranslator which will translate a completely processed Document into a Document fit for outputting. In our current case, this will translate an HTMLDocument into a JSONDocument.\\
The only document that can be accessed from the outside is the REST API, everything else is hidden from the user. The user can query the API to start a new import, get the status of a previous task or get the generated output document.
\subsubsection*{NER}
In the assembly process of the JSON-representation of a course document, tags will be created in addition to the parsed content of the (HTML-) documents. For each document, these tags will represent the subject of the document. For this purpose, the content of the whole document will have to be scanned, since the title of the document is not always sufficient to serve as tag for the document. For example, user manuals often contain \textit{Prerequisites} as a title for the first section of the manual. This section will probably contain more useful information than only this subject line.\par
For the automatic scanning of the document for useful tags, Named-Entity Recognition (NER) will be used. NER uses machine learning techniques to find (sequences of) words in the text, i.e. entities, that can be related to concepts from the semantic web. For example, in the sentence \textit{Trump and Clinton are two candidates for the presidential elections of 2016}, the entity \textit{Trump} will be linked to \textit{http://dbpedia.org/page/Donald\_Trump}. The used algorithm will make decisions based on relevance of the word to the concept of the semantic web and its confidence.\par
We think NER is the best solution for the generation of tags, since
\begin{itemize}
\item NER finds sequences of words that are relevant (i.e. leave out words like \textit{'and'}, \textit{'or'}, \textit{'the'}, etc.
\item NER provides a measure of relevance, which we can use to sort all entities on. Only the 5 most relevant concepts in the document will be used as tag for the document.
\end{itemize}\par
Several NER libraries exist that serve an API for easy access. We chose to use the NERD API\footnote{see http://nerd.eurecom.fr/}. This API acts as an abstraction layer for other existing NER APIs, one of which is the most commonly used NER API: AlchemyAPI. The NERD API provides easy methods to submit a document, choose a NER library and retrieve the results of the chosen extractor.\par
This abstraction layer will be a big advantage in this project, because we can easily switch from extractors to tune performance and correctness of the tags.\par
A disadvantage of the NERD API is its license, which prohibits any commercial use of its services. However, we think that the benefit of the abstraction layer outweighs the disadvantage of the license, since one can use the learned results of all the different used extractors gained in this project course to eventually implement the best performing extractor in the project (which will take more effort than implementing this abstraction layer).\par
\subsection*{Demo}
\begin{itemize}
\item Entering new URL in the system:
\begin{lstlisting}
$ http http://localhost:9000/start\
?url=https://docs.oracle.com/cd/E18727_01/doc.121/e13522/toc.htm
HTTP/1.1 200 OK
Content-Length: 27
Content-Type: application/json; charset=utf-8
{
"id": 1,
"status": "QUEUED"
}
\end{lstlisting}
\item Checking progress of a task
\begin{lstlisting}
$ http localhost:9000/check?id=1
HTTP/1.1 200 OK
{
"id": 1,
"status": "RECOGNISING"
}
\end{lstlisting}
\item Retrieving the results of a task
\begin{lstlisting}
$ http localhost:9000/result?id=1
HTTP/1.1 200 OK
{
"tags": ["tag1", "tag2"],
"text": "..."
}
\end{lstlisting}
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{figures/screenshot-queueing}
\caption{Screenshot of the queueing process}
\label{fig:screenshot-queueing}
\end{figure}
\subsection*{Iteration 3 (Starting 09/03/2016)}
\subsubsection*{Architecture}
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{design/componentsv2}
\caption{Revised high-level architecture of the Automatic Course Assembly service}
\label{fig:intelligence}
\end{figure}
A few changes were made in this iteration because we noticed that the document we receive as input has links to other documents that should also be grabbed and parsed.\\
In our second architecture the DocumentGrabber has moved to the DocumentProcessor. The API just passes the url of the first url to the DocumentProcessor.\\
In the DocumentGrabber two new thread are created, one in which the DocumentGrabber grabs new documents and one in which the DocumentParser parses the grabbed Document. In the latter more urls can be derived from the document and these are added to the list of documents to grab.\\
The EntityRecogniser and DocumentTranslator remain the same but they are called once for every document that went through the parser.
\subsubsection*{Retrieve section headers}
As a first attempt in parsing, section headers from a HTML manual document were retrieved. Those headers are used in the \verb|embeddedSections|-\verb|title| field in the JSON format of Onsophic, and the tags (retrieval implementation made in iteration 2) are used in the corresponding JSON field. The code below shows the output of our tool at the end of this iteration.
\begin{lstlisting}
$ http localhost:9000/result?id=1
HTTP/1.1 200 OK
{
"@type":"CourseEdit",
"title":"AMMA test course",
"embeddedSections":[
{
"@type":"SectionEdit",
"title":"Oracle Receivables User Guide",
"activity":{
"url":"https://docs.oracle.com/cd/E18727_01/doc.121
/e13522/toc.htm",
"modality":"text",
"activityType":{
"id":"READ",
"title":"Read"
},
"title":"Oracle Receivables User Guide"
},
"tags":[
"Receipts",
"Transactions",
"Bills Receivable",
"Customer",
"Receivables"
],
"visible":true,
"optionality":"required"
},
{
"@type": "SectionEdit",
"title": "...",
...
}
]
}
\end{lstlisting}
\subsection*{Iteration 4 (Starting 16/03/2016)}
We have defined a set of supported input formats. We noticed that most user manuals come with a table of contents. The web service will expect this table of contents only, and will dynamically decide which formatting is used (nested unordered lists, nested tables, divs,..).
\par
As Onsophic indicated that they would like to test the service during the development to give more feedback and evaluate the integration in the learning platform, we decided to expose stable versions of this web service on a web server. For this purpose, an Azure web service has been created that always reflects the stable version on the master branch and is accessible on the following endpoint: \textbf{http://13.94.197.112:9000}.
\par
The supported input formats will be explained in detail in the sections below. For each format, an example is provided.
\subsubsection*{HTML unordered lists}
The table of content lists all the sections in the form of a nested unordered list.
\begin{lstlisting}
<ul>
<li>List item one</li>
<li>List item two with subitems:
<ul>
<li>Subitem 1</li>
<li>Subitem 2</li>
</ul>
</li>
<li>Final list item</li>
</ul>
\end{lstlisting}
Example: \url{http://13.94.197.112:9000/assets/examples/lists.html}.
\begin{lstlisting}
Optional<Element> toc = document.getHtmlDocument()
.select("ul").stream().filter(l -> !l.parent().tag().equals("li"))
.sorted((e1, e2) ->
e1.childNodes().size() > e2.childNodes().size() ? -1 : 1).findFirst();
\end{lstlisting}
To find the table of contents on the page, we first get all the lists that are not nested inside another list. The list with most childnodes will be selected. Each child in the parent list will be treated as a Module. If it seems that a Module node has no children, an Section and Activity with the same name will be created artificially. The URL of the activity will represent the URL of the Module node.
\begin{lstlisting}
//No sublevels found, create new activity with this element's content
//If there is a link assigned to this listelement
if (!e.select(":root > a").isEmpty() &&
!e.select(":root > a").get(0).attr("href").isEmpty()) {
Section section = new Section();
Element linkElement = e.select(":root > a").get(0);
section.setTitle(linkElement.ownText());
setActivity(linkElement, section);
module.addSection(section);
}
\end{lstlisting}
However, as lists can be nested, the children of the Module nodes will be treated as sections. If the Section node has no children, an activity node will be created artificially. The children of Section nodes will be treated as activities.
\subsubsection*{Description lists}
The table of content lists all the sections in the form of a description list.
\begin{lstlisting}
<dl>
<dt>Firefox</dt>
<dt>Mozilla Firefox</dt>
<dt>Fx</dt>
<dd>A free, open source, cross-platform, graphical web browser
developed by the Mozilla Corporation and hundreds of volunteers.</dd>
</dl>
\end{lstlisting}
Example: \url{http://13.94.197.112:9000/assets/examples/definitionlists.html}.
\subsubsection*{Raw weblinks}
The table of content lists all the sections in the form hyperlinks. The parser will automatically follow each link and parse the pages on the deeper levels.
\begin{lstlisting}
<a href="http://help.apple.com/ipad/5/voiceover/en/iPad73fccd85.html"
alt="At a Glance" class="voiceoverListLink">At a Glance</a><br />
<a href="http://help.apple.com/ipad/5/voiceover/en/iPad741db878.html"
alt="Getting Started" class="voiceoverListLink">Getting Started</a><br />
<a href="http://help.apple.com/ipad/5/voiceover/en/iPad743b0e91.html"
alt="Basics" class="voiceoverListLink">Basics</a>
\end{lstlisting}
Example: \url{http://13.94.197.112:9000/assets/examples/rawlists.html}.
\subsubsection*{Error handling}
Since we decided that the parsing will be handled asynchronously, it is possible that an error occurs of which the web service consumer doesn't know about. Therefore, we implemented better error handling. If an unhandled exception occurs in the parsing process, this will be visible for the consumer as the error message will now be shown in response of the check method. \par
Before this implementation, the status of a task would not change and the consumer could wait forever.
\subsubsection*{NER after writing out the JSON}
As the NER lookup is the slowest operation in the process, we decided to make the JSON result accessible before the NER is completed. As soon as the parsing part is done, querying the \verb|result| endpoint will result in the status \verb|DONE_WITHOUT_TAGS|. The returned JSON document will be complete except that the tags array will be empty. As soon as the NER processing is done, the tags array will be filled and the status will be \verb|DONE_WITH_TAGS|.
\subsubsection*{Prefix handling}
We encountered some problems with documents that contain relative links to other documents. Instead of having to modify all links in a document before submitting it to the system, the prefix of the links can be passed to the system in the \verb|start| endpoint. For example, using \verb|?prefix=http://example.com/| will make sure that relative links (e.g. \verb|subpage/page1.html/|) are retrieved correctly.\par
In case no prefix is provided in the \verb|start| endpoint, a prefix will be automatically generated based on the URL of the page that contains the table of contents. For example, submitting \verb|http://example.com/subpage/toc.html| without \verb|prefix| parameter will cause the system to use the prefix \verb|http://example.com/subpage/|.
\section{Application}
The application we create is a web API that takes a url of an HTML document. The document is then processed and a JSON document compatible with the JSON format used in Onsophic is created. Multiple requests can be issued simultaneously and the status of a document can also be queried.
\section{Overview of the different meetings}
\subsection{Initial meeting with client (24/02/2016)}
\begin{itemize}
\item \textbf{Present:} Stefaan, Titouan and Wouter
\item \textbf{Excused:} Feliciaan
\item \textbf{Goal:} Getting to know the specific details and requirements for the project.
\item \textbf{Meeting notes:} We met with Davy, the CTO of Onsophic. Onsophic has seven employees, of which two to three are technical employees. The platform that Onsophic produces is an educational platform, aimed at different companies such as: software, retail,\ldots They are based in Europe \& the USA.\\
The platform offers courses and learning activities. A course exists of modules, each module contains learning activities, e.g. youtube clip, drive document,\ldots. Onsophic hosts nothing by itself, everything is hosted somewhere else.
Documents that have to be imported into the platform are for example user manuals about products. The support can than learn more about the products through the courses. We have to convert our input documents to a series of modules and learning activities.
\item \textbf{Decisions:} This week we received a sample JSON file and test logins for the Onsophic platform. We will have a meeting with the client weekly, the day before meeting with the teaching staff.
\end{itemize}
\subsection{Review with teaching staff (25/02/2016)}
\begin{itemize}
\item \textbf{Meeting notes}
Make sure to discuss these topics next time:
\begin{itemize}
\item Overview of architecture
\item Who is doing what
\item Risk list
\item What can go wrong, what problems do you have to tackle first, prioritize
\item What features are we going to offer
\item Use your client
\item Include assumptions about inputs in the report!
\item Agile approach, get a demo ready as fast as possible
\item Planning in a powerpoint, what technologies to work together
\end{itemize}
\item \textbf{Decisions:} Next time, come up with a presentation that gives a summary of the above subjects.
\end{itemize}
\subsection{Review with teaching staff (10/03/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item Define more risks. Not only encountered events, but also risks of events that may happen in the future.
\item Make sure a good planning for the next six weeks is defined and presented on the next meeting (17/03/2016).
\end{itemize}
\end{itemize}
\subsection{Review with client (16/03/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item An external NER service can be down. Think about mitigations for this risk.
\item Think about possibilities to use our system in a more user-friendly way.
\end{itemize}
\end{itemize}
\subsection{Review with teaching staff (17/03/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item A more detailed sprint planning must be added to the progress report.
\item A self-reflection on the proposed sprint must be discussed during the next meeting.
\end{itemize}
\end{itemize}
\subsection{Extra internal meeting (28/03/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item Several sources were researched, and a categorisation is made to
\begin{enumerate}
\item sources with navigation based on simple HTML list tags;
\item sources with navigation places in HTML tables;
\item sources with raw (hierarchical) links.
\end{enumerate}
\end{itemize}
\item \textbf{Decisions:}
\begin{itemize}
\item Stefaan implements a parser based on category 1.
\item Titouan implements a parser based on category 2.
\item Wouter implements a parser based on category 3.
\item Feliciaan implements the detection of each category and executes the corresponding parser.
\end{itemize}
\end{itemize}
\subsection{Review with client (30/03/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item There're a lot of possible formats. Make a list of the format we want to support and provide some examples for it.
\item The parsing algorithm should be described more with all relevant constraints.
\item Sometimes a \textit{parent} in the hierarchical tree has content too. In that case, make a separate section that contains that content, e.g. an \textit{``Introduction"} section.
\item Make a better error handling system, where a user gets notified it things go wrong.
\item Units are not yet implemented. Without units, we can not import our JSON into the client's system.
\item Both \textit{Bloomlevels} and \textit{Modalities} have to be implemented.
\end{itemize}
\item \textbf{Decisions:}
\begin{itemize}
\item Stefaan provides the client with a link to the Azure production server.
\item Wouter checks whether the used NER service has a usage limitation.
\item The client provides the team with correct user permissions on his platform.
\end{itemize}
\end{itemize}
\subsection{Review with client (13/04/2016)}
\begin{itemize}
\item \textbf{Meeting notes:}
\begin{itemize}
\item The proposed categorisation of Bloomlevels is too wide. The client will provide the team with the exact list of keywords that will have to be used in the application.
\item Testing is not yet possible because there was an error while deploying on Azure. This will be fixes as soon as possible.
\item For the Bloom level detection: the title of an activity is more important that keywords in the content of that activity.
\end{itemize}
\item \textbf{Decisions:}
\begin{itemize}
\item As temporarily solution for the Bloom levels, use only the first two categorisation columns from the proposed levels.
\item A draft of the progress report will be sent to the client before next week.
\item Set a priority: deploy on Azure.
\end{itemize}
\end{itemize}
\end{document}
|
{"hexsha": "acd01daf36e9073d17c7c95bde3fa9b8400931f2", "size": 32904, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "AMMA/ProgressReport.tex", "max_stars_repo_name": "FlashYoshi/UGentProjects", "max_stars_repo_head_hexsha": "5561ce3bb73d5bc5bf31bcda2be7e038514c7072", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AMMA/ProgressReport.tex", "max_issues_repo_name": "FlashYoshi/UGentProjects", "max_issues_repo_head_hexsha": "5561ce3bb73d5bc5bf31bcda2be7e038514c7072", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AMMA/ProgressReport.tex", "max_forks_repo_name": "FlashYoshi/UGentProjects", "max_forks_repo_head_hexsha": "5561ce3bb73d5bc5bf31bcda2be7e038514c7072", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-18T11:23:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-18T11:23:49.000Z", "avg_line_length": 56.829015544, "max_line_length": 598, "alphanum_fraction": 0.7485716022, "num_tokens": 8092}
|
import time
import mrcfile
import argparse
import numpy as np
import multiprocessing
from scipy import ndimage as ndi
from scipy.stats import wasserstein_distance
from skimage import transform, measure
from multiprocessing import cpu_count as mul_cpu_count
SHIFT = ['Euclidean', 'L1', 'cosine'] # Metrics requiring real space translation
def main():
"""calculates similarity between line projections from 2D class averages"""
parser = argparse.ArgumentParser(description='compare similarity of 2D class averages based on common lines')
parser.add_argument('-i', '--input', action='store', dest='mrc_input', required=False,
default='../data/synthetic_more_projs_noisy_dataset/synthetic_noisy.mrcs',help='path to mrcs file of 2D class averages')
parser.add_argument('-o', '--outpath', action='store', dest='outpath', required=False,
default='../data/synthetic_more_projs_noisy_dataset/',help='path for output files')
parser.add_argument('-m', '--metric', action='store', dest='metric', required=False,
default='Euclidean', choices=['Euclidean', 'L1', 'cosine', 'EMD', 'correlate'],
help='choose scoring method, default Euclidean')
parser.add_argument('-s', '--scale_factor', action='store', dest='scale_factor', required=False, type=float, default=1,
help='scale factor for downsampling. (e.g. -s 2 converts 200pix box --> 100pix box)')
parser.add_argument('-c', '--num_workers', action='store', dest='num_workers', required=False, type=int, default=0,
help='number of CPUs to use, default - all cores')
parser.add_argument('-d', '--domain', action='store', dest='domain', required=False,
default='Fourier', choices=['Fourier', 'Real'], help='Fourier or Real space, default Fourier')
parser.add_argument('-t', '--translate', action='store', dest='translate', required=False,
default='full', choices=['full', 'valid'],
help='indicate size of score vector, numpy convention, default full')
parser.add_argument('-a', '--angular_sampling', action='store', dest='angular_sampling', required=False,
type=int, default=5, help='angle sampling for 1D projections in degrees, default 5')
args = parser.parse_args()
num_cores = mul_cpu_count()
if args.num_workers == 0:
args.num_workers = num_cores
print('No. of workers = ',args.num_workers)
if args.domain == 'Fourier':
rotation_degrees = np.arange(0, 180, args.angular_sampling)
else:
rotation_degrees = np.arange(0, 360, args.angular_sampling)
shape, projection_2D = get_projection_2D(mrcs=args.mrc_input, factor=args.scale_factor)
num_class_avg = len(projection_2D)
num_1D = num_class_avg*len(rotation_degrees)
print("number of 2D class averages: {}".format(num_class_avg))
print("number of 1D projection vectors: {}".format(num_1D))
print("total number of pairwise scores: {}".format(int(num_1D*(num_1D-1)/2)))
if args.metric == 'Euclidean':
pairwise_score = pairwise_l2
elif args.metric == 'L1':
pairwise_score = pairwise_l1
elif args.metric == 'cosine':
pairwise_score = pairwise_cosine
elif args.metric == 'EMD':
pairwise_score = pairwise_wasserstein
elif args.metric == 'correlate':
pairwise_score = pairwise_correlate
if args.metric in SHIFT:
wrapper_function = wrapper_slide_function
else:
wrapper_function = wrapper_single_function
final_scores = {}
with multiprocessing.Pool(args.num_workers) as pool:
for i in range(num_class_avg-1):
line_projections_1 = vectorize(i, projection_2D[i], rotation_degrees, shape, args.domain)
for j in range(i+1, num_class_avg):
line_projections_2 = vectorize(j, projection_2D[j], rotation_degrees, shape, args.domain)
projection_pairs = []
for line_1 in line_projections_1.values():
for line_2 in line_projections_2.values():
projection_pairs.append((line_1, line_2))
pair_scores = pool.starmap(
wrapper_function,
[(pair, pairwise_score, args.translate, args.domain) for pair in projection_pairs]
)
optimum = min(pair_scores, key = lambda x: x[4])
avg_1, deg_1, avg_2, deg_2, score = [value for value in optimum]
final_scores[(avg_1, avg_2)] = (deg_1, deg_2, score)
final_scores[(avg_2, avg_1)] = (deg_2, deg_1, score)
write_scores(final_scores, outpath=args.outpath)
class Projection:
"""for 1D projection vectors"""
def __init__(self,
class_avg,
angle,
vector):
self.class_avg = class_avg
self.angle = angle
self.vector = vector
def size(self):
return len(self.vector)
def get_projection_2D(mrcs, factor,out_size=(200,200),resize=False):
"""read, scale and extract class averages"""
projection_2D = {}
with mrcfile.open(mrcs) as mrc:
for i, data in enumerate(mrc.data):
projection_2D[i] = data
mrc.close()
shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0]
for k, avg in projection_2D.items():
resized_image = avg.copy()
if resize:
resized_image = transform.resize(resized_image, out_size)
if factor == 1:
projection_2D[k] = extract_class_avg(resized_image)
else:
scaled_img = transform.rescale(
resized_image,
scale=(1/factor),
anti_aliasing=True,
multichannel=False, # Add to supress warning
mode='constant' # Add to supress warning
)
projection_2D[k] = extract_class_avg(scaled_img)
return shape, projection_2D
def extract_class_avg(avg):
"""fit in minimal bounding box"""
image = avg.copy()
image[image < 0] = 0
struct = np.ones((2, 2), dtype=bool)
dilate = ndi.binary_dilation(image, struct)
labeled = measure.label(dilate, connectivity=2)
rprops = measure.regionprops(labeled, image, cache=False)
if len(rprops) == 1:
select_region = 0
else:
img_y, img_x = image.shape
if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central region
select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For index
else:
distances = [
(i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid)))
for i, r in enumerate(rprops)
]
select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region
y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]
return image[y_min:y_max, x_min:x_max]
def vectorize(key, image, rotation_degrees, shape, domain):
"""
takes image and creates 1D projections
similar to Radon transform
"""
projection_1D = {}
projection_1D_FT = {}
for degree in rotation_degrees:
proj_1D = transform.rotate(image, degree, resize=True).sum(axis=0).astype('float32')
trim_1D = np.trim_zeros(proj_1D, trim='fb')
pad_1D = np.pad(proj_1D, (0, shape-len(proj_1D))) # Pad to largest possible shape from 2D
F = abs(np.fft.rfft(pad_1D))
projection_1D[(key, degree)] = Projection(class_avg=key, angle=degree, vector=trim_1D)
projection_1D_FT[(key, degree)] = Projection(class_avg=key, angle=degree, vector=F)
if domain == 'Fourier':
return projection_1D_FT
else:
return projection_1D
def pairwise_l2(a, b):
return np.linalg.norm(a - b)
def pairwise_l1(a, b):
return np.linalg.norm(a - b, 1)
def pairwise_cosine(a, b):
return 1 - (np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
def pairwise_correlate(a, b, translate):
s = np.correlate(a, b, mode=translate)
return 1 / (1 + np.amax(s)) # Convert to distance
def pairwise_wasserstein(a, b, translate):
return wasserstein_distance(a, b)
def slide_score(a, b, pairwise_score, translate, domain):
"""
finds minimum pairwise score for translations of 1D projections
a, b are instances of the Projection class
'valid' is elements without zero padding
'full' is scores at all translations
"""
scores = []
if domain == 'Fourier':
scores.append(pairwise_score(a.vector[1:], b.vector[1:])) #Drop 0th seems to help
else:
if a.size() > b.size():
l, s = a.vector, b.vector
else:
l, s = b.vector, a.vector
l_size, s_size = len(l), len(s)
if translate == 'valid':
diff_of_len = abs(l_size - s_size)
if diff_of_len == 0:
scores.append(pairwise_score(l, s))
else:
pad_s = np.pad(s, pad_width=(diff_of_len, diff_of_len))
for i in range(0, diff_of_len+1):
shift_s = pad_s[i:i+l_size]
scores.append(pairwise_score(l, shift_s))
elif translate == 'full':
pad_l = np.pad(l, pad_width=(s_size-1, s_size-1))
pad_s = np.pad(s, pad_width=(l_size+s_size-2, l_size+s_size-2))
for i in range(0, l_size+s_size-1):
shift_s = pad_s[i:i+len(pad_l)]
scores.append(pairwise_score(pad_l, shift_s))
return min(scores)
def wrapper_slide_function(pair, pairwise, translate, domain):
"""
pair is tuple from Projection class to be scored
pairwise is function to score vectores (e.g. Euclidean)
"""
score = slide_score(pair[0], pair[1], pairwise, translate, domain)
return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]
def wrapper_single_function(pair, pairwise, translate, domain):
"""same as above but for correlate and EMD"""
score = pairwise(pair[0].vector[1:], pair[1].vector[1:], translate) # Skip 0th component
return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]
def write_scores(final_scores, outpath):
"""
tab separted file of final scores
load scores into the slicem gui
"""
stamp = time.strftime('%Y%m%d_%H%M%S')
header = ['projection_1', 'degree_1', 'projection_2', 'degree_2', 'score']
with open(outpath+'/slicem_scores_{0}.txt'.format(stamp), 'w') as f:
for h in header:
f.write(h+'\t')
f.write('\n')
for p, v in final_scores.items():
f.write(str(p[0])+'\t'+str(v[0])+'\t'+str(p[1])+'\t'+str(v[1])+'\t'+str(v[2])+'\n')
if __name__ == "__main__":
starttime = time.time()
main()
print('Runtime: {} minutes'.format((time.time() - starttime)/60))
|
{"hexsha": "772d7cb94fa360435db10493fd8f932528f54028", "size": 11355, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/slicem.py", "max_stars_repo_name": "marcottelab/2D_projection_clustering", "max_stars_repo_head_hexsha": "35431b16af7d1ca9d0647aa5d0bcff62243dbcdc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/slicem.py", "max_issues_repo_name": "marcottelab/2D_projection_clustering", "max_issues_repo_head_hexsha": "35431b16af7d1ca9d0647aa5d0bcff62243dbcdc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/slicem.py", "max_forks_repo_name": "marcottelab/2D_projection_clustering", "max_forks_repo_head_hexsha": "35431b16af7d1ca9d0647aa5d0bcff62243dbcdc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0476190476, "max_line_length": 144, "alphanum_fraction": 0.6023778071, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2826}
|
import numpy as np
import HEngine
def OnUpdateRuntime(transformComponent, ts):
transformComponent.SetTranslation(transformComponent.GetTranslation().x + ts.GetSeconds(), transformComponent.GetTranslation().y, transformComponent.GetTranslation().z)
return transformComponent;
def OnUpdateEditor(transformComponent):
return transformComponent.GetTranslation().x
|
{"hexsha": "541fb899d81bf88f5676ab0790175487d2de51ba", "size": 373, "ext": "py", "lang": "Python", "max_stars_repo_path": "Engine/Source/Scripts/MoveRight.py", "max_stars_repo_name": "hebohang/HEngine", "max_stars_repo_head_hexsha": "82f40797a7cfabaa11aeeb7797fba70551d18017", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Engine/Source/Scripts/MoveRight.py", "max_issues_repo_name": "hebohang/HEngine", "max_issues_repo_head_hexsha": "82f40797a7cfabaa11aeeb7797fba70551d18017", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Engine/Source/Scripts/MoveRight.py", "max_forks_repo_name": "hebohang/HEngine", "max_forks_repo_head_hexsha": "82f40797a7cfabaa11aeeb7797fba70551d18017", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4444444444, "max_line_length": 172, "alphanum_fraction": 0.8230563003, "include": true, "reason": "import numpy", "num_tokens": 68}
|
# Copyright 2020 Forschungszentrum Jülich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import tempfile
import nibabel as nib
import os
import re
import logging
import xmltodict
import numpy as np
def get_filename_from_resp(resp):
# determine the type of the file. look at the disposition header, use PMapURL as a fallback
content_disposition_header = resp.headers.get('content-disposition')
filename = re.search(r'filename=(.*?)$', content_disposition_header).group(1) if content_disposition_header is not None and re.search(r'filename=(.*?)$', content_disposition_header) is not None else resp.url
return filename
# given url as either a string or obj, interpretes, and performs get/post request
# returns resp
# may raise HTTP exception
def get_pmap(url, json=None):
if json is None:
resp = requests.get(url)
else:
resp = requests.post(url, json=json)
resp.raise_for_status()
return resp
# input is byte
# save to cache, then generate a random hashed file name
# specify gzip to append .gz
def read_byte_via_nib(content, gzip=False):
fp, fp_name = tempfile.mkstemp(suffix='.nii.gz' if gzip else '.nii')
os.write(fp, content)
img_array = nib.load(fp_name)
os.close(fp)
return img_array
def is_gzipped(filename):
return re.search(r"\.gz$", filename) is not None
def from_brainmap_retrieve_gene(gene, verbose=False):
"""
Retrieve probe ids for the given gene lists, update self.probe_ids which will be used by download_and_save_zscores_samples() or download_and_save_zscores_samples_partial() to
form the url and update self.gene_symbols to be used by get_mean_zscores()
"""
base_retrieve_probe_ids = "http://api.brain-map.org/api/v2/data/query.xml?criteria=model::Probe,rma::criteria,[probe_type$eq'DNA'],products[abbreviation$eq'HumanMA'],gene[acronym$eq"
end_retrieve_probe_ids = "],rma::options[only$eq'probes.id']"
url = '{}{}{}'.format(base_retrieve_probe_ids, gene, end_retrieve_probe_ids)
if verbose:
logging.getLogger(__name__).info('url: {}'.format(url))
resp = requests.get(url)
if resp.ok:
return xmltodict.parse(resp.text)
else:
raise requests.exceptions.HTTPError
def from_brainmap_retrieve_specimen(specimen_id, verbose=False):
"""
Download names and transformation matrix for each specimen/donor from Allen Brain Api and save them on disk as specimenName.txt
and specimenMat.txt respectively, load.
"""
base_url_download_specimens = "http://api.brain-map.org/api/v2/data/Specimen/query.json?criteria=[name$eq"+"'"
end_url_download_specimens = "']&include=alignment3d"
url = '{}{}{}'.format(base_url_download_specimens, specimen_id, end_url_download_specimens)
resp = requests.get(url)
if resp.ok:
return resp.json()
else:
raise requests.exceptions.HTTPError
def from_brainmap_retrieve_microarray_filterby_donorids_probeids(donor_id, probe_ids, verbose=False):
"""
Query Allen Brain Api for given set of genes for the donor given by donor_id
Args:
donor_id (int): Id of a donor which is used to query Allen Brain API.
Returns:
dict: A dictionary representing the just downloaded samples, probes and zscores for the given donor_id and the probes given by self.probe_ids.
"""
base_query_api = "http://api.brain-map.org/api/v2/data/query.json?criteria=service::human_microarray_expression[probes$in"
probes = ','.join(probe_ids)
end_query_api = "][donors$eq{}]".format(donor_id)
url = '{}{}{}'.format(base_query_api, probes, end_query_api)
resp = requests.get(url)
resp.raise_for_status()
return resp.json()
# TODO cleanup
# TODO need tests
def transform_samples_MRI_to_MNI152(samples, transformation_mat):
"""
Convert the MRI coordinates of samples to MNI152 space
Args:
samples (dict): Contains mri coordinates, well and polygon id for each sample used in Allen Brain.
transformation_mat (numpy.ndarray): A 4x4 numpy array to convert the above mentioned MRI coordinates to MNI152 space.
Returns:
dict: A dictionary containing three keys -
mnicoords - two dimensional numpy array where each row represents a three dimensional coordinate in MNI152 space, for all the samples.
well - list of well id for the sample the respective coordinate belongs to
polygon - list of polygon id for the sample the respective coordinate belongs to
"""
np_T = np.array(transformation_mat[0:3, 0:4])
mri = np.vstack(s['sample']['mri'] for s in samples)
add = np.ones((len(mri), 1), dtype=np.int)
mri = np.append(mri, add, axis=1)
mri = np.transpose(mri)
coords = np.matmul(np_T, mri)
coords = coords.transpose()
well = [s['sample']['well'] for s in samples]
polygon = [s['sample']['polygon'] for s in samples]
return {'mnicoords' : coords, 'well' : well, 'polygon' : polygon}
#return coords
# TODO write test
def filter_coordinates_and_zscores(roi_nii, index_to_samples_zscores_and_specimen_dict, specimen, index, roi_name='Unnamed ROI', filter_threshold=0.2):
"""
Populate self.filtered_coords_and_zscores with zscores and coords for samples which belong to a particular specimen and spatially represented in the given roi.
Args:
roi (nib.nifti1.Nifti1Image): probability map of a region of interest.
index_to_samples_zscores_and_specimen_dict (dict): Index into samples_zscores_and_specimen_dict
specimen (dict): dictionary representing a specimen with its name and transformation matrix as keys
index (int): 0 or 1, representing which region of interest it is.
Returns:
dict : Contains the following keys -
a) zscores - Lists of zscore corresponding to the Allen Brain coordinates (in MNI152 space) which are spatially represented in region of interest given by roi parameter.
b) coords - Lists of Allen Brain coordinates (in MNI152 space) which are spatially represented in region of interest given by roi parameter.
c) coord_well - Lists of well id for all the samples which are spatially represented in region of interest given by roi parameter.
d) coord_polygon - Lists of polygon id for all the samples which are spatially represented in region of interest given by roi parameter.
e) specimen - same as specimen['name'].
f) name - 'img1' representing first region of interest, 'img2' representing second region of interest.
"""
revised_samples_zscores_and_specimen_dict = dict.fromkeys(['zscores', 'coords', 'coord_well', 'coord_polygon', 'specimen', 'name'])
revised_samples_zscores_and_specimen_dict['realname'] = roi_name
revised_samples_zscores_and_specimen_dict['name'] = 'img{}'.format(str(index+1))
img_arr = roi_nii.get_data()
invroiMni = np.linalg.inv(roi_nii.affine)
T = np.dot(invroiMni, specimen['alignment3d'])
'''
coords = transform_samples_MRI_to_MNI152(index_to_samples_zscores_and_specimen_dict['samples'], T)
coords = (np.rint(coords)).astype(int)
coords = [np.array([-1, -1, -1]) if (coord > 0).sum() != 3 or img_arr[coord[0],coord[1],coord[2]] <= self.filter_threshold or img_arr[coord[0],coord[1],coord[2]] == 0 else coord for coord in coords]
revised_samples_zscores_and_specimen_dict['coords'] = [coord for coord in coords if (coord > 0).sum() == 3]
'''
coords_dict = transform_samples_MRI_to_MNI152(index_to_samples_zscores_and_specimen_dict['samples'], T)
coords = (np.rint(coords_dict['mnicoords'])).astype(int)
coords = [np.array([-1, -1, -1]) if (coord > 0).sum() != 3 or img_arr[coord[0],coord[1],coord[2]] <= filter_threshold or img_arr[coord[0],coord[1],coord[2]] == 0 else coord for coord in coords]
revised_samples_zscores_and_specimen_dict['coords'] = [coord for coord in coords if (coord > 0).sum() == 3]
revised_samples_zscores_and_specimen_dict['coord_well'] = [well for (coord, well) in zip(coords, coords_dict['well']) if (coord > 0).sum() == 3]
revised_samples_zscores_and_specimen_dict['coord_polygon'] = [polygon for (coord, polygon) in zip(coords, coords_dict['polygon']) if (coord > 0).sum() == 3]
revised_samples_zscores_and_specimen_dict['zscores'] = [zscore for (coord, zscore) in zip(coords, index_to_samples_zscores_and_specimen_dict['zscores']) if (coord > 0).sum() == 3]
revised_samples_zscores_and_specimen_dict['specimen'] = specimen['name']
return revised_samples_zscores_and_specimen_dict
|
{"hexsha": "f31ebfeb75233f679de37ac4c1ea153e2d80efc8", "size": 8866, "ext": "py", "lang": "Python", "max_stars_repo_path": "webjugex/util.py", "max_stars_repo_name": "FZJ-INM1-BDA/PyJuGEx", "max_stars_repo_head_hexsha": "6cb82679a3a25512f7faa388df98445d46e9471a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "webjugex/util.py", "max_issues_repo_name": "FZJ-INM1-BDA/PyJuGEx", "max_issues_repo_head_hexsha": "6cb82679a3a25512f7faa388df98445d46e9471a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-08T11:52:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-07T14:27:10.000Z", "max_forks_repo_path": "webjugex/util.py", "max_forks_repo_name": "FZJ-INM1-BDA/PyJuGEx", "max_forks_repo_head_hexsha": "6cb82679a3a25512f7faa388df98445d46e9471a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.530726257, "max_line_length": 209, "alphanum_fraction": 0.7405819986, "include": true, "reason": "import numpy", "num_tokens": 2255}
|
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
c : R
f : Π₀ (i : ι), M i
⊢ AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
c : R
f : Π₀ (i : ι), M i
⊢ ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • f) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) f
[PROOFSTEP]
apply DFinsupp.induction f
[GOAL]
case h0
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
c : R
f : Π₀ (i : ι), M i
⊢ ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • 0) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) 0
[PROOFSTEP]
rw [smul_zero, AddMonoidHom.map_zero, smul_zero]
[GOAL]
case ha
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
c : R
f : Π₀ (i : ι), M i
⊢ ∀ (i : ι) (b : M i) (f : Π₀ (i : ι), M i),
↑f i = 0 →
b ≠ 0 →
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • f) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) f →
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • (single i b + f)) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (single i b + f)
[PROOFSTEP]
intro a b f _ _ hf
[GOAL]
case ha
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
c : R
f✝ : Π₀ (i : ι), M i
a : ι
b : M a
f : Π₀ (i : ι), M i
a✝¹ : ↑f a = 0
a✝ : b ≠ 0
hf :
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • f) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) f
⊢ ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (c • (single a b + f)) =
c • ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)) (single a b + f)
[PROOFSTEP]
rw [smul_add, AddMonoidHom.map_add, AddMonoidHom.map_add, smul_add, hf, ← single_smul, sumAddHom_single,
sumAddHom_single, LinearMap.toAddMonoidHom_coe, LinearMap.map_smul]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F G : (i : ι) → M i →ₗ[R] N
⊢ (fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G
[PROOFSTEP]
refine DFinsupp.lhom_ext' (fun i ↦ ?_)
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F G : (i : ι) → M i →ₗ[R] N
i : ι
⊢ LinearMap.comp
((fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G))
(lsingle i) =
LinearMap.comp
((fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G)
(lsingle i)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F G : (i : ι) → M i →ₗ[R] N
i : ι
x✝ : M i
⊢ ↑(LinearMap.comp
((fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G))
(lsingle i))
x✝ =
↑(LinearMap.comp
((fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G)
(lsingle i))
x✝
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
c : S
F : (i : ι) → M i →ₗ[R] N
⊢ AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
F
[PROOFSTEP]
refine DFinsupp.lhom_ext' (fun i ↦ ?_)
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
c : S
F : (i : ι) → M i →ₗ[R] N
i : ι
⊢ LinearMap.comp
(AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
(c • F))
(lsingle i) =
LinearMap.comp
(↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
F)
(lsingle i)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
c : S
F : (i : ι) → M i →ₗ[R] N
i : ι
x✝ : M i
⊢ ↑(LinearMap.comp
(AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
(c • F))
(lsingle i))
x✝ =
↑(LinearMap.comp
(↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) }
F)
(lsingle i))
x✝
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
⊢ (fun F i => LinearMap.comp F (lsingle i))
(AddHom.toFun
{
toAddHom :=
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) },
map_smul' :=
(_ :
∀ (c : S) (F : (i : ι) → M i →ₗ[R] N),
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
F) }.toAddHom
F) =
F
[PROOFSTEP]
ext
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
x✝¹ : ι
x✝ : M x✝¹
⊢ ↑((fun F i => LinearMap.comp F (lsingle i))
(AddHom.toFun
{
toAddHom :=
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) },
map_smul' :=
(_ :
∀ (c : S) (F : (i : ι) → M i →ₗ[R] N),
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{
toFun :=
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{
toFun :=
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
F) }.toAddHom
F)
x✝¹)
x✝ =
↑(F x✝¹) x✝
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (Π₀ (i : ι), M i) →ₗ[R] N
⊢ AddHom.toFun
{
toAddHom :=
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) },
map_smul' :=
(_ :
∀ (c : S) (F : (i : ι) → M i →ₗ[R] N),
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
F) }.toAddHom
((fun F i => LinearMap.comp F (lsingle i)) F) =
F
[PROOFSTEP]
refine DFinsupp.lhom_ext' (fun i ↦ ?_)
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (Π₀ (i : ι), M i) →ₗ[R] N
i : ι
⊢ LinearMap.comp
(AddHom.toFun
{
toAddHom :=
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) },
map_smul' :=
(_ :
∀ (c : S) (F : (i : ι) → M i →ₗ[R] N),
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
F) }.toAddHom
((fun F i => LinearMap.comp F (lsingle i)) F))
(lsingle i) =
LinearMap.comp F (lsingle i)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (Π₀ (i : ι), M i) →ₗ[R] N
i : ι
x✝ : M i
⊢ ↑(LinearMap.comp
(AddHom.toFun
{
toAddHom :=
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) })
G) },
map_smul' :=
(_ :
∀ (c : S) (F : (i : ι) → M i →ₗ[R] N),
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
(c • F) =
↑(RingHom.id S) c •
AddHom.toFun
{
toFun := fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) }
f) },
map_add' :=
(_ :
∀ (F G : (i : ι) → M i →ₗ[R] N),
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
(F + G) =
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{
toFun :=
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
F +
(fun F =>
{
toAddHom :=
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) (a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) a +
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i)) b) },
map_smul' :=
(_ :
∀ (c : R) (f : Π₀ (i : ι), M i),
AddHom.toFun
{ toFun := ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
(c • f) =
↑(RingHom.id R) c •
AddHom.toFun
{
toFun :=
↑(sumAddHom fun i => LinearMap.toAddMonoidHom (F i)),
map_add' :=
(_ :
∀ (a b : Π₀ (i : ι), M i),
↑(↑liftAddHom fun i => LinearMap.toAddMonoidHom (F i))
(a + b) =
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
a +
↑(↑liftAddHom fun i =>
LinearMap.toAddMonoidHom (F i))
b) }
f) })
G) }
F) }.toAddHom
((fun F i => LinearMap.comp F (lsingle i)) F))
(lsingle i))
x✝ =
↑(LinearMap.comp F (lsingle i)) x✝
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁷ : Semiring R
inst✝⁶ : (i : ι) → AddCommMonoid (M i)
inst✝⁵ : (i : ι) → Module R (M i)
inst✝⁴ : AddCommMonoid N
inst✝³ : Module R N
inst✝² : Semiring S
inst✝¹ : Module S N
inst✝ : SMulCommClass R S N
F : (i : ι) → M i →ₗ[R] N
i : ι
x : M i
⊢ ↑(↑(lsum S) F) (single i x) = ↑(F i) x
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
f : (i : ι) → β₁ i → β₂ i
hf : ∀ (i : ι), f i 0 = 0
r : R
hf' : ∀ (i : ι) (x : β₁ i), f i (r • x) = r • f i x
g : Π₀ (i : ι), β₁ i
⊢ mapRange f hf (r • g) = r • mapRange f hf g
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
f : (i : ι) → β₁ i → β₂ i
hf : ∀ (i : ι), f i 0 = 0
r : R
hf' : ∀ (i : ι) (x : β₁ i), f i (r • x) = r • f i x
g : Π₀ (i : ι), β₁ i
i✝ : ι
⊢ ↑(mapRange f hf (r • g)) i✝ = ↑(r • mapRange f hf g) i✝
[PROOFSTEP]
simp only [mapRange_apply f, coe_smul, Pi.smul_apply, hf']
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
⊢ (linearMap fun i => LinearMap.id) = LinearMap.id
[PROOFSTEP]
ext
[GOAL]
case h.h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
i✝¹ : ι
x✝ : β₂ i✝¹
i✝ : ι
⊢ ↑(↑(LinearMap.comp (linearMap fun i => LinearMap.id) (lsingle i✝¹)) x✝) i✝ =
↑(↑(LinearMap.comp LinearMap.id (lsingle i✝¹)) x✝) i✝
[PROOFSTEP]
simp [linearMap]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
f : (i : ι) → β₁ i →ₗ[R] β₂ i
f₂ : (i : ι) → β i →ₗ[R] β₁ i
⊢ ∀ (i : ι), ((fun i x => ↑(f i) x) i ∘ (fun i x => ↑(f₂ i) x) i) 0 = 0
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹² : Semiring R
inst✝¹¹ : (i : ι) → AddCommMonoid (M i)
inst✝¹⁰ : (i : ι) → Module R (M i)
inst✝⁹ : AddCommMonoid N
inst✝⁸ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁷ : (i : ι) → AddCommMonoid (β i)
inst✝⁶ : (i : ι) → AddCommMonoid (β₁ i)
inst✝⁵ : (i : ι) → AddCommMonoid (β₂ i)
inst✝⁴ : (i : ι) → Module R (β i)
inst✝³ : (i : ι) → Module R (β₁ i)
inst✝² : (i : ι) → Module R (β₂ i)
inst✝¹ : (i : ι) → (x : β₁ i) → Decidable (x ≠ 0)
inst✝ : (i : ι) → (x : β₂ i) → Decidable (x ≠ 0)
f : (i : ι) → β₁ i →ₗ[R] β₂ i
h : (i : ι) → β₂ i →ₗ[R] N
l : Π₀ (i : ι), β₁ i
⊢ ↑(↑(lsum ℕ) h) (↑(mapRange.linearMap f) l) = ↑(↑(lsum ℕ) fun i => LinearMap.comp (h i) (f i)) l
[PROOFSTEP]
simpa [DFinsupp.sumAddHom_apply] using sum_mapRange_index fun i => by simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹² : Semiring R
inst✝¹¹ : (i : ι) → AddCommMonoid (M i)
inst✝¹⁰ : (i : ι) → Module R (M i)
inst✝⁹ : AddCommMonoid N
inst✝⁸ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁷ : (i : ι) → AddCommMonoid (β i)
inst✝⁶ : (i : ι) → AddCommMonoid (β₁ i)
inst✝⁵ : (i : ι) → AddCommMonoid (β₂ i)
inst✝⁴ : (i : ι) → Module R (β i)
inst✝³ : (i : ι) → Module R (β₁ i)
inst✝² : (i : ι) → Module R (β₂ i)
inst✝¹ : (i : ι) → (x : β₁ i) → Decidable (x ≠ 0)
inst✝ : (i : ι) → (x : β₂ i) → Decidable (x ≠ 0)
f : (i : ι) → β₁ i →ₗ[R] β₂ i
h : (i : ι) → β₂ i →ₗ[R] N
l : Π₀ (i : ι), β₁ i
i : ι
⊢ ↑(h i) 0 = 0
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝¹⁰ : Semiring R
inst✝⁹ : (i : ι) → AddCommMonoid (M i)
inst✝⁸ : (i : ι) → Module R (M i)
inst✝⁷ : AddCommMonoid N
inst✝⁶ : Module R N
β : ι → Type u_6
β₁ : ι → Type u_7
β₂ : ι → Type u_8
inst✝⁵ : (i : ι) → AddCommMonoid (β i)
inst✝⁴ : (i : ι) → AddCommMonoid (β₁ i)
inst✝³ : (i : ι) → AddCommMonoid (β₂ i)
inst✝² : (i : ι) → Module R (β i)
inst✝¹ : (i : ι) → Module R (β₁ i)
inst✝ : (i : ι) → Module R (β₂ i)
f : (i : ι) → β i ≃ₗ[R] β₁ i
f₂ : (i : ι) → β₁ i ≃ₗ[R] β₂ i
⊢ ∀ (i : ι), ((fun i x => ↑(f₂ i) x) i ∘ (fun i x => ↑(f i) x) i) 0 = 0
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝⁶ : Semiring R
inst✝⁵ : (i : ι) → AddCommMonoid (M i)
inst✝⁴ : (i : ι) → Module R (M i)
inst✝³ : AddCommMonoid N
inst✝² : Module R N
inst✝¹ : DecidableEq ι
inst✝ : (x : N) → Decidable (x ≠ 0)
f : (i : ι) → M i →ₗ[R] N
i : ι
x : M i
⊢ ↑(coprodMap f) (single i x) = ↑(f i) x
[PROOFSTEP]
simp [coprodMap_apply]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ iSup p = LinearMap.range (↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
apply le_antisymm
[GOAL]
case a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ iSup p ≤ LinearMap.range (↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
apply iSup_le _
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ ∀ (i : ι), p i ≤ LinearMap.range (↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
intro i y hy
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
i : ι
y : N
hy : y ∈ p i
⊢ y ∈ LinearMap.range (↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
simp only [LinearMap.mem_range, lsum_apply_apply]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
i : ι
y : N
hy : y ∈ p i
⊢ ∃ y_1, ↑(sumAddHom fun i => LinearMap.toAddMonoidHom (Submodule.subtype (p i))) y_1 = y
[PROOFSTEP]
exact ⟨DFinsupp.single i ⟨y, hy⟩, DFinsupp.sumAddHom_single _ _ _⟩
[GOAL]
case a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ LinearMap.range (↑(lsum ℕ) fun i => Submodule.subtype (p i)) ≤ iSup p
[PROOFSTEP]
rintro x ⟨v, rfl⟩
[GOAL]
case a.intro
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ p i }
⊢ ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) v ∈ iSup p
[PROOFSTEP]
exact dfinsupp_sumAddHom_mem _ v _ fun i _ => (le_iSup p i : p i ≤ _) (v i).2
[GOAL]
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
⊢ ⨆ (i : ι) (_ : p i), S i =
LinearMap.range
(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (S i)) (filterLinearMap R (fun i => { x // x ∈ S i }) p))
[PROOFSTEP]
apply le_antisymm
[GOAL]
case a
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
⊢ ⨆ (i : ι) (_ : p i), S i ≤
LinearMap.range
(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (S i)) (filterLinearMap R (fun i => { x // x ∈ S i }) p))
[PROOFSTEP]
refine' iSup₂_le fun i hi y hy => ⟨DFinsupp.single i ⟨y, hy⟩, _⟩
[GOAL]
case a
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
i : ι
hi : p i
y : N
hy : y ∈ S i
⊢ ↑(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (S i)) (filterLinearMap R (fun i => { x // x ∈ S i }) p))
(single i { val := y, property := hy }) =
y
[PROOFSTEP]
rw [LinearMap.comp_apply, filterLinearMap_apply, filter_single_pos _ _ hi]
[GOAL]
case a
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
i : ι
hi : p i
y : N
hy : y ∈ S i
⊢ ↑(↑(lsum ℕ) fun i => Submodule.subtype (S i)) (single i { val := y, property := hy }) = y
[PROOFSTEP]
simp only [lsum_apply_apply, sumAddHom_single, LinearMap.toAddMonoidHom_coe, coeSubtype]
[GOAL]
case a
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
⊢ LinearMap.range
(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (S i)) (filterLinearMap R (fun i => { x // x ∈ S i }) p)) ≤
⨆ (i : ι) (_ : p i), S i
[PROOFSTEP]
rintro x ⟨v, rfl⟩
[GOAL]
case a.intro
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ S i }
⊢ ↑(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (S i)) (filterLinearMap R (fun i => { x // x ∈ S i }) p)) v ∈
⨆ (i : ι) (_ : p i), S i
[PROOFSTEP]
refine' dfinsupp_sumAddHom_mem _ _ _ fun i _ => _
[GOAL]
case a.intro
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ S i }
i : ι
x✝ : ↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i ≠ 0
⊢ ↑((fun i => LinearMap.toAddMonoidHom ((fun i => Submodule.subtype (S i)) i)) i)
(↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i) ∈
⨆ (i : ι) (_ : p i), S i
[PROOFSTEP]
refine' mem_iSup_of_mem i _
[GOAL]
case a.intro
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ S i }
i : ι
x✝ : ↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i ≠ 0
⊢ ↑((fun i => LinearMap.toAddMonoidHom ((fun i => Submodule.subtype (S i)) i)) i)
(↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i) ∈
⨆ (_ : p i), S i
[PROOFSTEP]
by_cases hp : p i
[GOAL]
case pos
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ S i }
i : ι
x✝ : ↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i ≠ 0
hp : p i
⊢ ↑((fun i => LinearMap.toAddMonoidHom ((fun i => Submodule.subtype (S i)) i)) i)
(↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i) ∈
⨆ (_ : p i), S i
[PROOFSTEP]
simp [hp]
[GOAL]
case neg
ι : Type u_1
R : Type u_2
S✝ : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Prop
inst✝ : DecidablePred p
S : ι → Submodule R N
v : Π₀ (i : ι), { x // x ∈ S i }
i : ι
x✝ : ↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i ≠ 0
hp : ¬p i
⊢ ↑((fun i => LinearMap.toAddMonoidHom ((fun i => Submodule.subtype (S i)) i)) i)
(↑(↑(filterLinearMap R (fun i => { x // x ∈ S i }) p) v) i) ∈
⨆ (_ : p i), S i
[PROOFSTEP]
simp [hp]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Submodule R N
inst✝ : (i : ι) → (x : { x // x ∈ p i }) → Decidable (x ≠ 0)
x : N
⊢ x ∈ iSup p ↔ ∃ f, (sum f fun i xi => ↑xi) = x
[PROOFSTEP]
rw [mem_iSup_iff_exists_dfinsupp]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
p : ι → Submodule R N
inst✝ : (i : ι) → (x : { x // x ∈ p i }) → Decidable (x ≠ 0)
x : N
⊢ (∃ f, ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) f = x) ↔ ∃ f, (sum f fun i xi => ↑xi) = x
[PROOFSTEP]
simp_rw [DFinsupp.lsum_apply_apply, DFinsupp.sumAddHom_apply, LinearMap.toAddMonoidHom_coe, coeSubtype]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
⊢ a ∈ ⨆ (i : ι) (_ : i ∈ s), p i ↔ ∃ μ, ∑ i in s, ↑(μ i) = a
[PROOFSTEP]
classical
rw [Submodule.mem_iSup_iff_exists_dfinsupp']
constructor <;> rintro ⟨μ, hμ⟩
· use fun i => ⟨μ i, (iSup_const_le : _ ≤ p i) (coe_mem <| μ i)⟩
rw [← hμ]
symm
apply Finset.sum_subset
· intro x
contrapose
intro hx
rw [mem_support_iff, not_ne_iff]
ext
rw [coe_zero, ← mem_bot R]
suffices : ⊥ = ⨆ (_ : x ∈ s), p x
· exact this.symm ▸ coe_mem (μ x)
exact (iSup_neg hx).symm
· intro x _ hx
rw [mem_support_iff, not_ne_iff] at hx
rw [hx]
rfl
· refine' ⟨DFinsupp.mk s _, _⟩
· rintro ⟨i, hi⟩
refine' ⟨μ i, _⟩
rw [iSup_pos]
· exact coe_mem _
· exact hi
simp only [DFinsupp.sum]
rw [Finset.sum_subset support_mk_subset, ← hμ]
exact Finset.sum_congr rfl fun x hx => congr_arg Subtype.val <| mk_of_mem hx
· intro x _ hx
rw [mem_support_iff, not_ne_iff] at hx
rw [hx]
rfl
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
⊢ a ∈ ⨆ (i : ι) (_ : i ∈ s), p i ↔ ∃ μ, ∑ i in s, ↑(μ i) = a
[PROOFSTEP]
rw [Submodule.mem_iSup_iff_exists_dfinsupp']
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
⊢ (∃ f, (sum f fun i xi => ↑xi) = a) ↔ ∃ μ, ∑ i in s, ↑(μ i) = a
[PROOFSTEP]
constructor
[GOAL]
case mp
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
⊢ (∃ f, (sum f fun i xi => ↑xi) = a) → ∃ μ, ∑ i in s, ↑(μ i) = a
[PROOFSTEP]
rintro ⟨μ, hμ⟩
[GOAL]
case mpr
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
⊢ (∃ μ, ∑ i in s, ↑(μ i) = a) → ∃ f, (sum f fun i xi => ↑xi) = a
[PROOFSTEP]
rintro ⟨μ, hμ⟩
[GOAL]
case mp.intro
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ ∃ μ, ∑ i in s, ↑(μ i) = a
[PROOFSTEP]
use fun i => ⟨μ i, (iSup_const_le : _ ≤ p i) (coe_mem <| μ i)⟩
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ ∑ i in s, ↑{ val := ↑(↑μ i), property := (_ : ↑(↑μ i) ∈ p i) } = a
[PROOFSTEP]
rw [← hμ]
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ ∑ i in s, ↑{ val := ↑(↑μ i), property := (_ : ↑(↑μ i) ∈ p i) } = sum μ fun i xi => ↑xi
[PROOFSTEP]
symm
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ (sum μ fun i xi => ↑xi) = ∑ i in s, ↑{ val := ↑(↑μ i), property := (_ : ↑(↑μ i) ∈ p i) }
[PROOFSTEP]
apply Finset.sum_subset
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ support μ ⊆ s
[PROOFSTEP]
intro x
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
⊢ x ∈ support μ → x ∈ s
[PROOFSTEP]
contrapose
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
⊢ ¬x ∈ s → ¬x ∈ support μ
[PROOFSTEP]
intro hx
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
⊢ ¬x ∈ support μ
[PROOFSTEP]
rw [mem_support_iff, not_ne_iff]
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
⊢ ↑μ x = 0
[PROOFSTEP]
ext
[GOAL]
case h.h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
⊢ ↑(↑μ x) = ↑0
[PROOFSTEP]
rw [coe_zero, ← mem_bot R]
[GOAL]
case h.h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
⊢ ↑(↑μ x) ∈ ⊥
[PROOFSTEP]
suffices : ⊥ = ⨆ (_ : x ∈ s), p x
[GOAL]
case h.h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
this : ⊥ = ⨆ (_ : x ∈ s), p x
⊢ ↑(↑μ x) ∈ ⊥
[PROOFSTEP]
exact this.symm ▸ coe_mem (μ x)
[GOAL]
case this
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
hx : ¬x ∈ s
⊢ ⊥ = ⨆ (_ : x ∈ s), p x
[PROOFSTEP]
exact (iSup_neg hx).symm
[GOAL]
case h.hf
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
⊢ ∀ (x : ι), x ∈ s → ¬x ∈ support μ → (fun i xi => ↑xi) x (↑μ x) = 0
[PROOFSTEP]
intro x _ hx
[GOAL]
case h.hf
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
a✝ : x ∈ s
hx : ¬x ∈ support μ
⊢ (fun i xi => ↑xi) x (↑μ x) = 0
[PROOFSTEP]
rw [mem_support_iff, not_ne_iff] at hx
[GOAL]
case h.hf
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
a✝ : x ∈ s
hx : ↑μ x = 0
⊢ (fun i xi => ↑xi) x (↑μ x) = 0
[PROOFSTEP]
rw [hx]
[GOAL]
case h.hf
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : Π₀ (i : ι), { x // x ∈ ⨆ (_ : i ∈ s), p i }
hμ : (sum μ fun i xi => ↑xi) = a
x : ι
a✝ : x ∈ s
hx : ↑μ x = 0
⊢ (fun i xi => ↑xi) x 0 = 0
[PROOFSTEP]
rfl
[GOAL]
case mpr.intro
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ ∃ f, (sum f fun i xi => ↑xi) = a
[PROOFSTEP]
refine' ⟨DFinsupp.mk s _, _⟩
[GOAL]
case mpr.intro.refine'_1
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ (i : ↑↑s) → { x // x ∈ ⨆ (_ : ↑i ∈ s), p ↑i }
[PROOFSTEP]
rintro ⟨i, hi⟩
[GOAL]
case mpr.intro.refine'_1.mk
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
i : ι
hi : i ∈ ↑s
⊢ { x // x ∈ ⨆ (_ : ↑{ val := i, property := hi } ∈ s), p ↑{ val := i, property := hi } }
[PROOFSTEP]
refine' ⟨μ i, _⟩
[GOAL]
case mpr.intro.refine'_1.mk
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
i : ι
hi : i ∈ ↑s
⊢ ↑(μ i) ∈ ⨆ (_ : ↑{ val := i, property := hi } ∈ s), p ↑{ val := i, property := hi }
[PROOFSTEP]
rw [iSup_pos]
[GOAL]
case mpr.intro.refine'_1.mk
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
i : ι
hi : i ∈ ↑s
⊢ ↑(μ i) ∈ p ↑{ val := i, property := hi }
[PROOFSTEP]
exact coe_mem _
[GOAL]
case mpr.intro.refine'_1.mk
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
i : ι
hi : i ∈ ↑s
⊢ ↑{ val := i, property := hi } ∈ s
[PROOFSTEP]
exact hi
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ (sum
(DFinsupp.mk s fun i =>
Subtype.casesOn i fun i hi =>
{ val := ↑(μ i),
property := (_ : ↑(μ i) ∈ ⨆ (_ : ↑{ val := i, property := hi } ∈ s), p ↑{ val := i, property := hi }) })
fun i xi => ↑xi) =
a
[PROOFSTEP]
simp only [DFinsupp.sum]
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ ∑ x in
support
(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) }),
↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
a
[PROOFSTEP]
rw [Finset.sum_subset support_mk_subset, ← hμ]
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ ∑ x in s,
↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
∑ i in s, ↑(μ i)
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ ∀ (x : ι),
x ∈ s →
¬x ∈
support
(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) }) →
↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
0
[PROOFSTEP]
exact Finset.sum_congr rfl fun x hx => congr_arg Subtype.val <| mk_of_mem hx
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
⊢ ∀ (x : ι),
x ∈ s →
¬x ∈
support
(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) }) →
↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
0
[PROOFSTEP]
intro x _ hx
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
x : ι
a✝ : x ∈ s
hx :
¬x ∈
support
(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s), p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
⊢ ↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
0
[PROOFSTEP]
rw [mem_support_iff, not_ne_iff] at hx
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
x : ι
a✝ : x ∈ s
hx :
↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s), p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x =
0
⊢ ↑(↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s),
p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x) =
0
[PROOFSTEP]
rw [hx]
[GOAL]
case mpr.intro.refine'_2
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
s : Finset ι
p : ι → Submodule R N
a : N
μ : (i : ι) → { x // x ∈ p i }
hμ : ∑ i in s, ↑(μ i) = a
x : ι
a✝ : x ∈ s
hx :
↑(DFinsupp.mk s fun i =>
{ val := ↑(μ ↑i),
property :=
(_ :
↑(μ ↑i) ∈
⨆ (_ : ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) } ∈ s), p ↑{ val := ↑i, property := (_ : ↑i ∈ ↑s) }) })
x =
0
⊢ ↑0 = 0
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ Independent p ↔
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
[PROOFSTEP]
simp_rw [CompleteLattice.independent_def, Submodule.disjoint_def, Submodule.mem_biSup_iff_exists_dfinsupp, exists_imp,
filter_ne_eq_erase]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
⊢ (∀ (i : ι) (x : N),
x ∈ p i →
∀ (x_1 : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i x_1) = x → x = 0) ↔
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
[PROOFSTEP]
refine' forall_congr' fun i => Subtype.forall'.trans _
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
i : ι
⊢ (∀ (x : { a // a ∈ p i }) (x_1 : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i x_1) = ↑x → ↑x = 0) ↔
∀ (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
[PROOFSTEP]
simp_rw [Submodule.coe_eq_zero]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
⊢ Independent p
[PROOFSTEP]
rw [independent_iff_forall_dfinsupp]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
⊢ ∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
[PROOFSTEP]
intro i x v hv
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
i : ι
x : { x // x ∈ p i }
v : Π₀ (i : ι), { x // x ∈ p i }
hv : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x
⊢ x = 0
[PROOFSTEP]
replace hv :
lsum ℕ (M := fun i ↦ ↥(p i)) (fun i => (p i).subtype) (erase i v) =
lsum ℕ (M := fun i ↦ ↥(p i)) (fun i => (p i).subtype) (single i x)
[GOAL]
case hv
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
i : ι
x : { x // x ∈ p i }
v : Π₀ (i : ι), { x // x ∈ p i }
hv : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x
⊢ ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (single i x)
[PROOFSTEP]
simpa only [lsum_single] using hv
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
i : ι
x : { x // x ∈ p i }
v : Π₀ (i : ι), { x // x ∈ p i }
hv :
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (single i x)
⊢ x = 0
[PROOFSTEP]
have := FunLike.ext_iff.mp (h hv) i
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → Submodule R N
h : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
i : ι
x : { x // x ∈ p i }
v : Π₀ (i : ι), { x // x ∈ p i }
hv :
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (single i x)
this : ↑(erase i v) i = ↑(single i x) i
⊢ x = 0
[PROOFSTEP]
simpa [eq_comm] using this
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → AddSubmonoid N
h : Function.Injective ↑(sumAddHom fun i => AddSubmonoid.subtype (p i))
⊢ Independent p
[PROOFSTEP]
rw [← independent_map_orderIso_iff (AddSubmonoid.toNatSubmodule : AddSubmonoid N ≃o _)]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Semiring R
inst✝¹ : AddCommMonoid N
inst✝ : Module R N
p : ι → AddSubmonoid N
h : Function.Injective ↑(sumAddHom fun i => AddSubmonoid.subtype (p i))
⊢ Independent (↑AddSubmonoid.toNatSubmodule ∘ p)
[PROOFSTEP]
exact independent_of_dfinsupp_lsum_injective _ h
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
inst✝ : (m : R) → Decidable (m ≠ 0)
p : ι → Submodule R N
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
⊢ LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (p i))
(LinearMap.comp
(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
↑(finsuppLequivDFinsupp R)) =
Finsupp.total ι N R v
[PROOFSTEP]
ext
[GOAL]
case h.h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Semiring R
inst✝² : AddCommMonoid N
inst✝¹ : Module R N
inst✝ : (m : R) → Decidable (m ≠ 0)
p : ι → Submodule R N
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
a✝ : ι
⊢ ↑(LinearMap.comp
(LinearMap.comp (↑(lsum ℕ) fun i => Submodule.subtype (p i))
(LinearMap.comp
(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
↑(finsuppLequivDFinsupp R)))
(Finsupp.lsingle a✝))
1 =
↑(LinearMap.comp (Finsupp.total ι N R v) (Finsupp.lsingle a✝)) 1
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → AddSubgroup N
h : Function.Injective ↑(sumAddHom fun i => AddSubgroup.subtype (p i))
⊢ Independent p
[PROOFSTEP]
rw [← independent_map_orderIso_iff (AddSubgroup.toIntSubmodule : AddSubgroup N ≃o _)]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → AddSubgroup N
h : Function.Injective ↑(sumAddHom fun i => AddSubgroup.subtype (p i))
⊢ Independent (↑AddSubgroup.toIntSubmodule ∘ p)
[PROOFSTEP]
exact independent_of_dfinsupp_lsum_injective _ h
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h : Independent p
⊢ Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
rw [independent_iff_forall_dfinsupp] at h
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
⊢ Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
suffices LinearMap.ker (lsum ℕ (M := fun i ↦ ↥(p i)) fun i => (p i).subtype) = ⊥ by
-- Lean can't find this without our help
letI thisI : AddCommGroup (Π₀ i, p i) := inferInstance
rw [LinearMap.ker_eq_bot] at this
exact this
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
this : LinearMap.ker (↑(lsum ℕ) fun i => Submodule.subtype (p i)) = ⊥
⊢ Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
letI thisI : AddCommGroup (Π₀ i, p i) := inferInstance
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
this : LinearMap.ker (↑(lsum ℕ) fun i => Submodule.subtype (p i)) = ⊥
thisI : AddCommGroup (Π₀ (i : ι), { x // x ∈ p i }) := inferInstance
⊢ Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
rw [LinearMap.ker_eq_bot] at this
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
thisI : AddCommGroup (Π₀ (i : ι), { x // x ∈ p i }) := inferInstance
this : Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
⊢ Function.Injective ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i))
[PROOFSTEP]
exact this
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
⊢ LinearMap.ker (↑(lsum ℕ) fun i => Submodule.subtype (p i)) = ⊥
[PROOFSTEP]
rw [LinearMap.ker_eq_bot']
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
⊢ ∀ (m : Π₀ (i : ι), { x // x ∈ p i }), ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) m = 0 → m = 0
[PROOFSTEP]
intro m hm
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
m : Π₀ (i : ι), { x // x ∈ p i }
hm : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) m = 0
⊢ m = 0
[PROOFSTEP]
ext i : 1
-- split `m` into the piece at `i` and the pieces elsewhere, to match `h`
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
m : Π₀ (i : ι), { x // x ∈ p i }
hm : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) m = 0
i : ι
⊢ ↑m i = ↑0 i
[PROOFSTEP]
rw [DFinsupp.zero_apply, ← neg_eq_zero]
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
m : Π₀ (i : ι), { x // x ∈ p i }
hm : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) m = 0
i : ι
⊢ -↑m i = 0
[PROOFSTEP]
refine' h i (-m i) m _
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → Submodule R N
h✝ : Independent p
h :
∀ (i : ι) (x : { x // x ∈ p i }) (v : Π₀ (i : ι), { x // x ∈ p i }),
↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i v) = ↑x → x = 0
m : Π₀ (i : ι), { x // x ∈ p i }
hm : ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) m = 0
i : ι
⊢ ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) (erase i m) = ↑(-↑m i)
[PROOFSTEP]
rwa [← erase_add_single i m, LinearMap.map_add, lsum_single, Submodule.subtype_apply, add_eq_zero_iff_eq_neg, ←
Submodule.coe_neg] at hm
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → AddSubgroup N
h : Independent p
⊢ Function.Injective ↑(sumAddHom fun i => AddSubgroup.subtype (p i))
[PROOFSTEP]
rw [← independent_map_orderIso_iff (AddSubgroup.toIntSubmodule : AddSubgroup N ≃o _)] at h
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝² : Ring R
inst✝¹ : AddCommGroup N
inst✝ : Module R N
p : ι → AddSubgroup N
h : Independent (↑AddSubgroup.toIntSubmodule ∘ p)
⊢ Function.Injective ↑(sumAddHom fun i => AddSubgroup.subtype (p i))
[PROOFSTEP]
exact h.dfinsupp_lsum_injective
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
⊢ LinearIndependent R v
[PROOFSTEP]
let _ := Classical.decEq ι
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝ : DecidableEq ι := Classical.decEq ι
⊢ LinearIndependent R v
[PROOFSTEP]
let _ := Classical.decEq R
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
⊢ LinearIndependent R v
[PROOFSTEP]
rw [linearIndependent_iff]
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
⊢ ∀ (l : ι →₀ R), ↑(Finsupp.total ι N R v) l = 0 → l = 0
[PROOFSTEP]
intro l hl
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
⊢ l = 0
[PROOFSTEP]
let a := DFinsupp.mapRange.linearMap (fun i => LinearMap.toSpanSingleton R (p i) ⟨v i, hv i⟩) l.toDFinsupp
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
⊢ l = 0
[PROOFSTEP]
have ha : a = 0 := by
apply hp.dfinsupp_lsum_injective
rwa [← lsum_comp_mapRange_toSpanSingleton _ hv] at hl
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
⊢ a = 0
[PROOFSTEP]
apply hp.dfinsupp_lsum_injective
[GOAL]
case a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
⊢ ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) a = ↑(↑(lsum ℕ) fun i => Submodule.subtype (p i)) 0
[PROOFSTEP]
rwa [← lsum_comp_mapRange_toSpanSingleton _ hv] at hl
[GOAL]
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
ha : a = 0
⊢ l = 0
[PROOFSTEP]
ext i
[GOAL]
case h
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
ha : a = 0
i : ι
⊢ ↑l i = ↑0 i
[PROOFSTEP]
apply smul_left_injective R (hv' i)
[GOAL]
case h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
ha : a = 0
i : ι
⊢ (fun c => c • v i) (↑l i) = (fun c => c • v i) (↑0 i)
[PROOFSTEP]
have : l i • v i = a i := rfl
[GOAL]
case h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
ha : a = 0
i : ι
this : ↑l i • v i = ↑(↑a i)
⊢ (fun c => c • v i) (↑l i) = (fun c => c • v i) (↑0 i)
[PROOFSTEP]
simp only [coe_zero, Pi.zero_apply, ZeroMemClass.coe_zero, smul_eq_zero, ha] at this
[GOAL]
case h.a
ι : Type u_1
R : Type u_2
S : Type u_3
M : ι → Type u_4
N : Type u_5
dec_ι : DecidableEq ι
inst✝³ : Ring R
inst✝² : AddCommGroup N
inst✝¹ : Module R N
inst✝ : NoZeroSMulDivisors R N
p : ι → Submodule R N
hp : Independent p
v : ι → N
hv : ∀ (i : ι), v i ∈ p i
hv' : ∀ (i : ι), v i ≠ 0
x✝¹ : DecidableEq ι := Classical.decEq ι
x✝ : DecidableEq R := Classical.decEq R
l : ι →₀ R
hl : ↑(Finsupp.total ι N R v) l = 0
a : (fun x => Π₀ (i : ι), { x // x ∈ p i }) (Finsupp.toDFinsupp l) :=
↑(mapRange.linearMap fun i =>
LinearMap.toSpanSingleton R { x // x ∈ p i } { val := v i, property := (_ : v i ∈ p i) })
(Finsupp.toDFinsupp l)
ha : a = 0
i : ι
this : ↑l i = 0 ∨ v i = 0
⊢ (fun c => c • v i) (↑l i) = (fun c => c • v i) (↑0 i)
[PROOFSTEP]
simpa
|
{"mathlib_filename": "Mathlib.LinearAlgebra.DFinsupp", "llama_tokens": 90178}
|
import matplotlib.pyplot as plt
from pyexocross.exomol import ExomolDef
from pyexocross import ExocrossRunner
import numpy as np
path_to_exocross = '/Users/ahmed/Documents/repos/exocross/xcross.exe'
path_to_linelist = '/Users/ahmed/Documents/Linelists/H2O/'
path_to_def_file = '/Users/ahmed/Documents/Linelists/H2O/1H2-16O__BT2.def'
# Create our exocross runner
exo_run = ExocrossRunner(path_to_exocross)
# Read exomol-def file
exomol_def = ExomolDef(path_to_def_file)
print(f'Available Broadeners: {exomol_def.availableBroadeners}')
# Create an exocross input
exocross_input = exomol_def.create_exocross_input(path_to_linelist)
# Generate broadeners
h2_broad = exomol_def.create_broadeners('H2')
he_broad = exomol_def.create_broadeners('He')
# Add broadeners to input
exocross_input.add_broadener(h2_broad)
exocross_input.add_broadener(he_broad)
# Set their ratios
h2_broad.ratio = 0.9
he_broad.ratio = 0.1
# Set wavelength range and point
exocross_input.set_range([0.1, 10], units='um')
exocross_input.Npoints = 10001
# Lets set some temperature and pressures to run
temperature_points = np.linspace(500, 2000, 2)
pressure_points = np.logspace(0, 6, 2)
final_grid = np.zeros(shape=(2,2,exocross_input.Npoints))
wn_grid = None
for ip, p in enumerate(pressure_points):
for it, t in enumerate(temperature_points):
exocross_input.temperature = t
exocross_input.pressure = p
res = exo_run.run(exocross_input)
if wn_grid is None:
wngrid = res[:, 0]
final_grid[ip, it] = res[:, 1]
plt.figure()
plt.plot(10000/wngrid, final_grid[-1, -1])
plt.show()
|
{"hexsha": "e496f82d128ebab7193ad055146593959948886d", "size": 1618, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/example_run.py", "max_stars_repo_name": "ucl-exoplanets/pyexocross", "max_stars_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/example_run.py", "max_issues_repo_name": "ucl-exoplanets/pyexocross", "max_issues_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/example_run.py", "max_forks_repo_name": "ucl-exoplanets/pyexocross", "max_forks_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-15T12:54:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T12:54:04.000Z", "avg_line_length": 26.0967741935, "max_line_length": 74, "alphanum_fraction": 0.7583436341, "include": true, "reason": "import numpy", "num_tokens": 492}
|
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (assert_allclose,
assert_equal, assert_almost_equal, assert_raises)
from scipy.spatial import procrustes
class TestProcrustes(object):
def setup_method(self):
"""creates inputs"""
# an L
self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
# a larger, shifted, mirrored L
self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
# an L shifted up 1, right 1, and with point 4 shifted an extra .5
# to the right
# pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
# data4, data5 are standardized (trace(A*A') = 1).
# procrustes should return an identical copy if they are used
# as the first matrix argument.
shiftangle = np.pi / 8
self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
[0, -1]], 'd') / np.sqrt(4)
self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
[np.cos(np.pi / 2 - shiftangle),
np.sin(np.pi / 2 - shiftangle)],
[-np.cos(shiftangle),
-np.sin(shiftangle)],
[-np.cos(np.pi / 2 - shiftangle),
-np.sin(np.pi / 2 - shiftangle)]],
'd') / np.sqrt(4)
def test_procrustes(self):
# tests procrustes' ability to match two matrices.
#
# the second matrix is a rotated, shifted, scaled, and mirrored version
# of the first, in two dimensions only
#
# can shift, mirror, and scale an 'L'?
a, b, disparity = procrustes(self.data1, self.data2)
assert_allclose(b, a)
assert_almost_equal(disparity, 0.)
# if first mtx is standardized, leaves first mtx unchanged?
m4, m5, disp45 = procrustes(self.data4, self.data5)
assert_equal(m4, self.data4)
# at worst, data3 is an 'L' with one point off by .5
m1, m3, disp13 = procrustes(self.data1, self.data3)
#assert_(disp13 < 0.5 ** 2)
def test_procrustes2(self):
# procrustes disparity should not depend on order of matrices
m1, m3, disp13 = procrustes(self.data1, self.data3)
m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
assert_almost_equal(disp13, disp31)
# try with 3d, 8 pts per
rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
[0.41124708, -0.03966978, -0.31854548],
[0.91910318, 1.39451809, -0.15295084],
[2.00452023, 0.50150048, 0.29485268],
[0.09453595, 0.67528885, 0.03283872],
[0.07015232, 2.18892599, -1.67266852],
[0.65029688, 1.60551637, 0.80013549],
[-0.6607528, 0.53644208, 0.17033891]])
rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
[-1.84888465, -0.92589646, -1.29335743],
[0.67031855, -1.35957463, 0.41938621],
[0.73967209, -0.20230757, 0.52418027],
[0.17752796, 0.09065607, 0.29827466],
[0.47999368, -0.88455717, -0.57547934],
[-0.11486344, -0.12608506, -0.3395779],
[-0.86106154, -0.28687488, 0.9644429]])
res1, res3, disp13 = procrustes(rand1, rand3)
res3_2, res1_2, disp31 = procrustes(rand3, rand1)
assert_almost_equal(disp13, disp31)
def test_procrustes_shape_mismatch(self):
assert_raises(ValueError, procrustes,
np.array([[1, 2], [3, 4]]),
np.array([[5, 6, 7], [8, 9, 10]]))
def test_procrustes_empty_rows_or_cols(self):
empty = np.array([[]])
assert_raises(ValueError, procrustes, empty, empty)
def test_procrustes_no_variation(self):
assert_raises(ValueError, procrustes,
np.array([[42, 42], [42, 42]]),
np.array([[45, 45], [45, 45]]))
def test_procrustes_bad_number_of_dimensions(self):
# fewer dimensions in one dataset
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([[1, 2], [3, 4]]))
# fewer dimensions in both datasets
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([1, 1, 2, 3, 5, 8]))
# zero dimensions
assert_raises(ValueError, procrustes, np.array(7), np.array(11))
# extra dimensions
assert_raises(ValueError, procrustes,
np.array([[[11], [7]]]),
np.array([[[5, 13]]]))
|
{"hexsha": "2903a0b68b33773f95d9cd90df2bb75db2224967", "size": 5049, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/spatial/tests/test__procrustes.py", "max_stars_repo_name": "rostyboost/scipy", "max_stars_repo_head_hexsha": "2f5aa264724099c03772ed784e7a947d2bea8398", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scipy/spatial/tests/test__procrustes.py", "max_issues_repo_name": "rostyboost/scipy", "max_issues_repo_head_hexsha": "2f5aa264724099c03772ed784e7a947d2bea8398", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scipy/spatial/tests/test__procrustes.py", "max_forks_repo_name": "rostyboost/scipy", "max_forks_repo_head_hexsha": "2f5aa264724099c03772ed784e7a947d2bea8398", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-17T03:51:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T03:51:02.000Z", "avg_line_length": 42.4285714286, "max_line_length": 79, "alphanum_fraction": 0.5165379283, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1487}
|
# -*- coding: utf-8 -*-
# import numpy as np
# from numpy import testing
#
# from sktime.contrib.interval_based._cif import CanonicalIntervalForest
# from sktime.datasets import load_gunpoint, load_italy_power_demand
#
#
# def test_cif_on_gunpoint():
# # load gunpoint data
# X_train, y_train = load_gunpoint(split="train", return_X_y=True)
# X_test, y_test = load_gunpoint(split="test", return_X_y=True)
# indices = np.random.RandomState(0).permutation(10)
#
# # train CIF
# cif = CanonicalIntervalForest(n_estimators=100, random_state=0)
# cif.fit(X_train.iloc[indices], y_train[indices])
#
# # assert probabilities are the same
# probas = cif.predict_proba(X_test.iloc[indices])
# testing.assert_array_equal(probas, cif_gunpoint_probas)
#
#
# def test_cif_on_power_demand():
# # load power demand data
# X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
# X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
# indices = np.random.RandomState(0).permutation(100)
#
# # train CIF
# cif = CanonicalIntervalForest(n_estimators=100, random_state=0)
# cif.fit(X_train, y_train)
#
# score = cif.score(X_test.iloc[indices], y_test[indices])
# assert score >= 0.92
#
#
# cif_gunpoint_probas = np.array(
# [
# [
# 0.12,
# 0.88,
# ],
# [
# 0.5,
# 0.5,
# ],
# [
# 0.57,
# 0.43,
# ],
# [
# 0.41,
# 0.59,
# ],
# [
# 0.06,
# 0.94,
# ],
# [
# 0.59,
# 0.41,
# ],
# [
# 0.25,
# 0.75,
# ],
# [
# 0.62,
# 0.38,
# ],
# [
# 0.57,
# 0.43,
# ],
# [
# 0.11,
# 0.89,
# ],
# ]
# )
#
#
# # def print_array(array):
# # print('[')
# # for sub_array in array:
# # print('[')
# # for value in sub_array:
# # print(value.astype(str), end='')
# # print(', ')
# # print('],')
# # print(']')
# #
# #
# # if __name__ == "__main__":
# # X_train, y_train = load_gunpoint(split="train", return_X_y=True)
# # X_test, y_test = load_gunpoint(split="test", return_X_y=True)
# # indices = np.random.RandomState(0).permutation(10)
# #
# # cif_u = CanonicalIntervalForest(n_estimators=100, random_state=0)
# #
# # cif_u.fit(X_train.iloc[indices], y_train[indices])
# # probas = cif_u.predict_proba(X_test.iloc[indices])
# # print_array(probas)
|
{"hexsha": "1d25e22b4bebae4595fa73b321318c5c90515f12", "size": 2717, "ext": "py", "lang": "Python", "max_stars_repo_path": "sktime/contrib/interval_based/test_cif.py", "max_stars_repo_name": "bmurdata/sktime", "max_stars_repo_head_hexsha": "fb079e76e3f3dfbb849fb805e53f09adec6cdf79", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-10T06:32:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-10T06:32:00.000Z", "max_issues_repo_path": "sktime/contrib/interval_based/test_cif.py", "max_issues_repo_name": "bmurdata/sktime", "max_issues_repo_head_hexsha": "fb079e76e3f3dfbb849fb805e53f09adec6cdf79", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sktime/contrib/interval_based/test_cif.py", "max_forks_repo_name": "bmurdata/sktime", "max_forks_repo_head_hexsha": "fb079e76e3f3dfbb849fb805e53f09adec6cdf79", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-08T07:07:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-08T07:07:42.000Z", "avg_line_length": 25.8761904762, "max_line_length": 80, "alphanum_fraction": 0.5193227825, "include": true, "reason": "import numpy,from numpy", "num_tokens": 784}
|
<a href="https://colab.research.google.com/github/john-s-butler-dit/Numerical-Analysis-Python/blob/master/Chapter%2009%20-%20Elliptic%20Equations/902_Poisson%20Equation-Zero%20Boundary%20Conditions.ipynb" target="_parent"></a>
# Finite Difference Methods for the Poisson Equation with Zero Boundary
This notebook will focus on numerically approximating a inhomogenous second order Poisson Equation with zero boundary conditions.
## The Differential Equation
The general two dimensional Poisson Equation is of the form:
\begin{equation} \frac{\partial^2 u}{\partial y^2} + \frac{\partial^2 u}{\partial x^2}=f(x,y), \ \ \ (x,y) \in \Omega=(0,1)\times (0,1),\end{equation}
with boundary conditions
\begin{equation}U(x,y) = g(x,y), \ \ \ (x,y)\in\delta\Omega\text{ - boundary}. \end{equation}
## Homogenous Poisson Equation
This notebook will implement a finite difference scheme to approximate the inhomogenous form of the Poisson Equation $f(x,y)=x^2+y^2$, with a zero boundary:
\begin{equation} \frac{\partial^2 u}{\partial y^2} + \frac{\partial^2 u}{\partial x^2}=x^2+y^2.\end{equation}
with the Boundary Conditions:
\begin{equation} u(x,0)=0, \ \ \ \ \ 0 \leq x \leq 1, \text{ lower},\end{equation}
\begin{equation} u(x,1)=0, \ \ \ \ \ 0 \leq x \leq 1, \text{ upper},\end{equation}
\begin{equation} u(0,y)=0, \ \ \ \ \ 0 \leq y \leq 1, \text{ left},\end{equation}
\begin{equation} u(1,y)=0, \ \ \ \ \ 0 \leq y \leq 1, \text{ right}.\end{equation}
```python
# LIBRARY
# vector manipulation
import numpy as np
# math functions
import math
# THIS IS FOR PLOTTING
%matplotlib inline
import matplotlib.pyplot as plt # side-stepping mpl backend
import warnings
warnings.filterwarnings("ignore")
from IPython.display import HTML
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
```
## Discete Grid
The region $\Omega=(0,1)\times(0,1)$ is discretised into a uniform mesh $\Omega_h$. In the $x$ and $y$ directions into $N$ steps giving a stepsize of
\begin{equation} h=\frac{1-0}{N},\end{equation}
resulting in
\begin{equation}x[i]=0+ih, \ \ \ i=0,1,...,N,\end{equation}
and
\begin{equation}x[j]=0+jh, \ \ \ j=0,1,...,N,\end{equation}
The Figure below shows the discrete grid points for $N=10$, the known boundary conditions (green), and the unknown values (red) of the Poisson Equation.
```python
N=10
h=1/N
x=np.arange(0,1.0001,h)
y=np.arange(0,1.0001,h)
X, Y = np.meshgrid(x, y)
fig = plt.figure()
plt.plot(x[1],y[1],'ro',label='unknown');
plt.plot(X,Y,'ro');
plt.plot(np.ones(N+1),y,'go',label='Boundary Condition');
plt.plot(np.zeros(N+1),y,'go');
plt.plot(x,np.zeros(N+1),'go');
plt.plot(x, np.ones(N+1),'go');
plt.xlim((-0.1,1.1))
plt.ylim((-0.1,1.1))
plt.xlabel('x')
plt.ylabel('y')
plt.gca().set_aspect('equal', adjustable='box')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title(r'Discrete Grid $\Omega_h,$ h= %s'%(h),fontsize=24,y=1.08)
plt.show();
```
## Boundary Conditions
The discrete boundary conditions are
\begin{equation} w[i,0]=0, \text{ for } i=0,...,10, \text{ upper},\end{equation}
\begin{equation} w[i,N]=0, \text{ for } i=0,...,10, \text{ lower},\end{equation}
\begin{equation} w[0,j]=0, \text{ for } j=0,...,10, \text{ left},\end{equation}
\begin{equation} w[N,j]=0, \text{ for } i=0,...,10,\text{ right}. \end{equation}
The Figure below plots the boundary values of $w[i,j]$.
```python
w=np.zeros((N+1,N+1))
for i in range (0,N):
w[i,0]=0 #left Boundary
w[i,N]=0 #Right Boundary
for j in range (0,N):
w[0,j]=0 #Lower Boundary
w[N,j]=0 #Upper Boundary
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot a basic wireframe.
ax.plot_wireframe(X, Y, w,color='r', rstride=10, cstride=10)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('w')
plt.title(r'Boundary Values',fontsize=24,y=1.08)
plt.show()
```
## Numerical Method
The Poisson Equation is discretised using
$\delta_x^2$ is the central difference approximation of the second derivative in the $x$ direction
\begin{equation}\delta_x^2=\frac{1}{h^2}(w_{i+1j}-2w_{ij}+w_{i-1j}), \end{equation}
and $\delta_y^2$ is the central difference approximation of the second derivative in the $y$ direction
\begin{equation}\delta_y^2=\frac{1}{h^2}(w_{ij+1}-2w_{ij}+w_{ij-1}). \end{equation}
The gives the Poisson Difference Equation,
\begin{equation}-(\delta_x^2w_{ij}+\delta_y^2w_{ij})=f_{ij} \ \ (x_i,y_j) \in \Omega_h, \end{equation}
\begin{equation}w_{ij}=g_{ij} \ \ (x_i,y_j) \in \partial\Omega_h, \end{equation}
where $w_ij$ is the numerical approximation of $U$ at $x_i$ and $y_j$.
Expanding the the Poisson Difference Equation gives the five point method,
\begin{equation}-(w_{i-1j}+w_{ij-1}-4w_{ij}+w_{ij+1}+w_{i+1j})=h^2f_{ij} \end{equation}
for $i=1,...,N-1$ and $j=1,...,N-1.$
### Matrix form
This can be written as a system of $(N-1)\times(N-1)$ equations can be arranged in matrix form
\begin{equation} A\mathbf{w}=\mathbf{r},\end{equation}
where $A$ is an $(N-1)^2\times(N-1)^2$ matrix made up of the following block tridiagonal structure
\begin{equation}\left(\begin{array}{ccccccc}
T&I&0&0&.&.&.\\
I&T&I&0&0&.&.\\
.&.&.&.&.&.&.\\
.&.&.&0&I&T&I\\
.&.&.&.&0&I&T\\
\end{array}\right),
\end{equation}
where $I$ denotes an $N-1 \times N-1$ identity matrix and $T$ is the tridiagonal matrix of the form:
\begin{equation} T=\left(\begin{array}{ccccccc}
-4&1&0&0&.&.&.\\
1&-4&1&0&0&.&.\\
.&.&.&.&.&.&.\\
.&.&.&0&1&-4&1\\
.&.&.&.&0&1&-4\\
\end{array}\right).
\end{equation}
The plot below shows the matrix $A$ and its inverse $A^{-1}$ as a colourplot.
```python
N2=(N-1)*(N-1)
A=np.zeros((N2,N2))
## Diagonal
for i in range (0,N-1):
for j in range (0,N-1):
A[i+(N-1)*j,i+(N-1)*j]=-4
# LOWER DIAGONAL
for i in range (1,N-1):
for j in range (0,N-1):
A[i+(N-1)*j,i+(N-1)*j-1]=1
# UPPPER DIAGONAL
for i in range (0,N-2):
for j in range (0,N-1):
A[i+(N-1)*j,i+(N-1)*j+1]=1
# LOWER IDENTITY MATRIX
for i in range (0,N-1):
for j in range (1,N-1):
A[i+(N-1)*j,i+(N-1)*(j-1)]=1
# UPPER IDENTITY MATRIX
for i in range (0,N-1):
for j in range (0,N-2):
A[i+(N-1)*j,i+(N-1)*(j+1)]=1
Ainv=np.linalg.inv(A)
fig = plt.figure(figsize=(12,4));
plt.subplot(121)
plt.imshow(A,interpolation='none');
clb=plt.colorbar();
clb.set_label('Matrix elements values');
plt.title('Matrix A ',fontsize=24)
plt.subplot(122)
plt.imshow(Ainv,interpolation='none');
clb=plt.colorbar();
clb.set_label('Matrix elements values');
plt.title(r'Matrix $A^{-1}$ ',fontsize=24)
fig.tight_layout()
plt.show();
```
The vector $\mathbf{w}$ is of length $(N-1)\times(N-1)$ which made up of $N-1$ subvectors $\mathbf{w}_j$ of length $N-1$ of the form
\begin{equation}\mathbf{w}_j=\left(\begin{array}{c}
w_{1j}\\
w_{2j}\\
.\\
.\\
w_{N-2j}\\
w_{N-1j}\\
\end{array}\right).
\end{equation}
The vector $\mathbf{r}$ is of length $(N-1)\times(N-1)$ which made up of $N-1$ subvectors of the form $\mathbf{r}_j=-h^2\mathbf{f}_j-\mathbf{bx}_{j}-\mathbf{by}_j$. In this example the boundary is $0$ which means that
\begin{equation}\mathbf{bx}_j =0,\end{equation}
\begin{equation}
\mathbf{by}_{j} =0,
\end{equation}
and
\begin{equation}\mathbf{f}_j =\left(\begin{array}{c}
x_1^2+y_j^2\\
x_2^2+y_j^2\\
.\\
.\\
x_{N-2}^2+y_j^2\\
x_{N-1}^2+y_j^2\\
\end{array}\right)
\end{equation}
for $j=1,...,N-1$.
```python
r=np.zeros(N2)
# vector r
for i in range (0,N-1):
for j in range (0,N-1):
r[i+(N-1)*j]=h*h*(x[i]*x[i]+y[j]*y[j])
# Boundary
b_bottom_top=np.zeros(N2)
for i in range (0,N-1):
b_bottom_top[i]=0 #Bottom Boundary
b_bottom_top[i+(N-1)*(N-2)]=0# Top Boundary
b_left_right=np.zeros(N2)
for j in range (0,N-1):
b_left_right[(N-1)*j]=0 # Left Boundary
b_left_right[N-2+(N-1)*j]=0# Right Boundary
b=b_left_right+b_bottom_top
```
## Results
To solve the system for $\mathbf{w}$ invert the matrix $A$
\begin{equation} A\mathbf{w}=\mathbf{r},\end{equation}
such that
\begin{equation} \mathbf{w}=A^{-1}\mathbf{r}.\end{equation}
Lastly, as $\mathbf{w}$ is in vector it has to be reshaped into grid form to plot.
The figure below shows the numerical approximation of the homogeneous Equation.
```python
C=np.dot(Ainv,r-b)
w[1:N,1:N]=C.reshape((N-1,N-1))
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d');
# Plot a basic wireframe.
ax.plot_wireframe(X, Y, w,color='r');
ax.set_xlabel('x');
ax.set_ylabel('y');
ax.set_zlabel('w');
plt.title(r'Numerical Approximation of the Poisson Equation',fontsize=24,y=1.08);
plt.show();
```
# Consistency and Convergence
We now ask how well the grid function determined by the five point scheme approximates the exact solution of the Poisson problem.
## Consistency
### Consitency (Definition)
Let
\begin{equation}\nabla^2_h(\varphi)=-(\varphi_{i-1j}+\varphi_{ij-1}-4\varphi_{ij}+\varphi_{ij+1}+\varphi_{i+1j})\end{equation}
denote the finite difference approximation associated with the grid $\Omega_h$ having the mesh size $h$, to a partial differential operator
\begin{equation}\nabla^2(\varphi)=\frac{\partial^2 \varphi}{\partial x^2}+\frac{\partial^2 \varphi}{\partial y^2}\end{equation} defined on
a simply connected, open set $\Omega \subset R^2$. For a given function $\varphi\in C^{\infty}(\Omega)$,
the truncation error of $\nabla^2_h$ is
\begin{equation}\tau_{h}(\mathbf{x})=(\nabla^2-\nabla^2_h)\varphi(\mathbf{x}) \end{equation}
The approximation $\nabla^2_h$ is consistent with $\nabla^2$ if
\begin{equation}\lim_{h\rightarrow 0}\tau_h(\mathbf{x})=0,\end{equation}
for all $\mathbf{x} \in D$ and all $\varphi \in C^{\infty}(\Omega)$. The approximation is consistent to order $p$ if $\tau_h(\mathbf{x})=O(h^p)$.
_In other words a method is consistent with the differential equation it is approximating._
## Proof of Consistency
The five-point difference analog $\nabla^2_h$ is consistent to order 2 with $\nabla^2$.
__Proof__
Pick $\varphi \in C^{\infty}(D)$, and let $(x,y) \in \Omega$ be a point such that $(x\pm h, y),(x,y \pm h) \in \Omega\bigcup \partial\Omega$. By the Taylor Theorem
\begin{eqnarray*}
\varphi(x\pm h,y)&=&\varphi(x,y) \pm h \frac{\partial \varphi}{\partial x}(x,y)+\frac{h^2}{2!}\frac{\partial^2 \varphi}{\partial x^2}(x,y) \pm\frac{h^3}{3!}\frac{\partial^3 \varphi}{\partial x^3}(x,y)+\frac{h^4}{4!}\frac{\partial^4 \varphi}{\partial x^4}(\zeta^{\pm},y)
\end{eqnarray*}
where $\zeta^{\pm} \in (x-h,x+h)$. Adding this pair of equation together and rearranging , we get
\begin{equation}\frac{1}{h^2}[\varphi(x+h,y)-2\varphi(x,y)+\varphi(x-h,y) ] -\frac{\partial^2 \varphi}{\partial x^2}(x,y)=\frac{h^2}{4!}\left[\frac{\partial^4 \varphi}{\partial x^4}(\zeta^{+},y)+
\frac{\partial^4 \varphi}{\partial x^4}(\zeta^{-},y)
\right]
\end{equation}
By the intermediate value theorem
\end{equation}\left[\frac{\partial^4 \varphi}{\partial x^4}(\zeta^{+},y)+
\frac{\partial^4 \varphi}{\partial x^4}(\zeta^{-},y)
\right]
=2\frac{\partial^4 \varphi}{\partial x^4}(\zeta,y),\end{equation}
for some $\zeta \in (x-h,x+h)$. Therefore,
\begin{equation}\delta_x^2(x,y)
=\frac{\partial^2 \varphi}{\partial x^2}(x,y)+\frac{h^2}{2!}\frac{\partial^4 \varphi}{\partial x^4}(\zeta,y)\end{equation}
Similar reasoning shows that
\begin{equation}\delta_y^2(x,y)
=\frac{\partial^2 \varphi}{\partial y^2}(x,y)+\frac{h^2}{2!}\frac{\partial^4 \varphi}{\partial y^4}(x,\eta)
\end{equation}
for some $\eta \in (y-h,y+h)$. We conclude that $\tau_h(x,y)=(\nabla-\nabla_h)\varphi(x,y)=O(h^2).$
## Convergence
### Definition
Let $\nabla^2_hw(\mathbf{x}_j)=f(\mathbf{x}_j)$ be a finite difference approximation, defined on a grid mesh size $h$, to a PDE $\nabla^2U(\mathbf{x})=f(\mathbf{x})$ on a simply connected set $D \subset R^n$. Assume that $w(x,y)=U(x,y)$ at all points $(x,y)$ on the boundary $\partial\Omega$. The finite difference scheme converges (or is convergent) if
\end{equation} \max_j|U(\mathbf{x}_j)-w(\mathbf{x}_j)| \rightarrow 0 \mbox{ as } h \rightarrow 0.\end{equation}
### Theorem (DISCRETE MAXIMUM PRINCIPLE).
If $\nabla^2_hV_{ij}\geq 0$ for all points $(x_i,y_j) \in \Omega_h$, then
\begin{equation} \max_{(x_i,y_j)\in\Omega_h}V_{ij}\leq \max_{(x_i,y_j)\in\partial\Omega_h}V_{ij},\end{equation}
If $\nabla^2_hV_{ij}\leq 0$ for all points $(x_i,y_j) \in \Omega_h$, then
\begin{equation} \min_{(x_i,y_j)\in\Omega_h}V_{ij}\geq \min_{(x_i,y_j)\in\partial\Omega_h}V_{ij}.\end{equation}
### Propositions
1. The zero grid function for which $U_{ij}=0$ for all $(x_i,y_j) \in \Omega_h \bigcup \partial\Omega_h$
is the only solution to the finite difference problem
\begin{equation}\nabla_h^2U_{ij}=0 \mbox{ for }(x_i,y_j)\in\Omega_h,\end{equation}
\begin{equation}U_{ij}=0 \mbox{ for }(x_i,y_j)\in\partial\Omega_h.\end{equation}
2. For prescribed grid functions $f_{ij}$ and $g_{ij}$, there exists a unique solution to the problem
\begin{equation}\nabla_h^2U_{ij}=f_{ij} \mbox{ for }(x_i,y_j)\in\Omega_h,\end{equation}
\begin{equation}U_{ij}=g_{ij} \mbox{ for }(x_i,y_j)\in\partial\Omega_h.\end{equation}
### Definition
For any grid function $V:\Omega_h\bigcup\partial\Omega_h \rightarrow R$,
\begin{equation}||V||_{\Omega} =\max_{(x_i,y_j)\in\Omega_h}|V_{ij}|, \end{equation}
\begin{equation}||V||_{\partial\Omega} =\max_{(x_i,y_j)\in\partial\Omega_h}|V_{ij}|. \end{equation}
### Lemma
If the grid function $V:\Omega_h\bigcup\partial\Omega_h\rightarrow R$ satisfies the boundary condition $V_{ij}=0$ for $(x_i,y_j)\in \partial\Omega_h$, then
\begin{equation}||V_||_{\Omega}\leq \frac{1}{8}||\nabla_h^2V||_{\Omega}. \end{equation}
Given these Lemmas and Propositions, we can now prove that the solution to the five point scheme $\nabla^2_h$ is convergent to the exact solution of the Poisson Equation $\nabla^2$.
### Convergence Theorem
Let $U$ be a solution to the Poisson equation and let $w$ be the grid function
that satisfies the discrete analog
\begin{equation}-\nabla_h^2w_{ij}=f_{ij} \ \ \mbox{ for } (x_i,y_j)\in\Omega_h, \end{equation}
\begin{equation}w_{ij}=g_{ij} \ \ \mbox{ for } (x_i,y_j)\in\partial\Omega_h. \end{equation}
Then there exists a positive constant $K$ such that
\begin{equation}||U-w||_{\Omega}\leq KMh^2, \end{equation}
where
\begin{equation} M=\left\{
\left|\left|\frac{\partial^4 U}{\partial x^4} \right|\right|_{\infty},
\left|\left|\frac{\partial^4 U}{\partial y^4} \right|\right|_{\infty}
\right\}\end{equation}
__Proof__
The statement of the theorem assumes that $U\in C^4(\bar{\Omega})$. This assumption
holds if $f$ and $g$ are smooth enough.
\begin{proof}
Following from the proof of the Proposition we have
\begin{equation} (\nabla_h^2-\nabla^2)U_{ij}=\frac{h^2}{12}\left[ \frac{\partial^4 U}{\partial x^4}(\zeta_i,y_j)+\frac{\partial^4 U}{\partial y^4}(x_i,\eta_j) \right],\end{equation}
for some $\zeta \in (x_{i-1},x_{i+1})$ and $\eta_j\in(y_{j-1},y_{j+1})$. Therefore,
\begin{equation} -\nabla_h^2U_{ij}=f_{ij}-\frac{h^2}{12}\left[ \frac{\partial^4 U}{\partial x^4}(\zeta_i,y_j)+\frac{\partial^4 U}{\partial y^4}(x_i,\eta_j) \right].\end{equation}
If we subtract from this the identity equation $-\nabla_h^2w_{ij}=f_{ij}$ and note
that $U-w$ vanishes on $\partial\Omega_h$, we find that
\begin{equation} \nabla_h^2(U_{ij}-w_{ij})=\frac{h^2}{12}\left[ \frac{\partial^4 U}{\partial x^4}(\zeta_i,y_j)+\frac{\partial^4 U}{\partial y^4}(x_i,\eta_j) \right].\end{equation}
It follows that
\begin{equation} ||U-w||_{\Omega}\leq\frac{1}{8}||\nabla_h^2(U-w)||_{\Omega}\leq KMh^2.\end{equation}
```python
```
```python
```
|
{"hexsha": "7024f9f33fe98e64988f7443c1a7eff56a41ce09", "size": 264356, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Chapter 09 - Elliptic Equations/902_Poisson Equation-Zero Boundary Conditions.ipynb", "max_stars_repo_name": "jjcrofts77/Numerical-Analysis-Python", "max_stars_repo_head_hexsha": "97e4b9274397f969810581ff95f4026f361a56a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2019-09-05T21:39:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T14:00:25.000Z", "max_issues_repo_path": "Chapter 09 - Elliptic Equations/902_Poisson Equation-Zero Boundary Conditions.ipynb", "max_issues_repo_name": "jjcrofts77/Numerical-Analysis-Python", "max_issues_repo_head_hexsha": "97e4b9274397f969810581ff95f4026f361a56a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter 09 - Elliptic Equations/902_Poisson Equation-Zero Boundary Conditions.ipynb", "max_forks_repo_name": "jjcrofts77/Numerical-Analysis-Python", "max_forks_repo_head_hexsha": "97e4b9274397f969810581ff95f4026f361a56a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2021-06-17T15:34:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T14:53:43.000Z", "avg_line_length": 424.3274478331, "max_line_length": 131302, "alphanum_fraction": 0.9200207296, "converted": true, "num_tokens": 5745}
|
from unittest import TestCase
import numpy as np
import nucleoatac.NucleosomeCalling as Nuc
import pyatac.VMat as V
from pyatac.chunkmat2d import BiasMat2D
from pyatac.chunk import ChunkList
from pyatac.bias import InsertionBiasTrack
class Test_variance(TestCase):
"""class for testing variance calculation on background signal
"""
def setUp(self):
""" set up class for testing variance calculation for background signal
"""
bed_list = ChunkList.read('example/example.bed')
chunk = bed_list[0]
vmat = V.VMat.open('example/example.VMat')
biastrack = InsertionBiasTrack(chunk.chrom, chunk.start, chunk.end)
biastrack.read_track('example/example.Scores.bedgraph.gz')
biasmat = BiasMat2D(chunk.chrom,chunk.start+200,chunk.end-200,100,250)
biasmat.makeBiasMat(biastrack)
self.signaldist = Nuc.SignalDistribution(chunk.start+300,vmat,biasmat,35)
def test_sd1(self):
"""Make sure variance calculation is close to what is obtained by simulation
"""
self.signaldist.simulateDist(5000)
sd1 = np.std(self.signaldist.scores)
sd2 = self.signaldist.analStd()
self.assertTrue(abs(sd1-sd2)<0.05*sd1)
def test_sd2(self):
"""Make sure variance calculation is same as would be obtained through alternate calculation
"""
var_term = np.sum(self.signaldist.prob_mat*(1-self.signaldist.prob_mat)*self.signaldist.vmat.mat**2)
tmp = self.signaldist.prob_mat *self.signaldist.vmat.mat
cov_term = np.sum(np.outer(tmp,tmp))-np.sum(tmp**2)
sd1 = np.sqrt(self.signaldist.reads * (var_term - cov_term))
sd2 = self.signaldist.analStd()
self.assertTrue(abs(sd1-sd2)<0.001*sd1)
|
{"hexsha": "e633004fb704ae3c5d9deac20f9dd45f4a61e6ee", "size": 1764, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_var.py", "max_stars_repo_name": "oaxiom/NucleoATAC", "max_stars_repo_head_hexsha": "e1ec144df924ab48f5f6fe65c52e4ec40d5a89f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 101, "max_stars_repo_stars_event_min_datetime": "2015-03-25T14:33:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T09:42:10.000Z", "max_issues_repo_path": "tests/test_var.py", "max_issues_repo_name": "oaxiom/NucleoATAC", "max_issues_repo_head_hexsha": "e1ec144df924ab48f5f6fe65c52e4ec40d5a89f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 93, "max_issues_repo_issues_event_min_datetime": "2015-04-15T00:25:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-17T07:55:18.000Z", "max_forks_repo_path": "tests/test_var.py", "max_forks_repo_name": "oaxiom/NucleoATAC", "max_forks_repo_head_hexsha": "e1ec144df924ab48f5f6fe65c52e4ec40d5a89f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2015-08-20T17:09:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T21:10:16.000Z", "avg_line_length": 38.347826087, "max_line_length": 108, "alphanum_fraction": 0.6910430839, "include": true, "reason": "import numpy", "num_tokens": 452}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 11:53:06 2021
@author: ghiggi
"""
import pandas as pd
import numpy as np
def _define_event_id(timesteps, maximum_interval_without_timesteps):
# Check type validity
if not isinstance(timesteps, (list, pd.Series,np.ndarray)):
raise TypeError("'timesteps' must be a list, pd.Series or np.array with datetime values.")
if isinstance(timesteps, list):
timesteps = np.array(timesteps)
if not np.issubdtype(timesteps.dtype, np.datetime64):
raise TypeError("'timesteps' must have datetime values")
if isinstance(timesteps, pd.Series):
timesteps = timesteps.to_numpy()
if not np.issubdtype(timesteps.dtype, np.datetime64):
raise TypeError("'timesteps' must have np.datetime64 dtype")
if isinstance(timesteps, np.ndarray):
if not np.issubdtype(timesteps.dtype, np.datetime64):
raise TypeError("'timesteps' must have np.datetime64 dtype")
if not isinstance(maximum_interval_without_timesteps, (np.timedelta64, pd.Timedelta)):
raise TypeError("'maximum_interval_without_timesteps' must be a np.timedelta64 or pd.Timedelta object.")
#-------------------------------------------------------------------------.
# Check there are timesteps
if len(timesteps) == 0:
raise ValueError("No timesteps provided.")
#-------------------------------------------------------------------------.
# Retrieve event id
if len(timesteps) == 1:
event_ids = np.array([0])
else:
cont_groups = np.diff(timesteps) < maximum_interval_without_timesteps
cont_groups = np.insert(cont_groups, 0, True)
event_id = 0
l_event_id = []
is_previous_True = True
for is_True in cont_groups:
if not is_True:
event_id += 1
l_event_id.append(event_id)
is_previous_True = is_True
elif is_True and not is_previous_True:
event_id += 1
l_event_id.append(event_id)
is_previous_True = is_True
else: # is_True and is_previous_True:
l_event_id.append(event_id)
is_previous_True = is_True
event_ids = np.array(l_event_id)
return event_ids
def _get_timesteps_duration(timesteps, unit="s"):
duration = np.max(timesteps) - np.min(timesteps)
return duration.to_numpy().astype('timedelta64['+ unit + "]")
|
{"hexsha": "58c57c6628484b1b963b783e8ae92e7a260d5915", "size": 2537, "ext": "py", "lang": "Python", "max_stars_repo_path": "mascdb/utils_event.py", "max_stars_repo_name": "ltelab/pymascdb", "max_stars_repo_head_hexsha": "d4ff0888aefcfa84a551459c61b0a93ab7d61622", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-13T23:28:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T23:28:55.000Z", "max_issues_repo_path": "mascdb/utils_event.py", "max_issues_repo_name": "jacgraz/pymascdb", "max_issues_repo_head_hexsha": "cf3f8e973fbcc6dce43c6e9f9661663f6a7e76eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mascdb/utils_event.py", "max_forks_repo_name": "jacgraz/pymascdb", "max_forks_repo_head_hexsha": "cf3f8e973fbcc6dce43c6e9f9661663f6a7e76eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-11T14:17:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T13:12:53.000Z", "avg_line_length": 41.5901639344, "max_line_length": 112, "alphanum_fraction": 0.6014978321, "include": true, "reason": "import numpy", "num_tokens": 560}
|
import numpy as np
from car.Car import Car
# Create a 2D world of 0's
height = 4
width = 6
world = np.zeros((height, width))
# Define the initial car state
initial_position = [0, 0] # [y, x] (top-left corner)
velocity = [0, 1] # [vy, vx] (moving to the right)
cara = Car(initial_position,velocity,world)
cara.move()
cara.move()
cara.turn_right()
cara.move()
cara.move()
cara.move()
cara.turn_right()
cara.move()
cara.move()
cara.turn_right()
cara.move()
cara.move()
cara.move()
cara.display_world()
|
{"hexsha": "38c9b5a06f15e93d47ae182e46b7611a13c4bf38", "size": 503, "ext": "py", "lang": "Python", "max_stars_repo_path": "self-driving/Matrix/move.py", "max_stars_repo_name": "xta0/Python-Playground", "max_stars_repo_head_hexsha": "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "self-driving/Matrix/move.py", "max_issues_repo_name": "xta0/Python-Playground", "max_issues_repo_head_hexsha": "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "self-driving/Matrix/move.py", "max_forks_repo_name": "xta0/Python-Playground", "max_forks_repo_head_hexsha": "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.9642857143, "max_line_length": 52, "alphanum_fraction": 0.6958250497, "include": true, "reason": "import numpy", "num_tokens": 161}
|
import numpy as np
from common.prioritized import PrioritizedExperienceReplay
class ReplayMemory(object):
def __init__(self, n_buffer,len_state,len_action):
# Parameters
self.n_buffer = n_buffer
self.len_state = len_state
self.len_action = len_action
self.n_experiences = 0
self.pointer = 0
# Buffer
self.state = np.zeros([n_buffer,len_state ],dtype=float)
self.action = np.zeros([n_buffer,len_action],dtype=float)
self.reward = np.zeros( n_buffer ,dtype=float)
self.new_state = np.zeros([n_buffer,len_state ],dtype=float)
# Priority
self.priority = PrioritizedExperienceReplay(n_buffer)
def getBatch(self):
idx,weight = self.priority.sample()
return (self. state[idx],self. action[idx],
self.reward[idx],self.new_state[idx]), weight
def size(self):
return self.n_buffer
def count(self):
return self.n_experiences
def add(self, state, action, reward, new_state, done):
n = self.n_experiences
if n < self.n_buffer:
self.n_experiences += 1
else:
n = self.pointer
self.pointer += 1
if self.pointer>=self.n_buffer:
self.pointer = 0
self.state [n] = state
self.action [n] = action
self.reward [n] = reward
self.new_state[n] = new_state
def erase(self):
self.state = np.zeros([self.n_buffer,self.len_state ],dtype=float)
self.action = np.zeros([self.n_buffer,self.len_action],dtype=float)
self.reward = np.zeros( self.n_buffer ,dtype=float)
self.new_state = np.zeros([self.n_buffer,self.len_state ],dtype=float)
self.n_experiences = 0
self.pointer = 0
|
{"hexsha": "d72479d0e051c4a22354d598e1cbb9a5f7561db5", "size": 1907, "ext": "py", "lang": "Python", "max_stars_repo_path": "ReinforcementLearning/ExperienceReplay.py", "max_stars_repo_name": "Suryavf/SelfDrivingCar", "max_stars_repo_head_hexsha": "362ac830516366b1c31ef01ea0456eb99f0d9722", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-08-14T18:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T05:54:49.000Z", "max_issues_repo_path": "ReinforcementLearning/ExperienceReplay.py", "max_issues_repo_name": "Suryavf/SelfDrivingCar", "max_issues_repo_head_hexsha": "362ac830516366b1c31ef01ea0456eb99f0d9722", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-05T15:20:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-22T07:47:26.000Z", "max_forks_repo_path": "ReinforcementLearning/ExperienceReplay.py", "max_forks_repo_name": "Suryavf/SelfDrivingCar", "max_forks_repo_head_hexsha": "362ac830516366b1c31ef01ea0456eb99f0d9722", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-18T15:46:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-18T15:46:09.000Z", "avg_line_length": 35.3148148148, "max_line_length": 78, "alphanum_fraction": 0.5825904562, "include": true, "reason": "import numpy", "num_tokens": 437}
|
# """
# Runs libEnsemble on the 6-hump camel problem. Documented here:
# https://www.sfu.ca/~ssurjano/camel6.html
#
# Execute via the following command:
# mpiexec -np 4 python3 test_6-hump_camel_uniform_sampling.py
# The number of concurrent evaluations of the objective function will be 4-1=3.
# """
from __future__ import division
from __future__ import absolute_import
from mpi4py import MPI # for libE communicator
import sys, os # for adding to path
import numpy as np
# Import libEnsemble main
from libensemble.libE import libE
# Import sim_func
from libensemble.sim_funcs.six_hump_camel import six_hump_camel
# Import gen_func
from libensemble.gen_funcs.uniform_sampling import uniform_random_sample
script_name = os.path.splitext(os.path.basename(__file__))[0]
#State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': six_hump_camel, # This is the function whose output is being minimized
'in': ['x'], # These keys will be given to the above function
'out': [('f',float), # This is the output from the function being minimized
],
'save_every_k': 400
}
# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {'gen_f': uniform_random_sample,
'in': ['sim_id'],
'out': [('x',float,2)],
'lb': np.array([-3,-2]),
'ub': np.array([ 3, 2]),
'gen_batch_size': 500,
'save_every_k': 300
}
# Tell libEnsemble when to stop
exit_criteria = {'gen_max': 501}
np.random.seed(1)
persis_info = {}
for i in range(MPI.COMM_WORLD.Get_size()):
persis_info[i] = {'rand_stream': np.random.RandomState(i)}
# Perform the run
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info)
if MPI.COMM_WORLD.Get_rank() == 0:
short_name = script_name.split("test_", 1).pop()
filename = short_name + '_results_History_length=' + str(len(H)) + '_evals=' + str(sum(H['returned'])) + '_ranks=' + str(MPI.COMM_WORLD.Get_size())
print("\n\n\nRun completed.\nSaving results to file: " + filename)
np.save(filename, H)
minima = np.array([[ -0.089842, 0.712656],
[ 0.089842, -0.712656],
[ -1.70361, 0.796084],
[ 1.70361, -0.796084],
[ -1.6071, -0.568651],
[ 1.6071, 0.568651]])
tol = 0.1
for m in minima:
assert np.min(np.sum((H['x']-m)**2,1)) < tol
print("\nlibEnsemble with Uniform random sampling has identified the 6 minima within a tolerance " + str(tol))
|
{"hexsha": "c4d164d61d1e68fbf3646ee21d28e4a71c382f5d", "size": 2711, "ext": "py", "lang": "Python", "max_stars_repo_path": "libensemble/tests/regression_tests/test_6-hump_camel_uniform_sampling.py", "max_stars_repo_name": "Kardyne/libensemble", "max_stars_repo_head_hexsha": "566c8f5daafe2ad4deebc13198a1e131e4ce6542", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libensemble/tests/regression_tests/test_6-hump_camel_uniform_sampling.py", "max_issues_repo_name": "Kardyne/libensemble", "max_issues_repo_head_hexsha": "566c8f5daafe2ad4deebc13198a1e131e4ce6542", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libensemble/tests/regression_tests/test_6-hump_camel_uniform_sampling.py", "max_forks_repo_name": "Kardyne/libensemble", "max_forks_repo_head_hexsha": "566c8f5daafe2ad4deebc13198a1e131e4ce6542", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6710526316, "max_line_length": 151, "alphanum_fraction": 0.6318701586, "include": true, "reason": "import numpy", "num_tokens": 725}
|
#ifndef __COMMON_H__
#define __COMMON_H__
#include <array>
#include <assert.h>
#include "otype.hpp"
// define random seed
#define SEED 2333333
// define stencil type
#define STENCIL_STAR 0
#define STENCIL_BOX 1
// define boundary type
#define BOUND_OPEN 0
#define BOUNDARY_OPEN 0
#define BOUND_PERIODIC 1
#define BOUNDARY_PERIODIC 1
// define basic data types
#define DATA_UNKNOWN -1
#define DATA_INT 0
#define DATA_FLOAT 1
#define DATA_DOUBLE 2
#define DATA_BOOL 3
#define NO_STENCIL 0
// define basic data size
#define DATA_SIZE(i) ((int[]{4, 4, 8})[i])
// define shape dimension [x, y, z]
typedef std::array<int, 3> Shape;
typedef std::array<int, 3> int3;
///:for o in ['+','-','*']
int3 operator${o}$(const int3& a, const int3& b);
///:endfor
typedef int DataType;
typedef int DATA_TYPE;
///:mute
///:set i = 0
///:include "NodeType.fypp"
///:endmute
//define node types
enum NodeType{
///:for i in range(len(L))
///:if i == 0
${L[i][0]}$ = 0,
///:elif i == len(L) - 1
${L[i][0]}$
///:else
${L[i][0]}$,
///:endif
///:endfor
};
#define NUM_NODE_TYPES ${len(L)}$
#include <armadillo>
typedef arma::Cube<int> cube_int;
typedef arma::Cube<float> cube_float;
typedef arma::Cube<double> cube_double;
#define SCALAR_SHAPE Shape({{1,1,1}})
#define WITHOUT_FUSION_KERNEL 0
#define WITH_FUSION_KERNEL 1
#define CSET(A, B) \
tmp_node_key = gen_node_key(__FILE__, __LINE__); \
find_node(tmp_node, tmp_node_key); \
if (is_valid(tmp_node)) { \
A = tmp_node; \
} else { \
tmp_node = B; \
cache_node(tmp_node, tmp_node_key); \
A = tmp_node; \
}
#define THROW_LOGIC_EXCEPTION(msg) \
BOOST_THROW_EXCEPTION(std::logic_error(msg))
#define ENSURE_VALID_PTR(A) \
assert(A && "pointer is null");
//if(A == NULL) THROW_LOGIC_EXCEPTION("pointer is null.");
#define ENSURE_VALID_PTR_MSG(A, MSG) \
assert(A && MSG);
//if(A == NULL) THROW_LOGIC_EXCEPTION(MSG);
#define MPI_ORDER_START oa::MPI::global()->order_start();
#define MPI_ORDER_END oa::MPI::global()->order_end();
#define MPI_RANK oa::MPI::global()->rank()
#define MPI_SIZE oa::MPI::global()->size()
extern bool g_cache;
extern bool g_debug;
extern bool transticbegin;
// #define DEBUG
#endif
|
{"hexsha": "f4fc322f99650dd6b33b76920352a1d5e1efb60e", "size": 2494, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "common.hpp", "max_stars_repo_name": "hxmhuang/OpenArray_Dev", "max_stars_repo_head_hexsha": "863866a6b7accf21fa253567b0e66143c7506cdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-09-08T05:01:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-23T13:11:25.000Z", "max_issues_repo_path": "common.hpp", "max_issues_repo_name": "hxmhuang/OpenArray_Dev", "max_issues_repo_head_hexsha": "863866a6b7accf21fa253567b0e66143c7506cdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common.hpp", "max_forks_repo_name": "hxmhuang/OpenArray_Dev", "max_forks_repo_head_hexsha": "863866a6b7accf21fa253567b0e66143c7506cdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-08-16T08:32:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T08:44:04.000Z", "avg_line_length": 21.8771929825, "max_line_length": 60, "alphanum_fraction": 0.601042502, "num_tokens": 677}
|
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
def to_pt(l):
# return tuple(map(float, l.split(" - ")[0].replace("(", "").split(",")))
return l.split(" - ")[0].replace("(", "").split(",")
with open("/home/slam_data/data_sets/pcl_plane_pts.txt", "r") as conn:
pts1 = conn.readlines()
pts1 = [tuple(map(float, to_pt(l))) for l in pts1 if l!="\n"]
pts1 = np.asarray(pts1)
# pts1 = np.zeros_like(pts1)
# exit()
coefs = np.asarray([0.0338028, 0.930114, 0.365712, -1417.56])
coefs1 = np.asarray([0.0413848, 0.936528, 0.348142, -1354.07])
pixels = np.asarray([[422, 220], [310, 478], [638, 459], [638, 240], [476, 220]])
pixels = np.concatenate([pixels, np.ones((pixels.shape[0], 1))], axis=1)
R = np.asarray([
[5.7592685448804468e+02, 0., 3.1515026356388171e+02],
[0., 5.7640791601093247e+02, 2.3058580662101753e+02],
[0., 0., 1.]
])
R1 = np.linalg.pinv(R)
print(R.shape)
print(pixels.shape)
# pixels1 = np.matmul(pixels, R1.T)
pixels1 = []
f1 = R[0, 0]
f2 = R[1, 1]
c1 = R[0, 2]
c2 = R[1, 2]
for y1, y2, _ in pixels:
pixels1.append([
(y1 - c1) / f1,
(y2 - c2) / f2, 1
])
pixels1 = np.asarray(pixels1)
print(pixels1.shape)
# a = pixels
# plt.scatter(a[:, 0], a[:, 1])
# plt.show()
a = coefs[:3]
thetas = [coefs[3] / np.dot(a, b) for b in pixels1]
pts = np.vstack([x * t for x, t in zip(pixels1, thetas)])
# print(pts1.shape)
# print(pts.shape)
ds = np.average(np.abs(np.asarray([
np.dot(x, a) - coefs[3] for x in pts
])))
pixels2 = np.matmul(pts, R.T)
for pixel in pixels2:
pixel /= pixel[2]
# print(np.round(pixels2, 2))
# print(pixels)
# exit()
a = coefs1[:3]
ds1 = np.average(np.abs(np.asarray([
np.dot(x, a) - coefs1[3] for x in pts1
])))
print(ds, ds1)
# vals1, vecs1 = np.linalg.eig(np.cov(pts1.T))
# vals, vecs = np.linalg.eig(np.cov(pts.T))
|
{"hexsha": "6c1ead886d2fe69bbfc3956c17c72469d136bd92", "size": 1855, "ext": "py", "lang": "Python", "max_stars_repo_path": "temp3.py", "max_stars_repo_name": "BOpermanis/pyORBSLAM2", "max_stars_repo_head_hexsha": "ff7c303bc6d2023fc3c22090e6af048072cce90b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "temp3.py", "max_issues_repo_name": "BOpermanis/pyORBSLAM2", "max_issues_repo_head_hexsha": "ff7c303bc6d2023fc3c22090e6af048072cce90b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "temp3.py", "max_forks_repo_name": "BOpermanis/pyORBSLAM2", "max_forks_repo_head_hexsha": "ff7c303bc6d2023fc3c22090e6af048072cce90b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1875, "max_line_length": 81, "alphanum_fraction": 0.6037735849, "include": true, "reason": "import numpy", "num_tokens": 697}
|
[STATEMENT]
lemma Qp_pow_ConsI:
assumes "t \<in> carrier Q\<^sub>p"
assumes "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
shows "t#x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t # x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)
[PROOF STEP]
using assms cartesian_power_cons[of x Q\<^sub>p m t] Suc_eq_plus1
[PROOF STATE]
proof (prove)
using this:
t \<in> carrier Q\<^sub>p
x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)
\<lbrakk>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>); t \<in> carrier Q\<^sub>p\<rbrakk> \<Longrightarrow> t # x \<in> carrier (Q\<^sub>p\<^bsup>m + 1\<^esup>)
Suc ?n = ?n + 1
goal (1 subgoal):
1. t # x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)
[PROOF STEP]
by presburger
|
{"llama_tokens": 355, "file": "Padic_Field_Padic_Semialgebraic_Function_Ring", "length": 2}
|
import operator
import os
import re
from collections import OrderedDict
from typing import List
import numpy as np
from sortedcollections import OrderedSet
from EXIFnaming.helpers import constants as c, settings
from EXIFnaming.helpers.decode import read_exiftag
from EXIFnaming.helpers.program_dir import log
from EXIFnaming.helpers.tags import SceneModeAbbreviations
__all__ = ["FileMetaData", "Location", "add_dict", "FilenameAccessor", "FilenameBuilder"]
class Location:
location_keys = ['Country', 'State', 'City', 'Location']
tag_names = {'Country': ['Country', 'LocationCreatedCountryName'],
'State': ['State', 'LocationCreatedProvinceState'],
'City': ['City', 'LocationCreatedCity'],
'Location': ['Location', 'LocationCreatedSublocation']}
def __init__(self, data=None, i=-1):
self.location = OrderedDict()
if data:
if i > -1:
self.update_via_tag_array(data, i)
else:
self.update(data)
def update(self, data: dict):
for key in Location.location_keys:
if key in data and data[key]:
if data[key] == "none":
self.location.pop(key, None)
else:
self.location[key] = data[key]
def update_via_tag_array(self, data: dict, i: int):
for key in Location.location_keys:
if key in data and data[key][i]:
self.location[key] = data[key][i]
def get_minor(self) -> list:
out = []
minor_keys = ["City", "Location"]
for key in minor_keys:
if not key in self.location: continue
out.append(self.location[key])
return out
def to_tag_dict(self) -> dict:
tag_dict = {}
if self.location.keys():
loc_tags = []
for key in self.location:
loc_tags.append(self.location[key])
for tag_name in Location.tag_names[key]:
tag_dict[tag_name] = self.location[key]
tag_dict['Keywords'] = loc_tags
tag_dict['Subject'] = list(loc_tags)
return tag_dict
def __str__(self):
out = ""
for key in self.location:
out += self.location[key] + ", "
return out.strip(", ")
class FileMetaData:
restriction_keys = ['directory', 'name_main', 'first', 'last', 'name_part']
tag_setting_keys = ['title', 'tags', 'tags2', 'tags3', 'rating', 'description', 'gps']
kown_keys = restriction_keys + tag_setting_keys + Location.location_keys
linesep = " | "
secondary_regex = re.compile(r"_[0-9]+[A-Z]+\d*[2-9]")
dateTimeOriginal_regex = re.compile(r"\d{4}:\d{2}:\d{2} \d{2}:\d{2}:\d{2}")
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.id = self.filename
self.title = ""
self.tags = []
self.tags2 = []
self.tags3 = []
self.descriptions = []
self.description_tree = OrderedDict()
self.location = Location()
self.rating = None
self.gps = ()
self.has_gps_in_exif = False
self.dateTimeOriginal = ""
self.has_dateTimeOriginal_in_exif = False
self.tagDict = None
self.filenameAccessor = FilenameAccessor(filename)
self.main_name = self.filenameAccessor.pre
self.counter = self.filenameAccessor.counter_main()
self.has_changed = False
def import_filename(self):
self.id = self.filenameAccessor.identifier()
self.tags = self.filenameAccessor.tags()
self.tags2 = self.filenameAccessor.mapped_modes()
match = FileMetaData.secondary_regex.search(self.id)
if match:
self.rating = 2
else:
self.rating = 3
for process in self.filenameAccessor.processes:
des = process_to_description(process)
add_dict(self.description_tree, des)
def import_fullname(self, startdir: str):
self.id, self.tags = fullname_to_tag(self.directory, self.filenameAccessor.name, startdir)
def import_exif(self, overwrite_gps=False):
self.tagDict = read_exiftag(self.directory, self.filename)
self.location.update(self.tagDict)
if "Rating" in self.tagDict and int(self.tagDict["Rating"]) > 0:
self.rating = self.tagDict["Rating"]
if not overwrite_gps and "GPS Latitude" in self.tagDict and self.tagDict["GPS Latitude"]:
self.has_gps_in_exif = True
if "DateTimeOriginal" in self.tagDict and self.tagDict["DateTimeOriginal"]:
self.has_dateTimeOriginal_in_exif = True
if "User Comment" in self.tagDict:
user_comment = self.tagDict["User Comment"]
user_comment_split = user_comment.split("..")
user_comment_split = [string for line in user_comment_split for string in
line.split(FileMetaData.linesep + ".")]
pano_keys = ["Projection", "FOV", "Ev"]
for entry in user_comment_split:
if not ": " in entry: continue
entry = entry.strip(FileMetaData.linesep)
key, value = entry.split(": ", 1)
if not key or not value: continue
if key in FileMetaData.kown_keys: continue
if key in pano_keys:
key = "PANO-" + key
self.description_tree[key] = value
def update(self, data: dict):
def good_key(key: str):
return key in data and data[key]
if not self.passes_restrictions(data):
return
if good_key('title'): self.title = data['title']
if good_key('tags'): self.tags += [tag for tag in data['tags'].split(', ') if tag and tag not in self.tags]
if good_key('tags2'): self.tags2 += [tag for tag in data['tags2'].split(', ') if tag and tag not in self.tags2]
if good_key('tags3'): self.tags3 += [tag for tag in data['tags3'].split(', ') if tag and tag not in self.tags3]
if good_key('gps') and not self.has_gps_in_exif: self.gps = data['gps'].split(', ')
if good_key('rating'): self.rating = data['rating']
if good_key('description'): self.descriptions.append(data['description'])
if good_key('DateTimeOriginal') and not self.has_dateTimeOriginal_in_exif: self.dateTimeOriginal = data['DateTimeOriginal']
self.location.update(data)
for key in data:
if not data[key] or key in FileMetaData.kown_keys: continue
self.description_tree[key] = data[key]
def passes_restrictions(self, data):
def not_match_entry(key: str, func):
return key in data and data[key] and not func(data[key])
if not_match_entry('directory', lambda value: all(val in self.directory for val in value.split(', '))):
return False
if not_match_entry('name_main', lambda value: value == self.main_name):
return False
if not_match_entry('first', lambda value: value <= self.counter):
return False
if not_match_entry('last', lambda value: self.counter <= value):
return False
if not_match_entry('name_part', lambda value: all(val in self.filename for val in value.split(', '))):
return False
self.has_changed = True
return True
def _write_description_tree(self):
if any(["HDR" in key or "TM" in key for key in self.description_tree]):
self.description_tree["HDR-program"] = settings.hdr_program
if any(["PANO" in key for key in self.description_tree]):
self.description_tree["PANO-program"] = settings.panorama_program
description_tree = OrderedDict()
process_order = ["HDR", "TM", "PANO", ""]
for key_part in process_order:
process_subkeys = [key for key in self.description_tree if key_part in key]
for key in process_subkeys:
description_tree[key] = self.description_tree[key]
description_formated = format_plain(description_tree)
self.descriptions.append(description_formated)
def _write_description_tags(self):
tags = []
if self.tags:
tags.append(", ".join(self.tags))
if str(self.location):
tags.append(str(self.location))
if self.tags2:
tags.append(", ".join(self.tags2))
if tags:
self.descriptions.append((FileMetaData.linesep + "\n").join(tags))
def to_tag_dict(self) -> dict:
if not self.title:
self.title = ", ".join(OrderedSet(self.location.get_minor() + self.tags))
self._write_description_tags()
if len(self.description_tree.keys()) > 0:
self._write_description_tree()
full_description = (FileMetaData.linesep + "\n\n").join(self.descriptions)
tagDict = {'Label': self.filenameAccessor.name, 'Title': self.title,
'Keywords': self.tags, 'Subject': list(self.tags),
'Description': full_description, 'UserComment': full_description,
'Identifier': self.id, 'Rating': self.rating}
if settings.photographer:
tagDict['Artist'] = settings.photographer
if len(self.gps) == 2:
tagDict["GPSLatitudeRef"] = self.gps[0]
tagDict["GPSLatitude"] = self.gps[0]
tagDict["GPSLongitudeRef"] = self.gps[1]
tagDict["GPSLongitude"] = self.gps[1]
if self.dateTimeOriginal and FileMetaData.dateTimeOriginal_regex.match(self.dateTimeOriginal):
tagDict["DateTimeOriginal"] = self.dateTimeOriginal
add_dict(tagDict, self.location.to_tag_dict())
tagDict['Keywords'].extend(self.tags2 + self.tags3)
tagDict['Subject'].extend(self.tags2 + self.tags3)
return tagDict
def __str__(self):
return "FileMetaData(" + self.title + " " + str(self.tags) + " " + str(self.descriptions) + " " + str(
self.location) + ")"
def add_dict(dict1: dict, dict2: dict):
for key in dict2:
if key in dict1:
dict1[key] += dict2[key]
else:
dict1[key] = dict2[key]
def format_as_tree(data: dict) -> str:
def indent(string: str) -> str:
return indented_newline + string.replace("\n", indented_newline)
out = ""
indented_newline = "\n- "
for key in data:
if not data[key]:
continue
if type(data[key]) == str:
value = data[key]
if "\n" in value: value = indent(value)
else:
value = format_as_tree(data[key])
value = indent(value)
out += key + ": " + value + " \n"
out = out.strip(indented_newline)
return out
def sort_by_list(data: dict, order: list) -> OrderedDict:
index_map = {v: i for i, v in enumerate(order)}
return OrderedDict(sorted(data.items(), key=lambda pair: index_map[pair[0]]))
def format_plain(data: dict) -> str:
out = ""
for key in data:
out += key + ": " + data[key] + FileMetaData.linesep + "\n"
out = out[:-2]
return out
def format_tree_plain(data: dict) -> str:
out = ""
for key in data:
if not data[key]:
continue
if type(data[key]) == str:
value = data[key]
else:
value = format_plain(data[key])
out += key + ": " + value + " - "
out = out[:-2]
return out
def set_path(data: dict, path, value=None):
sub_data = data
for key in path[:-1]:
if not key in sub_data:
sub_data[key] = OrderedDict()
sub_data = sub_data[key]
if value:
sub_data[path[-1]] = value
elif not path[-1] in sub_data:
sub_data[path[-1]] = OrderedDict()
def fullname_to_tag(dirpath: str, filename: str, startdir=""):
relpath = os.path.relpath(dirpath, startdir)
if relpath == ".": relpath = ""
dirpath_split = relpath.split(os.sep)
filename_prim = filename.split("_")[0]
image_id = filename
image_tags = dirpath_split + [filename_prim]
return image_id, image_tags
def scene_to_tag(scene: str) -> list:
out = [scene]
scene_striped = scene.strip('123456789').split('$')[0]
if not scene in c.RecModes:
out.append(scene_striped.lower())
return out
def process_to_tag(process: str) -> list:
process_striped = process.strip('123456789').split('$')[0]
process_main = process_striped
process_sub = ""
if '-' in process_main:
process_main, process_sub = process_striped.split('-', 1)
out = [process_striped]
if process_main in process_to_tag.map:
out.extend(process_to_tag.map[process_main])
if process_main == "HDR" and "-" in process_sub:
out.append("Tone Mapping")
return out
process_to_tag.map = {"HDR": ["HDR"], "HDRT": ["HDR", "Tone Mapping"], "PANO": ["Panorama"],
"ANIMA": ["Animation"], "RET": ["retouch"], "ZOOM": ["magnified"], "CUT": ["CUT"],
"SMALL": ["SMALL"]}
def is_scene_abbreviation(name: str):
return name in SceneModeAbbreviations or name in c.RecModes
def is_process_tag(name: str):
scene_striped = name.strip('123456789').split('$')[0]
scene_main = scene_striped.split('-')[0]
return scene_main in process_to_tag.map.keys()
def process_to_description(process: str) -> dict:
description = {}
if not "HDR" in process: return description
process_striped = process.strip('123456789').split('$')[0]
process_split = process_striped.split('-')
if len(process_split) > 1:
if process_split[1] in c.hdr_algorithm:
description["HDR-Algorithm"] = c.hdr_algorithm[process_split[1]]
else:
log().info("%s not in hdr_algorithm", process_split[1])
if len(process_split) > 2:
if process_split[2] in c.tm_preset:
description["TM-Preset"] = c.tm_preset[process_split[2]]
else:
log().info("%s not in tm_preset", process_split[2])
return description
class FilenameAccessor:
counter_main_regex = re.compile(r'(M?\d+)')
def __init__(self, filename):
self.filename = filename
if '.' in filename:
self.name, self.ext = filename.rsplit('.', 1)
self.ext = "." + self.ext
else:
self.name = self.filename
self.ext = ''
self.main = []
self.pre = ""
self.primmodes = []
self.primtags = []
self.counter = ""
self.scenes = []
self.processes = []
self.posttags = []
self._split_filename()
def _split_filename(self):
filename_splited = self.name.split('_')
if len(filename_splited) == 0: return
counter_index = self._counter_index()
if counter_index < 0: return
for i, subname in enumerate(filename_splited):
if not subname: continue
if i > counter_index:
if is_process_tag(subname):
self.processes.append(subname)
elif is_scene_abbreviation(subname):
self.scenes.append(subname)
else:
self.posttags.append(subname)
else:
self.main.append(subname)
if i == 0:
self.pre = subname
elif i == counter_index:
self.counter = subname
elif subname.isupper() or subname.isnumeric():
self.primmodes.append(subname)
else:
self.primtags.append(subname)
def tags(self) -> List[str]:
return self.primtags + self.posttags
def modes(self) -> List[str]:
return self.scenes + self.processes
def mapped_modes(self) -> List[str]:
modes = [tag2 for tag in self.scenes for tag2 in scene_to_tag(tag)]
modes += [tag2 for tag in self.processes for tag2 in process_to_tag(tag)]
return modes
def identifier(self) -> str:
return "_".join(self.main + self.modes())
def has_tag(self, tag) -> bool:
return tag in self.primtags or tag in self.posttags
def sorted_filename(self):
arr = self.main + self.scenes + self.processes + self.posttags
if len(arr) == 0:
return self.filename
return "_".join(arr) + self.ext
def counter_main(self) -> str:
match = FilenameAccessor.counter_main_regex.search(self.counter)
if match:
return match.group(1)
return self.counter
def mainname(self) -> str:
arr = self.main[:-1] + [self.counter_main()]
return "_".join(arr)
def _counter_index_longest(self) -> int:
filename_splited = self.name.split('_')
if len(filename_splited) == 0: return -1
# get index of counter via longest item that looks like a counter
indeces = [(i, len(e)) for i, e in enumerate(filename_splited) if self._is_counter(e)]
if len(indeces) == 0:
return -1
if len(indeces) == 1:
return indeces[0][0]
indeces.sort(key=operator.itemgetter(1))
return indeces[-1][0]
def _counter_index(self) -> int:
filename_splited = self.name.split('_')
if len(filename_splited) == 0: return -1
indeces = [i for i, e in enumerate(filename_splited) if self._is_counter(e)]
if len(indeces) == 0:
return -1
return indeces[-1]
def _is_counter(self, subname) -> bool:
if not subname:
return False
if self.ext in settings.video_types:
return subname[0] == "M" and np.chararray.isdigit(subname[-1])
starts_and_ends_with_digit = (np.chararray.isdigit(subname[0]) and np.chararray.isdigit(subname[-1]))
return starts_and_ends_with_digit
def is_direct_successor_of(self, other: 'FilenameAccessor'):
selfcounter = int(self.counter_main())
othercounter = int(other.counter_main())
diff = selfcounter - othercounter
return self.pre == other.pre and (diff == 0 or diff == 1) and self.has_similar_tags(other)
def has_similar_tags(self, other: 'FilenameAccessor'):
return self.first_posttag() == other.first_posttag()
def first_posttag(self):
return self.posttags[0] if len(self.posttags) > 0 else ""
class FilenameBuilder:
def __init__(self, old_filename: str):
self.main = []
self.post = []
self.version = ""
self.accessor = FilenameAccessor(old_filename)
def add_main(self, part):
if part:
self.main.append(part)
return self
def add_post(self, part):
if part:
self.post.append(part)
return self
def set_version(self, version):
self.version = version
return self
def use_old_tags(self):
self.post += [tag for tag in self.accessor.processes if not tag in self.post]
self.post += [tag for tag in self.accessor.primtags if not tag in self.main]
self.post += [tag for tag in self.accessor.posttags if not tag in self.post]
def build(self) -> str:
if self.version:
arr = self.main + [self.version] + self.post
else:
arr = self.main + self.post
return "_".join(arr) + self.accessor.ext
|
{"hexsha": "3b07a7f67ad452d41b2d743c2c4d264e2e08cc54", "size": 19501, "ext": "py", "lang": "Python", "max_stars_repo_path": "EXIFnaming/helpers/tag_conversion.py", "max_stars_repo_name": "mvolkert/EXIFnaming", "max_stars_repo_head_hexsha": "2cd7aa03cec8e05e046ccc84c523d560ccc3b8c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EXIFnaming/helpers/tag_conversion.py", "max_issues_repo_name": "mvolkert/EXIFnaming", "max_issues_repo_head_hexsha": "2cd7aa03cec8e05e046ccc84c523d560ccc3b8c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EXIFnaming/helpers/tag_conversion.py", "max_forks_repo_name": "mvolkert/EXIFnaming", "max_forks_repo_head_hexsha": "2cd7aa03cec8e05e046ccc84c523d560ccc3b8c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5187265918, "max_line_length": 131, "alphanum_fraction": 0.5970975847, "include": true, "reason": "import numpy", "num_tokens": 4517}
|
#ifndef LIB_INCLUDE_TICK_ARRAY_VECTOR_OPERATIONS_H_
#define LIB_INCLUDE_TICK_ARRAY_VECTOR_OPERATIONS_H_
// License: BSD 3 clause
#include <atomic>
#include <vector>
#include <numeric>
#include <algorithm>
#include <type_traits>
#include "promote.h"
#include "tick/base/defs.h"
#if defined(TICK_USE_MKL)
#include "mkl.h"
#elif defined(TICK_USE_CBLAS)
#if defined(__APPLE__)
#include <Accelerate/Accelerate.h>
// TODO(svp) Disabling this feature until we find
// a good way to determine if ATLAS is actually available
#else
extern "C" {
#include <cblas.h>
}
#endif // defined(__APPLE__)
#else
#include "tick/array/vector/ops_unoptimized.h"
namespace tick {
template <typename T>
using vector_operations = detail::vector_operations_unoptimized<T>;
}
#endif
#if defined(TICK_USE_MKL) || defined(TICK_USE_CBLAS)
#include "tick/array/vector/ops_blas.h"
namespace tick {
template <typename T>
using vector_operations = detail::vector_operations_cblas<T>;
}
#endif
#include "tick/array/vector/ops_unoptimized_impl.h"
#endif // LIB_INCLUDE_TICK_ARRAY_VECTOR_OPERATIONS_H_
|
{"hexsha": "bb288e98d09b991dacd20587d2b805bfeb4df702", "size": 1085, "ext": "h", "lang": "C", "max_stars_repo_path": "lib/include/tick/array/vector_operations.h", "max_stars_repo_name": "sumau/tick", "max_stars_repo_head_hexsha": "1b56924a35463e12f7775bc0aec182364f26f2c6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 411.0, "max_stars_repo_stars_event_min_datetime": "2017-03-30T15:22:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T01:58:34.000Z", "max_issues_repo_path": "lib/include/tick/array/vector_operations.h", "max_issues_repo_name": "saurabhdash/tick", "max_issues_repo_head_hexsha": "bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 345.0, "max_issues_repo_issues_event_min_datetime": "2017-04-13T14:53:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T00:46:22.000Z", "max_forks_repo_path": "lib/include/tick/array/vector_operations.h", "max_forks_repo_name": "saurabhdash/tick", "max_forks_repo_head_hexsha": "bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 102.0, "max_forks_repo_forks_event_min_datetime": "2017-04-25T11:47:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T11:45:49.000Z", "avg_line_length": 20.0925925926, "max_line_length": 67, "alphanum_fraction": 0.7751152074, "num_tokens": 263}
|
import warnings
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold
class _Sphere(EuclideanEmbeddedSubmanifold):
"""Base class for tensors with unit Frobenius norm."""
def __init__(self, *shape, name, dimension):
if len(shape) == 0:
raise TypeError("Need shape parameters.")
self._shape = shape
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.pi
def inner(self, X, U, V):
return float(np.tensordot(U, V, axes=U.ndim))
def norm(self, X, U):
return la.norm(U)
def dist(self, U, V):
# Make sure inner product is between -1 and 1
inner = max(min(self.inner(None, U, V), 1), -1)
return np.arccos(inner)
def proj(self, X, H):
return H - self.inner(None, X, H) * X
def weingarten(self, X, U, V):
return -self.inner(X, X, V) * U
def exp(self, X, U):
norm_U = self.norm(None, U)
# Check that norm_U isn't too tiny. If very small then
# sin(norm_U) / norm_U ~= 1 and retr is extremely close to exp.
if norm_U > 1e-3:
return X * np.cos(norm_U) + U * np.sin(norm_U) / norm_U
else:
return self.retr(X, U)
def retr(self, X, U):
Y = X + U
return self._normalize(Y)
def log(self, X, Y):
P = self.proj(X, Y - X)
dist = self.dist(X, Y)
# If the two points are "far apart", correct the norm.
if dist > 1e-6:
P *= dist / self.norm(None, P)
return P
def rand(self):
Y = rnd.randn(*self._shape)
return self._normalize(Y)
def randvec(self, X):
H = rnd.randn(*self._shape)
P = self.proj(X, H)
return self._normalize(P)
def transp(self, X, Y, U):
return self.proj(Y, U)
def pairmean(self, X, Y):
return self._normalize(X + Y)
def zerovec(self, X):
return np.zeros(self._shape)
def _normalize(self, X):
"""Return Frobenius-normalized version of X in ambient space."""
return X / self.norm(None, X)
class Sphere(_Sphere):
r"""The sphere manifold.
Manifold of shape :math:`n_1 \times n_2 \times \ldots \times n_k` tensors
with unit 2-norm.
The metric is such that the sphere is a Riemannian submanifold of Euclidean
space.
Notes:
The implementation of the Weingarten map is taken from [AMT2013]_.
"""
def __init__(self, *shape):
if len(shape) == 0:
raise TypeError("Need shape parameters.")
if len(shape) == 1:
(n1,) = shape
name = f"Sphere manifold of {n1}-vectors"
elif len(shape) == 2:
n1, n2 = shape
name = f"Sphere manifold of {n1}x{n2} matrices"
else:
name = f"Sphere manifold of shape {shape} tensors"
dimension = np.prod(shape) - 1
super().__init__(*shape, name=name, dimension=dimension)
class _SphereSubspaceIntersectionManifold(_Sphere):
def __init__(self, projector, name, dimension):
m, n = projector.shape
assert m == n, "projection matrix is not square"
if dimension == 0:
warnings.warn(
"Intersected subspace is 1-dimensional. The manifold "
"therefore has dimension 0 as it only consists of isolated "
"points"
)
self._subspace_projector = projector
super().__init__(n, name=name, dimension=dimension)
def _validate_span_matrix(self, U):
if len(U.shape) != 2:
raise ValueError("Input array must be 2-dimensional")
num_rows, num_columns = U.shape
if num_rows < num_columns:
raise ValueError(
"The span matrix cannot have fewer rows than columns"
)
def proj(self, X, H):
Y = super().proj(X, H)
return self._subspace_projector @ Y
def rand(self):
X = super().rand()
return self._normalize(self._subspace_projector @ X)
def randvec(self, X):
Y = super().randvec(X)
return self._normalize(self._subspace_projector @ Y)
class SphereSubspaceIntersection(_SphereSubspaceIntersectionManifold):
r"""Sphere-subspace intersection manifold.
Manifold of n-dimensional unit 2-norm vectors intersecting the
:math:`r`-dimensional subspace of :math:`\R^n` spanned by the columns of
the matrix ``U`` of size :math:`n \times r`.
"""
def __init__(self, U):
self._validate_span_matrix(U)
m = U.shape[0]
Q, _ = la.qr(U)
projector = Q @ Q.T
subspace_dimension = la.matrix_rank(projector)
name = (
f"Sphere manifold of {m}-dimensional vectors intersecting a "
f"{subspace_dimension}-dimensional subspace"
)
dimension = subspace_dimension - 1
super().__init__(projector, name, dimension)
class SphereSubspaceComplementIntersection(
_SphereSubspaceIntersectionManifold
):
r"""Sphere-subspace compliment intersection manifold.
Manifold of n-dimensional unit 2-norm vectors which are orthogonal to
the :math:`r`-dimensional subspace of :math:`\R^n` spanned by columns of
the matrix ``U``.
"""
def __init__(self, U):
self._validate_span_matrix(U)
m = U.shape[0]
Q, _ = la.qr(U)
projector = np.eye(m) - Q @ Q.T
subspace_dimension = la.matrix_rank(projector)
name = (
f"Sphere manifold of {m}-dimensional vectors orthogonal "
f"to a {subspace_dimension}-dimensional subspace"
)
dimension = subspace_dimension - 1
super().__init__(projector, name, dimension)
|
{"hexsha": "73f3a1c9dc76ca62b023f8a43eeda5ba10b97d38", "size": 5816, "ext": "py", "lang": "Python", "max_stars_repo_path": "manifolds/sphere.py", "max_stars_repo_name": "cjyaras/pymanopt", "max_stars_repo_head_hexsha": "447545fd9a6f33f3060a083fde1a2ac643ed340e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manifolds/sphere.py", "max_issues_repo_name": "cjyaras/pymanopt", "max_issues_repo_head_hexsha": "447545fd9a6f33f3060a083fde1a2ac643ed340e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manifolds/sphere.py", "max_forks_repo_name": "cjyaras/pymanopt", "max_forks_repo_head_hexsha": "447545fd9a6f33f3060a083fde1a2ac643ed340e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6105263158, "max_line_length": 79, "alphanum_fraction": 0.598349381, "include": true, "reason": "import numpy", "num_tokens": 1496}
|
subroutine runP_i(k,i1,f,Gr,Shat2,N0)
implicit none
include 'pvDnames.f'
include 'pvDv.f'
include 'Darraydef.f'
include 'Darrays.f'
integer ep,N0,k,i1,np
parameter(np=3)
double precision f(np),Gr(np,np)
double complex Shat2(np,np,-2:0)
do ep=-2,0
Dv(di(i1)+N0,ep)=
. (Shat2(k,i1,ep)
. -2d0*delta(k,i1)*Dv(dd00+N0,ep)
. -Gr(k,1)*Dv(dii(z2(1,i1))+N0,ep)
. -Gr(k,2)*Dv(dii(z2(2,i1))+N0,ep)
. -Gr(k,3)*Dv(dii(z2(3,i1))+N0,ep))/f(k)
enddo
return
end
|
{"hexsha": "13e8b640cd39501d0c24d0d6f403cde9061eaf4e", "size": 577, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/TensorReduction/recur/smallP/runP_i.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/TensorReduction/recur/smallP/runP_i.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/TensorReduction/recur/smallP/runP_i.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 25.0869565217, "max_line_length": 46, "alphanum_fraction": 0.5095320624, "num_tokens": 228}
|
[STATEMENT]
lemma composition_of_substs_eq :
shows "(subst_equation (subst_equation e \<sigma>) \<eta>)
= (subst_equation e (comp \<sigma> \<eta>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subst_equation (subst_equation e \<sigma>) \<eta> = subst_equation e (\<sigma> \<lozenge> \<eta>)
[PROOF STEP]
by (metis subst_equation.simps composition_of_substs vars_of_eq.elims)
|
{"llama_tokens": 140, "file": "SuperCalc_equational_clausal_logic", "length": 1}
|
[STATEMENT]
lemma wfT_fun_return_t:
fixes \<tau>a'::\<tau> and \<tau>'::\<tau>
assumes "\<Theta>; \<B>; (xa, b, ca) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>a'" and "(AF_fun_typ x b c \<tau>' s') = (AF_fun_typ xa b ca \<tau>a' sa')"
shows "\<Theta>; \<B>; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
obtain cb::x where xf: "atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x , xa)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>cb. atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using obtain_fresh
[PROOF STATE]
proof (prove)
using this:
(\<And>a. atom a \<sharp> ?x \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>cb. atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
hence "atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')"
[PROOF STATE]
proof (prove)
using this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
goal (1 subgoal):
1. atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')
[PROOF STEP]
using fresh_prod6 fresh_prod4 fresh_prod8
[PROOF STATE]
proof (prove)
using this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f)
?x \<sharp> (?a, ?b, ?c, ?d) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g \<and> ?x \<sharp> ?h)
goal (1 subgoal):
1. atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
hence *:"c[x::=V_var cb]\<^sub>c\<^sub>v = ca[xa::=V_var cb]\<^sub>c\<^sub>v \<and> \<tau>'[x::=V_var cb]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=V_var cb]\<^sub>\<tau>\<^sub>v"
[PROOF STATE]
proof (prove)
using this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')
goal (1 subgoal):
1. c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
using assms \<tau>.eq_iff Abs1_eq_iff_all
[PROOF STATE]
proof (prove)
using this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca) \<and> atom cb \<sharp> (x, xa, ((c, \<tau>'), s'), (ca, \<tau>a'), sa')
\<Theta> ; \<B> ; (xa, b, ca) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>a'
AF_fun_typ x b c \<tau>' s' = AF_fun_typ xa b ca \<tau>a' sa'
(\<lbrace> ?x : ?b | ?c \<rbrace> = \<lbrace> ?xa : ?ba | ?ca \<rbrace>) = ([[atom ?x]]lst. ?c = [[atom ?xa]]lst. ?ca \<and> ?b = ?ba)
([{atom ?a}]set. ?x = [{atom ?b}]set. ?y) = (\<forall>c. atom c \<sharp> ?z \<longrightarrow> atom c \<sharp> (?a, ?b, ?x, ?y) \<longrightarrow> (?a \<leftrightarrow> c) \<bullet> ?x = (?b \<leftrightarrow> c) \<bullet> ?y)
([{atom ?a}]res. ?x = [{atom ?b}]res. ?y) = (\<forall>c. atom c \<sharp> ?z \<longrightarrow> atom c \<sharp> (?a, ?b, ?x, ?y) \<longrightarrow> (?a \<leftrightarrow> c) \<bullet> ?x = (?b \<leftrightarrow> c) \<bullet> ?y)
([[atom ?a]]lst. ?x = [[atom ?b]]lst. ?y) = (\<forall>c. atom c \<sharp> ?z \<longrightarrow> atom c \<sharp> (?a, ?b, ?x, ?y) \<longrightarrow> (?a \<leftrightarrow> c) \<bullet> ?x = (?b \<leftrightarrow> c) \<bullet> ?y)
goal (1 subgoal):
1. c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
have **: "\<Theta>; \<B>; (xa \<leftrightarrow> cb ) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb ) \<bullet> \<tau>a'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
[PROOF STEP]
using assms True_eqvt beta_flip_eq theta_flip_eq wfG_wf
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; (xa, b, ca) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>a'
AF_fun_typ x b c \<tau>' s' = AF_fun_typ xa b ca \<tau>a' sa'
?p \<bullet> True = True
(?x \<leftrightarrow> ?xa) \<bullet> ?\<B> = ?\<B>
\<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<Longrightarrow> (?x \<leftrightarrow> ?xa) \<bullet> ?\<Theta> = ?\<Theta>
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
goal (1 subgoal):
1. \<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
[PROOF STEP]
by (metis GCons_eqvt GNil_eqvt wfT.eqvt wfT_wf)
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
have "\<Theta>; \<B>; (x \<leftrightarrow> cb ) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb ) \<bullet> \<tau>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
have "(xa \<leftrightarrow> cb ) \<bullet> xa = (x \<leftrightarrow> cb ) \<bullet> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (xa \<leftrightarrow> cb) \<bullet> xa = (x \<leftrightarrow> cb) \<bullet> x
[PROOF STEP]
using xf
[PROOF STATE]
proof (prove)
using this:
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
goal (1 subgoal):
1. (xa \<leftrightarrow> cb) \<bullet> xa = (x \<leftrightarrow> cb) \<bullet> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(xa \<leftrightarrow> cb) \<bullet> xa = (x \<leftrightarrow> cb) \<bullet> x
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
hence "(x \<leftrightarrow> cb ) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb ) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)"
[PROOF STATE]
proof (prove)
using this:
(xa \<leftrightarrow> cb) \<bullet> xa = (x \<leftrightarrow> cb) \<bullet> x
goal (1 subgoal):
1. (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)
[PROOF STEP]
using * ** xf G_cons_flip fresh_GNil
[PROOF STATE]
proof (prove)
using this:
(xa \<leftrightarrow> cb) \<bullet> xa = (x \<leftrightarrow> cb) \<bullet> x
c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
\<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
(?x \<leftrightarrow> ?x') \<bullet> ((?x'', ?b, ?c) #\<^sub>\<Gamma> ?\<Gamma>) = ((?x \<leftrightarrow> ?x') \<bullet> ?x'', ?b, (?x \<leftrightarrow> ?x') \<bullet> ?c) #\<^sub>\<Gamma> (?x \<leftrightarrow> ?x') \<bullet> ?\<Gamma>
?a \<sharp> GNil
goal (1 subgoal):
1. (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
(x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
using ** * xf
[PROOF STATE]
proof (prove)
using this:
(x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) = (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil)
\<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
atom cb \<sharp> (c, \<tau>', s', sa', \<tau>a', ca, x, xa)
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
using beta_flip_eq theta_flip_eq wfT_wf wfG_wf * ** True_eqvt wfT.eqvt permute_flip_cancel
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; (x \<leftrightarrow> cb) \<bullet> ((x, b, c) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (x \<leftrightarrow> cb) \<bullet> \<tau>'
(?x \<leftrightarrow> ?xa) \<bullet> ?\<B> = ?\<B>
\<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<Longrightarrow> (?x \<leftrightarrow> ?xa) \<bullet> ?\<Theta> = ?\<Theta>
?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile>\<^sub>w\<^sub>f ?\<tau> \<Longrightarrow> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<and> \<turnstile>\<^sub>w\<^sub>f ?\<Theta> \<and> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f b_of ?\<tau>
?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<Longrightarrow> \<turnstile>\<^sub>w\<^sub>f ?\<Theta>
c[x::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v = ca[xa::=[ cb ]\<^sup>v]\<^sub>c\<^sub>v \<and> \<tau>'[x::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v = \<tau>a'[xa::=[ cb ]\<^sup>v]\<^sub>\<tau>\<^sub>v
\<Theta> ; \<B> ; (xa \<leftrightarrow> cb) \<bullet> ((xa, b, ca) #\<^sub>\<Gamma> GNil) \<turnstile>\<^sub>w\<^sub>f (xa \<leftrightarrow> cb) \<bullet> \<tau>a'
?p \<bullet> True = True
?x13.0 ; ?x14.0 ; ?x15.0 \<turnstile>\<^sub>w\<^sub>f ?x16.0 \<Longrightarrow> ?p \<bullet> ?x13.0 ; ?p \<bullet> ?x14.0 ; ?p \<bullet> ?x15.0 \<turnstile>\<^sub>w\<^sub>f ?p \<bullet> ?x16.0
(?a \<leftrightarrow> ?b) \<bullet> (?a \<leftrightarrow> ?b) \<bullet> ?x = ?x
goal (1 subgoal):
1. \<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; (x, b, c) #\<^sub>\<Gamma> GNil \<turnstile>\<^sub>w\<^sub>f \<tau>'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6078, "file": "MiniSail_WellformedL", "length": 29}
|
import pandas as pd
import numpy as np
from pycaret.classification import predict_model, load_model
def load_data(filepath):
"""
Loads churn data into a DataFrame from a string filepath.
"""
df = pd.read_csv(filepath, index_col='customerID')
return df
def make_predictions(df):
"""
Uses the pycaret best model to make predictions on data in the df dataframe.
"""
model = load_model('BEST') # searching for the best model which changes based on modeling
predictions = predict_model(model, data=df)
predictions.rename({'Label': 'Churn_prediction'}, axis=1, inplace=True)
predictions['Churn_prediction'].replace({1: 'Churn', 0: 'No Churn'},
inplace=True)
predictions.rename({'Score': 'Percentage'}, axis=1, inplace=True)
return predictions[predictions.columns[6:8]]
if __name__ == "__main__":
"""
Runs full script if main is loaded
Transforms new data to create matching features to the model
"""
df = load_data('data/new_churn_data_unmodified.csv')
df.fillna(df['TotalCharges'].median(), inplace=True)
df.at[df['tenure'] == 0, 'tenure'] = np.nan
df['tenure'].fillna(df['tenure'].median(), inplace=True)
df['PhoneService'] = df['PhoneService'].replace({'No': 0, 'Yes': 1})
df['Contract'] = df['Contract'].replace({'Month-to-month': 0, 'One year': 1, 'Two year': 2})
df['PaymentMethod'] = df['PaymentMethod'].replace({'Electronic check': 0, 'Mailed check': 1,
'Bank transfer (automatic)': 2, 'Credit card (automatic)': 3})
predictions = make_predictions(df)
print('predictions:')
print(predictions)
|
{"hexsha": "be37f8db7b78c231762b5cfa59214910a6dd6ae8", "size": 1679, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_churn.py", "max_stars_repo_name": "kodama3d/msds_600_week5", "max_stars_repo_head_hexsha": "746fbc1bb92811e4ef568a8220a1c93301d14909", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict_churn.py", "max_issues_repo_name": "kodama3d/msds_600_week5", "max_issues_repo_head_hexsha": "746fbc1bb92811e4ef568a8220a1c93301d14909", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict_churn.py", "max_forks_repo_name": "kodama3d/msds_600_week5", "max_forks_repo_head_hexsha": "746fbc1bb92811e4ef568a8220a1c93301d14909", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3111111111, "max_line_length": 96, "alphanum_fraction": 0.6509827278, "include": true, "reason": "import numpy", "num_tokens": 416}
|
import GitLab
using Base.Test
myauth = GitLab.authenticate(ENV["GITLAB_AUTH"]) # don't hardcode your access tokens!
println("Authentication successful")
options = Dict("private_token" => myauth.token)
myrepo = GitLab.repo_by_name("TestProject1"; headers=options)
file = GitLab.file(myrepo, "src/file1", "master"; headers=options)
@test get(file.file_path) == "src/file1"
println("Content Tests Done !!!")
|
{"hexsha": "af1305dd4b0fb9abe489e06f6c0fe8436c771229", "size": 410, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/contents.jl", "max_stars_repo_name": "UnofficialJuliaMirror/GitLab.jl-ec55e9df-579d-5e55-a10d-b795213e2edd", "max_stars_repo_head_hexsha": "28fc1fc4f561b027ebf117f1bc7a497252f47123", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-06-24T00:32:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-17T14:19:05.000Z", "max_issues_repo_path": "test/contents.jl", "max_issues_repo_name": "UnofficialJuliaMirror/GitLab.jl-ec55e9df-579d-5e55-a10d-b795213e2edd", "max_issues_repo_head_hexsha": "28fc1fc4f561b027ebf117f1bc7a497252f47123", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-08-29T20:39:49.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:32:13.000Z", "max_forks_repo_path": "test/contents.jl", "max_forks_repo_name": "UnofficialJuliaMirror/GitLab.jl-ec55e9df-579d-5e55-a10d-b795213e2edd", "max_forks_repo_head_hexsha": "28fc1fc4f561b027ebf117f1bc7a497252f47123", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-07-19T06:07:42.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-03T08:32:46.000Z", "avg_line_length": 27.3333333333, "max_line_length": 85, "alphanum_fraction": 0.7463414634, "num_tokens": 101}
|
"""
Propagation module
"""
# struct for handling information of the problem
struct ShootingSettings
n::Int
nr::Int
n_sv::Int
prob_stm::ODEProblem
tofs::Array
r0::Array # fixed vector
rf::Array # fixed vector
v0::Array
vf::Array
method
reltol::Float64
abstol::Float64
tolDC::Float64
tolConv::Float64
maxiter::Int
verbosity::Int
end
"""
Two-stage shooting algorithm for n-impulsive trajectory design
"""
function twostage_shooting(n_sv::Int, svs::Array, tofs::Array, prob_stm::ODEProblem; kwargs...)
# FIXME -- take from kwargs
reltol = 1.e-12
abstol = 1.e-12
method = Tsit5()
tolDC = 1.e-11
tolConv = 1.e-6
maxiter = 15
verbosity = 1
n = length(svs) # number of nodes
nr = n_sv ÷ 2 # number of elements of positions (== length of velocities)
println("Using $n nodes")
for (i,sv) in enumerate(svs)
print("Node $i: \n")
println(sv)
end
# initial and final position and velocity vectors
r0 = svs[1][1:nr]
v0 = svs[1][nr+1:end]
rf = svs[end][1:nr]
vf = svs[end][nr+1:end]
# inner-loop decision vector: velocities at first n-1 nodes
x_inner = zeros(nr*(n-1))
for i = 1:n-1
x_inner[1+(i-1)*nr:i*nr] = svs[i][nr+1:end]
end
# outer-loop decision vector
x_outer = zeros((n-2)*nr)
for j = 1:n-2
x_outer[1+(j-1)*nr:j*nr] = svs[j+1][1:nr]
end
# construct ShootingSettings
Settings = ShootingSettings(n, nr, n_sv, prob_stm, tofs, r0, rf, v0, vf,
method, reltol, abstol, tolDC, tolConv, maxiter, verbosity)
# initialize storage
J_inner = zeros(nr*(n-1), nr*(n-1))
# outer-loop shooting to minimize velocity discontinuity
outer_loop_shooting!(x_outer, x_inner, J_inner, Settings)
# re-construct nodes of final solution
conv_rs = vcat(r0, x_outer, rf)
conv_vs = vcat(x_inner, Settings.vf)
nodes_conv = []
for i = 1:n
sv = vcat( conv_rs[1+(i-1)*nr : i*nr] , conv_vs[1+(i-1)*nr : i*nr] )
push!(nodes_conv, sv)
end
return nodes_conv
end
"""
Outer-loop finds least-square solution to minimize velocity discontinuity.
Function mutates `x0_outer`
"""
function outer_loop_shooting!(x0_outer, x0_inner, J_inner, Settings::ShootingSettings, verbose::Bool=true)
# initialize old velocities
old_vs = vcat(x0_inner, Settings.vf)
old_cost = 0.0
for i_outer = 1:Settings.maxiter
# inner-loop shooting to ensure position continuity
inner_loop_shooting!(x0_outer, x0_inner, J_inner, Settings, true)
# compute new velocities
new_vs = vcat(x0_inner, Settings.vf)
# break if norm of cost stops improving
dvs = new_vs - old_vs
cost = norm(dvs)
if verbose==true
println("Outer iteration $i_outer ... cost: $cost")
end
if (i_outer > 1) && (old_cost - cost < Settings.tolConv)
println("Outer-loop achieved tolerance!")
break
end
# compute J_outer via finite difference
function g(x_outer)
inner_loop_shooting!(x_outer, x0_inner, J_inner, Settings, false)
new_vs = vcat(x0_inner, Settings.vf)
return new_vs - old_vs
end
J_outer = FiniteDiff.finite_difference_jacobian(g, x0_outer)
# least-square update
# print("x0_outer: "); println(length(x0_outer))
# print("J_outer: "); println(size(J_outer))
# print("new_vs: "); println(length(new_vs))
# updates
x0_outer[:] = x0_outer -inv(transpose(J_outer)*J_outer) * transpose(J_outer) * new_vs
old_vs[:] = new_vs
old_cost = cost
end
return
end
"""
Inner-loop corrects velocity vectors to ensure position continuity.
Function mutates `x0_inner`
"""
function inner_loop_shooting!(x0_outer, x0_inner, J_inner, Settings::ShootingSettings, verbose::Bool=false)
r_i2_prop = zeros(Settings.nr * (Settings.n-1))
for i_inner = 1:Settings.maxiter
# propagate n nodes
for i = 1:Settings.n-1
# re-construct array of positions from node 1 ~ n-1
rs_vec = vcat(Settings.r0, x0_outer)
#
r_i = rs_vec[1+(i-1)*Settings.nr : i*Settings.nr]
v_i = x0_inner[1+(i-1)*Settings.nr : i*Settings.nr]
x0i = vcat(r_i, v_i, reshape(I(Settings.n_sv), (Settings.n_sv*Settings.n_sv)))[:]
_prob = remake(Settings.prob_stm; tspan=(0.0, Settings.tofs[i]), u0=x0i)
sol = DifferentialEquations.solve(_prob, Settings.method, reltol=Settings.reltol, abstol=Settings.abstol)
# store results
r_i2_prop[1+(i-1)*Settings.nr : i*Settings.nr] = sol.u[end][1:Settings.nr]
# fill-in upper-left submatrix of STM into Jacobian
STM = transpose( reshape(sol.u[end][Settings.n_sv+1:end], (Settings.n_sv, Settings.n_sv)) )
J_inner[1+(i-1)*Settings.nr:i*Settings.nr, 1+(i-1)*Settings.nr:i*Settings.nr] = -STM[1:Settings.nr, Settings.nr+1:end] # FIXME
end
# compute final position offset: δr_i2 = r_i2_guess - r_i2_prop
δr_i2 = vcat(x0_outer, Settings.rf) - r_i2_prop
# check breaking condition
err = norm(δr_i2)
if verbose==true
println(" Inner iteration $i_inner ... err: $err")
end
if err < Settings.tolDC
if verbose==true
println(" Inner-loop achieved tolerance!")
end
break
end
# correct velocity: v_i = v_i_guess - J_inner^-1 * δr_i2
x0_inner[:] = x0_inner - inv(J_inner) * δr_i2
end
return
end
|
{"hexsha": "c5f66a16623bd8bcd3b3d585551bf7cacefd132f", "size": 5257, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/twostage_shooting.jl", "max_stars_repo_name": "Yuricst/ShootingStar.jl", "max_stars_repo_head_hexsha": "89243d55bb53ea28c25719767d9addef20dcdb4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/twostage_shooting.jl", "max_issues_repo_name": "Yuricst/ShootingStar.jl", "max_issues_repo_head_hexsha": "89243d55bb53ea28c25719767d9addef20dcdb4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/twostage_shooting.jl", "max_forks_repo_name": "Yuricst/ShootingStar.jl", "max_forks_repo_head_hexsha": "89243d55bb53ea28c25719767d9addef20dcdb4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6852791878, "max_line_length": 134, "alphanum_fraction": 0.6633060681, "num_tokens": 1651}
|
import numpy as np
import matplotlib.pyplot as plt
values_mV = np.loadtxt("ljpValues.txt")
values_uV = values_mV * 1000
valuesMean_uV = np.mean(values_uV)
valuesMean_mV = valuesMean_uV / 1000.0
baselineValues_uV = values_uV - valuesMean_uV
n, bins, patches = plt.hist(baselineValues_uV, 100)
details = "n: %d, mean: %f mV, stdev: %f µV" % (
len(values_uV), valuesMean_mV, np.std(values_uV))
plt.title("Repeated LJP Calculations")
plt.ylabel("Count")
plt.xlabel(f"Difference from Mean (µV)\n{details}")
plt.tight_layout()
plt.savefig("ljpVariance.png")
plt.show()
|
{"hexsha": "abf1265de170651ca42b53859d960dd72db98762", "size": 568, "ext": "py", "lang": "Python", "max_stars_repo_path": "dev/variance/calculateVariance.py", "max_stars_repo_name": "swharden/JLJP", "max_stars_repo_head_hexsha": "9644ceb2d7a6ed5c61f94e535e556a0887ef0753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-06T18:10:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T04:10:13.000Z", "max_issues_repo_path": "dev/variance/calculateVariance.py", "max_issues_repo_name": "swharden/JLJP", "max_issues_repo_head_hexsha": "9644ceb2d7a6ed5c61f94e535e556a0887ef0753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-02-08T00:07:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-13T22:29:53.000Z", "max_forks_repo_path": "dev/variance/calculateVariance.py", "max_forks_repo_name": "swharden/JLJP", "max_forks_repo_head_hexsha": "9644ceb2d7a6ed5c61f94e535e556a0887ef0753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-01T18:02:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T18:02:40.000Z", "avg_line_length": 31.5555555556, "max_line_length": 53, "alphanum_fraction": 0.7447183099, "include": true, "reason": "import numpy", "num_tokens": 180}
|
import numpy as np
import pandas as pd
from feature_selection import read_data
from keras.models import Sequential
from keras.layers import Dense
from sklearn import svm
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
recall_score, classification_report,
confusion_matrix)
import mlflow
import warnings
from collections import Counter
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
def split(train,test):
y_train=train.iloc[:,-1]
X_train=train.iloc[:,:-1]
y_test=test.iloc[:,-1]
X_test=test.iloc[:,:-1]
print("\n")
print("train classes ", sorted(Counter(y_train).items()))
print("test classes ", sorted(Counter(y_test).items()))
# mlflow.log_param("test classes", sorted(Counter(y_test).items()))
return X_train, y_train, X_test, y_test
def reporter(y_test,y_pred):
print("_____________________________________________________________________\n")
print("test:\t", y_test, "\npred:\t", y_pred)
# mlflow.log_param("test", y_test)
# mlflow.log_param("pred", y_pred)
print("_____________________________________________________________________\n")
with warnings.catch_warnings():
# ignore all caught warnings
warnings.filterwarnings("ignore")
print(classification_report(y_test,y_pred))
print(confusion_matrix(y_test, y_pred))
a = accuracy_score(y_test, y_pred)
f = f1_score(y_test, y_pred, average="macro")
p = precision_score(y_test, y_pred, average=None)
r = recall_score(y_test, y_pred, average=None)
return a,f,p,r
def svm(train,test,kernel='rbf'):
print("\n")
print("svm ... ")
print("kernel function : ", kernel)
mlflow.log_param("CLASSIFICATION-SVM kernel function", kernel)
X_train, y_train, X_test, y_test = split(train,test)
svclassifier = SVC(kernel='rbf')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
a,f,p,r = reporter(y_test.to_numpy(),y_pred)
return a,f,p,r
def decision_tree(train,test,criterion):
print("\n")
print("decision_tree ... ")
X_train, y_train, X_test, y_test = split(train,test)
classifier = DecisionTreeClassifier(criterion=criterion)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
a,f,p,r = reporter(y_test,y_pred)
return a,f,p,r
def feedForwardNN(train, test,
layer1=32, layer2=32,
activation1='relu',
activation2='sigmoid',
lossF='categorical_crossentropy',
optimizerF='sgd',
metrics=['accuracy'],
epochs=40):
print("\n")
print("feedForwardNN ... ")
print("loss function : ", lossF)
print("optimizer function : ", optimizerF)
print("CLASSIFICATION-NN layers", [layer1, layer2])
print("CLASSIFICATION-NN optimizer function", optimizerF)
print("CLASSIFICATION-NN epochs", epochs)
mlflow.log_param("CLASSIFICATION-NN loss function", lossF)
mlflow.log_param("CLASSIFICATION-NN optimizer function", optimizerF)
mlflow.log_param("CLASSIFICATION-NN layers", [layer1, layer2])
mlflow.log_param("CLASSIFICATION-NN optimizer function", optimizerF)
mlflow.log_param("CLASSIFICATION-NN epochs", epochs)
X_train, y_train, X_test, y_test = split(train,test)
model = Sequential()
model.add(Dense(layer1, input_dim=len(X_train.columns), activation=activation1))
model.add(Dense(layer2, activation=activation1))
model.add(Dense(3, activation=activation2))
model.compile(loss=lossF,
optimizer=optimizerF,
metrics=metrics)
y_train_one_hot = np.zeros((y_train.size, y_train.max()+1))
y_train_one_hot[np.arange(y_train.size),y_train] = 1
model.fit(X_train, y_train_one_hot,
epochs=epochs,
verbose=0)
y_pred = model.predict(X_test).tolist()
cat_pred = []
for i in range (len(y_pred)):
cat_pred.append(np.asarray(y_pred[i]).argmax()) # integers)
a,f,p,r = reporter(y_test.tolist(), cat_pred)
return a, f, p, r
def randomForest(train, test, max_depth=3, random_state=0):
print("\n")
print("randomForest ... ")
print("max_depth : ", max_depth)
mlflow.log_param("CLASSIFICATION-randomForest max_depth", max_depth)
X_train, y_train, X_test, y_test = split(train,test)
classifier = RandomForestClassifier(max_depth=3, random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
a,f,p,r = reporter(y_test.tolist(),y_pred)
return a,f,p,r
def randomForest_neuralNet_svm(train, test):
X_train, y_train, X_test, y_test = split(train,test)
RFclass = RandomForestClassifier(max_depth=3, random_state=0)
RFclass.fit(X_train, y_train)
y_pred_forest = RFclass.predict(X_test)
SVCclass = SVC(kernel='rbf')
SVCclass.fit(X_train, y_train)
y_pred_svm = SVCclass.predict(X_test)
NNet = Sequential()
NNet.add(Dense(32, input_dim=len(X_train.columns), activation='relu'))
NNet.add(Dense(32, activation='relu'))
NNet.add(Dense(3, activation='sigmoid'))
NNet.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
y_train_one_hot = np.zeros((y_train.size, y_train.max()+1))
y_train_one_hot[np.arange(y_train.size),y_train] = 1
NNet.fit(X_train, y_train_one_hot,
epochs=40,
verbose=0)
y_pred = NNet.predict(X_test).tolist()
y_pred_net = []
for i in range (len(y_pred)):
y_pred_net.append(np.asarray(y_pred[i]).argmax()) # integers)
majority_vote =[]
for i in range (len(y_pred_net)):
if y_pred_net[i] == y_pred_svm[i] == y_pred_forest[i]:
majority_vote.append(y_pred_net[i])
elif y_pred_net[i] == y_pred_svm[i]:
majority_vote.append(y_pred_net[i])
elif y_pred_net[i] == y_pred_forest[i]:
majority_vote.append(y_pred_net[i])
else:
majority_vote.append(y_pred_forest[i])
print("\nnnet\t",y_pred_net)
print("svm\t",y_pred_svm.tolist())
print("forest\t",y_pred_forest.tolist())
a,f,p,r = reporter(y_test.tolist(),majority_vote)
return a,f,p,r
|
{"hexsha": "9110695003914a46f145bf493da85108a8f998ed", "size": 5964, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/classification.py", "max_stars_repo_name": "federico-code/PedMS-Classification", "max_stars_repo_head_hexsha": "04215b78dd299463cf5ae6ca33c1deb3d49acd41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/classification.py", "max_issues_repo_name": "federico-code/PedMS-Classification", "max_issues_repo_head_hexsha": "04215b78dd299463cf5ae6ca33c1deb3d49acd41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:58:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T22:17:53.000Z", "max_forks_repo_path": "code/classification.py", "max_forks_repo_name": "federico-code/PedMS-Classification", "max_forks_repo_head_hexsha": "04215b78dd299463cf5ae6ca33c1deb3d49acd41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-20T14:32:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-20T14:32:59.000Z", "avg_line_length": 26.2731277533, "max_line_length": 82, "alphanum_fraction": 0.7233400402, "include": true, "reason": "import numpy", "num_tokens": 1515}
|
[STATEMENT]
lemma (in comm_group) subgroup_iso_DirProds_IDirProds:
assumes "subgroup J G" "is_idirprod J S I" "finite I"
shows "(\<lambda>x. \<Otimes>\<^bsub>G\<^esub>i\<in>I. x i) \<in> iso (DirProds (\<lambda>i. G\<lparr>carrier := (S i)\<rparr>) I) (G\<lparr>carrier := J\<rparr>)"
(is "?fp \<in> iso ?DP ?J")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
from assms(2)
[PROOF STATE]
proof (chain)
picking this:
is_idirprod J S I
[PROOF STEP]
have assm: "J = IDirProds G S I"
"compl_fam S I"
[PROOF STATE]
proof (prove)
using this:
is_idirprod J S I
goal (1 subgoal):
1. J = IDirProds G S I &&& compl_fam S I
[PROOF STEP]
unfolding is_idirprod_def
[PROOF STATE]
proof (prove)
using this:
(\<forall>i\<in>I. S i \<lhd> G) \<and> J = IDirProds G S I \<and> compl_fam S I
goal (1 subgoal):
1. J = IDirProds G S I &&& compl_fam S I
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
J = IDirProds G S I
compl_fam S I
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
from assms(1, 2)
[PROOF STATE]
proof (chain)
picking this:
subgroup J G
is_idirprod J S I
[PROOF STEP]
have assm': "\<And>i. i \<in> I \<Longrightarrow> subgroup (S i) (G\<lparr>carrier := J\<rparr>)"
[PROOF STATE]
proof (prove)
using this:
subgroup J G
is_idirprod J S I
goal (1 subgoal):
1. \<And>i. i \<in> I \<Longrightarrow> subgroup (S i) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
using normal_imp_subgroup subgroup_incl
[PROOF STATE]
proof (prove)
using this:
subgroup J G
is_idirprod J S I
?H \<lhd> ?G \<Longrightarrow> subgroup ?H ?G
\<lbrakk>subgroup ?I G; subgroup ?J G; ?I \<subseteq> ?J\<rbrakk> \<Longrightarrow> subgroup ?I (G\<lparr>carrier := ?J\<rparr>)
goal (1 subgoal):
1. \<And>i. i \<in> I \<Longrightarrow> subgroup (S i) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by (metis IDirProds_incl assms(2) is_idirprod_def)
[PROOF STATE]
proof (state)
this:
?i2 \<in> I \<Longrightarrow> subgroup (S ?i2) (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
interpret J: comm_group ?J
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comm_group (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
using subgroup_is_comm_group[OF assms(1)]
[PROOF STATE]
proof (prove)
using this:
comm_group (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. comm_group (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
interpret DP: comm_group ?DP
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comm_group (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
by (intro DirProds_is_comm_group; use J.subgroup_is_comm_group[OF assm'] in simp)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
have inJ: "S i \<subseteq> J" if "i \<in> I" for i
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S i \<subseteq> J
[PROOF STEP]
using subgroup.subset[OF assm'[OF that]]
[PROOF STATE]
proof (prove)
using this:
S i \<subseteq> carrier (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. S i \<subseteq> J
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
?i2 \<in> I \<Longrightarrow> S ?i2 \<subseteq> J
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
have hom: "?fp \<in> hom ?DP ?J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
proof (rule homI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<Longrightarrow> finprod G x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
2. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<Longrightarrow> finprod G x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
2. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
assume x[simp]: "x \<in> carrier ?DP"
[PROOF STATE]
proof (state)
this:
x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
goal (2 subgoals):
1. \<And>x. x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<Longrightarrow> finprod G x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
2. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
show "finprod G x I \<in> carrier ?J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finprod G x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
proof (subst finprod_subgroup[OF _ assms(1)])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x \<in> I \<rightarrow> J
2. finprod (G\<lparr>carrier := J\<rparr>) x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
show "x \<in> I \<rightarrow> J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> I \<rightarrow> J
[PROOF STEP]
using inJ comp_in_carr[OF x]
[PROOF STATE]
proof (prove)
using this:
?i2 \<in> I \<Longrightarrow> S ?i2 \<subseteq> J
?i \<in> I \<Longrightarrow> x ?i \<in> carrier (G\<lparr>carrier := S ?i\<rparr>)
goal (1 subgoal):
1. x \<in> I \<rightarrow> J
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> I \<rightarrow> J
goal (1 subgoal):
1. finprod (G\<lparr>carrier := J\<rparr>) x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
thus "finprod ?J x I \<in> carrier ?J"
[PROOF STATE]
proof (prove)
using this:
x \<in> I \<rightarrow> J
goal (1 subgoal):
1. finprod (G\<lparr>carrier := J\<rparr>) x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by (intro J.finprod_closed; simp)
[PROOF STATE]
proof (state)
this:
finprod (G\<lparr>carrier := J\<rparr>) x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
finprod G x I \<in> carrier (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
fix y
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
assume y[simp]: "y \<in> carrier ?DP"
[PROOF STATE]
proof (state)
this:
y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I); y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)\<rbrakk> \<Longrightarrow> finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
show "finprod G (x \<otimes>\<^bsub>?DP\<^esub> y) I = finprod G x I \<otimes>\<^bsub>?J\<^esub> finprod G y I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
[PROOF STEP]
proof(subst (1 2 3) finprod_subgroup[of _ _ J])
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. y \<in> I \<rightarrow> J
2. subgroup J G
3. x \<in> I \<rightarrow> J
4. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
5. finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
[PROOF STEP]
show xyJ: "x \<in> I \<rightarrow> J" "y \<in> I \<rightarrow> J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> I \<rightarrow> J &&& y \<in> I \<rightarrow> J
[PROOF STEP]
using x y inJ comp_in_carr[OF x] comp_in_carr[OF y]
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
?i2 \<in> I \<Longrightarrow> S ?i2 \<subseteq> J
?i \<in> I \<Longrightarrow> x ?i \<in> carrier (G\<lparr>carrier := S ?i\<rparr>)
?i \<in> I \<Longrightarrow> y ?i \<in> carrier (G\<lparr>carrier := S ?i\<rparr>)
goal (1 subgoal):
1. x \<in> I \<rightarrow> J &&& y \<in> I \<rightarrow> J
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> I \<rightarrow> J
y \<in> I \<rightarrow> J
goal (3 subgoals):
1. subgroup J G
2. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
3. finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
[PROOF STEP]
show xyJ1: "x \<otimes>\<^bsub>?DP\<^esub> y \<in> I \<rightarrow> J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
[PROOF STEP]
using inJ x y comp_in_carr[of "x \<otimes>\<^bsub>?DP\<^esub> y"]
[PROOF STATE]
proof (prove)
using this:
?i2 \<in> I \<Longrightarrow> S ?i2 \<subseteq> J
x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
y \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
\<lbrakk>x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> carrier (DirProds ?G ?I); ?i \<in> ?I\<rbrakk> \<Longrightarrow> (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) ?i \<in> carrier (?G ?i)
goal (1 subgoal):
1. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
goal (2 subgoals):
1. subgroup J G
2. finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
[PROOF STEP]
show "subgroup J G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subgroup J G
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
subgroup J G
goal (1 subgoal):
1. subgroup J G
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
subgroup J G
goal (1 subgoal):
1. finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
[PROOF STEP]
show "finprod ?J (x \<otimes>\<^bsub>?DP\<^esub> y) I = finprod ?J x I \<otimes>\<^bsub>?J\<^esub> finprod ?J y I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
[PROOF STEP]
proof (rule J.finprod_cong_split)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
2. x \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
3. y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
4. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
show "x \<in> I \<rightarrow> carrier ?J" "y \<in> I \<rightarrow> carrier ?J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>) &&& y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
using xyJ
[PROOF STATE]
proof (prove)
using this:
x \<in> I \<rightarrow> J
y \<in> I \<rightarrow> J
goal (1 subgoal):
1. x \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>) &&& y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
x \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
goal (2 subgoals):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
2. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
show "x \<otimes>\<^bsub>?DP\<^esub> y \<in> I \<rightarrow> carrier ?J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
using xyJ1
[PROOF STATE]
proof (prove)
using this:
x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> J
goal (1 subgoal):
1. x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y \<in> I \<rightarrow> carrier (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
[PROOF STEP]
fix i
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
[PROOF STEP]
assume i: "i \<in> I"
[PROOF STATE]
proof (state)
this:
i \<in> I
goal (1 subgoal):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
i \<in> I
[PROOF STEP]
have "x i \<otimes>\<^bsub>G\<lparr>carrier := (S i) \<rparr>\<^esub> y i = (x \<otimes>\<^bsub>?DP\<^esub> y) i"
[PROOF STATE]
proof (prove)
using this:
i \<in> I
goal (1 subgoal):
1. x i \<otimes>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub> y i = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) i
[PROOF STEP]
by (intro comp_mult[symmetric])
[PROOF STATE]
proof (state)
this:
x i \<otimes>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub> y i = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) i
goal (1 subgoal):
1. \<And>a. a \<in> I \<Longrightarrow> x a \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y a = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) a
[PROOF STEP]
thus "x i \<otimes>\<^bsub>?J\<^esub> y i = (x \<otimes>\<^bsub>?DP\<^esub> y) i"
[PROOF STATE]
proof (prove)
using this:
x i \<otimes>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub> y i = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) i
goal (1 subgoal):
1. x i \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y i = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) i
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x i \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> y i = (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) i
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
finprod (G\<lparr>carrier := J\<rparr>) (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod (G\<lparr>carrier := J\<rparr>) x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod (G\<lparr>carrier := J\<rparr>) y I
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
finprod G (x \<otimes>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> y) I = finprod G x I \<otimes>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> finprod G y I
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
interpret fp: group_hom ?DP ?J ?fp
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. group_hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I)
[PROOF STEP]
unfolding group_hom_def group_hom_axioms_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
goal (1 subgoal):
1. Group.group (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<and> Group.group (G\<lparr>carrier := J\<rparr>) \<and> (\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
have s: "subgroup (S i) G" if "i \<in> I" for i
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subgroup (S i) G
[PROOF STEP]
using incl_subgroup[OF assms(1) assm'[OF that]]
[PROOF STATE]
proof (prove)
using this:
subgroup (S i) G
goal (1 subgoal):
1. subgroup (S i) G
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
?i2 \<in> I \<Longrightarrow> subgroup (S ?i2) G
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
have "kernel ?DP ?J ?fp = {\<one>\<^bsub>?DP\<^esub>}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
[PROOF STEP]
have "a = \<one>\<^bsub>?DP\<^esub>" if "a \<in> kernel ?DP ?J ?fp" for a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
from that
[PROOF STATE]
proof (chain)
picking this:
a \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I)
[PROOF STEP]
have a: "finprod G a I = \<one>" "a \<in> carrier ?DP"
[PROOF STATE]
proof (prove)
using this:
a \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I)
goal (1 subgoal):
1. finprod G a I = \<one> &&& a \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
unfolding kernel_def
[PROOF STATE]
proof (prove)
using this:
a \<in> {x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I). finprod G x I = \<one>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub>}
goal (1 subgoal):
1. finprod G a I = \<one> &&& a \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
finprod G a I = \<one>
a \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
from compl_fam_imp_triv_finprod[OF assm(2) assms(3) s a(1)] comp_in_carr[OF a(2)]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>\<And>i. i \<in> I \<Longrightarrow> i \<in> I; a \<in> Pi I S\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. a i = \<one>
?i \<in> I \<Longrightarrow> a ?i \<in> carrier (G\<lparr>carrier := S ?i\<rparr>)
[PROOF STEP]
have "\<forall>i\<in>I. a i = \<one>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<And>i. i \<in> I \<Longrightarrow> i \<in> I; a \<in> Pi I S\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. a i = \<one>
?i \<in> I \<Longrightarrow> a ?i \<in> carrier (G\<lparr>carrier := S ?i\<rparr>)
goal (1 subgoal):
1. \<forall>i\<in>I. a i = \<one>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>i\<in>I. a i = \<one>
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>i\<in>I. a i = \<one>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>i\<in>I. a i = \<one>
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
using DirProds_one[OF a(2)]
[PROOF STATE]
proof (prove)
using this:
\<forall>i\<in>I. a i = \<one>
(\<forall>i\<in>I. a i = \<one>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub>) = (a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>)
goal (1 subgoal):
1. a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
a = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?a2 \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) \<Longrightarrow> ?a2 = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
goal (1 subgoal):
1. kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
?a2 \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) \<Longrightarrow> ?a2 = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
goal (1 subgoal):
1. kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
[PROOF STEP]
using fp.one_in_kernel
[PROOF STATE]
proof (prove)
using this:
?a2 \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) \<Longrightarrow> ?a2 = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>
\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub> \<in> kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I)
goal (1 subgoal):
1. kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
have "J \<subseteq> ?fp ` carrier ?DP"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
using assm(1) IDirProds_eq_finprod_PiE[OF assms(3) incl_subgroup[OF assms(1) assm']]
[PROOF STATE]
proof (prove)
using this:
J = IDirProds G S I
(\<And>i. i \<in> I \<Longrightarrow> ?i4 i \<in> I) \<Longrightarrow> IDirProds G (\<lambda>i. S (?i4 i)) I = (\<lambda>x. finprod G x I) ` (\<Pi>\<^sub>E i\<in>I. S (?i4 i))
goal (1 subgoal):
1. J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
unfolding DirProds_def PiE_def Pi_def
[PROOF STATE]
proof (prove)
using this:
J = IDirProds G S I
(\<And>i. i \<in> I \<Longrightarrow> ?i4 i \<in> I) \<Longrightarrow> IDirProds G (\<lambda>i. S (?i4 i)) I = (\<lambda>x. finprod G x I) ` ({f. \<forall>x. x \<in> I \<longrightarrow> f x \<in> S (?i4 x)} \<inter> extensional I)
goal (1 subgoal):
1. J \<subseteq> (\<lambda>x. finprod G x I) ` carrier \<lparr>carrier = {f. \<forall>x. x \<in> I \<longrightarrow> f x \<in> (carrier \<circ> (\<lambda>i. G\<lparr>carrier := S i\<rparr>)) x} \<inter> extensional I, monoid.mult = \<lambda>x y. \<lambda>i\<in>I. x i \<otimes>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub> y i, one = \<lambda>i\<in>I. \<one>\<^bsub>G\<lparr>carrier := S i\<rparr>\<^esub>\<rparr>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
using hom fp.iso_iff
[PROOF STATE]
proof (prove)
using this:
kernel (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>) (\<lambda>x. finprod G x I) = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
((\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)) = (carrier (G\<lparr>carrier := J\<rparr>) \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<and> (\<forall>x\<in>carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I). finprod G x I = \<one>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> \<longrightarrow> x = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>))
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
unfolding kernel_def
[PROOF STATE]
proof (prove)
using this:
{x \<in> carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I). finprod G x I = \<one>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub>} = {\<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>}
J \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I)
(\<lambda>x. finprod G x I) \<in> hom (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
((\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)) = (carrier (G\<lparr>carrier := J\<rparr>) \<subseteq> (\<lambda>x. finprod G x I) ` carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) \<and> (\<forall>x\<in>carrier (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I). finprod G x I = \<one>\<^bsub>G\<lparr>carrier := J\<rparr>\<^esub> \<longrightarrow> x = \<one>\<^bsub>DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I\<^esub>))
goal (1 subgoal):
1. (\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<lambda>x. finprod G x I) \<in> Group.iso (DirProds (\<lambda>i. G\<lparr>carrier := S i\<rparr>) I) (G\<lparr>carrier := J\<rparr>)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 15144, "file": "Finitely_Generated_Abelian_Groups_DirProds", "length": 98}
|
import numpy as np
class Tree(object):
def __init__(self, dim, rank):
self.dim = dim
self.rank = rank
self.nodes = []
self.edges = []
def check_structure(matrix):
"""Check if a given matrix is a vine array and should respect the conditions a regular vine.
Parameters:
----------
matrix : {array}
A vine array.
"""
# Convert the matrix in np.array if it is not
if not isinstance(matrix, np.ndarray):
matrix = np.asarray(matrix)
dim = matrix.shape[0]
assert dim == matrix.shape[1], "The matrix should be symetric"
return matrix
def init_trees(dim):
"""Initialize the tree objects of a vine array.
Parameters:
----------
dim : {int}
The problem dimension.
"""
trees = []
for i in range(dim-1):
tree = Tree(dim=dim, rank=i)
trees.append(tree)
return trees
class Vine(object):
"""Describe a vine structure
Parameters:
----------
structure : {array}
The vine array describing the structure
"""
def __init__(self, structure):
self.structure = check_structure(structure)
self.dim = structure.shape[0]
self.trees = init_trees(self.dim)
def build_new(self):
dim = self.dim
structure = self.structure
conditionning = None
for k_tree in range(dim-1):
row = dim - k_tree - 1
tree = Tree(dim, k_tree)
for col in range(dim-k_tree-1):
i = structure[col, col]
j = structure[row, col]
conditionned = [i, j]
if k_tree > 0:
conditionning = structure[row+1:, col].tolist()
print(conditionned, conditionning)
def build(self):
dim = self.dim
structure = self.structure
tmp = structure.diagonal().tolist()
self.trees[0].nodes = [([k], []) for k in tmp]
# Explore the structure matrix
for col in range(dim-1):
# The other pairs
rows = range(1+col, dim)[::-1]
for k_tree, row in enumerate(rows):
tree = self.trees[k_tree]
i = structure[col, col]
j = structure[row, col]
conditionned = [i, j]
conditionning = structure[row+1:, col].tolist()
edge = (conditionned, conditionning)
tree.edges.append(edge)
for k_tree in range(dim-2):
self.trees[k_tree+1].nodes = self.trees[k_tree].edges
|
{"hexsha": "9a93edf82d4cc8b3a90d408921481971c887981e", "size": 2620, "ext": "py", "lang": "Python", "max_stars_repo_path": "depimpact/model.py", "max_stars_repo_name": "NazBen/dep-impact", "max_stars_repo_head_hexsha": "284e72bccfb6309110df5191dfae3c0a93ce813b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "depimpact/model.py", "max_issues_repo_name": "NazBen/dep-impact", "max_issues_repo_head_hexsha": "284e72bccfb6309110df5191dfae3c0a93ce813b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "depimpact/model.py", "max_forks_repo_name": "NazBen/dep-impact", "max_forks_repo_head_hexsha": "284e72bccfb6309110df5191dfae3c0a93ce813b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1149425287, "max_line_length": 97, "alphanum_fraction": 0.5320610687, "include": true, "reason": "import numpy", "num_tokens": 585}
|
#----------------------------------------------------------------
# When run this script, plwase consider whether there is a need to comment line
# 741-746 in Env2DCyliner (just to make sure baseline runs longer time than
# single run session)
#
# The default running time steps are used for Re=200, and it may run a
# little long time, you can change the value of max_nbr_actuations
#
#----------------------------------------------------------------
import os
import socket
import numpy as np
import csv
from tensorforce.agents import Agent
from tensorforce.execution import ParallelRunner
from env import resume_env, nb_actuations
example_environment = resume_env(plot=False, dump=100, single_run=True)
deterministic = True
network = [dict(type='dense', size=512), dict(type='dense', size=512)]
agent = Agent.create(
# Agent + Environment
agent='ppo', environment=example_environment, max_episode_timesteps=nb_actuations,
# TODO: nb_actuations could be specified by Environment.max_episode_timesteps() if it makes sense...
# Network
network=network,
# Optimization
batch_size=20, learning_rate=1e-3, subsampling_fraction=0.2, optimization_steps=25,
# Reward estimation
likelihood_ratio_clipping=0.2, estimate_terminal=True, # ???
# TODO: gae_lambda=0.97 doesn't currently exist
# Critic
critic_network=network,
critic_optimizer=dict(
type='multi_step', num_steps=5,
optimizer=dict(type='adam', learning_rate=1e-3)
),
# Regularization
entropy_regularization=0.01,
# TensorFlow etc
parallel_interactions=1,
saver=None,
)
# restore_directory = './saver_data/'
# restore_file = 'model-40000'
# agent.restore(restore_directory, restore_file)
# agent.restore()
agent.initialize()
if(os.path.exists("saved_models/test_strategy.csv")):
os.remove("saved_models/test_strategy.csv")
if(os.path.exists("saved_models/test_strategy_avg.csv")):
os.remove("saved_models/test_strategy_avg.csv")
def one_run():
print("start simulation")
state = example_environment.reset()
example_environment.render = True
null_action = {}
null_action = np.zeros(example_environment.actions()['shape'])
for k in range(6*nb_actuations):
#environment.print_state()
action = agent.act(state, deterministic=deterministic, independent=True)
state, terminal, reward = example_environment.execute(null_action)
print("finish simulation\n")
# just for test, too few timesteps
# runner.run(episodes=10000, max_episode_timesteps=20, episode_finished=episode_finished)
data = np.genfromtxt("saved_models/test_strategy.csv", delimiter=";")
data = data[1:,1:]
m_data = np.average(data[len(data)//2:], axis=0)
nb_jets = len(m_data)-4
# Print statistics
print("Single Run finished. AvgDrag : {}, AvgRecircArea : {}".format(m_data[1], m_data[2]))
name = "test_strategy_avg.csv"
if(not os.path.exists("saved_models")):
os.mkdir("saved_models")
if(not os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, "w") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow(["Name", "Drag", "Lift", "RecircArea"] + ["Jet" + str(v) for v in range(nb_jets)])
spam_writer.writerow([example_environment.simu_name] + m_data[1:].tolist())
else:
with open("saved_models/"+name, "a") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow([example_environment.simu_name] + m_data[1:].tolist())
if not deterministic:
for _ in range(10):
one_run()
else:
one_run()
|
{"hexsha": "f1a2a9dc712c49937233cb8120a4cbea355171d8", "size": 3733, "ext": "py", "lang": "Python", "max_stars_repo_path": "RobustDRLCylinder2DControl/simulation_base/baseline_single_run.py", "max_stars_repo_name": "thw1021/Cylinder2DFlowControlGeneral", "max_stars_repo_head_hexsha": "ac5ea7fd304dedefa4489790e45c42ca9428440e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-05-04T08:07:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T09:33:04.000Z", "max_issues_repo_path": "RobustDRLCylinder2DControl/simulation_base/baseline_single_run.py", "max_issues_repo_name": "dianalyx/Cylinder2DFlowControlGeneral", "max_issues_repo_head_hexsha": "ac5ea7fd304dedefa4489790e45c42ca9428440e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-05-07T08:31:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T14:50:08.000Z", "max_forks_repo_path": "RobustDRLCylinder2DControl/simulation_base/baseline_single_run.py", "max_forks_repo_name": "dianalyx/Cylinder2DFlowControlGeneral", "max_forks_repo_head_hexsha": "ac5ea7fd304dedefa4489790e45c42ca9428440e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-05-04T08:08:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T08:47:13.000Z", "avg_line_length": 34.5648148148, "max_line_length": 115, "alphanum_fraction": 0.6774712028, "include": true, "reason": "import numpy", "num_tokens": 879}
|
"""Class that schedules motion on can bus."""
import asyncio
from collections import defaultdict
import logging
from typing import List, Set, Tuple, Iterator, Union
import numpy as np
from opentrons_hardware.firmware_bindings import ArbitrationId
from opentrons_hardware.firmware_bindings.constants import NodeId
from opentrons_hardware.drivers.can_bus.can_messenger import CanMessenger
from opentrons_hardware.firmware_bindings.messages import MessageDefinition
from opentrons_hardware.firmware_bindings.messages.message_definitions import (
ClearAllMoveGroupsRequest,
AddLinearMoveRequest,
MoveCompleted,
ExecuteMoveGroupRequest,
HomeRequest,
GripperGripRequest,
GripperHomeRequest,
TipActionRequest,
TipActionResponse,
)
from opentrons_hardware.firmware_bindings.messages.payloads import (
AddLinearMoveRequestPayload,
ExecuteMoveGroupRequestPayload,
HomeRequestPayload,
GripperMoveRequestPayload,
TipActionRequestPayload,
)
from .constants import interrupts_per_sec
from opentrons_hardware.hardware_control.motion import (
MoveGroups,
MoveGroupSingleAxisStep,
MoveGroupSingleGripperStep,
MoveGroupTipActionStep,
MoveType,
SingleMoveStep,
)
from opentrons_hardware.firmware_bindings.utils import (
UInt8Field,
UInt32Field,
Int32Field,
)
from opentrons_hardware.firmware_bindings.messages.fields import (
PipetteTipActionTypeField,
)
from opentrons_hardware.hardware_control.motion import MoveStopCondition
from opentrons_hardware.hardware_control.motion_planning.move_utils import (
MoveConditionNotMet,
)
from .types import NodeDict
log = logging.getLogger(__name__)
_AcceptableMoves = Union[MoveCompleted, TipActionResponse]
_CompletionPacket = Tuple[ArbitrationId, _AcceptableMoves]
_Completions = List[_CompletionPacket]
class MoveGroupRunner:
"""A move command scheduler."""
def __init__(self, move_groups: MoveGroups, start_at_index: int = 0) -> None:
"""Constructor.
Args:
move_groups: The move groups to run.
start_at_index: The index the MoveGroupManager will start at
"""
self._move_groups = move_groups
self._start_at_index = start_at_index
self._is_prepped: bool = False
@staticmethod
def _has_moves(move_groups: MoveGroups) -> bool:
for move_group in move_groups:
for move in move_group:
for node, step in move.items():
return True
return False
async def prep(self, can_messenger: CanMessenger) -> None:
"""Prepare the move group. The first thing that happens during run().
prep() and execute() can be used to replace a single call to run() to
ensure tighter timing, if you want something else to start as soon as
possible to the actual execution of the move.
"""
if not self._has_moves(self._move_groups):
log.debug("No moves. Nothing to do.")
return
await self._clear_groups(can_messenger)
await self._send_groups(can_messenger)
self._is_prepped = True
async def execute(self, can_messenger: CanMessenger) -> NodeDict[float]:
"""Execute a pre-prepared move group. The second thing that run() does.
prep() and execute() can be used to replace a single call to run() to
ensure tighter timing, if you want something else to start as soon as
possible to the actual execution of the move.
"""
if not self._has_moves(self._move_groups):
log.debug("No moves. Nothing to do.")
return {}
if not self._is_prepped:
raise RuntimeError("A group must be prepped before it can be executed.")
move_completion_data = await self._move(can_messenger)
return self._accumulate_move_completions(move_completion_data)
async def run(self, can_messenger: CanMessenger) -> NodeDict[float]:
"""Run the move group.
Args:
can_messenger: a can messenger
Returns:
The current position after the move for all the axes that
acknowledged completing moves.
This function first prepares all connected devices to move (by sending
all the data for the moves over) and then executes the move with a
single call.
prep() and execute() can be used to replace a single call to run() to
ensure tighter timing, if you want something else to start as soon as
possible to the actual execution of the move.
"""
await self.prep(can_messenger)
return await self.execute(can_messenger)
@staticmethod
def _accumulate_move_completions(completions: _Completions) -> NodeDict[float]:
position: NodeDict[List[Tuple[Tuple[int, int], float]]] = defaultdict(list)
for arbid, completion in completions:
position[NodeId(arbid.parts.originating_node_id)].append(
(
(
completion.payload.group_id.value,
completion.payload.seq_id.value,
),
float(completion.payload.current_position_um.value) / 1000.0,
)
)
# for each node, pull the position from the completion with the largest
# combination of group id and sequence id
return {
node: next(
reversed(
sorted(poslist, key=lambda position_element: position_element[0])
)
)[1]
for node, poslist in position.items()
}
async def _clear_groups(self, can_messenger: CanMessenger) -> None:
"""Send commands to clear the message groups.
Args:
can_messenger: a can messenger
"""
await can_messenger.send(
node_id=NodeId.broadcast,
message=ClearAllMoveGroupsRequest(),
)
async def _send_groups(self, can_messenger: CanMessenger) -> None:
"""Send commands to set up the message groups."""
for group_i, group in enumerate(self._move_groups):
for seq_i, sequence in enumerate(group):
for node, step in sequence.items():
await can_messenger.send(
node_id=node,
message=self._get_message_type(
step, group_i + self._start_at_index, seq_i
),
)
def _convert_velocity(
self, velocity: Union[float, np.float64], interrupts: int
) -> Int32Field:
return Int32Field(int((velocity / interrupts) * (2**31)))
def _get_message_type(
self, step: SingleMoveStep, group: int, seq: int
) -> MessageDefinition:
"""Return the correct payload type."""
if isinstance(step, MoveGroupSingleAxisStep):
return self._get_stepper_motor_message(step, group, seq)
elif isinstance(step, MoveGroupTipActionStep):
return self._get_tip_action_motor_message(step, group, seq)
else:
return self._get_brushed_motor_message(step, group, seq)
def _get_brushed_motor_message(
self, step: MoveGroupSingleGripperStep, group: int, seq: int
) -> MessageDefinition:
if step.move_type == MoveType.home:
home_payload = GripperMoveRequestPayload(
group_id=UInt8Field(group),
seq_id=UInt8Field(seq),
duration=UInt32Field(int(step.duration_sec * step.pwm_frequency)),
freq=UInt32Field(int(step.pwm_frequency)),
duty_cycle=UInt32Field(int(step.pwm_duty_cycle)),
)
return GripperHomeRequest(payload=home_payload)
else:
linear_payload = GripperMoveRequestPayload(
group_id=UInt8Field(group),
seq_id=UInt8Field(seq),
duration=UInt32Field(int(step.duration_sec * step.pwm_frequency)),
freq=UInt32Field(int(step.pwm_frequency)),
duty_cycle=UInt32Field(int(step.pwm_duty_cycle)),
)
return GripperGripRequest(payload=linear_payload)
def _get_stepper_motor_message(
self, step: MoveGroupSingleAxisStep, group: int, seq: int
) -> MessageDefinition:
if step.move_type == MoveType.home:
home_payload = HomeRequestPayload(
group_id=UInt8Field(group),
seq_id=UInt8Field(seq),
duration=UInt32Field(int(step.duration_sec * interrupts_per_sec)),
velocity=self._convert_velocity(
step.velocity_mm_sec, interrupts_per_sec
),
)
return HomeRequest(payload=home_payload)
else:
linear_payload = AddLinearMoveRequestPayload(
request_stop_condition=UInt8Field(step.stop_condition),
group_id=UInt8Field(group),
seq_id=UInt8Field(seq),
duration=UInt32Field(int(step.duration_sec * interrupts_per_sec)),
acceleration=Int32Field(
int(
(
step.acceleration_mm_sec_sq
/ interrupts_per_sec
/ interrupts_per_sec
)
* (2**31)
)
),
velocity=Int32Field(
int((step.velocity_mm_sec / interrupts_per_sec) * (2**31))
),
)
return AddLinearMoveRequest(payload=linear_payload)
def _get_tip_action_motor_message(
self, step: MoveGroupTipActionStep, group: int, seq: int
) -> TipActionRequest:
tip_action_payload = TipActionRequestPayload(
group_id=UInt8Field(group),
seq_id=UInt8Field(seq),
duration=UInt32Field(int(step.duration_sec * interrupts_per_sec)),
velocity=self._convert_velocity(step.velocity_mm_sec, interrupts_per_sec),
action=PipetteTipActionTypeField(step.action),
request_stop_condition=UInt8Field(step.stop_condition),
)
return TipActionRequest(payload=tip_action_payload)
async def _move(self, can_messenger: CanMessenger) -> _Completions:
"""Run all the move groups."""
scheduler = MoveScheduler(self._move_groups)
try:
can_messenger.add_listener(scheduler)
completions = await scheduler.run(can_messenger)
finally:
can_messenger.remove_listener(scheduler)
return completions
class MoveScheduler:
"""A message listener that manages the sending of execute move group messages."""
def __init__(self, move_groups: MoveGroups) -> None:
"""Constructor."""
# For each move group create a set identifying the node and seq id.
self._moves: List[Set[Tuple[int, int]]] = []
self._durations: List[float] = []
self._stop_condition: List[MoveStopCondition] = []
for move_group in move_groups:
move_set = set()
duration = 0.0
for seq_id, move in enumerate(move_group):
move_set.update(set((k.value, seq_id) for k in move.keys()))
duration += float(list(move.values())[0].duration_sec)
for step in move_group[seq_id]:
self._stop_condition.append(move_group[seq_id][step].stop_condition)
self._moves.append(move_set)
self._durations.append(duration)
log.debug(f"Move scheduler running for groups {move_groups}")
self._completion_queue: asyncio.Queue[_CompletionPacket] = asyncio.Queue()
self._event = asyncio.Event()
def _remove_move_group(
self, message: _AcceptableMoves, arbitration_id: ArbitrationId
) -> None:
seq_id = message.payload.seq_id.value
group_id = message.payload.group_id.value
node_id = arbitration_id.parts.originating_node_id
log.info(
f"Received completion for {node_id} group {group_id} seq {seq_id}"
", which "
f"{'is' if (node_id, seq_id) in self._moves[group_id] else 'isn''t'}"
" in group"
)
try:
self._moves[group_id].remove((node_id, seq_id))
self._completion_queue.put_nowait((arbitration_id, message))
except KeyError:
log.warning(
f"Got a move ack for ({node_id}, {seq_id}) which is not in this "
"group; may have leaked from an earlier timed-out group"
)
if not self._moves[group_id]:
log.info(f"Move group {group_id} has completed.")
self._event.set()
def _handle_move_completed(self, message: MoveCompleted) -> None:
group_id = message.payload.group_id.value
ack_id = message.payload.ack_id.value
if self._stop_condition[
group_id
] == MoveStopCondition.limit_switch and ack_id != UInt8Field(2):
if ack_id == UInt8Field(1):
condition = "Homing timed out."
log.warning(f"Homing failed. Condition: {condition}")
raise MoveConditionNotMet()
def _handle_tip_action(self, message: TipActionResponse) -> None:
group_id = message.payload.group_id.value
ack_id = message.payload.ack_id.value
limit_switch = bool(
self._stop_condition[group_id] == MoveStopCondition.limit_switch
)
success = message.payload.success.value
# TODO need to add tip action type to the response message.
if limit_switch and limit_switch != ack_id and not success:
condition = "Tip still detected."
log.warning(f"Drop tip failed. Condition {condition}")
raise MoveConditionNotMet()
elif not limit_switch and not success:
condition = "Tip not detected."
log.warning(f"Pick up tip failed. Condition {condition}")
raise MoveConditionNotMet()
def __call__(
self, message: MessageDefinition, arbitration_id: ArbitrationId
) -> None:
"""Incoming message handler."""
if isinstance(message, MoveCompleted):
self._remove_move_group(message, arbitration_id)
self._handle_move_completed(message)
elif isinstance(message, TipActionResponse):
self._remove_move_group(message, arbitration_id)
self._handle_tip_action(message)
async def run(self, can_messenger: CanMessenger) -> _Completions:
"""Start each move group after the prior has completed."""
for group_id in range(len(self._moves)):
self._event.clear()
log.info(f"Executing move group {group_id}.")
await can_messenger.send(
node_id=NodeId.broadcast,
message=ExecuteMoveGroupRequest(
payload=ExecuteMoveGroupRequestPayload(
group_id=UInt8Field(group_id),
# TODO (al, 2021-11-8): The triggers should be populated
# with actual values.
start_trigger=UInt8Field(0),
cancel_trigger=UInt8Field(0),
)
),
)
try:
# TODO: The max here can be removed once can_driver.send() no longer
# returns before the message actually hits the bus. Right now it
# returns when the message is enqueued in the kernel, meaning that
# for short move durations we can see the timeout expiring before
# the execute even gets sent.
await asyncio.wait_for(
self._event.wait(), max(1.0, self._durations[group_id] * 1.1)
)
except asyncio.TimeoutError:
log.warning("Move set timed out")
def _reify_queue_iter() -> Iterator[_CompletionPacket]:
while not self._completion_queue.empty():
yield self._completion_queue.get_nowait()
return list(_reify_queue_iter())
|
{"hexsha": "f3b765c7643dbde437e1a2d4765e0d9891e6d6e7", "size": 16281, "ext": "py", "lang": "Python", "max_stars_repo_path": "hardware/opentrons_hardware/hardware_control/move_group_runner.py", "max_stars_repo_name": "Opentrons/protocol_framework", "max_stars_repo_head_hexsha": "ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hardware/opentrons_hardware/hardware_control/move_group_runner.py", "max_issues_repo_name": "Opentrons/protocol_framework", "max_issues_repo_head_hexsha": "ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hardware/opentrons_hardware/hardware_control/move_group_runner.py", "max_forks_repo_name": "Opentrons/protocol_framework", "max_forks_repo_head_hexsha": "ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6009975062, "max_line_length": 88, "alphanum_fraction": 0.6252687181, "include": true, "reason": "import numpy", "num_tokens": 3344}
|
#!/usr/bin/env python3
import sys
try:
#TODO this is a hack, at minimum should be done s.t. it'll work for aaaany ros distribution
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
except Exception as e:
print(e)
print("no ros kinetic found in path")
import numpy as np
from imutils.video import VideoStream
from imutils.video import FPS
import imutils
import cv2
import time
import atexit
import rospy
import os
this_folder_abs_path = os.path.abspath(os.path.dirname(__file__))
class Tracker:
def __init__(self, source=0, show_position=True):
self.cap = cv2.VideoCapture(source)
time.sleep(1)
self.x = 0
self.y = 0
self.success = False
self.capture_flag = False
self.capture_counter = 0
self.show_position = show_position
def updateTracker(self):
ret, image = self.cap.read()
if ret:
self.last_frame_time = time.time()
self.frame = image.copy()
image = cv2.GaussianBlur(image,(3,3),0)
output = image.copy()
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_red = np.array([30,50,100])
upper_red = np.array([36,255,255])
mask = cv2.inRange(hsv, lower_red, upper_red)
kernel = np.ones((5,5),np.uint8)
mask = cv2.erode(mask,kernel,iterations = 3)
mask = cv2.dilate(mask,kernel,iterations = 6)
res = cv2.bitwise_and(image,image, mask= mask)
hsv2 = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)
lower_red = np.array([28,18,75])
upper_red = np.array([40,255,255])
mask2 = cv2.inRange(hsv2, lower_red, upper_red)
res2 = cv2.bitwise_and(res,res, mask= mask2)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# edge = cv2.Canny(gray, 20, 220)
# edge = cv2.bitwise_and(edge,edge, mask= mask)
circles = None
# circles = cv2.HoughCircles(edge, cv2.HOUGH_GRADIENT, dp=1, minDist=10,param1=5, param2=8, minRadius=10, maxRadius=13)
# cv2.imshow("res", res2)
I = np.where(mask2 > 0)
print(len(I[1]))
# cv2.circle(output, (x, y), 10, (0, 255, 0), 4)
# cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
# show the output image
# cv2.imshow("output", np.hstack([image, output]))
if len(I[0] > 100) :
self.x = np.mean(I[1])
self.y = np.mean(I[0])
self.success = True
else:
# self.x = 0
# self.y = 0
self.success = False
else:
# self.x = 0
# self.y = 0
self.success = False
def getTrackerCenter(self):
return (self.x, self.y)
def render(self,target=None, capture_flag=False, save_path='./video.avi'):
if self.capture_flag == False and capture_flag == True:
print('starting capture', save_path)
self.capture_flag = True
self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc('M','J','P','G'), 20, (640,480))
if self.capture_flag == True and capture_flag == False:
self.capture_flag = False
self.writer.release()
output = self.frame.copy()
x = np.int32(self.x)
y = np.int32(self.y)
#if self.show_position:
#cv2.circle(output, (x, y), 10, (255, 100, 0), 4)
#cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
if target is not None:
x = np.int32(target[0])
y = np.int32(target[1])
cv2.circle(output, (x, y), 10, (0, 255, 0), -1)
cv2.imshow('output', output)
key = cv2.waitKey(1) & 0xFF
# if key == ord('S'):
# self.writer_flag
if self.capture_flag:
print("capturing", save_path)
self.writer.write(output)
def getDims(self):
return self.frame.shape
|
{"hexsha": "7ce65a01169f459ed9442d59105a5e88cfee4f16", "size": 4156, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/environments/simple_tracker.py", "max_stars_repo_name": "gamerDecathlete/neural_jacobian_estimation", "max_stars_repo_head_hexsha": "44deed91f0650830dd2da1796e67d084f0918995", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-26T23:58:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T18:06:49.000Z", "max_issues_repo_path": "src/environments/simple_tracker.py", "max_issues_repo_name": "gamerDecathlete/neural_jacobian_estimation", "max_issues_repo_head_hexsha": "44deed91f0650830dd2da1796e67d084f0918995", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/environments/simple_tracker.py", "max_forks_repo_name": "gamerDecathlete/neural_jacobian_estimation", "max_forks_repo_head_hexsha": "44deed91f0650830dd2da1796e67d084f0918995", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2203389831, "max_line_length": 131, "alphanum_fraction": 0.5526948989, "include": true, "reason": "import numpy", "num_tokens": 1130}
|
/**
* Copyright (c) 2015 Eugene Lazin <4lazin@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "queryprocessor.h"
#include "seriesparser.h"
#include "util.h"
#include "datetime.h"
#include <string>
#include <map>
#include <algorithm>
#include <regex>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
namespace Akumuli {
// //
// Series Matcher //
// //
static const SeriesMatcher::StringT EMPTY = std::make_pair(nullptr, 0);
SeriesMatcher::SeriesMatcher(uint64_t starting_id)
: table(StringTools::create_table(0x1000))
, series_id(starting_id)
{
if (starting_id == 0u) {
AKU_PANIC("Bad series ID");
}
}
uint64_t SeriesMatcher::add(const char* begin, const char* end) {
auto id = series_id++;
StringT pstr = pool.add(begin, end, id);
auto tup = std::make_tuple(std::get<0>(pstr), std::get<1>(pstr), id);
table[pstr] = id;
inv_table[id] = pstr;
names.push_back(tup);
return id;
}
void SeriesMatcher::_add(std::string series, uint64_t id) {
if (series.empty()) {
return;
}
const char* begin = &series[0];
const char* end = begin + series.size();
StringT pstr = pool.add(begin, end, id);
table[pstr] = id;
inv_table[id] = pstr;
}
uint64_t SeriesMatcher::match(const char* begin, const char* end) {
int len = end - begin;
StringT str = std::make_pair(begin, len);
auto it = table.find(str);
if (it == table.end()) {
return 0ul;
}
return it->second;
}
SeriesMatcher::StringT SeriesMatcher::id2str(uint64_t tokenid) {
auto it = inv_table.find(tokenid);
if (it == inv_table.end()) {
return EMPTY;
}
return it->second;
}
void SeriesMatcher::pull_new_names(std::vector<SeriesMatcher::SeriesNameT> *buffer) {
std::swap(names, *buffer);
}
// //
// Series Parser //
// //
//! Move pointer to the of the whitespace, return this pointer or end on error
static const char* skip_space(const char* p, const char* end) {
while(p < end && (*p == ' ' || *p == '\t')) {
p++;
}
return p;
}
static const char* copy_until(const char* begin, const char* end, const char pattern, char** out) {
char* it_out = *out;
while(begin < end) {
*it_out = *begin;
it_out++;
begin++;
if (*begin == pattern) {
break;
}
}
*out = it_out;
return begin;
}
//! Move pointer to the beginning of the next tag, return this pointer or end on error
static const char* skip_tag(const char* p, const char* end, bool *error) {
// skip until '='
while(p < end && *p != '=' && *p != ' ' && *p != '\t') {
p++;
}
if (p == end || *p != '=') {
*error = true;
return end;
}
// skip until ' '
const char* c = p;
while(c < end && *c != ' ') {
c++;
}
*error = c == p;
return c;
}
aku_Status SeriesParser::to_normal_form(const char* begin, const char* end,
char* out_begin, char* out_end,
const char** keystr_begin,
const char** keystr_end)
{
// Verify args
if (end < begin) {
return AKU_EBAD_ARG;
}
if (out_end < out_begin) {
return AKU_EBAD_ARG;
}
int series_name_len = end - begin;
if (series_name_len > AKU_LIMITS_MAX_SNAME) {
return AKU_EBAD_DATA;
}
if (series_name_len > (out_end - out_begin)) {
return AKU_EBAD_ARG;
}
char* it_out = out_begin;
const char* it = begin;
// Get metric name
it = skip_space(it, end);
it = copy_until(it, end, ' ', &it_out);
it = skip_space(it, end);
if (it == end) {
// At least one tag should be specified
return AKU_EBAD_DATA;
}
*keystr_begin = it_out;
// Get pointers to the keys
const char* tags[AKU_LIMITS_MAX_TAGS];
auto ix_tag = 0u;
bool error = false;
while(it < end && ix_tag < AKU_LIMITS_MAX_TAGS) {
tags[ix_tag] = it;
it = skip_tag(it, end, &error);
it = skip_space(it, end);
if (!error) {
ix_tag++;
} else {
break;
}
}
if (error) {
// Bad string
return AKU_EBAD_DATA;
}
if (ix_tag == 0) {
// User should specify at least one tag
return AKU_EBAD_DATA;
}
std::sort(tags, tags + ix_tag, [tags, end](const char* lhs, const char* rhs) {
// lhs should be always less thenn rhs
auto lenl = 0u;
auto lenr = 0u;
if (lhs < rhs) {
lenl = rhs - lhs;
lenr = end - rhs;
} else {
lenl = end - lhs;
lenr = lhs - rhs;
}
auto it = 0u;
while(true) {
if (it >= lenl || it >= lenr) {
return it < lenl;
}
if (lhs[it] == '=' || rhs[it] == '=') {
return lhs[it] == '=';
}
if (lhs[it] < rhs[it]) {
return true;
} else if (lhs[it] > rhs[it]) {
return false;
}
it++;
}
return true;
});
// Copy tags to output string
for (auto i = 0u; i < ix_tag; i++) {
// insert space
*it_out++ = ' ';
// insert tag
const char* tag = tags[i];
copy_until(tag, end, ' ', &it_out);
}
*keystr_begin = skip_space(*keystr_begin, out_end);
*keystr_end = it_out;
return AKU_SUCCESS;
}
}
|
{"hexsha": "18fa38c904d89c7f9f2e60e079e581a4c9ab6fe3", "size": 6202, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libakumuli/seriesparser.cpp", "max_stars_repo_name": "vladon/Akumuli", "max_stars_repo_head_hexsha": "c45672a23b929ccb3a5743cc5e9aae980c160eb0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libakumuli/seriesparser.cpp", "max_issues_repo_name": "vladon/Akumuli", "max_issues_repo_head_hexsha": "c45672a23b929ccb3a5743cc5e9aae980c160eb0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libakumuli/seriesparser.cpp", "max_forks_repo_name": "vladon/Akumuli", "max_forks_repo_head_hexsha": "c45672a23b929ccb3a5743cc5e9aae980c160eb0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-09-22T07:11:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-22T07:11:13.000Z", "avg_line_length": 26.1687763713, "max_line_length": 99, "alphanum_fraction": 0.5386971945, "num_tokens": 1635}
|
#!/usr/bin/env python
"""
Local Processing Unit (LPU) with plugin support for various neuron/synapse models.
"""
import time
import collections
import numbers
import copy
import itertools
from future.utils import iteritems
from past.builtins import long
from builtins import zip
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import pycuda.elementwise as elementwise
import numpy as np
import networkx as nx
#import time
# Work around bug in networkx < 1.9 that causes networkx to choke on GEXF
# files with boolean attributes that contain the strings 'True' or 'False'
# (bug already observed in https://github.com/networkx/networkx/pull/971)
nx.readwrite.gexf.GEXF.convert_bool['false'] = False
nx.readwrite.gexf.GEXF.convert_bool['False'] = False
nx.readwrite.gexf.GEXF.convert_bool['true'] = True
nx.readwrite.gexf.GEXF.convert_bool['True'] = True
from neurokernel.mixins import LoggerMixin
from neurokernel.core_gpu import Module, CTRL_TAG, GPOT_TAG, SPIKE_TAG
from neurokernel.tools.gpu import get_by_inds
from neurokernel.plsel import Selector
from types import *
from collections import Counter
from .utils.simpleio import *
from .utils import parray
from .NDComponents import *
from .MemoryManager import MemoryManager
import pdb
PORT_IN_GPOT = 'port_in_gpot'
PORT_IN_SPK = 'port_in_spk'
PORT_OUT_GPOT = 'port_out_gpot'
PORT_OUT_SPK = 'port_out_spk'
class LPU(Module):
@staticmethod
def conv_legacy_graph(g):
"""
Converts a gexf from legacy neurodriver format to one currently
supported
"""
# Find maximum ID in given graph so that we can use it to create new nodes
# with IDs that don't overlap with those that already exist:
max_id = 0
for id in g.nodes():
if isinstance(id, basestring):
if id.isdigit():
max_id = max(max_id, int(id))
else:
raise ValueError('node id must be an integer')
elif isinstance(id, numbers.Integral):
max_id = max(max_id, id)
else:
raise ValueError('node id must be an integer')
gen_new_id = itertools.count(max_id+1).next
# Create LPU and interface nodes and connect the latter to the former via an
# Owns edge:
g_new = nx.MultiDiGraph()
# Transformation:
# 1. nonpublic neuron node -> neuron node
# 2. public neuron node -> neuron node with
# output edge to output port
# 3. input port -> input port
# 4. synapse edge -> synapse node + 2 edges connecting
# transformed original input/output nodes
edges_to_out_ports = [] # edges to new output port nodes:
for id, data in g.nodes(data=True):
# Don't clobber the original graph's data:
data = copy.deepcopy(data)
if 'public' in data and data['public']:
new_id = gen_new_id()
port_data = {'selector': data['selector'],
'port_type': 'spike' if data['spiking'] else 'gpot',
'port_io': 'out',
'class': 'Port'}
g_new.add_node(new_id, port_data)
edges_to_out_ports.append((id, new_id))
del data['selector']
if 'model' in data:
if data['model'] == 'port_in_gpot':
for a in data:
if a!='selector': del data[a]
data['class'] = 'Port'
data['port_type'] = 'gpot'
data['port_io'] = 'in'
elif data['model'] == 'port_in_spk':
for a in data:
if a!='selector': del data[a]
data['class'] = 'Port'
data['port_type'] = 'spike'
data['port_io'] = 'in'
else:
data['class'] = data['model']
# Don't need to several attributes that are implicit:
for a in ['model', 'public', 'spiking','extern']:
if a in data: del data[a]
g_new.add_node(id, attr_dict=data)
# Create synapse nodes for each edge in original graph and connect them to
# the source/dest neuron/port nodes:
for from_id, to_id, data in g.edges(data=True):
data = copy.deepcopy(data)
if data['model'] == 'power_gpot_gpot':
data['class'] = 'PowerGPotGPot'
else:
data['class'] = data['model']
del data['model']
if 'id' in data: del data['id']
new_id = gen_new_id()
g_new.add_node(new_id, attr_dict=data)
g_new.add_edge(from_id, new_id, attr_dict={})
g_new.add_edge(new_id, to_id, attr_dict={})
# Connect output ports to the neurons that emit data through them:
for from_id, to_id in edges_to_out_ports:
g_new.add_edge(from_id, to_id, attr_dict={})
return g_new
@staticmethod
def graph_to_dicts(graph, uid_key=None, class_key='class',
remove_edge_id = False):
"""
Convert graph of LPU neuron/synapse data to Python data structures.
Parameters
----------
graph : networkx.MultiDiGraph
NetworkX graph containing LPU data.
Returns
-------
comp_dict : dict
A dictionary of components of which
keys are model names, and
values are dictionaries of parameters/attributes associated
with the model.
Keys of a dictionary of parameters are the names of them,
and values of corresponding keys are lists of value of parameters.
One of the parameters is called 'id' and by default it
uses the id of the node in the graph.
If uid_keys is specified, id will use the specified parameter.
Therefore, comp_dict has the following structure:
comp_dict = {}
comp_dict[model_name_1] = {}
comp_dict[model_name_1][parameter_1] = []
...
comp_dict[model_name_1][parameter_N] = []
comp_dict[model_name_1][id] = []
...
comp_dict[model_name_M] = {}
comp_dict[model_name_M][parameter_1] = []
...
comp_dict[model_name_M][parameter_N] = []
comp_dict[model_name_M][id] = []
conns : list
A list of edges contained in graph describing the relation
between components
Example
-------
TODO: Update
Notes
-----
TODO: Update
"""
comp_dict = {}
comps = graph.nodes.items()
all_component_types = list(set([comp[class_key] for uid, comp in comps]))
for model in all_component_types:
sub_comps = [comp for comp in comps \
if comp[1][class_key] == model]
all_keys = [set(comp[1]) for comp in sub_comps]
key_intersection = set.intersection(*all_keys)
key_union = set.union(*all_keys)
# For visually checking if any essential parameter is dropped
ignored_keys = list(key_union-key_intersection)
if ignored_keys:
print('parameters of model {} ignored: {}'.format(model, ignored_keys))
del all_keys
sub_comp_keys = list(key_intersection)
if model == 'Port':
assert('selector' in sub_comp_keys)
comp_dict[model] = {
k: [comp[k] for uid, comp in sub_comps] \
for k in sub_comp_keys if not k in [uid_key, class_key]}
comp_dict[model]['id'] = [comp[uid_key] if uid_key else uid \
for uid, comp in sub_comps]
print('Number of {}: {}'.format(model, len(comp_dict[model]['id'])))
# for id, comp in comps:
# model = comp[class_key]
#
# # For port, make sure selector is specified
# if model == 'Port':
# assert('selector' in comp.keys())
#
# # if the neuron model does not appear before, add it into n_dict
# if model not in comp_dict:
# comp_dict[model] = {k:[] for k in comp.keys() + ['id']}
#
# # Same model should have the same attributes
# if not set(comp_dict[model].keys()) == set(comp.keys() + ['id']):
# raise KeyError("keys of component does not match with that of "+\
# model+": "+ str(set(comp_dict[model].keys())) +
# str(set(comp.keys() + ['id'])))
#
# # add data to the subdictionary of comp_dict
# for key in comp.iterkeys():
# if not key==uid_key:
# comp_dict[model][key].append( comp[key] )
# if uid_key:
# comp_dict[model]['id'].append(comp[uid_key])
# else:
# comp_dict[model]['id'].append( id )
#
# # Remove duplicate model information:
# for val in comp_dict.itervalues(): val.pop(class_key)
# Extract connections
conns = graph.edges(data=True)
if remove_edge_id:
for pre, post, conn in conns:
conn.pop('id', None)
return comp_dict, conns
@staticmethod
def lpu_parser(filename):
"""
GEXF LPU specification parser.
Extract LPU specification data from a GEXF file and store it in
Python data structures.
TODO: Update
Parameters
----------
filename : str
GEXF filename.
Returns
-------
TODO: Update
"""
graph = nx.read_gexf(filename)
return LPU.graph_to_dicts(graph, remove_edge_id = True)
@staticmethod
def lpu_parser_legacy(filename):
"""
TODO: Update
"""
graph = nx.read_gexf(filename)
return LPU.graph_to_dicts(LPU.conv_legacy_graph(graph))
@classmethod
def extract_in_gpot(cls, comp_dict, uid_key):
"""
Return selectors of non-spiking input ports.
"""
if not 'Port' in comp_dict: return ('',[])
a = list(zip(*[(sel,uid) for sel,ptype,io,uid in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io'],
comp_dict['Port'][uid_key]) if ptype=='gpot' \
and io=='in']))
if not a: a = ('',[])
return a
@classmethod
def extract_in_spk(cls, comp_dict, uid_key):
"""
Return selectors of spiking input ports.
"""
if not 'Port' in comp_dict: return ('',[])
a = list(zip(*[(sel,uid) for sel,ptype,io,uid in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io'],
comp_dict['Port'][uid_key]) if ptype=='spike' \
and io=='in']))
if not a: a = ('',[])
return a
@classmethod
def extract_out_gpot(cls, comp_dict, uid_key):
"""
Return selectors of non-spiking output neurons.
"""
if not 'Port' in comp_dict: return ('',[])
a = list(zip(*[(sel,uid) for sel,ptype,io,uid in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io'],
comp_dict['Port'][uid_key]) if ptype=='gpot' \
and io=='out']))
if not a: a = ('',[])
return a
@classmethod
def extract_out_spk(cls, comp_dict, uid_key):
"""
Return selectors of spiking output neurons.
"""
if not 'Port' in comp_dict: return ('',[])
a = list(zip(*[(sel,uid) for sel,ptype,io,uid in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io'],
comp_dict['Port'][uid_key]) if ptype=='spike' \
and io=='out']))
if not a: a = ('',[])
return a
@classmethod
def extract_sel_in_gpot(cls, comp_dict):
"""
Return selectors of non-spiking input ports.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel,ptype,io in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io']) \
if ptype=='gpot' and io=='in'])
@classmethod
def extract_sel_in_spk(cls, comp_dict):
"""
Return selectors of spiking input ports.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel,ptype,io in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io']) \
if ptype=='spike' and io=='in'])
@classmethod
def extract_sel_out_gpot(cls, comp_dict):
"""
Return selectors of non-spiking output neurons.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel,ptype,io in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io']) \
if ptype=='gpot' and io=='out'])
@classmethod
def extract_sel_out_spk(cls, comp_dict):
"""
Return selectors of spiking output neurons.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel,ptype,io,uid in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_type'],
comp_dict['Port']['port_io']) \
if ptype=='spike' and io=='out'])
@classmethod
def extract_sel_in(cls, comp_dict):
"""
Return selectors of all input ports.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel, io in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_io']) if io=='in'])
@classmethod
def extract_sel_out(cls, comp_dict):
"""
Return selectors of all output neurons.
"""
if not 'Port' in comp_dict: return ''
return ','.join([sel for sel, io in \
zip(comp_dict['Port']['selector'],
comp_dict['Port']['port_io']) if io=='out'])
@classmethod
def extract_sel_all(cls, comp_dict):
"""
Return selectors for all input ports and output neurons.
"""
return ','.join(filter(None, \
[cls.extract_in(comp_dict), cls.extract_out(comp_dict)]))
def __init__(self, dt, comp_dict, conn_list, device=0, input_processors=[],
output_processors=[], ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG,
spike_tag=SPIKE_TAG, rank_to_id=None, routing_table=None,
uid_key='id', debug=False, columns=['io', 'type', 'interface'],
cuda_verbose=False, time_sync=False, default_dtype=np.double,
control_inteface=None, id=None, extra_comps=[],
print_timing=False):
LoggerMixin.__init__(self, 'LPU {}'.format(id))
assert('io' in columns)
assert('type' in columns)
assert('interface' in columns)
self.LPU_id = id
self.dt = dt
self.time = 0
self.debug = debug
self.device = device
self.default_dtype = default_dtype
self.control_inteface = control_inteface
self.print_timing = print_timing
if cuda_verbose:
self.compile_options = ['--ptxas-options=-v']
else:
self.compile_options = []
if not isinstance(input_processors, list):
input_processors = [input_processors]
if not isinstance(output_processors, list):
input_processors = [output_processors]
self.output_processors = output_processors
self.input_processors = input_processors
self.uid_key = uid_key
self.uid_generator = uid_generator()
# Load all NDComponents:
self._load_components(extra_comps=extra_comps)
if self.print_timing:
start = time.time()
# Ignore models without implementation
models_to_be_deleted = []
for model in comp_dict:
if not model in self._comps and not model in ['Port','Input']:
self.log_info("Ignoring Model %s: Can not find implementation"
% model)
models_to_be_deleted.append(model)
for model in models_to_be_deleted:
del comp_dict[model]
# Assume zero delay by default
self.variable_delay_map = {}
# Generate a uid to model map of components
self.uid_model_map = {}
for model,attribs in iteritems(comp_dict):
for i,uid in enumerate(attribs[uid_key]):
self.uid_model_map[uid] = model
# Map from post synaptic component to aggregator uid
agg_map = {}
agg = {}
#start = time.time()
conns = []
self.in_port_vars = {}
self.out_port_conns = []
comp_uid_order = {}
for model, attribs in comp_dict.items():
comp_uid_order[model] = {uid: i for i, uid in enumerate(attribs[uid_key])}
if self.print_timing:
self.log_info("Elapsed time for processing comp_dict: {:.3f} seconds".format(time.time()-start))
start = time.time()
# Process connections between components, remove inconsitent connections
# calculate required delays, infer variable if required
in_port_vars_set = {}
reverse_set = ['reverse','Vr','VR','reverse_potential']
comp_updates = {name: v['updates'] if not name=='Port' else [] \
for name, v in self._comps.items()}
comp_updates.update({'Port': []})
comp_accesses = {name: v['accesses'] if not name=='Port' else [] \
for name, v in self._comps.items()}
comp_accesses.update({'Port': []})
for pre, post, data in conn_list:
try:
pre_model = self.uid_model_map[pre]
except KeyError:
continue
try:
post_model = self.uid_model_map[post]
except KeyError:
continue
pre_updates = comp_updates[pre_model]
pre_updates_set = set(pre_updates)
post_accesses = comp_accesses[post_model]
post_accesses_set = set(post_accesses)
pre_post_intersection = pre_updates_set&post_accesses_set
g_in_pre_update = 'g' in pre_updates_set # not so useful
V_in_pre_update = 'V' in pre_updates_set
# Update delay
delay = max(int(round((data['delay']/dt))) \
if 'delay' in data else 0, 1) - 1
data['delay'] = delay
if post_model == 'Aggregator':
agg_map[post] = post
reverse_key = None
for k in reverse_set:
if k in data:
reverse_key = k
reverse = data[reverse_key]
break
if reverse_key is None:
# else look in the attibutes of the synapse
s = (set(['reverse','Vr','VR','reverse_potential'])&
set(comp_dict[pre_model]))
if s: reverse_key = s.pop()
if reverse_key:
reverse = comp_dict[pre_model][reverse_key][\
comp_uid_order[pre_model][pre]]
if g_in_pre_update:
data['reverse'] = reverse
else:
if g_in_pre_update:
self.log_info('Assuming reverse potential ' +
'to be zero for connection from' +
'%s to %s'%(pre,post))
data['reverse'] = 0
reverse = 0
if post in agg:
if g_in_pre_update:
agg[post].append({'pre':pre,'reverse':reverse,
'variable':'g'})
elif V_in_pre_update:
agg[post].append({'pre':pre, 'variable':'V'})
else:
# Make sure aggregator has access to postsynaptic voltage
if g_in_pre_update:
agg[post] = [{'pre':pre,'reverse':reverse,'variable':'g'}]
elif V_in_pre_update:
agg[post] = [{'pre':pre,'variable':'V'}]
if g_in_pre_update:
agg[post][-1].update(data)
self.variable_delay_map['g'] = max(data['delay'],
self.variable_delay_map['g'] if 'g' in \
self.variable_delay_map else 0)
# Ensure consistency
# Insert Aggregator between g->V if required. Assume 'reverse' or
# 'Vr' or 'VR' or 'reverse_potential' id present as a param in the
# synapse in that case
if not pre_post_intersection:
if g_in_pre_update and 'I' in post_accesses_set:
# start2 = time.time()
# First look for reverse in the attributes of the edge
reverse_key = None
for k in reverse_set:
if k in data:
reverse_key = k
reverse = data[reverse_key]
break
if reverse_key is None:
# else look in the attibutes of the synapse
s = (set(['reverse','Vr','VR','reverse_potential'])&
set(comp_dict[pre_model]))
if s: reverse_key = s.pop()
if reverse_key:
reverse = comp_dict[pre_model][reverse_key][\
comp_uid_order[pre_model][pre]]
else:
self.log_info('Assuming reverse potential ' +
'to be zero for connection from' +
'%s to %s'%(pre,post))
reverse = 0
data.update({'pre':pre,'reverse':reverse,
'variable':'g'})
if post in agg:
agg[post].append(data)
else:
# Make sure aggregator has access to postsynaptic voltage
agg[post] = [{'pre':post,'variable':'V'},
data]
if post not in agg_map:
uid = self.uid_generator.generate_uid()
agg_map[post] = uid
self.variable_delay_map['g'] = max(data['delay'],
self.variable_delay_map['g'] if 'g' in \
self.variable_delay_map else 0)
elif pre_model == 'Port':
if not 'variable' in data:
data['variable'] = post_accesses[0]
if not data['variable'] in self.in_port_vars:
self.in_port_vars[data['variable']] = []
in_port_vars_set[data['variable']] = set()
if pre not in in_port_vars_set[data['variable']]:
self.in_port_vars[data['variable']].append(pre)
in_port_vars_set[data['variable']].add(pre)
conns.append((pre, post, data))
self.variable_delay_map[data['variable']] = max(data['delay'],
self.variable_delay_map[data['variable']] if \
data['variable'] in self.variable_delay_map else 0)
elif post_model == 'Port':
if not 'variable' in data:
data['variable'] = pre_updates[0]
self.out_port_conns.append((pre, post, data['variable']))
else:
self.log_info("Ignoring connection %s -> %s"%(pre,post))
continue
var = data['variable'] if 'variable' in data else None
if not var:
var = pre_post_intersection.pop()
elif not (var in pre_updates_set and var in post_accesses_set):
continue
data['variable'] = var
self.variable_delay_map[data['variable']] = max(data['delay'],
self.variable_delay_map[data['variable']] if \
data['variable'] in self.variable_delay_map else 0)
# connection to Aggregator will be added later
if 'Aggregator' not in post_model:
conns.append((pre,post,data))
if self.print_timing:
self.log_info("Elapsed time for processing conn_list: {:.3f} seconds".format(time.time()-start))
start = time.time()
if agg and not 'Aggregator' in comp_dict:
comp_dict['Aggregator'] = {uid_key: []}
if agg:
agg_uid_key_set = set(comp_dict['Aggregator'][uid_key])
# Add updated aggregator components to component dictionary
# and create connections for aggregator
for post, conn_list in agg.items():
uid = agg_map[post]
if uid not in agg_uid_key_set:
keys = [k for k in comp_dict['Aggregator'] if k != uid_key]
comp_dict['Aggregator'][uid_key].append(uid)
agg_uid_key_set.add(uid)
self.uid_model_map[uid] = 'Aggregator'
for k in keys:
comp_dict['Aggregator'][k].append(str(uid))
for conn in conn_list:
conns.append((conn['pre'],uid,{k:v for k,v in conn.items()
if k!='pre'}))
# Add a 'I' connection between Aggregator and neuron if they are
# automatically generated.
# This can be checking if the 'pre' attribute in the item
# in conn_list with 'variable' 'V' is the same neuron as post
if post == [tmp['pre'] for tmp in conn_list if tmp['variable']=='V'][0]:
conns.append((uid,post,{'variable':'I', 'delay': 0}))
if self.print_timing:
self.log_info("Elapsed time for processing aggregator: {:.3f} seconds".format(time.time()-start))
start = time.time()
self.conn_dict = {}
# RePackage connections
for (pre, post, data) in conns:
if not post in self.conn_dict:
self.conn_dict[post] = {}
var = data['variable']
data.pop('variable')
if not var in self.conn_dict[post]:
self.conn_dict[post][var] = {k:[] for k in ['pre'] + list(data)}
self.conn_dict[post][var]['pre'].append(pre)
for k in data: self.conn_dict[post][var][k].append(data[k])
if self.print_timing:
self.log_info("Elapsed time for repackaging connections: {:.3f} seconds".format(time.time()-start))
start = time.time()
# Add connections for component with no incoming connections
for uid, model in iteritems(self.uid_model_map):
if model == 'Port':
continue
for var in self._comps[model]['accesses']:
if ((not uid in self.conn_dict or not var in self.conn_dict[uid])):
pre = self.uid_generator.generate_uid(input=True)
if not var in self.variable_delay_map:
self.variable_delay_map[var]=0
if not uid in self.conn_dict: self.conn_dict[uid] = {}
if model == 'Aggregator' and var == 'g':
self.conn_dict[uid][var] = {'pre':[pre],'delay':[0],
'reverse': [0]} #'id': [0],
else:
self.conn_dict[uid][var] = {'pre':[pre],'delay':[0]}
if not 'Input' in comp_dict:
comp_dict['Input'] = {}
if not var in comp_dict['Input']:
comp_dict['Input'][var] = {self.uid_key: []}
comp_dict['Input'][var][self.uid_key].append(pre)
if self.print_timing:
self.log_info("Elapsed time for adding connections for component with no incoming connections: {:.3f} seconds".format(time.time()-start))
start = time.time()
# Optimize ordering (TODO)
self.uid_ind_map = {m:{uid:i for i,uid in enumerate(n[uid_key])}
for m,n in comp_dict.items() if not m=='Input'}
if 'Input' in comp_dict:
self.uid_ind_map['Input'] = {var:{uid:i for i, uid in enumerate(d[uid_key])}
for var, d in comp_dict['Input'].items()}
# Reorder components
for m, n in comp_dict.items():
if m=='Input':
for var, d in n.items():
order = np.argsort([self.uid_ind_map[m][var][uid] for uid in d[uid_key]])
d[uid_key] = [d[uid_key][i] for i in order]
continue
order = np.argsort([self.uid_ind_map[m][uid] for uid in n[uid_key]])
for k in n:
n[k] = [n[k][i] for i in order]
# Reorder input port variables
for var, uids in self.in_port_vars.items():
order = np.argsort([self.uid_ind_map['Port'][uid] for uid in uids])
self.in_port_vars[var] = [uids[i] for i in order]
if self.print_timing:
self.log_info("Elapsed time for optimizing ordering: {:.3f} seconds".format(time.time()-start))
# Try to figure out order of stepping through components
# If a loop of dependencies is present, update order behaviour is undefined
models = list(comp_dict)
try:
models.remove('Port')
except:
pass
try:
models.remove('Input')
except:
pass
deps = {i:[] for i in range(len(models))}
for i in range(len(models)):
for j in range(i+1,len(models)):
in12 = set(self._comps[models[i]]['updates'])&\
set(self._comps[models[j]]['accesses'])
in21 = set(self._comps[models[i]]['accesses'])&\
set(self._comps[models[j]]['updates'])
if in12 or in21:
if len(in12) > len(in21):
deps[j].append(i)
else:
deps[i].append(j)
self.exec_order = []
for i, model in enumerate(models):
if not model in self.exec_order: self.exec_order.append(model)
for j in deps[i]:
try:
if self.exec_order.index(models[j]) > \
self.exec_order.index(model):
self.exec_order.remove(models[j])
self.exec_order.insert(self.exec_order.index(model),
models[j])
except ValueError:
self.exec_order.insert(self.exec_order.index(model),
models[j])
var_mod = {}
for i, model in enumerate(models):
for var in self._comps[model]['updates']:
if not var in var_mod: var_mod[var] = []
var_mod[var].append(model)
self.model_var_inj = {}
for var, models in var_mod.items():
i = 0
for model in models:
i = max(self.exec_order.index(model),i)
if not self.exec_order[i] in self.model_var_inj:
self.model_var_inj[self.exec_order[i]] = []
self.model_var_inj[self.exec_order[i]].append(var)
#Variables not updated by any component (for example those coming from
#external input or Ports) are slated to be injected at the end of a step
for var in self.variable_delay_map:
if not var in var_mod:
if not self.exec_order[-1] in self.model_var_inj:
self.model_var_inj[self.exec_order[-1]] = []
self.model_var_inj[self.exec_order[-1]].append(var)
if self.print_timing:
start = time.time()
# Get selectors of input ports:
sel_in_gpot, self.in_gpot_uids = self.extract_in_gpot(comp_dict,
self.uid_key)
self.sel_in_gpot = Selector(','.join(sel_in_gpot))
sel_in_spk, self.in_spk_uids = self.extract_in_spk(comp_dict,
self.uid_key)
self.sel_in_spk = Selector(','.join(sel_in_spk))
sel_in = Selector.add(self.sel_in_gpot, self.sel_in_spk)
# Get selectors of output neurons:
sel_out_gpot, self.out_gpot_uids = self.extract_out_gpot(comp_dict,
self.uid_key)
self.sel_out_gpot = Selector(','.join(sel_out_gpot))
sel_out_spk, self.out_spk_uids = self.extract_out_spk(comp_dict,
self.uid_key)
self.sel_out_spk = Selector(','.join(sel_out_spk))
sel_out = Selector.add(self.sel_out_gpot, self.sel_out_spk)
sel_gpot = Selector.add(self.sel_in_gpot, self.sel_out_gpot)
sel_spk = Selector.add(self.sel_in_spk, self.sel_out_spk)
sel = Selector.add(sel_gpot, sel_spk)
if self.print_timing:
self.log_info("Elapsed time for generating selectors: {:.3f} seconds".format( time.time()-start))
# Save component parameters data in the form
# [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
self.comp_list = comp_dict.items()
self.models = {m:i for i,(m,_) in enumerate(self.comp_list)}
# Number of components of each model:
self.model_num = [len(n[uid_key]) if not m=='Input' else
len(sum([d[uid_key] for d in n.values()],[]))
for m, n in self.comp_list]
data_gpot = np.zeros(len(self.in_gpot_uids)+len(self.out_gpot_uids),
self.default_dtype)
data_spike = np.zeros(len(self.in_spk_uids)+len(self.out_spk_uids),
self.default_dtype)
if self.print_timing:
start = time.time()
super(LPU, self).__init__(sel=sel, sel_in=sel_in, sel_out=sel_out,
sel_gpot=sel_gpot, sel_spike=sel_spk,
data_gpot=data_gpot, data_spike=data_spike,
columns=columns, ctrl_tag=ctrl_tag, gpot_tag=gpot_tag,
spike_tag=spike_tag, id=self.LPU_id,
rank_to_id=rank_to_id, routing_table=routing_table,
device=device, debug=debug, time_sync=time_sync,
print_timing=print_timing)
if self.print_timing:
cuda.Context.synchronize()
self.log_info("Elapsed time for initializing parent class: {:.3f} seconds".format(time.time()-start))
# Integer indices in port map data arrays corresponding to input/output
# gpot/spiking ports:
self.in_gpot_inds = np.array(self.pm['gpot'].ports_to_inds(
self.sel_in_gpot), dtype=np.int32)
self.out_gpot_inds = np.array(self.pm['gpot'].ports_to_inds(
self.sel_out_gpot), dtype=np.int32)
self.in_spk_inds = np.array(self.pm['spike'].ports_to_inds(
self.sel_in_spk), dtype=np.int32)
self.out_spk_inds = np.array(self.pm['spike'].ports_to_inds(
self.sel_out_spk), dtype=np.int32)
# def generate_uid(self, input=False):
# if input:
# uid = 'input_' + str(np.random.randint(100000))
# else:
# uid = 'auto_' + str(np.random.randint(100000))
# while uid in self.gen_uids:
# if input:
# uid = 'input_' + str(np.random.randint(100000))
# else:
# uid = 'auto_' + str(np.random.randint(100000))
# return uid
def pre_run(self):
if self.print_timing:
start = time.time()
super(LPU, self).pre_run()
if self.print_timing:
start = time.time()
self.log_info("LPU pre_run parent took {} seconds".format(time.time()-start))
if self.print_timing:
start = time.time()
self.memory_manager = MemoryManager()
self.init_variable_memory()
if self.print_timing:
cuda.Context.synchronize()
self.log_info('Elapsed time for initialing variable memory: {:.3f} seconds'.format( time.time()-start))
start = time.time()
self.process_connections()
if self.print_timing:
self.log_info('Elapsed time for process_connections: {:.3f} seconds'.format(time.time()-start))
start = time.time()
self.init_parameters()
if self.print_timing:
cuda.Context.synchronize()
self.log_info('Elapsed time for init_paramseters: {:.3f} seconds'.format( time.time()-start))
start = time.time()
self.components = {}
# Instantiate components
for model in self.models:
if model in ['Port','Input']: continue
self.components[model] = self._instantiate_component(model)
update_pointers = {}
for var in self._comps[model]['updates']:
buff = self.memory_manager.get_buffer(var)
mind = self.memory_manager.variables[var]['models'].index(model)
shift = self.memory_manager.variables[var]['cumlen'][mind]
update_pointers[var] = int(buff.gpudata)+(buff.current*buff.ld+\
shift)*buff.dtype.itemsize
self.components[model].pre_run(update_pointers)
for var in self._comps[model]['updates']:
buff = self.memory_manager.get_buffer(var)
mind = self.memory_manager.variables[var]['models'].index(model)
shift = self.memory_manager.variables[var]['cumlen'][mind]
for j in range(buff.buffer_length):
if j is not buff.current:
cuda.memcpy_dtod(
int(int(buff.gpudata)+(j*buff.ld+\
shift)*buff.dtype.itemsize),
int(int(buff.gpudata)+(buff.current*buff.ld+\
shift)*buff.dtype.itemsize),
int(buff.dtype.itemsize*self.model_num[self.models[model]]))
if self.print_timing:
cuda.Context.synchronize()
self.log_info('Elapsed time for instantiating components: {:.3f} seconds'.format(time.time()-start))
start = time.time()
# Setup ports
self._setup_input_ports()
self._setup_output_ports()
if self.print_timing:
cuda.Context.synchronize()
self.log_info('Elapsed time for setting up ports: {:.3f} seconds'.format( time.time()-start))
start = time.time()
for p in self.input_processors:
p.LPU_obj = self
p._pre_run()
for p in self.output_processors:
p.LPU_obj = self
p._pre_run()
if self.print_timing:
cuda.Context.synchronize()
self.log_info('Elapsed time for prerun input and output processors: {:.3f} seconds'.format( time.time()-start))
self.memory_manager.precompile_fill_zeros()
if self.control_inteface: self.control_inteface.register(self)
if self.print_timing:
cuda.Context.synchronize()
self.log_info("Elapsed time for LPU pre_run: {:.3f} seconds".format(time.time()-start))
if self.print_timing:
self.timing = {'read_input': 0, 'input_processors': 0, 'inject_input': 0,
'model_run': 0, 'output_processors': 0,
'extract_output': 0, 'total': 0}
# TODO: optimize the order of self.out_port_conns beforehand
def _setup_output_ports(self):
self.out_port_inds_gpot = {}
self.out_var_inds_gpot = {}
self.out_port_inds_spk = {}
self.out_var_inds_spk = {}
# assuming that the UIDs are unique
out_gpot_index = {uid: i for i, uid in enumerate(self.out_gpot_uids)}
out_spk_index = {uid: i for i, uid in enumerate(self.out_spk_uids)}
for pre_uid, post_uid, var in self.out_port_conns:
if not var in self.out_port_inds_gpot:
self.out_port_inds_gpot[var] = []
self.out_var_inds_gpot[var] = []
self.out_port_inds_spk[var] = []
self.out_var_inds_spk[var] = []
ind = self.memory_manager.variables[var]['uids'][pre_uid]
if post_uid in out_gpot_index:
# self.out_port_inds_gpot[var].append(self.out_gpot_inds[\
# self.out_gpot_uids.index(post_uid)])
self.out_port_inds_gpot[var].append(self.out_gpot_inds[\
out_gpot_index[post_uid]])
self.out_var_inds_gpot[var].append(ind)
else:
self.out_port_inds_spk[var].append(self.out_spk_inds[\
out_spk_index[post_uid]])
self.out_var_inds_spk[var].append(ind)
tmp = self.out_port_inds_gpot.copy()
for var in tmp:
if not self.out_port_inds_gpot[var]:
del self.out_port_inds_gpot[var]
del self.out_var_inds_gpot[var]
else:
self.out_port_inds_gpot[var] = garray.to_gpu(\
np.array(self.out_port_inds_gpot[var],np.int32))
self.out_var_inds_gpot[var] = garray.to_gpu(\
np.array(self.out_var_inds_gpot[var],np.int32))
tmp = self.out_port_inds_spk.copy()
for var in tmp:
if not self.out_port_inds_spk[var]:
del self.out_port_inds_spk[var]
del self.out_var_inds_spk[var]
else:
self.out_port_inds_spk[var] = garray.to_gpu(\
np.array(self.out_port_inds_spk[var],np.int32))
self.out_var_inds_spk[var] = garray.to_gpu(\
np.array(self.out_var_inds_spk[var],np.int32))
def _setup_input_ports(self):
self.port_inds_gpot = {}
self.var_inds_gpot = {}
self.port_inds_spk = {}
self.var_inds_spk = {}
# assuming that the UIDs are unique
in_gpot_index = {uid: i for i, uid in enumerate(self.in_gpot_uids)}
in_spk_index = {uid: i for i, uid in enumerate(self.in_spk_uids)}
for var, uids in self.in_port_vars.items():
self.port_inds_gpot[var] = []
self.var_inds_gpot[var] = []
self.port_inds_spk[var] = []
self.var_inds_spk[var] = []
mind = self.memory_manager.variables[var]['models'].index('Port')
shift = self.memory_manager.variables[var]['cumlen'][mind]
# The following assumes the intersection of set of variables
# accessed via spiking with those accessed via gpot ports is null
for i,uid in enumerate(uids):
if uid in in_gpot_index:
self.port_inds_gpot[var].append(self.in_gpot_inds[\
in_gpot_index[uid]])
self.var_inds_gpot[var].append(i + shift)
else:
self.port_inds_spk[var].append(self.in_spk_inds[\
in_spk_index[uid]])
self.var_inds_spk[var].append(i + shift)
tmp = self.port_inds_gpot.copy()
for var in tmp:
if not self.port_inds_gpot[var]:
del self.port_inds_gpot[var]
del self.var_inds_gpot[var]
else:
self.port_inds_gpot[var] = garray.to_gpu(\
np.array(self.port_inds_gpot[var],np.int32))
self.var_inds_gpot[var] = garray.to_gpu(\
np.array(self.var_inds_gpot[var],np.int32))
tmp = self.port_inds_spk.copy()
for var in tmp:
if not self.port_inds_spk[var]:
del self.port_inds_spk[var]
del self.var_inds_spk[var]
else:
self.port_inds_spk[var] = garray.to_gpu(\
np.array(self.port_inds_spk[var],np.int32))
self.var_inds_spk[var] = garray.to_gpu(\
np.array(self.var_inds_spk[var],np.int32))
def init_parameters(self):
for m, n in self.comp_list:
if not m in ['Port','Input']:
nn = n.copy()
nn.pop(self.uid_key)
# copy integer and boolean parameters into separate dictionary
nn_int = {k:v for k, v in iteritems(nn) if (isinstance(v, list)
and len(v) and type(v[0]) in [int, bool])}
nn_rest = {k:v for k, v in iteritems(nn) if (
(not isinstance(v, list)) or (len(v) and
type(v[0]) not in [int, long, bool]))}
if nn_int:
self.memory_manager.params_htod(m, nn_int, np.int32)
if nn_rest:
self.memory_manager.params_htod(m, nn_rest,
self.default_dtype)
def init_variable_memory(self):
var_info = {}
for (model, attribs) in self.comp_list:
if model in ['Port']: continue
# Add memory for external inputs if required
if model == 'Input':
for var, d in iteritems(attribs):
if not var in var_info:
var_info[var] = {'models':[],'len':[],'delay':0,'uids':[]}
var_info[var]['models'].append('Input')
var_info[var]['len'].append(len(d[self.uid_key]))
var_info[var]['uids'].extend(d[self.uid_key])
continue
for var in self._comps[model]['updates']:
if not var in var_info:
var_info[var] = {'models':[],'len':[],'delay':0,'uids':[]}
var_info[var]['models'].append(model)
var_info[var]['len'].append(len(attribs[self.uid_key]))
var_info[var]['uids'].extend(attribs[self.uid_key])
# Add memory for input ports
for var in self.in_port_vars:
if not var in var_info:
var_info[var] = {'models':[],'len':[],'delay':0,'uids':[]}
var_info[var]['models'].append('Port')
var_info[var]['len'].append(len(self.in_port_vars[var]))
var_info[var]['uids'].extend(self.in_port_vars[var])
for var in self.variable_delay_map:
var_info[var]['delay'] = self.variable_delay_map[var]
for var, d in var_info.items():
d['cumlen'] = np.cumsum([0]+d['len'])
d['uids'] = {uid:i for i, uid in enumerate(d['uids'])}
self.memory_manager.memory_alloc(var, d['cumlen'][-1], d['delay']+2,\
dtype=self.default_dtype,
info=d)
def process_connections(self):
for (model, attribs) in self.comp_list:
if model in ['Port','Input']: continue
pre = {var:[] for var in self._comps[model]['accesses']}
npre = {var:[] for var in self._comps[model]['accesses']}
data = {var:{} for var in self._comps[model]['accesses']}
for uid in attribs[self.uid_key]:
cnt = {var:0 for var in self._comps[model]['accesses']}
if uid in self.conn_dict:
for var in self.conn_dict[uid]:
for i in range(len(self.conn_dict[uid][var]['pre'])):
# Figure out index of the precomponent in the
# particular variable memory
p = self.conn_dict[uid][var]['pre'][i]
ind = self.memory_manager.variables[var]['uids'][p]
pre[var].append(ind)
cnt[var] += 1
for k in self.conn_dict[uid][var]:
if k in ['pre','variable']: continue
if k not in data[var]: data[var][k] = []
data[var][k].append(self.conn_dict[uid][var][k][i])
l = len(pre[var])
assert(all([len(data[var][k])==l for k in data[var]]))
for var,c in cnt.items():
npre[var].append(cnt[var])
else:
for n in npre.values(): n.append(0)
cumpre = {var: np.cumsum([0]+n) for var, n in npre.items()}
attribs['pre'] = pre
attribs['cumpre'] = cumpre
attribs['npre'] = npre
attribs['conn_data'] = data
def post_run(self):
super(LPU, self).post_run()
for comp in self.components.values():
comp.post_run()
# Cycle through IO processors as well
for p in self.input_processors: p.post_run()
for p in self.output_processors: p.post_run()
if self.print_timing:
print('time spent on:', self.timing)
def run_step(self):
super(LPU, self).run_step()
# Update input ports
if self.print_timing:
start_all = time.time()
start = time.time()
self._read_LPU_input()
if self.print_timing:
cuda.Context.synchronize()
self.timing['read_input'] += time.time()-start
# Fetch updated input if available from all input processors
if self.print_timing:
start = time.time()
for p in self.input_processors: p.run_step()
if self.print_timing:
cuda.Context.synchronize()
self.timing['input_processors'] += time.time()-start
if self.print_timing:
start = time.time()
for model in self.exec_order:
if model in self.model_var_inj:
for var in self.model_var_inj[model]:
# Reset memory for external input to zero if present
self.memory_manager.fill_zeros(model='Input', variable=var)
for p in self.input_processors:
p.inject_input(var)
if self.print_timing:
cuda.Context.synchronize()
self.timing['inject_input'] += time.time()-start
# Call run_step of components
if self.print_timing:
start = time.time()
for model in self.exec_order:
# Get correct position in buffer for update
update_pointers = {}
for var in self._comps[model]['updates']:
buff = self.memory_manager.get_buffer(var)
mind = self.memory_manager.variables[var]['models'].index(model)
shift = self.memory_manager.variables[var]['cumlen'][mind]
buffer_current_plus_one = buff.current + 1
if buffer_current_plus_one >= buff.buffer_length:
buffer_current_plus_one = 0
update_pointers[var] = int(buff.gpudata)+\
(buffer_current_plus_one*buff.ld+\
shift)*buff.dtype.itemsize
self.components[model].run_step(update_pointers)
if self.print_timing:
cuda.Context.synchronize()
self.timing['model_run'] += time.time()-start
# Process output processors
if self.print_timing:
start = time.time()
for p in self.output_processors: p.run_step()
if self.print_timing:
cuda.Context.synchronize()
self.timing['output_processors'] += time.time()-start
# Check for transforms
# Update output ports
if self.print_timing:
start = time.time()
self._extract_output()
if self.print_timing:
cuda.Context.synchronize()
self.timing['extract_output'] += time.time()-start
# Step through buffers
self.memory_manager.step()
self.time += self.dt
# Instruct Control inteface to process any pending commands
if self.control_inteface: self.control_inteface.process_commands()
if self.print_timing:
cuda.Context.synchronize()
self.timing['total'] += time.time()-start_all
def _read_LPU_input(self):
"""
Extract membrane voltages/spike states from LPU's port map data arrays and
store them in buffers.
"""
for var in self.port_inds_gpot:
# Get correct position in buffer for update
buff = self.memory_manager.get_buffer(var)
dest_mem = garray.GPUArray((1,buff.size),buff.dtype,
gpudata=int(buff.gpudata)+\
buff.current*buff.ld*\
buff.dtype.itemsize)
self.set_inds_both(self.pm['gpot'].data, dest_mem,
self.port_inds_gpot[var],self.var_inds_gpot[var])
for var in self.port_inds_spk:
# Get correct position in buffer for update
buff = self.memory_manager.get_buffer(var)
dest_mem = garray.GPUArray((1,buff.size),buff.dtype,
gpudata=int(buff.gpudata)+\
buff.current*buff.ld*\
buff.dtype.itemsize)
self.set_inds_both(self.pm['spike'].data, dest_mem, \
self.port_inds_spk[var],self.var_inds_spk[var])
def _extract_output(self):
"""
Extract membrane voltages/spike states from LPU's port map data arrays and
store them in buffers.
"""
for var in self.out_port_inds_gpot:
# Get correct position in buffer for update
buff = self.memory_manager.get_buffer(var)
src_mem = garray.GPUArray((1,buff.size),buff.dtype,
gpudata=int(buff.gpudata)+\
buff.current*buff.ld*\
buff.dtype.itemsize)
self.set_inds_both(src_mem, self.pm['gpot'].data, \
self.out_var_inds_gpot[var], self.out_port_inds_gpot[var])
for var in self.out_port_inds_spk:
# Get correct position in buffer for update
buff = self.memory_manager.get_buffer(var)
src_mem = garray.GPUArray((1,buff.size),buff.dtype,
gpudata=int(buff.gpudata)+\
buff.current*buff.ld*\
buff.dtype.itemsize)
self.set_inds_both(src_mem, self.pm['spike'].data, \
self.out_var_inds_spk[var], self.out_port_inds_spk[var])
def set_inds_both(self, src, dest, src_inds, dest_inds):
"""
Set `dest[dest_inds[i]] = src[src_inds[i]] for i in range(len(src_inds))`
"""
try:
func = self.set_inds_both.cache[(src_inds.dtype, src.dtype)]
except KeyError:
inds_ctype = dtype_to_ctype(src_inds.dtype)
data_ctype = dtype_to_ctype(src.dtype)
v = ("{data_ctype} *dest, {inds_ctype} *dest_inds, " +\
"{inds_ctype} *src_inds, {data_ctype} *src").format(\
data_ctype=data_ctype,inds_ctype=inds_ctype)
func = elementwise.ElementwiseKernel(v,\
"dest[dest_inds[i]] = src[src_inds[i]]")
self.set_inds_both.cache[(src_inds.dtype, src.dtype)] = func
func(dest, dest_inds, src_inds, src, range=slice(0, len(src_inds), 1) )
set_inds_both.cache = {}
def _instantiate_component(self, comp_name):
try:
cls = self._comps[comp_name]['cls']
except:
self.log_info("Error instantiating component of model '%s'" \
% comp_name)
return None
params_dict = self.memory_manager.parameters[comp_name]
access_buffers = {var:self.memory_manager.get_buffer(var) \
for var in self._comps[comp_name]['accesses'] \
if var in self.memory_manager.variables}
return cls(params_dict, access_buffers, self.dt,
LPU_id=self.LPU_id, debug=self.debug,
cuda_verbose=bool(self.compile_options))
def _load_components(self, extra_comps=[]):
"""
Load all available NDcomponents
"""
child_classes = NDComponent.NDComponent.__subclasses__()
comp_classes = child_classes[:]
for cls in child_classes:
comp_classes.extend(cls.__subclasses__())
comp_classes.extend(extra_comps)
self._comps = {cls.__name__:{'accesses': cls.accesses ,
'updates':cls.updates,
'cls':cls} \
for cls in comp_classes if not cls.__name__[:4]=='Base'}
class uid_generator(object):
def __init__(self):
self.input_count = 0
self.auto_count = 0
def generate_uid(self, input=False):
if input:
uid = 'input_' + str(self.input_count)
self.input_count += 1
else:
uid = 'auto_' + str(self.auto_count)
self.auto_count += 1
return uid
|
{"hexsha": "ffe97310fe40db1979f26e7e8035250b02684660", "size": 60005, "ext": "py", "lang": "Python", "max_stars_repo_path": "neurokernel/LPU/LPU.py", "max_stars_repo_name": "yiyin/neurodriver", "max_stars_repo_head_hexsha": "34e6874a1cf35633cda1191920cbaeac5d25dc9b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neurokernel/LPU/LPU.py", "max_issues_repo_name": "yiyin/neurodriver", "max_issues_repo_head_hexsha": "34e6874a1cf35633cda1191920cbaeac5d25dc9b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neurokernel/LPU/LPU.py", "max_forks_repo_name": "yiyin/neurodriver", "max_forks_repo_head_hexsha": "34e6874a1cf35633cda1191920cbaeac5d25dc9b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3764124294, "max_line_length": 149, "alphanum_fraction": 0.5244896259, "include": true, "reason": "import numpy,import networkx,import pycuda,from pycuda", "num_tokens": 12632}
|
\subsection{Burn}
\label{sec:721Burn}
We continue with the notation and indices from the prior sections.\\
\\
Suppose Bob is the owner of the token commitment $Z_B$ which represents the ERC-721 asset with tokenId $\alpha$ (as discussed in the prior section).
The asset $\alpha$ can continue to be transferred under zero-knowledge between parties within the Shield contract indefinitely.
Any third-party observers would not be able to infer "who sent what to whom".\\
\\
Recall that whilst the ERC-721 token represented by $\alpha$ has a `private' token commitment representation within the Shield contract, the underlying `public' ERC-721 token is owned by the Shield contract; effectively `locked up' in escrow.\\
\\
Suppose Bob (now the owner of $\alpha$ because he knows the secret key $sk^{Z,(n+m+1)}_{B,0}$) wishes to `release' his public ERC-721 token represented by $\alpha$ from escrow.
Then he will need to effectively `reveal' the contents of his token commitment $Z_B$ in order to convince the Shield contract that he is indeed entitled to withdraw $\alpha$ from escrow.
We call this act of converting from a `private' token commitment back to its `public' counterpart a `\textbf{burn}'.\\
\\
Note that by burning a token commitment, Bob is revealing information which was previously private; namely, the asset $\alpha$. Bob could continue to use an anonymous Ethereum address when calling the `burn' transaction, but analytics of public ERC-721 transactions thereafter will likely eventually reveal that it was Bob who burned $\alpha$. We'll have Bob use his public Ethereum address `burn', for simplicity.\\
\\
\noindent
For Bob to burn $Z_B$ within the Shield contract, under zero knowledge, he follows the steps in Figure~\ref{fig:nfBurnAlgorithm}.
\begin{figure}[htp]
\ContinuedFloat*
\begin{center}
\begin{framed}
\begin{tabular}{p{16cm}}
\textbf{Non-fungible burn algorithm} \\
\\
\midrule
\textbf{Bob's steps:}\\
\begin{enumerate}
\setcounter{enumi}{\value{ongoingEnumCounter}}
\item Compute $N_B := h(\;\sigma_{\vec{AB}}\;|\;sk^Z_B\;)$, the nullifier of Bob's commitment $Z_B$.
\item Get $\psi_{Z_B}$ -- the sister-path of $Z_B$ -- from the Shield contract (see Details below).
\item Get the latest Merkle root from the Shield contract: $\roott_{n+m+k-1}$ (see Details below).
\item Set public inputs $x = (\alpha,\;N_B,\;\roott_{n+m+k-1})$
\item Set private inputs $\omega = (\psi_{Z_A},\;sk_B,\;\sigma_{\vec{AB}})$
\item Select $C_{nft-burn}(\;\omega,\;x\;)$ -- the set of constraints which are satisfied if and only if:
\begin{enumerate}
\item $pk_B$ equals $h(\;sk_B\;)$; (Proof of knowledge of the secret key to $pk_B$) (see Details for why $pk_B$ isn't an input to $C$)
\item $Z_B$ equals $h(\;\alpha\;|\;pk_B\;|\;\sigma_{\vec{AB}}\;)$ (Proof of the constituent values of $Z_B$) (see Details for why $Z_B$ isn't an input to $C$)
\item $\roott_{n+m+k-1}$ equals $h\br*{\psi_{1}\;|...|\;h\br*{\psi_{d-2}\;|\;h\br*{\psi_{d-1}\;|\;Z_B}\;}...}$ (Proof that $Z_B$ belongs to the on-chain Merkle Tree)
\item $N_B$ equals $h(\;\sigma_{\vec{AB}}\;|\;sk^Z_B\;)$ (Proof $N_B$ is indeed the nullifier of $Z_B$)
\end{enumerate}
\item Generate $\pi := P(\;p_C\;,\;x,\;\omega\;)$; a proof of knowledge of satisfying arguments $(\omega, x)\;s.t.\;C(\omega, x) = 1$. Recall: $p_C$ -- the proving key for $C$ -- will be stored on Alice's computer.
The pair $(\pi, x)$ is the zk-SNARK which attests to knowledge of private inputs $\omega$ without revealing them.
\item Send $(\pi, x)$ to the Shield contract for verification.
Using web3: \texttt{nfTokenShield.burn(payTo, proof, inputs, vkId)}
where \texttt{payTo} is an Ethereum address, specified by Bob, into which he wishes for the ERC-721 token with tokenId $=\alpha$ to be transferred.
%remember where the count (enumi) is up to and store it in ongoingEnumCounter:
\setcounter{ongoingEnumCounter}{\value{enumi}}
\end{enumerate}
\ \\
\midrule
\textbf{Shield contract's steps:}\\
\begin{enumerate}
%resume counter
\setcounter{enumi}{\value{ongoingEnumCounter}}
\item Verify the proof as correct: call a Verifier contract to verify the \texttt{(proof, inputs)} pair against the verification key represented by \texttt{vkId}.
\setcounter{ongoingEnumCounter}{\value{enumi}}
\end{enumerate}
\ \\
\hline
...
\end{tabular}
\end{framed}
\end{center}
\caption{Non-Fungible Burn Algorithm}
\label{fig:nfBurnAlgorithm}
\end{figure}
%continue on next page
\begin{figure}[htp]
\ContinuedFloat %to continue
\begin{center}
\begin{framed}
\begin{tabular}{p{16cm}}
\textbf{Verifier contract's steps:}\\
\begin{enumerate}
\setcounter{enumi}{\value{ongoingEnumCounter}}
\item Compute \texttt{result = verify(proof, inputs, vkId)}.
I.e. Verify the \texttt{(proof, inputs)} pair against the verification key.
\item Return \texttt{result}$\in$\texttt{\{false, true\}} to the Shield contract.
\setcounter{ongoingEnumCounter}{\value{enumi}}
\end{enumerate}
\ \\
\midrule
\textbf{Shield contract's steps:}\\
\begin{enumerate}
\setcounter{enumi}{\value{ongoingEnumCounter}}
\item If \texttt{result = false}, revert.
\item Else:
\begin{enumerate}
\item Check $\roott_{n+m+k-1}$ is in $\rootsList$. (Revert if not).
\item Check $N_B$ is not already in its list of `spent' nullifiers. (Revert if not).
\item Transfer the ERC-721 token with tokenId $=\alpha$ from the Shield contract (which has been holding it in escrow) to Bob's \texttt{payTo} Ethereum address.
\item Append the nullifier $N_{B}$ to the ever-increasing array $\bm N$.
\end{enumerate}
\setcounter{ongoingEnumCounter}{\value{enumi}}
\end{enumerate}
\ \\
\midrule
\textbf{Bob's steps:}\\
\begin{enumerate}
\setcounter{enumi}{\value{ongoingEnumCounter}}
\item Check the ERC-721 contract to ensure he owns the token with tokenId $=\alpha$.
\item Store any relevant data in his local database.
\setcounter{ongoingEnumCounter}{0} %reset for next figure
\end{enumerate}
\end{tabular}
\end{framed}
\end{center}
\caption{Non-Fungible Burn Algorithm} %same caption as the first part of this figure
%\label{fig:nfBurnAlgorithm} - no label in this second part of the figure
\end{figure}
\newpage
\subsubsection{Details}
\label{sec:721BurnDetails}
We refer to the numbered steps of Figure~\ref{fig:nfBurnAlgorithm}.\\
\\
\textbf{Step $1$}
\ \\
This is handled within \hyperref[sec:nf-token-controller]{\texttt{nf-token-controller.js}}.\\
\\
\textbf{Steps $2 - 3$}
\ \\
These calls to the Shield contract are handled within \hyperref[sec:nf-token-zkp]{\texttt{nf-token-zkp.js}}.\\
\\
\noindent
It is important at this stage to note that there are an unknown number of other parties utilising the Shield smart contract.
Hence, the dynamic array of tokens $\bm{Z}$ might have grown since Alice appended Bob's $Z_B$ as the $(n+m)^{th}$ leaf of $M$.\\
\\
Suppose there have been $k-1$ additional tokens added to $\bm{Z}$ since Alice added Bob's $Z_B$.
That is,\\
\begin{align*}
\bm{Z}_{n+m+k-1} = (Z_0, Z_1,..., Z_{n-1}, Z_A,
Z_{n+1},..., Z_{n+m-1}, Z_B, Z_{n+m+1},..., Z_{n+m+k-1})
\end{align*}
\ \\
\noindent
We denote the corresponding Merkle Tree which holds tokens $\bm{Z}_{n+m+k-1}$ by $M_{n+m+k-1}$. We denote its root by $\roott_{n+m+k-1}$; an element of $\rootsList$.
\begin{align*}
\scalebox{0.9}{
\begin{forest}
[{$\roott_{n+m+k-1}:= h\br*{
h\br*{
h\br*{
h\br*{
Z_0,Z_1
},
...
},
h\br*{
h\br*{
Z_{n-1},Z_A
},
h\br*{
Z_{n+1},...
}
}
},
h\br*{
h\br*{
h\br*{
Z_{n+m-1}, Z_B
},
h\br*{
Z_{n+m+1}, ...
}
},
h\br*{
h\br*{
Z_{n+m+k-1}, 0
},
0
}
}
}
$}
[{$ h\br*{
h\br*{
h\br*{
Z_0,Z_1
},
...
},
h\br*{
h\br*{
Z_{n-1},Z_A
},
h\br*{
Z_{n+1},...
}
}
}
$}
[{$ h\br*{
h\br*{
Z_0,Z_1
},
...
}
$}
[{$ h\br*{
Z_0,Z_1
}
$}
[{$Z_0$}][{$Z_1$}]
]
[...
[...][...]
]
]
[{$ h\br*{
h\br*{
Z_{n-1},Z_A
},
h\br*{
Z_{n+1},...
}
}
$}
[{$ h\br*{
Z_{n-1},Z_A
}
$}
[{$Z_{n-1}$}][{$Z_A$}]
]
[{$ h\br*{
Z_{n+1},...
}
$}
[$Z_{n+1}$][...]
]
]
]
[{$ h\br*{
h\br*{
h\br*{
Z_{n+m-1}, Z_B
},
h\br*{
Z_{n+m+1}, ...
}
},
h\br*{
h\br*{
Z_{n+m+k-1}, 0
},
0
}
}
$}
[{$ h\br*{
h\br*{
Z_{n+m-1}, Z_B
},
h\br*{
Z_{n+m+1}, ...
}
}
$}
[{$ h\br*{
Z_{n+m-1}, Z_B
}
$}
[{$Z_{n+m-1}$}][{$Z_B$}]
]
[{$ h\br*{
Z_{n+m+1}, ...
}
$}
[{$Z_{n+m+1}$}][...]
]
]
[{$ h\br*{
h\br*{
Z_{n+m+k-1}, 0
},
0
}
$}
[{$ h\br*{
Z_{n+m+k-1}, 0
}
$}
[{$Z_{n+m+k-1}$}][0]
]
[0
[0][0]
]
]
]
]
\end{forest}
}
\end{align*}
\noindent
Bob retrieves the value of the current Merkle root, $\roott_{n+m+k-1}$, from the Shield contract.\\
\\
Since Bob knows that $Z_B$ is at leaf-index $n+m$ of $M_{n+m+k-1}$, Bob can also retrieve the path from the leaf $Z_{n+m}=Z_B$ to the root $\roott_{n+m+k-1}$. Path computations are done in \texttt{zkp/src/format-inputs.js}.\\
\\
We denote this path
\begin{align*}
\phi_{Z_B} = [\phi_{d-1}, \phi_{d-2},..., \phi_{1}, \phi_0]
\end{align*}
Note that $\phi_0 = \roott_{n+m+k-1}$.\\
\\
Bob also retrieve's the `sister-path' of this path:
\begin{align*}
\psi_{Z_B} = [\psi_{d-1}, \psi_{d-2},..., \psi_{1}, \psi_0]
\end{align*}
where $\psi_0 = \phi_0 = \roott_{n+m+k-1}$.\\
\\
For ease of reading, let's focus only on the nodes of $M_{n+m+k-1}$ which Bob cares about for the purposes of burning his token commitment $Z_B$:
\begin{align*}
\begin{forest}
[{$\roott_{n+m+k-1}:=\phi_0=\psi_0$}
[{$\psi_1$}
[...
[...
[...][...]
]
[...
[...][...]
]
]
[...
[...
[...][...]
]
[...
[...][...]
]
]
]
[{$\phi_1$}
[{$\phi_2$}
[{$\phi_3$}
[{$\psi_4$}][{$Z_B$}]
]
[{$\psi_3$}
[...][...]
]
]
[{$\psi_2$}
[...
[...][0]
]
[0
[0][0]
]
]
]
]
\end{forest}
\end{align*}
\noindent
Equipped with $\psi_{Z_B}$, Bob can prove that he owns a token commitment at one of the leaves of $M_{n+m+k-1}$, without revealing that it is "$Z_{n+m}$ located at leaf-index $n+m$".\\
\\
\textbf{Steps $4-5$}
\ \\
These steps are handled within \hyperref[sec:nf-token-controller]{\texttt{nf-token-controller.js}}.\\
\\
As a reminder, we let:
\begin{center}
\begin{tabular}{l l}
$x = (\alpha,\
N_{B},\
\roott_{n+m+k-1})$ & Public Inputs used to generate the Proof\\
$\omega = (\psi_{Z_B},\
sk_B,\
\sigma_{\vec{AB}})$ & Private Inputs used to generate the Proof\\
\end{tabular}
\end{center}
\ \\
\textbf{Steps $6 - 7$}
\ \\
These steps are handled within a \hyperref[sec:zokrates]{ZoKrates} container.\\
\\
Bob uses the $C_{nft-burn}$ (or $C$) -- the set of constraints for a non-fungible burn, located in \texttt{zkp/code/gm17/nft-burn} (see \hyperref[sec:trustedSetup]{Trusted Setup}). $C_{nft-burn}(\;\omega,\;x\;)$ returns a value of $true$ if Bob provides a set of valid `satisfying' arguments $(\omega, x)$ to $C$.\\
\\
Let's elaborate on each of the checks and calculations constraining the inputs to $C$ (we highlight public inputs in \textbf{bold} below):
\begin{enumerate}
\item Calculate $h(sk_B) =: pk_B'$.\\
Note that this newly calculated $pk_B'$ should equal $pk_B$ (Bob's public key), but we don't need to pass $pk_B$ as a private input and explicitly check that $pk_B'=pk_B$; a check on the correctness of $sk_B$ (and hence $pk_B'$) is implicitly achieved in the next two steps:
\item Calculate $h(\bm{\alpha}\;|\;pk_B'\;|\;\sigma_{\vec{AB}}) =: Z_B'$.\\
Note again that this newly calculated $Z_B'$ should equal $Z_B$ (Bob's token commitment), but we don't need to pass $Z_B$ as a private input and explicitly check that $Z_B'=Z_B$; a check on the correctness of $Z_B$ (and hence $Z_B'$) is implicitly achieved in the next step:
\item Check inputs $\psi_{Z_B}=[\psi_{d-1}, \psi_{d-2},..., \psi_{1}, \bm{\psi_{0}=\roott_{n+m+k-1}}]$ and the newly calculated $Z_B'$ satisfy:\\
$h\br*{\psi_{1}\;|...|\;h\br*{\psi_{d-2}\;|\;h\br*{\psi_{d-1}\;|\;Z_B'}\;}...} = \roott_{n+m+k-1} ( =: \bm{\psi_{0}})$\\
Given the one-way nature of our hashing function $h$, the only feasible way we could have arrived at the correct value of $\roott_{n+m+k-1}$ is if the sister-path $\psi_{Z_B}$ is correct, and if $Z_B'$ is correct, which (working backwards) must mean that $sk_B$ is correct.
How does the circuit know the value of $\roott_{n+m+k-1}$ is correct? It doesn't; but it is a `public input', and we can rely upon the Shield smart contract to check the correctness of all public inputs.\\
\\
We've therefore shown in the steps so far, that:
\begin{itemize}
\item[--] Bob is the owner of a token commitment (because he knows its secret key)
\item[--] Said token commitment is indeed a leaf of the on-chain Merkle Tree $M_{n+m+k-1}$.
\item[--] The token commitment does indeed represent the ERC-721 token with tokenId $=\bm{\alpha}$ (remember that $\alpha$ is a public input to a `burn' zk-SNARK).
\end{itemize}
Bob commits to burning his token $Z_B$ in the next step:
\item Check inputs $\sigma_{\vec{AB}}, sk_B, \bm{N_B}$ satisfy:
$h(\sigma_{\vec{AB}}\;|\;sk_B) = \bm{N_{B}}$\\
$N_B$ is referred to as a `nullifier' because it is understood by all participants to be an indisputable commitment to spend (`nullify') a token commitment. Remember that the token commitment being spent isn't revealed; the earlier steps have allowed Bob to demonstrate hidden knowledge of the secret key $sk_B$ of a token commitment which does indeed exist. By including $sk_B$ in the nullifier's preimage, Bob is binding himself as the executor of this `burn'. By including $\sigma_{\vec{AB}}$, Bob is specifying a serial number which is unique to the token $Z_B$ (thereby distinguishing this nullifier from those which would nullify any other token commitments he may own).
\end{enumerate}
Notice how each stage is linked to the last, and that at each of the `Check' stages, private inputs are being reconciled against at least one public input (highlighted in \textbf{bold} to help you notice). By structuring the circuit $C$ in this way, we are able to share only the public inputs with the Shield contract (along with a `proof' $\pi_{C,x,\omega}$). We'll see shortly that the Shield contract checks the correctness of each of the public inputs against its current states.\\
\\
\noindent
If all of the above constraints are satisfied by the public and private inputs, ZoKrates will generate the proof $\pi_{C,x,\omega}$; a proof of knowledge of satisfying arguments $(\omega, x) \ s.t. \ C(\omega, x) = 1$.\\
\\
\textbf{Step $8$}
\ \\
This transaction is handled within \hyperref[sec:nf-token-zkp]{\texttt{nf-token-zkp.js}}.\\
\\
Having generated $\pi_{C,x,\omega}$, Bob then sends the following to the Shield contract from his Ethereum address $E_B$:
\begin{align*}
&E_B\\
&\pi_{C,x,\omega}\\
&x = (\alpha, N_{B}, \roott_{n+m+k-1})
\end{align*}
\\
Recall that everyone knows the checks and calculations which have been performed in the circuit $C_{nft-burn}$, because it is a public file in the Nightfall repository. Further, everyone knows the verification key $vk_C$ which uniquely represents this circuit, because it has been publicly stored in the Verifier Registry contract. Therefore, when Bob shares the pair $(x, \pi_{C,x,\omega})$, and the `unique id' of the relevant verification key $vk_C$; everyone will interpret this information as the Bob's intention to burn; and everyone will be convinced that he knows the secret key which permits him to transfer ownership of a token commitment; and everyone will be convinced that that token commitment represents the ERC-721 token with tokenId $=\alpha$.\\
\\
\textbf{Steps $9 - 11$}
\ \\
The Verifier Registry contract already has stored within it the verification key $vk_C$.
It runs a verification function $V(vk_C, \pi_{C,x,\omega}, x)$.
\begin{align*}
V: (vk_C, \pi_{C,x,\omega}, x) \to \{0,1\}
\end{align*}
where:
\[
V=
\begin{cases}
1,& \text{if } \pi_{C,x,\omega} \text{ and } x \text{ satisfy } vk_C\\
0,& \text{otherwise}
\end{cases}
\]
\ \\
\textbf{Steps $12 - 13$}
\ \\
If the Verifier contract returns $1$ ($true$) (verified) to the Shield contract, then the Shield contract will be satisfied that Bob's proof and public inputs represent his commitment to burning a token commitment, and to withdrawing its underlying ERC-721 token $=\alpha$. If the Verifier contract returns $0$, then the transaction will revert.\\
\\
Let's suppose Bob's $(x, \pi_{C,x,\omega})$ pair is verified.\\
\\
Following verification of the proof, the Shield contract will do the following:
\begin{enumerate}
\item Check $\roott_{n+m+k-1}$ is in $\rootsList$.\\
(If not, the burn will fail)
\item Check $N_B$ is not already in the list of nullifiers, which we denote $\bm{N}$.\\
(If $N_B$ is already in $\bm{N}$, the burn will fail)
\item Transfer the ERC-721 token with tokenId $=\alpha$ from the Shield contract (i.e. from escrow) to Bob's Ethereum address.
\item Append the nullifier $N_{B}$ to the ever-increasing array $\bm N$.
\end{enumerate}
\textbf{Steps $14 - 15$}
\ \\
Bob is now the owner of the public ERC-721 token. The Nightfall UI queries the linked ERC-721 contract for tokens Bob owns.
If Bob ever wished to convert this token back into a token commitment, he would need to do a non-fungible `mint' (discussed earlier).
|
{"hexsha": "db6d2ec8d49bc9b65b8c629317536b2194675556", "size": 21278, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/whitepaper/protocols/ERC721/burn721.tex", "max_stars_repo_name": "roggerJose/nightfall", "max_stars_repo_head_hexsha": "59d7d83bcaf920bbf7c7427d14b5fcdb7e53843c", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 893, "max_stars_repo_stars_event_min_datetime": "2019-04-16T18:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T20:02:38.000Z", "max_issues_repo_path": "doc/whitepaper/protocols/ERC721/burn721.tex", "max_issues_repo_name": "roggerJose/nightfall", "max_issues_repo_head_hexsha": "59d7d83bcaf920bbf7c7427d14b5fcdb7e53843c", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 292, "max_issues_repo_issues_event_min_datetime": "2019-05-06T12:08:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T11:16:49.000Z", "max_forks_repo_path": "doc/whitepaper/protocols/ERC721/burn721.tex", "max_forks_repo_name": "roggerJose/nightfall", "max_forks_repo_head_hexsha": "59d7d83bcaf920bbf7c7427d14b5fcdb7e53843c", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 161, "max_forks_repo_forks_event_min_datetime": "2019-05-28T15:33:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T10:29:50.000Z", "avg_line_length": 42.8128772636, "max_line_length": 763, "alphanum_fraction": 0.5391484162, "num_tokens": 5841}
|
#ifndef SIMPLEBOT_DRIVER_HPP
#define SIMPLEBOT_DRIVER_HPP
#include <JetsonGPIO.h>
#include <string>
#include <cmath>
#include <boost/asio.hpp>
#include "nlohmann/json.hpp"
#include <ros/ros.h>
typedef struct {
int pinPWM;
int pinDirA;
int pinDirB;
} pinInfo;
typedef struct{
int left;
int right;
} encoderData;
class simplebotDriver
{
public:
simplebotDriver(pinInfo left,pinInfo right, std::string serialPort,int baudRate);
~simplebotDriver();
void outputToMotor(int outputDutyLeft,int outputDutyRight);
encoderData readEncoderFromMotor();//todo リクエスト送ったらエンコーダ更新実装(マイコン側要変更)
private:
std::shared_ptr<GPIO::PWM> rightPWM_;
std::shared_ptr<GPIO::PWM> leftPWM_;
boost::asio::serial_port *serial_;
pinInfo rightPin_;
pinInfo leftPin_;
std::string serialPort_;
int baudRate_;
void outputToMotorDir_(int Duty,pinInfo pin);
};
#endif //SIMPLEBOT_DRIVER_HPP
|
{"hexsha": "a2ae4cd0154c2a1fa398b19d292dc255b584625c", "size": 902, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "simplebot_hardware/include/simplebot_hardware/simplebot_driver.hpp", "max_stars_repo_name": "yukimakura/simplebot", "max_stars_repo_head_hexsha": "21625af59bbfd70b63e6881a02e41dc0ed67c3fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simplebot_hardware/include/simplebot_hardware/simplebot_driver.hpp", "max_issues_repo_name": "yukimakura/simplebot", "max_issues_repo_head_hexsha": "21625af59bbfd70b63e6881a02e41dc0ed67c3fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simplebot_hardware/include/simplebot_hardware/simplebot_driver.hpp", "max_forks_repo_name": "yukimakura/simplebot", "max_forks_repo_head_hexsha": "21625af59bbfd70b63e6881a02e41dc0ed67c3fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.4081632653, "max_line_length": 83, "alphanum_fraction": 0.7505543237, "num_tokens": 263}
|
from typing_extensions import SupportsIndex
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from .forms import InputForm
import pandas as pd
import numpy as np
import pickle
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client['PatientDB']
loaded_model = pickle.load(open("C:/Users/Kyle/Untitled Folder/finalized_model.pkl", 'rb'))
def index(request):
if request.method == "POST":
myform = InputForm(request.POST)
if myform.is_valid():
age = myform.cleaned_data['age_v']
sex = myform.cleaned_data['sex_v']
cp = myform.cleaned_data['cp_v']
thalach = myform.cleaned_data['thalach_v']
exang = myform.cleaned_data['exang_v']
oldpeak = myform.cleaned_data['oldpeak_v']
slope = myform.cleaned_data['slope_v']
ca = myform.cleaned_data['ca_v']
m_inputs = [[age, sex, cp, thalach, exang, oldpeak, slope, ca]]
y_pred = [np.exp(point)/np.sum(np.exp(point), axis=0)
for point in m_inputs]
return render(request, 'index.html', {'prediction': round(y_pred.mean())})
else:
myform = InputForm()
return render(request, 'index.html', {'form': myform})
def updateDataBase(request):
temp={}
temp['age']= myform.cleaned_data['age_v']
temp['sex']= myform.cleaned_data['sex_v']
temp['cp']= myform.cleaned_data['cp_v']
temp['thalach']= myform.cleaned_data['thalach_v']
temp['exang']= myform.cleaned_data['exang_v']
temp['oldpeak']= myform.cleaned_data['oldpeak_v']
temp['slope']= myform.cleaned_data['slope_v']
temp['ca']= myform.cleaned_data['ca_v']
collectionD.insert_one(temp)
countOfrow = collectionD.find().count()
context = {"Row Count": countOfrow}
return render(request,'viewDB.html',context)
|
{"hexsha": "f71650e79d12d7042562e07291e570fa83922710", "size": 2081, "ext": "py", "lang": "Python", "max_stars_repo_path": "heart_app/views.py", "max_stars_repo_name": "kylepgr/heart-disease-pred", "max_stars_repo_head_hexsha": "d128cc815dde4839ba18e887113bb47387499ce1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "heart_app/views.py", "max_issues_repo_name": "kylepgr/heart-disease-pred", "max_issues_repo_head_hexsha": "d128cc815dde4839ba18e887113bb47387499ce1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "heart_app/views.py", "max_forks_repo_name": "kylepgr/heart-disease-pred", "max_forks_repo_head_hexsha": "d128cc815dde4839ba18e887113bb47387499ce1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9027777778, "max_line_length": 92, "alphanum_fraction": 0.6001922153, "include": true, "reason": "import numpy", "num_tokens": 479}
|
#=
Similar to Batch Normalization, except online and without the rescaling/skew
y = (a .- μ) ./ σ
TODO: This is currently broken because OnlineStats.Variances no longer
exists.
=#
type InputNorm{T,W<:Weight} <: Transformation
n::Int # maps n --> n
vars::Variances{W}
input::SumNode{T,1} # a
output::OutputNode{T,1} # y
end
function InputNorm{T}(::Type{T}, n::Int,
lookback::Int = 100,
α::Float64 = NaN,
wgt::Weight = BoundedEqualWeight(isnan(α) ? lookback : α)
)
InputNorm(n,
Variances(n, wgt),
InputNode(T, n),
OutputNode(T, n)
)
end
InputNorm(n::Int, args...; kw...) = InputNorm(Float64, n, args...; kw...)
function Base.show(io::IO, t::InputNorm)
print(io, "InputNorm{n=$(t.n)}")
end
function transform!{T}(layer::InputNorm{T})
a = layer.input.val
y = layer.output.val
OnlineStats.fit!(layer.vars, a)
μ = mean(layer.vars)
σ = std(layer.vars)
for i=1:layer.n
y[i] = (a[i] - μ[i]) / max(σ[i], T(1e-10))
end
y
end
function grad!{T}(layer::InputNorm{T})
∇a = layer.input.∇
∇y = layer.output.∇
σ = std(layer.vars)
for i=1:layer.n
∇a[i] = ∇y[i] / max(σ[i], T(1e-10))
end
∇a
end
|
{"hexsha": "f1fba208cc66623f99cac30d8ade1ef4de191ae0", "size": 1306, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/inputnorm.jl", "max_stars_repo_name": "JuliaTagBot/Transformations.jl", "max_stars_repo_head_hexsha": "96041d4b3ef913c3f2f2e7cd65f9090cf40bb851", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2016-06-30T19:57:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T01:27:49.000Z", "max_issues_repo_path": "src/inputnorm.jl", "max_issues_repo_name": "JuliaTagBot/Transformations.jl", "max_issues_repo_head_hexsha": "96041d4b3ef913c3f2f2e7cd65f9090cf40bb851", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-07-01T05:07:19.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-05T20:26:51.000Z", "max_forks_repo_path": "src/inputnorm.jl", "max_forks_repo_name": "JuliaTagBot/Transformations.jl", "max_forks_repo_head_hexsha": "96041d4b3ef913c3f2f2e7cd65f9090cf40bb851", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-11-17T04:47:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T23:29:46.000Z", "avg_line_length": 23.3214285714, "max_line_length": 78, "alphanum_fraction": 0.5444104135, "num_tokens": 426}
|
!+ gaseous absorption after Rosenkranz 98 model
subroutine rosen98_gasabs &
(errorstatus,& ! out
freq, & ! in
tempK, & ! in
rhoWv, & ! in
pres, & ! in
absAir,& ! out
absWv ) ! out
! Description:
! Based on frequency, temperature, water vapor density, and pressure, this routine
! calculates the absorption due to air (N2 and O2) and water vapor in the frequency
! range from 0.1 to 800.0 GHz, pressure range from 10 to 1.2e5 Pa, and absolute
! temperatures larger than 100 K.
!
! Method:
! ROSENKRANZ (1998) model -- reference "Water vapor microwave
! continuum absorption: a comparison of measurements and results"
! To appear in Radio Science
!
! Owner: IGMK
!
! History:
!
! Version Date Comment
! ------- ---- -------
! 0.01 02/08/2001 Fixed division by zero when rhowv = 0 - G. Petty
! 0.1 21/09/2009 Code adaption from G. Petty - M. Mech
! 0.2 27/10/2012 Corrections to WV bands and continuum - E. Orlandi
! 0.3 13/11/2012 Application of European Standards for Writing and
! Documenting Exchangeable Fortran 90 Code - M. Mech
! 0.4 27/02/2013 functions in module gasabs_module
!
! Code Description:
! Language: Fortran 90.
! Software Standards: "European Standards for Writing and
! Documenting Exchangeable Fortran 90 Code".
!
! Parent Module: get_gasabs
!
! Declarations:
! Modules used:
use kinds, only: dbl, & ! integer parameter specifying double precision
long ! integer parameter specifying long integer
use gasabs_module ! functions for calculating absorption by gases
! use settings, only: verbose
!! use report_module
! Imported Scalar Variables with intent (in):
implicit none
!- End of header ---------------------------------------------------------------
! Subroutine arguments
! Scalar arguments with intent(in):
real(kind=dbl), intent(in) :: freq ! frequency [GHz]
real(kind=dbl), intent(in) :: tempK ! temperature [K]
real(kind=dbl), intent(in) :: pres ! pressure [Pa]
real(kind=dbl), intent(in) :: rhoWv ! water vapor density [kg/m**3]
! Scalar arguments with intent(out):
real(kind=dbl), intent(out) :: absAir ! extinction by dry air [Np/km]
real(kind=dbl), intent(out) :: absWv ! extinction by water vapor [Np/km]
! End of Subroutine arguments
! Local scalars:
real(kind=dbl) :: pmb ! pressure [mb]
real(kind=dbl) :: vapden ! water vpor density [g/m**3]
real(kind=dbl) :: e ! water vapor pressure [Pa]
real(kind=dbl) :: q ! specific humidity
real(kind=dbl) :: Tv ! virtuel temperature [K]
real(kind=dbl) :: rhoair ! moist air density [kg/m**3]
! Used Functions
! real(kind=dbl) :: absn2 ! function to calculate extinction by n_2
! real(kind=dbl) :: o2abs ! function to calculate extinction by o2
! real(kind=dbl) :: abh2o ! function to calculate extinction by h_2o
! Error handling
integer(kind=long), intent(out) :: errorstatus
integer(kind=long) :: err = 0
character(len=80) :: msg
character(len=14) :: nameOfRoutine = 'rosen98_gasabs'
! if (verbose >= 2) call report(info,'Start of ', nameOfRoutine)
! ! check for "reasonable" input values
! ! print*, freq
! if ((freq <= 0.0_dbl) .or. (freq > 800.0_dbl)) then
! errorstatus = fatal
! msg = 'Frequency not between 0 and 800 GHz in rosen98_gasabs!'
! call report(errorstatus, msg, nameOfRoutine)
! return
! elseif (tempK <= 100.0_dbl) then
! errorstatus = fatal
! msg = 'Temperature lower than 100 K in rosen98_gasabs!'
! call report(errorstatus, msg, nameOfRoutine)
! return
! elseif ((pres < 10.0_dbl) .or. (pres > 1.2d5)) then
! print*, pres
! errorstatus = fatal
! msg = 'Pressure not between 10 and 1.2d5 Pa in rosen98_gasabs!'
! call report(errorstatus, msg, nameOfRoutine)
! return
! else
! err = success
! end if
print*,"checking input consistency"
print*,"freq ","temp ","abs_hum ","press"
print*,freq, tempK, rhoWv, pres
! convert pressure from Pa to Mb
pmb = pres / 100.0_dbl
! convert vapor density from kg/m**3 to g/m**3
vapden = rhoWv * 1000.0_dbl
! get volume extinction coefficients
absair = absn2(tempK,pmb,freq) + o2abs(tempK,pmb,vapden,freq)
abswv = abh2o(tempK,pmb,vapden,freq)
! convert vapor density to vapor pressure
e = rhoWv * (tempK * 461.5_dbl)
! calculate specific humidity
q = 0.622_dbl * e / pres
! calculate virtual temperature
Tv = (1._dbl + 0.61_dbl * q) * tempK
! moist air density
rhoair = pres / (Tv * 287.06_dbl)
!! errorstatus = err
errorstatus = 1
!! if (verbose >= 2) call report(info,'End of ', nameOfRoutine)
return
end subroutine rosen98_gasabs
|
{"hexsha": "373a6051c49f566f28fea02c60afac0984cf2b59", "size": 4830, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "rosen98_gasabs.f90", "max_stars_repo_name": "DaveOri/pyGasAbs", "max_stars_repo_head_hexsha": "966eae26d0d356f33bb8964c912d63e2fffe2345", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rosen98_gasabs.f90", "max_issues_repo_name": "DaveOri/pyGasAbs", "max_issues_repo_head_hexsha": "966eae26d0d356f33bb8964c912d63e2fffe2345", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rosen98_gasabs.f90", "max_forks_repo_name": "DaveOri/pyGasAbs", "max_forks_repo_head_hexsha": "966eae26d0d356f33bb8964c912d63e2fffe2345", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.568627451, "max_line_length": 84, "alphanum_fraction": 0.6374741201, "num_tokens": 1451}
|
# test on generic type
using Pseudospectra, Test, GenericSVD
@testset "Generic(Big)" begin
@info("This test is expected to comment about lack of methods for BigFloat eigvals")
A = Matrix{BigFloat}(Pseudospectra.grcar(8))
# ax is needed until ∃ eigvals(BigFloat)
opts = Dict{Symbol,Any}(:ax => [-1,3,-3,3], :npts => 20)
ps_data = (@test_logs (:warn, r"^Failed to compute eigenvalues") new_matrix(A,opts))
driver!(ps_data,opts,gs)
@test iscomputed(ps_data)
# Just big enough to get to the inverse-Lanczos branch
# this is a stunt, so don't waste time with usual npts.
A = Matrix{BigFloat}(Pseudospectra.grcar(56))
opts = Dict{Symbol,Any}(:ax => [-1,3,-3,3], :npts => 10)
@test_logs (:warn, r"^Failed to compute eigenvalues") ps_data = new_matrix(A,opts)
driver!(ps_data,opts,gs)
@test iscomputed(ps_data)
end
|
{"hexsha": "4a26464f11fba634c95938324db727ba288bb2f0", "size": 867, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/big.jl", "max_stars_repo_name": "ranocha/Pseudospectra.jl", "max_stars_repo_head_hexsha": "63b3c62dc34707289bb73959bbeaee0529ecee36", "max_stars_repo_licenses": ["BSD-3-Clause", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/big.jl", "max_issues_repo_name": "ranocha/Pseudospectra.jl", "max_issues_repo_head_hexsha": "63b3c62dc34707289bb73959bbeaee0529ecee36", "max_issues_repo_licenses": ["BSD-3-Clause", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/big.jl", "max_forks_repo_name": "ranocha/Pseudospectra.jl", "max_forks_repo_head_hexsha": "63b3c62dc34707289bb73959bbeaee0529ecee36", "max_forks_repo_licenses": ["BSD-3-Clause", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6956521739, "max_line_length": 88, "alphanum_fraction": 0.6747404844, "num_tokens": 272}
|
! { dg-do compile { target { ! *-*-* } } }
!
program bug
use H5GLOBAL
implicit none
integer :: i
i=H5P_DEFAULT_F
end program bug
|
{"hexsha": "5eee0986b809806b127477ae63996d8926cb70c5", "size": 137, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr77420_4.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr77420_4.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr77420_4.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 15.2222222222, "max_line_length": 42, "alphanum_fraction": 0.6204379562, "num_tokens": 47}
|
using Test
using PyCall
using BenchmarkTools
using Fermi
using Fermi.Wavefunction
using Fermi.CoupledCluster: RCCSD,RCCD,DFRCCD
psi4.core.be_quiet() #turn off output
psi4.set_num_threads(6)
using LinearAlgebra
BLAS.set_num_threads(6)
# > setup
tol = 1E-14
psi4.set_options(Dict("D_CONVERGENCE" => 14,
"E_CONVERGENCE" => 12,
"scf_type" => "pk"))
mol2 = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104.0
symmetry c1
""")
e2,wfn2 = psi4.energy("hf/cc-pvtz",mol=mol2,return_wfn=true)
psi4.set_options(Dict("D_CONVERGENCE" => 14,
"E_CONVERGENCE" => 12,
"scf_type" => "df"))
e3,wfn3 = psi4.energy("hf/cc-pvtz",mol=mol2,return_wfn=true)
JuWfn3 = Wfn(wfn3)
JuWfn2 = Wfn(wfn2)
printdo=false
#println(@btime RCCD.do_rccd(JuWfn2; doprint=printdo))
println(@btime RCCSD.do_rccsd(JuWfn2; doprint=printdo))
#println(@btime DFRCCD.do_df_rccd(JuWfn3; doprint=printdo))
#println(psi4.energy("ccsd/sto-3g",mol=mol2) - e2)
|
{"hexsha": "7bfa0bc84a856aa2a79cd95158ff535b61c3aabe", "size": 1052, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "benchmark/BenchCoupledCluster.jl", "max_stars_repo_name": "jturney/Fermi.jl", "max_stars_repo_head_hexsha": "46a22ee67b0de7496c6767cb73c71d21dce397cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "benchmark/BenchCoupledCluster.jl", "max_issues_repo_name": "jturney/Fermi.jl", "max_issues_repo_head_hexsha": "46a22ee67b0de7496c6767cb73c71d21dce397cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/BenchCoupledCluster.jl", "max_forks_repo_name": "jturney/Fermi.jl", "max_forks_repo_head_hexsha": "46a22ee67b0de7496c6767cb73c71d21dce397cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0571428571, "max_line_length": 60, "alphanum_fraction": 0.6330798479, "num_tokens": 378}
|
# importing modules and packages
# system tools
import os
import sys
import argparse
sys.path.append(os.path.join("..", ".."))
from contextlib import redirect_stdout
# pandas, numpy, gensim
import pandas as pd
import numpy as np
import gensim.downloader
# import my classifier utility functions - see the Github repo!
import utils.classifier_utils as clf
# Machine learning stuff
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
# matplotlib
import matplotlib.pyplot as plt
class lr_classifier():
def __init__(self, args):
self.args = args
self.data = pd.read_csv(self.args["filename"])
def preprocessing(self):
'''
The preprocessing function performs various transformations to the data
1. Data is balanced to have an equal amount of label classes
2. Data is split into x and y
3. Data is further split into train and test values
4. X features are vectorized
'''
print("[INFO] Preprocessing Game of Thrones data...")
# I'm interested in seeing how many sentences there are from each season
n_sentences = []
for val in set(self.data['Season']):
length = len(self.data['Sentence'].loc[self.data['Season'] == val])
n_sentences.append(length) # I can see there is a different amount of sentences in each season, which might affect the classification. So I am saving all lengths and choose the minimum as n in the balance function to have a distribution that isn't skewed
# Balancing data to not bias classifier
balanced_data = clf.balance(self.data, label = "Season", n=min(n_sentences))
# Splitting up to x features and y from the balanced data
x = balanced_data['Sentence'].values
y = np.array(balanced_data['Season'].str.extract('(\d+)')).ravel()# Extracting only numbers to have cleaner output - ravel to make it a row-vector instead of column vector
self.y = [int(numeric_string) for numeric_string in y] # Integer
# Splitting into train and test sets
# I am only attributing "self" to y because these are finished being preprocessed. self.X features are defined in vectorization
X_train, X_test, self.y_train, self.y_test = train_test_split(x, # Creating two lists - sentences is an array
self.y, # Labels
test_size=0.25,
random_state=42,
stratify=self.y) # This should keep an equal amount of labels in each set - keeps the original distribution, which is equal.
# .fit_transform(X) = learn feature names + .transform(X)
# Vectorization
print("[INFO] Vectorizing text...")
vectorizer = CountVectorizer()
# Fitting the vectorizer to our data
# Transform to traning featues
self.X_train_feats = vectorizer.fit_transform(X_train)
#... then we do it for our test data
self.X_test_feats = vectorizer.transform(X_test)
# Create a list of the feature names.
feature_names = vectorizer.get_feature_names()
# Vectorize full dataset
self.X_vect = vectorizer.fit_transform(x)
def model(self):
'''
Function that fit a Logistic Regression to countvectorized X and y features and generates predictions
'''
print("[INFO] Defining logistic regression model...")
# Basic logistic regression
classifier = LogisticRegression(random_state=42).fit(self.X_train_feats, self.y_train)
self.y_pred = classifier.predict(self.X_test_feats)
def evaluation(self):
'''
Evaluation function that saves classification report and learning curve in defined paths. The learning curve is made from a 10-fold cross-validation of the the entired dataset
'''
print("[INFO] Evaluating logistic regression model...")
# Evaluation
classifier_metrics = pd.DataFrame(metrics.classification_report(self.y_test,
self.y_pred,
output_dict = True))
print(classifier_metrics)
classifier_metrics.to_csv(os.path.join(self.args['outpath'], "lr_classification_report.csv"))
def main():
# Argparse
ap = argparse.ArgumentParser(description="[INFO] LR classifier arguments")
ap.add_argument("-f",
"--filename",
required=False,
type=str,
default= os.path.join("..","..", "data", "4", "Game_of_Thrones_Script.csv"),
help="str, file name and location")
ap.add_argument("-o",
"--outpath",
required=False,
type=str,
default= os.path.join("..","..", "out","4"),
help="str, output location")
args = vars(ap.parse_args())
# Define class
lr_classifier_got = lr_classifier(args)
lr_classifier_got.preprocessing()
lr_classifier_got.model()
lr_classifier_got.evaluation()
if __name__=="__main__":
main()
|
{"hexsha": "ade713c52195e4d636a5f8f37736575316cae117", "size": 5648, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/4/lr_got.py", "max_stars_repo_name": "marmor97/cds-language-exam", "max_stars_repo_head_hexsha": "d6f1aa543ba3f78d1a9f34c67a687e4f0944a665", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/4/lr_got.py", "max_issues_repo_name": "marmor97/cds-language-exam", "max_issues_repo_head_hexsha": "d6f1aa543ba3f78d1a9f34c67a687e4f0944a665", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/4/lr_got.py", "max_forks_repo_name": "marmor97/cds-language-exam", "max_forks_repo_head_hexsha": "d6f1aa543ba3f78d1a9f34c67a687e4f0944a665", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7746478873, "max_line_length": 266, "alphanum_fraction": 0.611898017, "include": true, "reason": "import numpy", "num_tokens": 1119}
|
Require Import List.
Require Import ZArith.
Require Import String.
Open Scope string_scope.
Ltac inv H := inversion H; subst.
Ltac break_match :=
match goal with
| _ : context [ if ?cond then _ else _ ] |- _ =>
destruct cond as [] eqn:?
| |- context [ if ?cond then _ else _ ] =>
destruct cond as [] eqn:?
| _ : context [ match ?cond with _ => _ end ] |- _ =>
destruct cond as [] eqn:?
| |- context [ match ?cond with _ => _ end ] =>
destruct cond as [] eqn:?
end.
(** syntax *)
Inductive expr : Set :=
| Bool : bool -> expr
| Int : Z -> expr
| Var : string -> expr
| App : expr -> expr -> expr
| Lam : string -> expr -> expr.
Coercion Bool : bool >-> expr.
Coercion Int : Z >-> expr.
Coercion Var : string >-> expr.
Notation "X @ Y" := (App X Y) (at level 49).
Notation "\ X , Y" := (Lam X Y) (at level 50).
(** substitution *)
(** e1[e2/x] = e3 *)
Inductive Subst : expr -> expr -> string ->
expr -> Prop :=
| SubstBool:
forall b e x,
Subst (Bool b) e x
(Bool b)
| SubstInt:
forall i e x,
Subst (Int i) e x
(Int i)
| SubstVar_same:
forall e x,
Subst (Var x) e x
e
| SubstVar_diff:
forall e x1 x2,
x1 <> x2 ->
Subst (Var x1) e x2
(Var x1)
| SubstApp:
forall e1 e2 e x e1' e2',
Subst e1 e x e1' ->
Subst e2 e x e2' ->
Subst (App e1 e2) e x
(App e1' e2')
| SubstLam_same:
forall e1 x e,
Subst (Lam x e1) e x
(Lam x e1)
| SubstLam_diff:
forall e1 x1 x2 e e1',
x1 <> x2 ->
Subst e1 e x2 e1' ->
Subst (Lam x1 e1) e x2
(Lam x1 e1').
(** careful to make IH sufficiently strong *)
Lemma subst_det:
forall e1 e2 x e3,
Subst e1 e2 x e3 ->
forall e3',
Subst e1 e2 x e3' ->
e3 = e3'.
Proof.
induction 1; intros; auto.
- inv H; auto.
- inv H; auto.
- inv H; auto. congruence.
- inv H0; auto. congruence.
- inv H1.
erewrite IHSubst1; eauto.
erewrite IHSubst2; eauto.
- inv H; auto. congruence.
- inv H1; auto. congruence.
erewrite IHSubst; eauto.
Qed.
Lemma can_subst:
forall e1 e2 x,
exists e3, Subst e1 e2 x e3.
Proof.
induction e1; intros.
- econstructor; constructor.
- econstructor; constructor.
- case (string_dec x s); intros.
+ subst. econstructor; constructor.
+ econstructor; constructor; auto.
- edestruct IHe1_1; edestruct IHe1_2.
econstructor; econstructor; eauto.
- edestruct IHe1.
case (string_dec x s); intros.
+ subst. econstructor; constructor.
+ econstructor; constructor; eauto.
Qed.
(** define free variables *)
Inductive free : expr -> string -> Prop :=
| FreeVar:
forall x,
free (Var x) x
| FreeApp_l:
forall x e1 e2,
free e1 x ->
free (App e1 e2) x
| FreeApp_r:
forall x e1 e2,
free e2 x ->
free (App e1 e2) x
| FreeLam:
forall x1 x2 e,
free e x1 ->
x1 <> x2 ->
free (Lam x2 e) x1.
Lemma subst_only_free:
forall e1 e2 x e3,
Subst e1 e2 x e3 ->
~ free e1 x ->
e1 = e3.
Proof.
induction 1; intros; auto.
- destruct H. constructor.
- f_equal.
+ apply IHSubst1; intuition.
apply H1; apply FreeApp_l; auto.
+ apply IHSubst2; intuition.
apply H1; apply FreeApp_r; auto.
- rewrite IHSubst; auto.
intuition. apply H1.
constructor; auto.
Qed.
(** closed terms have no free variables *)
Definition closed (e: expr) : Prop :=
forall x, ~ free e x.
Lemma closed_app_intro:
forall e1 e2,
closed e1 ->
closed e2 ->
closed (e1 @ e2).
Proof.
unfold closed, not; intros.
inv H1.
- eapply H; eauto.
- eapply H0; eauto.
Qed.
Lemma closed_app_inv:
forall e1 e2,
closed (e1 @ e2) ->
closed e1 /\ closed e2.
Proof.
unfold closed, not; split; intros.
- eapply H; eauto.
apply FreeApp_l; eauto.
- eapply H; eauto.
apply FreeApp_r; eauto.
Qed.
Lemma closed_lam_intro:
forall x e,
(forall y, y <> x -> ~ free e y) ->
closed (\x, e).
Proof.
unfold closed, not; intros.
inv H0. eapply H; eauto.
Qed.
Lemma closed_lam_inv:
forall x e,
closed (\x, e) ->
(forall y, y <> x -> ~ free e y).
Proof.
unfold closed, not; intros.
cut (free (\x, e) y); intros.
- eapply H; eauto.
- constructor; auto.
Qed.
(** closed-ness preserved by substitution *)
Lemma subst_pres_closed:
forall e1 e2 x e3,
Subst e1 e2 x e3 ->
closed e1 ->
closed e2 ->
closed e3.
Proof.
induction 1; intros; auto.
- apply closed_app_inv in H1.
apply closed_app_intro; intuition.
- apply subst_only_free in H0; subst; auto.
unfold closed in *; intuition.
eapply H1; eauto.
econstructor; eauto.
Qed.
(**
Call By Name
<<
e1 --> e1'
---------------------
e1 e2 --> e1' e2
-----------------------------
(\x. e1) e2 --> e1[e2/x]
>>
*)
Inductive step_cbn : expr -> expr -> Prop :=
| CBN_crunch:
forall e1 e1' e2,
step_cbn e1 e1' ->
step_cbn (App e1 e2) (App e1' e2)
| CBN_subst:
forall x e1 e2 e1',
Subst e1 e2 x e1' ->
step_cbn (App (Lam x e1) e2) e1'.
Notation "e1 ==> e2" := (step_cbn e1 e2) (at level 51).
Inductive star_cbn : expr -> expr -> Prop :=
| scbn_refl:
forall e,
star_cbn e e
| scbn_step:
forall e1 e2 e3,
step_cbn e1 e2 ->
star_cbn e2 e3 ->
star_cbn e1 e3.
Notation "e1 ==>* e2" := (star_cbn e1 e2) (at level 51).
Definition stuck (e: expr) : Prop :=
forall e', ~ e ==> e'.
Lemma step_cbn_det:
forall e e1,
e ==> e1 ->
forall e2,
e ==> e2 ->
e1 = e2.
Proof.
induction 1; intros.
- inv H0.
+ f_equal. apply IHstep_cbn; auto.
+ inv H.
- inv H0.
+ inv H4.
+ eapply subst_det; eauto.
Qed.
(** values *)
Inductive value : expr -> Prop :=
| VBool:
forall b,
value (Bool b)
| VInt:
forall i,
value (Int i)
| VLam:
forall x e,
value (Lam x e).
Lemma value_stuck:
forall e,
value e ->
stuck e.
Proof.
unfold stuck, not; intros;
inv H; inv H0.
Qed.
(** types and typing *)
Inductive typ : Set :=
| TBool : typ
| TInt : typ
| TFun : typ -> typ -> typ.
Notation "X ~> Y" := (TFun X Y) (at level 60).
Definition env : Type :=
string -> option typ.
Definition E0 : env :=
fun _ => None.
Definition extend (e: env) x t : env :=
fun y =>
if string_dec y x then
Some t
else
e y.
Inductive typed : env -> expr -> typ -> Prop :=
| WTBool:
forall env b,
typed env (Bool b) TBool
| WTInt:
forall env i,
typed env (Int i) TInt
| WTVar:
forall env x t,
env x = Some t ->
typed env (Var x) t
| WTApp:
forall env e1 e2 tA tB,
typed env e1 (tA ~> tB) ->
typed env e2 tA ->
typed env (e1 @ e2) tB
| WTLam:
forall env x e tA tB,
typed (extend env x tA) e tB ->
typed env (\x, e) (tA ~> tB).
(** env must bind all free vars to type *)
Lemma typed_free_env:
forall env e t,
typed env e t ->
forall x,
free e x ->
exists tx, env x = Some tx.
Proof.
induction 1; intros.
- inv H.
- inv H.
- inv H0; eauto.
- inv H1.
+ apply IHtyped1; auto.
+ apply IHtyped2; auto.
- inv H0. apply IHtyped in H3.
destruct H3 as [tx Htx].
exists tx. unfold extend in Htx.
break_match; congruence.
Qed.
(** therefore, typing in empty env
implies term is closed *)
Lemma typed_E0_closed:
forall e t,
typed E0 e t ->
closed e.
Proof.
unfold closed, not; intros.
eapply typed_free_env in H0; eauto.
destruct H0. discriminate.
Qed.
(** canonical forms *)
Lemma cannon_bool:
forall env e,
value e ->
typed env e TBool ->
exists b, e = Bool b.
Proof.
intros.
inv H; inv H0; eauto.
Qed.
Lemma cannon_int:
forall env e,
value e ->
typed env e TInt ->
exists i, e = Int i.
Proof.
intros.
inv H; inv H0; eauto.
Qed.
Lemma cannon_fun:
forall env e tA tB,
value e ->
typed env e (tA ~> tB) ->
exists x, exists b, e = \x, b.
Proof.
intros.
inv H; inv H0; eauto.
Qed.
(** progress *)
Lemma progress:
forall e t,
typed E0 e t ->
(exists e', e ==> e') \/ value e.
Proof.
remember E0.
induction 1; subst; intros.
- right; constructor.
- right; constructor.
- unfold E0 in H; congruence.
- left. destruct IHtyped1; auto.
+ destruct H1 as [e1']. exists (e1' @ e2).
constructor; auto.
+ eapply cannon_fun in H1; eauto.
destruct H1 as [x [e1' He1']]; subst.
destruct (can_subst e1' e2 x) as [e3].
exists e3. constructor; auto.
- right; constructor.
Qed.
(** preservation *)
Definition env_equiv (e1 e2: env) : Prop :=
forall s, e1 s = e2 s.
Lemma env_equiv_refl:
forall env,
env_equiv env env.
Proof.
unfold env_equiv; auto.
Qed.
Lemma env_equiv_sym:
forall e1 e2,
env_equiv e1 e2 ->
env_equiv e2 e1.
Proof.
unfold env_equiv; auto.
Qed.
Lemma env_equiv_trans:
forall e1 e2 e3,
env_equiv e1 e2 ->
env_equiv e2 e3 ->
env_equiv e1 e3.
Proof.
unfold env_equiv; intros.
congruence.
Qed.
Lemma env_equiv_extend:
forall env1 env2 x t,
env_equiv env1 env2 ->
env_equiv (extend env1 x t) (extend env2 x t).
Proof.
unfold env_equiv, extend; intros.
break_match; auto.
Qed.
Lemma env_equiv_overwrite:
forall env x t1 t2,
env_equiv (extend (extend env x t1) x t2)
(extend env x t2).
Proof.
unfold env_equiv, extend; intros.
break_match; auto.
Qed.
Lemma env_equiv_neq:
forall env1 env2 x1 t1 x2 t2,
x1 <> x2 ->
env_equiv env1 env2 ->
env_equiv (extend (extend env1 x1 t1) x2 t2)
(extend (extend env2 x2 t2) x1 t1).
Proof.
unfold env_equiv, extend; intros.
break_match; break_match; congruence.
Qed.
Lemma env_equiv_typed:
forall env1 e t,
typed env1 e t ->
forall env2,
env_equiv env1 env2 ->
typed env2 e t.
Proof.
unfold env_equiv.
induction 1; intros.
- constructor.
- constructor.
- constructor; congruence.
- econstructor; eauto.
- econstructor; eauto.
apply IHtyped; auto.
intros; apply env_equiv_extend; auto.
Qed.
Lemma strengthen:
forall e env t x t',
typed (extend env x t') e t ->
~ free e x ->
typed env e t.
Proof.
induction e; intros; inv H.
- constructor.
- constructor.
- constructor. unfold extend in H3.
break_match; subst; auto.
destruct H0. constructor.
- econstructor; eauto.
+ eapply IHe1; eauto. intuition.
apply H0; apply FreeApp_l; auto.
+ eapply IHe2; eauto. intuition.
apply H0; apply FreeApp_r; auto.
- constructor.
case (string_dec s x); intros; subst.
+ eapply env_equiv_typed; eauto.
apply env_equiv_overwrite.
+ cut (~ free e x); intros.
* eapply IHe; eauto.
eapply env_equiv_typed; eauto.
apply env_equiv_neq; auto.
apply env_equiv_refl.
* intuition. apply H0.
constructor; auto.
Qed.
Lemma weaken:
forall env e t,
typed env e t ->
forall x t',
~ free e x ->
typed (extend env x t') e t.
Proof.
induction 1; intros.
- constructor.
- constructor.
- constructor. unfold extend.
break_match; subst; auto.
destruct H0. constructor.
- econstructor; eauto.
+ apply IHtyped1. intuition.
apply H1; apply FreeApp_l; auto.
+ apply IHtyped2. intuition.
apply H1; apply FreeApp_r; auto.
- constructor.
case (string_dec x x0); intros; subst.
+ eapply env_equiv_typed; eauto.
apply env_equiv_sym.
apply env_equiv_overwrite.
+ cut (~ free e x0); intros.
* apply IHtyped with (t' := t') in H1; auto.
eapply env_equiv_typed; eauto.
apply env_equiv_neq; auto.
apply env_equiv_refl.
* intuition. apply H0.
constructor; auto.
Qed.
Definition free_env_equiv (E: expr) (e1 e2: env) : Prop :=
forall x,
free E x ->
e1 x = e2 x.
Lemma free_env_equiv_refl:
forall E env,
free_env_equiv E env env.
Proof.
unfold free_env_equiv; auto.
Qed.
Lemma free_env_equiv_sym:
forall E e1 e2,
free_env_equiv E e1 e2 ->
free_env_equiv E e2 e1.
Proof.
unfold free_env_equiv; intros.
symmetry. apply H; auto.
Qed.
Lemma free_env_equiv_trans:
forall E e1 e2 e3,
free_env_equiv E e1 e2 ->
free_env_equiv E e2 e3 ->
free_env_equiv E e1 e3.
Proof.
unfold free_env_equiv; intros.
apply eq_trans with (y := e2 x); auto.
Qed.
Lemma free_env_equiv_extend:
forall E env1 env2 x t,
free_env_equiv E env1 env2 ->
free_env_equiv E (extend env1 x t) (extend env2 x t).
Proof.
unfold free_env_equiv, extend; intros.
break_match; auto.
Qed.
Lemma free_env_equiv_overwrite:
forall E env x t1 t2,
free_env_equiv E (extend (extend env x t1) x t2)
(extend env x t2).
Proof.
unfold free_env_equiv, extend; intros.
break_match; auto.
Qed.
Lemma free_env_equiv_neq:
forall E env1 env2 x1 t1 x2 t2,
x1 <> x2 ->
free_env_equiv E env1 env2 ->
free_env_equiv E (extend (extend env1 x1 t1) x2 t2)
(extend (extend env2 x2 t2) x1 t1).
Proof.
unfold free_env_equiv, extend; intros.
break_match; break_match; subst; auto.
congruence.
Qed.
Lemma free_env_equiv_typed:
forall env1 e t,
typed env1 e t ->
forall env2,
free_env_equiv e env1 env2 ->
typed env2 e t.
Proof.
unfold free_env_equiv.
induction 1; intros.
- constructor.
- constructor.
- constructor. symmetry.
rewrite <- H. apply H0.
constructor.
- econstructor; eauto.
+ apply IHtyped1; intuition.
apply H1; apply FreeApp_l; auto.
+ apply IHtyped2; intuition.
apply H1; apply FreeApp_r; auto.
- econstructor; eauto.
apply IHtyped; auto.
unfold extend; intros.
break_match; auto.
apply H0. constructor; auto.
Qed.
Lemma typed_closed:
forall env e t,
typed env e t ->
closed e ->
typed E0 e t.
Proof.
induction 1; intros.
- constructor.
- constructor.
- unfold closed in H0.
destruct H0 with (x0 := x).
constructor.
- apply closed_app_inv in H1; intuition.
econstructor; eauto.
- constructor.
eapply free_env_equiv_typed; eauto.
unfold free_env_equiv; intros.
unfold extend. break_match; auto.
apply closed_lam_inv with (y := x0) in H0; auto.
contradiction.
Qed.
Lemma subst_pres_typed:
forall e1 e2 x e3,
Subst e1 e2 x e3 ->
closed e2 ->
forall env tA tB,
typed (extend env x tA) e1 tB ->
typed env e2 tA ->
typed env e3 tB.
Proof.
induction 1; intros; auto.
- inv H0. constructor.
- inv H0. constructor.
- inv H0. unfold extend in H4.
break_match; congruence.
- inv H1. unfold extend in H5.
break_match; try congruence.
constructor; auto.
- inv H2. econstructor; eauto.
- eapply free_env_equiv_typed; eauto.
unfold free_env_equiv, extend; intros.
break_match; auto; subst.
inv H2; congruence.
- inv H2. constructor.
eapply IHSubst; eauto.
+ eapply env_equiv_typed; eauto.
apply env_equiv_neq; auto.
apply env_equiv_refl.
+ apply weaken; auto.
Qed.
Lemma preserve:
forall e e',
e ==> e' ->
closed e ->
forall env t,
typed env e t ->
typed env e' t.
Proof.
induction 1; intros.
- apply closed_app_inv in H0; intuition.
inv H1. apply H0 in H7.
econstructor; eauto.
- apply closed_app_inv in H0; intuition.
inv H1. inv H6.
eapply subst_pres_typed in H; eauto.
Qed.
(** type soundness *)
Lemma soundness:
forall e t e',
typed E0 e t ->
e ==>* e' ->
(exists e'', e' ==> e'') \/ value e'.
Proof.
intros. induction H0.
- eapply progress; eauto.
- apply IHstar_cbn.
eapply preserve; eauto.
eapply typed_E0_closed; eauto.
Qed.
|
{"author": "palmskog", "repo": "street-fighting-proof-assistants", "sha": "f89660fab17a8c1a6c9cd9c14484ed8d72fb0088", "save_path": "github-repos/coq/palmskog-street-fighting-proof-assistants", "path": "github-repos/coq/palmskog-street-fighting-proof-assistants/street-fighting-proof-assistants-f89660fab17a8c1a6c9cd9c14484ed8d72fb0088/old-505-lc/L10_in_class.v"}
|
import argparse
import math
import numpy as np
import torch
from torch import nn
from basicsr.archs.stylegan2_arch import StyleGAN2Generator
from basicsr.metrics.fid import (calculate_fid, extract_inception_features,
load_patched_inception_v3)
def calculate_stylegan2_fid():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument(
'ckpt', type=str, help='Path to the stylegan2 checkpoint.')
parser.add_argument(
'fid_stats', type=str, help='Path to the dataset fid statistics.')
parser.add_argument('--size', type=int, default=256)
parser.add_argument('--channel_multiplier', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--num_sample', type=int, default=50000)
parser.add_argument('--truncation', type=float, default=1)
parser.add_argument('--truncation_mean', type=int, default=4096)
args = parser.parse_args()
# create stylegan2 model
generator = StyleGAN2Generator(
out_size=args.size,
num_style_feat=512,
num_mlp=8,
channel_multiplier=args.channel_multiplier,
resample_kernel=(1, 3, 3, 1))
generator.load_state_dict(torch.load(args.ckpt)['params_ema'])
generator = nn.DataParallel(generator).eval().to(device)
if args.truncation < 1:
with torch.no_grad():
truncation_latent = generator.mean_latent(args.truncation_mean)
else:
truncation_latent = None
# inception model
inception = load_patched_inception_v3(device)
total_batch = math.ceil(args.num_sample / args.batch_size)
def sample_generator(total_batch):
for i in range(total_batch):
with torch.no_grad():
latent = torch.randn(args.batch_size, 512, device=device)
samples, _ = generator([latent],
truncation=args.truncation,
truncation_latent=truncation_latent)
yield samples
features = extract_inception_features(
sample_generator(total_batch), inception, total_batch, device)
features = features.numpy()
total_len = features.shape[0]
features = features[:args.num_sample]
print(f'Extracted {total_len} features, '
f'use the first {features.shape[0]} features to calculate stats.')
sample_mean = np.mean(features, 0)
sample_cov = np.cov(features, rowvar=False)
# load the dataset stats
stats = torch.load(args.fid_stats)
real_mean = stats['mean']
real_cov = stats['cov']
# calculate FID metric
fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
print('fid:', fid)
if __name__ == '__main__':
calculate_stylegan2_fid()
|
{"hexsha": "8a3d8a978fa0f5971f6d03aaa3d6add2fff87013", "size": 2851, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/modules/BasicSR/scripts/metrics/calculate_stylegan2_fid.py", "max_stars_repo_name": "harryprabowo/multipurpose-iris-recognition", "max_stars_repo_head_hexsha": "549cb95499abb3ea7900aa29ced55a9ddce895cc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/modules/BasicSR/scripts/metrics/calculate_stylegan2_fid.py", "max_issues_repo_name": "harryprabowo/multipurpose-iris-recognition", "max_issues_repo_head_hexsha": "549cb95499abb3ea7900aa29ced55a9ddce895cc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/modules/BasicSR/scripts/metrics/calculate_stylegan2_fid.py", "max_forks_repo_name": "harryprabowo/multipurpose-iris-recognition", "max_forks_repo_head_hexsha": "549cb95499abb3ea7900aa29ced55a9ddce895cc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6375, "max_line_length": 76, "alphanum_fraction": 0.67064188, "include": true, "reason": "import numpy", "num_tokens": 642}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.