import pytest
import numpy as np
from zodipy._functions import blackbody_emission, interplanetary_temperature
TEMPERATURE = 30
TEMPERATURE_ARRAY = np.array([31,45,53])
R = 3
R_ARRAY = np.array([4, 5.3, 6])
DELTA = 0.324
FREQUENCY = 549 * 1e9
def test_blackbody_emission_value():
"""Tests that return value."""
emission = blackbody_emission(TEMPERATURE, FREQUENCY)
assert emission == pytest.approx(1.73442848898e-15, abs=1e-20)
def test_blackbody_emission_value_array():
"""Tests the return value given a temperature array."""
emission = blackbody_emission(TEMPERATURE_ARRAY, FREQUENCY)
true_values = np.array([1.82147825366e-15, 3.06550295038e-15, 3.78860400626e-15])
assert emission == pytest.approx(true_values, abs=1e-20)
def test_blackbody_emission_returns_float():
"""Tests that the returned value is a float given a float temperature."""
emission = blackbody_emission(TEMPERATURE, FREQUENCY)
assert isinstance(emission, float)
def test_blackbody_emission_returns_array():
"""Tests that the returned value is an array given an array temperature."""
emission = blackbody_emission(TEMPERATURE_ARRAY, FREQUENCY)
assert isinstance(emission, np.ndarray)
def test_interplanetary_temperature_value():
"""Tests that the returned value given a float R."""
ipd_temperature = interplanetary_temperature(R, TEMPERATURE, DELTA)
assert ipd_temperature == pytest.approx(21.0152213243, abs=1e-10)
def test_interplanetary_temperature_value_array():
"""Tests that the returned value given a float R."""
ipd_temperature = interplanetary_temperature(R_ARRAY, TEMPERATURE, DELTA)
true_values = np.array([19.1449315324, 17.4765568067, 16.7880498296])
assert ipd_temperature == pytest.approx(true_values, abs=1e-10)
def test_interplanetary_temperature_returns_float():
"""Tests that the returned value is a float given a float R."""
ipd_temperature = interplanetary_temperature(R, TEMPERATURE, DELTA)
assert isinstance(ipd_temperature, float)
def test_interplanetary_temperature_returns_array():
"""Tests that the returned value is a array given a array R."""
ipd_temperature = interplanetary_temperature(R_ARRAY, TEMPERATURE, DELTA)
assert isinstance(ipd_temperature, np.ndarray)
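# For reference, a minimal sketch (assumed, not the zodipy source) of the two
# functions under test: the Planck law and a power-law dust temperature
# profile. Both reproduce the expected values asserted above.
#
# h = 6.62607015e-34    # Planck constant [J s]
# c = 2.99792458e8      # speed of light [m / s]
# k_B = 1.380649e-23    # Boltzmann constant [J / K]
#
# def blackbody_emission(T, freq):
#     """Planck spectral radiance B_nu(T) [W / m^2 / Hz / sr]."""
#     return 2 * h * freq**3 / c**2 / np.expm1(h * freq / (k_B * T))
#
# def interplanetary_temperature(R, T_0, delta):
#     """Dust temperature falling off as T(R) = T_0 * R**(-delta)."""
#     return T_0 * R**(-delta)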
# Source: tests/test_functions.py from MetinSa/zodipy (MIT license).
# Copyright 2020 Zhi Huang. All rights reserved
# Created on Wed Feb 19 13:20:25 2020
# Author: Zhi Huang, Purdue University
#
# This is a concise rewrite of sklearn.decomposition._nmf.
#
# The original code came with the following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Zhi Huang be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
from typing import Callable, Iterator, List, Optional, Tuple, Union, Any, Iterable
import numpy as np
import scipy.sparse as sp
import pandas as pd
from sklearn.utils import check_random_state, check_array
from sklearn.decomposition._cdnmf_fast import _update_cdnmf_fast
from sklearn.utils.extmath import safe_sparse_dot
import copy
from math import sqrt
from sklearn.utils.extmath import randomized_svd, squared_norm
from sklearn.utils.validation import check_non_negative
from lifelines.utils import concordance_index
from ..survival import newton_rhapson_for_efron_model
import time
import warnings
import logging
EPSILON = np.finfo(np.float32).eps
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
Parameters
----------
x : array-like
Vector for which to compute the norm
"""
return sqrt(squared_norm(x))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: None.
Valid options:
- None: 'nndsvd' if n_components <= min(n_samples, n_features),
otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
eps : float
Truncate all values less than this in output to zero.
random_state : int, RandomState instance, default=None
Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if (init is not None and init != 'random'
and n_components > min(n_samples, n_features)):
raise ValueError("init = '{}' can only be used when "
"n_components <= min(n_samples, n_features)"
.format(init))
if init is None:
if n_components <= min(n_samples, n_features):
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features).astype(X.dtype,
copy=False)
W = avg * rng.randn(n_samples, n_components).astype(X.dtype,
copy=False)
np.abs(H, out=H)
np.abs(W, out=W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W = np.zeros_like(U)
H = np.zeros_like(V)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
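# Example (illustrative; X_demo is made-up data, not part of this module):
#
#   X_demo = np.abs(np.random.RandomState(0).randn(6, 4))
#   W0, H0 = _initialize_nmf(X_demo, n_components=2, init='nndsvd')
#   assert W0.shape == (6, 2) and H0.shape == (2, 4)
#
# W0 @ H0 then gives a non-negative first approximation of X_demo.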
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T).
Parameters
----------
X : array-like
First matrix
Y : array-like
Second matrix
"""
return np.dot(X.ravel(), Y.ravel())
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Faster than norm(x) ** 2.
Parameters
----------
x : array_like
Returns
-------
float
The Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array).
"""
x = np.ravel(x, order='K')
if np.issubdtype(x.dtype, np.integer):
warnings.warn('Array type is integer, np.dot may overflow. '
'Data should be float type to avoid this issue',
UserWarning)
return np.dot(x, x)
def calcuate_Frobenius_norm(X, W, H, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like, shape (n_samples, n_features)
W : float or dense array-like, shape (n_samples, n_components)
H : float or dense array-like, shape (n_components, n_features)
Returns
-------
res : float
Frobenius norm of X and np.dot(W, H)
"""
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
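# The sparse branch above relies on the identity
#   ||X - WH||_F^2 = ||X||_F^2 + tr(H^T W^T W H) - 2 tr(W^T X H^T),
# which avoids materializing the dense product np.dot(W, H).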
def _multiplicative_update_w(X, W, H, HHt=None, XHt=None, update_H=True):
"""update W in Multiplicative Update NMF"""
# Numerator
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
# avoid a copy of XHt, which will be re-computed (update_H=True)
numerator = XHt
else:
# preserve the XHt, which is not re-computed (update_H=False)
numerator = XHt.copy()
# Denominator
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
return delta_W, HHt, XHt
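# In matrix form, the step above is the classical Lee-Seung multiplicative
# update factor for W: delta_W = (X H^T) / (W H H^T), element-wise, so the
# caller's W *= delta_W decreases the Frobenius objective.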
def _multiplicative_update_w_orth(X, W, H, HHt=None, XHt=None, sigma=0):
'''
Implemented based on equation (18) from:
Mirzal, Andri. "A convergent algorithm for orthogonal nonnegative matrix factorization."
Journal of Computational and Applied Mathematics 260 (2014): 149-166.
'''
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
numerator = XHt + sigma*W
# Denominator
if HHt is None:
HHt = np.dot(H, H.T)
# ONMF on W
denominator = np.dot(W, HHt) + sigma * W.dot(W.T).dot(W)
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
# # ONMF on W
# denominator = W.dot(W.T).dot(X).dot(H.T) # Ding et al. (2006) Orthogonal Nonnegative Matrix Tri-factorizations for Clustering
# delta_W = np.sqrt(numerator)
return delta_W, HHt, XHt
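# Element-wise, the orthogonality-regularized step is (Mirzal 2014, Eq. 18):
#   W <- W * (X H^T + sigma * W) / (W H H^T + sigma * W W^T W),
# and sigma = 0 recovers the plain multiplicative update for W.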
def _multiplicative_update_h(X, W, H):
"""update H in Multiplicative Update NMF"""
numerator = safe_sparse_dot(W.T, X)
denominator = np.dot(np.dot(W.T, W), H)
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_H = numerator
return delta_H
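# Standard multiplicative update for H (element-wise):
#   H <- H * (W^T X) / (W^T W H)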
def _update_coordinate_descent(X, W, Ht, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = np.dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def NMF(X, n_components, solver = 'cd', max_iter=1000, tol=1e-6, update_H = True, random_state=None, shuffle=False, verbose=0):
'''
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
n_components : integer
Number of components. W and H are not passed in; they are initialized
internally via _initialize_nmf(X, n_components, init='random').
'''
W, H = _initialize_nmf(X, n_components, init = 'random', random_state=random_state)
if solver == 'mu':
# used for the convergence criterion
error_at_init = calcuate_Frobenius_norm(X, W, H, square_root=True)
previous_error = error_at_init
start_time = time.time()
HHt, XHt = None, None
for n_iter in range(1, max_iter + 1):
# update W
# HHt and XHt are saved and reused if not update_H
delta_W, HHt, XHt = _multiplicative_update_w(X, W, H, HHt, XHt, update_H = update_H)
W *= delta_W
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H)
H *= delta_H
# These values will be recomputed since H changed
HHt, XHt = None, None
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = calcuate_Frobenius_norm(X, W, H, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
if solver == 'cd':
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
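# Example (illustrative; values are made up):
#
#   X_demo = np.abs(np.random.RandomState(0).randn(20, 10))
#   W, H, n_iter = NMF(X_demo, n_components=3, solver='mu', random_state=0)
#   print(n_iter, np.linalg.norm(X_demo - W @ H))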
def CoxNMF(X: np.ndarray,
t: np.ndarray,
e: np.ndarray,
W_init = None,
H_init = None,
n_components: Optional[int] = 10,
alpha: Optional[float] = 1e-5,
sigma: Optional[float] = 0,
penalizer: Optional[float] = 0,
l1_ratio: Optional[float] = 0,
ci_tol: Optional[float] = 0.02,
max_iter: Optional[int] = 1000,
solver: Optional[str] = 'mu',
update_rule: Optional[str] = 'projection',
tol: Optional[float] = 1e-6,
random_state: Optional[int] = None,
update_H: bool = True,
update_beta: bool = True,
W_normalization: bool = False,
H_normalization: bool = False,
beta_normalization: bool = True,
logger=None,
verbose: Optional[int] = 0):
'''
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
W_init : array-like, shape (n_samples, n_components)
Initial guess for the solution (optional).
H_init : array-like, shape (n_components, n_features)
Initial guess for the solution (optional).
t : array-like, shape (n_features,)
Survival time of each patient (patients correspond to the columns of X).
e : array-like, shape (n_features,)
Survival event indicator (death = 1).
alpha : scalar.
Weight of the Cox partial-likelihood gradient in the H update.
ci_tol : float.
Tolerance for a decrease of the concordance index before stopping iteration.
'''
if W_init is None or H_init is None:
W, H = _initialize_nmf(X, n_components, init = 'random', random_state=random_state)
else:
W, H = W_init, H_init
# used for the convergence criterion
error_at_init = calcuate_Frobenius_norm(X, W, H, square_root=True)
previous_error = error_at_init
max_cindex = 0.5
start_time = time.time()
HHt, XHt = None, None
t_geq_matrix = np.array([[int(y >= x) for i,x in enumerate(t)] for j,y in enumerate(t)])
error_list = []
cindex_list = []
max_cindex_res = None
beta = None
for n_iter in range(1, max_iter + 1):
# update W
# HHt and XHt are saved and reused if not update_H
if sigma == 0:
delta_W, HHt, XHt = _multiplicative_update_w(X, W, H, HHt, XHt, update_H=update_H)
elif sigma > 0:
delta_W, HHt, XHt = _multiplicative_update_w_orth(X, W, H, HHt, XHt, sigma = sigma)
W *= delta_W
if W_normalization:
# column normalization on W
W = (W / np.linalg.norm(W, axis=0).T)
if update_beta:
beta, ll_, hessian = newton_rhapson_for_efron_model(X=H.T,
T=t,
E=e,
initial_point=beta,
penalizer=penalizer,
l1_ratio=l1_ratio,
max_steps=1)
# normalize beta
if beta_normalization:
beta = beta / (np.max(beta)-np.min(beta))
cindex = concordance_index(t, -np.dot(H.T, beta), e)
# update H
if update_H:
n_patients = t.shape[0]
numerator = safe_sparse_dot(W.T, X)
denominator = np.dot(np.dot(W.T, W), H)
H_mu = H*(numerator/denominator)
if beta is not None:
cox_numerator = np.repeat(np.expand_dims(np.matmul(beta, np.exp(np.matmul(beta.T, H)) ), axis = 2), n_patients, axis = 2).swapaxes(1,2) * t_geq_matrix.T
cox_numerator[:, np.where(e==0)[0], :] = 0
cox_denominator = np.expand_dims(np.matmul(np.exp(np.matmul(beta.T, H)), t_geq_matrix), axis = 2)
cox_fraction = e * np.repeat(beta, n_patients, axis = 1) - np.sum(cox_numerator / cox_denominator, axis = 1)
H_partial = alpha / 2 * (numerator/denominator) * cox_fraction
if update_rule == 'projection':
H_partial[H_partial < 0] = 0
H = H_mu + H_partial
else:
H = H_mu
if np.sum(np.isnan(H)) > 0:
print('Detected NaN value in CoxNMF @H, possibly due to overflow from large values in exp(beta*H). Algorithm stopped. H row normalization is suggested.')
break
if H_normalization:
# row normalization on H
H = (H.T / np.linalg.norm(H, axis=1)).T
# These values will be recomputed since H changed
HHt, XHt = None, None
error = calcuate_Frobenius_norm(X, W, H, square_root=True)
relative_error = error/np.linalg.norm(X,'fro')
if verbose:
print("Epoch %04d error: %f, relative_error: %f, concordance index: %f" % (n_iter, error, relative_error, cindex))
if logger:
logger.log(logging.INFO, "Epoch %04d error: %f, relative_error: %f, concordance index: %f" % (n_iter, error, relative_error, cindex))
error_list.append(error)
cindex_list.append(cindex)
# test convergence criterion every 10 iterations
# if tol > 0 and n_iter % 10 == 0:
if n_iter % 10 == 0:
if (previous_error - error) / error_at_init < tol:
print('Detected non-decreasing NMF error. Algorithm stopped.')
break
previous_error = error
if (cindex - max_cindex) < - ci_tol: # stop if the concordance index drops more than ci_tol below the best seen
print('Detected non-increasing C-Index. Algorithm stopped.')
break
if cindex >= max_cindex:
max_cindex = cindex
max_cindex_res = {}
max_cindex_res['W'] = W
max_cindex_res['H'] = H
max_cindex_res['error'] = error
max_cindex_res['cindex'] = cindex
max_cindex_res['beta'] = beta.reshape(-1)
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %04d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter, error_list, cindex_list, max_cindex_res
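# Example (illustrative; all values below are synthetic). Patients correspond
# to the columns of X, so t and e carry one entry per column:
#
#   rng = np.random.RandomState(0)
#   X_demo = np.abs(rng.randn(50, 30))        # 50 genes x 30 patients
#   t_demo = rng.exponential(10.0, size=30)   # survival times
#   e_demo = rng.binomial(1, 0.7, size=30)    # event indicator (death = 1)
#   W, H, n_iter, errors, cindexes, best = CoxNMF(
#       X_demo, t_demo, e_demo, n_components=5, alpha=1e-5, random_state=0)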
# Source: biolearns/decomposition/_nmf.py from huangzhii/biolearns (MIT license).
#!/usr/bin/python3
from datetime import datetime
import sys
import numpy
PKTS=300
slip=2
try:
if sys.argv[1]:
fileName = sys.argv[1]
except IndexError:
print("Using default file name.")
fileName = 'loglistener.txt'
f = open(fileName,"r")
#f.close()
def test():
delays = []
summ=0
first=0
txcounter=0.0
rxcounter=[0]*PKTS
min_delay = 1000000
max_delay = 0
for i in range(1,PKTS):
f.seek(0)
print(i)
dTime=0
for line in f.readlines():
hello = "hello " + str(i)
if hello in line:
if "sending "+hello +" from 100:fe80::212:7401:1" in line:
sTime = datetime.strptime(line[0:9], '%M:%S.%f')
txcounter+=1
print("add tx 1")
print (line)
if "ID:"+str(slip) in line and hello + " from 100:fe80::212:7401:1" in line:
if(first==0):
first=i
print("add rx 1")
print (line)
rTime = datetime.strptime(line[0:9], '%M:%S.%f')
dTime=rTime-sTime
dTime=dTime.seconds*1000000+dTime.microseconds
rxcounter[i]+=1
if min_delay > dTime:
min_delay = dTime
if max_delay < dTime:
max_delay = dTime
delays.append(dTime)
print("delay:"+str(dTime)+"\n")
break
# if "sending "+hello+" from 100:fe80::212:7402:2" in line:
# sTime = datetime.strptime(line[0:9], '%M:%S.%f')
# txcounter+=1
# print("add tx 2")
# print (line)
# if "ID:"+str(slip) in line and hello + " from 100:fe80::212:7402:2" in line:
# if(first==0):
# first=i
# print("add rx 2")
# print (line)
# rTime = datetime.strptime(line[0:9], '%M:%S.%f')
# dTime=rTime-sTime
# dTime=dTime.seconds*1000000+dTime.microseconds
# rxcounter[i]+=1
# if(min>dTime):
# min=dTime
# if(max<dTime):
# max=dTime
# list.append(dTime)
# print("delay:"+str(dTime)+"\n")
# if "sending "+hello +" from 100:fe80::212:7403:3" in line:
# sTime = datetime.strptime(line[0:9], '%M:%S.%f')
# txcounter+=1
# print("add tx 3")
# print (line)
# if "ID:"+str(slip) in line and hello + " from 100:fe80::212:7403:3" in line:
# if(first==0):
# first=i
# print("add rx 3")
# print (line)
# rTime = datetime.strptime(line[0:9], '%M:%S.%f')
# dTime=rTime-sTime
# dTime=dTime.seconds*1000000+dTime.microseconds
# rxcounter[i]+=1
# if(min>dTime):
# min=dTime
# if(max<dTime):
# max=dTime
# list.append(dTime)
# print("delay:"+str(dTime)+"\n")
# break
summ=summ+int(dTime)
rx=0
for el in rxcounter:
rx+=el
print("avg="+str(summ/rx)+"\n")
print("max:"+str(max)+ " Min:"+str(min)," StdDev:"+str(numpy.std(list)))
print(rx)
print(txcounter)
print("PDR="+str((rx)/(txcounter))+"\n")
if __name__ == '__main__' :
test()
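# The parser above assumes log lines whose first 9 characters are an
# "MM:SS.fff" timestamp, e.g. (illustrative):
#
#   01:23.456 ID:1 sending hello 7 from 100:fe80::212:7401:1
#   01:24.012 ID:2 ... hello 7 from 100:fe80::212:7401:1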
# Source: examples/sock/latency.py from iliar-rabet/dao-projection (BSD-3-Clause license).
[GOAL]
X : Scheme
⊢ T0Space ↑↑X.toPresheafedSpace
[PROOFSTEP]
refine' T0Space.of_open_cover fun x => _
[GOAL]
X : Scheme
x : ↑↑X.toPresheafedSpace
⊢ ∃ s, x ∈ s ∧ IsOpen s ∧ T0Space ↑s
[PROOFSTEP]
obtain ⟨U, R, ⟨e⟩⟩ := X.local_affine x
[GOAL]
case intro.intro.intro
X : Scheme
x : ↑↑X.toPresheafedSpace
U : OpenNhds x
R : CommRingCat
e :
LocallyRingedSpace.restrict X.toLocallyRingedSpace (_ : OpenEmbedding ↑(Opens.inclusion U.obj)) ≅
Spec.toLocallyRingedSpace.obj (op R)
⊢ ∃ s, x ∈ s ∧ IsOpen s ∧ T0Space ↑s
[PROOFSTEP]
let e' : U.1 ≃ₜ PrimeSpectrum R :=
homeoOfIso ((LocallyRingedSpace.forgetToSheafedSpace ⋙ SheafedSpace.forget _).mapIso e)
[GOAL]
case intro.intro.intro
X : Scheme
x : ↑↑X.toPresheafedSpace
U : OpenNhds x
R : CommRingCat
e :
LocallyRingedSpace.restrict X.toLocallyRingedSpace (_ : OpenEmbedding ↑(Opens.inclusion U.obj)) ≅
Spec.toLocallyRingedSpace.obj (op R)
e' : { x_1 // x_1 ∈ U.obj } ≃ₜ PrimeSpectrum ↑R :=
homeoOfIso ((LocallyRingedSpace.forgetToSheafedSpace ⋙ SheafedSpace.forget CommRingCat).mapIso e)
⊢ ∃ s, x ∈ s ∧ IsOpen s ∧ T0Space ↑s
[PROOFSTEP]
exact ⟨U.1.1, U.2, U.1.2, e'.embedding.t0Space⟩
[GOAL]
X : Scheme
⊢ QuasiSober ↑↑X.toPresheafedSpace
[PROOFSTEP]
apply (config := { allowSynthFailures := true })
quasiSober_of_open_cover (Set.range fun x => Set.range <| (X.affineCover.map x).1.base)
[GOAL]
case hS
X : Scheme
⊢ ∀ (s : ↑(Set.range fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base)), IsOpen ↑s
[PROOFSTEP]
rintro ⟨_, i, rfl⟩
[GOAL]
case hS.mk.intro
X : Scheme
i : (Scheme.affineCover X).J
⊢ IsOpen
↑{ val := (fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) i,
property :=
(_ :
∃ y,
(fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) y =
(fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) i) }
[PROOFSTEP]
exact (X.affineCover.IsOpen i).base_open.open_range
[GOAL]
case hS'
X : Scheme
⊢ ∀ (s : ↑(Set.range fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base)), QuasiSober ↑↑s
[PROOFSTEP]
rintro ⟨_, i, rfl⟩
[GOAL]
case hS'.mk.intro
X : Scheme
i : (Scheme.affineCover X).J
⊢ QuasiSober
↑↑{ val := (fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) i,
property :=
(_ :
∃ y,
(fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) y =
(fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) i) }
[PROOFSTEP]
exact
@OpenEmbedding.quasiSober _ _ _ _ _
(Homeomorph.ofEmbedding _ (X.affineCover.IsOpen i).base_open.toEmbedding).symm.openEmbedding
PrimeSpectrum.quasiSober
[GOAL]
case hS''
X : Scheme
⊢ (⋃₀ Set.range fun x => Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base) = ⊤
[PROOFSTEP]
rw [Set.top_eq_univ, Set.sUnion_range, Set.eq_univ_iff_forall]
[GOAL]
case hS''
X : Scheme
⊢ ∀ (x : (forget TopCat).obj ↑X.toPresheafedSpace),
x ∈ ⋃ (x : (Scheme.affineCover X).J), Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base
[PROOFSTEP]
intro x
[GOAL]
case hS''
X : Scheme
x : (forget TopCat).obj ↑X.toPresheafedSpace
⊢ x ∈ ⋃ (x : (Scheme.affineCover X).J), Set.range ↑(Scheme.OpenCover.map (Scheme.affineCover X) x).val.base
[PROOFSTEP]
exact ⟨_, ⟨_, rfl⟩, X.affineCover.Covers x⟩
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
⊢ IsReduced X
[PROOFSTEP]
refine' ⟨fun U => ⟨fun s hs => _⟩⟩
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : IsNilpotent s
⊢ s = 0
[PROOFSTEP]
apply Presheaf.section_ext X.sheaf U s 0
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : IsNilpotent s
⊢ ∀ (x : { x // x ∈ U }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
intro x
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : IsNilpotent s
x : { x // x ∈ U }
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
rw [RingHom.map_zero]
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : IsNilpotent s
x : { x // x ∈ U }
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
change X.presheaf.germ x s = 0
[GOAL]
X : Scheme
inst✝ : ∀ (x : ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : IsNilpotent s
x : { x // x ∈ U }
⊢ ↑(Presheaf.germ X.presheaf x) s = 0
[PROOFSTEP]
exact (hs.map _).eq_zero
[GOAL]
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
⊢ _root_.IsReduced ↑(Presheaf.stalk X.presheaf x)
[PROOFSTEP]
constructor
[GOAL]
case eq_zero
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
⊢ ∀ (x_1 : ↑(Presheaf.stalk X.presheaf x)), IsNilpotent x_1 → x_1 = 0
[PROOFSTEP]
rintro g ⟨n, e⟩
[GOAL]
case eq_zero.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
g : ↑(Presheaf.stalk X.presheaf x)
n : ℕ
e : g ^ n = 0
⊢ g = 0
[PROOFSTEP]
obtain ⟨U, hxU, s, rfl⟩ := X.presheaf.germ_exist x g
[GOAL]
case eq_zero.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e : ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s ^ n = 0
⊢ ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s = 0
[PROOFSTEP]
rw [← map_pow, ← map_zero (X.presheaf.germ ⟨x, hxU⟩)] at e
[GOAL]
case eq_zero.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e :
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) (s ^ n) =
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) 0
⊢ ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s = 0
[PROOFSTEP]
obtain ⟨V, hxV, iU, iV, e'⟩ := X.presheaf.germ_eq x hxU hxU _ 0 e
[GOAL]
case eq_zero.intro.intro.intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e :
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) (s ^ n) =
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) 0
V : Opens ↑↑X.toPresheafedSpace
hxV : x ∈ V
iU iV : V ⟶ U
e' : ↑(X.presheaf.map iU.op) (s ^ n) = ↑(X.presheaf.map iV.op) 0
⊢ ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s = 0
[PROOFSTEP]
rw [map_pow, map_zero] at e'
[GOAL]
case eq_zero.intro.intro.intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e :
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) (s ^ n) =
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) 0
V : Opens ↑↑X.toPresheafedSpace
hxV : x ∈ V
iU iV : V ⟶ U
e' : ↑(X.presheaf.map iU.op) s ^ n = 0
⊢ ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s = 0
[PROOFSTEP]
replace e' := (IsNilpotent.mk _ _ e').eq_zero (R := X.presheaf.obj <| op V)
[GOAL]
case eq_zero.intro.intro.intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e :
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) (s ^ n) =
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) 0
V : Opens ↑↑X.toPresheafedSpace
hxV : x ∈ V
iU iV : V ⟶ U
e' : ↑(X.presheaf.map iU.op) s = 0
⊢ ↑(Presheaf.germ X.presheaf { val := x, property := hxU }) s = 0
[PROOFSTEP]
erw [← ConcreteCategory.congr_hom (X.presheaf.germ_res iU ⟨x, hxV⟩) s]
[GOAL]
case eq_zero.intro.intro.intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
x : ↑↑X.toPresheafedSpace
n : ℕ
U : Opens ↑↑X.toPresheafedSpace
hxU : x ∈ U
s : (forget CommRingCat).obj (X.presheaf.obj (op U))
e :
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) (s ^ n) =
↑(Presheaf.germ X.presheaf { val := x, property := hxU }) 0
V : Opens ↑↑X.toPresheafedSpace
hxV : x ∈ V
iU iV : V ⟶ U
e' : ↑(X.presheaf.map iU.op) s = 0
⊢ ↑(X.presheaf.map iU.op ≫ Presheaf.germ X.presheaf { val := x, property := hxV }) s = 0
[PROOFSTEP]
rw [comp_apply, e', map_zero]
[GOAL]
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
⊢ IsReduced X
[PROOFSTEP]
constructor
[GOAL]
case component_reduced
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
⊢ autoParam (∀ (U : Opens ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(X.presheaf.obj (op U))) _auto✝
[PROOFSTEP]
intro U
[GOAL]
case component_reduced
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
U : Opens ↑↑X.toPresheafedSpace
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
have : U = (Opens.map f.1.base).obj (H.base_open.isOpenMap.functor.obj U) := by ext1;
exact (Set.preimage_image_eq _ H.base_open.inj).symm
[GOAL]
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
U : Opens ↑↑X.toPresheafedSpace
⊢ U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
[PROOFSTEP]
ext1
[GOAL]
case h
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
U : Opens ↑↑X.toPresheafedSpace
⊢ ↑U = ↑((Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U))
[PROOFSTEP]
exact (Set.preimage_image_eq _ H.base_open.inj).symm
[GOAL]
case component_reduced
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
U : Opens ↑↑X.toPresheafedSpace
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
rw [this]
[GOAL]
case component_reduced
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝ : IsReduced Y
U : Opens ↑↑X.toPresheafedSpace
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ _root_.IsReduced
↑(X.presheaf.obj (op ((Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U))))
[PROOFSTEP]
exact
isReduced_of_injective (inv <| f.1.c.app (op <| H.base_open.isOpenMap.functor.obj U))
(asIso <| f.1.c.app (op <| H.base_open.isOpenMap.functor.obj U) :
Y.presheaf.obj _ ≅ _).symm.commRingCatIsoToRingEquiv.injective
[GOAL]
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
⊢ IsReduced (Scheme.Spec.obj (op R))
[PROOFSTEP]
apply (config := { allowSynthFailures := true }) isReducedOfStalkIsReduced
[GOAL]
case inst
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
⊢ ∀ (x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace),
_root_.IsReduced ↑(Presheaf.stalk (Scheme.Spec.obj (op R)).presheaf x)
[PROOFSTEP]
intro x
[GOAL]
case inst
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
⊢ _root_.IsReduced ↑(Presheaf.stalk (Scheme.Spec.obj (op R)).presheaf x)
[PROOFSTEP]
dsimp
[GOAL]
case inst
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
⊢ _root_.IsReduced ↑(Presheaf.stalk (Scheme.Spec.obj (op R)).presheaf x)
[PROOFSTEP]
have : _root_.IsReduced (CommRingCat.of <| Localization.AtPrime (PrimeSpectrum.asIdeal x)) := by dsimp; infer_instance
[GOAL]
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
⊢ _root_.IsReduced ↑(CommRingCat.of (Localization.AtPrime x.asIdeal))
[PROOFSTEP]
dsimp
[GOAL]
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
⊢ _root_.IsReduced (Localization.AtPrime x.asIdeal)
[PROOFSTEP]
infer_instance
[GOAL]
case inst
X : Scheme
R : CommRingCat
H : _root_.IsReduced ↑R
x : ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
this : _root_.IsReduced ↑(CommRingCat.of (Localization.AtPrime x.asIdeal))
⊢ _root_.IsReduced ↑(Presheaf.stalk (Scheme.Spec.obj (op R)).presheaf x)
[PROOFSTEP]
exact
isReduced_of_injective (StructureSheaf.stalkIso R x).hom
(StructureSheaf.stalkIso R x).commRingCatIsoToRingEquiv.injective
[GOAL]
X : Scheme
R : CommRingCat
⊢ IsReduced (Scheme.Spec.obj (op R)) ↔ _root_.IsReduced ↑R
[PROOFSTEP]
refine' ⟨_, fun h => inferInstance⟩
[GOAL]
X : Scheme
R : CommRingCat
⊢ IsReduced (Scheme.Spec.obj (op R)) → _root_.IsReduced ↑R
[PROOFSTEP]
intro h
[GOAL]
X : Scheme
R : CommRingCat
h : IsReduced (Scheme.Spec.obj (op R))
⊢ _root_.IsReduced ↑R
[PROOFSTEP]
have : _root_.IsReduced (LocallyRingedSpace.Γ.obj (op <| Spec.toLocallyRingedSpace.obj <| op R)) := by
change _root_.IsReduced ((Scheme.Spec.obj <| op R).presheaf.obj <| op ⊤); infer_instance
[GOAL]
X : Scheme
R : CommRingCat
h : IsReduced (Scheme.Spec.obj (op R))
⊢ _root_.IsReduced ↑(LocallyRingedSpace.Γ.obj (op (Spec.toLocallyRingedSpace.obj (op R))))
[PROOFSTEP]
change _root_.IsReduced ((Scheme.Spec.obj <| op R).presheaf.obj <| op ⊤)
[GOAL]
X : Scheme
R : CommRingCat
h : IsReduced (Scheme.Spec.obj (op R))
⊢ _root_.IsReduced ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
[PROOFSTEP]
infer_instance
[GOAL]
X : Scheme
R : CommRingCat
h : IsReduced (Scheme.Spec.obj (op R))
this : _root_.IsReduced ↑(LocallyRingedSpace.Γ.obj (op (Spec.toLocallyRingedSpace.obj (op R))))
⊢ _root_.IsReduced ↑R
[PROOFSTEP]
exact isReduced_of_injective (toSpecΓ R) (asIso <| toSpecΓ R).commRingCatIsoToRingEquiv.injective
[GOAL]
X : Scheme
inst✝ : IsAffine X
h : _root_.IsReduced ↑(X.presheaf.obj (op ⊤))
⊢ IsReduced (Scheme.Spec.obj (op (Scheme.Γ.obj (op X))))
[PROOFSTEP]
rw [affine_isReduced_iff]
[GOAL]
X : Scheme
inst✝ : IsAffine X
h : _root_.IsReduced ↑(X.presheaf.obj (op ⊤))
⊢ _root_.IsReduced ↑(Scheme.Γ.obj (op X))
[PROOFSTEP]
exact h
[GOAL]
X : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
⊢ ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), P X U
[PROOFSTEP]
intro X U
[GOAL]
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
⊢ P X U
[PROOFSTEP]
apply h₁
[GOAL]
case a
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
⊢ ∀ (x : { x // x ∈ U }), ∃ V x x, P X V
[PROOFSTEP]
intro x
[GOAL]
case a
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
⊢ ∃ V x x, P X V
[PROOFSTEP]
obtain ⟨_, ⟨j, rfl⟩, hx, i⟩ := X.affineBasisCover_is_basis.exists_subset_of_mem_open (SetLike.mem_coe.2 x.prop) U.isOpen
[GOAL]
case a.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
⊢ ∃ V x x, P X V
[PROOFSTEP]
let U' : Opens _ := ⟨_, (X.affineBasisCover.IsOpen j).base_open.open_range⟩
[GOAL]
case a.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
U' : Opens ((forget TopCat).obj ↑X.toPresheafedSpace) :=
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
⊢ ∃ V x x, P X V
[PROOFSTEP]
let i' : U' ⟶ U := homOfLE i
[GOAL]
case a.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
U' : Opens ((forget TopCat).obj ↑X.toPresheafedSpace) :=
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
i' : U' ⟶ U := homOfLE i
⊢ ∃ V x x, P X V
[PROOFSTEP]
refine' ⟨U', hx, i', _⟩
[GOAL]
case a.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
U' : Opens ((forget TopCat).obj ↑X.toPresheafedSpace) :=
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
i' : U' ⟶ U := homOfLE i
⊢ P X U'
[PROOFSTEP]
obtain ⟨_, _, rfl, rfl, h₂'⟩ := h₂ (X.affineBasisCover.map j)
[GOAL]
case a.intro.intro.intro.intro.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
U' : Opens ((forget TopCat).obj ↑X.toPresheafedSpace) :=
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
i' : U' ⟶ U := homOfLE i
h₂' :
P (Scheme.OpenCover.obj (Scheme.affineBasisCover X) j) { carrier := ⊤, is_open' := (_ : IsOpen ⊤) } →
P X
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
⊢ P X U'
[PROOFSTEP]
apply h₂'
[GOAL]
case a.intro.intro.intro.intro.intro.intro.intro.intro
X✝ : Scheme
P : (X : Scheme) → Opens ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace), (∀ (x : { x // x ∈ U }), ∃ V x x, P X V) → P X U
h₂ :
∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV, P X { carrier := U, is_open' := (_ : IsOpen U) } → P Y { carrier := V, is_open' := (_ : IsOpen V) }
h₃ : ∀ (R : CommRingCat), P (Scheme.Spec.obj (op R)) ⊤
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
x : { x // x ∈ U }
j : (Scheme.affineBasisCover X).J
hx : ↑x ∈ Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base
i : Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base ⊆ ↑U
U' : Opens ((forget TopCat).obj ↑X.toPresheafedSpace) :=
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
i' : U' ⟶ U := homOfLE i
h₂' :
P (Scheme.OpenCover.obj (Scheme.affineBasisCover X) j) { carrier := ⊤, is_open' := (_ : IsOpen ⊤) } →
P X
{ carrier := Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base,
is_open' := (_ : IsOpen (Set.range ↑(Scheme.OpenCover.map (Scheme.affineBasisCover X) j).val.base)) }
⊢ P (Scheme.OpenCover.obj (Scheme.affineBasisCover X) j) { carrier := ⊤, is_open' := (_ : IsOpen ⊤) }
[PROOFSTEP]
apply h₃
[GOAL]
X : Scheme
P : (X : Scheme) → ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (R : CommRingCat) (x : PrimeSpectrum ↑R), P (Scheme.Spec.obj (op R)) x
h₂ : ∀ {X Y : Scheme} (f : X ⟶ Y) [inst : IsOpenImmersion f] (x : ↑↑X.toPresheafedSpace), P X x → P Y (↑f.val.base x)
⊢ ∀ (X : Scheme) (x : ↑↑X.toPresheafedSpace), P X x
[PROOFSTEP]
intro X x
[GOAL]
X✝ : Scheme
P : (X : Scheme) → ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (R : CommRingCat) (x : PrimeSpectrum ↑R), P (Scheme.Spec.obj (op R)) x
h₂ : ∀ {X Y : Scheme} (f : X ⟶ Y) [inst : IsOpenImmersion f] (x : ↑↑X.toPresheafedSpace), P X x → P Y (↑f.val.base x)
X : Scheme
x : ↑↑X.toPresheafedSpace
⊢ P X x
[PROOFSTEP]
obtain ⟨y, e⟩ := X.affineCover.Covers x
[GOAL]
case intro
X✝ : Scheme
P : (X : Scheme) → ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (R : CommRingCat) (x : PrimeSpectrum ↑R), P (Scheme.Spec.obj (op R)) x
h₂ : ∀ {X Y : Scheme} (f : X ⟶ Y) [inst : IsOpenImmersion f] (x : ↑↑X.toPresheafedSpace), P X x → P Y (↑f.val.base x)
X : Scheme
x : ↑↑X.toPresheafedSpace
y :
(forget TopCat).obj
↑(Scheme.OpenCover.obj (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).toPresheafedSpace
e : ↑(Scheme.OpenCover.map (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).val.base y = x
⊢ P X x
[PROOFSTEP]
convert h₂ (X.affineCover.map (X.affineCover.f x)) y _
[GOAL]
case h.e'_2
X✝ : Scheme
P : (X : Scheme) → ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (R : CommRingCat) (x : PrimeSpectrum ↑R), P (Scheme.Spec.obj (op R)) x
h₂ : ∀ {X Y : Scheme} (f : X ⟶ Y) [inst : IsOpenImmersion f] (x : ↑↑X.toPresheafedSpace), P X x → P Y (↑f.val.base x)
X : Scheme
x : ↑↑X.toPresheafedSpace
y :
(forget TopCat).obj
↑(Scheme.OpenCover.obj (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).toPresheafedSpace
e : ↑(Scheme.OpenCover.map (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).val.base y = x
⊢ x = ↑(Scheme.OpenCover.map (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).val.base y
[PROOFSTEP]
rw [e]
[GOAL]
case intro
X✝ : Scheme
P : (X : Scheme) → ↑↑X.toPresheafedSpace → Prop
h₁ : ∀ (R : CommRingCat) (x : PrimeSpectrum ↑R), P (Scheme.Spec.obj (op R)) x
h₂ : ∀ {X Y : Scheme} (f : X ⟶ Y) [inst : IsOpenImmersion f] (x : ↑↑X.toPresheafedSpace), P X x → P Y (↑f.val.base x)
X : Scheme
x : ↑↑X.toPresheafedSpace
y :
(forget TopCat).obj
↑(Scheme.OpenCover.obj (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).toPresheafedSpace
e : ↑(Scheme.OpenCover.map (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)).val.base y = x
⊢ P (Scheme.OpenCover.obj (Scheme.affineCover X) (Scheme.OpenCover.f (Scheme.affineCover X) x)) y
[PROOFSTEP]
apply h₁
[GOAL]
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ s = 0
[PROOFSTEP]
apply TopCat.Presheaf.section_ext X.sheaf U
[GOAL]
case h
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ ∀ (x : { x // x ∈ U }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
conv => intro x; rw [RingHom.map_zero]
[GOAL]
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
| ∀ (x : { x // x ∈ U }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
intro x; rw [RingHom.map_zero]
[GOAL]
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
| ∀ (x : { x // x ∈ U }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
intro x; rw [RingHom.map_zero]
[GOAL]
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
| ∀ (x : { x // x ∈ U }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
intro x
[GOAL]
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
| ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) 0
[PROOFSTEP]
rw [RingHom.map_zero]
[GOAL]
case h
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ ∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
refine'
(@reduce_to_affine_global
(fun X U => ∀ [IsReduced X] (s : X.presheaf.obj (op U)), X.basicOpen s = ⊥ → ∀ x, (X.sheaf.presheaf.germ x) s = 0)
_ _ _)
X U s hs
[GOAL]
case h.refine'_1
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ ∀ (X : Scheme) (U : Opens ↑↑X.toPresheafedSpace),
(∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V) →
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X U
[PROOFSTEP]
intro X U hx hX s hs x
[GOAL]
case h.refine'_1
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
obtain ⟨V, hx, i, H⟩ := hx x
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H :
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op V))),
Scheme.basicOpen X s = ⊥ → ∀ (x : { x // x ∈ V }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
specialize H (X.presheaf.map i.op s)
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H :
Scheme.basicOpen X (↑(X.presheaf.map i.op) s) = ⊥ →
∀ (x : { x // x ∈ V }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) (↑(X.presheaf.map i.op) s) = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
erw [Scheme.basicOpen_res] at H
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H :
V ⊓ Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ V }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) (↑(X.presheaf.map i.op) s) = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
rw [hs] at H
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H :
V ⊓ ⊥ = ⊥ →
∀ (x : { x // x ∈ V }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) (↑(X.presheaf.map i.op) s) = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
specialize H inf_bot_eq ⟨x, hx⟩
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H : ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) { val := ↑x, property := hx }) (↑(X.presheaf.map i.op) s) = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
erw [TopCat.Presheaf.germ_res_apply] at H
[GOAL]
case h.refine'_1.intro.intro.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U✝ : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U✝))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X : Scheme
U : Opens ↑↑X.toPresheafedSpace
hx✝ :
∀ (x : { x // x ∈ U }),
∃ V x x,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X V
hX : IsReduced X
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
x : { x // x ∈ U }
V : Opens ↑↑X.toPresheafedSpace
hx : ↑x ∈ V
i : V ⟶ U
H :
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X))
((fun x => { val := ↑x, property := (_ : ↑x ∈ ↑U) }) { val := ↑x, property := hx }))
s =
0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
[PROOFSTEP]
exact H
[GOAL]
case h.refine'_2
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ ∀ {X Y : Scheme} (f : X ⟶ Y) [hf : IsOpenImmersion f],
∃ U V hU hV,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X { carrier := U, is_open' := (_ : IsOpen U) } →
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
Y { carrier := V, is_open' := (_ : IsOpen V) }
[PROOFSTEP]
rintro X Y f hf
[GOAL]
case h.refine'_2
X✝¹ : Scheme
X✝ : Scheme
hX : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s : ↑(X✝.presheaf.obj (op U))
hs : Scheme.basicOpen X✝ s = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
⊢ ∃ U V hU hV,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X { carrier := U, is_open' := (_ : IsOpen U) } →
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
Y { carrier := V, is_open' := (_ : IsOpen V) }
[PROOFSTEP]
have e : f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ := by
rw [← Set.image_univ, Set.preimage_image_eq _ hf.base_open.inj]
[GOAL]
X✝¹ : Scheme
X✝ : Scheme
hX : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s : ↑(X✝.presheaf.obj (op U))
hs : Scheme.basicOpen X✝ s = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
⊢ ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
[PROOFSTEP]
rw [← Set.image_univ, Set.preimage_image_eq _ hf.base_open.inj]
[GOAL]
case h.refine'_2
X✝¹ : Scheme
X✝ : Scheme
hX : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s : ↑(X✝.presheaf.obj (op U))
hs : Scheme.basicOpen X✝ s = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
⊢ ∃ U V hU hV,
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X { carrier := U, is_open' := (_ : IsOpen U) } →
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ →
∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
Y { carrier := V, is_open' := (_ : IsOpen V) }
[PROOFSTEP]
refine' ⟨_, _, e, rfl, _⟩
[GOAL]
case h.refine'_2
X✝¹ : Scheme
X✝ : Scheme
hX : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s : ↑(X✝.presheaf.obj (op U))
hs : Scheme.basicOpen X✝ s = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
⊢ (fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ → ∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
X
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } →
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ → ∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
Y { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }
[PROOFSTEP]
rintro H hX s hs ⟨_, x, rfl⟩
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
haveI := isReducedOfOpenImmersion f
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
specialize H (f.1.c.app _ s) _ ⟨x, by rw [Opens.mem_mk, e]; trivial⟩
[GOAL]
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }
[PROOFSTEP]
rw [Opens.mem_mk, e]
[GOAL]
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ x ∈ Set.univ
[PROOFSTEP]
trivial
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ Scheme.basicOpen X
(↑(NatTrans.app f.val.c
(op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
s) =
⊥
[PROOFSTEP]
rw [← Scheme.preimage_basicOpen, hs]
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ (Opens.map f.val.base).obj ⊥ = ⊥
[PROOFSTEP]
ext1
[GOAL]
case h.refine'_2.mk.intro.h
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
H :
∀ [inst : IsReduced X]
(s :
↑(X.presheaf.obj
(op
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }))),
Scheme.basicOpen X s = ⊥ →
∀
(x :
{ x //
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) } }),
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
⊢ ↑((Opens.map f.val.base).obj ⊥) = ↑⊥
[PROOFSTEP]
simp [Opens.map]
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
H :
↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X))
{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) })
(↑(NatTrans.app f.val.c
(op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
s) =
0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
erw [← PresheafedSpace.stalkMap_germ_apply f.1 ⟨_, _⟩ ⟨x, _⟩] at H
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
H :
↑(PresheafedSpace.stalkMap f.val
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) })
(↑(Presheaf.germ Y.presheaf
{
val :=
↑f.val.base
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) },
property :=
(_ :
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) } ∈
(Opens.map f.val.base).obj
{ carrier := fun x => ∃ y, ↑f.val.base y = x,
is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }) })
s) =
0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
apply_fun inv <| PresheafedSpace.stalkMap f.val x at H
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
H :
↑(inv (PresheafedSpace.stalkMap f.val x))
(↑(PresheafedSpace.stalkMap f.val
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) })
(↑(Presheaf.germ Y.presheaf
{
val :=
↑f.val.base
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) },
property :=
(_ :
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) } ∈
(Opens.map f.val.base).obj
{ carrier := fun x => ∃ y, ↑f.val.base y = x,
is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }) })
s)) =
↑(inv (PresheafedSpace.stalkMap f.val x)) 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
erw [CategoryTheory.IsIso.hom_inv_id_apply, map_zero] at H
[GOAL]
case h.refine'_2.mk.intro
X✝¹ : Scheme
X✝ : Scheme
hX✝ : IsReduced X✝
U : Opens ↑↑X✝.toPresheafedSpace
s✝ : ↑(X✝.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X✝ s✝ = ⊥
X Y : Scheme
f : X ⟶ Y
hf : IsOpenImmersion f
e : ↑f.val.base ⁻¹' Set.range ↑f.val.base = Set.univ
hX : IsReduced Y
s : ↑(Y.presheaf.obj (op { carrier := Set.range ↑f.val.base, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }))
hs : Scheme.basicOpen Y s = ⊥
x : (forget TopCat).obj ↑X.toPresheafedSpace
this : IsReduced X
H :
↑(Presheaf.germ Y.presheaf
{
val :=
↑f.val.base
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) },
property :=
(_ :
↑{ val := x,
property :=
(_ :
x ∈
{ carrier := ↑f.val.base ⁻¹' Set.range ↑f.val.base,
is_open' := (_ : IsOpen (↑f.val.base ⁻¹' Set.range ↑f.val.base)) }) } ∈
(Opens.map f.val.base).obj
{ carrier := fun x => ∃ y, ↑f.val.base y = x, is_open' := (_ : IsOpen (Set.range ↑f.val.base)) }) })
s =
0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf Y))
{ val := ↑f.val.base x, property := (_ : ∃ y, ↑f.val.base y = ↑f.val.base x) })
s =
0
[PROOFSTEP]
exact H
[GOAL]
case h.refine'_3
X✝ : Scheme
X : Scheme
hX : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
hs : Scheme.basicOpen X s = ⊥
⊢ ∀ (R : CommRingCat),
(fun X U =>
∀ [inst : IsReduced X] (s : ↑(X.presheaf.obj (op U))),
Scheme.basicOpen X s = ⊥ → ∀ (x : { x // x ∈ U }), ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf X)) x) s = 0)
(Scheme.Spec.obj (op R)) ⊤
[PROOFSTEP]
intro R hX s hs x
[GOAL]
case h.refine'_3
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
hs : Scheme.basicOpen (Scheme.Spec.obj (op R)) s = ⊥
x : { x // x ∈ ⊤ }
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf (Scheme.Spec.obj (op R)))) x) s = 0
[PROOFSTEP]
erw [basicOpen_eq_of_affine', PrimeSpectrum.basicOpen_eq_bot_iff] at hs
[GOAL]
case h.refine'_3
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
hs : IsNilpotent (↑(SpecΓIdentity.app R).hom s)
x : { x // x ∈ ⊤ }
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf (Scheme.Spec.obj (op R)))) x) s = 0
[PROOFSTEP]
replace hs := hs.map (SpecΓIdentity.app R).inv
[GOAL]
case h.refine'_3
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : IsNilpotent (↑(SpecΓIdentity.app R).inv (↑(SpecΓIdentity.app R).hom s))
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf (Scheme.Spec.obj (op R)))) x) s = 0
[PROOFSTEP]
replace hs := @IsNilpotent.eq_zero _ _ _ _ (show _ from ?_) hs
[GOAL]
case h.refine'_3.refine_2
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : ↑(SpecΓIdentity.app R).inv (↑(SpecΓIdentity.app R).hom s) = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf (Scheme.Spec.obj (op R)))) x) s = 0
case h.refine'_3.refine_1
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : IsNilpotent (↑(SpecΓIdentity.app R).inv (↑(SpecΓIdentity.app R).hom s))
⊢ _root_.IsReduced
((fun x => ↑((Spec.toLocallyRingedSpace.rightOp ⋙ LocallyRingedSpace.Γ).obj R)) (↑(SpecΓIdentity.app R).hom s))
[PROOFSTEP]
rw [Iso.hom_inv_id_apply] at hs
[GOAL]
case h.refine'_3.refine_2
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : s = 0
⊢ ↑(Presheaf.germ (Sheaf.presheaf (Scheme.sheaf (Scheme.Spec.obj (op R)))) x) s = 0
case h.refine'_3.refine_1
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : IsNilpotent (↑(SpecΓIdentity.app R).inv (↑(SpecΓIdentity.app R).hom s))
⊢ _root_.IsReduced
((fun x => ↑((Spec.toLocallyRingedSpace.rightOp ⋙ LocallyRingedSpace.Γ).obj R)) (↑(SpecΓIdentity.app R).hom s))
[PROOFSTEP]
rw [hs, map_zero]
[GOAL]
case h.refine'_3.refine_1
X✝ : Scheme
X : Scheme
hX✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s✝ : ↑(X.presheaf.obj (op U))
hs✝ : Scheme.basicOpen X s✝ = ⊥
R : CommRingCat
hX : IsReduced (Scheme.Spec.obj (op R))
s : ↑((Scheme.Spec.obj (op R)).presheaf.obj (op ⊤))
x : { x // x ∈ ⊤ }
hs : IsNilpotent (↑(SpecΓIdentity.app R).inv (↑(SpecΓIdentity.app R).hom s))
⊢ _root_.IsReduced
((fun x => ↑((Spec.toLocallyRingedSpace.rightOp ⋙ LocallyRingedSpace.Γ).obj R)) (↑(SpecΓIdentity.app R).hom s))
[PROOFSTEP]
exact @IsReduced.component_reduced _ hX ⊤
[GOAL]
X✝ : Scheme
X : Scheme
inst✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
⊢ Scheme.basicOpen X s = ⊥ ↔ s = 0
[PROOFSTEP]
refine' ⟨eq_zero_of_basicOpen_eq_bot s, _⟩
[GOAL]
X✝ : Scheme
X : Scheme
inst✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
s : ↑(X.presheaf.obj (op U))
⊢ s = 0 → Scheme.basicOpen X s = ⊥
[PROOFSTEP]
rintro rfl
[GOAL]
X✝ : Scheme
X : Scheme
inst✝ : IsReduced X
U : Opens ↑↑X.toPresheafedSpace
⊢ Scheme.basicOpen X 0 = ⊥
[PROOFSTEP]
simp
[GOAL]
X : Scheme
h : IsIntegral X
⊢ Nonempty { x // x ∈ ⊤ }
[PROOFSTEP]
simp only [Set.univ_nonempty, Opens.nonempty_coeSort, Opens.coe_top]
[GOAL]
X : Scheme
inst✝ : IsIntegral X
⊢ IsReduced X
[PROOFSTEP]
constructor
[GOAL]
case component_reduced
X : Scheme
inst✝ : IsIntegral X
⊢ autoParam (∀ (U : Opens ↑↑X.toPresheafedSpace), _root_.IsReduced ↑(X.presheaf.obj (op U))) _auto✝
[PROOFSTEP]
intro U
[GOAL]
case component_reduced
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
cases' U.1.eq_empty_or_nonempty with h h
[GOAL]
case component_reduced.inl
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : U.carrier = ∅
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
have : U = ⊥ := SetLike.ext' h
[GOAL]
case component_reduced.inl
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : U.carrier = ∅
this : U = ⊥
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
haveI := CommRingCat.subsingleton_of_isTerminal (X.sheaf.isTerminalOfEqEmpty this)
[GOAL]
case component_reduced.inl
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : U.carrier = ∅
this✝ : U = ⊥
this : Subsingleton ↑((Scheme.sheaf X).val.obj (op U))
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
change _root_.IsReduced (X.sheaf.val.obj (op U))
[GOAL]
case component_reduced.inl
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : U.carrier = ∅
this✝ : U = ⊥
this : Subsingleton ↑((Scheme.sheaf X).val.obj (op U))
⊢ _root_.IsReduced ↑((Scheme.sheaf X).val.obj (op U))
[PROOFSTEP]
infer_instance
[GOAL]
case component_reduced.inr
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : Set.Nonempty U.carrier
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
haveI : Nonempty U := by simpa
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : Set.Nonempty U.carrier
⊢ Nonempty { x // x ∈ U }
[PROOFSTEP]
simpa
[GOAL]
case component_reduced.inr
X : Scheme
inst✝ : IsIntegral X
U : Opens ↑↑X.toPresheafedSpace
h : Set.Nonempty U.carrier
this : Nonempty { x // x ∈ U }
⊢ _root_.IsReduced ↑(X.presheaf.obj (op U))
[PROOFSTEP]
infer_instance
[GOAL]
X : Scheme
inst✝ : IsIntegral X
⊢ IrreducibleSpace ↑↑X.toPresheafedSpace
[PROOFSTEP]
by_contra H
[GOAL]
X : Scheme
inst✝ : IsIntegral X
H : ¬IrreducibleSpace ↑↑X.toPresheafedSpace
⊢ False
[PROOFSTEP]
replace H : ¬IsPreirreducible (⊤ : Set X.carrier) := fun h =>
H { toPreirreducibleSpace := ⟨h⟩
toNonempty := inferInstance }
[GOAL]
X : Scheme
inst✝ : IsIntegral X
H : ¬IsPreirreducible ⊤
⊢ False
[PROOFSTEP]
simp_rw [isPreirreducible_iff_closed_union_closed, not_forall, not_or] at H
[GOAL]
X : Scheme
inst✝ : IsIntegral X
H : ∃ x x_1 h h h, ¬⊤ ⊆ x ∧ ¬⊤ ⊆ x_1
⊢ False
[PROOFSTEP]
rcases H with ⟨S, T, hS, hT, h₁, h₂, h₃⟩
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ¬⊤ ⊆ S
h₃ : ¬⊤ ⊆ T
⊢ False
[PROOFSTEP]
erw [not_forall] at h₂ h₃
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x, ¬(x ∈ ⊤ → x ∈ S)
h₃ : ∃ x, ¬(x ∈ ⊤ → x ∈ T)
⊢ False
[PROOFSTEP]
simp_rw [not_forall] at h₂ h₃
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
⊢ False
[PROOFSTEP]
haveI : Nonempty (⟨Sᶜ, hS.1⟩ : Opens X.carrier) := ⟨⟨_, h₂.choose_spec.choose_spec⟩⟩
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
⊢ False
[PROOFSTEP]
haveI : Nonempty (⟨Tᶜ, hT.1⟩ : Opens X.carrier) := ⟨⟨_, h₃.choose_spec.choose_spec⟩⟩
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
⊢ False
[PROOFSTEP]
haveI : Nonempty (⟨Sᶜ, hS.1⟩ ⊔ ⟨Tᶜ, hT.1⟩ : Opens X.carrier) := ⟨⟨_, Or.inl h₂.choose_spec.choose_spec⟩⟩
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
⊢ False
[PROOFSTEP]
let e : X.presheaf.obj _ ≅ CommRingCat.of _ :=
(X.sheaf.isProductOfDisjoint ⟨_, hS.1⟩ ⟨_, hT.1⟩ ?_).conePointUniqueUpToIso (CommRingCat.prodFanIsLimit _ _)
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_2
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
e : X.presheaf.obj
(op ({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })) ≅
CommRingCat.of
↑(CommRingCat.prodFan ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }))).1 :=
IsLimit.conePointUniqueUpToIso
(Sheaf.isProductOfDisjoint (Scheme.sheaf X) { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } ?intro.intro.intro.intro.intro.intro.refine_1)
(CommRingCat.prodFanIsLimit ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })))
⊢ False
case intro.intro.intro.intro.intro.intro.refine_1
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
⊢ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } = ⊥
[PROOFSTEP]
apply (config := { allowSynthFailures := true }) false_of_nontrivial_of_product_domain
[GOAL]
case inst
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
e : X.presheaf.obj
(op ({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })) ≅
CommRingCat.of
↑(CommRingCat.prodFan ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }))).1 :=
IsLimit.conePointUniqueUpToIso
(Sheaf.isProductOfDisjoint (Scheme.sheaf X) { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } ?intro.intro.intro.intro.intro.intro.refine_1)
(CommRingCat.prodFanIsLimit ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })))
⊢ IsDomain (?intro.intro.intro.intro.intro.intro.refine_2.R × ?intro.intro.intro.intro.intro.intro.refine_2.S)
[PROOFSTEP]
exact e.symm.commRingCatIsoToRingEquiv.toMulEquiv.isDomain _
[GOAL]
case inst
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
e : X.presheaf.obj
(op ({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })) ≅
CommRingCat.of
↑(CommRingCat.prodFan ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }))).1 :=
IsLimit.conePointUniqueUpToIso
(Sheaf.isProductOfDisjoint (Scheme.sheaf X) { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } ?intro.intro.intro.intro.intro.intro.refine_1)
(CommRingCat.prodFanIsLimit ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })))
⊢ Nontrivial ↑((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
[PROOFSTEP]
apply X.toLocallyRingedSpace.component_nontrivial
[GOAL]
case inst
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
e : X.presheaf.obj
(op ({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })) ≅
CommRingCat.of
↑(CommRingCat.prodFan ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }))).1 :=
IsLimit.conePointUniqueUpToIso
(Sheaf.isProductOfDisjoint (Scheme.sheaf X) { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } ?intro.intro.intro.intro.intro.intro.refine_1)
(CommRingCat.prodFanIsLimit ((Scheme.sheaf X).val.obj (op { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }))
((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })))
⊢ Nontrivial ↑((Scheme.sheaf X).val.obj (op { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }))
[PROOFSTEP]
apply X.toLocallyRingedSpace.component_nontrivial
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
⊢ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } = ⊥
[PROOFSTEP]
ext x
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
⊢ x ∈ ↑({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }) ↔ x ∈ ↑⊥
[PROOFSTEP]
constructor
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mp
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
⊢ x ∈ ↑({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }) → x ∈ ↑⊥
[PROOFSTEP]
rintro ⟨hS, hT⟩
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mp.intro
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS✝ : IsClosed S
hT✝ : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
hS : x ∈ ↑{ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
hT : x ∈ ↑{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }
⊢ x ∈ ↑⊥
[PROOFSTEP]
cases' h₁ (show x ∈ ⊤ by trivial) with h h
[GOAL]
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS✝ : IsClosed S
hT✝ : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
hS : x ∈ ↑{ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
hT : x ∈ ↑{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }
⊢ x ∈ ⊤
[PROOFSTEP]
trivial
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mp.intro.inl
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS✝ : IsClosed S
hT✝ : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
hS : x ∈ ↑{ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
hT : x ∈ ↑{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }
h : x ∈ S
⊢ x ∈ ↑⊥
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mp.intro.inr
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS✝ : IsClosed S
hT✝ : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
hS : x ∈ ↑{ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) }
hT : x ∈ ↑{ carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) }
h : x ∈ T
⊢ x ∈ ↑⊥
[PROOFSTEP]
exacts [hS h, hT h]
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mpr
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x : ↑↑X.toPresheafedSpace
⊢ x ∈ ↑⊥ → x ∈ ↑({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })
[PROOFSTEP]
intro x
[GOAL]
case intro.intro.intro.intro.intro.intro.refine_1.h.h.mpr
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x✝ : ↑↑X.toPresheafedSpace
x : x✝ ∈ ↑⊥
⊢ x✝ ∈ ↑({ carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊓ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) })
[PROOFSTEP]
refine' x.rec (by contradiction)
[GOAL]
X : Scheme
inst✝ : IsIntegral X
S T : Set ↑↑X.toPresheafedSpace
hS : IsClosed S
hT : IsClosed T
h₁ : ⊤ ⊆ S ∪ T
h₂ : ∃ x x_1, ¬x ∈ S
h₃ : ∃ x x_1, ¬x ∈ T
this✝¹ : Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } }
this✝ : Nonempty { x // x ∈ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
this :
Nonempty { x // x ∈ { carrier := Sᶜ, is_open' := (_ : IsOpen Sᶜ) } ⊔ { carrier := Tᶜ, is_open' := (_ : IsOpen Tᶜ) } }
x✝ : ↑↑X.toPresheafedSpace
x : x✝ ∈ ↑⊥
⊢ False
[PROOFSTEP]
contradiction
[GOAL]
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
⊢ IsIntegral X
[PROOFSTEP]
constructor
[GOAL]
case nonempty
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
⊢ autoParam (Nonempty ↑↑X.toPresheafedSpace) _auto✝
[PROOFSTEP]
infer_instance
[GOAL]
case component_integral
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
⊢ autoParam (∀ (U : Opens ↑↑X.toPresheafedSpace) [inst : Nonempty { x // x ∈ U }], IsDomain ↑(X.presheaf.obj (op U)))
_auto✝
[PROOFSTEP]
intro U hU
[GOAL]
case component_integral
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
⊢ IsDomain ↑(X.presheaf.obj (op U))
[PROOFSTEP]
haveI := (@LocallyRingedSpace.component_nontrivial X.toLocallyRingedSpace U hU).1
[GOAL]
case component_integral
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
⊢ IsDomain ↑(X.presheaf.obj (op U))
[PROOFSTEP]
have : NoZeroDivisors (X.toLocallyRingedSpace.toSheafedSpace.toPresheafedSpace.presheaf.obj (op U)) :=
by
refine' ⟨fun {a b} e => _⟩
simp_rw [← basicOpen_eq_bot_iff, ← Opens.not_nonempty_iff_eq_bot]
by_contra' h
obtain ⟨_, ⟨x, hx₁, rfl⟩, ⟨x, hx₂, e'⟩⟩ := nonempty_preirreducible_inter (X.basicOpen a).2 (X.basicOpen b).2 h.1 h.2
replace e' := Subtype.eq e'
subst e'
replace e := congr_arg (X.presheaf.germ x) e
rw [RingHom.map_mul, RingHom.map_zero] at e
refine' zero_ne_one' (X.presheaf.stalk x.1) (isUnit_zero_iff.1 _)
convert hx₁.mul hx₂
exact e.symm
[GOAL]
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
⊢ NoZeroDivisors ↑(X.presheaf.obj (op U))
[PROOFSTEP]
refine' ⟨fun {a b} e => _⟩
[GOAL]
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
⊢ a = 0 ∨ b = 0
[PROOFSTEP]
simp_rw [← basicOpen_eq_bot_iff, ← Opens.not_nonempty_iff_eq_bot]
[GOAL]
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
⊢ ¬Set.Nonempty ↑(Scheme.basicOpen X a) ∨ ¬Set.Nonempty ↑(Scheme.basicOpen X b)
[PROOFSTEP]
by_contra' h
[GOAL]
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
⊢ False
[PROOFSTEP]
obtain ⟨_, ⟨x, hx₁, rfl⟩, ⟨x, hx₂, e'⟩⟩ := nonempty_preirreducible_inter (X.basicOpen a).2 (X.basicOpen b).2 h.1 h.2
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x✝ : { x // x ∈ U }
hx₁ :
x✝ ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
e' : ↑x = ↑x✝
⊢ False
[PROOFSTEP]
replace e' := Subtype.eq e'
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x✝ : { x // x ∈ U }
hx₁ :
x✝ ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
e' : x = x✝
⊢ False
[PROOFSTEP]
subst e'
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
e : a * b = 0
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
hx₁ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
⊢ False
[PROOFSTEP]
replace e := congr_arg (X.presheaf.germ x) e
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
hx₁ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
e : ↑(Presheaf.germ X.presheaf x) (a * b) = ↑(Presheaf.germ X.presheaf x) 0
⊢ False
[PROOFSTEP]
rw [RingHom.map_mul, RingHom.map_zero] at e
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
hx₁ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
e : ↑(Presheaf.germ X.presheaf x) a * ↑(Presheaf.germ X.presheaf x) b = 0
⊢ False
[PROOFSTEP]
refine' zero_ne_one' (X.presheaf.stalk x.1) (isUnit_zero_iff.1 _)
[GOAL]
case intro.intro.intro.intro.intro.intro
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
hx₁ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
e : ↑(Presheaf.germ X.presheaf x) a * ↑(Presheaf.germ X.presheaf x) b = 0
⊢ IsUnit 0
[PROOFSTEP]
convert hx₁.mul hx₂
[GOAL]
case h.e'_3.h
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : ∃ x y, x ≠ y
a b : ↑(X.presheaf.obj (op U))
h : Set.Nonempty ↑(Scheme.basicOpen X a) ∧ Set.Nonempty ↑(Scheme.basicOpen X b)
x : { x // x ∈ U }
hx₂ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b)}
hx₁ :
x ∈
{x |
IsUnit
(↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a)}
e : ↑(Presheaf.germ X.presheaf x) a * ↑(Presheaf.germ X.presheaf x) b = 0
e_1✝ :
↑(Presheaf.stalk X.presheaf ↑x) =
(fun x_1 =>
(forget CommRingCat).obj
(Presheaf.stalk (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf ↑x))
a
⊢ 0 =
↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) a *
↑(Presheaf.germ (LocallyRingedSpace.toRingedSpace X.toLocallyRingedSpace).toPresheafedSpace.presheaf x) b
[PROOFSTEP]
exact e.symm
[GOAL]
case component_integral
X : Scheme
inst✝ : IsReduced X
H : IrreducibleSpace ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this✝ : ∃ x y, x ≠ y
this : NoZeroDivisors ↑(X.presheaf.obj (op U))
⊢ IsDomain ↑(X.presheaf.obj (op U))
[PROOFSTEP]
exact NoZeroDivisors.to_isDomain _
[GOAL]
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
⊢ IsIntegral X
[PROOFSTEP]
constructor
[GOAL]
case nonempty
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
⊢ autoParam (Nonempty ↑↑X.toPresheafedSpace) _auto✝
[PROOFSTEP]
infer_instance
[GOAL]
case component_integral
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
⊢ autoParam (∀ (U : Opens ↑↑X.toPresheafedSpace) [inst : Nonempty { x // x ∈ U }], IsDomain ↑(X.presheaf.obj (op U)))
_auto✝
[PROOFSTEP]
intro U hU
[GOAL]
case component_integral
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
⊢ IsDomain ↑(X.presheaf.obj (op U))
[PROOFSTEP]
have : U = (Opens.map f.1.base).obj (H.base_open.isOpenMap.functor.obj U) := by ext1;
exact (Set.preimage_image_eq _ H.base_open.inj).symm
[GOAL]
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
⊢ U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
[PROOFSTEP]
ext1
[GOAL]
case h
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
⊢ ↑U = ↑((Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U))
[PROOFSTEP]
exact (Set.preimage_image_eq _ H.base_open.inj).symm
[GOAL]
case component_integral
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ IsDomain ↑(X.presheaf.obj (op U))
[PROOFSTEP]
rw [this]
[GOAL]
case component_integral
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ IsDomain ↑(X.presheaf.obj (op ((Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U))))
[PROOFSTEP]
have : IsDomain (Y.presheaf.obj (op (H.base_open.isOpenMap.functor.obj U))) :=
by
apply (config := { allowSynthFailures := true }) IsIntegral.component_integral
refine' ⟨⟨_, _, hU.some.prop, rfl⟩⟩
[GOAL]
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ IsDomain ↑(Y.presheaf.obj (op ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)))
[PROOFSTEP]
apply (config := { allowSynthFailures := true }) IsIntegral.component_integral
[GOAL]
case inst
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
⊢ Nonempty { x // x ∈ (IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U }
[PROOFSTEP]
refine' ⟨⟨_, _, hU.some.prop, rfl⟩⟩
[GOAL]
case component_integral
X✝ : Scheme
X Y : Scheme
f : X ⟶ Y
H : IsOpenImmersion f
inst✝¹ : IsIntegral Y
inst✝ : Nonempty ↑↑X.toPresheafedSpace
U : Opens ↑↑X.toPresheafedSpace
hU : Nonempty { x // x ∈ U }
this✝ : U = (Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)
this : IsDomain ↑(Y.presheaf.obj (op ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U)))
⊢ IsDomain ↑(X.presheaf.obj (op ((Opens.map f.val.base).obj ((IsOpenMap.functor (_ : IsOpenMap ↑f.val.base)).obj U))))
[PROOFSTEP]
exact
(asIso <| f.1.c.app (op <| H.base_open.isOpenMap.functor.obj U) :
Y.presheaf.obj _ ≅ _).symm.commRingCatIsoToRingEquiv.toMulEquiv.isDomain
_
[GOAL]
X : Scheme
R : CommRingCat
H : IsDomain ↑R
⊢ IrreducibleSpace ↑↑(Scheme.Spec.obj (op R)).toPresheafedSpace
[PROOFSTEP]
convert PrimeSpectrum.irreducibleSpace (R := R)
[GOAL]
X : Scheme
inst✝¹ : IsAffine X
inst✝ : Nonempty ↑↑X.toPresheafedSpace
h : IsDomain ↑(X.presheaf.obj (op ⊤))
⊢ IsIntegral (Scheme.Spec.obj (op (Scheme.Γ.obj (op X))))
[PROOFSTEP]
rw [affine_isIntegral_iff]
[GOAL]
X : Scheme
inst✝¹ : IsAffine X
inst✝ : Nonempty ↑↑X.toPresheafedSpace
h : IsDomain ↑(X.presheaf.obj (op ⊤))
⊢ IsDomain ↑(Scheme.Γ.obj (op X))
[PROOFSTEP]
exact h
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
⊢ Function.Injective ↑(X.presheaf.map i.op)
[PROOFSTEP]
rw [injective_iff_map_eq_zero]
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
⊢ ∀ (a : (forget CommRingCat).obj (X.presheaf.obj (op V))), ↑(X.presheaf.map i.op) a = 0 → a = 0
[PROOFSTEP]
intro x hx
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
hx : ↑(X.presheaf.map i.op) x = 0
⊢ x = 0
[PROOFSTEP]
rw [← basicOpen_eq_bot_iff] at hx ⊢
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
hx : Scheme.basicOpen X (↑(X.presheaf.map i.op) x) = ⊥
⊢ Scheme.basicOpen X x = ⊥
[PROOFSTEP]
rw [Scheme.basicOpen_res] at hx
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
hx : U ⊓ Scheme.basicOpen X x = ⊥
⊢ Scheme.basicOpen X x = ⊥
[PROOFSTEP]
revert hx
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
⊢ U ⊓ Scheme.basicOpen X x = ⊥ → Scheme.basicOpen X x = ⊥
[PROOFSTEP]
contrapose!
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
⊢ Scheme.basicOpen X x ≠ ⊥ → U ⊓ Scheme.basicOpen X x ≠ ⊥
[PROOFSTEP]
simp_rw [Ne.def, ← Opens.not_nonempty_iff_eq_bot, Classical.not_not]
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
⊢ Set.Nonempty ↑(Scheme.basicOpen X x) → Set.Nonempty ↑(U ⊓ Scheme.basicOpen X x)
[PROOFSTEP]
apply nonempty_preirreducible_inter U.isOpen (RingedSpace.basicOpen _ _).isOpen
[GOAL]
X : Scheme
inst✝ : IsIntegral X
U V : Opens ↑↑X.toPresheafedSpace
i : U ⟶ V
H : Nonempty { x // x ∈ U }
x : (forget CommRingCat).obj (X.presheaf.obj (op V))
⊢ Set.Nonempty ↑U
[PROOFSTEP]
simpa using H
|
{"mathlib_filename": "Mathlib.AlgebraicGeometry.Properties", "llama_tokens": 45231}
|
import os, sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
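# dnnlib, dnnlib.tflib and config are local modules from NVIDIA's StyleGAN
# codebase; this script assumes it is run from inside a checkout of that repo.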
url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl
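# Convert the generator's raw NCHW float output into uint8 NHWC images so the
# result can be handed straight to PIL.Image.fromarray below.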
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
_Gs_cache = dict()
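# Networks are cached per URL so repeated calls to load_Gs reuse the already
# unpickled (G, D, Gs) triple instead of re-downloading the snapshot.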
def load_Gs(url):
if url not in _Gs_cache:
with dnnlib.util.open_url(url, cache_dir='../cache') as f:
_G, _D, Gs = pickle.load(f)
_Gs_cache[url] = Gs
return _Gs_cache[url]
def generate_from_latent(Gs):
read_path = 'monte_carlo_sampling_1m/neighbors/0.23/clustered_latents'
latents = [os.path.join(read_path, latent) for latent in os.listdir(read_path)]
save_path = 'monte_carlo_sampling_1m/neighbors/0.23/clustered_images'
if not os.path.exists(save_path):
os.makedirs(save_path)
for i in range(len(latents)):
latent = np.load(latents[i])
print(latent.shape)
image = Gs.run(np.expand_dims(latent, axis=0), None, **synthesis_kwargs)
print(image.shape)
image = np.squeeze(image)
image = PIL.Image.fromarray(image, 'RGB')
dst = os.path.join(save_path, '{}.png'.format(latents[i].split('/')[-1][:-4]))
print(dst)
image.save(dst, 'PNG')
def main():
tflib.init_tf()
generate_from_latent(load_Gs(url_celebahq))
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
|
{"hexsha": "216097e311b7e6bb3d72aef60333f5227e0dfb1e", "size": 1870, "ext": "py", "lang": "Python", "max_stars_repo_path": "pggan/sampling/draw_from_latent.py", "max_stars_repo_name": "VITA-Group/BlackBoxGANCollapse", "max_stars_repo_head_hexsha": "e52afab99e8b2e08a92aab86d84d53db77aa8c75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-04T09:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T07:32:41.000Z", "max_issues_repo_path": "pggan/sampling/draw_from_latent.py", "max_issues_repo_name": "VITA-Group/BlackBoxGANCollapse", "max_issues_repo_head_hexsha": "e52afab99e8b2e08a92aab86d84d53db77aa8c75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pggan/sampling/draw_from_latent.py", "max_forks_repo_name": "VITA-Group/BlackBoxGANCollapse", "max_forks_repo_head_hexsha": "e52afab99e8b2e08a92aab86d84d53db77aa8c75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-09T06:37:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T06:37:22.000Z", "avg_line_length": 35.2830188679, "max_line_length": 128, "alphanum_fraction": 0.6449197861, "include": true, "reason": "import numpy", "num_tokens": 521}
|
"""
Utility functions
"""
import rastercube
import numpy as np
import os
import errno
from datetime import datetime
import calendar
import cPickle as pickle
import pkg_resources
import atexit
# Cleanup tmpdir used by asset_fname on interpreter exit
atexit.register(lambda: pkg_resources.cleanup_resources())
def asset_fname(relpath):
"""
Gets the filename to an asset relative to the rastercube package root.
When rastercube is packaged as an egg, you can't access assets using
os.path.join(rastercube.__file__, 'assets/foo.json') since the egg is a
zip. So you should use this function.
See :
http://peak.telecommunity.com/DevCenter/PythonEggs#accessing-package-resources
>>> fname = asset_fname('assets/modis_tiles.geojson')
"""
return pkg_resources.resource_filename(rastercube.__name__, relpath)
def get_data_dir():
assert 'RASTERCUBE_DATA' in os.environ
return os.environ['RASTERCUBE_DATA']
def get_worldgrid():
assert 'RASTERCUBE_WORLDGRID' in os.environ
return os.environ['RASTERCUBE_WORLDGRID']
def get_modis_hdf_dir():
"""Returns the default directory where we store MODIS HDF files"""
return os.path.join(get_data_dir(), '0_input', 'MODIS_HDF')
def get_glcf_tif_dir():
"""Returns the default directory where we store MODIS HDF files"""
return os.path.join(get_data_dir(), '0_input', 'glcf_5.1')
def mkdir_p(path):
"""Create all directory in path. Like mkdir -p"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def load_properties(filename):
properties = {}
with open(filename) as f:
for line in f:
line = line.strip()
if line.startswith(';') or len(line) == 0:
continue
key, value = line.split('=')
key = key.strip()
value = value.strip()
properties[key] = value
return properties
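# Example: a line "threads = 4" parses to {'threads': '4'} -- values stay
# strings, and lines starting with ';' are treated as comments and skipped.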
def date_from_timestamp_ms(timestamp_ms):
date = datetime.utcfromtimestamp(timestamp_ms / 1000.0)
return date
def format_date(timestamp_ms, sep=None):
"""
Like jGridUtils.formatDate
"""
if sep is None:
sep = '_'
date = date_from_timestamp_ms(timestamp_ms)
return date.strftime('%Y{0}%m{0}%d'.format(sep))
def day_to_timestamp_ms(year, month, day):
"""Returns the milliseconds timestamp for the given date"""
return calendar.timegm(datetime(year, month, day).timetuple()) * 1000
def timestamp_ms_to_doy(timestamp_ms):
"""Convert a timestamp in milliseconds to day-of-year"""
d = datetime.fromtimestamp(timestamp_ms / 1000.).date()
return int(d.strftime('%j'))
def parse_date(datestr, sep=None):
if sep is None:
sep = '_'
date = datetime.strptime(datestr, '%Y{0}%m{0}%d'.format(sep))
timestamp_ms = int(calendar.timegm(date.timetuple()) * 1000.0)
return timestamp_ms
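# Round-trip sanity check for the date helpers above (all UTC-based):
#   ts = day_to_timestamp_ms(2016, 3, 1)   # 1456790400000
#   format_date(ts)                        # '2016_03_01'
#   parse_date('2016_03_01') == ts         # True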
def confirm(prompt=None, resp=False):
"""
Prompts for yes or no response from the user. Returns True for yes and
False for no.
'resp' should be set to the default value assumed by the caller when
user simply types ENTER.
"""
if prompt is None:
prompt = 'Confirm'
if resp:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ans = raw_input(prompt)
if not ans:
return resp
if ans not in ['y', 'Y', 'n', 'N']:
print 'please enter y or n.'
continue
if ans == 'y' or ans == 'Y':
return True
if ans == 'n' or ans == 'N':
return False
def save(fname, obj):
    # binary mode: pickle.HIGHEST_PROTOCOL produces a binary stream
    with open(fname, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
def load(fname):
    with open(fname, 'rb') as f:
        return pickle.load(f)
def index_3d_with_2d(array, indices):
"""
Given a 3D array a, will index it with a 2D array b that contains,
the index along the z axis to select.
This will return a 2D array c where
c[i,j] = array[i,j,indices[i,j]]
    This ought to be done with np.choose but is somewhat complicated. Relevant
stackoverflow discussion: http://stackoverflow.com/a/32090582
>>> a = np.arange(24).reshape(2, 3, 4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
<BLANKLINE>
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> b = np.array([[0, 1, 2],
... [3, 0, 1]])
>>> index_3d_with_2d(a, b)
array([[ 0, 5, 10],
[15, 16, 21]])
"""
assert len(array.shape) == 3
assert len(indices.shape) == 2
h, w, d = array.shape
return array.reshape(-1, d)[np.arange(h * w), indices.reshape(-1)]\
.reshape(h, w)
|
{"hexsha": "0ad065d3086d66c8db71b97519264066c47398db", "size": 4895, "ext": "py", "lang": "Python", "max_stars_repo_path": "rastercube/utils.py", "max_stars_repo_name": "terrai/rastercube", "max_stars_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-06-23T15:11:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-02T19:32:11.000Z", "max_issues_repo_path": "rastercube/utils.py", "max_issues_repo_name": "terrai/rastercube", "max_issues_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rastercube/utils.py", "max_forks_repo_name": "terrai/rastercube", "max_forks_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-07-28T08:45:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-28T03:19:17.000Z", "avg_line_length": 26.8956043956, "max_line_length": 82, "alphanum_fraction": 0.6100102145, "include": true, "reason": "import numpy", "num_tokens": 1309}
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def input_point(objectx,atloc, lonloc, sizex, colorx, alphax):
'''
    - Draws a point at a specific latitude/longitude on a map.
    - Originally this function needed a Basemap object (m) to be defined
      first, which was not an efficient design, since it relied heavily on
      the globally defined object (m) that plots the map itself.
    - [Update]: this problem was fixed by passing the drawing object in as
      one of the inputs; in our current case the object is named (m).
'''
lat, lon = atloc, lonloc
x, y = objectx(lon, lat)
return objectx.plot(x, y, 'go', markersize=sizex, color=colorx, alpha=alphax)
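# Minimal usage sketch (illustrative values; assumes an interactive backend):
#   m = Basemap(projection='mill')
#   m.drawcoastlines()
#   input_point(m, 35.68, 139.69, sizex=8, colorx='red', alphax=0.8)  # Tokyo
#   plt.show()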
|
{"hexsha": "f819dcfa3b8362ca72424e3709155df6675ef338", "size": 805, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project_files/Part5_Improving_the_plots/input_point_function.py", "max_stars_repo_name": "Ghasak/Geographical_Basemap", "max_stars_repo_head_hexsha": "80e9555da46f1a0227f345dc29b60ed7cbe1f5ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project_files/Part5_Improving_the_plots/input_point_function.py", "max_issues_repo_name": "Ghasak/Geographical_Basemap", "max_issues_repo_head_hexsha": "80e9555da46f1a0227f345dc29b60ed7cbe1f5ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-02-15T17:37:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:47:23.000Z", "max_forks_repo_path": "Project_files/Part5_Improving_the_plots/input_point_function.py", "max_forks_repo_name": "Ghasak/Geographical_Basemap", "max_forks_repo_head_hexsha": "80e9555da46f1a0227f345dc29b60ed7cbe1f5ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5909090909, "max_line_length": 104, "alphanum_fraction": 0.6807453416, "include": true, "reason": "import numpy", "num_tokens": 205}
|
# -*- coding: utf-8 -*-
import os
import datetime as dt
import numpy as np
import pandas as pd
from log import LogHandler
from src.data.tdx.setting import tdx_dir, MARKET2TDX_CODE, MARKET_DIR, PERIOD_DIR, PERIOD_EXT
log = LogHandler(os.path.basename('tdx.hq.log'))
def int2date(x):
year = int(x / 2048) + 2004
month = int(x % 2048 / 100)
day = x % 2048 % 100
return dt.datetime(year, month, day)
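# Sanity check of the decoding above: the packed integer is
# (year - 2004) * 2048 + month * 100 + day, so e.g. (illustrative value)
# int2date(30 * 2048 + 523) == dt.datetime(2034, 5, 23)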
def _get_future_day_hq(file_handler, count=-1):
names = 'datetime', 'open', 'high', 'low', 'close', 'openInt', 'volume', 'comment'
offsets = tuple(range(0, 31, 4))
formats = 'i4', 'f4', 'f4', 'f4', 'f4', 'i4', 'i4', 'i4'
dt_types = np.dtype({'names': names, 'offsets': offsets, 'formats': formats}, align=True)
hq_day_df = pd.DataFrame(np.fromfile(file_handler, dtype=dt_types, count=count))
hq_day_df.index = pd.to_datetime(hq_day_df['datetime'].astype('str'), errors='coerce')
hq_day_df.pop('datetime')
return hq_day_df
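# Note: each daily record is 32 bytes -- date (i4), open/high/low/close (f4
# each), open interest (i4), volume (i4) and a comment field (i4) -- which is
# why the readers below always seek in multiples of 32. The minute records are
# likewise 32 bytes, with the date field split into date/time u2 pairs.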
def _get_future_min_hq(file_handler, count=-1):
names = 'date', 'time', 'open', 'high', 'low', 'close', 'openInt', 'volume', 'comment'
formats = 'u2', 'u2', 'f4', 'f4', 'f4', 'f4', 'i4', 'i4', 'i4'
offsets = (0, 2) + tuple(range(4, 31, 4))
dt_types = np.dtype({'names': names, 'offsets': offsets, 'formats': formats}, align=True)
hq_min_df = pd.DataFrame(np.fromfile(file_handler, dtype=dt_types, count=count))
hq_min_df.index = hq_min_df.date.transform(int2date) + pd.to_timedelta(hq_min_df.time, unit='m')
hq_min_df.pop('date')
hq_min_df.pop('time')
return hq_min_df
def get_future_day_hq(market, code, start=None, end=None):
"""
    :param market: exchange/market name
    :param code: contract code, e.g. IL8 (dominant contract), IL9 (futures index), I1801
    :param start: start date
    :param end: end date
    :return: pd.DataFrame
"""
tdx_hq_dir = os.path.join(tdx_dir, 'vipdoc', MARKET_DIR[market], PERIOD_DIR['d'])
hq_filename = MARKET2TDX_CODE[market] + '#' + code.upper() + PERIOD_EXT['d']
hq_path = os.path.join(tdx_hq_dir, hq_filename)
if not os.path.exists(hq_path):
return None
f = open(hq_path, "rb")
f.seek(0, 0)
start_dt = np.fromfile(f, dtype=np.int32, count=1)
start_dt = dt.datetime.strptime(start_dt.astype(str)[0], '%Y%m%d')
f.seek(-32, 2)
end_dt = np.fromfile(f, dtype=np.int32, count=1)
end_dt = dt.datetime.strptime(end_dt.astype(str)[0], '%Y%m%d')
if not start:
start = dt.datetime(1970, 1, 1)
if start < start_dt:
f.seek(0, 0)
return _get_future_day_hq(f)
elif start > end_dt:
return None
    # TODO compute the actual number of trading days from a trading calendar
delta = (end_dt - start) + dt.timedelta(1)
factor = delta.days
try:
f.seek(-32 * factor, 2)
except OSError:
f.seek(0, 0)
log.info('%s trade recodes are few and factor = %d is too big.', code, factor)
hq_day_df = _get_future_day_hq(f)
if end:
return hq_day_df.loc[start: end]
else:
return hq_day_df.loc[start:]
def get_future_min_hq(market, code, start=None, end=None, freq='5m'):
"""
    :param market: exchange/market name
    :param code: contract code, e.g. IL8 (dominant contract), IL9 (futures index), I1801
    :param start: start time
    :param end: end time
    :param freq: bar period, '1m' or '5m'
    :return: pd.DataFrame of minute bars
"""
tdx_hq_dir = os.path.join(tdx_dir, 'vipdoc', MARKET_DIR[market], PERIOD_DIR[freq])
hq_filename = MARKET2TDX_CODE[market] + '#' + code.upper() + PERIOD_EXT[freq]
hq_path = os.path.join(tdx_hq_dir, hq_filename)
if not os.path.exists(hq_path):
return None
f = open(hq_path, "rb")
f.seek(0, 0)
start_dt = np.fromfile(f, dtype=np.int16, count=1)
start_dt = int2date(start_dt)
f.seek(-32, 2)
end_dt = np.fromfile(f, dtype=np.int16, count=1)
end_dt = int2date(end_dt)
if not start:
start = dt.datetime(1970, 1, 1)
if start < start_dt:
f.seek(0, 0)
return _get_future_min_hq(f)
elif start > end_dt:
return None
    k_num = 400  # rough 1-minute bars per day: most futures trade 9:00-10:15, 10:30-11:30, 13:30-15:00, 21:00-23:30
if freq == '5m':
k_num = int(k_num / 5)
    # TODO compute the number of business days between two dates; a custom
    # trading calendar needs to be added, see e.g.
    # https://www.cnblogs.com/fangbei/p/9075153.html
    # https://pypi.org/project/business_calendar/
delta = (end_dt - start)
factor = delta.days * k_num
while start < end_dt:
try:
f.seek(-32 * factor, 2)
end_dt = np.fromfile(f, dtype=np.int16, count=1)
            f.seek(-32 * factor, 2)  # the read advanced the file pointer; seek back to the original position
end_dt = int2date(end_dt)
factor = factor * 2
except OSError:
f.seek(0, 0)
log.warning('%s trade recodes are few and factor = %d is too big.', code, factor)
break
hq_min_df = _get_future_min_hq(f)
if end:
return hq_min_df.loc[start: end]
else:
return hq_min_df.loc[start:]
if __name__ == '__main__':
start = dt.datetime(2019, 2, 20)
code = 'srl8'
df = get_future_min_hq(market='czce', start=start, code=code, freq='5m')
|
{"hexsha": "ca2106dd4f2b15108e5c38234b2c3f88cd09853a", "size": 4998, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/tdx/hq.py", "max_stars_repo_name": "newlyedward/datascinece", "max_stars_repo_head_hexsha": "2a6148511832552991e115cb468ba4cc1db24353", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-22T16:14:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-12T07:56:57.000Z", "max_issues_repo_path": "src/data/tdx/hq.py", "max_issues_repo_name": "newlyedward/datascinece", "max_issues_repo_head_hexsha": "2a6148511832552991e115cb468ba4cc1db24353", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/tdx/hq.py", "max_forks_repo_name": "newlyedward/datascinece", "max_forks_repo_head_hexsha": "2a6148511832552991e115cb468ba4cc1db24353", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2909090909, "max_line_length": 100, "alphanum_fraction": 0.6148459384, "include": true, "reason": "import numpy", "num_tokens": 1694}
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
import pickle
import pprint
import time
import pyross
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#from matplotlib import rc;
#postFigFileName = 'figPostHistos_pop1e8.pdf'
#trajFigFileName = 'figTraj_pop1e8.pdf'
#mapFigFileName = 'figInfTraj_pop1e8.pdf'
useTex = True
if useTex :
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 20})
plt.rcParams['font.family'] = 'serif'
import synth_fns
## total population
popN = 1e6
## tau-leaping param, take this negative to force gillespie
## or set a small value for high-accuracy tau-leap (eg 1e-4 or 1e-5)
leapEps = 1e-5
## do we use small tolerances for the likelihood computations? (use False for debug etc)
isHighAccuracy = True
# absolute tolerance for logp for MAP
inf_atol = 1.0
## prior mean of beta, divided by true value (set to 1.0 for the simplest case)
betaPriorOffset = 1.0
betaPriorLogNorm = False
## setup model etc ( copied from synthInfTest-pop1e8.ipynb )
model_dict = synth_fns.get_model(popN)
model_spec = model_dict['mod']
contactMatrix = model_dict['CM']
parameters_true = model_dict['params']
cohortsM = model_dict['cohortsM']
Ni = model_dict['cohortsPop']
## total trajectory time (bare units)
Tf_bare = 20
## total inf time
Tf_inf_bare = 5
## inference period starts when the total deaths reach this amount (as a fraction)
fracDeaths = 2e-3 # int(N*200/1e5)
## hack to get higher-frequency data
## how many data points per "timestep" (in original units)
fineData = 4
## this assumes that all parameters are rates !!
for key in parameters_true:
#print(key,parameters_true[key])
parameters_true[key] /= fineData
#Tf = Tf_bare * fineData;
#Nf = Tf+1
#
#Tf_inference = Tf_inf_bare * fineData
#Nf_inference = Tf_inference+1
def getResults(fileRoot,minSeed) :
# ipFile = fileRoot+'-run'+str(0)+'-stochTraj'+str(minSeed)+'.npy'
# syntheticData = np.load(ipFile)
# print('loading trajectory from',ipFile)
# Nf_start = synth_fns.get_start_time(syntheticData, popN, fracDeaths)
# print('inf starts at timePoint',Nf_start)
runVals = [0,1]
allResultsInf = []
allResultsMC = []
for runVal in runVals :
ipFile = fileRoot+'-run'+str(runVal)+ "-mcmcAll.pik"
print('ipf',ipFile)
with open(ipFile, 'rb') as f:
[loadInf,loadMC]= pickle.load(f)
print('** read',len(loadInf),'data sets')
allResultsInf += loadInf
allResultsMC += loadMC
print('** tot',len(allResultsInf),'data sets ( check',len(allResultsMC),')')
return [allResultsInf,allResultsMC]
def computeBetaStats(allResultsMC,allResultsInf,printMe=True) :
betaStats = []
for trajIndex,result_mcmc in enumerate(allResultsMC) :
betas = [ rr['params_dict']['beta'] for rr in result_mcmc ]
postMeanBeta = np.mean(betas)
postStdBeta = np.std(betas)
postCIBeta = [ np.percentile(betas,2.5) , np.percentile(betas,97.5)]
betaStats += [{'m':postMeanBeta,'s':postStdBeta,'c':postCIBeta,
'map':allResultsInf[trajIndex]['params_dict']['beta']}]
if printMe :
print("post: mean {m:.4f} std {s:.4f} CI95 {l:.4f} {u:.4f}".format(m=postMeanBeta,
s=postStdBeta,
l=postCIBeta[0],u=postCIBeta[1]))
meanPostMean = np.mean(np.array([ b['m'] for b in betaStats ]))
stdPostMean = np.std(np.array([ b['m'] for b in betaStats ]))
errPostMean = stdPostMean/np.sqrt(len(allResultsInf)-1)
meanPostStd = np.mean(np.array([ b['s'] for b in betaStats ]))
meanPostCI = [ np.mean(np.array([ b['c'][ii] for b in betaStats ])) for ii in [0,1] ]
meanMAP = np.mean(np.array([ b['map'] for b in betaStats ]))
if printMe :
print('\n')
print('** true {:.4f}'.format(parameters_true['beta']))
print('** meanPostMAP {:.4f}'.format(meanMAP))
print('** meanPostMean {:.4f}'.format(meanPostMean))
print('** stdPostMean {:.4f} stderr {:.4f} (n {:d})'.format(stdPostMean,
stdPostMean/np.sqrt(len(allResultsInf)-1) ,
len(allResultsInf)
))
print('** meanPostCI {:.4f} {:.4f}'.format(meanPostCI[0],meanPostCI[1]))
print('** meanPostStd {:.4f}'.format(meanPostStd))
return [betas,meanPostMean,stdPostMean,errPostMean,meanPostCI]
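# nb: the 'betas' returned by computeBetaStats are the chain samples from the
# last trajectory only; the aggregate statistics summarize all trajectories.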
minSeed = 19
rootList = ['dataSynthInfQuality-pop1e4','dataSynthInfQuality-pop1e5','dataSynthInfQuality-pop1e6']
popList = [1e4,1e5,1e6]
yVals = []
barVals = []
ciVals = []
for jj,fileRoot in enumerate(rootList) :
print('***',fileRoot)
[fileResultsInf,fileResultsMC] = getResults(fileRoot,minSeed)
[betas,meanPostMean,stdPostMean,errPostMean,meanPostCI] = computeBetaStats(fileResultsMC,fileResultsInf)
yVals += [meanPostMean]
barVals += [errPostMean]
ciVals += [meanPostCI]
fig,ax = plt.subplots(1,1,figsize=(7, 4))
plt.subplots_adjust(left=0.15,right=0.95,bottom=0.2,top=0.95)
ax.set_xscale('log')
ax.set_xlabel('population $N$')
ax.set_ylabel('$\\beta$')
ax.errorbar(popList,yVals,yerr=barVals,fmt='o',label='average posterior mean')
ax.fill_between(popList,[c[0] for c in ciVals],[c[1] for c in ciVals],
color='dodgerblue',alpha=0.2,label='average posterior CI')
ax.plot([np.min(popList),np.max(popList)],[parameters_true['beta'],parameters_true['beta']],
linestyle='dashed',color='red')
ax.set_ylim(bottom=0.0,top=2.0*parameters_true['beta'])
ax.legend(handlelength=0.2,frameon=False)
plt.savefig('figQuality.pdf')
|
{"hexsha": "1736ac060cd4dbc9cd8ac98fe90cda2090653edc", "size": 5833, "ext": "py", "lang": "Python", "max_stars_repo_path": "SimpleTestModel/figs_quality.py", "max_stars_repo_name": "rljack2002/infExampleCovidEW", "max_stars_repo_head_hexsha": "351e0605c80a51a2cd285136d7a05d969ac6c6fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-28T17:01:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-30T11:07:20.000Z", "max_issues_repo_path": "SimpleTestModel/figs_quality.py", "max_issues_repo_name": "rljack2002/infExampleCovidEW", "max_issues_repo_head_hexsha": "351e0605c80a51a2cd285136d7a05d969ac6c6fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SimpleTestModel/figs_quality.py", "max_forks_repo_name": "rljack2002/infExampleCovidEW", "max_forks_repo_head_hexsha": "351e0605c80a51a2cd285136d7a05d969ac6c6fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0265957447, "max_line_length": 108, "alphanum_fraction": 0.6408366192, "include": true, "reason": "import numpy", "num_tokens": 1671}
|
from pytorch_pretrained_bert import BertTokenizer, BertModel
from keras.preprocessing.sequence import pad_sequences
import torch
import numpy as np
class BertWrapper:
def __init__(self, model_string='bert-base-multilingual-cased'):
self.model_string = model_string
self.tokenizer = BertTokenizer.from_pretrained(self.model_string, do_lower_case=False)
self.model = BertModel.from_pretrained(self.model_string)
def enter_eval_mode(self):
self.model.eval()
def compute_embeddings(self, input_ids_tensor, attention_mask, target_word_idx_dict):
target_embeddings = {target: [] for target in target_word_idx_dict}
with torch.no_grad():
encoded_layers, _ = self.model(input_ids_tensor, token_type_ids=None, attention_mask=attention_mask)
embedding = encoded_layers[11]
for target, target_idx in target_word_idx_dict.items():
embeddings = [torch.mean(embedding[:, idx[0]:idx[1], :], dim=1).numpy().flatten() for idx in target_idx]
target_embeddings[target].extend([emb for emb in embeddings
if not (np.isnan(np.sum(emb)) or np.sum(emb) == 0)])
return target_embeddings
def tokenize_sentences(self, sentences, word_to_index=None):
word_to_index = word_to_index or []
tokenized_target_words = {word: self.tokenizer.tokenize(word) for word in word_to_index}
for sentence in sentences:
tokenized_text = self.tokenizer.tokenize(' '.join(["[CLS]"] + sentence + ["[SEP]"]))
word_to_idx_dict = {
word: [(i, i + len(tokenized_target_words[word])) for i, tok in enumerate(tokenized_text)
if tokenized_text[i: i + len(tokenized_target_words[word])] ==
tokenized_target_words[word]] for word in word_to_index}
yield tokenized_text, word_to_idx_dict
def tokenize_sentences_direct_mapping(self, sentences, word_array, word_to_index=None):
word_to_index = word_to_index or []
sentences = list(sentences)
tokenized_target_words = {word: self.tokenizer.tokenize(word) for word in word_to_index}
for sentence, target_word in zip(sentences, word_array):
tokenized_text = self.tokenizer.tokenize(' '.join(["[CLS]"] + sentence + ["[SEP]"]))
word_to_idx_dict = {target_word: [(i, i + len(tokenized_target_words[target_word]))
for i, tok in enumerate(tokenized_text)
if tokenized_text[i: i + len(tokenized_target_words[target_word])] ==
tokenized_target_words[target_word]]}
yield tokenized_text, word_to_idx_dict
def get_tokenized_input_ids(self, tokenized_text, padding_length):
return pad_sequences([self.tokenizer.convert_tokens_to_ids(tokenized_text)], maxlen=padding_length,
dtype="long", truncating="post", padding="post")[0]
@staticmethod
def get_attention_mask(input_ids):
return [float(i > 0) for i in input_ids]
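# Minimal usage sketch (illustrative sentence and target word; downloading the
# pretrained weights is assumed to work):
#   wrapper = BertWrapper()
#   wrapper.enter_eval_mode()
#   sents = [["the", "bank", "of", "the", "river"]]
#   tok, idx = next(wrapper.tokenize_sentences(sents, word_to_index=["bank"]))
#   ids = wrapper.get_tokenized_input_ids(tok, padding_length=32)
#   mask = BertWrapper.get_attention_mask(ids)
#   embs = wrapper.compute_embeddings(torch.tensor([ids]), torch.tensor([mask]), idx)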
|
{"hexsha": "1d41389539f38781f1d31d5ead3341cd0aa4007f", "size": 3181, "ext": "py", "lang": "Python", "max_stars_repo_path": "semeval2020/language_models/bertwrapper.py", "max_stars_repo_name": "DavidRother/semeval2020-task1", "max_stars_repo_head_hexsha": "715f82afb8b282669d59ff610b63714d19db4618", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-12-02T23:18:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T11:19:28.000Z", "max_issues_repo_path": "semeval2020/language_models/bertwrapper.py", "max_issues_repo_name": "DavidRother/semeval2020-task1", "max_issues_repo_head_hexsha": "715f82afb8b282669d59ff610b63714d19db4618", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-24T15:22:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-25T08:08:07.000Z", "max_forks_repo_path": "semeval2020/language_models/bertwrapper.py", "max_forks_repo_name": "DavidRother/semeval2020-task1", "max_forks_repo_head_hexsha": "715f82afb8b282669d59ff610b63714d19db4618", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.8448275862, "max_line_length": 120, "alphanum_fraction": 0.6501100283, "include": true, "reason": "import numpy", "num_tokens": 656}
|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
#N Population Size
N = 1000
#Initial conditions and vector
I0 = 1
R0 = 0
S0 = 999
initial = S0, I0, R0
#time
t = np.linspace(0, 200, 200)
#SIR model
def SIRmodel(v, t, N, beta, gamma):
"""Determines three differential equations of the SIR model depending on initial
conditions and chosen parameters. dSdt determines the rate of change of
those that are not infected but are susceptible to being infected. dIdt
determines the rate of change of the total infected individuals. dRdt
determines the rate of change of the individuals who have recovered.
Paramaters:
v - vector of integers
t - numeric sequence of form np.linspace(start, end, number of breakpoints)
N - integer
beta - float
gamma - float
Returns:
Tuple of 3 floats, the change in the values of the differential
equations of the model at one instant in time
"""
S, I, R = v
dSdt = (-1 * beta * S * I) / N
dIdt = (beta * S * I / N) - (gamma * I)
dRdt = gamma * I
return dSdt, dIdt, dRdt
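# Worked check of the derivatives at the initial state (illustrative numbers):
# with S=999, I=1, R=0, N=1000, beta=0.2, gamma=0.1,
#   dSdt = -0.2 * 999 * 1 / 1000 = -0.1998
#   dIdt =  0.1998 - 0.1 * 1     =  0.0998
#   dRdt =  0.1 * 1              =  0.1
# so the outbreak initially grows whenever beta * S / N > gamma.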
# Integrating and plotting the differential equations in the SIR model
def integrate_SIR(gamma, beta):
"""Plots the SIR model of disease for given gamma and beta parameters
Parameters:
gamma - float, reciprocal of average period of infectiousness
    beta - float, rate at which an infected person makes disease-transmitting contacts per day
Returns:
Plot of number of susceptible individuals, infected individuals, and
recovered individuals over time
"""
int_SIR = odeint(SIRmodel, initial, t, args=(N, beta, gamma))
S, I, R = int_SIR.T
# Plot S(t), I(t) and R(t)
plt.plot(t, S, 'b', label='Susceptible Individuals')
plt.plot(t, I, 'r', label='Infected Individuals')
plt.plot(t, R, 'g', label='Recovered Individuals')
plt.xlabel('Time (days)')
plt.ylabel('Number of Individuals')
plt.title('SIR Model of Disease Spread with \u03B3 = '+ str(round(gamma,2))+ ' \u03B2 = '+ str(beta))
plt.legend()
plt.show()
#Plotting for a variety of chosen beta and gamma parameters
integrate_SIR(1/10, 0.2)
integrate_SIR(1/10, 0.1)
integrate_SIR(1/7, 0.2)
integrate_SIR(1/14, 0.2)
|
{"hexsha": "da5e338c2683df72499a3bef99e2a335aa614b2b", "size": 2285, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment_2/question2.py", "max_stars_repo_name": "jpas3/CTA200", "max_stars_repo_head_hexsha": "231b22c476638be63da945d24d0de8ddaaaf702f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment_2/question2.py", "max_issues_repo_name": "jpas3/CTA200", "max_issues_repo_head_hexsha": "231b22c476638be63da945d24d0de8ddaaaf702f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment_2/question2.py", "max_forks_repo_name": "jpas3/CTA200", "max_forks_repo_head_hexsha": "231b22c476638be63da945d24d0de8ddaaaf702f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7361111111, "max_line_length": 105, "alphanum_fraction": 0.6792122538, "include": true, "reason": "import numpy,from scipy", "num_tokens": 648}
|
# -*- coding: utf-8 -*-
"""
Navigation toolbar for matplotlib widgets
"""
import numpy as np
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QPoint
from PyQt5.QtCore import QSize
from PyQt5.QtCore import QVariant
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QSpinBox
from PyQt5.QtWidgets import QToolBar
from PyQt5.QtWidgets import QToolButton
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QWidgetAction
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT
from matplotlib.path import Path
from matplotlib.widgets import LassoSelector
from mpl4qt.widgets.utils import COLOR_CYCLE
from mpl4qt.widgets.markers_view import MarkersView
TBSTY_FLOATING = """
QToolBar {
background-color: white;
border-radius: 0px;
border-bottom: 1px solid #8f8f91;
border-top: 1px solid #8f8f91;
spacing: 2px;
padding: 4px;
}
"""
TBSTY_NONFLOATING = """
QToolBar {{
background-color: {};
border-radius: 0px;
border-bottom: 0.5px solid #8f8f91;
border-top: 0.5px solid #8f8f91;
spacing: 0px;
padding: 1px;
}}
"""
class NavigationToolbar(NavigationToolbar2QT):
def __init__(self, canvas, parent=None):
super(self.__class__, self).__init__(canvas, parent)
self.tb = parent
self.mpl = self.tb.parent
def release_zoom(self, e):
NavigationToolbar2QT.release_zoom(self, e)
xlim = self.mpl.axes.get_xlim()
ylim = self.mpl.axes.get_ylim()
self.tb.zoom_roi_changed.emit(xlim, ylim)
class MToolbar(QToolBar):
"""Toolbar for mpl widgets.
Parameters
----------
canvas :
Canvas for drawing.
parent :
Mpl figure widget.
"""
# indices list of points selected by lasso tool
selectedIndicesUpdated = pyqtSignal(QVariant, QVariant)
# toolbar floatable status
floatable_changed = pyqtSignal(bool)
# zoomed ROI changed
zoom_roi_changed = pyqtSignal(tuple, tuple)
# add marker tool is checked or not, with mk_name, update/new flag
marker_add_checked = pyqtSignal(bool, 'QString', bool)
    # reset marker pos: is_new flag (False), x, y, mk_name
reset_marker_pos = pyqtSignal(bool, float, float, 'QString')
# snap enabled or not, tuple of xy(z)data
snap_updated = pyqtSignal([bool], [bool, tuple])
# shaded area xylims
shaded_area_updated = pyqtSignal(tuple, tuple)
def __init__(self, canvas, parent=None):
super(MToolbar, self).__init__()
self.parent = parent
self.canvas = canvas
self.init_ui()
# window flags
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
self._bgcolor = self.parent.getFigureBgColor().name()
def show_toolbar(self):
self.move(self.get_pos())
self.show()
self.raise_()
@pyqtSlot(bool)
def on_floatable_changed(self, f):
if f: # floatable
if self.parent.vbox.count() > 1:
w = self.parent.vbox.takeAt(0).widget()
self.parent.vbox.removeWidget(w)
else:
w = self
w.setStyleSheet(TBSTY_FLOATING)
w.setParent(None)
w.dock_act.setIcon(QIcon(QPixmap(":/tools/top_dock.png")))
w.dock_act.setToolTip("Dock toolbar")
w.show_toolbar()
else: # non-floatable
self.setStyleSheet(TBSTY_NONFLOATING.format(self._bgcolor))
self.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Preferred)
self.parent.vbox.insertWidget(0, self)
self.dock_act.setIcon(QIcon(QPixmap(":/tools/popup.png")))
self.dock_act.setToolTip("Undock toolbar")
self._floating = f
def init_ui(self):
self._isize = self.iconSize().height()
#
self._floating = True
self.floatable_changed.connect(self.on_floatable_changed)
# bg
self.parent.bgColorChanged.connect(self.update_bgcolor)
self.tb = tb = NavigationToolbar(self.canvas, self)
tb.hide()
# zoom tool
zoom_act = QAction(QIcon(QPixmap(":/tools/zoom.png")), "Zoom", self)
zoom_act.setCheckable(True)
self.zoom_act = zoom_act
zoom_act.setToolTip("Zoom into selected region")
# home tool
home_act = QAction(QIcon(QPixmap(":/tools/home.png")), "Home", self)
home_act.setToolTip("Reset to original view")
# backward
backward_act = QAction(QIcon(QPixmap(":/tools/backward.png")),
"Backward", self)
backward_act.setToolTip("Backward view")
# forward
forward_act = QAction(QIcon(QPixmap(":/tools/forward.png")),
"Forward", self)
forward_act.setToolTip("Forward view")
# auto scale tool
auto_scale_act = QAction(QIcon(QPixmap(":/tools/auto-scale.png")), "Auto Scale", self)
auto_scale_act.setToolTip("Auto Scale (a)")
# auto xscale tool
auto_xscale_act = QAction(QIcon(QPixmap(":/tools/auto-xscale.png")), "Auto X-Scale", self)
auto_xscale_act.setToolTip("Auto X-Scale (a,x)")
# auto yscale tool
auto_yscale_act = QAction(QIcon(QPixmap(":/tools/auto-yscale.png")), "Auto Y-Scale", self)
auto_yscale_act.setToolTip("Auto Y-Scale (a,y)")
# pan tool
pan_act = QAction(QIcon(QPixmap(":/tools/pan.png")), "Pan", self)
pan_act.setCheckable(True)
self.pan_act = pan_act
pan_act.setToolTip("Pan axes with left mouse")
# save tool
save_act = QAction(QIcon(QPixmap(":/tools/save.png")), "Save", self)
save_act.setToolTip("Save figure as file")
# lasso tool
lasso_act = QAction(QIcon(QPixmap(":/tools/lasso.png")), "Selector", self)
self.lasso_act = lasso_act
lasso_act.setCheckable(True)
lasso_act.setToolTip("Select point(s) by lassoing")
# cross ruler tool
self.snap_cursor = None
cross_act = QAction(QIcon(QPixmap(":/tools/cross.png")), "Crosshair", self)
cross_act.setCheckable(True)
cross_act.setShortcut(Qt.SHIFT + Qt.Key_R)
cross_act.setToolTip("Coordinate locator (Shift + R) and marker")
cross_marker_text_act = QAction("Marker with (x, y)", self)
cross_marker_text_act.setCheckable(True)
cross_marker_text_act.setShortcut(Qt.SHIFT + Qt.Key_P)
cross_marker_text_act.setToolTip("Check to mark with (x, y)")
cross_marker_text_act.toggled.connect(self.on_marker_with_xy)
cross_hide_act = QAction(QIcon(QPixmap(":/tools/visibility_off.png")), "Hide Markers", self)
cross_hide_act.setShortcut(Qt.CTRL + Qt.Key_H)
cross_hide_act.setToolTip("Click to hide crosshair markers.")
cross_hide_act.triggered.connect(self.on_hide_crosses)
cross_snap_act = QAction("Snap", self)
self.cross_snap_act = cross_snap_act
cross_snap_act.setShortcut(Qt.SHIFT + Qt.Key_S)
cross_snap_act.setCheckable(True)
cross_snap_act.setToolTip("Check to snap to point")
cross_snap_act.toggled.connect(self.on_snap_cross)
if self.parent.widget_type == '__BasePlotWidget':
self._is_snap_point = False
else:
self._is_snap_point = True
cross_snap_act.setChecked(self._is_snap_point)
cross_marker_act = QAction(QIcon(QPixmap(":/tools/add_marker.png")), "Add Marker", self)
self.cross_marker_act = cross_marker_act
cross_marker_act.setShortcut(Qt.CTRL + Qt.Key_M)
cross_marker_act.setCheckable(True)
cross_marker_act.setToolTip("Click to add a crosshair marker.")
cross_marker_act.toggled.connect(self.on_add_marker)
self.marker_add_checked.connect(self.parent.on_marker_add_checked)
self.mk_view = None
cross_show_mk_act = QAction(QIcon(QPixmap(":/icons/view_list.png")), "Show Markers", self)
cross_show_mk_act.setShortcut(Qt.CTRL + Qt.Key_V)
cross_show_mk_act.setToolTip("Show all markers.")
cross_show_mk_act.triggered.connect(self.on_show_mks)
menu = QMenu(self)
menu.setToolTipsVisible(True)
menu.addAction(cross_snap_act)
menu.addAction(cross_marker_act)
menu.addAction(cross_marker_text_act)
menu.addAction(cross_show_mk_act)
menu.addAction(cross_hide_act)
cross_act.setMenu(menu)
# info tool
info_act = QAction(QIcon(QPixmap(":/tools/info.png")), "About", self)
info_act.setToolTip("About")
# exit tool
exit_act = QAction(QIcon(QPixmap(":/tools/exit.png")), "Exit", self)
exit_act.setToolTip("Close toolbar")
# tb config tool
conf_act = QAction(QIcon(QPixmap(":/tools/preferences.png")), "Preferences", self)
conf_act.setToolTip("Preferences")
conf_isize_w = QWidget(self)
conf_isize_box = QHBoxLayout()
conf_isize_box.setContentsMargins(2, 0, 0, 0)
conf_isize_sbox = QSpinBox(self)
conf_isize_sbox.setToolTip("Adjust icon size")
conf_isize_sbox.setValue(self._isize)
conf_isize_sbox.setRange(6, 128)
conf_isize_btn = QToolButton(self)
conf_isize_btn.setToolTip("Reset icon size")
conf_isize_btn.setIcon(QIcon(QPixmap(":/icons/reset_btn.png")))
conf_isize_box.addWidget(QLabel("Icon Size", self))
conf_isize_box.addWidget(conf_isize_sbox, 1)
conf_isize_box.addWidget(conf_isize_btn)
conf_isize_w.setLayout(conf_isize_box)
conf_menu = QMenu(self)
conf_menu.setToolTipsVisible(True)
conf_isize_act = QWidgetAction(self)
conf_isize_act.setDefaultWidget(conf_isize_w)
conf_menu.addAction(conf_isize_act)
conf_act.setMenu(conf_menu)
# dock tool
dock_act = QAction(QIcon(QPixmap(":/tools/top_dock.png")), "Dock", self)
self.dock_act = dock_act
dock_act.setToolTip("Dock toolbar")
# repos to center (toolbar) tool
repos_act = QAction(QIcon(QPixmap(":/tools/repos.png")), "Repos", self)
repos_act.setToolTip(
"Reposition toolbar wrt figure widget, drag & move to otherwhere")
# pos display tool
self.pos_lbl = QLabel(self)
self.pos_lbl.setSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
self.pos_lbl.setToolTip("Pointed Cartesian coordinate")
self.parent.xyposUpdated.connect(self.on_update_xypos)
# widgets in toolbar
self.addAction(dock_act)
self.addAction(repos_act)
self.addSeparator()
self.addAction(home_act)
self.addAction(backward_act)
self.addAction(forward_act)
self.addSeparator()
self.addAction(auto_scale_act)
self.addAction(auto_xscale_act)
self.addAction(auto_yscale_act)
self.addSeparator()
self.addAction(pan_act)
self.addAction(zoom_act)
self.addAction(lasso_act)
self.addAction(cross_act)
self.addAction(save_act)
self.addSeparator()
self.addWidget(self.pos_lbl)
self.addSeparator()
self.addAction(conf_act)
self.addAction(info_act)
self.addAction(exit_act)
# events
home_act.triggered.connect(self.home)
forward_act.triggered.connect(self.forward)
backward_act.triggered.connect(self.backward)
auto_scale_act.triggered.connect(self.auto_scale)
auto_xscale_act.triggered.connect(self.auto_xscale)
auto_yscale_act.triggered.connect(self.auto_yscale)
pan_act.toggled.connect(self.pan)
zoom_act.toggled.connect(self.zoom)
lasso_act.toggled.connect(self.lasso)
cross_act.toggled.connect(self.cross_ruler)
save_act.triggered.connect(self.save)
repos_act.triggered.connect(self.repos_toolbar)
exit_act.triggered.connect(self.close)
dock_act.triggered.connect(self.dock)
info_act.triggered.connect(self.about_info)
conf_isize_sbox.valueChanged.connect(self.on_update_isize)
conf_isize_btn.clicked.connect(lambda:conf_isize_sbox.setValue(self._isize))
#
self.floatable_changed.emit(self._floating)
@pyqtSlot(bool)
def on_snap_cross(self, is_snap):
# snap to point or not
self._is_snap_point = is_snap
if is_snap:
self.snap_updated[bool, tuple].emit(
True, self.parent.get_all_data())
else:
self.snap_updated[bool].emit(False)
def on_update_isize(self, i):
"""icon size
"""
self.setIconSize(QSize(i, i))
def update_bgcolor(self, color):
if not self._floating:
self._bgcolor = color.name()
self.setStyleSheet(TBSTY_NONFLOATING.format(self._bgcolor))
@pyqtSlot(bool)
def on_marker_with_xy(self, marker_with_xy):
"""Marker the markers with (x,y) or literally with `M{i}`.
"""
self.parent._marker_with_xy = marker_with_xy
for mk_name, (_, _, _, pt, (x, y)) in self.parent._markers.items():
if marker_with_xy:
pt.set_text('{0:g},{1:g}'.format(x,y))
self.sender().setToolTip("Uncheck to mark with literal names")
else:
pt.set_text(mk_name)
self.sender().setToolTip("Check to mark with (x, y)")
self.parent.update_figure()
@pyqtSlot(bool)
def cross_ruler(self, enabled):
"""Enable free crosshair tool.
"""
if enabled:
try:
if self.snap_cursor is None:
if self._is_snap_point:
if self.parent.widget_type == 'image':
data_tuple = self.parent.im, *self.parent.get_all_data()
else:
if self.parent._last_sel_lines == {}:
lobj = self.parent._lines[0]
else:
lobj = list(self.parent._last_sel_lines.values())[0][0]
data_tuple = lobj, *self.parent.get_all_data(lobj)
if data_tuple[1].size == 0:
raise SnapCursorNoDataProbe("No data to probe.")
else:
data_tuple = None
raise SnapCursorNotExist("SnapCursor does not exist.")
else:
raise SnapCursorAlreadyExisted('SnapCursor is existed.')
except SnapCursorNoDataProbe as err:
QMessageBox.warning(self, 'Snap Cursor Tool',
str(err), QMessageBox.Ok)
self.sender().setChecked(False)
except SnapCursorAlreadyExisted:
self.snap_cursor.is_snap = self._is_snap_point
self.parent.xyposUpdated.connect(self.snap_cursor.on_move)
if self.parent.widget_type != '__BasePlotWidget':
self.parent.dataChanged.connect(self.snap_cursor.set_data)
self.parent.selectedLineChanged.connect(self.snap_cursor.on_change_gobj)
self.snap_updated[bool].connect(self.snap_cursor.snap_disabled)
self.snap_updated[bool, tuple].connect(self.snap_cursor.snap_enabled)
except SnapCursorNotExist:
self.snap_cursor = SnapCursor(self.parent.axes, data_tuple,
self._is_snap_point)
self.parent.xyposUpdated.connect(self.snap_cursor.on_move)
if self.parent.widget_type != '__BasePlotWidget':
self.parent.dataChanged.connect(self.snap_cursor.set_data)
self.parent.selectedLineChanged.connect(self.snap_cursor.on_change_gobj)
self.snap_updated[bool].connect(self.snap_cursor.snap_disabled)
self.snap_updated[bool, tuple].connect(self.snap_cursor.snap_enabled)
else:
if self.snap_cursor is None:
return
self.parent.xyposUpdated.disconnect(self.snap_cursor.on_move)
if self.parent.widget_type != '__BasePlotWidget':
self.parent.dataChanged.disconnect(self.snap_cursor.set_data)
self.snap_updated.disconnect()
self.snap_cursor.delete()
self.snap_cursor = None
@pyqtSlot()
def on_hide_crosses(self):
# hide/show all markers
if not self.parent._markers:
return
o = self.sender()
show_flag = o.text() == "Show Markers"
self.parent.set_visible_hvlines(show_flag)
if show_flag:
icon = QIcon(QPixmap(":/tools/visibility_off.png"))
lbl = 'Hide Markers'
tp = "Click to show crosshair markers."
else:
icon = QIcon(QPixmap(":/tools/visibility.png"))
lbl = 'Show Markers'
tp = "Click to hide crosshair markers."
o.setIcon(icon)
o.setText(lbl)
o.setToolTip(tp)
@pyqtSlot(bool)
def on_add_marker(self, is_checked, mk_name=None):
# place a new cross marker if checked.
self.parent._to_add_marker = is_checked
update_flag = False
if is_checked:
if mk_name is None: # new cross marker
self.parent._mk_name = 'M{}'.format(self.parent._marker_id)
self.parent._current_mc = next(COLOR_CYCLE)
else: # update mk_name marker
update_flag = True
hl, _, _, _, _ = self.parent._markers[mk_name]
self.parent._mk_name = mk_name
self.parent._current_mc = hl.get_color()
self.parent._added_marker = False
self.cross_marker_act.setText("Add/Update Marker (click when done)")
QGuiApplication.setOverrideCursor(Qt.CrossCursor)
else:
if self.parent._added_marker:
self.parent._marker_id += 1
self.sender().setText("Add Marker")
self.marker_add_checked.emit(is_checked, self.parent._mk_name, update_flag)
@pyqtSlot('QString')
def on_remove_marker(self, mk_name):
# remove marker of the name *mk_name*, maintain marker_id/n_markers
hl, vl, cp, pt, _ = self.parent._markers.pop(mk_name)
[o.remove() for o in (hl, vl, cp, pt)]
self.parent.update_figure()
@pyqtSlot('QString', float, float)
def on_repos_marker(self, mk_name, x, y):
# repos marker with (x, y)
self.parent.draw_hvlines(x, y, mk_name)
@pyqtSlot('QString')
def on_reset_marker_pos(self, mk_name):
_, _, _, _, (x, y) = self.parent._markers[mk_name]
self.reset_marker_pos.emit(False, x, y, mk_name)
@pyqtSlot('QString')
def on_relocate_marker(self, mk_name):
# relocate marker with the name *mk_name*
self.on_add_marker(True, mk_name)
@pyqtSlot('QString', 'QString', bool)
def on_shade_marked_area(self, mk_name1, mk_name2, is_shade):
# shade marked rect (m1, m2) or not.
_, _, _, _, (x1, y1) = self.parent._markers[mk_name1]
_, _, _, _, (x2, y2) = self.parent._markers[mk_name2]
if is_shade:
if 'mk_area' not in self.parent._patches:
self.parent.draw_shade_area((x1, y1), (x2, y2), alpha=0.5, color="#D3D7CF")
self.shaded_area_updated.emit(tuple(sorted((x1, x2))), tuple(sorted((y1, y2))))
else:
p = self.parent._patches.pop('mk_area', None)
if p is not None:
p.remove()
self.parent.update_figure()
@pyqtSlot()
def on_show_mks(self):
# show all markers.
if self.mk_view is None:
self.mk_view = MarkersView(self.parent._markers, self)
self.mk_view.marker_removed.connect(self.on_remove_marker)
self.mk_view.relocate_marker['QString'].connect(self.on_relocate_marker)
self.mk_view.relocate_marker['QString', float, float].connect(self.on_repos_marker)
self.mk_view.reset_marker_pos.connect(self.on_reset_marker_pos)
self.mk_view.shade_area_changed.connect(self.on_shade_marked_area)
self.reset_marker_pos.connect(self.mk_view.on_add_marker)
self.parent.markerUpdated.connect(self.mk_view.on_add_marker)
self.mk_view._show()
else:
self.mk_view.show()
@pyqtSlot()
def repos_toolbar(self):
self.move(self.get_pos())
self.adjustSize()
@pyqtSlot(list)
def on_update_xypos(self, coord):
if len(coord) == 2:
x, y = coord
self.pos_lbl.setText(
"<html><pre><sup>(x,y)</sup>({0:<.4g},{1:<.4g})</pre></html>".format(x, y))
elif len(coord) == 3:
x, y, z = coord
self.pos_lbl.setText(
"<html><pre><sup>(x,y,z)</sup>({0:<.4g},{1:<.4g},{2:<.4g})</pre></html>".format(x, y, z))
@pyqtSlot()
def zoom(self):
self.tb.zoom()
@pyqtSlot()
def pan(self):
self.tb.pan()
@pyqtSlot()
def home(self):
self.tb.home()
@pyqtSlot()
def forward(self):
self.tb.forward()
@pyqtSlot()
def backward(self):
self.tb.back()
@pyqtSlot()
def auto_scale(self):
self.parent.set_autoscale()
@pyqtSlot()
def auto_xscale(self):
self.parent.set_autoscale('x')
@pyqtSlot()
def auto_yscale(self):
self.parent.set_autoscale('y')
@pyqtSlot()
def save(self):
self.tb.save_figure()
@pyqtSlot()
def lasso(self):
if self.sender().isChecked():
pts = self.parent.get_points()
ax = self.parent.axes
self.selector = SelectFromPoints(ax, pts)
self.selector.selectedIndicesReady.connect(self.update_selected_indices)
else:
self.selector.disconnect()
self.selector.selectedIndicesReady.disconnect()
@pyqtSlot()
def about_info(self):
from ._info import get_pkg_info
QMessageBox.about(self, 'About mpl4qt', get_pkg_info())
@pyqtSlot()
def dock(self):
# dock tb to mplwidget or undock
self.floatable_changed.emit(not self._floating)
@pyqtSlot(QVariant, QVariant)
def update_selected_indices(self, ind, pts):
"""Emit selected indice list and points.
"""
if ind.size == 0:
return
self.selectedIndicesUpdated.emit(ind, pts)
def closeEvent(self, e):
for o in (self.lasso_act, self.zoom_act, self.pan_act,):
if o.isChecked():
o.setChecked(False)
"""
if self.lasso_act.isChecked():
self.lasso_act.setChecked(False)
if self.zoom_act.isChecked():
self.zoom_act.setChecked(False) # emit toggled
"""
self.close()
def get_pos(self):
"""Get the position to put this dialog in the middle of the parent
widget.
"""
x = self.parent.geometry().x() + 0.5 * (
self.parent.geometry().width() - self.geometry().width())
y = self.parent.geometry().y()
return self.parent.mapToGlobal(QPoint(x, y))
def mousePressEvent(self, e):
self.pos_x = e.x()
self.pos_y = e.y()
def mouseMoveEvent(self, e):
try:
self.move(e.globalX() - self.pos_x, e.globalY() - self.pos_y)
        except AttributeError:
            # the mouse moved before any press event set pos_x/pos_y
            pass
class SelectFromPoints(QObject):
"""Select indices from points using `LassoSelector`.
"""
# selected points indices list and points list
    # ind: indices into the original xy points array,
# pts: selected points
selectedIndicesReady = pyqtSignal(QVariant, QVariant)
def __init__(self, ax, points, alpha_other=0.3, radius=0):
super(SelectFromPoints, self).__init__()
self.canvas = ax.figure.canvas
self.points = points
self.alpha_other = alpha_other
self.radius = radius
self.lasso = LassoSelector(ax, onselect=self.on_select)
def on_select(self, verts):
path = Path(verts)
ind = np.nonzero(
path.contains_points(self.points, radius=self.radius))[0]
self.canvas.draw_idle()
self.selectedIndicesReady.emit(ind, self.points[ind])
def disconnect(self):
self.lasso.disconnect_events()
self.canvas.draw_idle()
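# Sketch of driving SelectFromPoints outside the toolbar (illustrative names;
# assumes `import matplotlib.pyplot as plt` and a running Qt event loop):
#   fig, ax = plt.subplots()
#   pts = np.random.rand(100, 2)
#   ax.scatter(pts[:, 0], pts[:, 1])
#   sel = SelectFromPoints(ax, pts)
#   sel.selectedIndicesReady.connect(lambda ind, p: print(ind))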
class SnapCursor(QObject):
snap_enabled = pyqtSignal(bool, tuple)
snap_disabled = pyqtSignal(bool)
def __init__(self, ax, data_tuple=None, is_snap=True):
super(SnapCursor, self).__init__()
self.gobj = None
self.ax = ax
self.canvas = ax.figure.canvas
self.is_snap = is_snap
if is_snap:
self.set_data(data_tuple)
self.init_cursor()
self.snap_enabled.connect(self.on_enable_snap)
self.snap_disabled.connect(self.on_disable_snap)
def on_change_gobj(self, o):
self.gobj = o
self.set_data((o, *o.get_data()))
@pyqtSlot(bool, tuple)
def on_enable_snap(self, is_snap, t):
# enable snap, tuple of gobj,xy(z)data
self.is_snap = is_snap
self.set_data(t)
@pyqtSlot(bool)
def on_disable_snap(self, is_snap):
self.is_snap = is_snap
def init_cursor(self):
x0, y0 = 0, 0
self._hline = self.ax.axhline(color='#343A40', alpha=0.95)
self._vline = self.ax.axvline(color='#343A40', alpha=0.95)
self._text_x = self.ax.annotate('', xy=(x0, 1.005),
ha='center', va='bottom',
xycoords=('data', 'axes fraction'),
rotation=90, color='w',
bbox=dict(
boxstyle='larrow,pad=0.25',
fc='#007BFF', ec='b',
lw=1.0, alpha=0.95))
self._text_y = self.ax.annotate('', xy=(1.005, y0),
ha='left', va='center',
xycoords=('axes fraction', 'data'),
color='w',
bbox=dict(
boxstyle='larrow,pad=0.25',
fc='#007BFF', ec='b',
lw=1.0, alpha=0.95))
def set_data(self, t):
gobj, *tdata = t
if self.gobj is None:
self.gobj = gobj
if gobj == self.gobj:
if len(tdata) == 2:
self.set_xydata(*tdata)
else:
x, y, self.z = tdata
xdata, ydata = x[0,:], y[:,0]
self.set_xydata(xdata, ydata)
def set_xydata(self, xdata, ydata):
# set x y array data.
ascend_data = np.asarray(sorted(zip(xdata, ydata), key=lambda i:i[0]))
self.xdata = ascend_data[:, 0]
self.ydata = ascend_data[:, 1]
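    # Snapping note: np.searchsorted returns the insertion index, so with
    # xdata = [0, 1, 2, 3] a cursor at x = 1.4 snaps to index 2, i.e. the
    # point (2, ydata[2]) -- the next data point to the right, not the nearest.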
def on_move(self, pos_tuple):
if len(pos_tuple) == 2:
x, y = pos_tuple
if self.is_snap:
idx = min(np.searchsorted(self.xdata, x), len(self.xdata) - 1)
x, y = self.xdata[idx], self.ydata[idx]
xtext = "{0:g}".format(x)
ytext = "{0:g}".format(y)
else: # 3d
x, y, z = pos_tuple
if self.is_snap:
idx = min(np.searchsorted(self.xdata, x), len(self.xdata) - 1)
idy = min(np.searchsorted(self.ydata, y), len(self.ydata) - 1)
x, y, z = self.xdata[idx], self.ydata[idy], self.z[idy, idx]
xtext = "{0:g}".format(x)
ytext = "{0:g} [{1:g}]".format(y, z)
self._hline.set_ydata(y)
self._vline.set_xdata(x)
self._text_x.set_x(x)
self._text_y.set_y(y)
self._text_x.set_text(xtext)
self._text_y.set_text(ytext)
self.canvas.draw_idle()
def delete(self):
for o in (self._hline, self._vline, self._text_x, self._text_y):
o.remove()
self.canvas.draw_idle()
class SnapCursorNoDataProbe(Exception):
def __init__(self, *args, **kws):
super(self.__class__, self).__init__(*args, **kws)
class SnapCursorNotExist(Exception):
def __init__(self, *args, **kws):
super(self.__class__, self).__init__(*args, **kws)
class SnapCursorAlreadyExisted(Exception):
def __init__(self, *args, **kws):
super(self.__class__, self).__init__(*args, **kws)
|
{"hexsha": "d7a95031b2d8ddd722de89408d743c3d8afc74dc", "size": 29262, "ext": "py", "lang": "Python", "max_stars_repo_path": "mpl4qt/widgets/mpltoolbar.py", "max_stars_repo_name": "archman/python-mpl4qt", "max_stars_repo_head_hexsha": "f84fefb95113492407899206269ff82b609279b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mpl4qt/widgets/mpltoolbar.py", "max_issues_repo_name": "archman/python-mpl4qt", "max_issues_repo_head_hexsha": "f84fefb95113492407899206269ff82b609279b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mpl4qt/widgets/mpltoolbar.py", "max_forks_repo_name": "archman/python-mpl4qt", "max_forks_repo_head_hexsha": "f84fefb95113492407899206269ff82b609279b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3955223881, "max_line_length": 105, "alphanum_fraction": 0.6046066571, "include": true, "reason": "import numpy", "num_tokens": 6680}
|
\documentclass[simplex.tex]{subfiles}
% NO NEED TO INPUT PREAMBLES HERE
% packages are inherited; you can compile this on its own
\onlyinsubfile{
\title{NeuroData SIMPLEX Report: Subfile}
}
\begin{document}
\onlyinsubfile{
\maketitle
\thispagestyle{empty}
The following report documents the progress made by the labs of Randal~Burns and Joshua~T.~Vogelstein at Johns Hopkins University towards goals set by the DARPA SIMPLEX grant.
%%%% Table of Contents
\tableofcontents
%%%% Publications
\bibliographystyle{IEEEtran}
\begin{spacing}{0.5}
\section*{Publications, Presentations, and Talks}
%\vspace{-20pt}
\nocite{*}
{\footnotesize \bibliography{simplex}}
\end{spacing}
%%%% End Publications
}
\subsection{Multiscale Network Test}
To guarantee the validity and consistency of MGC applied to testing on
networks, we need an independent and identically distributed (i.i.d.)
configuration for each vertex of a graph (network) whose metric faithfully
reflects the distance between vertices. We demonstrated that Euclidean
distance on the raw adjacency matrix does not in general satisfy the i.i.d.\
assumption, whereas diffusion maps at every time step are i.i.d.\ under a
certain latent function; this is supported by the Aldous--Hoover
representation theorem and de Finetti's theorem. On the other hand, under
these theorems the graph is either empty or dense. Fortunately, we have
found that exchangeable graphs can be generated more generally, even
including sparse graphs. We ran a simple simulation to check whether MGC
works in this setting: we test independence between the diffusion maps at
each time point $t$ and a nodal attribute $X$.
For the simulation, we explored the Stochastic Block Model (SBM) and an
additive-and-multiplicative network model, both of which exhibit non-linear
dependence. What MGC does in this case is to test the distance matrix of the
diffusion maps against that of the nodal attributes, considering the
$(k,l)$ nearest neighbors in each.
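Schematically (in our notation here; the exact centering and normalization
follow the MGC construction), write $A_{ij} = \|u_t(i) - u_t(j)\|$ and
$B_{ij} = \|x_i - x_j\|$ for the pairwise distance matrices of the diffusion
maps and the nodal attributes; then
\[
\mathrm{MGC}(U_t, X) \;=\; \max_{(k,l)} \, c^{kl}(A, B),
\]
where $c^{kl}(A,B)$ is the local distance correlation computed only over the
$k$ nearest neighbors in one distance matrix and the $l$ nearest neighbors in
the other. The power maps in Figure~\ref{fig:msnt} sweep exactly this
$(k,l)$ grid.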
\begin{figure}[h!]
\begin{cframed}
\centering
\includegraphics[width=0.45\textwidth]{./figs/msnt1.png}
\includegraphics[width=0.45\textwidth]{./figs/msnt2.png}
\caption{
        The above figures illustrate power maps of a
three-block stochastic block model in terms of the $l$-nearest-neighbor
choice for the network diffusion maps and the $k$-nearest-neighbor choice
for the nodal attributes. Note also that the diffusion time
matters in testing.
}
\label{fig:msnt}
\end{cframed}
\end{figure}
Thus, if local dependency structures or nonlinearity exist, the optimal
neighborhood choice $(k,l)$ will not count every node of the network when
computing the distance correlation matrix. We also demonstrated that the
testing power of MGC applied to diffusion maps is higher on the SBM and on
the degree-corrected SBM than that of dCov, Heller--Heller--Gorfine, and the
latent factor network test proposed by Fosdick and Hoff (2015).
\end{document}
|
{"hexsha": "07ab8b88b6c9732f3716fec59a527e33d021e503", "size": 2855, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Reporting/reports/2016-12Q4/multiscaleNetworkTest.tex", "max_stars_repo_name": "openconnectome/SIMPLEX_Q2", "max_stars_repo_head_hexsha": "f10a6c4b9548670f9bf8e177914aa8d25fa1230b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Reporting/reports/2016-12Q4/multiscaleNetworkTest.tex", "max_issues_repo_name": "openconnectome/SIMPLEX_Q2", "max_issues_repo_head_hexsha": "f10a6c4b9548670f9bf8e177914aa8d25fa1230b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Reporting/reports/2016-12Q4/multiscaleNetworkTest.tex", "max_forks_repo_name": "openconnectome/SIMPLEX_Q2", "max_forks_repo_head_hexsha": "f10a6c4b9548670f9bf8e177914aa8d25fa1230b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6025641026, "max_line_length": 175, "alphanum_fraction": 0.7947460595, "num_tokens": 702}
|
import logging
import pickle
from functools import partial
import det3d.core.sampler.preprocess as prep
import numpy as np
import torch
from det3d.core.anchor.anchor_generator import (
AnchorGeneratorRange,
AnchorGeneratorStride,
BevAnchorGeneratorRange,
)
from det3d.core.bbox import region_similarity
from det3d.core.bbox.box_coders import GroundBox3dCoderAF
from det3d.core.input.voxel_generator import VoxelGenerator
from det3d.core.sampler.preprocess import DataBasePreprocessor
from det3d.core.sampler.sample_ops import DataBaseSamplerV2
from det3d.models.losses import losses
from det3d.solver import learning_schedules
from det3d.solver import learning_schedules_fastai as lsf
from det3d.solver import optim
from det3d.solver.fastai_optim import FastAIMixedOptim, OptimWrapper
from torch import nn
def build_voxel_generator(voxel_config):
voxel_generator = VoxelGenerator(
voxel_size=voxel_config.VOXEL_SIZE,
point_cloud_range=voxel_config.RANGE,
max_num_points=voxel_config.MAX_POINTS_NUM_PER_VOXEL,
max_voxels=20000,
)
return voxel_generator
def build_similarity_metric(similarity_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
similarity_type = similarity_config.type
if similarity_type == "rotate_iou_similarity":
return region_similarity.RotateIouSimilarity()
elif similarity_type == "nearest_iou_similarity":
return region_similarity.NearestIouSimilarity()
elif similarity_type == "distance_similarity":
cfg = similarity_config.distance_similarity
return region_similarity.DistanceSimilarity(
distance_norm=cfg.distance_norm,
with_rotation=cfg.with_rotation,
rotation_alpha=cfg.rotation_alpha,
)
else:
raise ValueError("unknown similarity type")
def build_db_preprocess(db_prep_config, logger=None):
logger = logging.getLogger("build_db_preprocess")
cfg = db_prep_config
if "filter_by_difficulty" in cfg:
v = cfg["filter_by_difficulty"]
return prep.DBFilterByDifficulty(v, logger=logger)
elif "filter_by_min_num_points" in cfg:
v = cfg["filter_by_min_num_points"]
return prep.DBFilterByMinNumPoint(v, logger=logger)
else:
raise ValueError("unknown database prep type")
def children(m: nn.Module):
"Get children of `m`."
return list(m.children())
def num_children(m: nn.Module) -> int:
"Get number of children modules in `m`."
return len(children(m))
def flatten_model(m: nn.Module):
return sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
def get_layer_groups(m: nn.Module):
return [nn.Sequential(*flatten_model(m))]
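# Illustrative example (hypothetical, not part of the upstream API):
# flatten_model(nn.Sequential(nn.Sequential(nn.Linear(2, 2), nn.ReLU())))
# returns [Linear(2, 2), ReLU()], i.e. the leaf modules in order, and
# get_layer_groups() wraps them into a single nn.Sequential "layer group"
# as expected by OptimWrapper below.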
def build_optimizer(optimizer_config, net, name=None, mixed=False, loss_scale=512.0):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
        An optimizer wrapped for training.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.TYPE
config = optimizer_config.VALUE
if optimizer_type == "rms_prop_optimizer":
optimizer_func = partial(
torch.optim.RMSprop,
alpha=config.decay,
momentum=config.momentum_optimizer_value,
eps=config.epsilon,
)
elif optimizer_type == "momentum_optimizer":
        # NOTE: torch.optim.SGD takes no epsilon argument, so only the
        # momentum value is forwarded here.
        optimizer_func = partial(
            torch.optim.SGD,
            momentum=config.momentum_optimizer_value,
        )
elif optimizer_type == "adam":
if optimizer_config.FIXED_WD:
optimizer_func = partial(
torch.optim.Adam, betas=(0.9, 0.99), amsgrad=config.amsgrad
)
else:
# regular adam
optimizer_func = partial(torch.optim.Adam, amsgrad=config.amsgrad)
optimizer = OptimWrapper.create(
optimizer_func,
3e-3,
get_layer_groups(net),
wd=config.WD,
true_wd=optimizer_config.FIXED_WD,
bn_wd=True,
)
if optimizer is None:
raise ValueError("Optimizer %s not supported." % optimizer_type)
if optimizer_config.MOVING_AVERAGE:
raise ValueError("torch don't support moving average")
if name is None:
# assign a name to optimizer for checkpoint system
optimizer.name = optimizer_type
else:
optimizer.name = name
return optimizer
def build_lr_scheduler(optimizer, optimizer_config, total_step):
"""Create lr scheduler based on config. note that
lr_scheduler must accept a optimizer that has been restored.
Args:
optimizer_config: A Optimizer proto message.
    Returns:
        A learning rate scheduler.
Raises:
ValueError: when using an unsupported input data type.
"""
    optimizer_type = optimizer_config.type
    config = optimizer_config
    if optimizer_type in ("rms_prop_optimizer", "momentum_optimizer", "adam"):
        lr_scheduler = _create_learning_rate_scheduler(
            optimizer, config, total_step=total_step
        )
    else:
        raise ValueError("Optimizer %s not supported." % optimizer_type)
    return lr_scheduler
def _create_learning_rate_scheduler(optimizer, learning_rate_config, total_step):
"""Create optimizer learning rate scheduler based on config.
Args:
learning_rate_config: A LearningRate proto message.
    Returns:
        A learning rate scheduler.
Raises:
ValueError: when using an unsupported input data type.
"""
lr_scheduler = None
learning_rate_type = learning_rate_config.type
config = learning_rate_config
if learning_rate_type == "multi_phase":
lr_phases = []
mom_phases = []
for phase_cfg in config.phases:
lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))
mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))
lr_scheduler = lsf.LRSchedulerStep(optimizer, total_step, lr_phases, mom_phases)
elif learning_rate_type == "one_cycle":
lr_scheduler = lsf.OneCycle(
optimizer,
total_step,
config.lr_max,
config.moms,
config.div_factor,
config.pct_start,
)
elif learning_rate_type == "exponential_decay":
lr_scheduler = lsf.ExponentialDecay(
optimizer,
total_step,
config.initial_learning_rate,
config.decay_length,
config.decay_factor,
config.staircase,
)
elif learning_rate_type == "manual_stepping":
lr_scheduler = lsf.ManualStepping(
optimizer, total_step, config.boundaries, config.rates
)
    else:
        raise ValueError("Learning_rate %s not supported." % learning_rate_type)
return lr_scheduler
def build_loss(loss_config):
"""Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
"""
classification_loss = _build_classification_loss(loss_config.classification_loss)
localization_loss = _build_localization_loss(loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None # 'Pytorch don\'t support HardExampleMiner'
return (
classification_loss,
localization_loss,
classification_weight,
localization_weight,
hard_example_miner,
)
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
loss_type = loss_config.TYPE
config = loss_config.VALUE
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
return losses.WeightedSoftmaxClassificationLoss(logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
loss_type = loss_config.type
config = loss_config
if loss_type == "weighted_l2":
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedL2LocalizationLoss(code_weight)
if loss_type == "weighted_smooth_l1":
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedSmoothL1LocalizationLoss(config.sigma, code_weight)
if loss_type == "weighted_ghm":
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return GHMRLoss(config.mu, config.bins, config.momentum, code_weight)
raise ValueError("Empty loss config.")
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
loss_type = loss_config.TYPE
config = loss_config.VALUE
if loss_type == "weighted_sigmoid":
return losses.WeightedSigmoidClassificationLoss()
elif loss_type == "weighted_sigmoid_focal":
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SigmoidFocalClassificationLoss(gamma=config.gamma, alpha=alpha)
elif loss_type == "weighted_softmax_focal":
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SoftmaxFocalClassificationLoss(gamma=config.gamma, alpha=alpha)
elif loss_type == "weighted_ghm":
return GHMCLoss(bins=config.bins, momentum=config.momentum)
elif loss_type == "weighted_softmax":
return losses.WeightedSoftmaxClassificationLoss(logit_scale=config.logit_scale)
elif loss_type == "bootstrapped_sigmoid":
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=("hard" if config.hard_bootstrap else "soft"),
)
raise ValueError("Empty loss config.")
def build_dbsampler(cfg, logger=None):
logger = logging.getLogger("build_dbsampler")
prepors = [build_db_preprocess(c, logger=logger) for c in cfg.db_prep_steps]
db_prepor = DataBasePreprocessor(prepors)
rate = cfg.rate
grot_range = cfg.global_random_rotation_range_per_object
groups = cfg.sample_groups
# groups = [dict(g.name_to_max_num) for g in groups]
info_path = cfg.db_info_path
with open(info_path, "rb") as f:
db_infos = pickle.load(f)
grot_range = list(grot_range)
if len(grot_range) == 0:
grot_range = None
sampler = DataBaseSamplerV2(
db_infos, groups, db_prepor, rate, grot_range, logger=logger
)
return sampler
def build_box_coder(box_coder_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
box_coder_type = box_coder_config["type"]
cfg = box_coder_config
n_dim = cfg.get("n_dim", 9)
norm_velo = cfg.get("norm_velo", False)
if box_coder_type == "ground_box3d_coder_anchor_free":
return GroundBox3dCoderAF(
cfg["velocity"],
cfg["center"],
cfg["height"],
cfg["dim"],
cfg["rotation"],
cfg["pc_range"],
cfg["kwargs"]
)
else:
raise ValueError("unknown box_coder type")
def build_anchor_generator(anchor_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
ag_type = anchor_config.type
config = anchor_config
if "velocities" not in config:
velocities = None
else:
velocities = config.velocities
if ag_type == "anchor_generator_stride":
ag = AnchorGeneratorStride(
sizes=config.sizes,
anchor_strides=config.strides,
anchor_offsets=config.offsets,
rotations=config.rotations,
velocities=velocities,
match_threshold=config.matched_threshold,
unmatch_threshold=config.unmatched_threshold,
class_name=config.class_name,
)
return ag
elif ag_type == "anchor_generator_range":
ag = AnchorGeneratorRange(
sizes=config.sizes,
anchor_ranges=config.anchor_ranges,
rotations=config.rotations,
velocities=velocities,
match_threshold=config.matched_threshold,
unmatch_threshold=config.unmatched_threshold,
class_name=config.class_name,
)
return ag
elif ag_type == "bev_anchor_generator_range":
ag = BevAnchorGeneratorRange(
sizes=config.sizes,
anchor_ranges=config.anchor_ranges,
rotations=config.rotations,
velocities=velocities,
match_threshold=config.matched_threshold,
unmatch_threshold=config.unmatched_threshold,
class_name=config.class_name,
)
return ag
else:
raise ValueError(" unknown anchor generator type")
|
{"hexsha": "2925130b43dd80debb9328f74d59b03e476b8eb9", "size": 15031, "ext": "py", "lang": "Python", "max_stars_repo_path": "det3d/builder.py", "max_stars_repo_name": "Lelin-HUNUST/VISTA", "max_stars_repo_head_hexsha": "7bf34132d719cb0e5e803b92cd15451df58a9a5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2022-03-21T02:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:25:29.000Z", "max_issues_repo_path": "det3d/builder.py", "max_issues_repo_name": "Lelin-HUNUST/VISTA", "max_issues_repo_head_hexsha": "7bf34132d719cb0e5e803b92cd15451df58a9a5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-28T15:11:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T16:27:40.000Z", "max_forks_repo_path": "det3d/builder.py", "max_forks_repo_name": "Lelin-HUNUST/VISTA", "max_forks_repo_head_hexsha": "7bf34132d719cb0e5e803b92cd15451df58a9a5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-23T12:56:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T14:25:50.000Z", "avg_line_length": 30.9279835391, "max_line_length": 88, "alphanum_fraction": 0.6762025148, "include": true, "reason": "import numpy", "num_tokens": 3175}
|
import numpy as np
import polars as pl
import talib
from talib import abstract
from talib.test_data import series, assert_np_arrays_equal
def test_MOM():
values = pl.Series([90.0,88.0,89.0])
result = talib.MOM(values, timeperiod=1)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan, -2, 1])
result = talib.MOM(values, timeperiod=2)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan, np.nan, -1])
result = talib.MOM(values, timeperiod=3)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan, np.nan, np.nan])
result = talib.MOM(values, timeperiod=4)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan, np.nan, np.nan])
def test_MAVP():
a = pl.Series([1,5,3,4,7,3,8,1,4,6], dtype=pl.Float64)
b = pl.Series([2,4,2,4,2,4,2,4,2,4], dtype=pl.Float64)
result = talib.MAVP(a, b, minperiod=2, maxperiod=4)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan,np.nan,np.nan,3.25,5.5,4.25,5.5,4.75,2.5,4.75])
sma2 = talib.SMA(a, 2)
assert isinstance(sma2, pl.Series)
assert_np_arrays_equal(result.to_numpy()[4::2], sma2.to_numpy()[4::2])
sma4 = talib.SMA(a, 4)
assert isinstance(sma4, pl.Series)
assert_np_arrays_equal(result.to_numpy()[3::2], sma4.to_numpy()[3::2])
result = talib.MAVP(a, b, minperiod=2, maxperiod=3)
assert isinstance(result, pl.Series)
assert_np_arrays_equal(result.to_numpy(), [np.nan,np.nan,4,4,5.5,4.666666666666667,5.5,4,2.5,3.6666666666666665])
sma3 = talib.SMA(a, 3)
assert isinstance(sma3, pl.Series)
assert_np_arrays_equal(result.to_numpy()[2::2], sma2.to_numpy()[2::2])
assert_np_arrays_equal(result.to_numpy()[3::2], sma3.to_numpy()[3::2])
def test_TEMA():
size = 50
df = pl.DataFrame(
{
"open": np.random.uniform(low=0.0, high=100.0, size=size).astype("float32"),
"high": np.random.uniform(low=0.0, high=100.0, size=size).astype("float32"),
"low": np.random.uniform(low=0.0, high=100.0, size=size).astype("float32"),
"close": np.random.uniform(low=0.0, high=100.0, size=size).astype("float32"),
"volume": np.random.uniform(low=0.0, high=100.0, size=size).astype("float32")
}
)
tema1 = abstract.TEMA(df, timeperiod=9)
assert isinstance(tema1, pl.Series)
assert len(tema1) == 50
inputs = abstract.TEMA.get_input_arrays()
assert inputs.columns == df.columns
for column in df.columns:
assert_np_arrays_equal(inputs[column].to_numpy(), df[column].to_numpy())
tema2 = abstract.TEMA(df, timeperiod=9)
assert isinstance(tema2, pl.Series)
assert len(tema2) == 50
inputs = abstract.TEMA.get_input_arrays()
assert inputs.columns == df.columns
for column in df.columns:
assert_np_arrays_equal(inputs[column].to_numpy(), df[column].to_numpy())
assert_np_arrays_equal(tema1.to_numpy(), tema2.to_numpy())
|
{"hexsha": "60f390e80f83e3b774cf84d7080d63fe70191f03", "size": 3074, "ext": "py", "lang": "Python", "max_stars_repo_path": "talib/test_polars.py", "max_stars_repo_name": "aberja/ta-lib", "max_stars_repo_head_hexsha": "75fbfa86824b675ac03b7e30aaa2eaade8a817cc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-10T02:51:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:51:59.000Z", "max_issues_repo_path": "talib/test_polars.py", "max_issues_repo_name": "aberja/ta-lib", "max_issues_repo_head_hexsha": "75fbfa86824b675ac03b7e30aaa2eaade8a817cc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "talib/test_polars.py", "max_forks_repo_name": "aberja/ta-lib", "max_forks_repo_head_hexsha": "75fbfa86824b675ac03b7e30aaa2eaade8a817cc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-31T11:51:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-31T11:51:01.000Z", "avg_line_length": 43.2957746479, "max_line_length": 117, "alphanum_fraction": 0.6720884841, "include": true, "reason": "import numpy", "num_tokens": 943}
|
# Tetris square class
import pygame
# import numpy
class Square:
def __init__(self, pygame_screen, color, column, row):
self.pygame_screen = pygame_screen
self.color = color
# self.grid_coordinates = (col, row)
self.row = row
self.column = column
self.screen_coordinates = (0, 0)
self.image = pygame.image.load("files/squares/color%d.jpg" % color)
def blit(self):
if self.row >= 0:
self.convert_grid_to_screen()
self.pygame_screen.blit(self.image, self.screen_coordinates)
    def convert_grid_to_screen(self):
        # 29 px per grid cell; (147, 193) is the screen position of the board's top-left cell
        self.screen_coordinates = ((self.column*29)+147, (self.row*29)+193)
def move_down(self):
# self.grid_coordinates = tuple(numpy.add(self.grid_coordinates, (0, 1)))
self.row += 1
def move_sideways(self, direction):
if direction == 'right':
# self.grid_coordinates = tuple(numpy.add(self.grid_coordinates, (1, 0)))
self.column += 1
else:
# self.grid_coordinates = tuple(numpy.subtract(self.grid_coordinates, (1, 0)))
self.column -= 1
# def get_grid_coordinates(self):
# return self.grid_coordinates
def get_row(self):
return self.row
def get_column(self):
return self.column
|
{"hexsha": "5939743b5d9ac31f77b6830d511c0c7276e1bb5a", "size": 1322, "ext": "py", "lang": "Python", "max_stars_repo_path": "Square.py", "max_stars_repo_name": "palu3492/tetris-game-new", "max_stars_repo_head_hexsha": "b3c980dacefe0ab53f1a4ad474e5319966ecb781", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Square.py", "max_issues_repo_name": "palu3492/tetris-game-new", "max_issues_repo_head_hexsha": "b3c980dacefe0ab53f1a4ad474e5319966ecb781", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Square.py", "max_forks_repo_name": "palu3492/tetris-game-new", "max_forks_repo_head_hexsha": "b3c980dacefe0ab53f1a4ad474e5319966ecb781", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7391304348, "max_line_length": 90, "alphanum_fraction": 0.6187594554, "include": true, "reason": "import numpy", "num_tokens": 303}
|
import pandas as pd
import seaborn
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import ColumnDataSource, FactorRange, CategoricalAxis, HoverTool
from numpy import nan
import os
file_path = os.path.dirname(os.path.abspath(__file__))
palette = seaborn.color_palette("GnBu", 2).as_hex()*10
df = pd.read_excel(os.path.join(file_path, "data/Refined Features checklist.xlsx"), "refined")
categories = df[["Category", "Features", "Definition"]]
unique_categories = df["Category"].drop_duplicates().tolist()
# df = df[["Features", "NVivo", "Dedoose", "QDA Miner", "Atlas.ti", "Transana", "TOM", "MAXQDA"]]
df.drop(["Definition", "Category"], inplace=True, axis=1)
df.dropna(axis=1, how="all", inplace=True)
tool_list = df.columns.tolist()[1:]
df = df.loc[~df.Features.isnull()]
df = df.melt(id_vars=["Features"], var_name=["Tool"])
df['Tool'] = pd.Categorical(df['Tool'], tool_list)
df["Features"] = pd.Categorical(df["Features"], df["Features"].drop_duplicates(keep="first").tolist())
df["value"].replace(0, nan, inplace=True)
df = df.loc[~df.value.isnull()]
categories = categories[["Features", "Category", "Definition"]]
df = df.merge(categories, how="left", on=["Features"])
y_range = FactorRange(factors=[i for i in df.sort_values(by="Category")[["Category", "Features"]].drop_duplicates().values.tolist()[::-1]])
x_range = FactorRange(factors=df["Tool"].drop_duplicates().tolist())
choose_color = 0
for c in df.sort_values(by="Category")["Category"].drop_duplicates().tolist():
df.loc[df.Category == c, "color"] = palette[choose_color]
choose_color += 1
source = ColumnDataSource(data=dict(tool=df["Tool"].tolist(), feature=df[["Category", "Features"]].values.tolist(),
color=df["color"].tolist(), category=df["Category"].tolist(),
desc=df["Definition"].tolist()))
hover = HoverTool(tooltips=[("Tool", "@tool"),
("Feature Definition", "@desc")])
p = figure(x_range=x_range, y_range=y_range, plot_height=7500, plot_width=1500, x_axis_location="above", tools=[hover])
p.circle(x="tool", y="feature", color="black", source=source, size=28)
p.add_layout(CategoricalAxis(), 'below')
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
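# The figure is fully assembled at this point; render it in the browser.
# (show() is imported at the top but never called; invoking it here is an
# assumption about how this script is meant to be run standalone.)
show(p)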
|
{"hexsha": "a25dac758f3ed0935ff3628d4a82897c18a1a2f0", "size": 2329, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/features_checklist.py", "max_stars_repo_name": "jannettim/MCDM", "max_stars_repo_head_hexsha": "cc1ade5a53e844cd7e23055a2811173e2e6d08ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/features_checklist.py", "max_issues_repo_name": "jannettim/MCDM", "max_issues_repo_head_hexsha": "cc1ade5a53e844cd7e23055a2811173e2e6d08ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-05-03T17:36:03.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-03T18:36:26.000Z", "max_forks_repo_path": "app/features_checklist.py", "max_forks_repo_name": "jannettim/MCDM", "max_forks_repo_head_hexsha": "cc1ade5a53e844cd7e23055a2811173e2e6d08ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-05-03T15:14:55.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-10T18:13:16.000Z", "avg_line_length": 34.7611940299, "max_line_length": 139, "alphanum_fraction": 0.6788321168, "include": true, "reason": "from numpy", "num_tokens": 589}
|
import os
import pandas as pd
import numpy as np
import json
from sklearn.preprocessing import minmax_scale
data_config = json.load(open("data_config.json", "r"))
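# A minimal example of the expected data_config.json, inferred from the keys
# accessed below (paths and feature names here are illustrative only):
# {
#     "csv_dir": "raw_csv/",
#     "data_dir": "prepared_data/",
#     "nan_replace": 0.0,
#     "input_features": ["irradiance", "ambient_temp"],
#     "output_feature": "power",
#     "feature_cleaning": {
#         "irradiance": {"valid_time": [6, 20], "invalid_time_value": 0}
#     },
#     "train_test_val_split": [0.7, 0.2, 0.1]
# }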
def get_dataframe():
csv_dir = data_config["csv_dir"]
data_initialized = False
for f in os.listdir(csv_dir):
csv_filepath = os.path.join(csv_dir, f)
        # DataFrame.from_csv was removed from pandas; read_csv with
        # index_col=0 and parse_dates=True reproduces its defaults.
        new_data = pd.read_csv(csv_filepath, encoding="ISO-8859-1", sep=';',
                               index_col=0, parse_dates=True)
        new_data = new_data.fillna(data_config["nan_replace"])
if not data_initialized:
data = new_data
data_initialized = True
else:
            data = pd.concat([data, new_data])
return data.sort_index()
def clean_feature(dataframe, feature_name, valid_time_period=None, invalid_time_value=0):
feature = dataframe[feature_name]
if valid_time_period:
times_list = [i.hour for i in feature.index.time]
invalid_times = [not(valid_time_period[1] > i > valid_time_period[0]) for i in times_list]
feature.loc[invalid_times] = invalid_time_value
dataframe[feature_name] = feature
return dataframe
def split_and_save_data(dataframe, save_data_stats=True):
data_dir = data_config["data_dir"]
if not os.path.exists(data_dir):
os.makedirs(data_dir)
data_stats = {}
# Clean all features
feature_names = data_config["input_features"]
for feature_name in feature_names:
if feature_name not in data_config["feature_cleaning"]:
continue
clean_settings = data_config["feature_cleaning"][feature_name]
valid_time = clean_settings["valid_time"]
invalid_time_value = clean_settings["invalid_time_value"]
dataframe = clean_feature(dataframe, feature_name, valid_time_period=valid_time, invalid_time_value=invalid_time_value)
output = dataframe[data_config["output_feature"]]
o_max = output.max()
o_min = output.min()
o_mean = output.mean()
data_stats["output"] = {"min": o_min,
"max": o_max,
"mean": o_mean}
output = minmax_scale(output)
output[np.isnan(output)] = data_config["nan_replace"]
feature_stats = {}
features = []
for f in feature_names:
feature = dataframe[f]
f_min = feature.min()
f_max = feature.max()
f_mean = feature.mean()
feature = minmax_scale(feature)
feature[np.isnan(feature)] = data_config["nan_replace"]
features.append(feature)
feature_stats[f] = {"min": f_min,
"max": f_max,
"mean": f_mean}
features = np.array(features)
data_stats["features"] = feature_stats
n_points = output.shape[0]
print("[INFO] Number of data points = {}".format(n_points))
print("[INFO] Number of features = {}".format(features.shape[0]))
idx_1 = int(n_points * data_config["train_test_val_split"][0])
idx_2 = int(idx_1 + n_points * data_config["train_test_val_split"][1])
train_output, train_features = output[:idx_1], features[:, :idx_1]
test_output, test_features = output[idx_1: idx_2], features[:, idx_1: idx_2]
val_output, val_features = output[idx_2:], features[:, idx_2:]
print("[INFO] Number of train points: {}".format(len(train_output)))
print("[INFO] Number of test points: {}".format(len(test_output)))
print("[INFO] Number of validation points: {}".format(len(val_output)))
np.save(os.path.join(data_dir, "train_output.npy"), train_output)
np.save(os.path.join(data_dir, "train_features.npy"), train_features)
np.save(os.path.join(data_dir, "test_output.npy"), test_output)
np.save(os.path.join(data_dir, "test_features.npy"), test_features)
np.save(os.path.join(data_dir, "val_output.npy"), val_output)
np.save(os.path.join(data_dir, "val_features.npy"), val_features)
if save_data_stats:
print("[INFO] Saving data statistics...")
json.dump(data_stats, open(os.path.join(data_dir, "data_stats.json"), "w"))
if __name__ == '__main__':
data = get_dataframe()
split_and_save_data(data)
|
{"hexsha": "c7dbd3e4a155f6e3a3acef00a048606974c2e90e", "size": 4091, "ext": "py", "lang": "Python", "max_stars_repo_path": "smart_alarm_repo_main/smart_alarm/sensehawk_smart_alarm_local/smart_alarm/main/sensehawk_scada_lstm_tut/dataset_create_lstm_scada.py", "max_stars_repo_name": "codersupreme99101/SmartAlarm", "max_stars_repo_head_hexsha": "9aa05e1e82a05e592eda3765c3e51e83a74710db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "smart_alarm_repo_main/smart_alarm/sensehawk_smart_alarm_local/smart_alarm/main/sensehawk_scada_lstm_tut/dataset_create_lstm_scada.py", "max_issues_repo_name": "codersupreme99101/SmartAlarm", "max_issues_repo_head_hexsha": "9aa05e1e82a05e592eda3765c3e51e83a74710db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smart_alarm_repo_main/smart_alarm/sensehawk_smart_alarm_local/smart_alarm/main/sensehawk_scada_lstm_tut/dataset_create_lstm_scada.py", "max_forks_repo_name": "codersupreme99101/SmartAlarm", "max_forks_repo_head_hexsha": "9aa05e1e82a05e592eda3765c3e51e83a74710db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5943396226, "max_line_length": 127, "alphanum_fraction": 0.6646296749, "include": true, "reason": "import numpy", "num_tokens": 942}
|
import os
import struct
import redis
import numpy as np
from yolact import Yolact
from utils.augmentations import FastBaseTransform
from utils.functions import SavePath
from layers.output_utils import postprocess
from data import cfg, set_cfg
import torch
import torch.backends.cudnn as cudnn
# Detection
trained_model = 'weights/yolact_plus_resnet50_54_800000.pth'
model_path = SavePath.from_str(trained_model)
config = model_path.model_name + '_config'
set_cfg(config)
score_threshold = 0.15
top_k = 5
with torch.no_grad():
cudnn.fastest = True
torch.set_default_tensor_type('torch.cuda.FloatTensor')
print('Loading model...', end='')
net = Yolact()
net.load_weights(trained_model)
net.eval()
print(' Done.')
net = net.cuda()
net.detect.use_fast_nms = True
net.detect.use_cross_class_nms = False
cfg.mask_proto_debug = False
# Redis
r = redis.Redis(host=os.getenv('SMART_SPACE_TRACKING_REDIS_PORT_6379_TCP_ADDR'), port=6379, db=0)
pipe = r.pipeline()
p = r.pubsub()
def fromRedis(r, name):
encoded = r.get(name)
if struct.unpack('>I', encoded[:4])[0] == 3:
h, w, c = struct.unpack('>III', encoded[4:16])
a = np.frombuffer(encoded[16:], dtype=np.uint8).reshape(h, w, c)
else:
h, w = struct.unpack('>II', encoded[4:12])
a = np.frombuffer(encoded[12:], dtype=np.uint16).reshape(h, w)
return a
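def toRedis(r, name, a):
    # Hypothetical producer-side counterpart of fromRedis() (not part of the
    # original pipeline): packs ndim and the shape in the same big-endian
    # struct layout that fromRedis() above decodes, assuming uint8 color
    # images (HWC) and uint16 depth maps (HW).
    if a.ndim == 3:
        h, w, c = a.shape
        encoded = struct.pack('>IIII', 3, h, w, c) + a.astype(np.uint8).tobytes()
    else:
        h, w = a.shape
        encoded = struct.pack('>III', 2, h, w) + a.astype(np.uint16).tobytes()
    r.set(name, encoded)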
def processing(dets_out, img):
boxes = np.array([])
masks = np.array([])
h, w, _ = img.shape
cfg.rescore_bbox = True
t = postprocess(dets_out, w, h, score_threshold=score_threshold)
idx = t[1].argsort(0, descending=True)[:top_k]
if cfg.eval_mask_branch:
# Masks are drawn on the GPU, so don't copy
masks = t[3][idx]
classes, scores, boxes = [x[idx].cpu().numpy() for x in t[:3]]
boxes = boxes[classes == 0]
masks = masks[classes == 0]
scores = scores[classes == 0]
boxes = boxes[scores >= 0.6]
masks = masks[scores >= 0.6]
scores = scores[scores >= 0.6]
return boxes, masks.to(torch.bool).detach().cpu().numpy(), scores
def detect(msg):
# import time
# start = time.time()
msg_data = msg['data'].split()
color = fromRedis(r, msg_data[0])
color = torch.from_numpy(color).cuda().float()
batch = FastBaseTransform()(color.unsqueeze(0))
preds = net(batch)
boxes, masks, scores = processing(preds, color)
scores = np.float64(scores)
if len(masks.shape) > 3:
masks = np.squeeze(masks)
if len(masks) > 0:
pipe.set('detect_scores', scores.tobytes())
pipe.set('detect_boxes', boxes.tobytes())
pipe.set('detect_masks',
struct.pack('>IIII', 3, masks.shape[0], masks.shape[1], masks.shape[2]) + masks.tobytes())
else:
pipe.set('detect_scores', np.array([]).tobytes())
pipe.set('detect_boxes', np.array([]).tobytes())
pipe.set('detect_masks', np.array([]).tobytes())
pipe.execute()
r.publish('tracker-server',
'frame_color frame_depth frame_colorized'
' depth_scale'
' detect_scores detect_boxes detect_masks')
# print('\rTime: {}'.format(time.time() - start), end='')
if __name__ == '__main__':
p.subscribe(**{'detection-server': detect})
thread = p.run_in_thread(sleep_time=0.00001)
thread.join()
|
{"hexsha": "a87b0623e5454026d32d1922ff317668a1860f95", "size": 3373, "ext": "py", "lang": "Python", "max_stars_repo_path": "Modules/yolact/server.py", "max_stars_repo_name": "NikAbba/video_tracking", "max_stars_repo_head_hexsha": "c624a9d3596befa4a941e4ff4092b9545bfdd28d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Modules/yolact/server.py", "max_issues_repo_name": "NikAbba/video_tracking", "max_issues_repo_head_hexsha": "c624a9d3596befa4a941e4ff4092b9545bfdd28d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modules/yolact/server.py", "max_forks_repo_name": "NikAbba/video_tracking", "max_forks_repo_head_hexsha": "c624a9d3596befa4a941e4ff4092b9545bfdd28d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-23T19:12:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-23T19:12:44.000Z", "avg_line_length": 26.1472868217, "max_line_length": 107, "alphanum_fraction": 0.6400830122, "include": true, "reason": "import numpy", "num_tokens": 911}
|
// Copyright (c) 2017-2019 The QuantisNet Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "spork.h"
#include "base58.h"
#include "chainparams.h"
#include "validation.h"
#include "messagesigner.h"
#include "net_processing.h"
#include "netmessagemaker.h"
#include "checkpoints.h"
#include <algorithm>
#include <boost/lexical_cast.hpp>
CSporkManager sporkManager;
std::map<uint256, CSporkMessage> mapSporks;
std::map<int, int64_t> mapSporkDefaults = {
{SPORK_2_INSTANTSEND_ENABLED, 0}, // ON
{SPORK_3_INSTANTSEND_BLOCK_FILTERING, 0}, // ON
{SPORK_5_INSTANTSEND_MAX_VALUE, 1000}, // 1000 QUAN
{SPORK_6_NEW_SIGS, 1551207600ULL}, // ON @mainnet
{SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT, 1541375412ULL}, // ON
{SPORK_9_SUPERBLOCKS_ENABLED, 0}, // ON
{SPORK_10_MASTERNODE_PAY_UPDATED_NODES, 1551200400ULL}, // ON @mainnet
{SPORK_12_RECONSIDER_BLOCKS, 0}, // 0 BLOCKS
{SPORK_14_REQUIRE_SENTINEL_FLAG, 1545415606ULL}, // ON
{SPORK_15_FIRST_POS_BLOCK, 315ULL}, // ON @mainnet
{SPORK_16_MASTERNODE_MIN_PROTOCOL, MIN_PEER_PROTO_VERSION_BEFORE_ENFORCEMENT }, // Actual
{SPORK_17_NEWPROTO_ENFORCE, 1563195813ULL}, // July 15th
};
SporkCheckpointMap mapSporkCheckpoints GUARDED_BY(cs_main);
SporkBlacklistMap mapSporkBlacklist GUARDED_BY(cs_main);
void CSporkManager::ProcessSpork(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, CConnman& connman)
{
if(fLiteMode) return; // disable all QuantisNet specific functionality
if (strCommand == NetMsgType::SPORK) {
CSporkMessage spork;
vRecv >> spork;
uint256 hash = spork.GetHash();
std::string strLogMsg;
{
LOCK(cs_main);
pfrom->setAskFor.erase(hash);
if(!chainActive.Tip()) return;
strLogMsg = strprintf("SPORK -- hash: %s id: %d value: %10d bestHeight: %d peer=%d", hash.ToString(), spork.nSporkID, spork.nValue, chainActive.Height(), pfrom->id);
}
if(mapSporksActive.count(spork.nSporkID)) {
if (mapSporksActive[spork.nSporkID].nTimeSigned >= spork.nTimeSigned) {
LogPrint("spork", "%s seen\n", strLogMsg);
return;
} else {
LogPrintf("%s updated\n", strLogMsg);
}
} else {
LogPrintf("%s new\n", strLogMsg);
}
if(!spork.CheckSignature(sporkPubKeyID)) {
LOCK(cs_main);
LogPrintf("CSporkManager::ProcessSpork -- ERROR: invalid signature\n");
Misbehaving(pfrom->GetId(), 100);
return;
}
mapSporks[hash] = spork;
mapSporksActive[spork.nSporkID] = spork;
spork.Relay(connman);
        // Perform any side effect the new spork value requires
ExecuteSpork(spork.nSporkID, spork.nValue);
} else if (strCommand == NetMsgType::GETSPORKS) {
std::map<int, CSporkMessage>::iterator it = mapSporksActive.begin();
while(it != mapSporksActive.end()) {
connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SPORK, it->second));
it++;
}
// Dynamic checkpoints are closely related to sporks functionality
if (pfrom->nVersion >= SPORK_CHECKPOINT_VERSION) {
LOCK(cs_main);
auto min_time = GetAdjustedTime() - CSporkCheckpoint::MAX_AGE;
for (auto iter = mapSporkCheckpoints.begin(); iter != mapSporkCheckpoints.end();) {
auto& cp = iter->second;
                // Avoid polluting the network with old checkpoints
if (cp.nTimeSigned < min_time) {
// Cleanup active as side-effect
auto active_iter = mapCheckpointsActive.find(cp.nHeight);
if ((active_iter != mapCheckpointsActive.end()) &&
(active_iter->second == cp)
) {
mapCheckpointsActive.erase(active_iter);
}
auto todel = iter++;
mapSporkCheckpoints.erase(todel);
continue;
}
connman.PushMessage(
pfrom,
CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::CHECKPOINT, cp));
++iter;
}
}
// Dynamic blacklists are closely related to sporks functionality
if (pfrom->nVersion >= SPORK_BLACKLIST_VERSION) {
LOCK(cs_main);
auto min_time = GetAdjustedTime() - CSporkBlacklist::MAX_AGE;
for (auto iter = mapSporkBlacklist.begin(); iter != mapSporkBlacklist.end();) {
auto& bl = iter->second;
                // Avoid polluting the network with old blacklist entries
if (bl.nTimeSigned < min_time) {
// Cleanup active as side-effect
auto active_iter = mapBlacklistActive.find(bl.scriptPubKey);
if ((active_iter != mapBlacklistActive.end()) &&
(active_iter->second == bl)
) {
mapBlacklistActive.erase(active_iter);
}
auto todel = iter++;
mapSporkBlacklist.erase(todel);
continue;
}
connman.PushMessage(
pfrom,
CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::BLACKLIST, bl));
++iter;
}
}
} else if (strCommand == NetMsgType::CHECKPOINT) {
CSporkCheckpoint checkpoint;
vRecv >> checkpoint;
{
auto height = checkpoint.nHeight;
auto hash = checkpoint.GetHash();
LOCK(cs_main);
pfrom->setAskFor.erase(hash);
std::string strLogMsg = strprintf("DYNCHECKPOINT -- hash: %s height: %d block: %s peer=%d", hash.ToString(), height, checkpoint.hashBlock.ToString(), pfrom->id);
auto iter = mapCheckpointsActive.find(height);
if(iter != mapCheckpointsActive.end()) {
if (iter->second.nTimeSigned >= checkpoint.nTimeSigned) {
LogPrint("spork", "%s seen\n", strLogMsg);
return;
} else {
LogPrintf("%s updated\n", strLogMsg);
}
} else {
LogPrintf("%s new\n", strLogMsg);
}
if(!checkpoint.CheckSignature(sporkPubKeyID)) {
LogPrintf("CSporkManager::ProcessSpork checkpoint -- ERROR: invalid signature\n");
Misbehaving(pfrom->GetId(), 100);
return;
}
mapSporkCheckpoints[hash] = checkpoint;
mapCheckpointsActive[height] = checkpoint;
ExecuteCheckpoint(height, checkpoint.hashBlock);
}
checkpoint.Relay(connman);
} else if (strCommand == NetMsgType::BLACKLIST) {
CSporkBlacklist blacklist;
vRecv >> blacklist;
{
auto scriptPubKey = blacklist.scriptPubKey;
auto hash = blacklist.GetHash();
LOCK(cs_main);
pfrom->setAskFor.erase(hash);
std::string strLogMsg = strprintf("DYNBLACKLIST -- hash: %s script: %d since: %s peer=%d",
hash.ToString(),
HexStr(scriptPubKey).c_str(),
blacklist.nTimeSince, pfrom->id);
auto iter = mapBlacklistActive.find(scriptPubKey);
if(iter != mapBlacklistActive.end()) {
if (iter->second.nTimeSigned >= blacklist.nTimeSigned) {
LogPrint("spork", "%s seen\n", strLogMsg);
return;
} else {
LogPrintf("%s updated\n", strLogMsg);
}
} else {
LogPrintf("%s new\n", strLogMsg);
}
if(!blacklist.CheckSignature(sporkPubKeyID)) {
LogPrintf("CSporkManager::ProcessSpork blacklist -- ERROR: invalid signature\n");
Misbehaving(pfrom->GetId(), 100);
return;
}
mapSporkBlacklist[hash] = blacklist;
mapBlacklistActive[scriptPubKey] = blacklist;
ExecuteBlacklist(scriptPubKey, blacklist.nTimeSince);
}
blacklist.Relay(connman);
}
}
void CSporkManager::ExecuteSpork(int nSporkID, int nValue)
{
//correct fork via spork technology
if(nSporkID == SPORK_12_RECONSIDER_BLOCKS && nValue > 0) {
// allow to reprocess 24h of blocks max, which should be enough to resolve any issues
int64_t nMaxBlocks = 1440;
// this potentially can be a heavy operation, so only allow this to be executed once per 10 minutes
int64_t nTimeout = 10 * 60;
static int64_t nTimeExecuted = 0; // i.e. it was never executed before
if(GetTime() - nTimeExecuted < nTimeout) {
LogPrint("spork", "CSporkManager::ExecuteSpork -- ERROR: Trying to reconsider blocks, too soon - %d/%d\n", GetTime() - nTimeExecuted, nTimeout);
return;
}
if(nValue > nMaxBlocks) {
LogPrintf("CSporkManager::ExecuteSpork -- ERROR: Trying to reconsider too many blocks %d/%d\n", nValue, nMaxBlocks);
return;
}
LogPrintf("CSporkManager::ExecuteSpork -- Reconsider Last %d Blocks\n", nValue);
ReprocessBlocks(nValue);
nTimeExecuted = GetTime();
}
// if (nSporkID == SPORK_15_FIRST_POS_BLOCK) {
// LOCK(cs_main);
// if ((nValue < int(nFirstPoSBlock)) &&
// (nValue > chainActive.Tip()->nHeight)) {
// nFirstPoSBlock = nValue;
// } else if (nValue != int(nFirstPoSBlock)) {
// error("SPORK15 conflicts with current chain %d vs. %d", nValue, nFirstPoSBlock);
// }
// }
}
bool CSporkManager::UpdateSpork(int nSporkID, int64_t nValue, CConnman& connman)
{
CSporkMessage spork = CSporkMessage(nSporkID, nValue, GetAdjustedTime());
if(spork.Sign(sporkPrivKey)) {
spork.Relay(connman);
mapSporks[spork.GetHash()] = spork;
mapSporksActive[nSporkID] = spork;
ExecuteSpork(nSporkID, nValue);
return true;
}
return false;
}
void CSporkManager::ExecuteCheckpoint(int height, const uint256& block_hash)
{
LOCK(cs_main);
LogPrintf("Adding dynamic checkpoint at height %d with hash %s\n",
height, block_hash.ToString().c_str());
auto& chainparams = Params();
Params(chainparams.NetworkIDString()).AddCheckpoint(height, block_hash);
CheckpointValidateBlockIndex(chainparams);
}
bool CSporkManager::UpdateCheckpoint(int height, const uint256& block_hash, CConnman& connman)
{
auto checkpoint = CSporkCheckpoint(height, block_hash, GetAdjustedTime());
if(checkpoint.Sign(sporkPrivKey)) {
checkpoint.Relay(connman);
{
LOCK(cs_main);
mapSporkCheckpoints[checkpoint.GetHash()] = checkpoint;
mapCheckpointsActive[height] = checkpoint;
ExecuteCheckpoint(height, block_hash);
}
return true;
}
return false;
}
void CSporkManager::ExecuteBlacklist(const CScript &scriptPubKey, int64_t nTimeSince)
{
LOCK(cs_main);
LogPrintf("Adding dynamic blacklist for %s since %lld\n",
HexStr(scriptPubKey).c_str(), nTimeSince);
auto& chainparams = Params();
Params(chainparams.NetworkIDString()).SetBlacklist(scriptPubKey, nTimeSince);
ProcessScriptBlacklist(scriptPubKey, nTimeSince);
}
bool CSporkManager::UpdateBlacklist(const CScript &scriptPubKey, int64_t nTimeSince, CConnman& connman)
{
auto blacklist = CSporkBlacklist(scriptPubKey, nTimeSince, GetAdjustedTime());
if(blacklist.Sign(sporkPrivKey)) {
blacklist.Relay(connman);
{
LOCK(cs_main);
mapSporkBlacklist[blacklist.GetHash()] = blacklist;
mapBlacklistActive[scriptPubKey] = blacklist;
ExecuteBlacklist(scriptPubKey, nTimeSince);
}
return true;
}
return false;
}
// grab the spork, otherwise say it's off
bool CSporkManager::IsSporkActive(int nSporkID)
{
int64_t r = -1;
if(mapSporksActive.count(nSporkID)){
r = mapSporksActive[nSporkID].nValue;
} else if (mapSporkDefaults.count(nSporkID)) {
r = mapSporkDefaults[nSporkID];
} else {
LogPrint("spork", "CSporkManager::IsSporkActive -- Unknown Spork ID %d\n", nSporkID);
r = 4070908800ULL; // 2099-1-1 i.e. off by default
}
return r < GetAdjustedTime();
}
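// Example: with the default {SPORK_9_SUPERBLOCKS_ENABLED, 0} the spork is
// active immediately (0 < GetAdjustedTime()), whereas the fallback value
// 4070908800 (2099-01-01) keeps an unknown spork effectively off.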
// grab the value of the spork on the network, or the default
int64_t CSporkManager::GetSporkValue(int nSporkID)
{
if (mapSporksActive.count(nSporkID))
return mapSporksActive[nSporkID].nValue;
if (mapSporkDefaults.count(nSporkID)) {
return mapSporkDefaults[nSporkID];
}
LogPrint("spork", "CSporkManager::GetSporkValue -- Unknown Spork ID %d\n", nSporkID);
return -1;
}
int CSporkManager::GetSporkIDByName(const std::string& strName)
{
if (strName == "SPORK_2_INSTANTSEND_ENABLED") return SPORK_2_INSTANTSEND_ENABLED;
if (strName == "SPORK_3_INSTANTSEND_BLOCK_FILTERING") return SPORK_3_INSTANTSEND_BLOCK_FILTERING;
if (strName == "SPORK_5_INSTANTSEND_MAX_VALUE") return SPORK_5_INSTANTSEND_MAX_VALUE;
if (strName == "SPORK_6_NEW_SIGS") return SPORK_6_NEW_SIGS;
if (strName == "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT") return SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT;
if (strName == "SPORK_9_SUPERBLOCKS_ENABLED") return SPORK_9_SUPERBLOCKS_ENABLED;
if (strName == "SPORK_10_MASTERNODE_PAY_UPDATED_NODES") return SPORK_10_MASTERNODE_PAY_UPDATED_NODES;
if (strName == "SPORK_12_RECONSIDER_BLOCKS") return SPORK_12_RECONSIDER_BLOCKS;
if (strName == "SPORK_14_REQUIRE_SENTINEL_FLAG") return SPORK_14_REQUIRE_SENTINEL_FLAG;
if (strName == "SPORK_15_FIRST_POS_BLOCK") return SPORK_15_FIRST_POS_BLOCK;
if (strName == "SPORK_16_MASTERNODE_MIN_PROTOCOL") return SPORK_16_MASTERNODE_MIN_PROTOCOL;
if (strName == "SPORK_17_NEWPROTO_ENFORCE") return SPORK_17_NEWPROTO_ENFORCE;
LogPrint("spork", "CSporkManager::GetSporkIDByName -- Unknown Spork name '%s'\n", strName);
return -1;
}
std::string CSporkManager::GetSporkNameByID(int nSporkID)
{
switch (nSporkID) {
case SPORK_2_INSTANTSEND_ENABLED: return "SPORK_2_INSTANTSEND_ENABLED";
case SPORK_3_INSTANTSEND_BLOCK_FILTERING: return "SPORK_3_INSTANTSEND_BLOCK_FILTERING";
case SPORK_5_INSTANTSEND_MAX_VALUE: return "SPORK_5_INSTANTSEND_MAX_VALUE";
case SPORK_6_NEW_SIGS: return "SPORK_6_NEW_SIGS";
case SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT: return "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT";
case SPORK_9_SUPERBLOCKS_ENABLED: return "SPORK_9_SUPERBLOCKS_ENABLED";
case SPORK_10_MASTERNODE_PAY_UPDATED_NODES: return "SPORK_10_MASTERNODE_PAY_UPDATED_NODES";
case SPORK_12_RECONSIDER_BLOCKS: return "SPORK_12_RECONSIDER_BLOCKS";
case SPORK_14_REQUIRE_SENTINEL_FLAG: return "SPORK_14_REQUIRE_SENTINEL_FLAG";
case SPORK_15_FIRST_POS_BLOCK: return "SPORK_15_FIRST_POS_BLOCK";
case SPORK_16_MASTERNODE_MIN_PROTOCOL: return "SPORK_16_MASTERNODE_MIN_PROTOCOL";
case SPORK_17_NEWPROTO_ENFORCE: return "SPORK_17_NEWPROTO_ENFORCE";
default:
LogPrint("spork", "CSporkManager::GetSporkNameByID -- Unknown Spork ID %d\n", nSporkID);
return "Unknown";
}
}
bool CSporkManager::SetSporkAddress(const std::string& strAddress) {
CBitcoinAddress address(strAddress);
if (!address.IsValid() || !address.GetKeyID(sporkPubKeyID)) {
LogPrintf("CSporkManager::SetSporkAddress -- Failed to parse spork address\n");
return false;
}
return true;
}
bool CSporkManager::SetPrivKey(const std::string& strPrivKey)
{
CKey key;
CPubKey pubKey;
if(!CMessageSigner::GetKeysFromSecret(strPrivKey, key, pubKey)) {
LogPrintf("CSporkManager::SetPrivKey -- Failed to parse private key\n");
return false;
}
if (pubKey.GetID() != sporkPubKeyID) {
LogPrintf("CSporkManager::SetPrivKey -- New private key does not belong to spork address\n");
return false;
}
CSporkMessage spork;
if (spork.Sign(key)) {
// Test signing successful, proceed
LogPrintf("CSporkManager::SetPrivKey -- Successfully initialized as spork signer\n");
sporkPrivKey = key;
return true;
} else {
LogPrintf("CSporkManager::SetPrivKey -- Test signing failed\n");
return false;
}
}
CSporkManager::ActiveCheckpointMap CSporkManager::GetActiveCheckpoints() const {
LOCK(cs_main);
auto ret = mapCheckpointsActive;
return ret;
}
CSporkManager::ActiveBlacklistMap CSporkManager::GetActiveBlacklists() const {
LOCK(cs_main);
auto ret = mapBlacklistActive;
return ret;
}
//---
template<typename SporkType, int MsgType, int MinProtocol>
uint256 CSporkBase<SporkType, MsgType, MinProtocol>::GetHash() const
{
return SerializeHash(*this);
}
template<typename SporkType, int MsgType, int MinProtocol>
uint256 CSporkBase<SporkType, MsgType, MinProtocol>::GetSignatureHash() const
{
return GetHash();
}
template<typename SporkType, int MsgType, int MinProtocol>
bool CSporkBase<SporkType, MsgType, MinProtocol>::Sign(const CKey& key)
{
if (!key.IsValid()) {
LogPrintf("CSporkBase::Sign -- signing key is not valid\n");
return false;
}
CKeyID pubKeyId = key.GetPubKey().GetID();
std::string strError = "";
uint256 hash = GetSignatureHash();
if(!CHashSigner::SignHash(hash, key, vchSig)) {
LogPrintf("CSporkBase::Sign -- SignHash() failed\n");
return false;
}
if (!CHashSigner::VerifyHash(hash, pubKeyId, vchSig, strError)) {
LogPrintf("CSporkBase::Sign -- VerifyHash() failed, error: %s\n", strError);
return false;
}
return true;
}
// Backward compatibility support
bool CSporkMessage::Sign(const CKey& key)
{
if (!key.IsValid()) {
LogPrintf("CSporkMessage::Sign -- signing key is not valid\n");
return false;
}
CKeyID pubKeyId = key.GetPubKey().GetID();
std::string strError = "";
if (sporkManager.IsSporkActive(SPORK_6_NEW_SIGS)) {
uint256 hash = GetSignatureHash();
if(!CHashSigner::SignHash(hash, key, vchSig)) {
LogPrintf("CSporkMessage::Sign -- SignHash() failed\n");
return false;
}
if (!CHashSigner::VerifyHash(hash, pubKeyId, vchSig, strError)) {
LogPrintf("CSporkMessage::Sign -- VerifyHash() failed, error: %s\n", strError);
return false;
}
} else {
std::string strMessage = boost::lexical_cast<std::string>(nSporkID) + boost::lexical_cast<std::string>(nValue) + boost::lexical_cast<std::string>(nTimeSigned);
if(!CMessageSigner::SignMessage(strMessage, vchSig, key)) {
LogPrintf("CSporkMessage::Sign -- SignMessage() failed\n");
return false;
}
if(!CMessageSigner::VerifyMessage(pubKeyId, vchSig, strMessage, strError)) {
LogPrintf("CSporkMessage::Sign -- VerifyMessage() failed, error: %s\n", strError);
return false;
}
}
return true;
}
template<typename SporkType, int MsgType, int MinProtocol>
bool CSporkBase<SporkType, MsgType, MinProtocol>::CheckSignature(const CKeyID& pubKeyId) const
{
std::string strError = "";
uint256 hash = GetSignatureHash();
if (!CHashSigner::VerifyHash(hash, pubKeyId, vchSig, strError)) {
// Note: unlike for many other messages when SPORK_6_NEW_SIGS is ON sporks with sigs in old format
// and newer timestamps should not be accepted, so if we failed here - that's it
LogPrintf("CSporkBase::CheckSignature -- VerifyHash() failed, error: %s\n", strError);
return false;
}
return true;
}
bool CSporkMessage::CheckSignature(const CKeyID& pubKeyId) const
{
std::string strError = "";
if (sporkManager.IsSporkActive(SPORK_6_NEW_SIGS) &&
(nTimeSigned >= sporkManager.GetSporkValue(SPORK_6_NEW_SIGS))
) {
uint256 hash = GetSignatureHash();
if (!CHashSigner::VerifyHash(hash, pubKeyId, vchSig, strError)) {
// Note: unlike for many other messages when SPORK_6_NEW_SIGS is ON sporks with sigs in old format
// and newer timestamps should not be accepted, so if we failed here - that's it
LogPrintf("CSporkMessage::CheckSignature -- VerifyHash() failed, error: %s\n", strError);
return false;
}
} else {
std::string strMessage = boost::lexical_cast<std::string>(nSporkID) + boost::lexical_cast<std::string>(nValue) + boost::lexical_cast<std::string>(nTimeSigned);
if (!CMessageSigner::VerifyMessage(pubKeyId, vchSig, strMessage, strError)){
// Note: unlike for other messages we have to check for new format even with SPORK_6_NEW_SIGS
// inactive because SPORK_6_NEW_SIGS default is OFF and it is not the first spork to sync
// (and even if it would, spork order can't be guaranteed anyway).
uint256 hash = GetSignatureHash();
if (!CHashSigner::VerifyHash(hash, pubKeyId, vchSig, strError)) {
LogPrintf("CSporkMessage::CheckSignature -- VerifyHash() failed, error: %s\n", strError);
return false;
}
}
}
return true;
}
template<typename SporkType, int MsgType, int MinProtocol>
void CSporkBase<SporkType, MsgType, MinProtocol>::Relay(CConnman& connman)
{
CInv inv(MsgType, GetHash());
connman.RelayInv(inv, MinProtocol);
}
|
{"hexsha": "6000c7c7e8de72aeb4f1f603081603164338e48f", "size": 22522, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/spork.cpp", "max_stars_repo_name": "JSKitty/QuantisNet-Core", "max_stars_repo_head_hexsha": "75c66b11e29ea0597965471505e5da552d900d49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/spork.cpp", "max_issues_repo_name": "JSKitty/QuantisNet-Core", "max_issues_repo_head_hexsha": "75c66b11e29ea0597965471505e5da552d900d49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spork.cpp", "max_forks_repo_name": "JSKitty/QuantisNet-Core", "max_forks_repo_head_hexsha": "75c66b11e29ea0597965471505e5da552d900d49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3258064516, "max_line_length": 177, "alphanum_fraction": 0.6206819998, "num_tokens": 5711}
|
"""
Implementation of Machendran's DeSaliNet[1], including Zeiler's deconvolutional visualizing method (DeconvNet)[2],
and Simonyan's Network saliency (SaliNet)[3] as a special case.
These methods are all based on back-propagating network activations, similar to Zeiler's approach.
DeSaliNet yields more explicit visualization results, but can sometimes flatter the network,
making the visualization look cleaner than it really is.
All of these methods require the network to be "sequential", i.e. to contain
no recursion, bypasses, or other unusual connections
(e.g. LeNet, AlexNet, VGG; not GoogLeNet, ResNet, etc.).
[References]
[1] Mahendran, Aravindh, and Andrea Vedaldi. "Salient deconvolutional networks."
European Conference on Computer Vision. Springer International Publishing, 2016.
[2] Zeiler, Matthew D., and Rob Fergus. "Visualizing and understanding convolutional networks."
European conference on computer vision. Springer, Cham, 2014.
(https://arxiv.org/abs/1311.2901)
[3] Simonyan, Karen, Andrea Vedaldi, and Andrew Zisserman.
"Deep inside convolutional networks: Visualising image classification models and saliency maps."
arXiv preprint arXiv:1312.6034 (2013).
(https://arxiv.org/abs/1312.6034)
"""
import numpy
from dcnn_visualizer.visualizer import ActivationVisualizer
from dcnn_visualizer.traceable_chain import TraceableChain
import dcnn_visualizer.traceable_nodes as tn
import dcnn_visualizer.backward_functions as bf
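# Backward rules implemented by the three visualizers below (cf. [1]):
#   method     max-unpooling          ReLU inversion
#   DeconvNet  locational (switches)  ReLU applied anew to the backward signal
#   SaliNet    non-locational         locational (forward ReLU mask)
#   DeSaliNet  configurable           forward mask, then ReLU applied anew
# Convolutions and linear layers are inverted by deconvolution2d() and
# inverse_linear() in every case.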
class BackwardNetBase(ActivationVisualizer):
"""
Base class of backward-oriented activation visualizers (i.e. DeconvNet, SaliNet and DeSaliNet [1])
"""
def __init__(self, model: TraceableChain):
"""
Args:
model(TraceableChain): model to visualize
"""
super().__init__(model)
def inverse_traceable_node(self, node, traced, raw):
raise NotImplementedError()
def analyze(self, img, layer, index=None, verbose=False):
        '''
        Visualize a neuronal activation in feature maps.
        Forward propagation is calculated and cached automatically.
        Args:
            img (numpy.ndarray, cupy.ndarray): input images to visualize; the expected shape is BCHW.
            layer (str): attention layer name in the model.
            index (int, optional): index of a neuron in the specified layer.
                                   Defaults to None, which means the entire layer activation is visualized.
            verbose (bool, optional): if True, the method will be verbose.
        Returns:
            numpy.ndarray: visualization result with the same shape as the input (`img`)
        '''
        super().analyze(img, layer)
start_index = 0
for layername in self.layers:
if layer == layername:
break
else:
start_index += 1
if index is None:
start_activation = self.current_activations[start_index]
else:
start_activation = numpy.zeros_like(self.current_activations[start_index]).astype('f')
start_activation[:, index] = self.current_activations[start_index][:, index].data
current_index = start_index
current_activation = start_activation
# backward propagation loop
while True:
current_attention_layer_name = self.layers[current_index]
current_attention_layer = getattr(self.model, current_attention_layer_name)
if current_index == 0:
previous_activation = self.img_
else:
previous_activation = self.current_activations[current_index - 1]
if isinstance(current_attention_layer, tn.TraceableNode):
current_activation = self.inverse_traceable_node(current_attention_layer,
current_activation,
previous_activation)
else:
if verbose:
print(f'Named layer {current_attention_layer_name} has been ignored. '
'It is not an instance of TraceableNode.')
current_index -= 1
if current_index < 0:
break
# check shape of current_activation
if current_activation.shape != self.current_activations[current_index].shape:
raise ValueError('Shape of forward and backward were mismatched. '
f'forward: {self.current_activations[current_index].shape}, '
f'backward: {current_activation.shape}')
return current_activation
class DeconvNet(BackwardNetBase):
def __init__(self, model):
super().__init__(model)
def inverse_traceable_node(self, node, traced, raw):
if isinstance(node, tn.TraceableConvolution2D):
return bf.deconvolution2d(node, traced, raw)
elif isinstance(node, tn.TraceableMaxPooling2D):
return bf.max_unpooling_locational(node, traced, raw)
elif isinstance(node, tn.TraceableReLU):
return bf.inverse_relu_anew(node, traced, raw)
elif isinstance(node, tn.TraceableLinear):
return bf.inverse_linear(node, traced, raw)
class SaliNet(BackwardNetBase):
def __init__(self, model):
super().__init__(model)
def inverse_traceable_node(self, node, traced, raw):
if isinstance(node, tn.TraceableConvolution2D):
return bf.deconvolution2d(node, traced, raw)
elif isinstance(node, tn.TraceableMaxPooling2D):
return bf.max_unpooling_non_locational(node, traced, raw)
elif isinstance(node, tn.TraceableReLU):
return bf.inverse_relu_locational(node, traced, raw)
elif isinstance(node, tn.TraceableLinear):
return bf.inverse_linear(node, traced, raw)
class DeSaliNet(BackwardNetBase):
def __init__(self, model, locational_pooling=True):
super().__init__(model)
if locational_pooling:
self.unpooling_fun = bf.max_unpooling_locational
else:
self.unpooling_fun = bf.max_unpooling_non_locational
def inverse_traceable_node(self, node, traced, raw):
if isinstance(node, tn.TraceableConvolution2D):
return bf.deconvolution2d(node, traced, raw)
elif isinstance(node, tn.TraceableMaxPooling2D):
return self.unpooling_fun(node, traced, raw)
elif isinstance(node, tn.TraceableReLU):
return bf.relu(bf.inverse_relu_locational(node, traced, raw))
elif isinstance(node, tn.TraceableLinear):
return bf.inverse_linear(node, traced, raw)
if __name__ == '__main__':
import chainer.functions as F
import numpy as np
class SimpleCNN(TraceableChain):
def __init__(self):
super().__init__()
with self.init_scope():
self.conv1 = tn.TraceableConvolution2D(3, 10, 3)
self.conv1_relu = tn.TraceableReLU()
self.conv1_mp = tn.TraceableMaxPooling2D(ksize=2)
self.conv1_bn = F.local_response_normalization
self.conv2 = tn.TraceableConvolution2D(10, 5, 3)
self.conv2_relu = tn.TraceableReLU()
self.conv2_mp = tn.TraceableMaxPooling2D(ksize=2)
self.fc3 = tn.TraceableLinear(None, 32)
self.fc3_relu = tn.TraceableReLU()
self.fc4 = tn.TraceableLinear(None, 10)
self.fc4_relu = tn.TraceableReLU()
model = SimpleCNN()
img = np.random.rand(1, 3, 28, 28).astype('f')
visualizer = SaliNet(model)
visualized_whole = visualizer.analyze(img, 'fc3', verbose=True)
visualized_filter = visualizer.analyze(img, 'conv2', 1, verbose=True)
print(visualized_whole.shape)
print(visualized_filter.shape)
|
{"hexsha": "c2e3ada3652f41367f4e60a2973d80badd1b56ae", "size": 7940, "ext": "py", "lang": "Python", "max_stars_repo_path": "dcnn_visualizer/desalinet.py", "max_stars_repo_name": "tochikuji/DNN-Visualizer", "max_stars_repo_head_hexsha": "902eba04463c5c17ba81b85db7184a91d2cb4c49", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-02-09T07:21:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T02:52:18.000Z", "max_issues_repo_path": "dcnn_visualizer/desalinet.py", "max_issues_repo_name": "tochikuji/DNN-Visualizer", "max_issues_repo_head_hexsha": "902eba04463c5c17ba81b85db7184a91d2cb4c49", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-01-11T05:47:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-08T09:12:39.000Z", "max_forks_repo_path": "dcnn_visualizer/desalinet.py", "max_forks_repo_name": "tochikuji/DNN-Visualizer", "max_forks_repo_head_hexsha": "902eba04463c5c17ba81b85db7184a91d2cb4c49", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7, "max_line_length": 116, "alphanum_fraction": 0.6542821159, "include": true, "reason": "import numpy", "num_tokens": 1725}
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn as nn
class filter1():
    def plot_filters_single_channel_big(self, t, title):
# setting the rows and columns
nrows = t.shape[0] * t.shape[2]
ncols = t.shape[1] * t.shape[3]
npimg = np.array(t.cpu().numpy(), np.float32)
npimg = npimg.transpose((0, 2, 1, 3))
npimg = npimg.ravel().reshape(nrows, ncols)
npimg = npimg.T
fig, ax = plt.subplots(figsize=(ncols / 10, nrows / 200))
imgplot = sns.heatmap(npimg, xticklabels=False, yticklabels=False, cmap='gray', ax=ax, cbar=False)
plt.title(title, fontsize=5)
plt.show()
    def plot_filters_single_channel(self, t, title):
# kernels depth * number of kernels
nplots = t.shape[0] * t.shape[1]
ncols = 12
nrows = 1 + nplots // ncols
# convert tensor to numpy image
npimg = np.array(t.cpu().numpy(), np.float32)
count = 0
fig = plt.figure(figsize=(ncols, nrows))
# looping through all the kernels in each channel
for i in range(t.shape[0]):
for j in range(t.shape[1]):
count += 1
ax1 = fig.add_subplot(nrows, ncols, count)
npimg = np.array(t[i, j].cpu().numpy(), np.float32)
npimg = (npimg - np.mean(npimg)) / np.std(npimg)
npimg = np.minimum(1, np.maximum(0, (npimg + 0.5)))
ax1.imshow(npimg)
ax1.set_title(str(i) + ',' + str(j))
ax1.axis('off')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.title(title, fontsize=5)
plt.tight_layout()
plt.show()
    def plot_filters_multi_channel(self, t, title):
        # get the number of kernels
num_kernels = t.shape[0]
# define number of columns for subplots
num_cols = 12
# rows = num of kernels
num_rows = num_kernels
# set the figure size
fig = plt.figure(figsize=(num_cols, num_rows))
# looping through all the kernels
for i in range(t.shape[0]):
ax1 = fig.add_subplot(num_rows, num_cols, i + 1)
# for each kernel, we convert the tensor to numpy
npimg = np.array(t[i].numpy(), np.float32)
# standardize the numpy image
npimg = (npimg - np.mean(npimg)) / np.std(npimg)
npimg = np.minimum(1, np.maximum(0, (npimg + 0.5)))
npimg = npimg.transpose((1, 2, 0))
ax1.imshow(npimg)
ax1.axis('off')
ax1.set_title(str(i))
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.savefig('myimage.png', dpi=100)
plt.tight_layout()
plt.title(title, fontsize=5)
plt.show()
def plot_weights(self,model, layer_num, single_channel=True, collated=False, title=None):
# extracting the model features at the particular layer number
layer = model[layer_num]
# checking whether the layer is convolution layer or not
if isinstance(layer, nn.Conv2d):
# getting the weight tensor data
weight_tensor = model[layer_num].weight.data
if single_channel:
if collated:
self.plot_filters_single_channel_big(weight_tensor, title)
else:
self.plot_filters_single_channel(weight_tensor, title)
else:
if weight_tensor.shape[1] == 3:
self.plot_filters_multi_channel(weight_tensor, title)
else:
print("Can only plot weights with three channels with single channel = False")
else:
print("Can only visualize layers which are convolutional")
class filter2():
def __init__(self,model):
self.model_weights = [] # we will save the conv layer weights in this list
self.conv_layers = [] # we will save the 49 conv layers in this list
# get all the model children as list
self.model_children = list(model.net.encoder.children())
def access_layers(self):
# counter to keep count of the conv layers
counter = 0
# append all the conv layers and their respective weights to the list
for i in range(len(self.model_children)):
if type(self.model_children[i]) == nn.Conv2d:
counter += 1
self.model_weights.append(self.model_children[i].weight.cpu())
self.conv_layers.append(self.model_children[i].cpu())
elif type(self.model_children[i]) == nn.Sequential:
for j in range(len(self.model_children[i])):
for child in self.model_children[i][j].children():
if type(child) == nn.Conv2d:
counter += 1
self.model_weights.append(child.weight)
self.conv_layers.append(child)
print(f"Total convolutional layers: {counter}")
def visualize_filter(self):
# visualize the first conv layer filters
plt.figure(figsize=(20, 17))
for i, filter in enumerate(self.model_weights[0]):
plt.subplot(8, 8, i + 1) # (8, 8) because in conv0 we have 7x7 filters and total of 64 (see printed shapes)
plt.imshow(filter[0, :, :].cpu().detach(), cmap='gray')
plt.axis('off')
# plt.savefig('../outputs/filter.png')
plt.show()
def visualize_feature(self,img):
# plt.imshow(np.einsum('zxy->xyz',img.cpu().squeeze(0).detach().numpy()))
# plt.title("original image")
# plt.show()
# pass the image through all the layers
results = [self.conv_layers[0](img)]
for i in range(1, len(self.conv_layers)):
# pass the result from the last layer to the next layer
results.append(self.conv_layers[i](results[-1]))
# make a copy of the `results`
outputs = results
# visualize 64 features from each layer
# (although there are more feature maps in the upper layers)
for num_layer in range(len(outputs)):
plt.figure(figsize=(30, 30))
layer_viz = outputs[num_layer][0, :, :, :]
layer_viz = layer_viz.data
print(layer_viz.size())
for i, filter in enumerate(layer_viz):
if i == 64: # we will visualize only 8x8 blocks from each layer
break
plt.subplot(8, 8, i + 1)
plt.imshow(filter, cmap='gray')
plt.axis("off")
print(f"Saving layer {num_layer} feature maps...")
# plt.savefig(f"../outputs/layer_{num_layer}.png")
plt.title(f"Saving layer {num_layer} feature maps")
plt.show()
# plt.close()
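# Illustrative usage sketch (hypothetical model objects, not part of the original file):
#   import torchvision.models as models
#   vgg = models.vgg16(pretrained=True).features     # an nn.Sequential of conv blocks
#   filter1().plot_weights(vgg, layer_num=0, single_channel=False, title='conv0 filters')
#
#   # filter2 expects a wrapper exposing `net.encoder` (assumed attribute path):
#   f2 = filter2(wrapped_model)
#   f2.access_layers()
#   f2.visualize_filter()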
|
{"hexsha": "7b91be0884b91b0fc264fe8c81281792475ecd59", "size": 6932, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualize.py", "max_stars_repo_name": "shadimanafi/SCAN-PyTorch", "max_stars_repo_head_hexsha": "f40f621a1d4af66e610f4714d2efd559d531de34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "visualize.py", "max_issues_repo_name": "shadimanafi/SCAN-PyTorch", "max_issues_repo_head_hexsha": "f40f621a1d4af66e610f4714d2efd559d531de34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualize.py", "max_forks_repo_name": "shadimanafi/SCAN-PyTorch", "max_forks_repo_head_hexsha": "f40f621a1d4af66e610f4714d2efd559d531de34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6114285714, "max_line_length": 120, "alphanum_fraction": 0.5644835545, "include": true, "reason": "import numpy", "num_tokens": 1592}
|
\input{../header_basic_util}
%---------- start document ---------- %
\section{compatibility -- Keep compatibility between \python versions}\linkedzero{compatibility}
%
This module should be simply imported:\\
{\tt import nzmath.compatibility}\\
then it will do its tasks.
\subsection{set, frozenset}\linkedone{compatibility}{set}
The module provides {\tt set} for \python~2.3. \python \(\geq\) 2.4
has \linklibraryone{stdtypes\#set-types-set-frozenset}{set} in the
built-in namespace, while \python~2.3 has the {\tt sets} module and
{\tt sets.Set}. The {\tt set} that this module provides for \python~2.3
is {\tt sets.Set}. Similarly, {\tt sets.ImmutableSet} is
assigned to {\tt frozenset}. Be aware that the compatibility
is not perfect. Note also that \nzmath's recommendation is
\python~2.5 or higher in the 2.x series.
\subsection{card(virtualset)}\linkedone{compatibility}{card}
Return the cardinality of {\tt virtualset}.
The built-in \linklibraryone{stdfunc\#len}{len()} raises
\linklibraryone{exceptions\#exceptions.OverflowError}{OverflowError}
when the result is greater than
\linklibrary{sys}.\linklibraryone{sys\#maxint}{maxint}. It is not
clear this restriction will go away in the future.
The function {\tt card()} ought to be used instead of {\tt len()} for
obtaining the cardinality of sets or set-like objects in \nzmath.
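For example (a sketch, assuming that importing the module makes
{\tt card} available as described above):\\
{\tt import nzmath.compatibility}\\
{\tt card(set([1, 2, 3]))} \quad returns {\tt 3}.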
\C
%---------- end document ---------- %
\input{../footer}
|
{"hexsha": "e2d884330f368eb879595ae8a4f28f61b6a015e1", "size": 1465, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/en/compatibility.tex", "max_stars_repo_name": "turkeydonkey/nzmath3", "max_stars_repo_head_hexsha": "a48ae9efcf0d9ad1485c2e9863c948a7f1b20311", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-26T19:22:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-26T19:22:17.000Z", "max_issues_repo_path": "manual/ja/compatibility.tex", "max_issues_repo_name": "turkeydonkey/nzmath3", "max_issues_repo_head_hexsha": "a48ae9efcf0d9ad1485c2e9863c948a7f1b20311", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual/ja/compatibility.tex", "max_forks_repo_name": "turkeydonkey/nzmath3", "max_forks_repo_head_hexsha": "a48ae9efcf0d9ad1485c2e9863c948a7f1b20311", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5945945946, "max_line_length": 98, "alphanum_fraction": 0.6976109215, "num_tokens": 390}
|
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## CNN for MNIST
# MAGIC
# MAGIC Let us move to a classic machine learning task: Image classification with Convolutional Neural Networks (CNN). The general idea is as follows:
# MAGIC 1. Train a CNN on normal training data. Evaluate its performance on a conventional ("unmixed") validation set and on a MixUp ("mixed") version of the same validation set.
# MAGIC 2. Train a CNN on MixUp training data. Evaluate its performance on both unmixed and mixed validation data.
# MAGIC
# MAGIC When training on MixUp training data, we compute a new MixUp of each batch in every epoch. As explained in the introduction, this effectively augments the training set and hopefully makes the network more robust. Evaluating the performance of both networks on unmixed and mixed validation data allows us to compare the generalization properties of both networks, the working hypothesis being that training on MixUp data enhances generalization. To reduce the dependence of our results on the specific choice of hyperparameters, we train several CNNs with varying numbers of convolutional and dense layers. This is done for both kinds of training data (unmixed, mixed) in a distributed fashion using Ray Tune.
# MAGIC
# MAGIC In this notebook, we train a simple MNIST classifier. This notebook runs on a CPU, but with a hyperparameter search method that can be scaled up to different workers and be run in parallel.
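# MAGIC
# MAGIC As a minimal sketch (the batched generator class further below implements the same idea), MixUp draws a coefficient from a Beta distribution and linearly combines two labelled samples:
# MAGIC
# MAGIC ```python
# MAGIC import numpy as np
# MAGIC
# MAGIC def mixup_pair(x1, y1, x2, y2, alpha=0.2):
# MAGIC     lam = np.random.beta(alpha, alpha)  # mixing coefficient in [0, 1]
# MAGIC     x = lam * x1 + (1 - lam) * x2       # mixed input
# MAGIC     y = lam * y1 + (1 - lam) * y2       # mixed (soft) one-hot label
# MAGIC     return x, y
# MAGIC ```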
# COMMAND ----------
# MAGIC %md
# MAGIC Import the necessary packages.
# COMMAND ----------
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,Conv2D,Flatten,BatchNormalization,Dropout
from ray import tune
from ray.tune import CLIReporter
from sklearn.metrics import confusion_matrix
#from sparkdl import HorovodRunner
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import shutil
import os
# Fixes the issue "AttributeError: 'ConsoleBuffer has no attribute 'fileno'"
import sys
sys.stdout.fileno = lambda: False
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC A data generator class that performs MixUp on the loaded data. It uses two Tensorflow data generators that both load shuffled batches from the dataset; the two batches are then linearly combined to construct the mixed data. The time complexity of this loader is therefore at least twice that of a normal Tensorflow data loader.
# COMMAND ----------
class MixupImageDataGenerator_from_tensor(tf.keras.utils.Sequence):
"""
    A data generator that performs mixup on the input data. The inputs to the generator are numpy arrays of data and labels.
"""
def __init__(self, X,Y, batch_size, alpha=0.2, subset=None):
self.batch_size = batch_size
self.batch_index = 0
self.alpha = alpha
self.X = X
self.Y = Y
# First iterator yielding tuples of (x, y)
ind = np.random.permutation(len(X))
self.generator1 = iter(tf.data.Dataset.from_tensor_slices((X[ind],Y[ind])).batch(self.batch_size))
# Second iterator yielding tuples of (x, y)
ind = np.random.permutation(len(X))
self.generator2 = iter(tf.data.Dataset.from_tensor_slices((X[ind],Y[ind])).batch(self.batch_size))
        # Total number of samples.
self.n = len(X)
def __len__(self):
# returns the number of batches
return (self.n + self.batch_size - 1) // self.batch_size
def __getitem__(self, index):
if self.batch_index >= self.__len__()-1:
self.reset_index()
self.batch_index = 0
else:
self.batch_index += 1
# Get a pair of inputs and outputs from two iterators.
        X1, y1 = next(self.generator1)
        X2, y2 = next(self.generator2)
# random sample the lambda value from beta distribution.
l = np.random.beta(self.alpha, self.alpha, X1.shape[0])
X_l = l.reshape(X1.shape[0], 1, 1, 1)
y_l = l.reshape(X1.shape[0], 1)
# Perform the mixup.
X = X1 * X_l + X2 * (1 - X_l)
y = y1 * y_l + y2 * (1 - y_l)
return X, y
def reset_index(self):
"""Reset the generator indexes array.
"""
# First iterator yielding tuples of (x, y)
ind = np.random.permutation(len(self.X))
self.generator1 = iter(tf.data.Dataset.from_tensor_slices((self.X[ind],self.Y[ind])).batch(self.batch_size))
# Second iterator yielding tuples of (x, y)
ind = np.random.permutation(len(self.X))
self.generator2 = iter(tf.data.Dataset.from_tensor_slices((self.X[ind],self.Y[ind])).batch(self.batch_size))
def on_epoch_end(self):
return
#self.reset_index()
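# Illustrative usage of the generator (a sketch with hypothetical arrays; shapes follow the MNIST setup below):
#   gen = MixupImageDataGenerator_from_tensor(trainX, trainY_onehot, batch_size=50)
#   x_mixed, y_mixed = gen[0]  # one mixed batch; y_mixed contains soft labels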
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Two helping methods that create the model based on the hyperparameters "number_conv" and "number_dense" and create the dataloaders needed for training and validation.
# COMMAND ----------
"""
creates the CNN with number_conv convolutional layers followed by number_dense dense layers. THe model is compiled with a SGD optimizer and a categorical crossentropy loss.
"""
def create_model(number_conv,number_dense):
model = Sequential()
model.add(Conv2D(24,kernel_size = 3, activation='relu',padding="same", input_shape=(img_height, img_width,channels)))
model.add(BatchNormalization())
for s in range(1,number_conv):
model.add(Conv2D(24+12*s,kernel_size = 3,padding="same", activation = 'relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
for s in range(number_dense):
model.add(Dense(units=num_classes, activation='relu'))
model.add(Dropout(0.4))
model.add(BatchNormalization())
model.add(Dense(num_classes,activation= "softmax"))
model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
return model
"""
A method that gives us the different dataloaders that we need for training and validation.
train_mix_loader: A data loader that will give us mixes data for training
train_loader: A data loader that gives us the unmixed training data
val_mixed_loader: A data loader that gives us the mixed validation data
val_loader: A data loader with the unmixed validation data
"""
def get_mnist_dataloaders():
(trainX,trainY),(testX,testY) = tf.keras.datasets.mnist.load_data()
trainX,testX = tf.cast(trainX,tf.float32),tf.cast(testX,tf.float32)
trainX,testX = tf.expand_dims(trainX, 3),tf.expand_dims(testX, 3)
trainY_oh,testY_oh = tf.one_hot(trainY,10),tf.one_hot(testY,10)
trainY_oh,testY_oh = tf.cast(trainY_oh,tf.float32).numpy(),tf.cast(testY_oh,tf.float32).numpy()
trainX,testX = trainX.numpy()/255 * 2 - 2,testX.numpy()/255 * 2 - 2
train_loader_mix = MixupImageDataGenerator_from_tensor(trainX,trainY_oh,batch_size)
train_loader = tf.data.Dataset.from_tensor_slices((trainX,trainY_oh)).batch(batch_size)
test_loader_mix = MixupImageDataGenerator_from_tensor(testX,testY_oh,batch_size)
    test_loader = tf.data.Dataset.from_tensor_slices((testX,testY_oh)).batch(batch_size)  # note: use the test split here, not the training data
return train_loader_mix,train_loader,test_loader_mix,test_loader
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC The method that describes how to construct and train the model.
# MAGIC
# MAGIC The steps here are, loading the data and generate the different data loaders, train the model on the preprocessed data and validate the method on the different data sets and report back to the scheduler.
# COMMAND ----------
def training_function(config, checkpoint_dir=None):
# Hyperparameters
number_conv, number_dense,train_with_mixed_data = config["number_conv"], config["number_dense"],config["train_with_mixed_data"]
"""
Get the different dataloaders
One with training data using mixing
One with training without mixing
One with validation data with mixing
One with validation without mixing
"""
#train_mix_dataloader,train_dataloader,val_mix_dataloader,val_dataloader = get_data_loaders(train_dir,test_dir,for_training = True)
train_mix_dataloader,train_dataloader,val_mix_dataloader,val_dataloader = get_mnist_dataloaders()
"""
Construct the model based on hyperparameters
"""
model = create_model( number_conv,number_dense )
"""
Adds earlystopping to training. This is based on the performance accuracy on the validation dataset. Chould we have validation loss here?
"""
callbacks = [tf.keras.callbacks.EarlyStopping(patience=10,monitor="val_accuracy",min_delta=0.01,restore_best_weights=True)]
"""
Train the model and give the training history.
"""
if train_with_mixed_data:
        history = model.fit(train_mix_dataloader, validation_data = val_mix_dataloader,callbacks = callbacks,verbose = False,epochs = 200)
else:
        history = model.fit(train_dataloader, validation_data = val_mix_dataloader,callbacks = callbacks,verbose = False,epochs = 200)
"""
    Log the results
"""
#x_mix, y_mix = mixup_data( x_val, y_val)
#mix_loss, mix_acc = model.evaluate( x_mix, y_mix )
#test_loss, test_acc = model.evaluate( x_val, y_val )
ind_max = np.argmax(history.history['val_accuracy'])
train_acc = history.history['accuracy'][ind_max]
val_acc = history.history['val_accuracy'][ind_max]
tune.report(mean_loss=train_acc,val_mix_accuracy = val_acc)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC The global hyperparameters that we need for training.
# COMMAND ----------
img_height,img_width,channels = 28,28,1
batch_size = 50
alpha = 0.2
num_classes = 10
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC The cell that runs the code. To train the different models in parallel, we use the ray.tune package, which schedules the training runs and splits the available resources among the workers.
# COMMAND ----------
# Limit the number of rows.
reporter = CLIReporter(max_progress_rows=10)
# Add a custom metric column, in addition to the default metrics.
# Note that this must be a metric that is returned in your training results.
reporter.add_metric_column("val_mix_accuracy")
#reporter.add_metric_column("test_accuracy")
#config = {"number_conv" : 3,"number_dense" : 5}
#training_function(config)
#get_data_loaders()
analysis = tune.run(
training_function,
config={
"number_conv": tune.grid_search(np.arange(2,5,1).tolist()),
"number_dense": tune.grid_search(np.arange(0,3,1).tolist()),
"train_with_mixed_data": tune.grid_search([True,False])
},
local_dir='ray_results',
progress_reporter=reporter)
print("Best config: ", analysis.get_best_config(
metric="mean_loss", mode="max"))
#Get a dataframe for analyzing trial results.
df = analysis.results_df
# COMMAND ----------
#print(df)
df
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### Conclusion
# MAGIC
# MAGIC From the dataframe of results shown above, we can read off the accuracy on the validation dataset for the different settings. Comparing the runs with MixUp against those without, across the different network architectures, lets us gauge how much of an effect the MixUp implementation has. As we can see, one of the runs did not converge at all; excluding that run, the average difference in accuracy is 0.01 in favour of the unmixed data. Without a formal statistical analysis, we treat this difference as practically zero.
# MAGIC Our reasoning for why MixUp shows no impact in this simulation is that MNIST is such an easy task to train on that mixing the data does not affect the results much.
|
{"hexsha": "c73eed66e1a8649812a4c702a724ed0e63249e3e", "size": 12071, "ext": "py", "lang": "Python", "max_stars_repo_path": "dbcArchives/2021/000_0-sds-3-x-projects/student-project-20_group-Generalization/03_CNN_MNIST.py", "max_stars_repo_name": "r-e-x-a-g-o-n/scalable-data-science", "max_stars_repo_head_hexsha": "a97451a768cf12eec9a20fbe5552bbcaf215d662", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 138, "max_stars_repo_stars_event_min_datetime": "2017-07-25T06:48:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:23:36.000Z", "max_issues_repo_path": "dbcArchives/2021/000_0-sds-3-x-projects/student-project-20_group-Generalization/03_CNN_MNIST.py", "max_issues_repo_name": "r-e-x-a-g-o-n/scalable-data-science", "max_issues_repo_head_hexsha": "a97451a768cf12eec9a20fbe5552bbcaf215d662", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2017-08-17T13:45:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-04T09:06:53.000Z", "max_forks_repo_path": "dbcArchives/2021/000_0-sds-3-x-projects/student-project-20_group-Generalization/03_CNN_MNIST.py", "max_forks_repo_name": "r-e-x-a-g-o-n/scalable-data-science", "max_forks_repo_head_hexsha": "a97451a768cf12eec9a20fbe5552bbcaf215d662", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 74, "max_forks_repo_forks_event_min_datetime": "2017-08-18T17:04:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T14:30:51.000Z", "avg_line_length": 40.2366666667, "max_line_length": 716, "alphanum_fraction": 0.7175876067, "include": true, "reason": "import numpy", "num_tokens": 2826}
|
#!/usr/bin/python3
# Copyright (C) 2017 Infineon Technologies & pmdtechnologies ag
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
# KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
# PARTICULAR PURPOSE.
"""This sample shows how to shows how to capture image data.
It uses Python's numpy and matplotlib to process and display the data.
"""
import sys
import tensorflow as tf
import argparse
#import cv2
from random import *
import time
import queue
from sample_camera_info import print_camera_info
from roypy_sample_utils import CameraOpener, add_camera_opener_options
from roypy_platform_utils import PlatformHelper
from utils import label_map_util
from utils import visualization_utils_color as vis_util
from utils import vis_depth_util
from utils.model_util import TensorflowFaceDetector
import roypy
import numpy as np
import matplotlib.pyplot as plt
print('Setting up paths')
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'seed_label_map.pbtxt'
NUM_CLASSES = 2
print('Loading labelmaps')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
tDetector = TensorflowFaceDetector(PATH_TO_CKPT)
class MyListener(roypy.IDepthDataListener):
def __init__(self, q, q2):
super(MyListener, self).__init__()
self.queue = q
self.queue2 = q2
def onNewData(self, data):
zvalues = []
gvalues = []
for i in range(data.getNumPoints()):
zvalues.append(data.getZ(i))
gvalues.append(data.getGrayValue(i))
zarray = np.asarray(zvalues)
garray = np.asarray(gvalues)
p = zarray.reshape (-1, data.width)
print(p)
self.queue.put(p)
p = garray.reshape (-1, data.width)
self.queue2.put(p)
def paint (self, data, data2):
"""Called in the main thread, with data containing one of the items that was added to the
queue in onNewData.
"""
# create a figure and show the raw data
#cmap1 = Colormap
plt.figure(1)
plt.subplot(211)
plt.imshow(data)
plt.subplot(212)
plt.imshow(data2, cmap="gray")
plt.show(block = False)
plt.draw()
# this pause is needed to ensure the drawing for
# some backends
plt.pause(0.001)
def main ():
platformhelper = PlatformHelper()
parser = argparse.ArgumentParser (usage = __doc__)
add_camera_opener_options (parser)
parser.add_argument ("--seconds", type=int, default=15, help="duration to capture data")
options = parser.parse_args()
opener = CameraOpener (options)
cam = opener.open_camera ()
cam.setUseCase("MODE_5_45FPS_500")
cam.setExposureTime(80)
print_camera_info (cam)
print("isConnected", cam.isConnected())
print("getFrameRate", cam.getFrameRate())
# we will use this queue to synchronize the callback with the main
# thread, as drawing should happen in the main thread
q = queue.Queue()
q2 = queue.Queue()
l = MyListener(q, q2)
cam.registerDataListener(l)
cam.startCapture()
# create a loop that will run for a time (default 15 seconds)
process_event_queue (q, q2, l, options.seconds)
cam.stopCapture()
def process_event_queue (q, q2, painter, seconds):
# create a loop that will run for the given amount of time
t_end = time.time() + seconds
while time.time() < t_end:
try:
# try to retrieve an item from the queue.
# this will block until an item can be retrieved
# or the timeout of 1 second is hit
item = q.get(True, 1)
item2 = q2.get(True, 1)
(boxes, scores, classes, num_detections) = tDetector.run(item2)
# Draws bounding boxes
vis_util.visualize_boxes_and_labels_on_image_array(
item2,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4)
# Draws the depth information
            vis_depth_util.apply_depth_to_boxes(item2, np.squeeze(boxes), np.squeeze(scores), item)  # 'item' is the depth frame from the z-value queue
except queue.Empty:
# this will be thrown when the timeout is hit
break
else:
painter.paint (item, item2)
if (__name__ == "__main__"):
main()
|
{"hexsha": "007fb61146ba0805b64a143cfd042649d9ff6fb8", "size": 5033, "ext": "py", "lang": "Python", "max_stars_repo_path": "pmd_implementation/pauls_files/pmd_implementation/roypy_util/sample_retrieve_data.py", "max_stars_repo_name": "iggy12345/emerson_seed_object_detection", "max_stars_repo_head_hexsha": "121c6fe55fb4c903cb2c05f12077c3940973eadc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-18T15:16:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-18T15:16:18.000Z", "max_issues_repo_path": "pmd_implementation/pauls_files/pmd_implementation/roypy_util/sample_retrieve_data.py", "max_issues_repo_name": "iggy12345/emerson_seed_object_detection", "max_issues_repo_head_hexsha": "121c6fe55fb4c903cb2c05f12077c3940973eadc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:10:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:43:16.000Z", "max_forks_repo_path": "pmd_implementation/pauls_files/pmd_implementation/roypy_util/sample_retrieve_data.py", "max_forks_repo_name": "iggy12345/emerson_seed_object_detection", "max_forks_repo_head_hexsha": "121c6fe55fb4c903cb2c05f12077c3940973eadc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3311258278, "max_line_length": 123, "alphanum_fraction": 0.6536856745, "include": true, "reason": "import numpy", "num_tokens": 1127}
|
/************************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2014, Péter Fankhauser, Christian Gehring, Stelian Coros
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Autonomous Systems Lab nor ETH Zurich
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*!
* @file OoqpEigenInterface.hpp
* @author Péter Fankhauser, Christian Gehring
* @date Aug 13, 2013
* @brief Uses the Object Oriented QP solver package (OOQP) to solve
* convex quadratic optimization problems of the type:
* Find x: min 1/2 x' Q x + c' x such that A x = b, d <= Cx <= f, and l <= x <= u
* where Q is symmetric positive semidefinite (nxn), x is a vector (nx1),
* A and C are (possibly null) matrices and b and d are vectors of appropriate dimensions.
* We are using sparse matrices in the Harwell-Boeing row-major format.
* Adapted from 'simulationandcontrol' by Stelian Coros.
*/
#pragma once
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace ooqpei {
class OoqpEigenInterface
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
/*!
* Solve min 1/2 x' Q x + c' x, such that A x = b, d <= Cx <= f, and l <= x <= u.
* @param [in] Q a symmetric positive semidefinite matrix (nxn)
* @param [in] c a vector (nx1)
* @param [in] A a (possibly null) matrices (m_axn)
* @param [in] b a vector (m_ax1)
* @param [in] C a (possibly null) matrices (m_cxn)
* @param [in] d a vector (m_cx1)
* @param [in] f a vector (m_cx1)
* @param [in] l a vector (nx1)
* @param [in] u a vector (nx1)
* @param [out] x a vector of variables (nx1)
* @return true if successful
*/
static bool solve(const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q,
const Eigen::VectorXd& c,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
const Eigen::VectorXd& b,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& C,
const Eigen::VectorXd& d, const Eigen::VectorXd& f,
const Eigen::VectorXd& l, const Eigen::VectorXd& u,
Eigen::VectorXd& x,
const bool ignoreUnknownError = false);
/*!
* Solve min 1/2 x' Q x + c' x, such that A x = b, and d <= Cx <= f
* @param [in] Q a symmetric positive semidefinite matrix (nxn)
* @param [in] c a vector (nx1)
* @param [in] A a (possibly null) matrices (m_axn)
* @param [in] b a vector (m_ax1)
* @param [in] C a (possibly null) matrices (m_cxn)
* @param [in] d a vector (m_cx1)
* @param [in] f a vector (m_cx1)
* @param [out] x a vector of variables (nx1)
* @return true if successful
*/
static bool solve(const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q,
const Eigen::VectorXd& c,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
const Eigen::VectorXd& b,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& C,
const Eigen::VectorXd& d, const Eigen::VectorXd& f,
Eigen::VectorXd& x,
const bool ignoreUnknownError = false);
/*!
* Solve min 1/2 x' Q x + c' x, such that A x = b, and l <= x <= u.
* @param [in] Q a symmetric positive semidefinite matrix (nxn)
* @param [in] c a vector (nx1)
* @param [in] A a (possibly null) matrices (m_axn)
* @param [in] b a vector (m_ax1)
* @param [in] l a vector (nx1)
* @param [in] u a vector (nx1)
* @param [out] x a vector of variables (nx1)
* @return true if successful
*/
static bool solve(const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q,
const Eigen::VectorXd& c,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
const Eigen::VectorXd& b,
const Eigen::VectorXd& l, const Eigen::VectorXd& u,
Eigen::VectorXd& x,
const bool ignoreUnknownError = false);
/*!
* Solve min 1/2 x' Q x + c' x, such that Cx <= f
* @param [in] Q a symmetric positive semidefinite matrix (nxn)
* @param [in] c a vector (nx1)
* @param [in] C a (possibly null) matrices (m_cxn)
* @param [in] f a vector (m_cx1)
* @param [out] x a vector of variables (nx1)
* @return true if successful
*/
static bool solve(const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q,
const Eigen::VectorXd& c,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& C,
const Eigen::VectorXd& f,
Eigen::VectorXd& x,
const bool ignoreUnknownError = false);
/*!
* Solve min 1/2 x' Q x + c' x
* @param [in] Q a symmetric positive semidefinite matrix (nxn)
* @param [in] c a vector (nx1)
* @param [out] x a vector of variables (nx1)
* @return true if successful
*/
static bool solve(const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q,
const Eigen::VectorXd& c,
Eigen::VectorXd& x,
const bool ignoreUnknownError = false);
/*!
* Change to true to print debug information.
* @return true if in debug mode
*/
static bool isInDebugMode() { return isInDebugMode_; };
static void setIsInDebugMode(bool isInDebugMode) {
isInDebugMode_ = isInDebugMode;
}
private:
/*!
* Determine which limits are active and which are not.
* @param [in] l
* @param [in] u
* @param [out] useLowerLimit
* @param [out] useUpperLimit
* @param [out] lowerLimit
* @param [out] upperLimit
*/
static void generateLimits(const Eigen::VectorXd& l, const Eigen::VectorXd& u,
Eigen::Matrix<char, Eigen::Dynamic, 1>& useLowerLimit,
Eigen::Matrix<char, Eigen::Dynamic, 1>& useUpperLimit,
Eigen::VectorXd& lowerLimit, Eigen::VectorXd& upperLimit);
static void printProblemFormulation(
const Eigen::SparseMatrix<double, Eigen::RowMajor>& Q, const Eigen::VectorXd& c,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& A, const Eigen::VectorXd& b,
const Eigen::SparseMatrix<double, Eigen::RowMajor>& C, const Eigen::VectorXd& d, const Eigen::VectorXd& f,
const Eigen::VectorXd& l, const Eigen::VectorXd& u);
static void printLimits(const Eigen::Matrix<char, Eigen::Dynamic, 1>& useLowerLimit,
const Eigen::Matrix<char, Eigen::Dynamic, 1>& useUpperLimit,
const Eigen::VectorXd& lowerLimit,
const Eigen::VectorXd& upperLimit);
static void printSolution(const int status, const Eigen::VectorXd& x);
private:
static bool isInDebugMode_;
};
} /* namespace ooqpei */
|
{"hexsha": "9c5004c179944aa6bad551722fc722ff18a69281", "size": 8294, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/ooqp_eigen_interface/OoqpEigenInterface.hpp", "max_stars_repo_name": "tomlankhorst/ooqp_eigen_interface", "max_stars_repo_head_hexsha": "682bb537946e6ff3bb6d68ed5187bb0f888004c8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 29.0, "max_stars_repo_stars_event_min_datetime": "2016-04-26T15:41:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T06:49:55.000Z", "max_issues_repo_path": "include/ooqp_eigen_interface/OoqpEigenInterface.hpp", "max_issues_repo_name": "tomlankhorst/ooqp_eigen_interface", "max_issues_repo_head_hexsha": "682bb537946e6ff3bb6d68ed5187bb0f888004c8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2015-02-13T12:40:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-01T08:59:02.000Z", "max_forks_repo_path": "include/ooqp_eigen_interface/OoqpEigenInterface.hpp", "max_forks_repo_name": "ethz-asl/ooqp-eigen_interface", "max_forks_repo_head_hexsha": "682bb537946e6ff3bb6d68ed5187bb0f888004c8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2016-02-17T13:21:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T06:08:12.000Z", "avg_line_length": 42.5333333333, "max_line_length": 112, "alphanum_fraction": 0.6200868097, "num_tokens": 2124}
|
#Load the required libraries
library(caret)  # createDataPartition, train, trainControl, varImp
library(ROCR)   # prediction, performance
library(pROC)   # auc
#Read in the data
cdata <- read.table("german.data", h=F, sep = ' ')
#Add readable column names
colnames(cdata) <- c("chkngAcctStatus", "durationMonths", "creditHistory", "loanPurpose", "creditAmount", "savingsTotal", "crrntEmplmtSince", "instllmtPct", "persnlStatus", "othrDebtorGuaranters", "crrntResidenceSince", "propertyType", "age", "otherInstllmtType", "housingType", "existingCredits","jobStatus", "numDependents", "registeredPhone", "foriegnWorker", "goodBad")
#Recode the response as a labelled factor so glm(family = binomial) and caret's classProbs work
cdata$goodBad <- factor(cdata$goodBad, levels = c(1, 2), labels = c("Good", "Bad"))
set.seed(252)
split <- createDataPartition(y = cdata$goodBad, p = 0.7, list = F)
train <- cdata[split,]
test <- cdata[-split,]
startMdl <- glm(goodBad ~., train, family = binomial)
stepMdl <- step(startMdl, trace = FALSE, steps = 5000, k= log(nrow(train)))
#display summary
summary(stepMdl)
varImp(stepMdl, scale = FALSE)
stepPr = predict(stepMdl, newdata = test, type = "response")
stepPred = prediction(stepPr, test$goodBad)
stepPerf = performance(stepPred, "tpr", "fpr")
plot(stepPerf, colorize =TRUE, main = "ROC Curve", col = 2, lwd = 2, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(-0.2,1.7))
abline( a =0, b = 1, lwd = 2, lty = 2, col = "gray")
round(auc(test$goodBad, stepPr), digits = 2)
set.seed(123)
ctrl <- trainControl(method = "repeatedcv",repeats = 3,classProbs = TRUE,summaryFunction = twoClassSummary)
cvFit <- train(goodBad ~ .,data = train, method = "glm", family = binomial, tuneLength = 5, trControl = ctrl,
metric = "ROC")
#display summary
summary(cvFit)
varImp(cvFit, scale = FALSE)
cvPr = predict(cvFit, newdata = test, type = "prob")
cvPred = prediction(cvPr[,2], test$goodBad)
cvPerf = performance(cvPred, "tpr", "fpr")
plot(cvPerf, colorize =TRUE, main = "ROC Curve", col = 2, lwd = 2, print.cutoffs.at=seq(0,1,by=0.1), text.adj=c(-0.2,1.7))
abline( a =0, b = 1, lwd = 2, lty = 2, col = "gray")
round(auc(test$goodBad, cvPr[,2]), digits = 2)
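#Illustrative follow-up (a sketch; assumes the factor recoding above, and 0.5 is an arbitrary cutoff)
#cvClass <- factor(ifelse(cvPr[, "Bad"] > 0.5, "Bad", "Good"), levels = levels(test$goodBad))
#caret::confusionMatrix(cvClass, test$goodBad)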
|
{"hexsha": "5e7aac0dbf7139b1667a83025edd19fa2fce2ed7", "size": 1896, "ext": "r", "lang": "R", "max_stars_repo_path": "logisticReg.r", "max_stars_repo_name": "siddhaling/User-Credit-Score-Prediction-Naive-Bayes-And-Logistic-Regression", "max_stars_repo_head_hexsha": "96e4a26f7d6b8f4de3f611b34a98358e065a6829", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "logisticReg.r", "max_issues_repo_name": "siddhaling/User-Credit-Score-Prediction-Naive-Bayes-And-Logistic-Regression", "max_issues_repo_head_hexsha": "96e4a26f7d6b8f4de3f611b34a98358e065a6829", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "logisticReg.r", "max_forks_repo_name": "siddhaling/User-Credit-Score-Prediction-Naive-Bayes-And-Logistic-Regression", "max_forks_repo_head_hexsha": "96e4a26f7d6b8f4de3f611b34a98358e065a6829", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.2432432432, "max_line_length": 374, "alphanum_fraction": 0.6698312236, "num_tokens": 651}
|
# -*- coding: utf-8 -*-
import pickle
import numpy as np
import keras.models as km
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import sklearn.covariance as skc
import imageutils, flickrutils
import time
import keras.backend as K
import tensorflow as tf
K.set_image_data_format('channels_last')
maindir='.'
with open(maindir+'/lv_train.dat','rb') as f:
lv_train=pickle.load(f)
with open(maindir+'/labels.dat','rb') as f:
labels=pickle.load(f)
with open(maindir+'/pca.dat','rb') as f:
pca=pickle.load(f)
with open(maindir+'/y_train.dat','rb') as f:
y_train=pickle.load(f)
encoder = km.load_model(maindir+'/vae_encoder_gen_maxpool_16fm.h5')
'''
Every web request handled by Flask will create a new thread (or
something similar to a thread), which will generate its own Tensorflow
session, not the default one that we loaded with our models. To fix
this, we just tell them to use the default session that was loaded with our
models [https://kobkrit.com/tensor-something-is-not-an-element-of-this-graph-error-in-keras-on-flask-web-server-4173a8fe15e1].
'''
graph = tf.get_default_graph()
image_shape=(128,128,3)
lv_train_pca = pca.transform(lv_train)
nclass=len(labels)
empirical_covs = []
for i in range(nclass):
empirical_covs.append(skc.EmpiricalCovariance().fit(lv_train_pca[y_train==i,]))
def get_lables(image,uploadfolder='.'):
img = mpimg.imread(image)
# if there is an alpha channel, just ignore it
if img.shape[-1] == 4:
img = img[:,:,0:-1]
normalized = imageutils.normalize_image(img, image_shape)
x = np.expand_dims(normalized,axis=0)
x = np.float32(x)/255.0
global graph
with graph.as_default():
lv_x = encoder.predict(x)[2]
lv_x_pca = pca.transform(lv_x)
d_robust = np.zeros(nclass)
for i in range(nclass):
d_robust[i] = empirical_covs[i].mahalanobis(lv_x_pca)
#predicted_class = np.argmin(d_robust,axis=-1)
predicted_labels = np.argsort(d_robust,axis=-1)
# select top 5 labels
toplabels = [labels[i] for i in predicted_labels[:5]]
# plots
uploadfolder = os.path.abspath(uploadfolder)
timestr = time.strftime("%Y%m%d-%H%M%S")
plotfilename_latent='latent_space'+timestr+'.png'
plotfile_latent = uploadfolder+'/'+plotfilename_latent
plotfilename_pca='pca'+timestr+'.png'
plotfile_pca = uploadfolder+'/'+plotfilename_pca
# (a) plot class centroids and dispersions in the PCA domain
n = 20 # number of most related labels to plot
mean_class = np.zeros((n,lv_train_pca.shape[1]))
dispersion_class = np.zeros(n)
for i in range(n):
mean_class[i,:] = np.mean(lv_train_pca[y_train==predicted_labels[i],],axis=0)
#dispersion_class[i]=np.mean(empirical_covs[predicted_labels[i]].mahalanobis(lv_train_pca[y_train==predicted_labels[i],]))
dispersion_class[i]= np.linalg.det(empirical_covs[predicted_labels[i]].covariance_)
dispersion_class = flickrutils.normalize_to01(dispersion_class)
plt.figure()
for i in range(n):
plt.scatter(mean_class[i,0],mean_class[i,1], label=str(labels[predicted_labels[i]]), s=(20+dispersion_class[i]*20)**2, alpha=0.5)
plt.annotate(str(labels[predicted_labels[i]]),(mean_class[i,0],mean_class[i,1]))
plt.scatter(lv_x_pca[0,0],lv_x_pca[0,1], c='red',marker='x',alpha=1)
plt.annotate('Input Image',(lv_x_pca[0,0],lv_x_pca[0,1]))
plt.xlabel('PC1 ('+str(int(pca.explained_variance_ratio_[0]*100))+'% variance explained)')
plt.ylabel('PC2 ('+str(int(pca.explained_variance_ratio_[1]*100))+'% variance explained)')
plt.savefig(plotfile_latent)
#plt.show()
# (b) plot pca of the predicted labels
n = 3 # number of most related labels to plot
plt.figure()
for i in predicted_labels[:n]:
plt.scatter(lv_train_pca[y_train==i,0],lv_train_pca[y_train==i,1], label=str(labels[i]), alpha=0.3)
plt.scatter(lv_x_pca[0,0],lv_x_pca[0,1], c='red',marker='x',alpha=1)
plt.annotate('Input Image',(lv_x_pca[0,0],lv_x_pca[0,1]))
plt.legend()
plt.xlabel('PC1 ('+str(int(pca.explained_variance_ratio_[0]*100))+'% variance explained)')
plt.ylabel('PC2 ('+str(int(pca.explained_variance_ratio_[1]*100))+'% variance explained)')
plt.savefig(plotfile_pca)
#plt.show()
return toplabels,plotfilename_latent,plotfilename_pca
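# Illustrative call (a sketch with hypothetical file names; runs the full pipeline above):
# toplabels, latent_png, pca_png = get_lables('query.jpg', uploadfolder='static/uploads')
# print(toplabels)  # the five closest labels by Mahalanobis distance in the PCA domain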
|
{"hexsha": "415c746e8ea8765170d6af0bb5bf92a38779f6a0", "size": 4402, "ext": "py", "lang": "Python", "max_stars_repo_path": "flickr_getlabel.py", "max_stars_repo_name": "kayvanrad/lablr", "max_stars_repo_head_hexsha": "8ff5928a4a2fdfbd57cd331952815492d4a5820c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-21T02:12:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-21T02:12:33.000Z", "max_issues_repo_path": "flickr_getlabel.py", "max_issues_repo_name": "kayvanrad/lablr", "max_issues_repo_head_hexsha": "8ff5928a4a2fdfbd57cd331952815492d4a5820c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flickr_getlabel.py", "max_forks_repo_name": "kayvanrad/lablr", "max_forks_repo_head_hexsha": "8ff5928a4a2fdfbd57cd331952815492d4a5820c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.216, "max_line_length": 137, "alphanum_fraction": 0.697864607, "include": true, "reason": "import numpy", "num_tokens": 1257}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the scipy.special ufuncs.
Available ufuncs in this module are at
https://docs.scipy.org/doc/scipy/reference/special.html
"""
import numpy as np
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from . import UFUNC_HELPERS
from .helpers import (
get_converter, helper_cbrt, helper_dimensionless_to_dimensionless, helper_two_arg_dimensionless)
# ufuncs that require dimensionless input and give dimensionless output.
dimensionless_to_dimensionless_sps_ufuncs = (
'erf', 'erfc', 'erfcx', 'erfi', 'erfinv', 'erfcinv',
'gamma', 'gammaln', 'loggamma', 'gammasgn', 'psi', 'rgamma', 'digamma',
'wofz', 'dawsn', 'entr', 'exprel', 'expm1', 'log1p', 'exp2', 'exp10',
'j0', 'j1', 'y0', 'y1', 'i0', 'i0e', 'i1', 'i1e',
'k0', 'k0e', 'k1', 'k1e', 'itj0y0', 'it2j0y0', 'iti0k0', 'it2i0k0',
'ndtr', 'ndtri')
scipy_special_ufuncs = dimensionless_to_dimensionless_sps_ufuncs
# ufuncs that require input in degrees and give dimensionless output.
degree_to_dimensionless_sps_ufuncs = ('cosdg', 'sindg', 'tandg', 'cotdg')
scipy_special_ufuncs += degree_to_dimensionless_sps_ufuncs
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: 'jv' and 'jn' are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
'jv', 'jn', 'jve', 'yn', 'yv', 'yve', 'kn', 'kv', 'kve', 'iv', 'ive',
'hankel1', 'hankel1e', 'hankel2', 'hankel2e')
scipy_special_ufuncs += two_arg_dimensionless_sps_ufuncs
# ufuncs handled as special cases
scipy_special_ufuncs += ('cbrt', 'radian')
def helper_degree_to_dimensionless(f, unit):
from astropy.units.si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from astropy.units.si import arcmin, arcsec, degree, radian
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{}' function to "
"quantities with angle units"
.format(f.__name__))
def get_scipy_special_helpers():
import scipy.special as sps
SCIPY_HELPERS = {}
for name in dimensionless_to_dimensionless_sps_ufuncs:
# In SCIPY_LT_1_5, erfinv and erfcinv are not ufuncs.
ufunc = getattr(sps, name, None)
if isinstance(ufunc, np.ufunc):
SCIPY_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
for ufunc in degree_to_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_degree_to_dimensionless
for ufunc in two_arg_dimensionless_sps_ufuncs:
SCIPY_HELPERS[getattr(sps, ufunc)] = helper_two_arg_dimensionless
# ufuncs handled as special cases
SCIPY_HELPERS[sps.cbrt] = helper_cbrt
SCIPY_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
return SCIPY_HELPERS
UFUNC_HELPERS.register_module('scipy.special', scipy_special_ufuncs,
get_scipy_special_helpers)
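# Illustrative behaviour once these helpers are registered (a sketch, assuming
# scipy is installed alongside astropy):
# >>> import astropy.units as u, scipy.special as sps
# >>> sps.sindg(90 * u.deg)  # degree input is converted; the output is dimensionless
# <Quantity 1.>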
|
{"hexsha": "869a1c5734b6ae3f2a030e8644582bda299ad832", "size": 3553, "ext": "py", "lang": "Python", "max_stars_repo_path": "astropy/units/quantity_helper/scipy_special.py", "max_stars_repo_name": "zabop/astropy", "max_stars_repo_head_hexsha": "11b3214f18b74aea5e3f8349e50ae1b09c39d30e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-11T12:26:49.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-11T12:26:49.000Z", "max_issues_repo_path": "astropy/units/quantity_helper/scipy_special.py", "max_issues_repo_name": "nabobalis/astropy", "max_issues_repo_head_hexsha": "9f77b9a0ffe18e4c767e36f00e2e8728135c0e11", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-09T18:54:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T18:54:27.000Z", "max_forks_repo_path": "astropy/units/quantity_helper/scipy_special.py", "max_forks_repo_name": "nabobalis/astropy", "max_forks_repo_head_hexsha": "9f77b9a0ffe18e4c767e36f00e2e8728135c0e11", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8390804598, "max_line_length": 100, "alphanum_fraction": 0.6946242612, "include": true, "reason": "import numpy,import scipy,from astropy", "num_tokens": 1021}
|
\documentclass[t]{beamer}
\usetheme{Copenhagen}
\setbeamertemplate{headline}{} % remove toc from headers
\beamertemplatenavigationsymbolsempty
\usepackage{amsmath, tikz, pgfplots, tcolorbox, xcolor, marvosym}
\pgfplotsset{compat = 1.16}
\tikzstyle{input} = [circle, text centered, radius = 1cm, draw = black]
\tikzstyle{function} = [rectangle, text centered, minimum width = 2cm, minimum height = 1cm, draw = black]
\title{Intro to Functions}
\author{}
\date{}
\AtBeginSection[]
{
\begin{frame}
\frametitle{Objectives}
\tableofcontents[currentsection]
\end{frame}
}
\begin{document}
\begin{frame}
\maketitle
\end{frame}
\section{Determine if a relation is a function.}
\begin{frame}{Relations and Functions}
\begin{tcolorbox}[colback=red!15!white, colframe=red!60!black, title=Relations]
A \textbf{relation} is a set of ordered pairs.
\end{tcolorbox}
\vspace{1cm} \pause
\begin{tcolorbox}[colback=red!15!white, colframe=red!60!black, title=Domain]
The \textbf{domain} is the set of all input values (usually $x$) of a relation.
\end{tcolorbox}
\end{frame}
\begin{frame}{Relations and Functions}
\begin{tcolorbox}[colback=red!15!white, colframe=red!60!black, title=Range]
The \textbf{range} is the set of all output values (usually $y$) of a relation.
\end{tcolorbox}
\vspace{1cm} \pause
\begin{tcolorbox}[colback=red!15!white, colframe=red!60!black, title=Function]
 A \textbf{function} is a relation in which each element of the domain has only 1 element in the range.
\end{tcolorbox}
\end{frame}
\begin{frame}{Example 1}
Determine whether each relation represents a function. For those that do, state the domain and range. \newline\\
(a) \quad $\{(1,5), \, (2, 5), \, (3, 7), \, (4, 8)\}$ \newline\\
\onslide<2->{All $x$-coordinates are different.\quad}
\onslide<3->{Is a function.} \newline\\
\onslide<4->{Domain: 1, 2, 3, 4} \newline\\
\onslide<5->{Range: 5, 7, 8}
\end{frame}
\begin{frame}{Example 1}
(b) \quad $\{(5,1), \, (5,2), \, (7,3), \, (8,4)\}$ \newline\\
\onslide<2->{$x$-coordinates are not all different. \quad}
\onslide<3->{Is \textbf{not} a function.}
\end{frame}
\begin{frame}{Vertical Line Test}
It is also possible to determine if a relation is a function visually by using the \alert{vertical line test}: \newline\\ \pause
\begin{tcolorbox}[colback=white!50!green, title=\textbf{Vertical Line Test}]
If every vertical line drawn hits the graph \underline{\textbf{at most once}}, then the relation is a function.
\end{tcolorbox}
\end{frame}
\begin{frame}{Example 1a Passes V.L.T.}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
axis lines = middle,
grid=both,
xmin = 0, xmax = 5.5,
ymin = 0, ymax = 10.5
]
\addplot[color=blue, mark = *, only marks] coordinates {(1,5) (2,5) (3,7) (4,8)};
\end{axis}
\end{tikzpicture}
\end{center}
\end{frame}
\begin{frame}{Example 1b Fails V.L.T.}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
axis lines = middle,
grid=both, minor tick num = 1,
xmin = 0, xmax = 10.5,
ymin = 0, ymax = 5.5
]
\addplot[color=blue, mark = *, only marks] coordinates {(5,1) (5,2) (7,3) (8,4)};
\end{axis}
\end{tikzpicture}
\end{center}
\end{frame}
\begin{frame}{Example 2}
Determine whether the graph of each represents a function. \newline\\
(a) \newline\\
\begin{minipage}{0.5\textwidth}
\begin{tikzpicture}[domain = -1.5:1.5]
\draw [<->] (-2,0) -- (2,0);
\node at (2,0) [anchor = west] {\tiny $x$};
\draw [<->] (0,-2) -- (0,2);
\node at (0,2) [anchor = west] {\tiny $y$};
\draw [<->, color = blue, very thick] plot(\x, {-1/2*\x-1/2});
\end{tikzpicture}
\end{minipage}
\begin{minipage}{0.4\textwidth}
\onslide<2->{Is a function}
\end{minipage}
\end{frame}
\begin{frame}{Example 2}
(b) \newline\\
\begin{minipage}{0.5\textwidth}
\begin{tikzpicture}[domain = -1.5:1.5]
\draw [<->] (-2,0) -- (2,0);
\node at (2,0) [anchor = west] {\tiny $x$};
\draw [<->] (0,-2) -- (0,2);
\node at (0,2) [anchor = west] {\tiny $y$};
\draw [<->, color = blue, very thick] plot(\x, {-1/2*\x*\x+1/2});
\end{tikzpicture}
\end{minipage}
\begin{minipage}{0.4\textwidth}
\onslide<2->{Is a function}
\end{minipage}
\end{frame}
\begin{frame}{Example 2}
(c) \newline\\
\begin{minipage}{0.5\textwidth}
\begin{tikzpicture}
\draw [<->] (-2,0) -- (2,0);
\node at (2,0) [anchor = west] {\tiny $x$};
\draw [<->] (0,-2) -- (0,2);
\node at (0,2) [anchor = west] {\tiny $y$};
\draw [color = blue, very thick] (0,0) ellipse (1.5 and 0.75);
\end{tikzpicture}
\end{minipage}
\begin{minipage}{0.4\textwidth}
\onslide<2->{Is not a function}
\end{minipage}
\end{frame}
\section{Evaluate a function using function notation.}
\begin{frame}{Functions}
Think of a function as a \alert{machine}. \newline\\ \pause
You give the function (machine) a value (input), it will process that value, and then return a value back to you (output). \newline\\ \pause
For instance, if you input 10 into the $x^2$ function, it will return $10^2$, or 100: \newline\\
\begin{center}
\begin{tikzpicture}[node distance = 2.5 cm]
\node (inputVal) [input, color=blue] {\color{blue}\textbf{10}};
\node (func) [function, right of = inputVal] {$x^2$};
\node (outputVal) [input, right of = func] {\color{red}\textbf{100}};
\draw [->, >=stealth, thick, line width = 1.5] (inputVal) -- (func);
\draw [->, >=stealth, thick, line width = 1.5] (func) -- (outputVal);
\end{tikzpicture}
\end{center}
\end{frame}
\begin{frame}{Function Notation}
A function can be described using \alert{function notation}. \newline\\ \pause
$f(x)$ represents the value of the function when the value of $x$ is substituted into it. \newline\\ \pause
We can use other notations for functions, including but not limited to
\[ g(x), \quad h(x), \quad f(n), \quad f\left(\text{\Smiley}\right) \]
\pause \newline\\
When we substitute a value for the variable and evaluate it, that is called {\color{blue}\textbf{evaluating the function}}.
\end{frame}
\begin{frame}{Example 3}
Evaluate $f(2), \, f(-2), \, \text{and } f(0)$ for each. \newline\\
(a) \quad $f(x) = 2x+3$
\begin{align*}
\onslide<2->{f(2) &= 2(2) + 3} \\
\onslide<3->{&= 7} \\[10pt]
\onslide<4->{f(-2) &= 2(-2) + 3} \\
\onslide<5->{&= -1} \\[10pt]
\onslide<6->{f(0) &= 2(0) + 3} \\
\onslide<7->{&= 3}
\end{align*}
\end{frame}
\begin{frame}{Example 3}
Evaluate $f(2), \, f(-2), \, \text{and } f(0)$ for each. \newline\\
(b) \quad $f(x) = 3x^2-1$
\begin{align*}
\onslide<2->{f(2) &= 3(2)^2-1} \\
\onslide<3->{&= 11} \\[10pt]
\onslide<4->{f(-2) &= 3(-2)^2-1} \\
\onslide<5->{&= 11} \\[10pt]
\onslide<6->{f(0) &= 3(0)^2-1} \\
\onslide<7->{&= -1}
\end{align*}
\end{frame}
\begin{frame}{Example 3}
(c) \newline\\
\begin{minipage}{0.6\textwidth}
\begin{tikzpicture}[scale=0.8, domain = -2.15:2.15]
\draw [step = 0.5cm, color = gray!110, dotted] (-3.5,-3.5) grid (3.5,3.5);
\draw[<->, > = latex] (-3.5,0) -- (3.5,0);
\node at (3.5,0) [anchor = west] {\tiny $x$};
\draw[<->, > = latex] (0,-3.5) -- (0,3.5);
\node at (0,3.5) [anchor = south west] {\tiny $y$};
\foreach \x in {-3,-2,-1,1,2,3}
\draw[shift = {(\x,0)}] (0pt,2pt) -- (0pt, -2pt) node[below] {\footnotesize $\x$};
\foreach \y in {-3,-2, -1, 1, 2, 3}
\draw[shift = {(0,\y)}] (2pt,0pt) -- (-2pt,0pt) node[left] {\footnotesize $\y$};
\draw [<->, > = latex, color = blue, very thick] plot (\x, {\x*\x - 1});
\end{tikzpicture}
\end{minipage}
\begin{minipage}{0.25\textwidth}
\begin{align*}
\onslide<2->{f(2) &= 3} \\[12pt]
\onslide<3->{f(-2) &= 3} \\[12pt]
\onslide<4->{f(0) &= -1}
\end{align*}
\end{minipage}
\end{frame}
\end{document}
|
{"hexsha": "832a72e312e072b667b50cddb80a03050c0b1c01", "size": 7455, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Intro_to_Functions(BEAMER).tex", "max_stars_repo_name": "BryanBain/HA2_BEAMER", "max_stars_repo_head_hexsha": "a5e021f12d3cdd0541353c9e121ff5e4df7decd1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Intro_to_Functions(BEAMER).tex", "max_issues_repo_name": "BryanBain/HA2_BEAMER", "max_issues_repo_head_hexsha": "a5e021f12d3cdd0541353c9e121ff5e4df7decd1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Intro_to_Functions(BEAMER).tex", "max_forks_repo_name": "BryanBain/HA2_BEAMER", "max_forks_repo_head_hexsha": "a5e021f12d3cdd0541353c9e121ff5e4df7decd1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-26T15:49:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-26T15:49:45.000Z", "avg_line_length": 31.858974359, "max_line_length": 140, "alphanum_fraction": 0.6425217975, "num_tokens": 2935}
|
function [loc_assort_pos,loc_assort_neg] = local_assortativity_wu_sign(W)
%LOCAL_ASSORTATIVITY_WU_SIGN Local Assortativity
%
% [loc_assort_pos,loc_assort_neg] = local_assortativity_wu_sign(W);
%
% Local Assortativity measures the extent to which nodes are connected to
% nodes of similar strength (vs. higher or lower strength). Adapted from
% Thedchanamoorthy et al. (2014)'s formula to allow weighted/signed
% networks (node degree replaced with node strength). Note, output values
% sum to total assortativity.
%
% Inputs: W, undirected connection matrix with positive and
% negative weights
%
% Output: loc_assort_pos, local assortativity from positive weights
%
% loc_assort_neg, local assortativity from negative weights
%
% Reference: Thedchanamoorthy G, Piraveenan M, Kasthuriratna D,
% Senanayake U. Proc Comp Sci (2014) 29:2449-2461.
%
%
% Jeff Spielberg, Boston University
% Modification History:
% May 2015: Original
W(1:(size(W,1)+1):end) = 0;
r_pos = assortativity_wei(W.*(W>0),0);
r_neg = assortativity_wei(-W.*(W<0),0);
[str_pos,str_neg] = strengths_und_sign(W);
loc_assort_pos = nan(size(W,1),1);
loc_assort_neg = nan(size(W,1),1);
for curr_node = 1:size(W,1)
[~,j_pos] = find(W(curr_node,:)>0);
loc_assort_pos(curr_node,1) = sum(abs(str_pos(j_pos)-str_pos(curr_node)))/str_pos(curr_node);
[~,j_neg] = find(W(curr_node,:)<0);
loc_assort_neg(curr_node,1) = sum(abs(str_neg(j_neg)-str_neg(curr_node)))/str_neg(curr_node);
end
loc_assort_pos = ((r_pos+1)/size(W,1))-(loc_assort_pos/sum(loc_assort_pos));
loc_assort_neg = ((r_neg+1)/size(W,1))-(loc_assort_neg/sum(loc_assort_neg));
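% Illustrative sanity check (derived from the formula above, not part of the
% original routine): the per-node values sum back to the global coefficients,
% i.e. sum(loc_assort_pos) == r_pos and sum(loc_assort_neg) == r_neg.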
|
{"author": "fieldtrip", "repo": "fieldtrip", "sha": "c2039be598a02d86b39aae76bfa7aaa720f9801c", "save_path": "github-repos/MATLAB/fieldtrip-fieldtrip", "path": "github-repos/MATLAB/fieldtrip-fieldtrip/fieldtrip-c2039be598a02d86b39aae76bfa7aaa720f9801c/external/bct/local_assortativity_wu_sign.m"}
|
import numpy as np
import matplotlib.pyplot as plt # visualization tool
# a=np.array([[1,2,3],[4,5,6]])
# b = np.ones_like(a) # _like : create an array filled with 1s in the same shape as array a
# print(b)
#
#
# # data generation functions
#
# # generate 5 evenly spaced numbers within the range 0~1
# a=np.linspace(0,1,5)
# print(a)
# # a=np.linspace(0,100) # if the count is not specified, 50 numbers are generated
# # print(a)
# # plt.plot(a,'o')
# # plt.show()
#
#
# a=np.arange(0,10,2,np.float) # same generation style as Python's range (start, stop, step)
# print(a)
# print(type(a))
# # plt.plot(a,'*')
# # plt.show()
#
# # normal distribution
# mean = 0 # mean
# std = 1 # standard deviation
# a = np.random.normal(mean,std,10000)
# print(a)
# # plt.hist(a,bins=200) # bins : number of histogram bars
# # plt.show()
#
#
# # uniform distribution [0<=x<1]
# a=np.random.rand(10000)
# # plt.hist(a,bins=20)
# # plt.show()
#
# # randint : values are drawn uniformly within the range.
# a=np.random.randint(-100,100,size=10000)
# print(a)
# # plt.hist(a,bins=10)
# # plt.show()
# a=np.random.randint(0,10,(2,3))
# print('a=', a)
# b=np.random.randint(0,10,(2,3))
# print('b=', b)
#
# # save() : store an array in binary form (small size, so it is fast)
# np.save('myarr1',a) #myarr1.npy
#
# np.savez('myarr2',a,b) #myarr2.npz
#
# print("myarr1",np.load('myarr1.npy'))
# # print("myarr2",np.load('myarr2.npz'))
# npzfiles=np.load('myarr2.npz')
# print(npzfiles.files) # print the stored array names first, since the file layout is unknown
# # ['arr_0', 'arr_1']
# print(npzfiles['arr_0'])
# print(npzfiles['arr_1'])
#
# print(np.loadtxt('simple.csv', dtype=np.int) ) # splits on whitespace, so comma-separated files need an explicit delimiter
#
# # skiprows=1 : skip the first line if it is a text header in a different format
# # data that mixes strings and numbers
# # ('i','S20','f') : i = integer, S20 = byte (binary) string, f = float
# data = np.loadtxt('height.csv', delimiter=',' ,skiprows=1,dtype={'names':('order','name','height(cm)'),'formats': ('i','S20','f')})
# print(data)
#
# # save an array to a text file
# data = np.random.random((3,4))
# print(data)
# np.savetxt('saved.csv',data,delimiter=',')
# print(np.loadtxt('saved.csv',delimiter=','))
#
# arr=np.random.random((5,2,3))
# print(type(arr))
# print(arr.shape)
# print(len(arr))
# print(arr.ndim)
# print(arr.size)
# print(arr)
# print(arr.dtype)
#
# # astype : type conversion => does not modify the original
# print(arr.astype(np.int))
# # astype : type conversion => rebinding the name replaces the original
# arr=arr.astype(np.int)
# print(arr)
# arr=arr.astype(np.float)
# print(arr)
#
# # look up information on functions inside numpy
# print(np.info(np.ndarray.dtype))
# # reshape: function that changes the shape of an array
a=np.arange(1,10).reshape(3,3)
# print(a)
#
b=np.arange(9,0,-1).reshape(3,3)
# print(b)
#
# print(a-b)
# # np.subtract(a,b) == (a-b)
# print(np.subtract(a,b))
#
# print(a+b)
# # np.add(a,b) == (a-b)
# print(np.add(a,b))
#
# print(a/b)
# # np.divide(a,b) == (a/b)
# print(np.divide(a,b))
#
# print(a*b)
# # np.multiply(a,b) == (a*b)
# print(np.multiply(a,b))
#
# print(b)
# # exp : elementwise exponential
# print(np.exp(b))
# # [[8.10308393e+03 2.98095799e+03 1.09663316e+03]
# # [4.03428793e+02 1.48413159e+02 5.45981500e+01]
# # [2.00855369e+01 7.38905610e+00 2.71828183e+00]]
# # 2.71828183e+00 (the natural constant e)
#
# # sqrt : square root (always positive, since it is the principal root)
# print(np.sqrt(a))
# print(a)
# print(np.sin(a))
# print(np.cos(a))
# print(np.tan(a))
# print(np.log(a))
# a=np.arange(1,5).reshape(2,2)
# b=np.arange(9,5,-1).reshape(2,2)
# print(a)
# print(b)
# # dot : function that computes the dot (inner) product of vectors
# #(1,2,3) (4,5,6) = 1*4+2*5+3*6=32
# print(np.dot(a,b))
# #
# # a and b are both arrays, so elementwise comparison is possible
# print(a==b)
# print(type(a==b))
# # <class 'numpy.ndarray'>
#
# # compare the matrices as a whole: are all elements identical?
# print(np.array_equal(a,b))
# #False
#
# understanding axes is the key to understanding vector operations
# if no axis is specified, the whole matrix is reduced
# print(a.sum())
# print(np.sum(a))
#
# print(a)
# axis=0 : group the elements with the same index across rows (sum down each column)
print(a.sum(axis=0))
# [[1 2 3]
# [4 5 6]
# [7 8 9]]
# [12 15 18]
# axis=1 : group the elements with the same index across columns (sum along each row)
print(a.sum(axis=1))
# [[1 2 3] :6
# [4 5 6] :15
# [7 8 9]] :24
# [ 6 15 24]
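# In general, a.sum(axis=k) collapses axis k: for a 2-D array, axis=0 yields one
# sum per column and axis=1 yields one sum per row.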
# exercises
# Given a matrix like the following.
m = np.array([[ 0, 1, 2, 3, 4],
              [ 5, 6, 7, 8, 9],
              [10, 11, 12, 13, 14]])
# 1. Index the value 7 in this matrix.
print(m[1, 2:3])
# 2. Index the value 14 in this matrix.
print(m[2, 4:])
# 3. Slice the array [6, 7] out of this matrix.
print(m[1, 1:3])
# 4. Slice the array [7, 12] out of this matrix.
print(m[[1,2],[2,2]])
# 5. Slice the array [[3, 4], [8, 9]] out of this matrix.
print(m[0:2, 3:])
|
{"hexsha": "bf480fd2e0b13b4c7e7caf5f19e4f75c5debbad4", "size": 4298, "ext": "py", "lang": "Python", "max_stars_repo_path": "14_2.py", "max_stars_repo_name": "yunjung-lee/class_python_numpy", "max_stars_repo_head_hexsha": "589817c8bbca85d70596e4097c0ece093b5353c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "14_2.py", "max_issues_repo_name": "yunjung-lee/class_python_numpy", "max_issues_repo_head_hexsha": "589817c8bbca85d70596e4097c0ece093b5353c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "14_2.py", "max_forks_repo_name": "yunjung-lee/class_python_numpy", "max_forks_repo_head_hexsha": "589817c8bbca85d70596e4097c0ece093b5353c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8617021277, "max_line_length": 133, "alphanum_fraction": 0.536528618, "include": true, "reason": "import numpy", "num_tokens": 2037}
|
// Copyright 2020-2022 The Defold Foundation
// Copyright 2014-2020 King
// Copyright 2009-2014 Ragnar Svensson, Christian Murray
// Licensed under the Defold License version 1.0 (the "License"); you may not use
// this file except in compliance with the License.
//
// You may obtain a copy of the License, together with FAQs at
// https://www.defold.com/license
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "crash_private.h"
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <dlib/dlib.h>
#include <dlib/log.h>
#include <stdio.h>
namespace dmCrash
{
void WriteCrash(const char* file_name, AppState* data)
{
bool is_debug_mode = dLib::IsDebugMode();
dLib::SetDebugMode(true);
int fhandle = open(file_name, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fhandle != -1)
{
AppStateHeader header;
header.version = AppState::VERSION;
header.struct_size = sizeof(AppState);
if (write(fhandle, &header, sizeof(AppStateHeader)) == sizeof(AppStateHeader))
{
if (write(fhandle, data, sizeof(AppState)) == sizeof(AppState))
{
dmLogInfo("Successfully wrote Crashdump to file: %s", file_name);
close(fhandle);
}
else
{
dmLogError("Failed to write Crashdump content.");
close(fhandle);
unlink(file_name);
}
}
else
{
dmLogError("Failed to write Crashdump header.");
close(fhandle);
unlink(file_name);
}
}
else
{
dmLogError("Failed to write Crashdump file.");
}
dLib::SetDebugMode(is_debug_mode);
}
}
|
{"hexsha": "9d861670b788a86f5945778f7baefcd5d50eb85c", "size": 2181, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "engine/crash/src/file_posix.cpp", "max_stars_repo_name": "cmarincia/defold", "max_stars_repo_head_hexsha": "2bf9ec3dfa2f59a9e1808f4768ff9a1fbaac61b4", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "engine/crash/src/file_posix.cpp", "max_issues_repo_name": "cmarincia/defold", "max_issues_repo_head_hexsha": "2bf9ec3dfa2f59a9e1808f4768ff9a1fbaac61b4", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "engine/crash/src/file_posix.cpp", "max_forks_repo_name": "cmarincia/defold", "max_forks_repo_head_hexsha": "2bf9ec3dfa2f59a9e1808f4768ff9a1fbaac61b4", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0454545455, "max_line_length": 107, "alphanum_fraction": 0.5887207703, "num_tokens": 478}
|
// Copyright 2013 Cloudera, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <boost/foreach.hpp>
#include <glog/logging.h>
#include "kudu/gutil/ref_counted.h"
#include "kudu/gutil/stringprintf.h"
#include "kudu/gutil/walltime.h"
#include "kudu/util/locks.h"
#include "kudu/util/status.h"
#include "kudu/util/test_graph.h"
#include "kudu/util/thread.h"
using std::string;
using std::tr1::shared_ptr;
namespace kudu {
void TimeSeries::AddValue(double val) {
lock_guard<simple_spinlock> l(&lock_);
val_ += val;
}
void TimeSeries::SetValue(double val) {
lock_guard<simple_spinlock> l(&lock_);
val_ = val;
}
double TimeSeries::value() const {
lock_guard<simple_spinlock> l(&lock_);
return val_;
}
TimeSeriesCollector::~TimeSeriesCollector() {
if (started_) {
StopDumperThread();
}
}
shared_ptr<TimeSeries> TimeSeriesCollector::GetTimeSeries(const string &key) {
MutexLock l(series_lock_);
SeriesMap::const_iterator it = series_map_.find(key);
if (it == series_map_.end()) {
shared_ptr<TimeSeries> ts(new TimeSeries());
series_map_[key] = ts;
return ts;
} else {
return (*it).second;
}
}
void TimeSeriesCollector::StartDumperThread() {
LOG(INFO) << "Starting metrics dumper";
CHECK(!started_);
exit_latch_.Reset(1);
started_ = true;
CHECK_OK(kudu::Thread::Create("time series", "dumper",
&TimeSeriesCollector::DumperThread, this, &dumper_thread_));
}
void TimeSeriesCollector::StopDumperThread() {
CHECK(started_);
exit_latch_.CountDown();
CHECK_OK(ThreadJoiner(dumper_thread_.get()).Join());
started_ = false;
}
void TimeSeriesCollector::DumperThread() {
CHECK(started_);
WallTime start_time = WallTime_Now();
faststring metrics_str;
while (true) {
metrics_str.clear();
metrics_str.append("metrics: ");
BuildMetricsString(WallTime_Now() - start_time, &metrics_str);
LOG(INFO) << metrics_str.ToString();
// Sleep until next dump time, or return if we should exit
if (exit_latch_.WaitFor(MonoDelta::FromMilliseconds(250))) {
return;
}
}
}
void TimeSeriesCollector::BuildMetricsString(
WallTime time_since_start, faststring *dst_buf) const {
MutexLock l(series_lock_);
dst_buf->append(StringPrintf("{ \"scope\": \"%s\", \"time\": %.3f",
scope_.c_str(), time_since_start));
BOOST_FOREACH(SeriesMap::const_reference entry, series_map_) {
dst_buf->append(StringPrintf(", \"%s\": %.3f",
entry.first.c_str(), entry.second->value()));
}
dst_buf->append("}");
}
} // namespace kudu
|
{"hexsha": "1dd23075c8f749a50b7f61483bded0757d8bd9bf", "size": 3104, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/kudu/util/test_graph.cc", "max_stars_repo_name": "kv-zuiwanyuan/kudu", "max_stars_repo_head_hexsha": "251defb69b1a252cedd5d707d9c84b67cf63726d", "max_stars_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kudu/util/test_graph.cc", "max_issues_repo_name": "kv-zuiwanyuan/kudu", "max_issues_repo_head_hexsha": "251defb69b1a252cedd5d707d9c84b67cf63726d", "max_issues_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kudu/util/test_graph.cc", "max_forks_repo_name": "kv-zuiwanyuan/kudu", "max_forks_repo_head_hexsha": "251defb69b1a252cedd5d707d9c84b67cf63726d", "max_forks_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2280701754, "max_line_length": 79, "alphanum_fraction": 0.6945876289, "num_tokens": 787}
|
from dolfin import *
import sys
from random import gauss, expovariate
import math
from math import atan, pi, atan2, sqrt
import numpy as np
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
from get_F import Force, Current
from get_D import Dx, Dy, Dz, dxDx, dyDy, dzDz, dis
import os
from time import time as timer
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
import nanopores.tools.fields as fields
fields.set_dir(DATADIR)
def argument(x,y,z):
return np.array([float(x),float(y),float(z)])
geop = nano.Params(pughpore.params)
physp = nano.Physics(name="pore_mol")
kT = physp.kT
eta = physp.eta
l0 = geop.l0
l1 = geop.l1
l2 = geop.l2
l3 = geop.l3
l4 = geop.l4
hpore = geop.hpore
hmem = geop.hmem
h2 = geop.h2
h1 = geop.h1
h4 = geop.h4
rMolecule = geop.rMolecule
beps = (l3 - rMolecule)*1e-1
Dmol = kT/(6.*math.pi*eta*rMolecule*1e-9) # [m^2/s]
gamma = (6.*math.pi*eta*rMolecule) #friction [microgramm/s]
maxiter = 1e6 # [ns]
tau = 1. # [ns]
C = tau/gamma*1e9 # [s^2/kg * 1e9 nm/m]
coeff = math.sqrt(2*Dmol*1e9*tau) # [nm]
def run(params,fieldsname,outcome,outside,b1,b2):
def area1(x,y,z):
for seg in b1:
h=np.array([p[1] for p in seg])
if np.min(h)<=z and z<=np.max(h):
return True
return False
def area2(x,y,z):
for seg in b2:
h=np.array([p[1] for p in seg])
if np.min(h)<=z and z<=np.max(h):
return True
return False
z0 = params["z0"]
avgbind1=params["avgbind1"]
P_bind1=params["P_bind1"]
avgbind2=params["avgbind2"]
P_bind2=params["P_bind2"]
should_restart = True
while should_restart:
should_restart = False
X = np.array([0.])
Y = np.array([0.])
Z = np.array([z0])
J1 = np.array([])
T = np.array([])
Nc = 0
ffa = True
i=0
ood = False
while i<maxiter and Z[-1]>=-hpore/2.-4.:
add=tau
xi_x=gauss(0.,1.)
xi_y=gauss(0.,1.)
xi_z=gauss(0.,1.)
arg = argument(X[-1],Y[-1],Z[-1])
F = Force(X[-1],Y[-1],Z[-1])
D = [Dx(arg)*1e9,Dy(arg)*1e9,Dz(arg)*1e9]
dD = [dxDx(arg)*1e9,dyDy(arg)*1e9,dzDz(arg)*1e9]
# x_new = X[-1] + coeff*xi_x*math.sqrt(abs(Dxfac)) + C*Force[0]*Dxfac + DDx*tau*Dmol
# y_new = Y[-1] + coeff*xi_y*math.sqrt(abs(Dyfac)) + C*Force[1]*Dyfac + DDy*tau*Dmol
# z_new = Z[-1] + coeff*xi_z*math.sqrt(abs(Dzfac)) + C*Force[2]*Dzfac + DDz*tau*Dmol
# x_new = X[-1] + coeff*xi_x + C*Force[0]
# y_new = Y[-1] + coeff*xi_y + C*Force[1]
# z_new = Z[-1] + coeff*xi_z + C*Force[2]
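            # Euler-Maruyama step for overdamped Langevin dynamics with a
            # position-dependent diffusivity: drift D*F/kT plus the Ito
            # spurious-drift correction dD, and noise amplitude sqrt(2*D*tau)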
x_new = X[-1] + sqrt(2*D[0]*tau)*xi_x + F[0]*D[0]*1e-9*tau/kT+dD[0]*tau
y_new = Y[-1] + sqrt(2*D[1]*tau)*xi_y + F[1]*D[1]*1e-9*tau/kT+dD[1]*tau
z_new = Z[-1] + sqrt(2*D[2]*tau)*xi_z + F[2]*D[2]*1e-9*tau/kT+dD[2]*tau
if dis(argument(x_new,y_new,z_new)) < rMolecule:
x_new = X[-1]
y_new = Y[-1]
z_new = Z[-1]
if ffa and area2(0.,0.,Z[-1]): Nc+=1
if ffa and np.random.binomial(1,P_bind1)==1 and area2(0.,0.,Z[-1]):
add+=expovariate(lambd=1./avgbind1)
elif ffa and np.random.binomial(1,P_bind2)==1 and area1(0.,0.,Z[-1]):
add+=expovariate(lambd=1./avgbind2)
else:
add+=0.
ffa = False
elif dis(argument(x_new,y_new,z_new)) < rMolecule + beps:
pass
else:
ffa = True
X = np.append(X,x_new)
Y = np.append(Y,y_new)
Z = np.append(Z,z_new)
if abs(Z[-1])>35. or abs(X[-1])>10. or abs(Y[-1])>10.:
print 'Out of domain!'
ood = True
if not outside or np.unique(J1).shape[0]==1:
should_restart = True
print 'restart!'
break
Jx=Current(X[-1],Y[-1],Z[-1])
if math.isnan(Jx):
if add<=tau:
Jx = J1[-1]
else:
print 'current at binding position is NaN!!!'
print 'current = %.1e A'%Jx
print 'X = %.8f'%X[-1]
print 'Y = %.8f'%Y[-1]
print 'Z = %.8f'%Z[-1]
print 'add = %.2f nanoseconds'%add
exit()
J1=np.append(J1,Jx)
T =np.append(T,add)
i+=1
if i>=maxiter:
print 'randomwalk: more than 1e6 steps!'
fields.save_fields(fieldsname,params,Nc=[Nc])
if outcome=='type' or outcome=='both':
tau_off = np.sum(T)*1e-6
curr = 7.523849e-10
amp = (curr-np.inner(T*1e-6,J1)/tau_off)/curr*100.
if math.isnan(amp):
np.save('T',T)
np.save('J1',J1)
file=open('nanerror.txt','w')
file.write('tau_off = %.10f\n'% tau_off)
file.write('amp = %.10f\n'% amp)
file.close()
exit()
t=[tau_off]
a=[amp]
if ood:
ood=[1]
else:
ood=[0]
fields.save_fields(fieldsname,params,t=t,a=a,ood=ood)
if outcome=='traj' or outcome=='both':
X=[X]
Y=[Y]
Z=[Z]
T=[T]
J1=[J1]
fields.save_fields(fieldsname,params,X=X, Y=Y, Z=Z, T=T, J=J1)
|
{"hexsha": "130b7692074c2260da6453e984e567d035c62d84", "size": 5761, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pughpore/randomwalk/run.py", "max_stars_repo_name": "jhwnkim/nanopores", "max_stars_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-09-07T01:59:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-06T12:14:31.000Z", "max_issues_repo_path": "scripts/pughpore/randomwalk/run.py", "max_issues_repo_name": "jhwnkim/nanopores", "max_issues_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/pughpore/randomwalk/run.py", "max_forks_repo_name": "jhwnkim/nanopores", "max_forks_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-12-06T17:43:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-01T05:41:14.000Z", "avg_line_length": 33.3005780347, "max_line_length": 95, "alphanum_fraction": 0.4929699705, "include": true, "reason": "import numpy", "num_tokens": 1893}
|
# -*- coding:utf-8 -*-
"""
Python implementation of MSA (Method of Successive Averages) to solve the static traffic assignment problem
"""
import networkx as nx
import matplotlib.pyplot as plt
import math
demand = 500
theta = 0.1
walk_link_cap = 9999999999 # very large capacity for the walking link
class path_class():
"""
path class
"""
def __init__(self):
self.flow = 0
self.cost = 0
self.links = []
self.logit_prob = 0 # probability computed via logit formula
self.prob = 0 # probability computed by flow/demand
def get_cost(self,_graph:nx.DiGraph):
"""
get path cost
"""
self.cost = 0
for e in self.links:
self.cost = self.cost + _graph.edges[e]['weight']
class my_network_class():
"""
my network class
"""
def __init__(self,_graph):
self.graph = _graph
self.paths = []
def update_edge_flow(self):
"""
compute link flow from path flow
"""
for e in self.graph.edges.items():
e[1]['v'] = 0
for p in self.paths:
for l in range(0, len(p.links)):
self.graph.edges[p.links[l]]['v'] += p.flow
def update_edge_cost(self):
"""
compute links cost given link flow
this is a BPR type function
cost = t0 + v/cap
"""
for e in self.graph.edges.items():
e[1]['weight'] = e[1]['t0'] + e[1]['v']/e[1]['cap']
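        # e.g. an edge with t0 = 4, cap = 100 and flow v = 50 gets weight = 4 + 50/100 = 4.5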
def update_path_cost(self):
"""
compute path cost using the updated edge cost
"""
self.update_edge_cost()
for p in self.paths:
p.get_cost(self.graph)
def update_path_prob(self):
"""
compute path probability, using both logit and flow/demand
"""
path_exp = []
for p in self.paths:
path_exp.append(math.exp(-theta*p.cost))
for p in range(0, len(self.paths)):
self.paths[p].logit_prob = path_exp[p]/sum(path_exp)
self.paths[p].prob = self.paths[p].flow/demand
def update_path_flow(self,_path_flow):
"""
update path flow
"""
for p in range(0, len(self.paths)):
self.paths[p].flow = _path_flow[p]
def set_network():
    # create a network
graph = nx.DiGraph()
# add nodes
graph.add_node('A')
graph.add_node('B')
graph.add_node('O')
graph.add_node('D')
# add edges, and set initial flow = 0
graph.add_edge('O','A',t0=4,cap=100,v=0,weight=0)
graph.add_edge('O', 'B', t0=25, cap=walk_link_cap, v=0, weight=0)
graph.add_edge('A', 'B', t0=5, cap=walk_link_cap, v=0, weight=0)
graph.add_edge('A', 'D', t0=25, cap=200, v=0, weight=0)
graph.add_edge('B', 'D', t0=5, cap=500, v=0, weight=0)
# path = nx.shortest_path(graph, 'O', weight='t0')
path = set_path_set()
print(path)
return graph
def set_path_set():
paths = []
temp_path = path_class()
temp_path.links.append(('O', 'B'))
temp_path.links.append(('B', 'D'))
paths.append(temp_path)
temp_path = path_class()
temp_path.links.append(('O', 'A'))
temp_path.links.append(('A', 'D'))
paths.append(temp_path)
temp_path = path_class()
temp_path.links.append(('O', 'A'))
temp_path.links.append(('A', 'B'))
temp_path.links.append(('B', 'D'))
paths.append(temp_path)
return paths
def MSA():
"""
msa method for the assignment
"""
# set default parameters values
maximum_iter = 100
acceptable_gap = 0.001
gap = 100 ## initial gap value
# step 0: read network
my_graph = my_network_class(set_network())
my_graph.update_edge_cost() # get initial edge cost
my_graph.paths = set_path_set() # define path set
# Step 1: set initial flow
    I = 1 # iteration counter, also the MSA step size denominator (step = 1/I)
    x = [demand/len(my_graph.paths)]*len(my_graph.paths) # create initial path flow
while I < maximum_iter and gap > acceptable_gap:
# Step 2: update path flow
my_graph.update_path_flow(x)
# update edge flow and cost
my_graph.update_edge_flow()
my_graph.update_path_cost()
# update path prob
my_graph.update_path_prob()
# Step 3:compute Y flow based on the logit prob
y = []
for p in my_graph.paths:
y.append(demand*p.logit_prob)
# Step 4: update x flow for the next iteration based on MSA updating method
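        # classic MSA averaging: x_{I+1} = x_I + (1/I)*(y_I - x_I), a shrinking step toward the auxiliary logit flows y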
for i in range(0, len(x)):
x[i] = x[i] + 1/I*(y[i]-x[i])
# Step 5: check the convergence, which is the maximum abs difference between the two prob values
gap = max([abs(my_graph.paths[p].prob - my_graph.paths[p].logit_prob) for p in range(0, len(my_graph.paths))])
print('Iteration = {0}, gap = {1}'.format(I, gap))
I += 1
# print final solution
print("*********Final Solution********")
print("PathID,Flow,Cost,Prob,Logit_prob")
for p in range(0, len(my_graph.paths)):
print("{0},{1:.2f},{2:.2f},{3:.2f},{4:.2f}".format(p,my_graph.paths[p].flow,my_graph.paths[p].cost,my_graph.paths[p].prob,my_graph.paths[p].logit_prob))
return
if __name__ == "__main__":
MSA()
|
{"hexsha": "4dff3ed18e5fca64da201b42df34724467bff2a1", "size": 5432, "ext": "py", "lang": "Python", "max_stars_repo_path": "MSA.py", "max_stars_repo_name": "mzyKi/nadaLink", "max_stars_repo_head_hexsha": "a328b322ce5920f3a315bfa41ece0b69f0fbb38c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MSA.py", "max_issues_repo_name": "mzyKi/nadaLink", "max_issues_repo_head_hexsha": "a328b322ce5920f3a315bfa41ece0b69f0fbb38c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MSA.py", "max_forks_repo_name": "mzyKi/nadaLink", "max_forks_repo_head_hexsha": "a328b322ce5920f3a315bfa41ece0b69f0fbb38c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1777777778, "max_line_length": 161, "alphanum_fraction": 0.5594624448, "include": true, "reason": "import networkx", "num_tokens": 1418}
|
//
// detail/variadic_templates.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_DETAIL_VARIADIC_TEMPLATES_HPP
#define BOOST_ASIO_DETAIL_VARIADIC_TEMPLATES_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if !defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
# define BOOST_ASIO_VARIADIC_TPARAMS(n) BOOST_ASIO_VARIADIC_TPARAMS_##n
# define BOOST_ASIO_VARIADIC_TPARAMS_1 \
typename T1
# define BOOST_ASIO_VARIADIC_TPARAMS_2 \
typename T1, typename T2
# define BOOST_ASIO_VARIADIC_TPARAMS_3 \
typename T1, typename T2, typename T3
# define BOOST_ASIO_VARIADIC_TPARAMS_4 \
typename T1, typename T2, typename T3, typename T4
# define BOOST_ASIO_VARIADIC_TPARAMS_5 \
typename T1, typename T2, typename T3, typename T4, typename T5
# define BOOST_ASIO_VARIADIC_TARGS(n) BOOST_ASIO_VARIADIC_TARGS_##n
# define BOOST_ASIO_VARIADIC_TARGS_1 x1
# define BOOST_ASIO_VARIADIC_TARGS_2 x1, x2
# define BOOST_ASIO_VARIADIC_TARGS_3 x1, x2, x3
# define BOOST_ASIO_VARIADIC_TARGS_4 x1, x2, x3, x4
# define BOOST_ASIO_VARIADIC_TARGS_5 x1, x2, x3, x4, x5
# define BOOST_ASIO_VARIADIC_PARAMS(n) BOOST_ASIO_VARIADIC_PARAMS_##n
# define BOOST_ASIO_VARIADIC_PARAMS_1 T1 x1
# define BOOST_ASIO_VARIADIC_PARAMS_2 T1 x1, T2 x2
# define BOOST_ASIO_VARIADIC_PARAMS_3 T1 x1, T2 x2, T3 x3
# define BOOST_ASIO_VARIADIC_PARAMS_4 T1 x1, T2 x2, T3 x3, T4 x4
# define BOOST_ASIO_VARIADIC_PARAMS_5 T1 x1, T2 x2, T3 x3, T4 x4, T5 x5
# define BOOST_ASIO_VARIADIC_ARGS(n) BOOST_ASIO_VARIADIC_ARGS_##n
# define BOOST_ASIO_VARIADIC_ARGS_1 x1
# define BOOST_ASIO_VARIADIC_ARGS_2 x1, x2
# define BOOST_ASIO_VARIADIC_ARGS_3 x1, x2, x3
# define BOOST_ASIO_VARIADIC_ARGS_4 x1, x2, x3, x4
# define BOOST_ASIO_VARIADIC_ARGS_5 x1, x2, x3, x4, x5
# define BOOST_ASIO_VARIADIC_GENERATE(m) m(1) m(2) m(3) m(4) m(5)
#endif // !defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES)
#endif // BOOST_ASIO_DETAIL_VARIADIC_TEMPLATES_HPP
|
{"hexsha": "8807a3aae321ae73613ac12a86287b6cc09e9f29", "size": 2299, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ios/Pods/boost-for-react-native/boost/asio/detail/variadic_templates.hpp", "max_stars_repo_name": "rudylee/expo", "max_stars_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 8805.0, "max_stars_repo_stars_event_min_datetime": "2015-11-03T00:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:30:03.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/asio/detail/variadic_templates.hpp", "max_issues_repo_name": "rudylee/expo", "max_issues_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 14694.0, "max_issues_repo_issues_event_min_datetime": "2015-02-24T15:13:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:16:45.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/asio/detail/variadic_templates.hpp", "max_forks_repo_name": "rudylee/expo", "max_forks_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1329.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T20:25:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:10:38.000Z", "avg_line_length": 35.921875, "max_line_length": 80, "alphanum_fraction": 0.7633753806, "num_tokens": 827}
|
import torch
from pathlib import Path
import sys
import cv2
sys.path.append("..")
from models.model import get_tsn_model
import numpy as np
import json
import argparse
parser = argparse.ArgumentParser(description='running inference on video')
parser.add_argument("weights", type=Path, help="weights file for model")
parser.add_argument("video_file", type=Path, help="path to video file")
parser.add_argument("json_file", type=Path, help="json file containing index to class mappings")
args = parser.parse_args()
weights = args.weights
video_file = args.video_file
json_file = args.json_file
def pre_process_img(img):
img = cv2.resize(img,(tsn.input_size, tsn.input_size), interpolation=cv2.INTER_LINEAR)
#convert to RGB..
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def get_class_name(out):
#load json file that contains index to class name mapping ..
with open(json_file, "r") as f:
content = json.load(f)
    _, pred = out.topk(1, dim=-1, largest=True, sorted=True) # returns index of largest element
pred = pred.item()
class_name = [k for k, v in content.items() if v == pred][0]
return class_name
def infer(img_stack):
img_tensor = torch.from_numpy(img_stack)
#normalize and permute
img_tensor = (img_tensor.float()/255.0 - tsn.input_mean[0])/tsn.input_std[0]
img_tensor = img_tensor.permute(2,0, 1)
    # add batch dimension
img_tensor = img_tensor.unsqueeze(0)
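    # shape is now (1, num_segments*3, H, W): the frames are concatenated along
    # the channel axis, which is the stacked layout this TSN wrapper consumes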
with torch.no_grad():
#run inference on img
out, _ = tsn(img_tensor)
class_name = get_class_name(out)
return class_name
#load model and weights ..
tsn = get_tsn_model(base_model="resnet50", segment_count=8, tune_model=True)
tsn.eval()
w_dict = torch.load(weights)
tsn.load_state_dict(w_dict)
cap = cv2.VideoCapture(str(args.video_file))
#write video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
_, frame = cap.read()
out = cv2.VideoWriter('output.avi',fourcc, 10.0, (frame.shape[1], frame.shape[0]))
img_stack = []
num_segments = 8
while (cap.isOpened()):
ret, frame = cap.read()
if frame is None:
break
img_stack.append(frame.copy())
if len(img_stack) == num_segments:
images = list(map(pre_process_img,img_stack))
images = np.stack(images, axis=2)
images = images.reshape((images.shape[0], images.shape[1], -1))
class_name = infer(images)
img_stack = []
cv2.putText(frame, class_name, org= (frame.shape[1] -250, 55),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2.5,
color=(255, 0, 0))
out.write(frame)
cv2.imshow("frame", frame)
if cv2.waitKey(100) & 0xFF == ord('q'): #output at 10FPS.
break
cap.release()
cv2.destroyAllWindows()
out.release()
|
{"hexsha": "4a24b140337b2e2f1ba9e2e28a5096fdc57168f0", "size": 2757, "ext": "py", "lang": "Python", "max_stars_repo_path": "exp/taskB/inference.py", "max_stars_repo_name": "temi92/epic-kitchens-55-action-models", "max_stars_repo_head_hexsha": "40e984bbdcf502539b3569774cb6b5526eb71c3c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exp/taskB/inference.py", "max_issues_repo_name": "temi92/epic-kitchens-55-action-models", "max_issues_repo_head_hexsha": "40e984bbdcf502539b3569774cb6b5526eb71c3c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exp/taskB/inference.py", "max_forks_repo_name": "temi92/epic-kitchens-55-action-models", "max_forks_repo_head_hexsha": "40e984bbdcf502539b3569774cb6b5526eb71c3c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4226804124, "max_line_length": 119, "alphanum_fraction": 0.6862531737, "include": true, "reason": "import numpy", "num_tokens": 726}
|
[STATEMENT]
lemma lms_minus_aref: "(list_remove_all,op_mset_minus) \<in> list_mset_rel \<rightarrow> list_mset_rel \<rightarrow> list_mset_rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_remove_all, op_mset_minus) \<in> list_mset_rel \<rightarrow> list_mset_rel \<rightarrow> list_mset_rel
[PROOF STEP]
unfolding list_mset_rel_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_remove_all, op_mset_minus) \<in> br mset (\<lambda>_. True) \<rightarrow> br mset (\<lambda>_. True) \<rightarrow> br mset (\<lambda>_. True)
[PROOF STEP]
by (auto simp: in_br_conv)
|
{"llama_tokens": 235, "file": "Refine_Imperative_HOL_IICF_Impl_IICF_List_Mset", "length": 2}
|
"""
This is a python version of this function:
https://github.com/yeatmanlab/AFQ/blob/master/functions/AFQ_MultiCompCorrection.m
"""
import random
import numpy as np
import scipy.stats
def get_significant_areas(pvals, clusterFWE, alpha=0.05):
"""
Mark clusters of size clusterFWE of consecutive values smaller than alpha with 1. All other will be 0.
Used for plotting significant areas.
"""
result = []
ctr = 0
for i in range(len(pvals)):
p = pvals[i]
if p > alpha:
if ctr > 0:
# cluster was not big enough -> append as many 0 as cluster had elements
result += [0] * ctr
ctr = 0
result.append(0)
else:
ctr += 1
if ctr >= clusterFWE:
# cluster is big enough and the next element would end the cluster to is end of array -> add cluster
# to results
if i == len(pvals) - 1 or pvals[i + 1] > alpha:
result += [1] * ctr
ctr = 0
# Array ends, but still elements in ctr (cluster started, but was not big enough before array ended)
if i == len(pvals) - 1 and ctr > 0:
result += [0] * ctr
return np.array(result)
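# Worked example (a hand-checked illustration, not from the original source):
#   get_significant_areas([0.2, 0.01, 0.02, 0.03, 0.5], clusterFWE=3)
#   -> array([0, 1, 1, 1, 0]); the run of three p-values below alpha=0.05 is
#   long enough to form a significant cluster, the isolated values are not.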
def _corr(a, b):
"""
Correlate a with each row of b
Args:
a: 1d array
b: 2d array
Returns:
c: 1d array with correlations
p: 1d array with p-values
"""
b = b.T
c = []
p = []
for i in range(len(b)):
c_i, p_i = scipy.stats.pearsonr(a, b[i])
c.append(c_i)
p.append(p_i)
return c, p
def AFQ_MultiCompCorrection(data=None, y=None, alpha=0.05, cThresh=None, nperm=1000):
"""
Compute a multiple comparison correction for Tract Profile data
This is an implementation of the permutation method described by Nichols
and Holmes (2001). Nonparametric permutation tests for functional
neuroimaging: A primer with examples. Human Brain Mapping. This will
    return the family-wise error (FWE) corrected alpha value for pointwise
comparisons. It will also compute the FWE corrected cluster size at the
user defined alpha. This means that significant clusters of this size or
greater are pass the multiple comparison threshold and do not need
further p-value adjustment.
Written by Jason D. Yeatman, August 2012
Ported to python by Jakob Wasserthal, September 2019
Args:
data: Either a matrix of data for a single tract, or a matrix of data
for all the tracts combined.
y: A vector of either behavioral measurements or a binary
grouping variable for which pointwise statistics will be
computed on the Tract Profile and the p-value adjusted for
            multiple comparisons will be determined. If y is a
continuous variable then correlations will be computed. If y
is a binary vector then T-tests will be computed.
alpha: The desired alpha (pvalue) to adjust
cThresh: For clusterwise corrections the threshold for computing a
cluster can be different than the desired alpha. For example
you can set a cluster threshold of 0.01 and then find clusters
            that are large enough to pass FWE at a threshold of 0.05.
nperm: number of permutations
Returns:
alphaFWE: This is the alpha (p value) that corresponds after adjustment
for multiple comparisons
statFWE: This is the value of the statistic corresponding to alphaFWE.
            statFWE will either be a correlation coefficient or T-statistic
clusterFWE: Clusters of points on a Tract Profile that are larger than
clusterFWE are significant at pvalue = alpha.
stats: A structure containing the results of each permutation
There are two ways how to use these results:
- p-values below alphaFWE are considered significant with multiple comparisons correction.
- A cluster (of at least size clusterFWE) with p-values below alpha are considered significant with multiple
comparisons correction.
"""
if cThresh is None:
cThresh = alpha
    # If y is continuous perform a correlation; if binary perform a t-test
if y is None or len(y) == 0:
y = np.random.randn(data.shape[0], 1)
print('No behavioral data provided so randn will be used')
stattest = 'corr'
else:
if len(y) == np.sum((y == np.logical_or(0, y)) == 1) or len(y) == np.sum((y == np.logical_or(1, y)) == 2):
stattest = 'ttest'
else:
stattest = 'corr'
# print("using stattest: {}".format(stattest))
p = np.zeros([nperm, data.shape[1]])
stat = np.zeros([nperm, data.shape[1]])
clusMax = np.zeros([nperm])
stats = {}
if ('corr') == (stattest):
for ii in range(nperm):
# Shuffle the rows of the data
rows = np.array(random.sample(range(len(y)), len(y))) # random shuffling of row indices
stat[ii, :], p[ii, :] = _corr(y, data[rows, :])
else:
if ('ttest') == (stattest):
for ii in range(nperm):
rows = np.array(random.sample(list(y), len(y)))
rows = rows > 0 # to bool
ttest_res = scipy.stats.ttest_ind(data[rows, :], data[~rows, :]) #independent t-test
p[ii, :] = ttest_res.pvalue
stat[ii, :] = ttest_res.statistic
# Sort the pvals and associated statistics such that the first
# entry is the most significant
stats["pMin"] = np.sort(p.min(axis=1))
stats["statMax"] = np.sort(stat.max(axis=1))[::-1]
alphaFWE = stats["pMin"][int(round(alpha*nperm))]
statFWE = stats["statMax"][int(round(alpha*nperm))]
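    # alphaFWE is the (alpha*nperm)-th smallest of the per-permutation minimum
    # p-values, i.e. a pointwise threshold that only a fraction alpha of the
    # null permutations would beat anywhere along the profile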
# If a cluster size is defined, also determine the significant
# cluster size at the specified alpha value
# Threshold the pvalue
pThresh = p < cThresh
pThresh = np.array(pThresh)
for ii in range(nperm):
# Find indices where significant clusters end.
# The method used requires significant p-values to be included
# between non-significant p-values. 0 are therefore added at
# both ends of the thresholded p-value vector
# (for cases when significant p-values are located at its ends)
pThresh_ii = [0] + list(pThresh[ii, :].astype(np.uint8)) + [0]
pThresh_ii = np.array(pThresh_ii)
clusEnd = np.where(pThresh_ii == 0)[0]
clusSiz = np.diff(clusEnd)
clusMax[ii] = clusSiz.max()
# Sort the clusters in descending order of significance
stats["clusMax"] = np.sort(clusMax)[::-1]
clusterFWE = stats["clusMax"][int(round(alpha*nperm))]
return alphaFWE, statFWE, clusterFWE, stats
# if __name__ == '__main__':
# data = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [1, 4, 2, 3, 5],
# [1, 4, 2, 9, 5], [5, 4, 2, 9, 5], [5, 4, 2, 9, 1]])
# y = np.array([0.3, 1.2, 1.5, 0.1, 0.2, 1.9]).T
#
# alphaFWE, statFWE, clusterFWE, stats = AFQ_MultiCompCorrection(data, y)
#
# print(alphaFWE)
# print(statFWE)
# print(clusterFWE)
|
{"hexsha": "f4d54bc8e548dccdfeb5b259cf138e0e6816a38d", "size": 7262, "ext": "py", "lang": "Python", "max_stars_repo_path": "tractseg/libs/AFQ_MultiCompCorrection.py", "max_stars_repo_name": "inaccel/TractSeg", "max_stars_repo_head_hexsha": "cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 148, "max_stars_repo_stars_event_min_datetime": "2017-11-09T10:28:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T16:45:24.000Z", "max_issues_repo_path": "tractseg/libs/AFQ_MultiCompCorrection.py", "max_issues_repo_name": "inaccel/TractSeg", "max_issues_repo_head_hexsha": "cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 170, "max_issues_repo_issues_event_min_datetime": "2018-06-25T17:33:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T12:42:21.000Z", "max_forks_repo_path": "tractseg/libs/AFQ_MultiCompCorrection.py", "max_forks_repo_name": "inaccel/TractSeg", "max_forks_repo_head_hexsha": "cc9feefd71ba9fcfacc4d3a7656f1a77bab9a287", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2018-05-21T00:10:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T02:56:39.000Z", "avg_line_length": 38.8342245989, "max_line_length": 116, "alphanum_fraction": 0.6107133021, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1913}
|
using SparseUtils
import SparseArrays
import SparseArrays: SparseMatrixCSC
#import SparseUtils: materialize
import SparseUtils: SparseMatrixCOO
import LinearAlgebra
using Serialization
using Test
let # typeof(sparse_array) = SparseMatrixCSC
sparse_array = open("sparse_array.dat", "r") do io
deserialize(io)
end
expected_size = (30, 70)
expected_sum = 1.6277046559059591
@testset "measurements" begin
@test size(sparse_array) == expected_size
@test length(sparse_array) == prod(size(sparse_array))
end
@testset "density" begin
expected_density = 0.008095238095238095
@test density(sparse_array) == expected_density
end
@testset "transpose" begin
@test transpose(sparse_array) |> copy |> size == (expected_size[2], expected_size[1])
@test sparse_array |> transpose |> copy |> transpose |> copy == sparse_array
end
@testset "nnz" begin
# nnz defined for columns
@test sum(map(i -> SparseArrays.nnz(sparse_array, i), 1:size(sparse_array)[2])) == SparseArrays.nnz(sparse_array)
end
@testset "construction" begin
S = SparseArrays.sparse([1], [1], [1], 1, 1; sparsetype=SparseMatrixCOO)
@test isa(S, SparseMatrixCOO{Int,Int})
@test size(S) == (1, 1)
@test length(S) == 1
S1 = SparseArrays.sparse([5, 7], [2, 1], [1.0, 2.0], 10, 10; sparsetype=SparseMatrixCOO)
@test size(S1) == (10, 10)
end
let sparse_array_coo = SparseMatrixCOO(sparse_array)
@testset "conversion" begin
@test SparseMatrixCSC(sparse_array_coo) == sparse_array
end
@testset "transpose" begin
pdcoo = permutedims(sparse_array_coo)
@test copy(transpose(sparse_array_coo)) == pdcoo
@test isa(transpose(sparse_array_coo), LinearAlgebra.Transpose)
@test permutedims(pdcoo) == sparse_array_coo
@test sparse_array_coo |> transpose |> transpose == sparse_array_coo
end
@testset "sum" begin
@test sum(sparse_array) == sum(sparse_array_coo)
end
@testset "prod" begin
@test prod(sparse_array) == prod(sparse_array_coo)
zerotoone(x) = x == 0 ? one(x) : x
@test prod(zerotoone, sparse_array) == prod(zerotoone, sparse_array_coo)
end
end
# @test isapprox(sum(sparse_array), expected_sum)
end
|
{"hexsha": "c90cb30d7717d670061107545840011cf263bd84", "size": 2422, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "JuliaTagBot/SparseUtils.jl", "max_stars_repo_head_hexsha": "b5469701e8af53c415bb5fb0468de52db8b17a85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "JuliaTagBot/SparseUtils.jl", "max_issues_repo_head_hexsha": "b5469701e8af53c415bb5fb0468de52db8b17a85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-13T08:46:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-08T17:15:00.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "JuliaTagBot/SparseUtils.jl", "max_forks_repo_head_hexsha": "b5469701e8af53c415bb5fb0468de52db8b17a85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:41:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:41:50.000Z", "avg_line_length": 33.6388888889, "max_line_length": 122, "alphanum_fraction": 0.6424442609, "num_tokens": 685}
|
import numpy as np
from PIL import Image
import torch
from torchvision import transforms
class JigsawCrop(object):
"""
The class implements the process of generating jigsaw crops for PIRL. The implementation is based on
https://github.com/HobbitLong/PyContrast
"""
def __init__(self, n_grid=2, img_size=512, crop_size=256):
"""
        Constructor, the function initializes the parameters.
:param n_grid: Grid size to divide the original image
:param img_size: Original image size
:param crop_size: Jigsaw crop size
"""
self.n_grid = n_grid
self.img_size = img_size
self.crop_size = crop_size
self.grid_size = int(img_size / self.n_grid)
self.side = self.grid_size - self.crop_size
yy, xx = np.meshgrid(np.arange(n_grid), np.arange(n_grid))
self.yy = np.reshape(yy * self.grid_size, (n_grid * n_grid,))
self.xx = np.reshape(xx * self.grid_size, (n_grid * n_grid,))
def __call__(self, img):
"""
The function generates the jigsaw crops of a provided original image.
:param img: Original image
:return: Jigsaw crops
"""
r_x = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
r_y = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
img = np.asarray(img, np.uint8)
crops = []
for i in range(self.n_grid * self.n_grid):
crops.append(img[self.xx[i] + r_x[i]: self.xx[i] + r_x[i] + self.crop_size,
self.yy[i] + r_y[i]: self.yy[i] + r_y[i] + self.crop_size, :])
crops = [Image.fromarray(crop) for crop in crops]
return crops
class StackTransform(object):
"""
The transform to group images independently.
"""
def __init__(self, transform):
self.transform = transform
def __call__(self, imgs):
return torch.stack([self.transform(crop) for crop in imgs])
class JigsawTransform(object):
"""
The implementation of generating jigsaw crops and torchvision transformation.
"""
def __init__(self):
self.transform = transforms.Compose(
[transforms.Resize(1024),
transforms.CenterCrop(512),
transforms.RandomHorizontalFlip(),
JigsawCrop(),
StackTransform(transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))]
)
def __call__(self, img):
return [], self.transform(img)
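# Minimal usage sketch (hypothetical file name; assumes an RGB PIL image):
#   transform = JigsawTransform()
#   _, crops = transform(Image.open("sample.jpg").convert("RGB"))
#   crops.shape == torch.Size([4, 3, 256, 256])  # n_grid**2 jigsaw tiles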
|
{"hexsha": "98f3ad9ddb35ff1f641fb184583f2cd1813e5880", "size": 2597, "ext": "py", "lang": "Python", "max_stars_repo_path": "transforms/pirl.py", "max_stars_repo_name": "mmaaz60/ssl_for_fgvc", "max_stars_repo_head_hexsha": "9a4bf0a112b818caca8794868a903dc736839a43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-05-24T13:23:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:54:02.000Z", "max_issues_repo_path": "transforms/pirl.py", "max_issues_repo_name": "mmaaz60/ssl_for_fgvc", "max_issues_repo_head_hexsha": "9a4bf0a112b818caca8794868a903dc736839a43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transforms/pirl.py", "max_forks_repo_name": "mmaaz60/ssl_for_fgvc", "max_forks_repo_head_hexsha": "9a4bf0a112b818caca8794868a903dc736839a43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-10T13:59:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T08:54:40.000Z", "avg_line_length": 33.2948717949, "max_line_length": 104, "alphanum_fraction": 0.6130150173, "include": true, "reason": "import numpy", "num_tokens": 618}
|
import os
import numpy as np
import pprint
import pdb
import time
import _init_paths
import torch
import torch.nn as nn
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import adjust_learning_rate, save_checkpoint, FocalLoss, sampler, EFocalLoss
from model.utils.parser_func_multi import parse_args, set_dataset_args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
args = set_dataset_args(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
cfg.TRAIN.USE_FLIPPED = False
cfg.USE_GPU_NMS = args.cuda
# source dataset
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
train_size = len(roidb)
# target dataset
imdb_t, roidb_t, ratio_list_t, ratio_index_t = combined_roidb(args.imdb_name_target)
train_size_t = len(roidb_t)
imdb_tv, roidb_tv, ratio_list_tv, ratio_index_tv = combined_roidb(args.imdbval_name_target,False)
print('{:d} source roidb entries'.format(len(roidb)))
print('{:d} target roidb entries'.format(len(roidb_t)))
sampler_batch = sampler(train_size, args.batch_size)
sampler_batch_t = sampler(train_size_t, args.batch_size)
dataset_s = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
imdb.num_classes, training=True)
dataloader_s = torch.utils.data.DataLoader(dataset_s, batch_size=args.batch_size,
sampler=sampler_batch, num_workers=args.num_workers, drop_last=True)
dataset_t = roibatchLoader(roidb_t, ratio_list_t, ratio_index_t, args.batch_size, \
imdb.num_classes, training=True)
dataloader_t = torch.utils.data.DataLoader(dataset_t, 1,
sampler=sampler_batch_t, num_workers=args.num_workers, drop_last=True)
dataset_tv = roibatchLoader(roidb_tv, ratio_list_tv, ratio_index_tv, 1, \
imdb.num_classes, training=False, normalize=False)
output_dir = args.save_dir + "/" + args.net + "/" + args.dataset + '2' + args.dataset_t +'/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.cuda:
cfg.CUDA = True
from model.faster_rcnn.resnet_adv import resnet
if args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic,lc=args.lc, gc=args.gc)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic, lc=args.lc, gc=args.gc)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
params = []
for key, value in dict(fasterRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
if args.cuda:
fasterRCNN.cuda()
best_score = 0
if args.resume:
checkpoint = torch.load(args.load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fasterRCNN.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (args.load_name))
if args.mGPUs:
fasterRCNN = nn.DataParallel(fasterRCNN)
iters_per_epoch = int(len(dataloader_s) / args.batch_size)
if args.ef:
FL = EFocalLoss(class_num=2, gamma=args.gamma)
else:
FL = FocalLoss(class_num=2, gamma=args.gamma)
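    # focal loss for the domain classifier down-weights easy examples,
    # FL(p_t) = -(1 - p_t)**gamma * log(p_t); EFocalLoss is assumed to be
    # an exponentially-weighted variant of the same idea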
count_iter = 0
counter = 0
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fasterRCNN.train()
loss_temp = 0
start = time.time()
if epoch-1 in args.lr_decay_step:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter_s = iter(dataloader_s)
data_iter_t = iter(dataloader_t)
for step in range(iters_per_epoch):
        try:
            data_s = next(data_iter_s)
        except StopIteration:
            # restart the source loader when it is exhausted
            data_iter_s = iter(dataloader_s)
            data_s = next(data_iter_s)
        try:
            data_t = next(data_iter_t)
        except StopIteration:
            # restart the target loader when it is exhausted
            data_iter_t = iter(dataloader_t)
            data_t = next(data_iter_t)
#eta = 1.0
count_iter += 1
if args.cuda:
im_data = data_s[0].cuda()
im_info = data_s[1].cuda()
gt_boxes = data_s[2].cuda()
num_boxes = data_s[3].cuda()
# print(im_data.shape)
if(len(im_data.size()) != 4):
print("skipping due to image size")
counter += 1
continue
fasterRCNN.zero_grad()
outputs = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
rois, cls_prob, bbox_pred = outputs['predict']
rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox = outputs['loss']
out_d_pixel, out_d = outputs['d_loss']
rois_label = outputs['rois_label']
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
loss_temp += loss.item()
# domain label
domain_s = torch.zeros(out_d.size(0)).long().cuda()
# global alignment loss
dloss_s = 0.5 * FL(out_d, domain_s)
# local alignment loss
dloss_s_p = 0.5 * torch.mean(out_d_pixel ** 2)
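        # least-squares pixel alignment: source-domain pixel outputs are pushed
        # toward 0 here, while the target branch below pushes them toward 1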
if args.cuda:
im_data = data_t[0].cuda()
im_info = data_t[1].cuda()
gt_boxes = data_t[2].cuda()
num_boxes = data_t[3].cuda()
# print(im_data.size())
if(len(im_data.size()) != 4):
            print("skipping due to image size", im_data.size())
counter += 1
continue
outputs = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, target=True)
out_d_pixel, out_d = outputs
# domain label
domain_t = torch.ones(out_d.size(0)).long().cuda()
dloss_t = 0.5 * FL(out_d, domain_t)
# local alignment loss
dloss_t_p = 0.5 * torch.mean((1 - out_d_pixel) ** 2)
        # the eta-weighted alignment loss is the same for every dataset
        loss += (dloss_s + dloss_t + dloss_s_p + dloss_t_p) * args.eta
optimizer.zero_grad()
loss.backward()
if 'vgg' in args.net:
nn.utils.clip_grad_norm_(fasterRCNN.parameters(), 7)
else:
nn.utils.clip_grad_norm_(fasterRCNN.parameters(), 10)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= (args.disp_interval + 1)
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().item()
loss_rpn_box = rpn_loss_box.mean().item()
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.numel() - fg_cnt
else:
loss_rpn_cls = rpn_loss_cls.item()
loss_rpn_box = rpn_loss_box.item()
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
dloss_s = dloss_s.item()
dloss_t = dloss_t.item()
dloss_s_p = dloss_s_p.item()
dloss_t_p = dloss_t_p.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.numel() - fg_cnt
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
print(
"\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f dloss s: %.4f dloss t: %.4f dloss s pixel: %.4f dloss t pixel: %.4f eta: %.4f counter: %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box, dloss_s, dloss_t, dloss_s_p, dloss_t_p,
args.eta, counter))
counter = 0
loss_temp = 0
start = time.time()
        # per-step checkpoint name (immediately overwritten by the
        # per-epoch name below)
        save_name = os.path.join(output_dir,
                                 'session_{}_epoch_{}_step_{}.pth'.format(
                                     args.session, epoch, step))
        if epoch % 1 == 0:  # save a checkpoint every epoch
            save_name = os.path.join(output_dir, 'epoch_{}.pth'.format(epoch))
save_checkpoint({
'session': args.session,
'epoch': epoch,
'model': fasterRCNN.module.state_dict() if args.mGPUs else fasterRCNN.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
'best_score': 0
}, save_name, False, best_score)
print('save model: {}'.format(save_name))
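# A hypothetical invocation (flag names inferred from the args used above;
# see parser_func_multi.parse_args for the authoritative list -- the values
# shown here are illustrative assumptions, not defaults of this script):
#   python train_adv.py --dataset cityscape --dataset_t foggy_cityscape \
#       --net res101 --cuda --gamma 5 --eta 0.1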
|
{"hexsha": "7640b40ffab37ae6433c25470265fb6d33f8101c", "size": 10859, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_adv.py", "max_stars_repo_name": "strongwolf/CDG", "max_stars_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-04-15T11:35:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T12:24:25.000Z", "max_issues_repo_path": "train_adv.py", "max_issues_repo_name": "strongwolf/CDG", "max_issues_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-04-29T06:26:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:06:12.000Z", "max_forks_repo_path": "train_adv.py", "max_forks_repo_name": "strongwolf/CDG", "max_forks_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-29T06:26:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T06:26:42.000Z", "avg_line_length": 41.288973384, "max_line_length": 181, "alphanum_fraction": 0.5657058661, "include": true, "reason": "import numpy", "num_tokens": 2627}
|
import os
import shutil
from tqdm import tqdm
import numpy as np
import pandas as pd
from PIL import Image as im
train_csv = "MNIST\/train.csv"
test_csv = "MNIST\/test.csv"
label = []
for _type, csv in [['train', train_csv], ['test', test_csv]]:
# new folder
path = "MNIST\/" + _type
if os.path.isdir(path):
print("Path is exist, DROP it? [Y/N]")
isy = input()
if isy in ['Y','y', 'yes']:
shutil.rmtree(path)
else:
exit()
os.mkdir(path)
with open(csv) as f:
title = f.readline()
for n, line in tqdm(enumerate(f)):
            im_list = [int(v) for v in line.split(',')]  # avoid eval on untrusted CSV text
if _type == 'train':
im_label = im_list.pop(0)
label.append(im_label)
im_array = np.array(im_list).reshape([28, 28]).astype(np.uint8)
im_img = im.fromarray(im_array, mode="L")
img_path = os.path.join(path, "{n}.png".format(n = n))
im_img.save(img_path, format="PNG")
if _type == 'train':
df = pd.DataFrame(label, columns=['label'])
        df.to_csv('MNIST/label.csv')
print("{_type} done".format(_type = _type))
|
{"hexsha": "c6e759ac1104414e8703ddf058f3b70a67768f3e", "size": 1175, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNIST/to_img.py", "max_stars_repo_name": "chamhoo/FiaTorch", "max_stars_repo_head_hexsha": "f905255f5f9eccdd58f3693d9db71bd203a2fcf2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MNIST/to_img.py", "max_issues_repo_name": "chamhoo/FiaTorch", "max_issues_repo_head_hexsha": "f905255f5f9eccdd58f3693d9db71bd203a2fcf2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNIST/to_img.py", "max_forks_repo_name": "chamhoo/FiaTorch", "max_forks_repo_head_hexsha": "f905255f5f9eccdd58f3693d9db71bd203a2fcf2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3255813953, "max_line_length": 75, "alphanum_fraction": 0.5506382979, "include": true, "reason": "import numpy", "num_tokens": 310}
|
import torch
from torch import nn
from torch.nn import functional as F
from itertools import accumulate
import numpy as np
import os
import importlib
from utils.my_utils import carving_t, carving_t2, FeatExt, get_in_range, idx_cam2img, idx_world2cam, normalize_for_grid_sample
import model.conf as conf
if os.environ.get('IDR_USE_ENV', '0') == '1' and os.environ.get('IDR_CONF', '') != '':
print('override conf: ', os.environ.get('IDR_CONF'))
conf = importlib.import_module(os.environ.get('IDR_CONF'))
class IDRLoss(nn.Module):
def __init__(self):
super().__init__()
self.l1_loss = nn.L1Loss(reduction='sum')
def get_rgb_loss(self,rgb_values, rgb_gt, network_object_mask, object_mask):
if (network_object_mask & object_mask).sum() == 0:
return torch.tensor(0.0).cuda().float()
rgb_values = rgb_values[network_object_mask & object_mask]
rgb_gt = rgb_gt.reshape(-1, 3)[network_object_mask & object_mask]
rgb_loss = self.l1_loss(rgb_values, rgb_gt) / float(object_mask.shape[0])
return rgb_loss
def get_eikonal_loss(self, grad_theta):
if grad_theta.shape[0] == 0:
return torch.tensor(0.0).cuda().float()
eikonal_loss = ((grad_theta.norm(2, dim=1) - 1) ** 2).mean()
return eikonal_loss
def get_depth_loss(self, eikonal_points_hom, eikonal_output, depths, cams, size, center, far_thresh, far_att, near_thresh, near_att, smooth):
eikonal_points_hom = eikonal_points_hom.detach()
depths = depths.permute(1,0,2,3,4)
cams = cams.permute(1,0,2,3,4)
eikonal_points_hom[:,:,:3,0] = eikonal_points_hom[:,:,:3,0] / 2 * size.view(1,1,1) + center.view(1,1,3)
if conf.use_invalid: # treat out-of-mask depth as inf
dist, occ, in_range = carving_t(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc)
else: # ignore out-of-mask depth
dist, occ, in_range = carving_t2(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc) # scale is applied in cams NOTE: hard code
dist_r = (dist / size.view(1,1) * 2 + (-1.25) * (~in_range).to(torch.float32)).clamp(-1.25,1.25)
# loss = nn.SmoothL1Loss()(eikonal_output, -dist_r)
# single depth
# not_inside = (dist_r < int_thresh)
# inside_weight = not_inside + (~not_inside) * int_att
far_mask = dist_r.abs() > far_thresh
far_weight = far_mask * far_att + (~far_mask)
near_mask = dist_r.abs() < near_thresh
near_weight = near_mask * near_att + (~near_mask)
if smooth is not None:
loss = nn.SmoothL1Loss(reduction='none')(eikonal_output / smooth, -dist_r / smooth) * smooth
else:
loss = nn.L1Loss(reduction='none')(eikonal_output, -dist_r)
loss = (loss * far_weight * near_weight * in_range).mean()
return loss
def get_feat_loss2(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
mask = network_object_mask & object_mask
if (mask).sum() == 0:
return torch.tensor(0.0).float().cuda()
sample_mask = mask.view(feat.size()[0], -1)
hit_nums = sample_mask.sum(-1)
accu_nums = [0] + hit_nums.cumsum(0).tolist()
slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
loss = []
## for each image in minibatch
for view_i, slice_ in enumerate(slices):
if slice_.start < slice_.stop:
## projection
diff_surf_pts_slice = diff_surf_pts[slice_]
pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
# rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
## gathering
grid = pts_img[...,:2,0] # vm12
# feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
grid_in_range = get_in_range(grid_n)
valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
## calculation
diff = gathered_feat[:1] - gathered_feat[1:]
if uncerts is None:
gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # vcm1
diff_mask = diff.norm(dim=1, keepdim=True) < ((gathered_norm[:1,...] + gathered_norm[1:,...])/2*1)
print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
sample_loss = (diff * valid_mask * diff_mask).abs().mean()
else:
uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
sample_loss = ((diff.abs() * (-uncert).exp() + 0.01 * uncert)*valid_mask).mean()
else:
sample_loss = torch.zeros(1).float().cuda()
loss.append(sample_loss)
loss = sum(loss) / len(loss)
return loss
def get_feat_loss_corr(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
mask = network_object_mask & object_mask
if (mask).sum() == 0:
return torch.tensor(0.0).float().cuda()
sample_mask = mask.view(feat.size()[0], -1)
hit_nums = sample_mask.sum(-1)
accu_nums = [0] + hit_nums.cumsum(0).tolist()
slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
loss = []
## for each image in minibatch
for view_i, slice_ in enumerate(slices):
if slice_.start < slice_.stop:
## projection
diff_surf_pts_slice = diff_surf_pts[slice_]
pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
# rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
## gathering
grid = pts_img[...,:2,0] # vm12
# feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
grid_in_range = get_in_range(grid_n)
valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
## calculation
gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # v1m1
corr = (gathered_feat[:1] * gathered_feat[1:]).sum(dim=1, keepdim=True) \
/ gathered_norm[:1].clamp(min=1e-9) / gathered_norm[1:].clamp(min=1e-9) # (v-1)1m1
corr_loss = (1 - corr).abs()
if uncerts is None:
diff_mask = corr_loss < 0.5
print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
sample_loss = (corr_loss * valid_mask * diff_mask).mean()
else:
uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
sample_loss = ((corr_loss * (-uncert).exp() + uncert)*valid_mask).mean()
else:
sample_loss = torch.zeros(1).float().cuda()
loss.append(sample_loss)
loss = sum(loss) / len(loss)
return loss
def get_surf_loss(self, surf_indicator_output, network_object_mask, object_mask_true):
mask = network_object_mask & object_mask_true
N = mask.sum()
gt1 = torch.ones(N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
gt0 = torch.zeros(surf_indicator_output.size()[0]-N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
gt = torch.cat([gt1, gt0], dim=0)
loss = nn.BCEWithLogitsLoss(reduction='mean')(surf_indicator_output, gt)
return loss
def forward(self, model_outputs, ground_truth, train_progress, n_img):
rgb_gt = ground_truth['rgb'].cuda()
network_object_mask = model_outputs['network_object_mask']
object_mask = model_outputs['object_mask']
ground_truth['size'] = ground_truth['size'][:1]
ground_truth['center'] = ground_truth['center'][:1]
if conf.enable_rgb:
rgb_loss = self.get_rgb_loss(model_outputs['rgb_values'], rgb_gt, network_object_mask, object_mask)
else:
rgb_loss = torch.zeros(1).float().cuda()
eikonal_loss = self.get_eikonal_loss(model_outputs['grad_theta'])
depth_loss = self.get_depth_loss(model_outputs['eikonal_points_hom'], model_outputs['eikonal_output'], ground_truth['depths'], ground_truth['depth_cams'], ground_truth['size'], ground_truth['center'],
far_thresh=conf.far_thresh, far_att=conf.far_att(train_progress),
near_thresh=conf.near_thresh, near_att=conf.near_att(train_progress),
smooth=conf.smooth(train_progress))
if conf.phase[0] <= train_progress and conf.enable_feat:
feat_loss = self.get_feat_loss_corr(model_outputs['diff_surf_pts'], model_outputs.get('uncerts'), *[ground_truth[attr] for attr in ['feat', 'cam', 'feat_src', 'src_cams', 'size', 'center']], network_object_mask, object_mask)
else:
feat_loss = torch.zeros(1).float().cuda()
if conf.phase[0] <= train_progress:
surf_loss = self.get_surf_loss(model_outputs['surf_indicator_output'], network_object_mask, model_outputs['object_mask_true'])
else:
surf_loss = torch.zeros(1).float().cuda()
loss = rgb_loss * conf.rgb_weight(train_progress) + \
eikonal_loss * conf.eikonal_weight + \
surf_loss * conf.surf_weight + \
feat_loss * conf.feat_weight(train_progress) + \
depth_loss * conf.depth_weight(train_progress)
return {
'loss': loss,
'rgb_loss': rgb_loss,
'eikonal_loss': eikonal_loss,
'depth_loss': depth_loss,
'feat_loss': feat_loss,
'surf_loss': surf_loss
}
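

if __name__ == '__main__':
    # A minimal self-check of the eikonal term in isolation (an assumption:
    # CPU tensors suffice for this one method; the rgb/feat losses above
    # expect CUDA tensors and full model outputs).
    _grads = torch.randn(1024, 3)
    print('eikonal loss on random gradients:',
          IDRLoss().get_eikonal_loss(_grads).item())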
|
{"hexsha": "68c9488a495919a38f2ff870419c8d832b14221b", "size": 11909, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/loss.py", "max_stars_repo_name": "arthurlirui/MVSDF", "max_stars_repo_head_hexsha": "0b1014682e9b5cd5a92fea715d26ebc9845da4bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2022-02-11T12:04:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:43:59.000Z", "max_issues_repo_path": "model/loss.py", "max_issues_repo_name": "arthurlirui/MVSDF", "max_issues_repo_head_hexsha": "0b1014682e9b5cd5a92fea715d26ebc9845da4bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-22T12:57:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T12:57:43.000Z", "max_forks_repo_path": "model/loss.py", "max_forks_repo_name": "arthurlirui/MVSDF", "max_forks_repo_head_hexsha": "0b1014682e9b5cd5a92fea715d26ebc9845da4bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2022-02-13T11:47:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T12:07:21.000Z", "avg_line_length": 54.1318181818, "max_line_length": 237, "alphanum_fraction": 0.5914854312, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3092}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 15:15:55 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import os
import sys
import logging
import numpy as np
from functools import partial
from keras.optimizers import Adadelta
from sklearn.metrics import confusion_matrix
prog = os.path.basename(__file__)
codedir = os.path.join(os.path.dirname(__file__),"..","code")
sys.path.append(codedir)
from Utils import *
from PlotUtils import *
from SpeechCorpus import Timit
from AcousticModels import *
from TrainUtils import train_model,weighted_categorical_crossentropy
from AcousticDataGenerator import AcousticDataGenerator
#logfile = prog+'.log'
#rootlog = initlog(logfile,level=logging.DEBUG);
#rootlog.info('Starting new session');
if len(sys.argv)>1:
corpus = Timit(root=sys.argv[1]);
else:
corpus = Timit(root='C:/Users/nxa17016/ML/pyml/RNN/assignment3/dataset')
corpus.split_validation();
#rootlog.info(corpus.report_statistics(folder='report/images'));
adg = AcousticDataGenerator(corpus=corpus,mbatch_size=32,
mfcc_win=0.0125,mfcc_step=0.005,
ce_encoding_mode='best',
mode='phoneme', model_silence=True);
adg.fit_train(n_samples=1000);
model = bidi_lstm(input_dim=adg.feature_dim,units=20,output_dim=adg.n_classes,
batchnorm=True,after_dropout=0.0);
train_model(model,adg.train_generator(),adg.valid_generator(),'bidi_gru_20',
epochs=1,steps_per_epoch=adg.nb_train-100,validation_steps=adg.nb_valid-10,
verbose=1,save_period=0,optimizer=Adadelta(),report_stats=True,
class_names=list(adg.outmap[0].keys()));
|
{"hexsha": "ae8f6695bbd5479f68722beaaddc25f79b3991ec", "size": 1659, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_ce3.py", "max_stars_repo_name": "madhurkashyap/boundary_detection", "max_stars_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_ce3.py", "max_issues_repo_name": "madhurkashyap/boundary_detection", "max_issues_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_ce3.py", "max_forks_repo_name": "madhurkashyap/boundary_detection", "max_forks_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.18, "max_line_length": 88, "alphanum_fraction": 0.7251356239, "include": true, "reason": "import numpy", "num_tokens": 429}
|
import pandas as pd
import numpy as np
### MULTIINDEX
df = pd.DataFrame(np.random.rand(4, 2),
index=[['Temperatura', 'Temperatura', 'Fuente carbono', 'Fuente carbono'],
['30', '35', 'glc', 'ace']],
columns=['Gen1', 'Gen2'])
print(df)
df_inverso = pd.DataFrame(np.random.rand(2, 4),
                          columns=[['Temperatura', 'Temperatura', 'Fuente carbono', 'Fuente carbono'],
                                   ['30', '35', 'glc', 'ace']],
                          index=['Gen1', 'Gen2'])
### Constructors
pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [1, 2, 1, 2]])
pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)])
pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
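# the three constructors above all build the same 2-level MultiIndex:
# MultiIndex([('a', 1), ('a', 2), ('b', 1), ('b', 2)])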
### MultiIndex on both the index and the columns
expresion = pd.DataFrame(np.random.rand(4, 4),
columns=[['Temperatura', 'Temperatura', 'Fuente carbono', 'Fuente carbono'],
['30', '35', 'glc', 'ace']],
index=[['E. coli', 'E. coli', 'P. putida', 'P. putida'],
['Gen1', 'Gen2', 'Gen1', 'Gen2']])
print(expresion)
expresion.index.names = ['Organismo', 'Gen']
print(expresion)
# Access with loc
print(expresion.loc['E. coli', 'Temperatura'])
print(expresion.loc[('E. coli','Gen1'), 'Temperatura'])
# Access with xs
print(expresion.xs("Gen1", level="Gen"))
print(expresion.xs("E. coli", level="Organismo", axis=0))
# xs access on the columns axis
expresion.columns.names = ['Estres', 'Variacion']
print(expresion)
print(expresion.xs("Temperatura", level="Estrés", axis=1))
print(expresion.xs("glc", level="Variacion", axis=1))
### Groupby : index
df3 = pd.DataFrame({'expresion':np.random.rand(6)},
index=['gen1', 'gen2', 'gen3', 'gen1', 'gen2', 'gen3'])
print(df3)
grupos = df3.groupby(level=0)
print(grupos.mean())
### Groupby : column
df4 = pd.DataFrame({'gen': ['gen1', 'gen2', 'gen3', 'gen1', 'gen2', 'gen3'],
'expresion': np.random.rand(6)}, columns=['gen', 'expresion'])
print(df4)
grupos2 = df4.groupby('gen')
print(grupos2.sum())
###>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Matplotlib
### import matplotlib as mpl (another way to import matplotlib)
import matplotlib.pyplot as plt
### approaches:
# Procedural (the figure is built up by calling pyplot functions)
# Object-oriented (the figure is built up through methods
# called on the axes object)
# create the figure
fig = plt.figure()
# create an axes object
ax = plt.axes()
x = np.linspace(0, 10, 1000)
ax.plot(x, np.sin(x))
plt.show()
ax.set(xlim=(0, 10), ylim=(-2, 2),   # limits
       xlabel="x", ylabel="sen(x)",  # axis labels
       title="grafiquita")           # title
# plt.show() was already called above and matplotlib has discarded the
# figure, so these settings are never rendered
plt.show()
### plt.subplots()
fig, ax = plt.subplots()
plt.plot(x, np.sin(x), "-.")
plt.plot(x, np.cos(x), "o")
plt.show()
### organismos = np.random.choice(['procariotas', 'eucariotas', 'arqueas'], 5, p=[0.5, 0.3, 0.2])
### costo_beneficio['organismos'] = organismos
### costo_beneficio
### https://pandas-profiling.github.io/pandas-profiling/docs/master/rtd/
### https://www.tabnine.com/ this is the link to the "smart" program for code prediction
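### A minimal sketch of the profiling tool linked above (assumes the
### pandas-profiling package is installed; it is not used elsewhere here):
# from pandas_profiling import ProfileReport
# ProfileReport(df4, title="expression report").to_file("report.html")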
|
{"hexsha": "c5492ef96038be6d7d340f723d691abe4a6a63ac", "size": 3294, "ext": "py", "lang": "Python", "max_stars_repo_path": "ejer/Dia_9_2.py", "max_stars_repo_name": "zara-ms/python_class-2", "max_stars_repo_head_hexsha": "edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ejer/Dia_9_2.py", "max_issues_repo_name": "zara-ms/python_class-2", "max_issues_repo_head_hexsha": "edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-01T17:05:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T17:05:15.000Z", "max_forks_repo_path": "ejer/Dia_9_2.py", "max_forks_repo_name": "zara-ms/python_class-2", "max_forks_repo_head_hexsha": "edd5a4b7a3b3f2759f63208bbf42d5f9e7acb45b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-04-09T19:06:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T01:17:50.000Z", "avg_line_length": 34.3125, "max_line_length": 98, "alphanum_fraction": 0.5731633273, "include": true, "reason": "import numpy", "num_tokens": 993}
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in a variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data=np.genfromtxt(path,delimiter=",",skip_header=1)
census = np.concatenate((data,new_record),axis=0)
# --------------
#Code starts here
import numpy as np
age = census[:,0]
print(age)
max_age = np.max(age)
print(max_age)
min_age = np.min(age)
print(min_age)
age_mean = np.mean(age)
print(age_mean)
age_std = np.std(age)
print(age_std)
# --------------
#Code starts here
import numpy as np
race_0 = census[census[:,2]==0]
race_1 = census[census[:,2]==1]
race_2 = census[census[:,2]==2]
race_3 = census[census[:,2]==3]
race_4 = census[census[:,2]==4]
len_0 =len(race_0)
len_1 =len(race_1)
len_2 =len(race_2)
len_3 =len(race_3)
len_4 =len(race_4)
race_list=[len_0, len_1,len_2, len_3, len_4]
#Storing the race with minimum length into a variable
minority_race=race_list.index(min(race_list))
# --------------
#Code starts here
import numpy as np
senior_citizens = census[census[:,0]>60]
#print(senior_citizens)
working_hours_sum = senior_citizens.sum(axis=0)[6]
print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
import numpy as np
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
|
{"hexsha": "4d93519cfc1e5aeb7074c9e7b996fa8ceda88586", "size": 1565, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpybasic/code.py", "max_stars_repo_name": "varunbonagiri/ga-learner-dsb-repo", "max_stars_repo_head_hexsha": "f7055de15287dbd3010bac72458697965a168cd7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numpybasic/code.py", "max_issues_repo_name": "varunbonagiri/ga-learner-dsb-repo", "max_issues_repo_head_hexsha": "f7055de15287dbd3010bac72458697965a168cd7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "numpybasic/code.py", "max_forks_repo_name": "varunbonagiri/ga-learner-dsb-repo", "max_forks_repo_head_hexsha": "f7055de15287dbd3010bac72458697965a168cd7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4383561644, "max_line_length": 61, "alphanum_fraction": 0.6690095847, "include": true, "reason": "import numpy", "num_tokens": 484}
|
#########
## map ##
#########
# Single input
@generated function map{T}(f, a1::StaticArray{T})
newtype = :(similar_type($a1, promote_op(f, T)))
exprs = [:(f(a1[$j])) for j = 1:length(a1)]
return quote
$(Expr(:meta, :inline))
$(Expr(:call, newtype, Expr(:tuple, exprs...)))
end
end
# Two inputs
@generated function map{T1,T2}(f, a1::StaticArray{T1}, a2::StaticArray{T2})
if size(a1) != size(a2)
error("Dimensions must match. Got sizes $(size(a1)) and $(size(a2))")
end
newtype = :(similar_type($a1, promote_op(f, T1, T2)))
exprs = [:(f(a1[$j], a2[$j])) for j = 1:length(a1)]
return quote
$(Expr(:meta, :inline))
$(Expr(:call, newtype, Expr(:tuple, exprs...)))
end
end
# TODO these assume linear fast...
@generated function map{T1,T2}(f, a1::StaticArray{T1}, a2::AbstractArray{T2})
newtype = :(similar_type($a1, promote_op(f, T1, T2)))
exprs = [:(f(a1[$j], a2[$j])) for j = 1:length(a1)]
return quote
$(Expr(:meta, :inline))
if size(a1) != size(a2)
error("Dimensions must match. Got sizes $(size(a1)) and $(size(a2))")
end
@inbounds return $(Expr(:call, newtype, Expr(:tuple, exprs...)))
end
end
@generated function map{T1,T2}(f, a1::AbstractArray{T1}, a2::StaticArray{T2})
newtype = :(similar_type($a2, promote_op(f, T1, T2)))
exprs = [:(f(a1[$j], a2[$j])) for j = 1:length(a2)]
return quote
$(Expr(:meta, :inline))
@boundscheck if size(a1) != size(a2)
error("Dimensions must match. Got sizes $(size(a1)) and $(size(a2))")
end
@inbounds return $(Expr(:call, newtype, Expr(:tuple, exprs...)))
end
end
# TODO General case involving arbitrary many inputs?
############
## reduce ##
############
@generated function reduce(op, a1::StaticArray)
if length(a1) == 1
return :(a1[1])
else
expr = :(op(a1[1], a1[2]))
for j = 3:length(a1)
expr = :(op($expr, a1[$j]))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
@generated function reduce(op, v0, a1::StaticArray)
if length(a1) == 0
return :(v0)
else
expr = :(op(v0, a1[1]))
for j = 2:length(a1)
expr = :(op($expr, a1[$j]))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
###############
## mapreduce ##
###############
# Single array
@generated function mapreduce(f, op, a1::StaticArray)
if length(a1) == 1
return :(f(a1[1]))
else
expr = :(op(f(a1[1]), f(a1[2])))
for j = 3:length(a1)
expr = :(op($expr, f(a1[$j])))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
@generated function mapreduce(f, op, v0, a1::StaticArray)
if length(a1) == 0
return :(v0)
else
expr = :(op(v0, f(a1[1])))
for j = 2:length(a1)
expr = :(op($expr, f(a1[$j])))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
# Two arrays (e.g. dot has f(a,b) = a' * b, op = +)
@generated function mapreduce(f, op, a1::StaticArray, a2::StaticArray)
if size(a1) != size(a2)
error("Dimensions must match. Got sizes $(size(a)) and $(size(a2))")
end
if length(a1) == 1
return :(f(a1[1], a2[1]))
else
expr = :(op(f(a1[1], a2[1]), f(a1[2], a2[2])))
for j = 3:length(a1)
expr = :(op($expr, f(a1[$j], a2[$j])))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
@generated function mapreduce(f, op, v0, a1::StaticArray, a2::StaticArray)
if size(a1) != size(a2)
error("Dimensions must match. Got sizes $(size(a)) and $(size(a2))")
end
if length(a1) == 0
return :(v0)
else
expr = :(op(v0, f(a1[1], a2[1])))
for j = 2:length(a1)
expr = :(op($expr, f(a1[$j], a2[$j])))
end
return quote
$(Expr(:meta, :inline))
$expr
end
end
end
# TODO General case involving arbitrary many inputs?
###############
## broadcast ##
###############
# Single input version
@inline broadcast(f, a::StaticArray) = map(f, a)
# Two input versions
@generated function broadcast(f, a1::StaticArray, a2::StaticArray)
if size(a1) == size(a2)
return quote
$(Expr(:meta, :inline))
map(f, a1, a2)
end
else
s1 = size(a1)
s2 = size(a2)
ndims = max(length(s1), length(s2))
s = Vector{Int}(ndims)
expands1 = Vector{Bool}(ndims)
expands2 = Vector{Bool}(ndims)
for i = 1:ndims
if length(s1) < i
s[i] = s2[i]
expands1[i] = false
expands2[i] = s2[i] > 1
elseif length(s2) < i
s[i] = s1[i]
expands1[i] = s1[i] > 1
expands2[i] = false
else
                s[i] = max(s1[i], s2[i])
@assert s1[i] == 1 || s1[i] == s[i]
@assert s2[i] == 1 || s2[i] == s[i]
expands1[i] = s1[i] > 1
expands2[i] = s2[i] > 1
end
end
s = (s...)
L = prod(s)
if s == s1
newtype = :( similar_type($a1, promote_op(f, $(eltype(a1)), $(eltype(a2)))) )
else
newtype = :( similar_type($a1, promote_op(f, $(eltype(a1)), $(eltype(a2))), $s) )
end
exprs = Vector{Expr}(L)
i = 1
ind = ones(Int, ndims)
while i <= L
ind1 = [expands1[j] ? ind[j] : 1 for j = 1:length(s1)]
ind2 = [expands2[j] ? ind[j] : 1 for j = 1:length(s2)]
exprs[i] = Expr(:call, :f, Expr(:ref, :a1, ind1...), Expr(:ref, :a2, ind2...))
i += 1
ind[1] += 1
j = 1
while j < length(s)
if ind[j] > s[j]
ind[j] = 1
ind[j+1] += 1
else
break
end
j += 1
end
end
return quote
$(Expr(:meta, :inline))
@inbounds return $(Expr(:call, newtype, Expr(:tuple, exprs...)))
end
end
end
@inline broadcast(f, a::StaticArray, n::Number) = map(x -> f(x, n), a)
@inline broadcast(f, n::Number, a::StaticArray) = map(x -> f(n, x), a)
# Other two-input versions with AbstractArray
##########
## map! ##
##########
# Single input
@generated function map!{F}(f::F, out::StaticArray, a1::StaticArray)
exprs = [:(out[$j] = f(a1[$j])) for j = 1:length(a1)]
return quote
$(Expr(:meta, :inline))
@inbounds $(Expr(:block, exprs...))
end
end
# Two inputs
@generated function map!{F}(f::F, out::StaticArray, a1::StaticArray, a2::StaticArray)
    if size(a1) != size(a2)
        error("Dimensions must match. Got sizes $(size(a1)) and $(size(a2))")
    end
    if size(out) != size(a1)
        error("Dimensions must match. Got sizes $(size(out)) and $(size(a1))")
    end
exprs = [:(out[$j] = f(a1[$j], a2[$j])) for j = 1:length(a1)]
return quote
#$(Expr(:meta, :inline))
@inbounds $(Expr(:block, exprs...))
end
end
################
## broadcast! ##
################
@inline broadcast!{F}(f::F, out::StaticArray, a::StaticArray) = map!(f, out, a)
@inline broadcast!(f::typeof(identity), out::StaticArray, a::StaticArray) = map!(f, out, a)
# Two input versions
@generated function broadcast!{F}(f::F, out::StaticArray, a1::StaticArray, a2::StaticArray)
if size(a1) == size(a2) && size(out) == size(a1)
return quote
$(Expr(:meta, :inline))
@inbounds map!(f, out, a1, a2)
end
else
s1 = size(a1)
s2 = size(a2)
ndims = max(length(s1), length(s2))
s = Vector{Int}(ndims)
expands1 = Vector{Bool}(ndims)
expands2 = Vector{Bool}(ndims)
for i = 1:ndims
if length(s1) < i
s[i] = s2[i]
expands1[i] = false
expands2[i] = s2[i] > 1
elseif length(s2) < i
s[i] = s1[i]
expands1[i] = s1[i] > 1
expands2[i] = false
else
                s[i] = max(s1[i], s2[i])
@assert s1[i] == 1 || s1[i] == s[i]
@assert s2[i] == 1 || s2[i] == s[i]
expands1[i] = s1[i] > 1
expands2[i] = s2[i] > 1
end
end
s = (s...)
L = prod(s)
if s != size(out)
error("Dimension mismatch")
end
exprs = Vector{Expr}(L)
i = 1
ind = ones(Int, ndims)
while i <= L
ind1 = [expands1[j] ? ind[j] : 1 for j = 1:length(s1)]
ind2 = [expands2[j] ? ind[j] : 1 for j = 1:length(s2)]
index1 = sub2ind(s1, ind1...)
index2 = sub2ind(s2, ind2...)
exprs[i] = :(out[$i] = $(Expr(:call, :f, Expr(:ref, :a1, index1), Expr(:ref, :a2, index2))))
i += 1
ind[1] += 1
j = 1
while j < length(s)
if ind[j] > s[j]
ind[j] = 1
ind[j+1] += 1
else
break
end
j += 1
end
end
return quote
$(Expr(:meta, :inline))
@inbounds $(Expr(:block, exprs...))
end
end
end
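###########
## usage ##
###########

# A minimal sketch of the generated methods above (illustrative only; the
# SVector values are assumptions, not part of this file):
#
#     v1 = SVector(1.0, 2.0, 3.0)
#     v2 = SVector(4.0, 5.0, 6.0)
#     map(+, v1, v2)            # SVector(5.0, 7.0, 9.0)
#     reduce(+, v1)             # 6.0
#     mapreduce(*, +, v1, v2)   # 32.0, a dot-product style reduction
#     broadcast(+, v1, 10.0)    # SVector(11.0, 12.0, 13.0)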
|
{"hexsha": "aa3b5e84e206a99ca8a5504d79de3c3c5df2a94a", "size": 9664, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mapreduce.jl", "max_stars_repo_name": "JuliaPackageMirrors/StaticArrays.jl", "max_stars_repo_head_hexsha": "c453f137c163fa435659b263d8af8e6f87f07d42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mapreduce.jl", "max_issues_repo_name": "JuliaPackageMirrors/StaticArrays.jl", "max_issues_repo_head_hexsha": "c453f137c163fa435659b263d8af8e6f87f07d42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mapreduce.jl", "max_forks_repo_name": "JuliaPackageMirrors/StaticArrays.jl", "max_forks_repo_head_hexsha": "c453f137c163fa435659b263d8af8e6f87f07d42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4043715847, "max_line_length": 104, "alphanum_fraction": 0.4620240066, "num_tokens": 2990}
|
# use tensorflow
try:
    from base import layers
except ImportError:
    print('[%s] no tensorflow.' % __name__)
# do not use tensorflow
from base import ngram
from base import parser
from base import wblib as wb
from base import matlib as mlib
from base import reader
from base import vocab
from base import sampling as sp
from base import word2vec
from base import learningrate as lr
from base import log
from base import seq
import numpy as np
# from scipy.misc import logsumexp
from scipy.special import logsumexp
from collections import OrderedDict
|
{"hexsha": "35d0b62d4439c53fe8d73b6a5e39fe91c154e6a2", "size": 578, "ext": "py", "lang": "Python", "max_stars_repo_path": "base/__init__.py", "max_stars_repo_name": "thu-spmi/semi-EBM", "max_stars_repo_head_hexsha": "393e3ea3566dd60c48872a5c573a335e8e802707", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-18T14:21:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T03:39:13.000Z", "max_issues_repo_path": "base/__init__.py", "max_issues_repo_name": "thu-spmi/semi-EBM", "max_issues_repo_head_hexsha": "393e3ea3566dd60c48872a5c573a335e8e802707", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "base/__init__.py", "max_forks_repo_name": "thu-spmi/semi-EBM", "max_forks_repo_head_hexsha": "393e3ea3566dd60c48872a5c573a335e8e802707", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-12T07:02:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-12T07:02:23.000Z", "avg_line_length": 23.12, "max_line_length": 44, "alphanum_fraction": 0.7560553633, "include": true, "reason": "import numpy,from scipy", "num_tokens": 137}
|
import numpy as np
import sklearn.datasets
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def umatrix(som_model, use_colorbar=True, **kwargs):
"""Plot Self-organizing map U-Matrix
Args:
som_model (minisom.MiniSom): MiniSom Model
use_colorbar (bool): Flag to enable colorbar on figure plot
kwargs (dict): Parameters to matplotlib.pyplot.imshow function
Returns:
matplotlib.figure.Figure: grid figure
"""
im = plt.imshow(som_model.distance_map(), **kwargs)
if use_colorbar: plt.colorbar(im)
def umatrix_labeled(som_model,
data,
labels,
colors,
markers,
use_colorbar=True,
plot_lbl_args=None,
**kwargs):
"""Plot a U-Matrix with labels in each pixel
Args:
som_model (minisom.MiniSom): MiniSom Model
        data (np.ndarray): n-dimensional data to stimulate neurons and generate
        the activation map
        labels (np.ndarray): 1-dimensional array with the label of each sample.
        Must hold discrete values starting at 1 (the code below subtracts 1
        to index into colors and markers)
colors (list): List of color to use in each class
markers (list): List of markers to use in each class
use_colorbar (bool): Flag to enable colorbar on figure plot
plot_lbl_args (dict): Parameters to matplotlib.pyplot.plot function used
on plot labels
kwargs (dict): Parameters to matplotlib.pyplot.imshow function
Returns:
None
"""
if not plot_lbl_args:
plot_lbl_args = {
'markerfacecolor': 'None',
'markersize': 12,
'markeredgewidth': 2
}
im = plt.imshow(som_model.distance_map(), **kwargs)
if use_colorbar: plt.colorbar(im)
for idx, de in enumerate(data):
label_idx = labels[idx] - 1
winner = som_model.winner(de)
plt.plot(winner[1], winner[0],
markers[label_idx],
markeredgecolor=colors[label_idx],
**plot_lbl_args)
def hitmap(som_model, data, use_colorbar=True, **kwargs):
"""Plot Self-organizing map hitmap
Args:
som_model (minisom.MiniSom): MiniSom Model
        data (np.ndarray): n-dimensional data to stimulate neurons and generate
        the activation map
use_colorbar (bool): Flag to enable colorbar on figure plot
kwargs (dict): Parameters to matplotlib.pyplot.imshow function
Returns:
None
"""
frequencies = som_model.activation_response(data).astype(int)
im = plt.imshow(frequencies, **kwargs)
if use_colorbar: plt.colorbar(im)
for (i, j), value in np.ndenumerate(frequencies):
plt.text(j, i, value, verticalalignment='center',
horizontalalignment='center')
def heatmap(som_model, feature_names, grid_spec, use_colorbar=True, **kwargs):
"""Plot Self-organizing map heatmap
Args:
som_model (minisom.MiniSom): MiniSom Model
feature_names (list): list of feature names
grid_spec (tuple): tuple with grid plot dimensions
use_colorbar (bool): Flag to enable colorbar on figure plot
kwargs (dict): Parameters to matplotlib.pyplot.imshow function
Returns:
None
"""
weights = som_model.get_weights()
for i, fname in enumerate(feature_names):
plt.subplot(*grid_spec, i + 1)
plt.title(fname)
im = plt.imshow(weights[:, :, i], **kwargs)
if use_colorbar: plt.colorbar(im)
def grid_pie_labeled(som_model, data, labels_name) -> tuple:
"""
Args:
som_model (minisom.MiniSom): MiniSom Model
data (np.ndarray): n-dimensional data to estimulate neurons and generate
activation map
labels_name (np.ndarray): 1-dimensional data with name of data label. Must be a
string values (e. g. 'classe1', 'classe2', ...)
Returns:
tuple: Tuple with patches and texts used in each plotted neuron
"""
patches, texts = None, None
labels_map = som_model.labels_map(data, labels_name)
n_neurons, m_neurons = som_model.get_weights().shape[0:2]
grid_spec = gridspec.GridSpec(n_neurons, m_neurons, plt.gcf())
for position in labels_map.keys():
label_fracs = [labels_map[position][l] for l in labels_name]
plt.subplot(grid_spec[n_neurons-1-position[1],
position[0]], aspect=1)
patches, texts = plt.pie(label_fracs)
return (patches, texts)
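

if __name__ == '__main__':
    # A minimal usage sketch (an assumption: the `minisom` package is
    # installed; the SOM size and iteration count here are arbitrary).
    from minisom import MiniSom

    iris = sklearn.datasets.load_iris()
    som = MiniSom(7, 7, iris.data.shape[1], sigma=1.0, learning_rate=0.5)
    som.train_random(iris.data, 500)

    umatrix(som, cmap='bone_r')
    plt.show()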
|
{"hexsha": "139227ae06f3fb8ff387b3d323d8bfe0322f4ebe", "size": 4808, "ext": "py", "lang": "Python", "max_stars_repo_path": "minisom_plot/evaluation.py", "max_stars_repo_name": "M3nin0/minisom.plot", "max_stars_repo_head_hexsha": "9922a9652d674ccc0fbc65af39be3a5796f1d83e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "minisom_plot/evaluation.py", "max_issues_repo_name": "M3nin0/minisom.plot", "max_issues_repo_head_hexsha": "9922a9652d674ccc0fbc65af39be3a5796f1d83e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "minisom_plot/evaluation.py", "max_forks_repo_name": "M3nin0/minisom.plot", "max_forks_repo_head_hexsha": "9922a9652d674ccc0fbc65af39be3a5796f1d83e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3170731707, "max_line_length": 87, "alphanum_fraction": 0.5958818636, "include": true, "reason": "import numpy", "num_tokens": 1070}
|
c
c
c ###################################################
c ## COPYRIGHT (C) 1991 by Jay William Ponder ##
c ## All Rights Reserved ##
c ###################################################
c
c ###############################################################
c ## ##
c ## subroutine eimptor2 -- atomwise impr. torsion Hessian ##
c ## ##
c ###############################################################
c
c
c "eimptor2" calculates second derivatives of the improper
c torsion energy for a single atom
c
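c     each Hessian element is accumulated below via the chain rule:
c
c        d2E/(dxi dxj) = dE/dphi * d2phi/(dxi dxj)
c                           + d2E/dphi2 * (dphi/dxi) * (dphi/dxj)
c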
c
subroutine eimptor2 (i)
implicit none
include 'sizes.i'
include 'atoms.i'
include 'bound.i'
include 'group.i'
include 'hessn.i'
include 'imptor.i'
include 'torpot.i'
integer i,kitors
integer ia,ib,ic,id
real*8 dedphi,d2edphi2
real*8 rcb,fgrp
real*8 v1,v2,v3
real*8 c1,c2,c3
real*8 s1,s2,s3
real*8 sine,cosine
real*8 sine2,cosine2
real*8 sine3,cosine3
real*8 xia,yia,zia
real*8 xib,yib,zib
real*8 xic,yic,zic
real*8 xid,yid,zid
real*8 xba,yba,zba
real*8 xcb,ycb,zcb
real*8 xdc,ydc,zdc
real*8 xca,yca,zca
real*8 xdb,ydb,zdb
real*8 xt,yt,zt,xu,yu,zu
real*8 xtu,ytu,ztu
real*8 rt2,ru2,rtru
real*8 dphi1,dphi2,dphi3
real*8 d2phi1,d2phi2,d2phi3
real*8 dphidxt,dphidyt,dphidzt
real*8 dphidxu,dphidyu,dphidzu
real*8 dphidxia,dphidyia,dphidzia
real*8 dphidxib,dphidyib,dphidzib
real*8 dphidxic,dphidyic,dphidzic
real*8 dphidxid,dphidyid,dphidzid
real*8 xycb2,xzcb2,yzcb2
real*8 rcbxt,rcbyt,rcbzt,rcbt2
real*8 rcbxu,rcbyu,rcbzu,rcbu2
real*8 dphidxibt,dphidyibt,dphidzibt
real*8 dphidxibu,dphidyibu,dphidzibu
real*8 dphidxict,dphidyict,dphidzict
real*8 dphidxicu,dphidyicu,dphidzicu
real*8 dxiaxia,dyiayia,dziazia
real*8 dxibxib,dyibyib,dzibzib
real*8 dxicxic,dyicyic,dziczic
real*8 dxidxid,dyidyid,dzidzid
real*8 dxiayia,dxiazia,dyiazia
real*8 dxibyib,dxibzib,dyibzib
real*8 dxicyic,dxiczic,dyiczic
real*8 dxidyid,dxidzid,dyidzid
real*8 dxiaxib,dxiayib,dxiazib
real*8 dyiaxib,dyiayib,dyiazib
real*8 dziaxib,dziayib,dziazib
real*8 dxiaxic,dxiayic,dxiazic
real*8 dyiaxic,dyiayic,dyiazic
real*8 dziaxic,dziayic,dziazic
real*8 dxiaxid,dxiayid,dxiazid
real*8 dyiaxid,dyiayid,dyiazid
real*8 dziaxid,dziayid,dziazid
real*8 dxibxic,dxibyic,dxibzic
real*8 dyibxic,dyibyic,dyibzic
real*8 dzibxic,dzibyic,dzibzic
real*8 dxibxid,dxibyid,dxibzid
real*8 dyibxid,dyibyid,dyibzid
real*8 dzibxid,dzibyid,dzibzid
real*8 dxicxid,dxicyid,dxiczid
real*8 dyicxid,dyicyid,dyiczid
real*8 dzicxid,dzicyid,dziczid
logical proceed
c
c
c compute Hessian elements for the improper torsional angles
c
do kitors = 1, nitors
ia = iitors(1,kitors)
ib = iitors(2,kitors)
ic = iitors(3,kitors)
id = iitors(4,kitors)
c
c decide whether to compute the current interaction
c
proceed = .true.
if (use_group) call groups (proceed,fgrp,ia,ib,ic,id,0,0)
if (proceed) proceed = (i.eq.ia .or. i.eq.ib .or.
& i.eq.ic .or. i.eq.id)
c
c compute the value of the torsional angle
c
if (proceed) then
xia = x(ia)
yia = y(ia)
zia = z(ia)
xib = x(ib)
yib = y(ib)
zib = z(ib)
xic = x(ic)
yic = y(ic)
zic = z(ic)
xid = x(id)
yid = y(id)
zid = z(id)
xba = xib - xia
yba = yib - yia
zba = zib - zia
xcb = xic - xib
ycb = yic - yib
zcb = zic - zib
xdc = xid - xic
ydc = yid - yic
zdc = zid - zic
if (use_polymer) then
call image (xba,yba,zba)
call image (xcb,ycb,zcb)
call image (xdc,ydc,zdc)
end if
xt = yba*zcb - ycb*zba
yt = zba*xcb - zcb*xba
zt = xba*ycb - xcb*yba
xu = ycb*zdc - ydc*zcb
yu = zcb*xdc - zdc*xcb
zu = xcb*ydc - xdc*ycb
xtu = yt*zu - yu*zt
ytu = zt*xu - zu*xt
ztu = xt*yu - xu*yt
rt2 = xt*xt + yt*yt + zt*zt
ru2 = xu*xu + yu*yu + zu*zu
rtru = sqrt(rt2 * ru2)
if (rtru .ne. 0.0d0) then
rcb = sqrt(xcb*xcb + ycb*ycb + zcb*zcb)
cosine = (xt*xu + yt*yu + zt*zu) / rtru
sine = (xcb*xtu + ycb*ytu + zcb*ztu) / (rcb*rtru)
c
c set the improper torsional parameters for this angle
c
v1 = itors1(1,kitors)
c1 = itors1(3,kitors)
s1 = itors1(4,kitors)
v2 = itors2(1,kitors)
c2 = itors2(3,kitors)
s2 = itors2(4,kitors)
v3 = itors3(1,kitors)
c3 = itors3(3,kitors)
s3 = itors3(4,kitors)
c
c compute the multiple angle trigonometry and the phase terms
c
cosine2 = cosine*cosine - sine*sine
sine2 = 2.0d0 * cosine * sine
cosine3 = cosine*cosine2 - sine*sine2
sine3 = cosine*sine2 + sine*cosine2
dphi1 = (cosine*s1 - sine*c1)
dphi2 = 2.0d0 * (cosine2*s2 - sine2*c2)
dphi3 = 3.0d0 * (cosine3*s3 - sine3*c3)
d2phi1 = -(cosine*c1 + sine*s1)
d2phi2 = -4.0d0 * (cosine2*c2 + sine2*s2)
d2phi3 = -9.0d0 * (cosine3*c3 + sine3*s3)
c
c calculate the improper torsion master chain rule terms
c
dedphi = itorunit * (v1*dphi1+v2*dphi2+v3*dphi3)
d2edphi2 = itorunit * (v1*d2phi1+v2*d2phi2+v3*d2phi3)
c
c scale the interaction based on its group membership
c
if (use_group) then
dedphi = dedphi * fgrp
d2edphi2 = d2edphi2 * fgrp
end if
c
c abbreviations for first derivative chain rule terms
c
xca = xic - xia
yca = yic - yia
zca = zic - zia
xdb = xid - xib
ydb = yid - yib
zdb = zid - zib
if (use_polymer) then
call image (xca,yca,zca)
call image (xdb,ydb,zdb)
end if
dphidxt = (yt*zcb - ycb*zt) / (rt2*rcb)
dphidyt = (zt*xcb - zcb*xt) / (rt2*rcb)
dphidzt = (xt*ycb - xcb*yt) / (rt2*rcb)
dphidxu = -(yu*zcb - ycb*zu) / (ru2*rcb)
dphidyu = -(zu*xcb - zcb*xu) / (ru2*rcb)
dphidzu = -(xu*ycb - xcb*yu) / (ru2*rcb)
c
c abbreviations for second derivative chain rule terms
c
xycb2 = xcb*xcb + ycb*ycb
xzcb2 = xcb*xcb + zcb*zcb
yzcb2 = ycb*ycb + zcb*zcb
rcbxt = -2.0d0 * rcb * dphidxt
rcbyt = -2.0d0 * rcb * dphidyt
rcbzt = -2.0d0 * rcb * dphidzt
rcbt2 = rcb * rt2
rcbxu = 2.0d0 * rcb * dphidxu
rcbyu = 2.0d0 * rcb * dphidyu
rcbzu = 2.0d0 * rcb * dphidzu
rcbu2 = rcb * ru2
dphidxibt = yca*dphidzt - zca*dphidyt
dphidxibu = zdc*dphidyu - ydc*dphidzu
dphidyibt = zca*dphidxt - xca*dphidzt
dphidyibu = xdc*dphidzu - zdc*dphidxu
dphidzibt = xca*dphidyt - yca*dphidxt
dphidzibu = ydc*dphidxu - xdc*dphidyu
dphidxict = zba*dphidyt - yba*dphidzt
dphidxicu = ydb*dphidzu - zdb*dphidyu
dphidyict = xba*dphidzt - zba*dphidxt
dphidyicu = zdb*dphidxu - xdb*dphidzu
dphidzict = yba*dphidxt - xba*dphidyt
dphidzicu = xdb*dphidyu - ydb*dphidxu
c
c chain rule terms for first derivative components
c
dphidxia = zcb*dphidyt - ycb*dphidzt
dphidyia = xcb*dphidzt - zcb*dphidxt
dphidzia = ycb*dphidxt - xcb*dphidyt
dphidxib = dphidxibt + dphidxibu
dphidyib = dphidyibt + dphidyibu
dphidzib = dphidzibt + dphidzibu
dphidxic = dphidxict + dphidxicu
dphidyic = dphidyict + dphidyicu
dphidzic = dphidzict + dphidzicu
dphidxid = zcb*dphidyu - ycb*dphidzu
dphidyid = xcb*dphidzu - zcb*dphidxu
dphidzid = ycb*dphidxu - xcb*dphidyu
c
c chain rule terms for second derivative components
c
dxiaxia = rcbxt*dphidxia
dxiayia = rcbxt*dphidyia - zcb*rcb/rt2
dxiazia = rcbxt*dphidzia + ycb*rcb/rt2
dxiaxic = rcbxt*dphidxict + xcb*xt/rcbt2
dxiayic = rcbxt*dphidyict - dphidzt
& - (xba*zcb*xcb+zba*yzcb2)/rcbt2
dxiazic = rcbxt*dphidzict + dphidyt
& + (xba*ycb*xcb+yba*yzcb2)/rcbt2
dxiaxid = 0.0d0
dxiayid = 0.0d0
dxiazid = 0.0d0
dyiayia = rcbyt*dphidyia
dyiazia = rcbyt*dphidzia - xcb*rcb/rt2
dyiaxib = rcbyt*dphidxibt - dphidzt
& - (yca*zcb*ycb+zca*xzcb2)/rcbt2
dyiaxic = rcbyt*dphidxict + dphidzt
& + (yba*zcb*ycb+zba*xzcb2)/rcbt2
dyiayic = rcbyt*dphidyict + ycb*yt/rcbt2
dyiazic = rcbyt*dphidzict - dphidxt
& - (yba*xcb*ycb+xba*xzcb2)/rcbt2
dyiaxid = 0.0d0
dyiayid = 0.0d0
dyiazid = 0.0d0
dziazia = rcbzt*dphidzia
dziaxib = rcbzt*dphidxibt + dphidyt
& + (zca*ycb*zcb+yca*xycb2)/rcbt2
dziayib = rcbzt*dphidyibt - dphidxt
& - (zca*xcb*zcb+xca*xycb2)/rcbt2
dziaxic = rcbzt*dphidxict - dphidyt
& - (zba*ycb*zcb+yba*xycb2)/rcbt2
dziayic = rcbzt*dphidyict + dphidxt
& + (zba*xcb*zcb+xba*xycb2)/rcbt2
dziazic = rcbzt*dphidzict + zcb*zt/rcbt2
dziaxid = 0.0d0
dziayid = 0.0d0
dziazid = 0.0d0
dxibxic = -xcb*dphidxib/(rcb*rcb)
& - (yca*(zba*xcb+yt)-zca*(yba*xcb-zt))/rcbt2
& - 2.0d0*(yt*zba-yba*zt)*dphidxibt/rt2
& - (zdc*(ydb*xcb+zu)-ydc*(zdb*xcb-yu))/rcbu2
& + 2.0d0*(yu*zdb-ydb*zu)*dphidxibu/ru2
dxibyic = -ycb*dphidxib/(rcb*rcb) + dphidzt + dphidzu
& - (yca*(zba*ycb-xt)+zca*(xba*xcb+zcb*zba))/rcbt2
& - 2.0d0*(zt*xba-zba*xt)*dphidxibt/rt2
& + (zdc*(xdb*xcb+zcb*zdb)+ydc*(zdb*ycb+xu))/rcbu2
& + 2.0d0*(zu*xdb-zdb*xu)*dphidxibu/ru2
dxibxid = rcbxu*dphidxibu + xcb*xu/rcbu2
dxibyid = rcbyu*dphidxibu - dphidzu
& - (ydc*zcb*ycb+zdc*xzcb2)/rcbu2
dxibzid = rcbzu*dphidxibu + dphidyu
& + (zdc*ycb*zcb+ydc*xycb2)/rcbu2
dyibzib = ycb*dphidzib/(rcb*rcb)
& - (xca*(xca*xcb+zcb*zca)+yca*(ycb*xca+zt))/rcbt2
& - 2.0d0*(xt*zca-xca*zt)*dphidzibt/rt2
& + (ydc*(xdc*ycb-zu)+xdc*(xdc*xcb+zcb*zdc))/rcbu2
& + 2.0d0*(xu*zdc-xdc*zu)*dphidzibu/ru2
dyibxic = -xcb*dphidyib/(rcb*rcb) - dphidzt - dphidzu
& + (xca*(zba*xcb+yt)+zca*(zba*zcb+ycb*yba))/rcbt2
& - 2.0d0*(yt*zba-yba*zt)*dphidyibt/rt2
& - (zdc*(zdb*zcb+ycb*ydb)+xdc*(zdb*xcb-yu))/rcbu2
& + 2.0d0*(yu*zdb-ydb*zu)*dphidyibu/ru2
dyibyic = -ycb*dphidyib/(rcb*rcb)
& - (zca*(xba*ycb+zt)-xca*(zba*ycb-xt))/rcbt2
& - 2.0d0*(zt*xba-zba*xt)*dphidyibt/rt2
& - (xdc*(zdb*ycb+xu)-zdc*(xdb*ycb-zu))/rcbu2
& + 2.0d0*(zu*xdb-zdb*xu)*dphidyibu/ru2
dyibxid = rcbxu*dphidyibu + dphidzu
& + (xdc*zcb*xcb+zdc*yzcb2)/rcbu2
dyibyid = rcbyu*dphidyibu + ycb*yu/rcbu2
dyibzid = rcbzu*dphidyibu - dphidxu
& - (zdc*xcb*zcb+xdc*xycb2)/rcbu2
dzibxic = -xcb*dphidzib/(rcb*rcb) + dphidyt + dphidyu
& - (xca*(yba*xcb-zt)+yca*(zba*zcb+ycb*yba))/rcbt2
& - 2.0d0*(yt*zba-yba*zt)*dphidzibt/rt2
& + (ydc*(zdb*zcb+ycb*ydb)+xdc*(ydb*xcb+zu))/rcbu2
& + 2.0d0*(yu*zdb-ydb*zu)*dphidzibu/ru2
dzibzic = -zcb*dphidzib/(rcb*rcb)
& - (xca*(yba*zcb+xt)-yca*(xba*zcb-yt))/rcbt2
& - 2.0d0*(xt*yba-xba*yt)*dphidzibt/rt2
& - (ydc*(xdb*zcb+yu)-xdc*(ydb*zcb-xu))/rcbu2
& + 2.0d0*(xu*ydb-xdb*yu)*dphidzibu/ru2
dzibxid = rcbxu*dphidzibu - dphidyu
& - (xdc*ycb*xcb+ydc*yzcb2)/rcbu2
dzibyid = rcbyu*dphidzibu + dphidxu
& + (ydc*xcb*ycb+xdc*xzcb2)/rcbu2
dzibzid = rcbzu*dphidzibu + zcb*zu/rcbu2
dxicxid = rcbxu*dphidxicu - xcb*(zdb*ycb-ydb*zcb)/rcbu2
dxicyid = rcbyu*dphidxicu + dphidzu
& + (ydb*zcb*ycb+zdb*xzcb2)/rcbu2
dxiczid = rcbzu*dphidxicu - dphidyu
& - (zdb*ycb*zcb+ydb*xycb2)/rcbu2
dyicxid = rcbxu*dphidyicu - dphidzu
& - (xdb*zcb*xcb+zdb*yzcb2)/rcbu2
dyicyid = rcbyu*dphidyicu - ycb*(xdb*zcb-zdb*xcb)/rcbu2
dyiczid = rcbzu*dphidyicu + dphidxu
& + (zdb*xcb*zcb+xdb*xycb2)/rcbu2
dzicxid = rcbxu*dphidzicu + dphidyu
& + (xdb*ycb*xcb+ydb*yzcb2)/rcbu2
dzicyid = rcbyu*dphidzicu - dphidxu
& - (ydb*xcb*ycb+xdb*xzcb2)/rcbu2
dziczid = rcbzu*dphidzicu - zcb*(ydb*xcb-xdb*ycb)/rcbu2
dxidxid = rcbxu*dphidxid
dxidyid = rcbxu*dphidyid + zcb*rcb/ru2
dxidzid = rcbxu*dphidzid - ycb*rcb/ru2
dyidyid = rcbyu*dphidyid
dyidzid = rcbyu*dphidzid + xcb*rcb/ru2
dzidzid = rcbzu*dphidzid
c
c get some second derivative chain rule terms by difference
c
dxiaxib = -dxiaxia - dxiaxic - dxiaxid
dxiayib = -dxiayia - dxiayic - dxiayid
dxiazib = -dxiazia - dxiazic - dxiazid
dyiayib = -dyiayia - dyiayic - dyiayid
dyiazib = -dyiazia - dyiazic - dyiazid
dziazib = -dziazia - dziazic - dziazid
dxibxib = -dxiaxib - dxibxic - dxibxid
dxibyib = -dyiaxib - dxibyic - dxibyid
dxibzib = -dxiazib - dzibxic - dzibxid
dxibzic = -dziaxib - dxibzib - dxibzid
dyibyib = -dyiayib - dyibyic - dyibyid
dyibzic = -dziayib - dyibzib - dyibzid
dzibzib = -dziazib - dzibzic - dzibzid
dzibyic = -dyiazib - dyibzib - dzibyid
dxicxic = -dxiaxic - dxibxic - dxicxid
dxicyic = -dyiaxic - dyibxic - dxicyid
dxiczic = -dziaxic - dzibxic - dxiczid
dyicyic = -dyiayic - dyibyic - dyicyid
dyiczic = -dziayic - dzibyic - dyiczid
dziczic = -dziazic - dzibzic - dziczid
c
c increment diagonal and off-diagonal Hessian elements
c
if (i .eq. ia) then
hessx(1,ia) = hessx(1,ia) + dedphi*dxiaxia
& + d2edphi2*dphidxia*dphidxia
hessy(1,ia) = hessy(1,ia) + dedphi*dxiayia
& + d2edphi2*dphidxia*dphidyia
hessz(1,ia) = hessz(1,ia) + dedphi*dxiazia
& + d2edphi2*dphidxia*dphidzia
hessx(2,ia) = hessx(2,ia) + dedphi*dxiayia
& + d2edphi2*dphidxia*dphidyia
hessy(2,ia) = hessy(2,ia) + dedphi*dyiayia
& + d2edphi2*dphidyia*dphidyia
hessz(2,ia) = hessz(2,ia) + dedphi*dyiazia
& + d2edphi2*dphidyia*dphidzia
hessx(3,ia) = hessx(3,ia) + dedphi*dxiazia
& + d2edphi2*dphidxia*dphidzia
hessy(3,ia) = hessy(3,ia) + dedphi*dyiazia
& + d2edphi2*dphidyia*dphidzia
hessz(3,ia) = hessz(3,ia) + dedphi*dziazia
& + d2edphi2*dphidzia*dphidzia
hessx(1,ib) = hessx(1,ib) + dedphi*dxiaxib
& + d2edphi2*dphidxia*dphidxib
hessy(1,ib) = hessy(1,ib) + dedphi*dyiaxib
& + d2edphi2*dphidyia*dphidxib
hessz(1,ib) = hessz(1,ib) + dedphi*dziaxib
& + d2edphi2*dphidzia*dphidxib
hessx(2,ib) = hessx(2,ib) + dedphi*dxiayib
& + d2edphi2*dphidxia*dphidyib
hessy(2,ib) = hessy(2,ib) + dedphi*dyiayib
& + d2edphi2*dphidyia*dphidyib
hessz(2,ib) = hessz(2,ib) + dedphi*dziayib
& + d2edphi2*dphidzia*dphidyib
hessx(3,ib) = hessx(3,ib) + dedphi*dxiazib
& + d2edphi2*dphidxia*dphidzib
hessy(3,ib) = hessy(3,ib) + dedphi*dyiazib
& + d2edphi2*dphidyia*dphidzib
hessz(3,ib) = hessz(3,ib) + dedphi*dziazib
& + d2edphi2*dphidzia*dphidzib
hessx(1,ic) = hessx(1,ic) + dedphi*dxiaxic
& + d2edphi2*dphidxia*dphidxic
hessy(1,ic) = hessy(1,ic) + dedphi*dyiaxic
& + d2edphi2*dphidyia*dphidxic
hessz(1,ic) = hessz(1,ic) + dedphi*dziaxic
& + d2edphi2*dphidzia*dphidxic
hessx(2,ic) = hessx(2,ic) + dedphi*dxiayic
& + d2edphi2*dphidxia*dphidyic
hessy(2,ic) = hessy(2,ic) + dedphi*dyiayic
& + d2edphi2*dphidyia*dphidyic
hessz(2,ic) = hessz(2,ic) + dedphi*dziayic
& + d2edphi2*dphidzia*dphidyic
hessx(3,ic) = hessx(3,ic) + dedphi*dxiazic
& + d2edphi2*dphidxia*dphidzic
hessy(3,ic) = hessy(3,ic) + dedphi*dyiazic
& + d2edphi2*dphidyia*dphidzic
hessz(3,ic) = hessz(3,ic) + dedphi*dziazic
& + d2edphi2*dphidzia*dphidzic
hessx(1,id) = hessx(1,id) + dedphi*dxiaxid
& + d2edphi2*dphidxia*dphidxid
hessy(1,id) = hessy(1,id) + dedphi*dyiaxid
& + d2edphi2*dphidyia*dphidxid
hessz(1,id) = hessz(1,id) + dedphi*dziaxid
& + d2edphi2*dphidzia*dphidxid
hessx(2,id) = hessx(2,id) + dedphi*dxiayid
& + d2edphi2*dphidxia*dphidyid
hessy(2,id) = hessy(2,id) + dedphi*dyiayid
& + d2edphi2*dphidyia*dphidyid
hessz(2,id) = hessz(2,id) + dedphi*dziayid
& + d2edphi2*dphidzia*dphidyid
hessx(3,id) = hessx(3,id) + dedphi*dxiazid
& + d2edphi2*dphidxia*dphidzid
hessy(3,id) = hessy(3,id) + dedphi*dyiazid
& + d2edphi2*dphidyia*dphidzid
hessz(3,id) = hessz(3,id) + dedphi*dziazid
& + d2edphi2*dphidzia*dphidzid
else if (i .eq. ib) then
hessx(1,ib) = hessx(1,ib) + dedphi*dxibxib
& + d2edphi2*dphidxib*dphidxib
hessy(1,ib) = hessy(1,ib) + dedphi*dxibyib
& + d2edphi2*dphidxib*dphidyib
hessz(1,ib) = hessz(1,ib) + dedphi*dxibzib
& + d2edphi2*dphidxib*dphidzib
hessx(2,ib) = hessx(2,ib) + dedphi*dxibyib
& + d2edphi2*dphidxib*dphidyib
hessy(2,ib) = hessy(2,ib) + dedphi*dyibyib
& + d2edphi2*dphidyib*dphidyib
hessz(2,ib) = hessz(2,ib) + dedphi*dyibzib
& + d2edphi2*dphidyib*dphidzib
hessx(3,ib) = hessx(3,ib) + dedphi*dxibzib
& + d2edphi2*dphidxib*dphidzib
hessy(3,ib) = hessy(3,ib) + dedphi*dyibzib
& + d2edphi2*dphidyib*dphidzib
hessz(3,ib) = hessz(3,ib) + dedphi*dzibzib
& + d2edphi2*dphidzib*dphidzib
hessx(1,ia) = hessx(1,ia) + dedphi*dxiaxib
& + d2edphi2*dphidxib*dphidxia
hessy(1,ia) = hessy(1,ia) + dedphi*dxiayib
& + d2edphi2*dphidyib*dphidxia
hessz(1,ia) = hessz(1,ia) + dedphi*dxiazib
& + d2edphi2*dphidzib*dphidxia
hessx(2,ia) = hessx(2,ia) + dedphi*dyiaxib
& + d2edphi2*dphidxib*dphidyia
hessy(2,ia) = hessy(2,ia) + dedphi*dyiayib
& + d2edphi2*dphidyib*dphidyia
hessz(2,ia) = hessz(2,ia) + dedphi*dyiazib
& + d2edphi2*dphidzib*dphidyia
hessx(3,ia) = hessx(3,ia) + dedphi*dziaxib
& + d2edphi2*dphidxib*dphidzia
hessy(3,ia) = hessy(3,ia) + dedphi*dziayib
& + d2edphi2*dphidyib*dphidzia
hessz(3,ia) = hessz(3,ia) + dedphi*dziazib
& + d2edphi2*dphidzib*dphidzia
hessx(1,ic) = hessx(1,ic) + dedphi*dxibxic
& + d2edphi2*dphidxib*dphidxic
hessy(1,ic) = hessy(1,ic) + dedphi*dyibxic
& + d2edphi2*dphidyib*dphidxic
hessz(1,ic) = hessz(1,ic) + dedphi*dzibxic
& + d2edphi2*dphidzib*dphidxic
hessx(2,ic) = hessx(2,ic) + dedphi*dxibyic
& + d2edphi2*dphidxib*dphidyic
hessy(2,ic) = hessy(2,ic) + dedphi*dyibyic
& + d2edphi2*dphidyib*dphidyic
hessz(2,ic) = hessz(2,ic) + dedphi*dzibyic
& + d2edphi2*dphidzib*dphidyic
hessx(3,ic) = hessx(3,ic) + dedphi*dxibzic
& + d2edphi2*dphidxib*dphidzic
hessy(3,ic) = hessy(3,ic) + dedphi*dyibzic
& + d2edphi2*dphidyib*dphidzic
hessz(3,ic) = hessz(3,ic) + dedphi*dzibzic
& + d2edphi2*dphidzib*dphidzic
hessx(1,id) = hessx(1,id) + dedphi*dxibxid
& + d2edphi2*dphidxib*dphidxid
hessy(1,id) = hessy(1,id) + dedphi*dyibxid
& + d2edphi2*dphidyib*dphidxid
hessz(1,id) = hessz(1,id) + dedphi*dzibxid
& + d2edphi2*dphidzib*dphidxid
hessx(2,id) = hessx(2,id) + dedphi*dxibyid
& + d2edphi2*dphidxib*dphidyid
hessy(2,id) = hessy(2,id) + dedphi*dyibyid
& + d2edphi2*dphidyib*dphidyid
hessz(2,id) = hessz(2,id) + dedphi*dzibyid
& + d2edphi2*dphidzib*dphidyid
hessx(3,id) = hessx(3,id) + dedphi*dxibzid
& + d2edphi2*dphidxib*dphidzid
hessy(3,id) = hessy(3,id) + dedphi*dyibzid
& + d2edphi2*dphidyib*dphidzid
hessz(3,id) = hessz(3,id) + dedphi*dzibzid
& + d2edphi2*dphidzib*dphidzid
else if (i .eq. ic) then
hessx(1,ic) = hessx(1,ic) + dedphi*dxicxic
& + d2edphi2*dphidxic*dphidxic
hessy(1,ic) = hessy(1,ic) + dedphi*dxicyic
& + d2edphi2*dphidxic*dphidyic
hessz(1,ic) = hessz(1,ic) + dedphi*dxiczic
& + d2edphi2*dphidxic*dphidzic
hessx(2,ic) = hessx(2,ic) + dedphi*dxicyic
& + d2edphi2*dphidxic*dphidyic
hessy(2,ic) = hessy(2,ic) + dedphi*dyicyic
& + d2edphi2*dphidyic*dphidyic
hessz(2,ic) = hessz(2,ic) + dedphi*dyiczic
& + d2edphi2*dphidyic*dphidzic
hessx(3,ic) = hessx(3,ic) + dedphi*dxiczic
& + d2edphi2*dphidxic*dphidzic
hessy(3,ic) = hessy(3,ic) + dedphi*dyiczic
& + d2edphi2*dphidyic*dphidzic
hessz(3,ic) = hessz(3,ic) + dedphi*dziczic
& + d2edphi2*dphidzic*dphidzic
hessx(1,ia) = hessx(1,ia) + dedphi*dxiaxic
& + d2edphi2*dphidxic*dphidxia
hessy(1,ia) = hessy(1,ia) + dedphi*dxiayic
& + d2edphi2*dphidyic*dphidxia
hessz(1,ia) = hessz(1,ia) + dedphi*dxiazic
& + d2edphi2*dphidzic*dphidxia
hessx(2,ia) = hessx(2,ia) + dedphi*dyiaxic
& + d2edphi2*dphidxic*dphidyia
hessy(2,ia) = hessy(2,ia) + dedphi*dyiayic
& + d2edphi2*dphidyic*dphidyia
hessz(2,ia) = hessz(2,ia) + dedphi*dyiazic
& + d2edphi2*dphidzic*dphidyia
hessx(3,ia) = hessx(3,ia) + dedphi*dziaxic
& + d2edphi2*dphidxic*dphidzia
hessy(3,ia) = hessy(3,ia) + dedphi*dziayic
& + d2edphi2*dphidyic*dphidzia
hessz(3,ia) = hessz(3,ia) + dedphi*dziazic
& + d2edphi2*dphidzic*dphidzia
hessx(1,ib) = hessx(1,ib) + dedphi*dxibxic
& + d2edphi2*dphidxic*dphidxib
hessy(1,ib) = hessy(1,ib) + dedphi*dxibyic
& + d2edphi2*dphidyic*dphidxib
hessz(1,ib) = hessz(1,ib) + dedphi*dxibzic
& + d2edphi2*dphidzic*dphidxib
hessx(2,ib) = hessx(2,ib) + dedphi*dyibxic
& + d2edphi2*dphidxic*dphidyib
hessy(2,ib) = hessy(2,ib) + dedphi*dyibyic
& + d2edphi2*dphidyic*dphidyib
hessz(2,ib) = hessz(2,ib) + dedphi*dyibzic
& + d2edphi2*dphidzic*dphidyib
hessx(3,ib) = hessx(3,ib) + dedphi*dzibxic
& + d2edphi2*dphidxic*dphidzib
hessy(3,ib) = hessy(3,ib) + dedphi*dzibyic
& + d2edphi2*dphidyic*dphidzib
hessz(3,ib) = hessz(3,ib) + dedphi*dzibzic
& + d2edphi2*dphidzic*dphidzib
hessx(1,id) = hessx(1,id) + dedphi*dxicxid
& + d2edphi2*dphidxic*dphidxid
hessy(1,id) = hessy(1,id) + dedphi*dyicxid
& + d2edphi2*dphidyic*dphidxid
hessz(1,id) = hessz(1,id) + dedphi*dzicxid
& + d2edphi2*dphidzic*dphidxid
hessx(2,id) = hessx(2,id) + dedphi*dxicyid
& + d2edphi2*dphidxic*dphidyid
hessy(2,id) = hessy(2,id) + dedphi*dyicyid
& + d2edphi2*dphidyic*dphidyid
hessz(2,id) = hessz(2,id) + dedphi*dzicyid
& + d2edphi2*dphidzic*dphidyid
hessx(3,id) = hessx(3,id) + dedphi*dxiczid
& + d2edphi2*dphidxic*dphidzid
hessy(3,id) = hessy(3,id) + dedphi*dyiczid
& + d2edphi2*dphidyic*dphidzid
hessz(3,id) = hessz(3,id) + dedphi*dziczid
& + d2edphi2*dphidzic*dphidzid
else if (i .eq. id) then
hessx(1,id) = hessx(1,id) + dedphi*dxidxid
& + d2edphi2*dphidxid*dphidxid
hessy(1,id) = hessy(1,id) + dedphi*dxidyid
& + d2edphi2*dphidxid*dphidyid
hessz(1,id) = hessz(1,id) + dedphi*dxidzid
& + d2edphi2*dphidxid*dphidzid
hessx(2,id) = hessx(2,id) + dedphi*dxidyid
& + d2edphi2*dphidxid*dphidyid
hessy(2,id) = hessy(2,id) + dedphi*dyidyid
& + d2edphi2*dphidyid*dphidyid
hessz(2,id) = hessz(2,id) + dedphi*dyidzid
& + d2edphi2*dphidyid*dphidzid
hessx(3,id) = hessx(3,id) + dedphi*dxidzid
& + d2edphi2*dphidxid*dphidzid
hessy(3,id) = hessy(3,id) + dedphi*dyidzid
& + d2edphi2*dphidyid*dphidzid
hessz(3,id) = hessz(3,id) + dedphi*dzidzid
& + d2edphi2*dphidzid*dphidzid
hessx(1,ia) = hessx(1,ia) + dedphi*dxiaxid
& + d2edphi2*dphidxid*dphidxia
hessy(1,ia) = hessy(1,ia) + dedphi*dxiayid
& + d2edphi2*dphidyid*dphidxia
hessz(1,ia) = hessz(1,ia) + dedphi*dxiazid
& + d2edphi2*dphidzid*dphidxia
hessx(2,ia) = hessx(2,ia) + dedphi*dyiaxid
& + d2edphi2*dphidxid*dphidyia
hessy(2,ia) = hessy(2,ia) + dedphi*dyiayid
& + d2edphi2*dphidyid*dphidyia
hessz(2,ia) = hessz(2,ia) + dedphi*dyiazid
& + d2edphi2*dphidzid*dphidyia
hessx(3,ia) = hessx(3,ia) + dedphi*dziaxid
& + d2edphi2*dphidxid*dphidzia
hessy(3,ia) = hessy(3,ia) + dedphi*dziayid
& + d2edphi2*dphidyid*dphidzia
hessz(3,ia) = hessz(3,ia) + dedphi*dziazid
& + d2edphi2*dphidzid*dphidzia
hessx(1,ib) = hessx(1,ib) + dedphi*dxibxid
& + d2edphi2*dphidxid*dphidxib
hessy(1,ib) = hessy(1,ib) + dedphi*dxibyid
& + d2edphi2*dphidyid*dphidxib
hessz(1,ib) = hessz(1,ib) + dedphi*dxibzid
& + d2edphi2*dphidzid*dphidxib
hessx(2,ib) = hessx(2,ib) + dedphi*dyibxid
& + d2edphi2*dphidxid*dphidyib
hessy(2,ib) = hessy(2,ib) + dedphi*dyibyid
& + d2edphi2*dphidyid*dphidyib
hessz(2,ib) = hessz(2,ib) + dedphi*dyibzid
& + d2edphi2*dphidzid*dphidyib
hessx(3,ib) = hessx(3,ib) + dedphi*dzibxid
& + d2edphi2*dphidxid*dphidzib
hessy(3,ib) = hessy(3,ib) + dedphi*dzibyid
& + d2edphi2*dphidyid*dphidzib
hessz(3,ib) = hessz(3,ib) + dedphi*dzibzid
& + d2edphi2*dphidzid*dphidzib
hessx(1,ic) = hessx(1,ic) + dedphi*dxicxid
& + d2edphi2*dphidxid*dphidxic
hessy(1,ic) = hessy(1,ic) + dedphi*dxicyid
& + d2edphi2*dphidyid*dphidxic
hessz(1,ic) = hessz(1,ic) + dedphi*dxiczid
& + d2edphi2*dphidzid*dphidxic
hessx(2,ic) = hessx(2,ic) + dedphi*dyicxid
& + d2edphi2*dphidxid*dphidyic
hessy(2,ic) = hessy(2,ic) + dedphi*dyicyid
& + d2edphi2*dphidyid*dphidyic
hessz(2,ic) = hessz(2,ic) + dedphi*dyiczid
& + d2edphi2*dphidzid*dphidyic
hessx(3,ic) = hessx(3,ic) + dedphi*dzicxid
& + d2edphi2*dphidxid*dphidzic
hessy(3,ic) = hessy(3,ic) + dedphi*dzicyid
& + d2edphi2*dphidyid*dphidzic
hessz(3,ic) = hessz(3,ic) + dedphi*dziczid
& + d2edphi2*dphidzid*dphidzic
end if
end if
end if
end do
return
end
|
{"hexsha": "8e88522a337b0599b6410cdb9fd345d82b98cbee", "size": 34245, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/eimptor2.f", "max_stars_repo_name": "htna/HCsbLib", "max_stars_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-21T23:45:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T16:34:24.000Z", "max_issues_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/eimptor2.f", "max_issues_repo_name": "htna/HCsbLib", "max_issues_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/eimptor2.f", "max_forks_repo_name": "htna/HCsbLib", "max_forks_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-05T00:26:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-08T23:25:29.000Z", "avg_line_length": 49.9927007299, "max_line_length": 70, "alphanum_fraction": 0.4568550153, "num_tokens": 12469}
|
Name = "coname"
DividendYieldPercent = "yie"
LongTermDebtToEquity = "qto"
MarketCapitalizationInMillion = "mkt"
NetProfitMarginPercent = "qpm"
OneDayPriceChangePercent = "prl"
PriceEarningsRatio = "pee"
PriceToBookValue = "pri"
PriceToFreeCashFlow = "prf"
ReturnOnEquityPercent = "ttm"
|
{"hexsha": "b8ba1ea43895b2844f72f1ba230f7eb64abda478", "size": 285, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/yahoo_finance_api/MarketQuoteProperties.jl", "max_stars_repo_name": "tjolsen/YahooFinanceAPI.jl", "max_stars_repo_head_hexsha": "e828a039357c6766ae6da1a32ec8fd7863eb7e39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-10-21T04:39:00.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-13T11:31:11.000Z", "max_issues_repo_path": "src/yahoo_finance_api/MarketQuoteProperties.jl", "max_issues_repo_name": "tjolsen/YahooFinanceAPI.jl", "max_issues_repo_head_hexsha": "e828a039357c6766ae6da1a32ec8fd7863eb7e39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/yahoo_finance_api/MarketQuoteProperties.jl", "max_forks_repo_name": "tjolsen/YahooFinanceAPI.jl", "max_forks_repo_head_hexsha": "e828a039357c6766ae6da1a32ec8fd7863eb7e39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-10-21T04:42:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-21T19:00:49.000Z", "avg_line_length": 28.5, "max_line_length": 37, "alphanum_fraction": 0.7929824561, "num_tokens": 94}
|
import sys
from datetime import timedelta, datetime
from pyspark.sql import HiveContext
from pyspark.sql import functions as f, SparkSession
def algo(src, from_dt, to_dt):
res = steps(src, from_dt, to_dt)
return res
def steps(src, from_dt, to_dt):
import sys
MODULES_PATH = '../code/'
if MODULES_PATH not in sys.path:
sys.path.append(MODULES_PATH)
import mfuncs
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas()
pd.options.display.max_columns = 1000
import lightgbm as lgb
from sklearn.neighbors import NearestNeighbors
# start of step 01
df_train = pd.read_csv('../data/train_set.csv')
df_test = pd.read_csv('../data/test_set.csv')
rnm = {
'atm_address_lat': 'atm_lat',
'atm_address_lon': 'atm_lon',
'pos_adress_lat': 'pos_lat',
'pos_adress_lon': 'pos_lon',
'home_add_lat': 'home_lat',
'home_add_lon': 'home_lon',
'work_add_lat': 'work_lat',
'work_add_lon': 'work_lon',
}
df_train.rename(columns=rnm, inplace=True)
df_test.rename(columns=rnm, inplace=True)
# start of step 02
df_train['target_work'] = df_train.progress_apply(mfuncs.add_poswork_target, axis=1)
df_train['target_home'] = df_train.progress_apply(mfuncs.add_poshome_target, axis=1)
# start of step 03
df_train.to_csv('../data/train_1.csv', index=None)
# start of step 04
df_train.info()
# start of step 05
df_train.head()
# start of step 06
df_train.country.value_counts(normalize=True)[:10]
print(df_train.shape, df_test.shape)
df_train = df_train[df_train.country.isin(['RUS', 'RU'])]
df_test = df_test[df_test.country.isin(['RUS', 'RU'])]
print(df_train.shape, df_test.shape)
del df_train['country'], df_test['country']
# start of step 07
print(df_train.shape, df_train.currency.value_counts(normalize=True))
df_train = df_train[df_train.currency == 643]
print(df_train.shape)
del df_train['currency']
# start of step 09
print(df_train.shape)
gb = df_train.groupby('customer_id')['work_lat'].agg('nunique')
cid_incorrect = gb[gb == 2].index
df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]
print(df_train.shape)
gb = df_train.groupby('customer_id')['home_lat'].agg('nunique')
cid_incorrect = gb[gb == 2].index
df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]
print(df_train.shape)
# start of step 10
print(df_train.shape)
df_train = df_train[df_train[['atm_lat', 'pos_lat']].isnull().sum(axis=1) == 1]
print(df_train.shape)
df_train['type'] = 'atm'
df_train.loc[~df_train['pos_lat'].isnull(), 'type'] = 'pos'
df_train['type'].value_counts()
# start of step 11
cid = df_train.sample(1)['customer_id'].values[0]
df_an = df_train[df_train.customer_id == cid]
df_point_dup = df_an.groupby(['pos_lat', 'pos_lon']).agg('size').reset_index()
df_point_dup.columns = ['pos_lat', 'pos_lon', 'pos_customer_freq']
df_an = pd.merge(df_an, df_point_dup, on=['pos_lat', 'pos_lon'], how='left')
df_an.head()
# start of step 12
df_train.head()
df_train[df_train.type == 'pos'].drop_duplicates(['pos_lat',
'pos_lon']).groupby(['terminal_id']).agg('size').value_counts()
df_train[df_train.type == 'atm'].drop_duplicates(['atm_lat',
'atm_lon']).groupby(['terminal_id']).agg('size').value_counts()
df_train[df_train.terminal_id == '1e15d02895068c3a864432f0c06f5ece']['atm_address'].unique()
df_train[df_train.type == 'atm'].drop_duplicates(['atm_lat',
'atm_lon']).groupby(['terminal_id']).agg('size')
import gmaps
API_KEY = 'AIzaSyCG_RL0_kavuEaJAqEN5xXbU4h0VJUbA9M'
gmaps.configure(api_key=API_KEY) # Your Google API key
cid = '0dc0137d280a2a82d2dc89282450ff1b'
cid = df_train.sample(1)['customer_id'].values[0]
df_an = df_train[df_train.customer_id == cid]
center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values
center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values
points_pos = df_an[['pos_lat', 'pos_lon']].dropna().values
points_atm = df_an[['atm_lat', 'atm_lon']].dropna().values
print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)
gmap = gmaps.Map()
if len(points_pos) > 0:
gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',
fill_color="blue", stroke_color="blue", scale=3))
if len(points_atm) > 0:
gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',
fill_color="red", stroke_color="red", scale=3))
if not np.isnan(center_home)[0][0]:
gmap.add_layer(gmaps.marker_layer(center_home, label='home'))
if not np.isnan(center_work)[0][0]:
gmap.add_layer(gmaps.marker_layer(center_work, label='work'))
gmap
center_home = df_train[['home_lat', 'home_lon']].dropna().values
center_work = df_train[['work_lat', 'work_lon']].dropna().values
gmap = gmaps.Map()
gmap.add_layer(gmaps.symbol_layer(center_home, fill_color="red", stroke_color="red"))
gmap
np.isnan(center_home)
df_train.groupby(['customer_id']).agg('size').sort_values().value_counts()
df_test.customer_id.drop_duplicates().isin(df_train.customer_id.unique()).mean()
df_train['duplicated'] = df_train.duplicated()
    df_pos = df_train[df_train['type'] == 'pos'].copy()
    # target: whether the pos transaction lies near the customer's work / home address
df_pos['target_work'] = df_pos.progress_apply(mfuncs.add_poswork_target, axis=1)
df_pos['target_home'] = df_pos.progress_apply(mfuncs.add_poshome_target, axis=1)
df_pos['target_work'].mean(), df_pos['target_home'].mean()
df_pos.to_csv('../data/df_pos.csv', index=None)
df_pos = pd.read_csv('../data/df_pos.csv')
df_point_dup = df_pos.groupby(['customer_id', 'pos_lat', 'pos_lon']).agg('size').reset_index()
df_point_dup.columns = ['customer_id', 'pos_lat', 'pos_lon', 'pos_customer_freq']
df_pos = pd.merge(df_pos, df_point_dup, on=['customer_id', 'pos_lat', 'pos_lon'], how='left')
    dfs = []
    for cid in tqdm(df_pos.customer_id.unique()):
        df_an = df_pos[df_pos.customer_id == cid]
        df_an = mfuncs.add_dist_to_neighbours(df_an)
        dfs.append(df_an)
    df_pos = pd.concat(dfs, ignore_index=True)
df_pos['transaction_date'] = pd.to_datetime(df_pos['transaction_date'], format='%Y-%m-%d')
df_pos['month'] = df_pos.transaction_date.dt.month
df_pos['day'] = df_pos.transaction_date.dt.day
df_pos['dayofyear'] = df_pos.transaction_date.dt.dayofyear
df_pos['dayofweek'] = df_pos.transaction_date.dt.dayofweek
df_pos.transaction_date.dtype
df_gb = df_pos.groupby('customer_id')
coord_stat_df = df_gb[['amount', 'pos_lat', 'pos_lon']].agg(['mean', 'max', 'min'])
coord_stat_df['transactions_per_user'] = df_gb.agg('size')
coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]
coord_stat_df.reset_index(inplace=True)
df_pos = pd.merge(df_pos, coord_stat_df, on='customer_id', how='left')
cols = ['pos_lat', 'pos_lon']
types = ['min', 'max', 'mean']
for c in cols:
for t in types:
df_pos['{}_diff_{}'.format(c, t)] = np.abs(df_pos[c] - df_pos['{}_{}'.format(c, t)])
df_pos = pd.concat([df_pos, pd.get_dummies(df_pos['mcc'], prefix='mcc')], axis=1)
del df_pos['mcc']
df_pos.head()
drop_cols = ['customer_id', 'terminal_id', 'target_home', 'target_work', 'atm_address',
'pos_address', 'work_add_lat', 'work_add_lon', 'home_add_lat', 'home_add_lon',
'city', 'type', 'transaction_date']
drop_cols += ['atm_address', 'atm_address_lat', 'atm_address_lon']
df_pos.drop(drop_cols, 1, errors='ignore').head()
# drop_cols = ['pos_address', 'pos_address_lat', 'pos_address_lon']
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
df_pos_id = df_pos.customer_id.drop_duplicates().reset_index(drop=True)
skf_id = list(KFold(n_splits=5, shuffle=True, random_state=15).split(df_pos_id))
skf = []
for train_ind, test_ind in skf_id:
train_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[train_ind].values)].index.values
test_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[test_ind].values)].index.values
skf.append([train_ind_, test_ind_])
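    # The loop above builds customer-grouped folds by hand: each customer's
    # rows land in exactly one fold, so no customer leaks between train and
    # validation. A sketch of the same idea with scikit-learn (similar in
    # spirit, not a drop-in replacement for the index arrays built above):
    #   from sklearn.model_selection import GroupKFold
    #   skf_alt = list(GroupKFold(n_splits=5).split(df_pos,
    #                                               groups=df_pos.customer_id))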
df_pos['target_work'].mean()
df_pos.head()
cid = '442fd7e3af4d8c3acd7807aa65bb5e85'
df_an = df_pos[df_pos.customer_id == cid]
df_an = mfuncs.add_dist_to_neighbours(df_an)
    df_pos.customer_id.unique()
if np.array([1]).size:
print(1)
lgb_train = lgb.Dataset(df_pos.drop(drop_cols, 1, errors='ignore'), df_pos['target_home'])
params = {
'objective': 'binary',
'num_leaves': 511,
'learning_rate': 0.05,
# 'metric' : 'error',
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 1,
'num_threads': 12,
'verbose': 0,
}
gbm = lgb.cv(params,
lgb_train,
num_boost_round=2000,
folds=skf,
verbose_eval=10,
early_stopping_rounds=500)
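    # lgb.cv returns a dict of per-iteration CV metric lists (for this setup
    # typically gbm['binary_logloss-mean'] / '...-stdv'); the list length is
    # the best iteration chosen by early stopping.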
    i1, i2 = skf[0]
    df_pos.loc[i2].shape
    df_pos.loc[i1]['customer_id'].unique()
    df_pos.loc[i1]
    df_pos.dtypes
res = df_pos
return res
def update_last_partition(dst, from_dt, to_dt):
prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)
res = spark.table(dst["d_train"]).checkpoint()
res = res.where(res.day == to_dt)
res = res.withColumn("period_to_dt", f.lit(prev_day)).withColumn("day", f.lit(prev_day.strftime('%Y-%m-%d')))
res.coalesce(8).write.format("orc").insertInto(dst["d_train"], overwrite=True)
def calc_06(src, dst, from_dt, to_dt):
res = algo(src, from_dt, to_dt)
res.coalesce(8).write.format("orc").insertInto(dst["d_subway_entrance"], overwrite=True)
def sandbox_src():
return {
"psg_train": spark.table("sandbox_mck.train"),
"psg_test": spark.table("sandbox_mck.test"),
"psg_dev": spark.table("sandbox_mck.dev")
}
def sandbox_dst():
return {
"psg_result": "sandbox_mck.psg_result"
}
def prod_src():
return {
"psg_train": spark.table("prod_data.psg_train"),
"psg_test": spark.table("prod_data.psg_test"),
"psg_dev": spark.table("prod_data.psg_dev")
}
def prod_dst():
return {
"psg_result": "prod_data.psg_result"
}
if __name__ == '__main__':
spark = SparkSession.builder.appName("calc_06_task").enableHiveSupport().getOrCreate()
spark.conf.set("spark.sql.sources.partitionOverwriteMode", "dynamic")
hivecontext = HiveContext(spark.sparkContext)
hivecontext.setConf("hive.exec.dynamic.partition", "true")
hivecontext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")
spark.sparkContext.setCheckpointDir("hdfs:///user/airflow/psg/calc_06_task")
opts = {
'from_dt': sys.argv[1],
"to_dt": "9999-12-31"
}
update_last_partition(prod_dst(), opts["from_dt"], opts["to_dt"])
calc_06(prod_src(), prod_dst(), opts["from_dt"], opts["to_dt"])
|
{"hexsha": "7ce916c555392272957aff619fb0711956545918", "size": 11988, "ext": "py", "lang": "Python", "max_stars_repo_path": "Raif/pyspark/calc_06.py", "max_stars_repo_name": "musicnova/7a_task", "max_stars_repo_head_hexsha": "2e34776de3706aabcac1afe66728b8701068a968", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Raif/pyspark/calc_06.py", "max_issues_repo_name": "musicnova/7a_task", "max_issues_repo_head_hexsha": "2e34776de3706aabcac1afe66728b8701068a968", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Raif/pyspark/calc_06.py", "max_forks_repo_name": "musicnova/7a_task", "max_forks_repo_head_hexsha": "2e34776de3706aabcac1afe66728b8701068a968", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3272727273, "max_line_length": 118, "alphanum_fraction": 0.6322989656, "include": true, "reason": "import numpy", "num_tokens": 3182}
|
from arkouda.pdarrayclass import pdarray
from pandas import Series, Timestamp, Timedelta as pdTimedelta, date_range as pd_date_range, timedelta_range as pd_timedelta_range, to_datetime, to_timedelta # type: ignore
from arkouda.dtypes import int64, isSupportedInt
from arkouda.pdarraycreation import from_series, array as ak_array
from arkouda.numeric import cast, abs as akabs
import numpy as np # type: ignore
import datetime
_BASE_UNIT = 'ns'
_unit2normunit = {'weeks': 'w',
'days': 'd',
'hours': 'h',
'hrs': 'h',
'minutes': 'm',
't': 'm',
'milliseconds': 'ms',
'l': 'ms',
'microseconds': 'us',
'u': 'us',
'nanoseconds': 'ns',
'n': 'ns'}
_unit2factor = {'w': 7*24*60*60*10**9,
'd': 24*60*60*10**9,
'h': 60*60*10**9,
'm': 60*10**9,
's': 10**9,
'ms': 10**6,
'us': 10**3,
'ns': 1}
def _get_factor(unit : str) -> int:
unit = unit.lower()
if unit in _unit2factor:
return _unit2factor[unit]
else:
for key, normunit in _unit2normunit.items():
if key.startswith(unit):
return _unit2factor[normunit]
raise ValueError("Argument must be one of {}".format(set(_unit2factor.keys()) |
set(_unit2normunit.keys())))
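# For reference: _get_factor resolves aliases and name prefixes before
# scaling, e.g. _get_factor('h') == _get_factor('hours') == 60*60*10**9 and
# _get_factor('t') == _get_factor('m') == 60*10**9 (nanoseconds per unit).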
def _identity(x, **kwargs):
return x
class _Timescalar:
def __init__(self, scalar):
if isinstance(scalar, np.datetime64) or isinstance(scalar, datetime.datetime):
scalar = to_datetime(scalar).to_numpy()
elif isinstance(scalar, np.timedelta64) or isinstance(scalar, datetime.timedelta):
scalar = to_timedelta(scalar).to_numpy()
self.unit = np.datetime_data(scalar.dtype)[0]
self._factor = _get_factor(self.unit)
# int64 in nanoseconds
self._data = self._factor * scalar.astype('int64')
class _AbstractBaseTime(pdarray):
'''Base class for Datetime and Timedelta; not user-facing. Arkouda handles
time similar to Pandas (albeit with less functionality), in that all absolute
and relative times are represented in nanoseconds as int64 behind the scenes.
Datetime and Timedelta can be constructed from Arkouda, NumPy, or Pandas arrays;
in each case, the input values are normalized to nanoseconds on initialization,
so that all resulting operations are transparent.
'''
def __init__(self, array, unit : str=_BASE_UNIT): # type: ignore
# Convert the input to int64 pdarray of nanoseconds
if isinstance(array, pdarray):
if array.dtype != int64:
raise TypeError("{} array must have int64 dtype".format(self.__class__.__name__))
# Already int64 pdarray, just scale
self.unit = unit
self._factor = _get_factor(self.unit)
# This makes a copy of the input array, to leave input unchanged
self._data = self._factor * array # Mimics a datetime64[ns] array
elif hasattr(array, 'dtype'):
# Handles all pandas and numpy datetime/timedelta arrays
if array.dtype.kind not in ('M', 'm'):
# M = datetime64, m = timedelta64
raise TypeError("Invalid dtype: {}".format(array.dtype.name))
if isinstance(array, Series):
# Pandas Datetime and Timedelta
# Get units of underlying numpy datetime64 array
self.unit = np.datetime_data(array.values.dtype)[0]
self._factor = _get_factor(self.unit)
# Create pdarray
self._data = from_series(array)
# Scale if necessary
# This is futureproofing; it will not be used unless pandas
# changes its Datetime implementation
if self._factor != 1:
# Scale inplace because we already created a copy
self._data *= self._factor
            elif isinstance(array, np.ndarray):
                # Numpy datetime64 and timedelta64
                # Force through pandas.Series, choosing the converter that
                # matches the dtype kind ('M' = datetime64, 'm' = timedelta64)
                if array.dtype.kind == 'M':
                    self.__init__(to_datetime(array).to_series())  # type: ignore
                else:
                    self.__init__(to_timedelta(array).to_series())  # type: ignore
elif hasattr(array, 'to_series'):
# Pandas DatetimeIndex
# Force through pandas.Series
self.__init__(array.to_series()) # type: ignore
else:
raise TypeError("Unsupported type: {}".format(type(array)))
# Now that self._data is correct, init self with same metadata
super().__init__(self._data.name, self._data.dtype.name, self._data.size, self._data.ndim, self._data.shape, self._data.itemsize)
@classmethod
def _get_callback(cls, other, op):
# Will be overridden by all children
return _identity
def floor(self, freq):
'''Round times down to the nearest integer of a given frequency.
Parameters
----------
freq : str {'d', 'm', 'h', 's', 'ms', 'us', 'ns'}
Frequency to round to
Returns
-------
self.__class__
Values rounded down to nearest frequency
'''
f = _get_factor(freq)
return self.__class__(self._data // f, unit=freq)
def ceil(self, freq):
'''Round times up to the nearest integer of a given frequency.
Parameters
----------
freq : str {'d', 'm', 'h', 's', 'ms', 'us', 'ns'}
Frequency to round to
Returns
-------
self.__class__
Values rounded up to nearest frequency
'''
f = _get_factor(freq)
return self.__class__((self._data + (f - 1)) // f, unit=freq)
def round(self, freq):
'''Round times to the nearest integer of a given frequency. Midpoint
values will be rounded to nearest even integer.
Parameters
----------
freq : str {'d', 'm', 'h', 's', 'ms', 'us', 'ns'}
Frequency to round to
Returns
-------
self.__class__
Values rounded to nearest frequency
'''
f = _get_factor(freq)
offset = self._data + ((f + 1) // 2)
rounded = offset // f
# Halfway values are supposed to round to the nearest even integer
# Need to figure out which ones ended up odd and fix them
decrement = ((offset % f) == 0) & ((rounded % 2) == 1)
rounded[decrement] = rounded[decrement] - 1
return self.__class__(rounded, unit=freq)
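    # Worked example of the half-to-even logic above (hypothetical values,
    # factor f = 10): raw 15 -> offset 20 -> 2, while raw 25 -> offset 30
    # -> 3, then decremented to 2 because the offset was an exact multiple
    # of f and 3 is odd; i.e. both 1.5 and 2.5 round to 2.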
def to_ndarray(self):
__doc__ = super().to_ndarray.__doc__
return np.array(self._data.to_ndarray(), dtype="{}64[ns]".format(self.__class__.__name__.lower()))
def __str__(self):
from arkouda.client import pdarrayIterThresh
if self.size <= pdarrayIterThresh:
vals = ["'{}'".format(self[i]) for i in range(self.size)]
else:
vals = ["'{}'".format(self[i]) for i in range(3)]
vals.append('... ')
vals.extend(["'{}'".format(self[i]) for i in range(self.size-3, self.size)])
spaces = ' '*(len(self.__class__.__name__)+1)
return "{}([{}],\n{}dtype='{}64[ns]')".format(self.__class__.__name__,
',\n{} '.format(spaces).join(vals),
spaces,
self.__class__.__name__.lower())
def __repr__(self) -> str:
return self.__str__()
def _binop(self, other, op):
# Need to do 2 things:
# 1) Determine return type, based on other's class
# 2) Get other's int64 data to combine with self's data
if isinstance(other, Datetime) or self._is_datetime_scalar(other):
if op not in self.supported_with_datetime:
raise TypeError("{} not supported between {} and Datetime".format(op, self.__class__.__name__))
otherclass = 'Datetime'
if self._is_datetime_scalar(other):
otherdata = _Timescalar(other)._data
else:
otherdata = other._data
elif isinstance(other, Timedelta) or self._is_timedelta_scalar(other):
if op not in self.supported_with_timedelta:
raise TypeError("{} not supported between {} and Timedelta".format(op, self.__class__.__name__))
otherclass = 'Timedelta'
if self._is_timedelta_scalar(other):
otherdata = _Timescalar(other)._data
else:
otherdata = other._data
elif (isinstance(other, pdarray) and other.dtype == int64) or isSupportedInt(other):
if op not in self.supported_with_pdarray:
raise TypeError("{} not supported between {} and integer".format(op, self.__class__.__name__))
otherclass = 'pdarray'
otherdata = other
else:
return NotImplemented
# Determines return type (Datetime, Timedelta, or pdarray)
callback = self._get_callback(otherclass, op)
# Actual operation evaluates on the underlying int64 data
return callback(self._data._binop(otherdata, op))
def _r_binop(self, other, op):
# Need to do 2 things:
# 1) Determine return type, based on other's class
# 2) Get other's int64 data to combine with self's data
# First case is pdarray <op> self
if (isinstance(other, pdarray) and other.dtype == int64):
if op not in self.supported_with_r_pdarray:
raise TypeError("{} not supported between int64 and {}".format(op, self.__class__.__name__))
callback = self._get_callback('pdarray', op)
# Need to use other._binop because self._data._r_binop can only handle scalars
return callback(other._binop(self._data, op))
# All other cases are scalars, so can use self._data._r_binop
elif self._is_datetime_scalar(other):
if op not in self.supported_with_r_datetime:
raise TypeError("{} not supported between scalar datetime and {}".format(op, self.__class__.__name__))
otherclass = 'Datetime'
otherdata = _Timescalar(other)._data
elif self._is_timedelta_scalar(other):
if op not in self.supported_with_r_timedelta:
raise TypeError("{} not supported between scalar timedelta and {}".format(op, self.__class__.__name__))
otherclass = 'Timedelta'
otherdata = _Timescalar(other)._data
elif isSupportedInt(other):
if op not in self.supported_with_r_pdarray:
raise TypeError("{} not supported between int64 and {}".format(op, self.__class__.__name__))
otherclass = 'pdarray'
otherdata = other
else:
# If here, type is not handled
return NotImplemented
callback = self._get_callback(otherclass, op)
return callback(self._data._r_binop(otherdata, op))
def opeq(self, other, op):
if isinstance(other, Timedelta) or self._is_timedelta_scalar(other):
if op not in self.supported_opeq:
raise TypeError("{} {} Timedelta not supported".format(self.__class__.__name__, op))
if self._is_timedelta_scalar(other):
other = _Timescalar(other)
self._data.opeq(other._data, op)
elif isinstance(other, Datetime) or self._is_datetime_scalar(other):
raise TypeError("{} {} datetime not supported".format(self.__class__.__name__, op))
else:
return NotImplemented
@staticmethod
def _is_datetime_scalar(scalar):
return (isinstance(scalar, Timestamp) or
(isinstance(scalar, np.datetime64) and np.isscalar(scalar)) or
isinstance(scalar, datetime.datetime))
@staticmethod
def _is_timedelta_scalar(scalar):
return (isinstance(scalar, pdTimedelta) or
(isinstance(scalar, np.timedelta64) and np.isscalar(scalar)) or
isinstance(scalar, datetime.timedelta))
def _scalar_callback(self, key):
# Will be overridden in all children
return key
def __getitem__(self, key):
if isSupportedInt(key):
# Single integer index will return a pandas scalar
return self._scalar_callback(self._data[key])
else:
# Slice or array index should return same class
return self.__class__(self._data[key])
def __setitem__(self, key, value):
# RHS can only be vector or scalar of same class
if isinstance(value, self.__class__):
# Value._data is already in nanoseconds, so self._data
# can be set directly
self._data[key] = value._data
elif self._is_supported_scalar(value):
# _Timescalar takes care of normalization to nanoseconds
normval = _Timescalar(value)
self._data[key] = normval._data
else:
return NotImplemented
def min(self):
__doc__ = super().min.__doc__
# Return type is pandas scalar
return self._scalar_callback(self._data.min())
def max(self):
__doc__ = super().max.__doc__
# Return type is pandas scalar
return self._scalar_callback(self._data.max())
def mink(self, k):
__doc__ = super().mink.__doc__
# Return type is same class
return self.__class__(self._data.mink(k))
def maxk(self, k):
__doc__ = super().maxk.__doc__
# Return type is same class
return self.__class__(self._data.maxk(k))
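# Minimal usage sketch of the concrete subclasses below (assumes a running
# arkouda server and that `ak.arange` is available; values illustrative):
#   import arkouda as ak
#   dt = Datetime(ak.arange(3), unit='s')   # 0, 1, 2 seconds since epoch
#   td = dt - dt[0]                         # Datetime - Timestamp -> Timedelta
#   dt.floor('m')                           # round down to whole minutes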
class Datetime(_AbstractBaseTime):
'''Represents a date and/or time.
Datetime is the Arkouda analog to pandas DatetimeIndex and
other timeseries data types.
Parameters
----------
array : int64 pdarray, pd.DatetimeIndex, pd.Series, or np.datetime64 array
    unit : str, default 'ns'
For int64 pdarray, denotes the unit of the input. Ignored for pandas
and numpy arrays, which carry their own unit. Not case-sensitive;
prefixes of full names (like 'sec') are accepted.
Possible values:
* 'weeks' or 'w'
* 'days' or 'd'
* 'hours' or 'h'
* 'minutes', 'm', or 't'
* 'seconds' or 's'
* 'milliseconds', 'ms', or 'l'
* 'microseconds', 'us', or 'u'
* 'nanoseconds', 'ns', or 'n'
Unlike in pandas, units cannot be combined or mixed with integers
Notes
-----
The ``._data`` attribute is always in nanoseconds with int64 dtype.
'''
supported_with_datetime = frozenset(('==', '!=', '<', '<=', '>', '>=', '-'))
supported_with_r_datetime = frozenset(('==', '!=', '<', '<=', '>', '>=', '-'))
supported_with_timedelta = frozenset(('+', '-', '/', '//', '%'))
    supported_with_r_timedelta = frozenset(('+',))
supported_opeq = frozenset(('+=', '-='))
supported_with_pdarray = frozenset(()) # type: ignore
supported_with_r_pdarray = frozenset(()) # type: ignore
@classmethod
def _get_callback(cls, otherclass, op):
callbacks = {('Datetime', '-'): Timedelta, # Datetime - Datetime -> Timedelta
('Timedelta', '+'): cls, # Datetime + Timedelta -> Datetime
('Timedelta', '-'): cls, # Datetime - Timedelta -> Datetime
('Timedelta', '%'): Timedelta} # Datetime % Timedelta -> Timedelta
# Every other supported op returns an int64 pdarray, so callback is identity
return callbacks.get((otherclass, op), _identity)
def _scalar_callback(self, scalar):
# Formats a scalar return value as pandas Timestamp
return Timestamp(int(scalar), unit=_BASE_UNIT)
    @staticmethod
    def _is_supported_scalar(scalar):
        # Tests whether scalar has a type compatible with this class's elements
        return Datetime._is_datetime_scalar(scalar)
def to_pandas(self):
'''Convert array to a pandas DatetimeIndex. Note: if the array size
exceeds client.maxTransferBytes, a RuntimeError is raised.
See Also
--------
to_ndarray
'''
return to_datetime(self.to_ndarray())
def sum(self):
raise TypeError("Cannot sum datetime64 values")
class Timedelta(_AbstractBaseTime):
'''Represents a duration, the difference between two dates or times.
Timedelta is the Arkouda equivalent of pandas.TimedeltaIndex.
Parameters
----------
array : int64 pdarray, pd.TimedeltaIndex, pd.Series, or np.timedelta64 array
unit : str, default 'ns'
For int64 pdarray, denotes the unit of the input. Ignored for pandas
and numpy arrays, which carry their own unit. Not case-sensitive;
prefixes of full names (like 'sec') are accepted.
Possible values:
* 'weeks' or 'w'
* 'days' or 'd'
* 'hours' or 'h'
* 'minutes', 'm', or 't'
* 'seconds' or 's'
* 'milliseconds', 'ms', or 'l'
* 'microseconds', 'us', or 'u'
* 'nanoseconds', 'ns', or 'n'
Unlike in pandas, units cannot be combined or mixed with integers
Notes
-----
The ``._data`` attribute is always in nanoseconds with int64 dtype.
'''
    supported_with_datetime = frozenset(('+',))
supported_with_r_datetime = frozenset(('+', '-', '/', '//', '%'))
supported_with_timedelta = frozenset(('==', '!=', '<', '<=', '>', '>=', '+', '-', '/', '//', '%'))
supported_with_r_timedelta = frozenset(('==', '!=', '<', '<=', '>', '>=', '+', '-', '/', '//', '%'))
supported_opeq = frozenset(('+=', '-=', '%='))
supported_with_pdarray = frozenset(('*', '//'))
    supported_with_r_pdarray = frozenset(('*',))
@classmethod
def _get_callback(cls, otherclass, op):
callbacks = {('Timedelta', '-'): cls, # Timedelta - Timedelta -> Timedelta
('Timedelta', '+'): cls, # Timedelta + Timedelta -> Timedelta
('Datetime', '+'): Datetime, # Timedelta + Datetime -> Datetime
('Datetime', '-'): Datetime, # Datetime - Timedelta -> Datetime
('Timedelta', '%'): cls, # Timedelta % Timedelta -> Timedelta
('pdarray', '//'): cls, # Timedelta // pdarray -> Timedelta
('pdarray', '*'): cls} # Timedelta * pdarray -> Timedelta
# Every other supported op returns an int64 pdarray, so callback is identity
return callbacks.get((otherclass, op), _identity)
def _scalar_callback(self, scalar):
# Formats a returned scalar as a pandas.Timedelta
return pdTimedelta(int(scalar), unit=_BASE_UNIT)
    @staticmethod
    def _is_supported_scalar(scalar):
        return Timedelta._is_timedelta_scalar(scalar)
def to_pandas(self):
'''Convert array to a pandas TimedeltaIndex. Note: if the array size
exceeds client.maxTransferBytes, a RuntimeError is raised.
See Also
--------
to_ndarray
'''
return to_timedelta(self.to_ndarray())
def sum(self):
# Sum as a pd.Timedelta
return self._scalar_callback(self._data.sum())
def abs(self):
'''Absolute value of time interval.
'''
return self.__class__(cast(akabs(self._data), 'int64'))
def date_range(start=None, end=None, periods=None, freq=None,
tz=None, normalize=False, name=None, closed=None, **kwargs):
'''Creates a fixed frequency Datetime range. Alias for
``ak.Datetime(pd.date_range(args))``. Subject to size limit
imposed by client.maxTransferBytes.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
timeseries.offset_aliases for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
'''
return Datetime(pd_date_range(start, end, periods, freq,
tz, normalize, name, closed, **kwargs))
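# e.g. date_range('2020-01-01', periods=4, freq='D') yields four consecutive
# days as an arkouda Datetime array, mirroring pandas.date_range semantics.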
def timedelta_range(start=None, end=None, periods=None, freq=None,
name=None, closed=None, **kwargs):
'''Return a fixed frequency TimedeltaIndex, with day as the default
frequency. Alias for ``ak.Timedelta(pd.timedelta_range(args))``.
Subject to size limit imposed by client.maxTransferBytes.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
'''
return Timedelta(pd_timedelta_range(start, end, periods, freq,
name, closed, **kwargs))
|
{"hexsha": "ab2c1049f98da5ada069bac23a5750a6b8130218", "size": 23318, "ext": "py", "lang": "Python", "max_stars_repo_path": "arkouda/timeclass.py", "max_stars_repo_name": "jackgoodier/arkouda", "max_stars_repo_head_hexsha": "4a3855fd940160355880a5194736500fb896d982", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-25T18:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-25T18:05:45.000Z", "max_issues_repo_path": "arkouda/timeclass.py", "max_issues_repo_name": "jackgoodier/arkouda", "max_issues_repo_head_hexsha": "4a3855fd940160355880a5194736500fb896d982", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "arkouda/timeclass.py", "max_forks_repo_name": "jackgoodier/arkouda", "max_forks_repo_head_hexsha": "4a3855fd940160355880a5194736500fb896d982", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5650623886, "max_line_length": 172, "alphanum_fraction": 0.5931469251, "include": true, "reason": "import numpy", "num_tokens": 5380}
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import glob
import os
import os.path as osp
import sys
import numpy as np
from PIL import Image
from pdseg.vis import get_color_map_list
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('dir_or_file',
help='input gray label directory or file list path')
parser.add_argument('output_dir',
help='output colorful label directory')
parser.add_argument('--dataset_dir',
help='dataset directory')
parser.add_argument('--file_separator',
help='file list separator')
return parser.parse_args()
def gray2pseudo_color(args):
"""将灰度标注图片转换为伪彩色图片"""
input = args.dir_or_file
output_dir = args.output_dir
if not osp.exists(output_dir):
os.makedirs(output_dir)
print('Creating colorful label directory:', output_dir)
color_map = get_color_map_list(256)
if os.path.isdir(input):
for grt_path in glob.glob(osp.join(input, '*.png')):
print('Converting original label:', grt_path)
basename = osp.basename(grt_path)
im = Image.open(grt_path)
lbl = np.asarray(im)
lbl_pil = Image.fromarray(lbl.astype(np.uint8), mode='P')
lbl_pil.putpalette(color_map)
new_file = osp.join(output_dir, basename)
lbl_pil.save(new_file)
elif os.path.isfile(input):
if args.dataset_dir is None or args.file_separator is None:
print('No dataset_dir or file_separator input!')
sys.exit()
with open(input) as f:
for line in f:
parts = line.strip().split(args.file_separator)
grt_name = parts[1]
grt_path = os.path.join(args.dataset_dir, grt_name)
print('Converting original label:', grt_path)
basename = osp.basename(grt_path)
im = Image.open(grt_path)
lbl = np.asarray(im)
lbl_pil = Image.fromarray(lbl.astype(np.uint8), mode='P')
lbl_pil.putpalette(color_map)
new_file = osp.join(output_dir, basename)
lbl_pil.save(new_file)
else:
print('It\'s neither a dir nor a file')
if __name__ == '__main__':
args = parse_args()
gray2pseudo_color(args)
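# Example invocations (paths are hypothetical):
#   python gray2pseudo_color.py annotations/ pseudo_color/
#   python gray2pseudo_color.py file_list.txt pseudo_color/ \
#       --dataset_dir dataset/ --file_separator ' '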
|
{"hexsha": "b385049172c4b134aca849682cbf76193c569f62", "size": 2500, "ext": "py", "lang": "Python", "max_stars_repo_path": "pdseg/tools/gray2pseudo_color.py", "max_stars_repo_name": "Channingss/PaddleSeg", "max_stars_repo_head_hexsha": "19e89e7f938b75b362aea5fba71ab5b51af00150", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pdseg/tools/gray2pseudo_color.py", "max_issues_repo_name": "Channingss/PaddleSeg", "max_issues_repo_head_hexsha": "19e89e7f938b75b362aea5fba71ab5b51af00150", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pdseg/tools/gray2pseudo_color.py", "max_forks_repo_name": "Channingss/PaddleSeg", "max_forks_repo_head_hexsha": "19e89e7f938b75b362aea5fba71ab5b51af00150", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6455696203, "max_line_length": 76, "alphanum_fraction": 0.6096, "include": true, "reason": "import numpy", "num_tokens": 539}
|
from sstcam_sandbox.d181023_dc_tf import all_files
from sstcam_sandbox import get_data, HDF5Writer
from CHECLabPy.core.io import TIOReader
import numpy as np
import pandas as pd
from tqdm import trange
from IPython import embed
def get_df(paths, vped_list):
assert (len(paths) == len(vped_list))
readers = [TIOReader(p) for p in paths]
n_files = len(paths)
    first_reader = readers[0]
n_pixels = first_reader.n_pixels
n_samples = first_reader.n_samples
# mean = np.zeros((n_files, n_pixels, n_samples))
# std = np.zeros((n_files, n_pixels, n_samples))
# vped = np.zeros((n_files, n_pixels, n_samples))
jpixel, jsample = np.indices((n_pixels, n_samples))
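    # jpixel[p, s] == p and jsample[p, s] == s; ravelled below they give one
    # long-format dataframe row per (pixel, sample) pair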
df_list = []
for ifile in trange(n_files):
reader = readers[ifile]
r_n_events = reader.n_events
r_n_pixels = reader.n_pixels
r_n_samples = reader.n_samples
samples = np.zeros((r_n_events, r_n_pixels, r_n_samples))
for iev, wf in enumerate(reader):
samples[iev] = wf
mean = np.mean(samples, 0)
std = np.std(samples, 0)
vped = vped_list[ifile]
df_list.append(pd.DataFrame(dict(
vped_dac=vped,
pixel=jpixel.ravel(),
sample=jsample.ravel(),
mean=mean.ravel(),
std=std.ravel(),
)))
df = pd.concat(df_list, ignore_index=True)
return df
def process(file):
r0_paths = file.r0_paths
tfnone_paths = file.tfnone_paths
tfpoly_paths = file.tfpoly_paths
vped_list = file.vped_list
output_path = file.averages_path
try:
r0_df = get_df(r0_paths, vped_list)
tfnone_df = get_df(tfnone_paths, vped_list)
tfpoly_df = get_df(tfpoly_paths, vped_list)
    except Exception:
        embed()
    with HDF5Writer(output_path) as writer:
        writer.write(
            r0=r0_df,
            tfnone=tfnone_df,
            tfpoly=tfpoly_df,
        )
def main():
[process(f) for f in all_files]
if __name__ == '__main__':
main()
|
{"hexsha": "c9f1f8a76ab14a14c73b76df320365ad28546415", "size": 2048, "ext": "py", "lang": "Python", "max_stars_repo_path": "sstcam_sandbox/d181023_dc_tf/averages.py", "max_stars_repo_name": "watsonjj/CHECLabPySB", "max_stars_repo_head_hexsha": "91330d3a6f510a392f635bd7f4abd2f77871322c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sstcam_sandbox/d181023_dc_tf/averages.py", "max_issues_repo_name": "watsonjj/CHECLabPySB", "max_issues_repo_head_hexsha": "91330d3a6f510a392f635bd7f4abd2f77871322c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sstcam_sandbox/d181023_dc_tf/averages.py", "max_forks_repo_name": "watsonjj/CHECLabPySB", "max_forks_repo_head_hexsha": "91330d3a6f510a392f635bd7f4abd2f77871322c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-30T09:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T09:46:56.000Z", "avg_line_length": 25.2839506173, "max_line_length": 65, "alphanum_fraction": 0.634765625, "include": true, "reason": "import numpy", "num_tokens": 550}
|
# struct WiderFactor{S<:Unsigned, T<:Unsigned} <: AbstractFactor{T}
# basefactor::AbstractFactor{S}
# end
# Base.length(factor::WiderFactor{S, T}) where {S<:Unsigned} where {T<:Unsigned} = length(factor.basefactor)
# getlevels(factor::WiderFactor{S, T}) where {S<:Unsigned} where {T<:Unsigned} = getlevels(factor.basefactor)
# getname(factor::WiderFactor{S, T}) where {S<:Unsigned} where {T<:Unsigned} = getname(factor.basefactor)
# function slice(factor::WiderFactor{S, T}, fromobs::Integer, toobs::Integer, slicelength::Integer) where {S<:Unsigned} where {T<:Unsigned}
# slicelength = verifyslicelength(fromobs, toobs, slicelength)
# if S == T
# slice(factor.basefactor, fromobs, toobs, slicelength)
# else
# f = x -> convert(T, x)
# mapslice(f, slice(factor.basefactor, fromobs, toobs, slicelength), slicelength, T)
# end
# end
# function Base.convert(::Type{WiderFactor{S, T}}, x::AbstractFactor{S}) where {S<:Unsigned} where {T<:Unsigned}
# WiderFactor{S, T}(x)
# end
# function Base.map(factor::WiderFactor, dataframe::AbstractDataFrame)
# map(factor.basefactor, dataframe)
# end
# function isordinal(factor::WiderFactor)
# isordinal(factor.basefactor)
# end
|
{"hexsha": "82b99eca53e66cc4cce30e985e8dccc4686ba096", "size": 1230, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Factors/widerfactor.jl", "max_stars_repo_name": "Statfactory/JuML", "max_stars_repo_head_hexsha": "fe9de3a2ac2a7ee862e47ed5be72e565d9b01e94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2018-02-08T06:26:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-15T21:50:26.000Z", "max_issues_repo_path": "src/Factors/widerfactor.jl", "max_issues_repo_name": "Statfactory/JuML", "max_issues_repo_head_hexsha": "fe9de3a2ac2a7ee862e47ed5be72e565d9b01e94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-09-11T03:17:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-01T21:48:49.000Z", "max_forks_repo_path": "src/Factors/widerfactor.jl", "max_forks_repo_name": "Statfactory/JuML", "max_forks_repo_head_hexsha": "fe9de3a2ac2a7ee862e47ed5be72e565d9b01e94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-08-10T07:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-26T00:11:55.000Z", "avg_line_length": 39.6774193548, "max_line_length": 139, "alphanum_fraction": 0.6926829268, "num_tokens": 373}
|
import base64
from io import BytesIO
from itertools import product
import pandas as pd
import numpy as np
run_aggs = {
"m": "mean",
"n": "mean",
"j": "mean",
"p_1": "mean",
"p_2": "mean",
"p_3": "mean",
"q_h1": "mean",
"q_h2": "mean",
"q_ml": "mean",
"alpha_ml": "mean",
"p_turb": "mean",
"q_ml_scaling": "last",
"avg_q_ml": "mean",
"code_kl": ["mean", "std"],
"human_kl": ["mean", "std"],
"human_kl_var": "mean",
"human_kl_dissim": "mean",
}
time_aggs = {
"m": "mean",
"n": "last",
"j": "last",
"p_1": "mean",
"p_2": "mean",
"p_3": "mean",
"q_h1": "mean",
"q_h2": "mean",
"q_ml": "mean",
"alpha_ml": "mean",
"p_turb": "mean",
"q_ml_scaling": "last",
"avg_q_ml": ["max", "last"],
"code_kl": ["max", "last"],
"human_kl": ["max", "last"],
"human_kl_var": ["max", "last"],
"code_kl_std": "last",
"human_kl_std": "last",
"human_kl_dissim": ["max", "last"],
}
col_names = {
"m_mean": "m",
"n_last": "n",
"n_mean": "n",
"j_last": "j",
"j_mean": "j",
"p_1_mean": "p_1",
"p_2_mean": "p_2",
"p_3_mean": "p_3",
"q_h1_mean": "q_h1",
"q_h2_mean": "q_h2",
"q_ml_mean": "q_ml",
"alpha_ml_mean": "alpha_ml",
"p_turb_mean": "p_turb",
"avg_q_ml_mean": "avg_q_ml",
"code_kl_mean": "code_kl",
"q_ml_scaling_last": "q_ml_scaling",
"human_kl_mean": "human_kl",
"human_kl_var_mean": "human_kl_var",
"human_kl_dissim_mean": "human_kl_dissim",
"code_kl_std_last": "code_kl_std",
"human_kl_std_last": "human_kl_std",
}
def preprocess_dataset(data, run_aggs, time_aggs, col_names):
# round values to enable secure indexing
data = data.round(4)
data.reset_index(inplace=True)
# reindex
configs = pd.unique(data.config)
runs = pd.unique(data.run)
steps = pd.unique(data.step)
index = pd.MultiIndex.from_product(
[configs, runs, steps], names=['config', 'run', 'step'])
data.index = index
data = data.drop(columns=['config', 'run', 'step'])
# aggregate over runs
time_data = data.groupby(level=[0, 2]).agg(run_aggs)
time_data = time_data.round(4)
time_data.columns = ['_'.join(col).strip()
for col in time_data.columns.values]
time_data.rename(columns=col_names, inplace=True)
# aggregate over time steps
agg_data = time_data.groupby(level=0).agg(time_aggs)
agg_data = agg_data.round(4)
agg_data.columns = ['_'.join(col).strip()
for col in agg_data.columns.values]
agg_data.rename(columns=col_names, inplace=True)
return time_data, agg_data
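# Usage sketch (assumes a long-format frame with 'config', 'run' and 'step'
# columns plus the metric columns named in the agg dicts above; the CSV path
# below is hypothetical):
#   data = pd.read_csv('simulation_results.csv')
#   time_data, agg_data = preprocess_dataset(data, run_aggs, time_aggs, col_names)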
|
{"hexsha": "b0c27ee9876de6d46b108898462b23af4d9abb7e", "size": 2686, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/analysis.py", "max_stars_repo_name": "felixpeters/ai-sim-job", "max_stars_repo_head_hexsha": "d185e91ed153c37bf7ade61a07399ad3f1dbf7b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/analysis.py", "max_issues_repo_name": "felixpeters/ai-sim-job", "max_issues_repo_head_hexsha": "d185e91ed153c37bf7ade61a07399ad3f1dbf7b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:27:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:48:34.000Z", "max_forks_repo_path": "utils/analysis.py", "max_forks_repo_name": "felixpeters/ml-ol-simulation", "max_forks_repo_head_hexsha": "d185e91ed153c37bf7ade61a07399ad3f1dbf7b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1313131313, "max_line_length": 64, "alphanum_fraction": 0.5766939687, "include": true, "reason": "import numpy", "num_tokens": 881}
|
# This file is a part of FaceCraker. License is MIT
using Documenter, FaceCraker
makedocs(
    modules = [FaceCraker],
    sitename = "FaceCraker.jl",
    pages = Any[
        "index.md"
    ],
    assets = [""],
)
deploydocs(
    repo = "github.com/fetaxyu/FaceCraker.jl",
    versions = ["v#.#", "dev" => "dev"],
)
|
{"hexsha": "f2c01f591288f0aeb35418240a2615b61d434053", "size": 294, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "gitter-badger/FaceCracker.jl", "max_stars_repo_head_hexsha": "063933e38fc46df37ad33e976580832c8e9ae2f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "gitter-badger/FaceCracker.jl", "max_issues_repo_head_hexsha": "063933e38fc46df37ad33e976580832c8e9ae2f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "gitter-badger/FaceCracker.jl", "max_forks_repo_head_hexsha": "063933e38fc46df37ad33e976580832c8e9ae2f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.3333333333, "max_line_length": 51, "alphanum_fraction": 0.6360544218, "num_tokens": 101}
|
# -*- coding: utf-8 -*-
# Computing Persistent Homology and its histogram
import os,glob
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
import argparse,json
import cripser
from scipy.ndimage import distance_transform_edt  # the morphology submodule path is deprecated
from skimage import feature,morphology
from skimage.filters import threshold_otsu, scharr
from PIL import Image
from multiprocessing import Pool
from functools import partial
try:
import persim
from gudhi.representations import Landscape
except ImportError:  # persim / gudhi are optional (only needed for persistence images / landscapes)
pass
# preprocess image before computing PH
def preprocess_image(img, gradient=False, img_size=None, filtration=None, origin=(0,0)):
if len(img.shape)>2:
im = np.dot(img[...,:3], [0.2989, 0.5870, 0.1140])
else:
im = img
if img_size:
import skimage.transform
im = skimage.transform.resize(im,(img_size,img_size))
if gradient:
#im = feature.canny(im, sigma=10)
im = scharr(im)
if filtration is not None:
if im.max()==im.min():
return(np.zeros_like(im))
bw_img = (im >= threshold_otsu(im))
#bw_img = morphology.area_opening(bw_img, area_threshold=8, connectivity=2)
if filtration=='binarise':
return(bw_img)
if filtration == 'distance':
dt_img = distance_transform_edt(~bw_img) # distance from the foreground 0
#print(dt_img.max())
elif filtration == 'signed_distance':
dt_img = distance_transform_edt(~bw_img)-distance_transform_edt(bw_img)
elif filtration in ['downward','upward']:
null_idx = (bw_img == 0)
## height transform
if len(im.shape) == 3: #(z,y,x)
h = np.arange(im.shape[0]).reshape(-1,1,1)
else:
h = np.arange(im.shape[0]).reshape(-1,1)
if filtration=='upward':
h = np.max(h) - h
dt_img = (bw_img * h)
dt_img[null_idx] = np.max(h)
elif 'radial' in filtration:
null_idx = (bw_img == 0)
h = np.linalg.norm(np.stack(np.meshgrid(*map(range,im.shape),indexing='ij'),axis=-1)-np.array(origin), axis=-1)
dt_img = (bw_img * h)
if filtration=='radial_inv':
dt_img = np.max(dt_img) - dt_img
else:
dt_img[null_idx] = np.max(h)
dt_img *= 256/dt_img.shape[0] # scaling normalisation
return(dt_img)
else:
return(im)
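# Illustrative sketch: for a small binary square, the signed-distance
# filtration is negative inside the foreground and positive outside, e.g.
#   im = np.zeros((8, 8)); im[2:6, 2:6] = 255.0
#   dt = preprocess_image(im, filtration='signed_distance')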
## computing PH of an image
def comp_PH(img, gradient=True, img_size=None, filtration=None):
im = preprocess_image(img, gradient=gradient, img_size=img_size, filtration=filtration)
pd = cripser.computePH(im.astype(np.float64))
#pd = cripser.computePH(im,maxdim=args.maxdim,top_dim=args.top_dim,embedded=args.embedded)
#print(sum(pd[:,0] == 1))
#print(im.shape)
return(pd)
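# Usage sketch (assumption: cripser.computePH returns an (n, 9) array whose
# first three columns are [dim, birth, death]):
#   pd = comp_PH(np.random.rand(32, 32) * 255, gradient=False,
#                filtration='signed_distance')
#   pd0 = pd[pd[:, 0] == 0]  # 0-dimensional (connected-component) classes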
def comp_save_PH(fname, args):
bfn = os.path.splitext(os.path.basename(fname))[0]
img = np.array(Image.open(fname).convert('L'),dtype=np.float64)
ph = comp_PH(img, gradient=args.gradient, filtration=args.filtration, img_size=args.img_size)
np.save(os.path.join(args.output, bfn), ph)
def comp_landscape(ph, dim, min_birth=None, max_birth=None, max_life=None,n=5):
res = []
for d in [0,1]:
pds = ph[ph[:,0] == d, 1:3]
#pds[:,1] = pds[:,0]+(np.clip(pds[:,1]-pds[:,0],0,max_life))
res.append(Landscape(num_landscapes=n, resolution=dim[d]//n, sample_range=[min_birth[d],max_birth[d]]).fit_transform([pds]).ravel().astype(np.float32))
return(np.sqrt(np.concatenate(res)))
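# Note: gudhi's Landscape(num_landscapes=n, resolution=r) returns a vector of
# length n*r per diagram, so each dimension contributes n*(dim[d]//n) values;
# the final sqrt is read here as a simple magnitude rescaling of the feature.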
def comp_persistence_image(ph, args=None):
pims = []
for d in [0,1]:
s = np.sqrt((args.max_birth[d]-args.min_birth[d])*args.max_life[d]/args.num_bins[d])
p = int((args.max_birth[d]-args.min_birth[d])/s)
q = int(args.max_life[d]/s)
while p*q < args.num_bins[d]:
s = max((args.max_birth[d]-args.min_birth[d])/(p+1), args.max_life[d]/(q+1))
p = int((args.max_birth[d]-args.min_birth[d])/s)
q = int(args.max_life[d]/s)
pim = persim.PersistenceImager(birth_range=(args.min_birth[d],args.max_birth[d]),
pers_range=(0,args.max_life[d]),pixel_size=s,
kernel_params={'sigma': [[args.persImg_sigma, 0.0], [0.0, args.persImg_sigma]]},
weight_params={'n': args.persImg_weight})
p = (ph[ph[:,0]==d])[:,1:3] # extract dim=d cycles
life = p[:,1]-p[:,0]
life = np.clip(life,a_min=None,a_max=args.max_life[d])
p[:,1] = life
pi = pim.transform(p, skew=False)
#print(s, args.num_bins[d])
#print(pi.shape)
pi = pi.ravel()
#pi = np.pad(pi,(0,args.num_bins[d]))
pi = pi[:args.num_bins[d]]
pi = np.abs(pi) ** args.persImg_power # to suppress overflow during learning
pims.append(pi.astype(np.float32))
return(pims)
def comp_betticurve(ph, dim, min_birth=None, max_birth=None, max_life=None):
res = []
for d in range(2):
pds = ph[ph[:,0] == d, 1:3]
mlife = (np.clip(pds[:,1]-pds[:,0],0,max_life[d]))
res.append(np.zeros(dim[d]))
for i,th in enumerate(np.linspace(min_birth[d],max_birth[d],num=dim[d])):
#print(th, np.sum(np.logical_and(pds[:,0] < th, pds[:,1] > th)))
res[-1][i] = np.sum(mlife[np.logical_and(pds[:,0] < th, pds[:,1] > th)])
return np.sqrt(np.concatenate(res))
def comp_persistence_histogram(ph, num_bins, min_birth=None, max_birth=None, max_life=None, bandwidth=1):
# print(args.num_bins)
pds =[ph[ph[:,0] == i, 1:3] for i in range(2)]
#print(len(pds[0]),len(pds[1]))
life = [pd[:,1]-pd[:,0] for pd in pds]
life = [l[l<1e+10] for l in life] # remove permanent cycle
life = [np.clip(l,0,max_life[i]) for i,l in enumerate(life)]
# hsb = np.zeros(args.num_bins[2])
# for k,ind in enumerate(np.searchsorted(lsb,pds[1][:,0])):
# hsb[ind] += (pds[1][k,1]-pds[1][k,0]) ## lifetime weighted count
# hs0 = gaussian_kde(life[0])(ls) * len(life[0])
# hs1 = gaussian_kde(life[1])(ls) * len(life[1])
## histogram for lifetime for each dimension
hsl0, _ = np.histogram(life[0],bins=num_bins[0], range=(0,max_life[0]))
hsl1, _ = np.histogram(life[1],bins=num_bins[1], range=(0,max_life[1]))
# plt.hist(pds[0][:,0],weights=pds[0][:,1]-pds[0][:,0])
# plt.show()
# plt.hist(pds[1][:,0],weights=pds[1][:,1]-pds[1][:,0])
# plt.show()
## histogram for birthtime for each dimension
hsb0, _ = np.histogram(pds[0][:,0],bins=num_bins[2], range=(min_birth[0],max_birth[0]), weights=pds[0][:,1]-pds[0][:,0])
hsb1, _ = np.histogram(pds[1][:,0],bins=num_bins[3], range=(min_birth[1],max_birth[1]), weights=pds[1][:,1]-pds[1][:,0])
# lifetime weighting
hsl0 = hsl0*(1.0+np.linspace(0,max_life[0],num_bins[0]))
hsl1 = hsl1*(1.0+np.linspace(0,max_life[1],num_bins[1]))
# smoothing
hsl0 = kern_smooth(hsl0, bandwidth=bandwidth, kern='hanning')
hsl1 = kern_smooth(hsl1, bandwidth=bandwidth, kern='hanning')
hsb0 = kern_smooth(hsb0, bandwidth=bandwidth, kern='hanning')
hsb1 = kern_smooth(hsb1, bandwidth=bandwidth, kern='hanning')
# log and scaling
hsl0,hsl1,hsb0,hsb1 = np.log(hsl0+1), np.log(hsl1+1), np.log(hsb0+1)/100, np.log(hsb1+1)
# print(np.min(pds[0][:,0]),np.max(pds[0][:,0]),np.min(pds[1][:,0]),np.max(pds[1][:,0]))
# print(np.min(pds[0][:,1]),np.max(pds[0][:,1]),np.min(pds[1][:,1]),np.max(pds[1][:,1]),"\n")
return(np.concatenate([hsl0,hsl1,hsb0,hsb1]))
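# Output layout (lengths num_bins[0..3], in this order):
#   [lifetime hist dim0 | lifetime hist dim1 | birth hist dim0 | birth hist dim1]
# matching the c1..c4 slicing in the __main__ block below.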
# kern = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
def kern_smooth(y, bandwidth=11, kern='flat'):
if bandwidth<2:
return(y)
b = int(bandwidth)
if kern == 'flat':
w=np.ones(bandwidth,'d')
else:
w=getattr(np,kern)(b)
res = np.convolve(w/w.sum(),np.r_[y[b-1:0:-1],y,y[-2:-b-1:-1]],mode='valid')
c = (len(res)-len(y))//2
return(res[c:(c+len(y))])
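# e.g. kern_smooth(np.sin(np.linspace(0, 6, 200)) + 0.1 * np.random.randn(200),
#                  bandwidth=11, kern='hanning')
# returns a same-length smoothed signal (the input is reflection-padded).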
if __name__== "__main__":
parser = argparse.ArgumentParser("")
parser.add_argument('target_dir',type=str)
parser.add_argument('--output', '-o', default=None)
parser.add_argument('--max_life', '-ml', type=int, nargs=2, default=[50,50])
parser.add_argument('--max_birth', '-maxb', type=int, nargs=2, default=None)
parser.add_argument('--min_birth', '-minb', type=int, nargs=2, default=None)
parser.add_argument('--num_bins', '-n', type=int, nargs="*", default=[50,50,50,50])
parser.add_argument('--bandwidth', '-b', type=int, default=1)
parser.add_argument('--persImg_sigma', '-ps', type=float, default=1)
parser.add_argument('--persImg_power', '-pp', type=float, default=0.5, help='scaling for the vector')
parser.add_argument('--persImg_weight', '-pn', type=float, default=1.0, help='weight for persistence weighting in persistence image')
parser.add_argument('--imgtype', '-it', type=str, default=None)
parser.add_argument('--type', '-t', type=str, choices=['raw','persistence_betticurve','persistence_histogram','persistence_image','persistence_landscape','grid'], help="type of label")
parser.add_argument('--filtration', '-f', default='signed_distance', choices=[None,'distance','signed_distance','radial','radial_inv','upward','downward'], help="type of filtration")
parser.add_argument("--num_workers", '-nw', default=8, type = int, help="num of workers (data_loader)")
parser.add_argument('--save_fig', '-sf', action="store_true", help="save graphs")
parser.add_argument('--gradient', '-g', action="store_true", default=False, help="apply gradient filter")
parser.add_argument("--img_size", '-is', default=None, type = int, help="input images will be resized initially")
args = parser.parse_args()
# adjustment w.r.t. the possible minimum value for the image
if args.max_birth is None:
args.max_birth = [args.max_life[0],args.max_life[1]]
if args.min_birth is None:
if args.filtration=='signed_distance':
args.min_birth = [-args.max_life[0],-args.max_life[1]]
else:
args.min_birth = [0,0]
grad = "grad" if args.gradient else ""
if args.output is None:
dn1,dn2 = os.path.split((os.path.normpath(args.target_dir))) # the leaf name
phdn = os.path.join(dn1,"PH{}_{}_{}".format(grad,args.filtration,dn2))
# if os.path.isdir(phdn):
# print("Please specify output directory!")
# exit()
# else:
args.output = phdn
print("output will be saved under: ", args.output)
###
print(args)
target_dir = args.target_dir
os.makedirs(args.output, exist_ok=True)
with open(os.path.join(args.output, "args.json"), mode="w") as f:
json.dump(args.__dict__, f, indent=4)
gfns = []
imgtypes = [args.imgtype] if args.imgtype else ['png','PNG','jpg','JPG','tif','TIF','tiff','TIFF']
for it in imgtypes:
gfns.extend(glob.glob(os.path.join(target_dir,"**/*.{}".format(it)), recursive=True))
fns=sorted(list(set(gfns)))
if args.type == "persistence_histogram":
print("compute and save persistence histogram...")
meanPHl0 = np.zeros(args.num_bins[0])
meanPHl1 = np.zeros(args.num_bins[1])
meanPHb0 = np.zeros(args.num_bins[2])
meanPHb1 = np.zeros(args.num_bins[3])
for fname in tqdm(fns, total=len(fns)):
bfn = os.path.splitext(os.path.basename(fname))[0]
if args.imgtype=="npy":
ph = np.load(fname)
else:
sample = np.array(Image.open(fname).convert('L'),dtype=np.float64)
ph = comp_PH(sample, gradient=args.gradient, filtration=args.filtration)
np.save(os.path.join(args.output, bfn), ph)
hs = comp_persistence_histogram(ph, args.num_bins, min_birth=args.min_birth, max_birth=args.max_birth, max_life=args.max_life, bandwidth=args.bandwidth)
np.save(os.path.join(args.output, bfn+"_hist"), hs.astype(np.float32))
c1 = hs[:args.num_bins[0]]
c2 = hs[args.num_bins[0]:(args.num_bins[0]+args.num_bins[1])]
c3 = hs[(args.num_bins[0]+args.num_bins[1]):(args.num_bins[0]+args.num_bins[1]+args.num_bins[2])]
c4 = hs[(args.num_bins[0]+args.num_bins[1]+args.num_bins[2]):]
meanPHl0 += c1
meanPHl1 += c2
meanPHb0 += c3
meanPHb1 += c4
if args.save_fig:
sns.lineplot(x=np.arange(len(c1)),y=c1, legend="full")
sns.lineplot(x=np.arange(len(c2)),y=c2, legend="full",style=True, dashes=[(2,2)])
sns.lineplot(x=np.arange(len(c3)),y=c3, legend="full",linewidth=2.5)
sns.lineplot(x=np.arange(len(c4)),y=c4, legend="full",style=True, dashes=[(2,2)],linewidth=2.5)
plt.savefig(os.path.join(args.output, bfn+"_histCurve.jpg"))
plt.close()
meanPHl0 /= len(fns)
meanPHl1 /= len(fns)
meanPHb0 /= len(fns)
meanPHb1 /= len(fns)
print(meanPHl0.max(), meanPHl1.max(),meanPHb0.max(),meanPHb1.max())
print(sum(meanPHl0>0), sum(meanPHl1>0), sum(meanPHb0>0), sum(meanPHb1>0))
if args.save_fig:
sns.lineplot(x=np.arange(len(meanPHl0)),y=meanPHl0, legend="full")
sns.lineplot(x=np.arange(len(meanPHl1)),y=meanPHl1, legend="full",style=True, dashes=[(2,2)])
sns.lineplot(x=np.arange(len(meanPHb0)),y=meanPHb0, legend="full",linewidth=2.5)
sns.lineplot(x=np.arange(len(meanPHb1)),y=meanPHb1, legend="full",style=True, dashes=[(2,2)],linewidth=2.5)
plt.show()
elif args.type == "persistence_betticurve":
print("compute and save betti curve...")
meanPHl0 = np.zeros(args.num_bins[0])
meanPHl1 = np.zeros(args.num_bins[1])
for fname in tqdm(fns, total=len(fns)):
sample = np.array(Image.open(fname).convert('L'),dtype=np.float64)
ph = comp_PH(sample, gradient=args.gradient, filtration=args.filtration)
res = comp_betticurve(ph, args.num_bins, min_birth=args.min_birth, max_birth=args.max_birth, max_life=args.max_life)
bfn = os.path.splitext(os.path.basename(fname))[0]
np.save(os.path.join(args.output, bfn+"_bettiCurve"), res.astype(np.float32))
c1 = res[:args.num_bins[0]]
c2 = res[args.num_bins[0]:]
if args.save_fig:
sns.lineplot(x=np.arange(len(c1)),y=c1, legend="full")
sns.lineplot(x=np.arange(len(c2)),y=c2, legend="full",style=True, dashes=[(2,2)])
plt.savefig(os.path.join(args.output, bfn+"_bettiCurve.jpg"))
plt.close()
meanPHl0 += c1
meanPHl1 += c2
meanPHl0 /= len(fns)
meanPHl1 /= len(fns)
print(meanPHl0.max(), meanPHl1.max())
if args.save_fig:
sns.lineplot(x=np.arange(len(meanPHl0)),y=meanPHl0, legend="full")
sns.lineplot(x=np.arange(len(meanPHl1)),y=meanPHl1, legend="full",style=True, dashes=[(2,2)])
plt.show()
elif args.type == "persistence_image":
print("compute and save persistence images...")
for fname in tqdm(fns, total=len(fns)):
bfn = os.path.splitext(os.path.basename(fname))[0]
img = Image.open(fname).convert('L')
img = np.array(img, dtype=np.float64)
ph = comp_PH(img, gradient=args.gradient, img_size=args.img_size, filtration=args.filtration)
pims = comp_persistence_image(ph, args)
np.save(os.path.join(args.output, bfn+"_persImg"), np.concatenate(pims).astype(np.float32))
if args.save_fig:
sns.lineplot(x=np.arange(len(pims[0])),y=pims[0], legend="full")
sns.lineplot(x=np.arange(len(pims[1])),y=pims[1], legend="full",style=True, dashes=[(2,2)])
plt.savefig(os.path.join(args.output, bfn+"_persImg.jpg"))
plt.close()
# plt.imshow(pims[0].reshape(10,5))
# plt.savefig(os.path.join(args.output, bfn+"_persImg0.jpg"))
# plt.close()
# plt.imshow(pims[1].reshape(10,5))
# plt.savefig(os.path.join(args.output, bfn+"_persImg1.jpg"))
# plt.close()
elif args.type == "grid":
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig = plt.figure(figsize=(21,10),tight_layout=True)
n = min(len(fns),10)
axes = fig.subplots(n, 6)
for i in tqdm(range(n)):
fname = fns[i]
colour = Image.open(fname)
sample = (np.array(colour.convert('L'),dtype=np.float64))
mask = preprocess_image(sample, gradient=args.gradient, filtration='binarise', img_size=args.img_size) ## used only for preview
dt = preprocess_image(sample, gradient=args.gradient, filtration=args.filtration, img_size=args.img_size)
print(dt.min(),dt.max())
ph = comp_PH(sample, gradient=args.gradient, filtration=args.filtration, img_size=args.img_size)
axes[i,0].imshow(colour)
axes[i,0].set_axis_off()
axes[i,0].set_title(os.path.basename(fns[i]))
axes[i,1].imshow(mask)
axes[i,1].set_axis_off()
im2 = axes[i,2].imshow(dt,vmin=args.min_birth[0],vmax=args.max_birth[0], )
axes[i,2].set_axis_off()
divider = make_axes_locatable(axes[i,2])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im2, cax=cax, orientation='vertical')
axes[i,3].set_title("LC")
res = comp_betticurve(ph, args.num_bins, min_birth=args.min_birth, max_birth=args.max_birth, max_life=args.max_life)
#print(ph)
sns.lineplot(x=np.arange(args.num_bins[0]),y=res[:args.num_bins[0]], ax=axes[i,3])
sns.lineplot(x=np.arange(args.num_bins[1]),y=res[args.num_bins[0]:], style=True, dashes=[(2,3)], ax=axes[i,3])
axes[i,4].set_title("HS")
res = comp_persistence_histogram(ph, args.num_bins, min_birth=args.min_birth, max_birth=args.max_birth, max_life=args.max_life)
sns.lineplot(x=np.arange(args.num_bins[0]),y=res[:args.num_bins[0]], ax=axes[i,4])
sns.lineplot(x=np.arange(args.num_bins[1]),y=res[args.num_bins[0]:(args.num_bins[0]+args.num_bins[1])], style=True, dashes=[(2,2)], ax=axes[i,4])
sns.lineplot(x=np.arange(args.num_bins[2]),y=res[(args.num_bins[0]+args.num_bins[1]):(args.num_bins[0]+args.num_bins[1]+args.num_bins[2])],linewidth=2.5, ax=axes[i,4])
sns.lineplot(x=np.arange(args.num_bins[3]),y=res[(args.num_bins[0]+args.num_bins[1]+args.num_bins[2]):], style=True, dashes=[(2,2)],linewidth=2.5, ax=axes[i,4])
axes[i,5].set_title("PI")
res = comp_persistence_image(ph, args)
#print(res[0].shape)
sns.lineplot(x=np.arange(len(res[0])),y=res[0], ax=axes[i,5])
sns.lineplot(x=np.arange(len(res[0])),y=res[1], style=True, dashes=[(2,2)], ax=axes[i,5])
for ax in axes[i]:
ax.legend([],[], frameon=False)
plt.savefig(os.path.join(args.output,"persistence_vectors.jpg"))
plt.show()
## compute persistence diagrams only
else:
print("compute and save persistent homology...")
task = partial(comp_save_PH, args=args)
with Pool(args.num_workers) as pool:
with tqdm(total=len(fns), ascii=True, ncols=100) as t:
for _ in pool.imap_unordered(task, fns):
t.update(1)
|
{"hexsha": "acfabbad5e237e09517704d2d3a49b4d942ce3ab", "size": 19734, "ext": "py", "lang": "Python", "max_stars_repo_path": "PHdict.py", "max_stars_repo_name": "shizuo-kaji/PretrainCNNwithNoData", "max_stars_repo_head_hexsha": "6d076e4bc2effcd91e9275470db79e0125704087", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-18T07:18:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T07:18:44.000Z", "max_issues_repo_path": "PHdict.py", "max_issues_repo_name": "shizuo-kaji/PretrainCNNwithNoData", "max_issues_repo_head_hexsha": "6d076e4bc2effcd91e9275470db79e0125704087", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PHdict.py", "max_forks_repo_name": "shizuo-kaji/PretrainCNNwithNoData", "max_forks_repo_head_hexsha": "6d076e4bc2effcd91e9275470db79e0125704087", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.4705882353, "max_line_length": 188, "alphanum_fraction": 0.607580825, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5523}
|
! Test alternate entry points for functions when the result types
! of all entry points match
function f1 (a)
integer a, b, f1, e1
f1 = 15 + a
return
entry e1 (b)
e1 = 42 + b
end function
function f2 ()
real f2, e2
entry e2 ()
e2 = 45
end function
function f3 ()
double precision a, b, f3, e3
entry e3 ()
f3 = 47
end function
function f4 (a) result (r)
double precision a, b, r, s
r = 15 + a
return
entry e4 (b) result (s)
s = 42 + b
end function
function f5 () result (r)
integer r, s
entry e5 () result (s)
r = 45
end function
function f6 () result (r)
real r, s
entry e6 () result (s)
s = 47
end function
function f7 ()
entry e7 ()
e7 = 163
end function
function f8 () result (r)
entry e8 ()
e8 = 115
end function
function f9 ()
entry e9 () result (r)
r = 119
end function
program entrytest
integer f1, e1, f5, e5
real f2, e2, f6, e6, f7, e7, f8, e8, f9, e9
double precision f3, e3, f4, e4, d
if (f1 (6) .ne. 21) call abort ()
if (e1 (7) .ne. 49) call abort ()
if (f2 () .ne. 45) call abort ()
if (e2 () .ne. 45) call abort ()
if (f3 () .ne. 47) call abort ()
if (e3 () .ne. 47) call abort ()
d = 17
if (f4 (d) .ne. 32) call abort ()
if (e4 (d) .ne. 59) call abort ()
if (f5 () .ne. 45) call abort ()
if (e5 () .ne. 45) call abort ()
if (f6 () .ne. 47) call abort ()
if (e6 () .ne. 47) call abort ()
if (f7 () .ne. 163) call abort ()
if (e7 () .ne. 163) call abort ()
if (f8 () .ne. 115) call abort ()
if (e8 () .ne. 115) call abort ()
if (f9 () .ne. 119) call abort ()
if (e9 () .ne. 119) call abort ()
end
|
{"hexsha": "bef8a98dfd92daabfe52cfee2e3091b78d09a2a2", "size": 1583, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "gcc-gcc-7_3_0-release/gcc/testsuite/gfortran.fortran-torture/execute/entry_1.f90", "max_stars_repo_name": "best08618/asylo", "max_stars_repo_head_hexsha": "5a520a9f5c461ede0f32acc284017b737a43898c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-02T17:34:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-17T10:15:18.000Z", "max_issues_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.fortran-torture/execute/entry_1.f90", "max_issues_repo_name": "vidkidz/crossbridge", "max_issues_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.fortran-torture/execute/entry_1.f90", "max_forks_repo_name": "vidkidz/crossbridge", "max_forks_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-27T00:22:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T09:41:02.000Z", "avg_line_length": 21.1066666667, "max_line_length": 65, "alphanum_fraction": 0.5824384081, "num_tokens": 616}
|
!! Copyright (C) Stichting Deltares, 2012-2016.
!!
!! This program is free software: you can redistribute it and/or modify
!! it under the terms of the GNU General Public License version 3,
!! as published by the Free Software Foundation.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program. If not, see <http://www.gnu.org/licenses/>.
!!
!! contact: delft3d.support@deltares.nl
!! Stichting Deltares
!! P.O. Box 177
!! 2600 MH Delft, The Netherlands
!!
!! All indications and logos of, and references to registered trademarks
!! of Stichting Deltares remain the property of Stichting Deltares. All
!! rights reserved.
SUBROUTINE DLWQ5H ( LUNUT , IAR , ITMNR , NOITM , IDMNR ,
* NODIM , IORDER , CNAMES , IOFFI , IOFFC ,
* IODS , IOFFD , I , ICNT )
!
!
! Deltares SECTOR WATERRESOURCES AND ENVIRONMENT
!
! CREATED : October '00 by L. Postma
!
! MODIFIED :
!
! FUNCTION : Compacts USEFOR lists if unresolved externals
!
! SUBROUTINES CALLED : none
!
! LOGICAL UNITS : LUN(27) = unit stripped DELWAQ input file
! LUN(29) = unit formatted output file
!
! PARAMETERS :
!
! NAME KIND LENGTH FUNCT. DESCRIPTION
! ---------------------------------------------------------
! LUNUT INTEGER 1 INPUT unit number for ASCII output
! IAR INTEGER IIMAX IN/OUT integer workspace
! ITMNR INTEGER 1 IN/OUT nr of items for assignment
! NOITM INTEGER 1 IN nr of items in computational rule
! IDMNR INTEGER 1 IN/OUT nr of subst for assignment
! NODIM INTEGER 1 IN nr of subst in computational rule
! IORDER INTEGER 1 IN 1 = items first, 2 is subst first
! CNAMES CHAR*(*) NITM INPUT Items to check for presence
! IOFFI INTEGER 1 IN/OUT Offset in input array
! IOFFC INTEGER 1 IN/OUT Offset in character array
! IOFFD INTEGER 1 IN/OUT Base offset in both arrays
! IODS INTEGER 1 INPUT Shift counter ODS files
! I INTEGER 1 INPUT loop counter
! ICNT INTEGER 1 IN/OUT counter
!
!
use timers ! performance timers
CHARACTER*(*) CNAMES(*)
DIMENSION IAR(*)
CHARACTER*20 CHULP
integer(4) :: ithndl = 0
if (timon) call timstrt( "dlwq5h", ithndl )
!
! Write message
!
WRITE ( LUNUT , * )
WRITE ( LUNUT , 1010 ) I+ICNT, CNAMES(I+IOFFC)
IF ( IORDER .EQ. 1 ) THEN
NTT = IDMNR
NITM = NODIM
ELSE
NTT = ITMNR
NITM = NOITM
ENDIF
!
! Look backwards
!
DO 10 I1 = I,1,-1
I2 = IAR(I1+IOFFC)
IF ( I2 .GT. -100000 ) GOTO 20
10 CONTINUE
!
! Additional messages for this sequence
!
I4 = 0
20 IF ( I2 .LE. 0 .AND. I2 .GT. -100000 ) THEN
! Try to find the reference
DO 25 I3 = 1 , I
I5 = IAR(I3+IOFFC)
IF ( I5 .GT. 0 ) I4 = IAR(I3+IOFFC)
IF ( I5 .LE. 0 .AND. I5 .GT. -100000 ) I4 = I4 + 1
25 CONTINUE
CHULP = CNAMES(I4+IOFFD)
IF ( CNAMES(I+IOFFC) .NE. CHULP ) THEN
IF ( IORDER .EQ. 2 ) THEN
WRITE (LUNUT,1030) I4,CHULP
ELSE
WRITE (LUNUT,1040) I4,CHULP
ENDIF
ENDIF
ENDIF
IF ( I2 .GT. 0 .AND. I2 .LT. 100000 ) THEN
I4 = I2
CHULP = CNAMES( I2+IOFFD)
IF ( CNAMES(I+IOFFC) .NE. CHULP ) THEN
IF ( IORDER .EQ. 2 ) THEN
WRITE (LUNUT,1030) I2,CHULP
ELSE
WRITE (LUNUT,1040) I2,CHULP
ENDIF
ENDIF
ENDIF
I2 = I4
!
! Determine the shift in locations
!
ISHFT = 1
DO 30 I4 = I1+1,NITM
I3 = IAR(I4+IOFFC)
IF ( I3 .GT. -1000000 ) GOTO 40
ISHFT = ISHFT + 1
30 CONTINUE
!
! Shift the third array heap
!
40 DO 50 I4 = I1, NITM
IAR (I4+IOFFI) = IAR(I4+IOFFI+ISHFT)
50 CONTINUE
!
! Shift the second array heap
!
DO 60 I4 = I1, NITM*2+IODS
IAR (I4+IOFFC) = IAR (I4+IOFFC+ISHFT)
CNAMES(I4+IOFFC) = CNAMES(I4+IOFFC+ISHFT)
60 CONTINUE
NITM = NITM - ISHFT
IOFFI = IOFFI - ISHFT
IOFFC = IOFFC - 1
IOFFI = IOFFI - 1
ICNT = ICNT + ISHFT
!
! Shift the base array heap
!
DO 70 I5 = I2+IOFFD , NTT+IOFFD+NITM*2+IODS
IAR (I5) = IAR (I5+1)
CNAMES(I5) = CNAMES(I5+1)
70 CONTINUE
!
! Renumber the second array heap
!
DO 80 I4 = I1 , NITM
IF ( IAR(I4+IOFFC) .GT. I2 ) IAR(I4+IOFFC) = IAR(I4+IOFFC) -1
80 CONTINUE
!
! Update totals
!
IF ( IORDER .EQ. 1 .OR. IODS .GT. 0 ) THEN
IDMNR = IDMNR-1
NODIM = NODIM-ISHFT
ENDIF
IF ( IORDER .EQ. 2 .AND. IODS .EQ. 0 ) THEN
ITMNR = ITMNR-1
NOITM = NOITM-ISHFT
ENDIF
!
if (timon) call timstop( ithndl )
RETURN
!
1010 FORMAT ( ' WARNING: Input item : ',I3,' not resolved: ',A)
1020 FORMAT ( ' WARNING: also not resolved: ',A)
1030 FORMAT ( ' WARNING: Item number: ',I3,' also not resolved: ',A)
1040 FORMAT ( ' WARNING: Substance : ',I3,' also not resolved: ',A)
!
END
|
{"hexsha": "759f6a51025c54168294930d3912f6c1d0382a42", "size": 5749, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_io/src/waq_io/dlwq5h.f", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_io/src/waq_io/dlwq5h.f", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_io/src/waq_io/dlwq5h.f", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7624309392, "max_line_length": 76, "alphanum_fraction": 0.5383544964, "num_tokens": 1953}
|
import deeptools.bigwigCompare as bwComp
import deeptools.multiBigwigSummary as bwCorr
import numpy as np
import numpy.testing as nt
import os.path
from os import unlink
ROOT = os.path.dirname(os.path.abspath(__file__)) + "/test_data/"
BIGWIG_A = ROOT + "testA_skipNAs.bw"
BIGWIG_B = ROOT + "testB_skipNAs.bw"
BIGWIG_C = ROOT + "test1.bw.bw"
"""
The distribution of reads for the bam files is:
0 100 200
|------------------------------------------------------------|
testA.bam 3R ==============>
<==============
testB.bam 3R <============== ==============>
==============>
==============>
The resulting bigwig files are as follows:
testA_skipNAs:
    3R  100 200 1
    chr_cigar   0   50  2
testB_skipNAs:
    3R  50  150 1
    3R  150 200 2
"""
def test_bigwigCompare():
outfile = '/tmp/result.bg'
args = "-b1 {} -b2 {} -o {} --operation add --outFileFormat bedgraph".format(BIGWIG_A, BIGWIG_B, outfile).split()
bwComp.main(args)
_foo = open(outfile, 'r')
resp = _foo.readlines()
_foo.close()
expected = ['3R\t0\t50\t0\n', '3R\t50\t100\t1\n', '3R\t100\t150\t2\n', '3R\t150\t200\t3\n']
assert resp == expected, "{} != {}".format(resp, expected)
unlink(outfile)
def test_bigwigCompare_skipnas():
outfile = '/tmp/result.bg'
args = "-b1 {} -b2 {} -o {} --operation add --skipNAs " \
"--outFileFormat bedgraph".format(BIGWIG_A, BIGWIG_B, outfile).split()
bwComp.main(args)
_foo = open(outfile, 'r')
resp = _foo.readlines()
_foo.close()
expected = ['3R\t100\t150\t2\n', '3R\t150\t200\t3\n']
assert resp == expected, "{} != {}".format(resp, expected)
unlink(outfile)
def test_bigwigCompare_skipZeroOverZero():
outfile = '/tmp/result.bg"'
args = "-b1 {} -b2 {} -o {} --skipZeroOverZero --pseudocount 1 3 --outFileFormat bedgraph".format(BIGWIG_A, BIGWIG_A, outfile).split()
bwComp.main(args)
_foo = open(outfile, 'r')
resp = _foo.readlines()
_foo.close()
expected = ['3R\t100\t200\t-1\n']
assert resp == expected, "{} != {}".format(resp, expected)
unlink(outfile)
def test_multiBigwigSummary():
outfile = '/tmp/result.bg'
args = "bins -b {} {} --binSize 50 -o {}".format(BIGWIG_A, BIGWIG_B, outfile).split()
bwCorr.main(args)
resp = np.load(outfile)
matrix = resp['matrix']
labels = resp['labels']
nt.assert_equal(matrix, np.array([[np.nan, np.nan],
[np.nan, 1.],
[1., 1.],
[1., 2.]]))
nt.assert_equal(labels, ['testA_skipNAs.bw', 'testB_skipNAs.bw'])
unlink(outfile)
def test_multiBigwigSummary_outrawcounts():
"""
Test multiBigwigSummary raw counts output
"""
outfile = '/tmp/result.bg'
args = "bins -b {} {} --binSize 50 -o /tmp/null --outRawCounts {} ".format(BIGWIG_A, BIGWIG_B, outfile).split()
bwCorr.main(args)
_foo = open(outfile, 'r')
resp = _foo.read()
_foo.close()
expected = """#'chr' 'start' 'end' 'testA_skipNAs.bw' 'testB_skipNAs.bw'
3R 0 50 nan nan
3R 50 100 nan 1.0
3R 100 150 1.0 1.0
3R 150 200 1.0 2.0
"""
assert resp == expected, "{} != {}".format(resp, expected)
unlink(outfile)
unlink("/tmp/null")
def test_multiBigwigSummary_gtf():
outfile = '/tmp/_test.npz'
args = "BED-file -b {0} {0} --BED {1}/test.gtf -o {2}".format(BIGWIG_C, ROOT, outfile).split()
bwCorr.main(args)
resp = np.load(outfile)
matrix = resp['matrix']
labels = resp['labels']
nt.assert_equal(labels, ['test1.bw.bw', 'test1.bw.bw'])
nt.assert_allclose(matrix, np.array([[27.475, 27.475],
[27.31248719, 27.31248719]]))
unlink(outfile)
def test_multiBigwigSummary_metagene():
outfile = '/tmp/_test.npz'
args = "BED-file --metagene -b {0} {0} --BED {1}/test.gtf -o {2}".format(BIGWIG_C, ROOT, outfile).split()
bwCorr.main(args)
resp = np.load(outfile)
matrix = resp['matrix']
labels = resp['labels']
nt.assert_equal(labels, ['test1.bw.bw', 'test1.bw.bw'])
nt.assert_allclose(matrix, np.array([[20.28956028, 20.28956028],
[22.1923501, 22.1923501]]))
unlink(outfile)
|
{"hexsha": "8319242779397ed658a588e0e30951cfcd16b1af", "size": 4603, "ext": "py", "lang": "Python", "max_stars_repo_path": "deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py", "max_stars_repo_name": "gartician/deepTools", "max_stars_repo_head_hexsha": "78cbddf3ea038e12b8ff1fc749cfeca3fa5f2f88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 351, "max_stars_repo_stars_event_min_datetime": "2017-11-09T17:27:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:50:56.000Z", "max_issues_repo_path": "deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py", "max_issues_repo_name": "gartician/deepTools", "max_issues_repo_head_hexsha": "78cbddf3ea038e12b8ff1fc749cfeca3fa5f2f88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 467, "max_issues_repo_issues_event_min_datetime": "2017-11-09T17:14:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T15:31:59.000Z", "max_forks_repo_path": "deeptools/test/test_bigwigCompare_and_multiBigwigSummary.py", "max_forks_repo_name": "gartician/deepTools", "max_forks_repo_head_hexsha": "78cbddf3ea038e12b8ff1fc749cfeca3fa5f2f88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 132, "max_forks_repo_forks_event_min_datetime": "2017-11-13T19:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:32:11.000Z", "avg_line_length": 33.598540146, "max_line_length": 138, "alphanum_fraction": 0.5374755594, "include": true, "reason": "import numpy", "num_tokens": 1328}
|
export LRMat;
struct LRMat{T<:Number}
# variables
height:: Int
width:: Int
UMat:: Array{T,2}
VMat:: Array{T,2}
# global settings
EPS:: Float64
MAXRANK:: Int
    function LRMat(D::Array{T,2}, Eps, MaxRank) where T<:Number
        h = size(D,1);
        w = size(D,2);
        # svdtrunc is assumed to return (U, S, V) with S a diagonal matrix
        U, S, V = svdtrunc(D, Eps, MaxRank);
        new{T}(h, w, U*sqrt(S), V*sqrt(S), Eps, MaxRank);
    end
end
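# Usage sketch (hypothetical; svdtrunc must be defined elsewhere and is
# assumed to return (U, S, V) with S a diagonal matrix):
#   lr = LRMat(randn(100, 80), 1e-6, 20)
#   lr.UMat * lr.VMat'   # recovers the rank-truncated approximation U*S*V'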
|
{"hexsha": "12511ec77f1e58fe824496af3726959df7ff5853", "size": 401, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "HMat.jl/src/LRMat/LRMat.jl", "max_stars_repo_name": "YingzhouLi/HMat", "max_stars_repo_head_hexsha": "518f497e8140505ea7c69896ac27675ccbe9f3c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2015-09-15T16:03:39.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-05T07:06:11.000Z", "max_issues_repo_path": "HMat.jl/src/LRMat/LRMat.jl", "max_issues_repo_name": "YingzhouLi/HMat", "max_issues_repo_head_hexsha": "518f497e8140505ea7c69896ac27675ccbe9f3c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-02-09T18:36:28.000Z", "max_issues_repo_issues_event_max_datetime": "2015-02-09T18:36:28.000Z", "max_forks_repo_path": "HMat.jl/src/LRMat/LRMat.jl", "max_forks_repo_name": "YingzhouLi/HMat.jl", "max_forks_repo_head_hexsha": "518f497e8140505ea7c69896ac27675ccbe9f3c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-10-12T06:56:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-11T22:31:42.000Z", "avg_line_length": 20.05, "max_line_length": 49, "alphanum_fraction": 0.5087281796, "num_tokens": 138}
|
using Optim
mutable struct HLT
α::Float64
β::Float64
l₀::Float64
b₀::Float64
HLT() = new(0, 0, 0, 0)
HLT(α::Number, β::Number, l₀::Number, b₀::Number) = new(Float64(α), Float64(β),
Float64(l₀), Float64(b₀))
end
function loss(model::HLT, time_series)
α, β, l₀, b₀ = model.α, model.β, model.l₀, model.b₀
N = length(time_series)
l_t, l_t_, b_t = 0, 0, 0 # l_t_ is the variable to save l(t-1)
loss = 0
    for t in 1:N
        if t == 1
            l_t = l₀
            b_t = b₀
        else
            l_t = time_series[t - 1] * α + (l_t + b_t) * (1 - α) # b_t still holds b(t-1) here
            b_t = β * (l_t - l_t_) + (1 - β) * b_t # trend update, matching forecast()
        end
        l_t_ = l_t
        y_pred = l_t + b_t
        loss += (time_series[t] - y_pred)^2
    end
return loss
end
function fit(model::HLT, y)
lower = [-Inf, -0.001, -Inf, -Inf]
upper = [1., 1., Inf, Inf]
initial = [model.α, model.β, model.l₀, model.b₀]
function loss_(parameters::Array{Float64, 1})
α, β, l₀, b₀ = parameters
return loss(HLT(α, β, l₀, b₀), y)
end
res = Optim.optimize(loss_, lower, upper, initial)
optimal = Optim.minimizer(res)
return HLT(optimal[1], optimal[2], optimal[3], optimal[4])
end
function forecast(model::HLT, time_series, forecast_length)
N = length(time_series)
α, β, l₀, b₀ = model.α, model.β, model.l₀, model.b₀
l_t, l_t_, b_t = 0, 0, 0
pred = Array{Float64, 1}(undef, forecast_length)
#go through the whole time series making the point by point estimate
for t in 1:N
if t == 1
l_t = l₀
b_t = b₀
else
l_t = time_series[t - 1] * α + (l_t + b_t) * (1 - α) #b_t "is" b(t-1)
b_t = β * (l_t - l_t_) + (1 - β) * b_t
end
l_t_ = l_t
end
#The parameter´s values to make the forecast are those estimated in the last step of the time series
l_t = time_series[end] * α + (l_t + b_t) * (1 - α)
b_t = β * (l_t - l_t_) + (1 - β) * b_t
for i in 1:forecast_length
#y_pred = l_t + b_t * i
#push!(pred, y_pred)
pred[i] = l_t + b_t * i
end
return pred
end
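# Usage sketch (hypothetical data):
#   y = 0.5 .* collect(1:100) .+ randn(100)           # linear trend plus noise
#   model = fit(HLT(0.5, 0.1, y[1], y[2] - y[1]), y)  # initial guess, then optimise
#   pred  = forecast(model, y, 10)                    # 10-step-ahead forecast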
|
{"hexsha": "7ddd8caec558e0f412db60e558ad3f67186bedc7", "size": 1892, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "TSeriesForecast/src/holts_trend_method.jl", "max_stars_repo_name": "lambdaclass/julia_time_series_library", "max_stars_repo_head_hexsha": "4e02a71b485f16aff60ce741b0ad3ce2481fed91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TSeriesForecast/src/holts_trend_method.jl", "max_issues_repo_name": "lambdaclass/julia_time_series_library", "max_issues_repo_head_hexsha": "4e02a71b485f16aff60ce741b0ad3ce2481fed91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TSeriesForecast/src/holts_trend_method.jl", "max_forks_repo_name": "lambdaclass/julia_time_series_library", "max_forks_repo_head_hexsha": "4e02a71b485f16aff60ce741b0ad3ce2481fed91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2588235294, "max_line_length": 104, "alphanum_fraction": 0.61205074, "num_tokens": 755}
|
# ---
# title: 743. Network Delay Time
# id: problem743
# author: Tian Jun
# date: 2020-10-31
# difficulty: Medium
# categories: Heap, Depth-first Search, Breadth-first Search, Graph
# link: <https://leetcode.com/problems/network-delay-time/description/>
# hidden: true
# ---
#
# There are `N` network nodes, labelled `1` to `N`.
#
# Given `times`, a list of travel times as **directed** edges `times[i] = (u, v,
# w)`, where `u` is the source node, `v` is the target node, and `w` is the time
# it takes for a signal to travel from source to target.
#
# Now, we send a signal from a certain node `K`. How long will it take for all
# nodes to receive the signal? If it is impossible, return `-1`.
#
#
#
# **Example 1:**
#
# 
#
#
#
# Input: times = [[2,1,1],[2,3,1],[3,4,1]], N = 4, K = 2
# Output: 2
#
#
#
#
# **Note:**
#
# 1. `N` will be in the range `[1, 100]`.
# 2. `K` will be in the range `[1, N]`.
# 3. The length of `times` will be in the range `[1, 6000]`.
# 4. All edges `times[i] = (u, v, w)` will have `1 <= u, v <= N` and `0 <= w <= 100`.
#
#
## @lc code=start
using LeetCode
## add your code here:
## @lc code=end
|
{"hexsha": "43102905ed8062441a704d8067e67785703e0041", "size": 1242, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/unresolved/743.network-delay-time.jl", "max_stars_repo_name": "noob-data-analaysis/LeetCode.jl", "max_stars_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/unresolved/743.network-delay-time.jl", "max_issues_repo_name": "noob-data-analaysis/LeetCode.jl", "max_issues_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-12-10T02:19:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-05T05:00:12.000Z", "max_forks_repo_path": "src/unresolved/743.network-delay-time.jl", "max_forks_repo_name": "noob-data-analaysis/LeetCode.jl", "max_forks_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.875, "max_line_length": 87, "alphanum_fraction": 0.5925925926, "num_tokens": 437}
|
import torch
import paddle
import os
import numpy as np
from ppgan.models.discriminators.discriminator_styleganv2ada import StyleGANv2ADA_Discriminator
c_dim = 0
w_dim = 512
# img_resolution = 512
# img_resolution = 128
img_resolution = 32
img_channels = 3
channel_base = 32768
channel_max = 512
num_fp16_res = 4
conv_clamp = 256
epilogue_kwargs = dict(
mbstd_group_size=8,
)
batch_size = 2
x_shape = [batch_size, img_channels, img_resolution, img_resolution]
lr = 0.0001
model = StyleGANv2ADA_Discriminator(c_dim=c_dim,
img_resolution=img_resolution,
img_channels=img_channels,
channel_base=channel_base,
channel_max=channel_max,
num_fp16_res=num_fp16_res,
conv_clamp=conv_clamp,
block_kwargs={},
mapping_kwargs={},
epilogue_kwargs=epilogue_kwargs,
)
model.train()
use_gpu = True
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = paddle.CUDAPlace(gpu_id) if use_gpu else paddle.CPUPlace()
def copy(name, w, std):
value2 = paddle.to_tensor(w, place=place)
value = std[name]
value = value * 0 + value2
std[name] = value
fullyConnectedLayer_std = model.state_dict()
ckpt_file = '54.pth'
save_name = '54.pdparams'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
fullyConnectedLayer_dic = {}
for key, value in state_dict.items():
fullyConnectedLayer_dic[key] = value.data.numpy()
for key in fullyConnectedLayer_dic.keys():
name2 = key
w = fullyConnectedLayer_dic[key]
if '.linear.weight' in key:
        w = w.transpose(1, 0)  # a PyTorch nn.Linear() weight must be transposed before assigning to a Paddle nn.Linear()
if '.noise_strength' in key:
print()
w = np.reshape(w, [1, ])
print(key)
copy(name2, w, fullyConnectedLayer_std)
model.set_state_dict(fullyConnectedLayer_std)
paddle.save(fullyConnectedLayer_std, save_name)
|
{"hexsha": "b6cc9c16b544494018d7479f15cf368b93c2751c", "size": 2238, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_grad/test2_54_Discriminator_grad_2paddle.py", "max_stars_repo_name": "miemie2013/ppgan", "max_stars_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_grad/test2_54_Discriminator_grad_2paddle.py", "max_issues_repo_name": "miemie2013/ppgan", "max_issues_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_grad/test2_54_Discriminator_grad_2paddle.py", "max_forks_repo_name": "miemie2013/ppgan", "max_forks_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-19T03:01:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T03:01:13.000Z", "avg_line_length": 30.2432432432, "max_line_length": 95, "alphanum_fraction": 0.6041108132, "include": true, "reason": "import numpy", "num_tokens": 522}
|
import time
import numpy as np
import quaternion
from .coordinatemath import (apply_rotation, pos_quats_to_plot_coords)
from .latency import Latency
from .testpaths import test_paths
# TODO modify actual coordinate generator to send between [-1,1] [-1,1] for x, y
# ensure proper aspect ratio that we expect
class CoordinateGenerator:
"""Generates coordinates to simulate a moving object."""
def __init__(self, coord_getter_func=None):
self.coord_getter_func = coord_getter_func
self.coord = (0.0, 0.0)
self.width = 0.55
self.height = 0.4
self.update(0, quaternion.x, False)
def draw(self, ax, color="#ff55bb"):
"""Draw a coordinate at location in image frame."""
ax.scatter3D(*pos_quats_to_plot_coords([self._draw_quat]),
s=50, color=color)
def draw_quat(self, ax, color="#ff55bb"):
"""Draw destination on sphere."""
ax.scatter3D(*pos_quats_to_plot_coords([self.dest_quat]),
s=50, color=color)
def update(self, dt, rot, update_coord=True):
"""Updates generated coordinate.
Args:
dt (float): Time elapsed since last update() call.
rot (float): Rotation quaternion to same frame as camera.
"""
if update_coord:
self._update_coord(dt, rot)
v = self._get_offset_quat()
self._draw_quat = apply_rotation(v, rot)
self.dest_quat = self._draw_quat / np.abs(self._draw_quat)
def _get_offset_quat(self):
"""Get position quaternion to express offset from (1,0,0) axis."""
return np.quaternion(0., 1.,
-self.width * self.coord[0],
self.height * self.coord[1])
def _update_coord(self, dt, rot):
"""Calculates next coord from coord_getter_func or path."""
if self.coord_getter_func is not None:
self.coord = self.coord_getter_func()
return
v = test_paths[0].get_next_pos_quat(dt)
offset = apply_rotation(v, rot.inverse())
coord = np.clip([
-offset.y / self.width,
offset.z / self.height], -1., 1.)
self.coord = tuple(coord)
class LatentCoordinateGenerator(CoordinateGenerator):
COORD_LATENCY = 0.200
coord = Latency(COORD_LATENCY)
def __init__(self, parent, fps=20):
self.parent = parent
self.fps = fps
self.time_elapsed = 0.0
self.time_since_update = 0.0
super().__init__(lambda: self.parent.coord)
def update(self, dt, rot, update_coord=True):
# self.parent.update(dt, rot)
self.time_elapsed += dt
self.time_since_update += dt
update_coord = (self.time_since_update >= 1. / self.fps)
if update_coord:
self.time_since_update %= 1. / self.fps
super().update(dt, rot, update_coord)
def draw(self, ax, color="#772255"):
"""Draw a coordinate at location in image frame."""
super().draw(ax, color)
self.parent.draw(ax)
def draw_quat(self, ax, color="#772255"):
"""Draw destination on sphere."""
super().draw_quat(ax, color)
self.parent.draw_quat(ax)
def _time_func(self):
return self.time_elapsed
|
{"hexsha": "d145cf0d892ae9e999624115f6618d163f34a1ce", "size": 3238, "ext": "py", "lang": "Python", "max_stars_repo_path": "tracker/coordinategenerator.py", "max_stars_repo_name": "SicariusNoctis/eagle-eye-tracker", "max_stars_repo_head_hexsha": "31e160057f1d2fa2c5fbd94ba4f5e9d064481c77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-02-10T00:59:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-18T06:38:45.000Z", "max_issues_repo_path": "tracker/coordinategenerator.py", "max_issues_repo_name": "SicariusNoctis/eagle-eye-tracker", "max_issues_repo_head_hexsha": "31e160057f1d2fa2c5fbd94ba4f5e9d064481c77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-05-11T21:48:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-07T11:31:51.000Z", "max_forks_repo_path": "tracker/coordinategenerator.py", "max_forks_repo_name": "SicariusNoctis/eagle-eye-tracker", "max_forks_repo_head_hexsha": "31e160057f1d2fa2c5fbd94ba4f5e9d064481c77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-10T01:03:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-10T01:03:25.000Z", "avg_line_length": 33.0408163265, "max_line_length": 80, "alphanum_fraction": 0.6229153799, "include": true, "reason": "import numpy", "num_tokens": 798}
|
# Hello world example, similar to the Boost.Python hello world
using CxxWrap
using Base.Test
using Compat
# Wrap the functions defined in C++
wrap_modules(CxxWrap._l_parametric)
import ParametricTypes.TemplateType, ParametricTypes.NonTypeParam
p1 = TemplateType{ParametricTypes.P1, ParametricTypes.P2}()
p2 = TemplateType{ParametricTypes.P2, ParametricTypes.P1}()
println("Dumping object p1:")
dump(p1)
@test ParametricTypes.get_first(p1) == 1
@test ParametricTypes.get_second(p2) == 1
@test typeof(ParametricTypes.get_first(p1)) == Int32
@test typeof(ParametricTypes.get_second(p2)) == Int32
@test ParametricTypes.get_first(p2) == 10.
@test ParametricTypes.get_second(p1) == 10.
@test typeof(ParametricTypes.get_first(p2)) == Float64
@test typeof(ParametricTypes.get_second(p1)) == Float64
nontype1 = ParametricTypes.NonTypeParam{Int32, Int32(1)}()
@test ParametricTypes.get_nontype(nontype1) == 1
nontype2 = ParametricTypes.NonTypeParam{UInt32, UInt32(2)}()
@test ParametricTypes.get_nontype(nontype2) == UInt32(2)
nontype3 = ParametricTypes.NonTypeParam{Int32, Int32(1)}(3)
@test ParametricTypes.get_nontype(nontype3) == 3
nontype4 = ParametricTypes.NonTypeParam{Int64, Int64(64)}()
@test ParametricTypes.get_nontype(nontype4) == Int64(64)
|
{"hexsha": "f21d0c74a525d82259bdc703b4011c9f03cc6e3a", "size": 1254, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/parametric.jl", "max_stars_repo_name": "JuliaPackageMirrors/CxxWrap.jl", "max_stars_repo_head_hexsha": "532498b8157238f765530a1cd7eb1674b9eea738", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/parametric.jl", "max_issues_repo_name": "JuliaPackageMirrors/CxxWrap.jl", "max_issues_repo_head_hexsha": "532498b8157238f765530a1cd7eb1674b9eea738", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/parametric.jl", "max_forks_repo_name": "JuliaPackageMirrors/CxxWrap.jl", "max_forks_repo_head_hexsha": "532498b8157238f765530a1cd7eb1674b9eea738", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1538461538, "max_line_length": 65, "alphanum_fraction": 0.7830940989, "num_tokens": 377}
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def sample_test1(img):
#return img[159, 73:393]
return img[159, 73:193]
def sample_test2():
signal = np.sin(np.linspace(0, 60 * np.pi, 1200))
return signal
def divide_4signals(signal):
s1 = signal[0::4]
s2 = signal[1::4]
s3 = signal[2::4]
s4 = signal[3::4]
return s1, s2, s3, s4
def divide_signals(signal, div_num):
    signals = np.zeros((div_num, len(signal) // div_num))  # assumes len(signal) is divisible by div_num
for i in range(div_num):
signals[i,:] = signal[i::div_num]
return signals
def upsample(s, div_num = 4, offset = 0):
signal_len = len(s)
dst = np.zeros((signal_len * div_num))
for i in range(offset):
dst[i] = s[0]
for i in range(0, signal_len - 1):
dst[4 * i + offset] = s[i]
dst[4 * i + offset + 1] = s[i] * ( 1 - 1.0 / div_num ) + s[i+1] * ( 1.0 / div_num )
dst[4 * i + offset + 2] = s[i] * ( 1 - 2.0 / div_num ) + s[i+1] * ( 2.0 / div_num )
dst[4 * i + offset + 3] = s[i] * ( 1 - 3.0 / div_num ) + s[i+1] * ( 3.0 / div_num )
    # pad the remaining tail samples with the last value
    for i in range(offset, 4):
        dst[4 * (signal_len - 1) + i] = s[-1]
return dst
def upsample2(s, div_num, offset):
signal_len = s.shape[0]
dst = np.zeros((signal_len * div_num))
for i in range(offset):
dst[i] = s[0]
for i in range(0, signal_len - 1):
for d in range(div_num):
dst[div_num * i + offset + d] = s[i] * ( 1 - float(d) / div_num ) + s[i+1] * ( float(d) / div_num )
    # pad the remaining tail samples with the last value
    for i in range(offset, div_num):
        dst[div_num * (signal_len - 1) + i] = s[-1]
return dst
def calcTheta(s1, s2, s3, s4):
Nr = 4
    denom = 0 # denominator
    numer = 0 # numerator
denom += s1 * np.cos(0)
numer += s1 * np.sin(0)
denom += s2 * np.cos(1 * 2 * np.pi / Nr)
numer += s2 * np.sin(1 * 2 * np.pi / Nr)
denom += s3 * np.cos(2 * 2 * np.pi / Nr)
numer += s3 * np.sin(2 * 2 * np.pi / Nr)
denom += s4 * np.cos(3 * 2 * np.pi / Nr)
numer += s4 * np.sin(3 * 2 * np.pi / Nr)
theta = np.arctan2( numer, denom )
return theta
def calcThetas( signals ):
Nr = signals.shape[0]
    denom = 0 # denominator
    numer = 0 # numerator
for k, signal in enumerate(signals):
denom += signal * np.cos(k * 2 * np.pi / Nr)
numer += signal * np.sin(k * 2 * np.pi / Nr)
theta = np.arctan2( numer, denom )
return theta
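# This is the standard N-step phase-shifting estimate:
#   theta = atan2( sum_k s_k*sin(2*pi*k/N), sum_k s_k*cos(2*pi*k/N) )
# For s_k = cos(t - 2*pi*k/N) it recovers t exactly, e.g.:
#   t = 0.7; N = 4
#   sigs = np.array([[np.cos(t - 2*np.pi*k/N)] for k in range(N)])
#   calcThetas(sigs)  # -> array([0.7])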
def test1():
print('aa')
img = cv2.imread('pattern1.png', 0)
signal = sample_test1(img)
# graph plot
plt.plot(signal)
plt.show()
#%%
s1, s2, s3, s4 = divide_4signals(signal)
    #%% after upsampling
S1 = upsample(s1, 4, 0)
S2 = upsample(s2, 4, 1)
S3 = upsample(s3, 4, 2)
S4 = upsample(s4, 4, 3)
plt.plot(S1, color='r')
plt.plot(S2, color='g')
plt.plot(S3, color='b')
plt.plot(S4, color='y')
plt.show()
thetaList = np.zeros((len(signal)))
for i in range(len(thetaList)):
thetaList[i] = calcTheta(S1[i], S2[i], S3[i], S4[i])
plt.plot(thetaList)
print('aaa')
#%%
def test2():
print('bb')
img = cv2.imread('pattern1.png', 0)
#signal = sample_test1(img)
signal = sample_test2()
signal_len = len(signal)
# graph plot
plt.plot(signal)
plt.show()
#%%
div_num = 30
signals = divide_signals(signal, div_num)
#signals = signals.T
plot_num = min(div_num , 3)
for d in range(plot_num):
plt.plot(signals[d])
plt.show()
    #%% after upsampling
upSignals = np.zeros((div_num, signal_len))
for d in range(div_num):
upSignals[d] = upsample2(signals[d], div_num, d)
plt.plot(upSignals[0], color='r')
plt.plot(upSignals[1], color='g')
plt.plot(upSignals[2], color='b')
plt.plot(upSignals[3], color='y')
plt.show()
thetaList = np.zeros((len(signal)))
for i in range(len(thetaList)):
thetaList[i] = calcThetas(upSignals[:, i])
plt.plot(thetaList)
plt.show()
print('aaa')
#%%
if __name__ == '__main__':
#test1()
test2()
|
{"hexsha": "bdd0ff3ced1d154546463280ff27ad88cbfaa285", "size": 4324, "ext": "py", "lang": "Python", "max_stars_repo_path": "phase_shift.py", "max_stars_repo_name": "kibekibe/sample_moire_sample", "max_stars_repo_head_hexsha": "7ebd4897baff4866b678cc4c05cc0750ede6c8ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-23T08:47:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-23T08:47:57.000Z", "max_issues_repo_path": "phase_shift.py", "max_issues_repo_name": "kibekibe/sample_moire_sample", "max_issues_repo_head_hexsha": "7ebd4897baff4866b678cc4c05cc0750ede6c8ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phase_shift.py", "max_forks_repo_name": "kibekibe/sample_moire_sample", "max_forks_repo_head_hexsha": "7ebd4897baff4866b678cc4c05cc0750ede6c8ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2886597938, "max_line_length": 111, "alphanum_fraction": 0.507400555, "include": true, "reason": "import numpy", "num_tokens": 1439}
|
Order and cleanliness!!!
Tyrants have not yet discovered any chains with which to fetter the mind.
Andrew Banta is a Violinist, addicted to Coffee, and nothing more. Image(andrewblanche.jpg, Andrew and Users/BlancheNonken Blanche share a meal at a Wiki BBQ Oct 2005 BBQ, right, thumbnail)
What are you doing here? you ask. I'm here to get on people's nerves!!! And you?
20060218 20:21:58 nbsp I love you too, Andrew. You are my Wurst Friend Ever. :) Users/BlancheNonken
20060219 08:04:42 nbsp w00t! I'm working at Meat until 7pm; let's see what the rest of the Quartet has to say. Users/BlancheNonken
20060317 09:00:48 nbsp You know where I haven't been in a long time? The wiki. We gotta do this thing we do again, with that michelle girl. naawmeen? Users/AlexNorris
20060410 13:08:01 nbsp I have no idea what you're talking about. Speak English, good man, or find that your cow will be tipped. Users/MatthewKeys
Oh no! Not my nonexistant cow! Wherever shall I fetch my milk?
20060410 16:02:51 nbsp don't EVER fix my links, asshole Users/MichelleAccurso
OOH. OK.
20060411 14:51:56 nbsp let the record show, that i love andrew Users/MichelleAccurso
20060411 19:55:24 nbsp I'm unfortunately shaving my back in preparation for my date with Mat.... err I mean, I've got a lot of stuff I need to finish for work. I'll make sure I'm free next week though. Users/ZacMorris
20060424 17:49:11 nbsp Barrabis! Barrabis! Barrabis! Let him be put to death. Users/AndrewBanta
20060424 18:07:20 nbsp 'Twas just trying to get through to him with logic. Do you know why he seems to dislike you so much? Users/JosephBleckman
20060812 09:58:24 nbsp Hey Andrew, I'm a contemporary singer-songwriter looking for a violinist to play out with at open mic nights. If you're interested, please email me at carnelian1@sbcglobal.net Users/LouLasprugato
20070520 20:02:26 nbsp man, I have to resist the temptation to give steve ostrowski the other badge of honour Users/StevenDaubert
20071011 09:10:45 nbsp Hi Andrew, I'm writing a story about caffeine for the Davis Enterprise, and I'm looking for a heavy coffee drinker to interview briefly and take a picture of with a stack of coffee cups. What do you think? Can you email me really soon at cstjohn@davisenterprise.net?
Thanks! Users/ClaireStJohn
20080813 15:55:42 nbsp You were right (re: a discussion we had at a BBQ a while ago) french press is really effin good when done right. I can't even get myself to go out for coffee because I can make it better at home (unless I want espresso)! Users/PhilipNeustrom
|
{"hexsha": "70eef7f6bb885a261d225ffbf4e432d24eccc998", "size": 2555, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/AndrewBanta.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/AndrewBanta.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/AndrewBanta.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.0681818182, "max_line_length": 287, "alphanum_fraction": 0.7769080235, "num_tokens": 740}
|
"""
BiMPM (Bilateral Multi-Perspective Matching) model implementation.
"""
from typing import Dict, Optional, List, Any
from overrides import overrides
import torch
import numpy
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.common.params import Params
from pytorch_models.commons.utils import to_numpy
@Model.register("bertclassifier")
class BERTClassifier(Model):
"""
This ``Model`` implements BiMPM model described in `Bilateral Multi-Perspective Matching
for Natural Language Sentences <https://arxiv.org/abs/1702.03814>`_ by Zhiguo Wang et al., 2017.
Also please refer to the `TensorFlow implementation <https://github.com/zhiguowang/BiMPM/>`_ and
`PyTorch implementation <https://github.com/galsang/BIMPM-pytorch>`_.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the
model.
aggregator : ``Seq2VecEncoder``
Aggregator of all BiMPM matching vectors
classifier_feedforward : ``FeedForward``
Fully connected layers for classification.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
If provided, will be used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
classifier_feedforward: FeedForward,
dropout : Optional[torch.nn.Dropout] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(BERTClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.dropout = dropout
self.classifier_feedforward = classifier_feedforward
self.metrics = {"accuracy": CategoricalAccuracy()}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
@overrides
def forward(self, # type: ignore
input: Dict[str, torch.Tensor],
label: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None # pylint:disable=unused-argument
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
premise : Dict[str, torch.LongTensor]
The premise from a ``TextField``
hypothesis : Dict[str, torch.LongTensor]
The hypothesis from a ``TextField``
label : torch.LongTensor, optional (default = None)
The label for the pair of the premise and the hypothesis
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Additional information about the pair
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
pooled_output = self.text_field_embedder(input)
if self.dropout is not None:
pooled_output = self.dropout(pooled_output)
# the final forward layer
logits = self.classifier_feedforward(pooled_output)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {'logits': logits, "probs": probs}
if label is not None:
label = label.view(-1)
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
@overrides
def decode(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Converts indices to string labels, and adds a ``"label"``
key to the result.
"""
return_dict = {}
for key in output_dict:
return_dict[key] = to_numpy(
output_dict[key], output_dict[key].is_cuda)
argmax_indices = numpy.argmax(return_dict["probs"], axis=-1)
labels = [self.vocab.get_token_from_index(x, namespace="labels")
for x in argmax_indices]
return_dict['label'] = labels
return return_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BERTClassifier':
text_field_embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(
vocab=vocab, params=text_field_embedder_params
)
dropout = params.pop("dropout", None)
if dropout is not None:
pval = dropout.pop("value")
dropout = torch.nn.Dropout(pval)
classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
regularizer = RegularizerApplicator.from_params(params.pop("regularizer", None))
return cls(
vocab=vocab,
text_field_embedder=text_field_embedder,
dropout=dropout,
classifier_feedforward=classifier_feedforward,
regularizer=regularizer
)
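# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original module): a minimal,
# *hypothetical* config shape that ``BERTClassifier.from_params`` would
# accept. The embedder and feed-forward sub-configs are illustrative
# assumptions, not a verified training configuration for this repository.
#
# params = Params({
#     "text_field_embedder": {...},   # e.g. a BERT token embedder config
#     "dropout": {"value": 0.1},      # read via dropout.pop("value") above
#     "classifier_feedforward": {
#         "input_dim": 768, "num_layers": 1,
#         "hidden_dims": [2], "activations": ["linear"],
#     },
# })
# model = BERTClassifier.from_params(vocab=vocab, params=params)
# ---------------------------------------------------------------------------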
|
{"hexsha": "eb99344481c722824393197e3659c98faf3e5018", "size": 6109, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch_models/model/bert_model.py", "max_stars_repo_name": "codedecde/BiMPM", "max_stars_repo_head_hexsha": "818602fcf7a018632707b8fbfe33200036795731", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytorch_models/model/bert_model.py", "max_issues_repo_name": "codedecde/BiMPM", "max_issues_repo_head_hexsha": "818602fcf7a018632707b8fbfe33200036795731", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch_models/model/bert_model.py", "max_forks_repo_name": "codedecde/BiMPM", "max_forks_repo_head_hexsha": "818602fcf7a018632707b8fbfe33200036795731", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0, "max_line_length": 102, "alphanum_fraction": 0.6587002783, "include": true, "reason": "import numpy", "num_tokens": 1307}
|
import numpy as np
import xlrd
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import svd
from categoric2numeric import categoric2numeric
from matplotlib.pyplot import figure, plot, xlabel, ylabel, legend, show, clim, semilogx, loglog, title, subplot, grid
import sklearn.linear_model as lm
import sklearn.model_selection as skmd
from toolbox.Toolbox_Python02450.Tools.toolbox_02450 import feature_selector_lr, bmplot, rlr_validate, train_neural_net, draw_neural_net
import pprint
import random
import torch
import scipy.stats as stats
# from regression_part_a import OPT_lambda_part_2, X2, YY, XX, X2_labels
# Again import data
airbnb_data = "../data/AB_NYC_2019.csv"
attributes_datatype = {
'id': np.float64, # 0
'name': str, # 1
'host_id': np.float64, # 2
'host_name': str, # 3
'neighbourhood_group': str, # 4
'neighbourhood': str, # 5
'latitude': np.float64, # 6
'longitude': np.float64, # 7
'room_type': str, # 8
'price': np.float64, # 9
'minimum_nights': np.float64, # 10
'number_of_reviews': np.float64, # 11
# 'last_review': str, # 12
'reviews_per_month': np.float64, # 13
'calculated_host_listings_count': np.float64, # 14
'availability_365': np.float64 # 15
}
attributes_dates = ["last_review"]
data_frame_original = pd.read_csv(airbnb_data, dtype=attributes_datatype, parse_dates=attributes_dates)
data_frame_original.fillna(0, inplace=True)
print("Size of original dataframe: ", data_frame_original.size)
# TODO TAKE CARE
# Get random part of data to get more sense of visualization:
data_frame = data_frame_original.sample(frac=0.1)
# print(data_frame)
raw_data = data_frame.to_numpy()  # DataFrame.get_values() is deprecated/removed in newer pandas
attributes = list(data_frame.columns)
print("Atributes of dataframe: ", attributes)
print("Size of dataframe: ", data_frame.size)
pretty_attributes = [
'id',
'name',
'host id',
'host name',
'borough',
'neighbourhood',
'latitude',
'longitude',
'room type',
'price',
'minimum nights',
'review number',
'last review',
'rev per month',
'host listing count',
'availability']
# Make a list of unique room types and neighbourhoods and unique boroughs
unique_boroughs = data_frame['neighbourhood_group'].unique()
unique_roomtypes = data_frame['room_type'].unique()
unique_neighbourhoods = data_frame['neighbourhood'].unique()
# # print(unique_neighbourhoods)
# print(unique_roomtypes)
# print(unique_boroughs)
# -- Regression PART B --
# -- 1) --
print("\n Part B \n 3) \n")
result_attribute = 9  # index of the 'price' column
result_data = raw_data[:, result_attribute]
Y = np.array(result_data).T
Y = Y.reshape((Y.shape[0], 1))
print("Shape of Y")
print(Y.shape)
# print(Y)
# Standarize our data matrix
# One out K for nbh
nbh_data = raw_data[:, (4)]
x_nbh = np.array(nbh_data).T
X_K1, K1_labels = categoric2numeric(x_nbh)
roomtype_data = raw_data[:, (8)]
x_rty = np.array(roomtype_data).T
X_K2, K2_labels = categoric2numeric(x_rty)
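# Note (added for clarity; inferred from how X_K1/X_K2 are used below, not
# from categoric2numeric internals): the function presumably returns a
# one-out-of-K encoded matrix of shape (n_samples, n_categories) together
# with the list of category labels, one label per column.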
# Get other parameters and standardise them
other_params = (10, 15)
other_data = np.array(raw_data[:, other_params])
# Shape of
N, M = other_data.shape
# Ensure a shape of (n, 1) for use in concatenate (only relevant when a
# single additional parameter is selected)
if M == 1:
other_data = other_data.reshape((other_data.shape[0], 1))
other_data = other_data - np.ones((N, 1)) * other_data.mean(axis=0)
other_data = other_data.astype(np.float64)
other_data = other_data * (1 / np.std(other_data, 0))
# Concatenate all of the data int one matrix
X = np.concatenate((X_K1, X_K2, other_data), axis=1)
X_labels = K1_labels + K2_labels + [attributes[i] for i in other_params]
print("X shape: ")
print(X.shape)
print("X labels")
print(X_labels)
def compare_ann_lin_reg_old():
opt_lam =100
h_lays = 15
N, M = X.shape
K = 10
cvf = 10
CV = skmd.KFold(K, shuffle=False)  # random_state has no effect when shuffle=False (newer sklearn raises)
Error_test_lin = [0 for i in range(K)]
Error_test_ann = [0 for i in range(K)]
r_values = [0 for i in range(K)]
outk = 0
for train_index, test_index in CV.split(X, Y):
X_train = X[train_index]
y_train = Y[train_index]
X_test = X[test_index]
y_test = Y[test_index]
X_train = X_train.astype(np.float64)
y_train = y_train.astype(np.float64)
X_test = X_test.astype(np.float64)
y_test = y_test.astype(np.float64)
CV = skmd.KFold(cvf, random_state=17, shuffle=True)
Error_test_lin_inner = [0 for i in range(cvf)]
Error_test_ann_inner = [0 for i in range(cvf)]
ink = 0
for inner_train_index, inner_test_index in CV.split(X_train, y_train):
# inner CV indices refer to X_train/y_train, not the full X/Y
X_train_in = X_train[inner_train_index].astype(np.float64)
y_train_in = y_train[inner_train_index].astype(np.float64)
X_test_in = X_train[inner_test_index].astype(np.float64)
y_test_in = y_train[inner_test_index].astype(np.float64)
X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)
y_train_in = y_train_in.reshape((y_train_in.shape[0],))
y_test_in = y_test_in.reshape((y_test_in.shape[0],))
# Linear regression
mu = np.mean(X_train_in[:, 1:], 0)
sigma = np.std(X_train_in[:, 1:], 0)
X_train_in[:, 1:] = (X_train_in[:, 1:] - mu) / sigma
X_test_in[:, 1:] = (X_test_in[:, 1:] - mu) / sigma
Xty = X_train_in.T @ y_train_in
XtX = X_train_in.T @ X_train_in
# Compute mean squared error without using the input data at all
Error_train_nofeatures = np.square(y_train_in - y_train_in.mean()).sum(axis=0) / y_train_in.shape[0]
Error_test_nofeatures = np.square(y_test_in - y_test_in.mean()).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for the optimal value of lambda, on entire training set
lambdaI = opt_lam * np.eye(M)
lambdaI[0, 0] = 0  # Do not regularize the bias term
w_rlr = np.linalg.solve(XtX + lambdaI, Xty).squeeze()
# Compute mean squared error with regularization with optimal lambda
Error_train_rlr = np.square(y_train_in - X_train_in @ w_rlr).sum(axis=0) / y_train_in.shape[0]
Error_test_rlr = np.square(y_test_in - X_test_in @ w_rlr).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for unregularized linear regression, on entire training set
w_noreg = np.linalg.solve(XtX, Xty).squeeze()
# Compute mean squared error without regularization
Error_train_lin = np.square(y_train_in - X_train_in @ w_noreg).sum(axis=0) / y_train_in.shape[0]
# The important thing
Error_test_lin_e = np.square(y_test_in - X_test_in @ w_noreg).sum(axis=0) / y_test_in.shape[0]
Error_test_lin_inner[ink] = Error_test_lin_e
# ANN
model = lambda: torch.nn.Sequential(
torch.nn.Linear(M, h_lays), # M features to H hidden units
# 1st transfer function, either Tanh or ReLU:
torch.nn.ReLU(),
# torch.nn.Tanh(),
torch.nn.Linear(h_lays, 1), # H hidden units to 1 output neuron
# torch.nn.Sigmoid() # final transfer function
)
loss_fn = torch.nn.MSELoss()
# Train for a maximum of 10000 steps, or until convergence (see help for the
# function train_neural_net() for more on the tolerance/convergence)
max_iter = 10000
# Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
# and see how the network is trained (search for 'def train_neural_net',
# which is the place the function below is defined)
net, final_loss, learning_curve = train_neural_net(model,
loss_fn,
X=X_train_in_torch,
y=y_train_in_torch,
n_replicates=3,
max_iter=max_iter)
y_res = net(X_test_in_torch)
# squeeze to shape (n,) so the subtraction below does not broadcast to (n, n)
y_res = y_res.data.numpy().squeeze()
# y_test = y_test.data.numpy()
eval_error = np.square(y_test_in - y_res).sum(axis=0) / y_test_in.shape[0]
Error_test_ann_inner[ink] = eval_error
# increment inner index
ink += 1
# save errors
Error_test_lin[outk] = Error_test_lin_inner
Error_test_ann[outk] = Error_test_ann_inner
# Calculate error as in 11.4.1
r_j = sum(i-j for i,j in zip(Error_test_lin_inner,Error_test_ann_inner))/len(Error_test_lin[outk])
r_values[outk] = r_j
# increment outer index
outk += 1
return Error_test_lin,Error_test_ann,r_values
def compare_baseline_lin_reg_old():
opt_lam =100
h_lays = 15
N, M = X.shape
K = 10
cvf = 10
CV = skmd.KFold(K, shuffle=False)  # random_state has no effect when shuffle=False (newer sklearn raises)
Error_test_lin = [0 for i in range(K)]
Error_test_baseline = [0 for i in range(K)]
r_values = [0 for i in range(K)]
outk = 0
for train_index, test_index in CV.split(X, Y):
X_train = X[train_index]
y_train = Y[train_index]
X_test = X[test_index]
y_test = Y[test_index]
X_train = X_train.astype(np.float64)
y_train = y_train.astype(np.float64)
X_test = X_test.astype(np.float64)
y_test = y_test.astype(np.float64)
CV = skmd.KFold(cvf, random_state=17, shuffle=True)
Error_test_lin_inner = [0 for i in range(cvf)]
Error_test_baseline_inner = [0 for i in range(cvf)]
ink = 0
for inner_train_index, inner_test_index in CV.split(X_train, y_train):
# inner CV indices refer to X_train/y_train, not the full X/Y
X_train_in = X_train[inner_train_index].astype(np.float64)
y_train_in = y_train[inner_train_index].astype(np.float64)
X_test_in = X_train[inner_test_index].astype(np.float64)
y_test_in = y_train[inner_test_index].astype(np.float64)
X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)
y_train_in = y_train_in.reshape((y_train_in.shape[0],))
y_test_in = y_test_in.reshape((y_test_in.shape[0],))
# Linear regression
mu = np.mean(X_train_in[:, 1:], 0)
sigma = np.std(X_train_in[:, 1:], 0)
X_train_in[:, 1:] = (X_train_in[:, 1:] - mu) / sigma
X_test_in[:, 1:] = (X_test_in[:, 1:] - mu) / sigma
Xty = X_train_in.T @ y_train_in
XtX = X_train_in.T @ X_train_in
# Compute mean squared error without using the input data at all
Error_train_nofeatures = np.square(y_train_in - y_train_in.mean()).sum(axis=0) / y_train_in.shape[0]
Error_test_nofeatures = np.square(y_test_in - y_test_in.mean()).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for the optimal value of lambda, on entire training set
lambdaI = opt_lam * np.eye(M)
lambdaI[0, 0] = 0  # Do not regularize the bias term
w_rlr = np.linalg.solve(XtX + lambdaI, Xty).squeeze()
# Compute mean squared error with regularization with optimal lambda
Error_train_rlr = np.square(y_train_in - X_train_in @ w_rlr).sum(axis=0) / y_train_in.shape[0]
Error_test_rlr = np.square(y_test_in - X_test_in @ w_rlr).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for unregularized linear regression, on entire training set
w_noreg = np.linalg.solve(XtX, Xty).squeeze()
# Compute mean squared error without regularization
Error_train_lin = np.square(y_train_in - X_train_in @ w_noreg).sum(axis=0) / y_train_in.shape[0]
# The important thing
Error_test_lin_e = np.square(y_test_in - X_test_in @ w_noreg).sum(axis=0) / y_test_in.shape[0]
Error_test_lin_inner[ink] = Error_test_lin_e
# baseline
y_pred = np.mean(y_train_in)
eval_error = np.square(y_test_in - y_pred).sum(axis=0) / y_test_in.shape[0]
Error_test_baseline_inner[ink] = eval_error
# increment inner index
ink += 1
# save errors
Error_test_lin[outk] = Error_test_lin_inner
Error_test_baseline[outk] = Error_test_baseline_inner
# Calculate error as in 11.4.1
r_j = sum(i-j for i,j in zip(Error_test_lin_inner,Error_test_baseline_inner))/len(Error_test_lin[outk])
r_values[outk] = r_j
# increment outer index
outk += 1
return Error_test_lin,Error_test_baseline,r_values
def compare_ann_baseline_old():
opt_lam =100
h_lays = 15
N, M = X.shape
K = 10
cvf = 10
CV = skmd.KFold(K, shuffle=False)  # random_state has no effect when shuffle=False (newer sklearn raises)
Error_test_baseline = []
Error_test_ann = []
r_values = []
outk = 0
for train_index, test_index in CV.split(X, Y):
X_train = X[train_index]
y_train = Y[train_index]
X_test = X[test_index]
y_test = Y[test_index]
X_train = X_train.astype(np.float64)
y_train = y_train.astype(np.float64)
X_test = X_test.astype(np.float64)
y_test = y_test.astype(np.float64)
# print(test_index)
# print(y_train)
# print(len(y_train))
CV = skmd.KFold(cvf, random_state=17, shuffle=True)
Error_test_baseline_inner = []
Error_test_ann_inner = []
for inner_train_index, inner_test_index in CV.split(X_train, y_train):
# print(inner_test_index)
print(len(inner_test_index))
# print(Y[inner_test_index])
# print(np.matrix(Error_test_baseline_inner).shape)
# print(np.matrix(Error_test_ann_inner).shape)
# inner CV indices refer to X_train/y_train, not the full X/Y
X_train_in = X_train[inner_train_index].astype(np.float64)
y_train_in = y_train[inner_train_index].astype(np.float64)
X_test_in = X_train[inner_test_index].astype(np.float64)
y_test_in = y_train[inner_test_index].astype(np.float64)
X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)
y_train_in = y_train_in.reshape((y_train_in.shape[0],))
# print(y_test_in.shape)
# print(y_test_in.shape[0])
y_test_in = y_test_in.reshape((y_test_in.shape[0],))
# print(y_test_in.shape)
# Baseline
y_pred = np.mean(y_train_in)
eval_error = np.square(y_test_in - y_pred).sum(axis=0) / y_test_in.shape[0]
Error_test_baseline_inner.append(eval_error)
# ANN
model = lambda: torch.nn.Sequential(
torch.nn.Linear(M, h_lays), # M features to H hidden units
# 1st transfer function, either Tanh or ReLU:
torch.nn.ReLU(),
# torch.nn.Tanh(),
torch.nn.Linear(h_lays, 1), # H hidden units to 1 output neuron
# torch.nn.Sigmoid() # final transfer function
)
loss_fn = torch.nn.MSELoss()
# Train for a maximum of 10000 steps, or until convergence (see help for the
# function train_neural_net() for more on the tolerance/convergence)
max_iter = 50
# Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
# and see how the network is trained (search for 'def train_neural_net',
# which is the place the function below is defined)
net, final_loss, learning_curve = train_neural_net(model,
loss_fn,
X=X_train_in_torch,
y=y_train_in_torch,
n_replicates=3,
max_iter=max_iter)
y_res = net(X_test_in_torch)
# squeeze to shape (n,) so the subtraction below does not broadcast to (n, n)
y_res = y_res.data.numpy().squeeze()
# print(y_res.shape)
# y_test = y_test.data.numpy()
eval_error = np.square(y_test_in - y_res).sum(axis=0) / y_test_in.shape[0]
Error_test_ann_inner.append(eval_error)
# save errors
Error_test_baseline.append(Error_test_baseline_inner)
Error_test_ann.append(Error_test_ann_inner)
print(len(Error_test_baseline), len(Error_test_baseline[0]))
print(len(Error_test_ann), len(Error_test_ann[0]))
denominator = len(Error_test_ann_inner)
Error_test_ann_inner = list(map(np.mean, Error_test_ann_inner))
# Calculate error as in 11.4.1
r_j = sum(i-j for i, j in zip(Error_test_ann_inner, Error_test_baseline_inner)) / denominator
r_values.append(r_j)
outk += 1
return Error_test_baseline, Error_test_ann, r_values
def baseline(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays):
y_pred = np.mean(y_train_in)
eval_error = np.square(y_test_in - y_pred).sum(axis=0) / y_test_in.shape[0]
return eval_error
def lin_reg(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays):
M = m
mu = np.mean(X_train_in[:, 1:], 0)
sigma = np.std(X_train_in[:, 1:], 0)
X_train_in[:, 1:] = (X_train_in[:, 1:] - mu) / sigma
X_test_in[:, 1:] = (X_test_in[:, 1:] - mu) / sigma
Xty = X_train_in.T @ y_train_in
XtX = X_train_in.T @ X_train_in
# Compute mean squared error without using the input data at all
Error_train_nofeatures = np.square(y_train_in - y_train_in.mean()).sum(axis=0) / y_train_in.shape[0]
Error_test_nofeatures = np.square(y_test_in - y_test_in.mean()).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for the optimal value of lambda, on entire training set
lambdaI = opt_lam * np.eye(M)
lambdaI[0, 0] = 0  # Do not regularize the bias term
w_rlr = np.linalg.solve(XtX + lambdaI, Xty).squeeze()
# Compute mean squared error with regularization with optimal lambda
Error_train_rlr = np.square(y_train_in - X_train_in @ w_rlr).sum(axis=0) / y_train_in.shape[0]
Error_test_rlr = np.square(y_test_in - X_test_in @ w_rlr).sum(axis=0) / y_test_in.shape[0]
# Estimate weights for unregularized linear regression, on entire training set
w_noreg = np.linalg.solve(XtX, Xty).squeeze()
# Compute mean squared error without regularization
Error_train_lin = np.square(y_train_in - X_train_in @ w_noreg).sum(axis=0) / y_train_in.shape[0]
# The important thing
Error_test_lin_e = np.square(y_test_in - X_test_in @ w_noreg).sum(axis=0) / y_test_in.shape[0]
return Error_test_lin_e
def ann(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays):
model = lambda: torch.nn.Sequential(
torch.nn.Linear(m, h_lays), # m features to H hidden units
# 1st transfer function, either Tanh or ReLU:
torch.nn.ReLU(),
# torch.nn.Tanh(),
torch.nn.Linear(h_lays, 1), # H hidden units to 1 output neuron
# torch.nn.Sigmoid() # final transfer function
)
loss_fn = torch.nn.MSELoss()
# Train for a maximum of 10000 steps, or until convergence (see help for the
# function train_neural_net() for more on the tolerance/convergence)
max_iter = 50
# Go to the file 'toolbox_02450.py' in the Tools sub-folder of the toolbox
# and see how the network is trained (search for 'def train_neural_net',
# which is the place the function below is defined)
net, final_loss, learning_curve = train_neural_net(model,
loss_fn,
X=X_train_in_torch,
y=y_train_in_torch,
n_replicates=3,
max_iter=max_iter)
y_res = net(X_test_in_torch)
# squeeze to shape (n,) so the subtraction below does not broadcast to (n, n)
y_res = y_res.data.numpy().squeeze()
# y_test = y_test.data.numpy()
eval_error = np.square(y_test_in - y_res).sum(axis=0) / y_test_in.shape[0]
return eval_error
def compare_wrapper(fun1, fun2):
opt_lam =100
h_lays = 15
N, M = X.shape
m = M
K = 10
cvf = 10
CV = skmd.KFold(K, shuffle=False)  # random_state has no effect when shuffle=False (newer sklearn raises)
error_test1 = []
error_test2 = []
r_values = []
outk = 0
for train_index, test_index in CV.split(X, Y):
X_train = X[train_index]
y_train = Y[train_index]
X_test = X[test_index]
y_test = Y[test_index]
X_train = X_train.astype(np.float64)
y_train = y_train.astype(np.float64)
X_test = X_test.astype(np.float64)
y_test = y_test.astype(np.float64)
CV = skmd.KFold(cvf, random_state=17, shuffle=True)
error_test_inner1 = []
error_test_inner2 = []
for inner_train_index, inner_test_index in CV.split(X_train, y_train):
# inner CV indices refer to X_train/y_train, not the full X/Y
X_train_in = X_train[inner_train_index].astype(np.float64)
y_train_in = y_train[inner_train_index].astype(np.float64)
X_test_in = X_train[inner_test_index].astype(np.float64)
y_test_in = y_train[inner_test_index].astype(np.float64)
X_train_in_torch = torch.tensor(X_train_in, dtype=torch.float)
y_train_in_torch = torch.tensor(y_train_in, dtype=torch.float)
X_test_in_torch = torch.tensor(X_test_in, dtype=torch.float)
y_train_in = y_train_in.reshape((y_train_in.shape[0],))
y_test_in = y_test_in.reshape((y_test_in.shape[0],))
eval_error1 = fun1(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays)
eval_error2 = fun2(opt_lam, X_train_in, X_test_in, y_train_in, y_test, y_test_in, X_train_in_torch, X_test_in_torch, y_train_in_torch, m, h_lays)
error_test_inner1.append(eval_error1)
error_test_inner2.append(eval_error2)
# save errors
error_test1.append(error_test_inner1)
error_test2.append(error_test_inner2)
# with the eval errors now scalar, the per-fold denominator is simply the
# number of inner folds
denominator = len(error_test_inner2)
error_test_inner2 = list(map(np.mean, error_test_inner2))
# Calculate error as in 11.4.1
r_j = sum(i - j for i, j in zip(error_test_inner2, error_test_inner1)) / denominator
r_values.append(r_j)
outk += 1
return error_test1, error_test2, r_values
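# The three thin wrappers below reuse compare_wrapper's two-level 10-fold
# cross-validation: the outer split yields held-out folds, and the inner
# split trains and evaluates both models on identical folds so that their
# errors are paired, which is what the t-test in t_test_analysis assumes.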
def compare_baseline_lin_reg():
return compare_wrapper(baseline, lin_reg)
def compare_ann_lin_reg():
return compare_wrapper(lin_reg, ann)
def compare_ann_baseline():
return compare_wrapper(baseline, ann)
def t_test_analysis(r_vals, alpha=.05):
j = len(r_vals)
npr_vals = np.array(r_vals)
r_mean = np.mean(npr_vals)
r_std = np.std(npr_vals)
conf_int = stats.t.interval(1 - alpha, j - 1, loc=r_mean, scale = stats.sem(r_vals))
p_value = 2 * stats.t.cdf(-abs(r_mean) / stats.sem(r_vals), df=j - 1)  # two-sided test
return conf_int, p_value
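# Reading guide (added; interpretation inferred from the "11.4.1" comments
# above, i.e. the course-book setup for comparing two models): r_vals holds
# one mean loss difference per outer fold, roughly E[err_model2 - err_model1].
# Under H0 the models perform equally, so we test whether mean(r) differs
# from 0 with a t-test on J-1 degrees of freedom. Hypothetical usage:
#   conf_int, p = t_test_analysis([12.3, -4.1, 8.0, 5.5, 9.9])
#   # p < 0.05 -> reject H0: the two models differ significantly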
# print("\n Comparison 1 \n")
# # ANN and lin reg
# Error_test_lin,Error_test_ann,r_values = compare_ann_lin_reg()
#
# print("Compare ANN and lin reg")
# print("ANN results")
# print("Errors: ")
# pprint.pprint(Error_test_ann)
#
# print("Lin reg results")
# print("Errors: ")
# pprint.pprint(Error_test_lin)
# print("\n Comparison 2 \n")
# baseline and lin reg
for strings, fun in ((["ANN", "baseline"], compare_ann_baseline),
(["baseline", "lin_reg"], compare_baseline_lin_reg),
(["ANN", "lin_reg"], compare_ann_lin_reg)):
string = "".join(strings)
string1, string2 = strings
error1, error2, r_values = fun()
conf_int, p_value = t_test_analysis(r_values)
print(f"compare {string1} and {string2}")
print(f"{string1} error")
pprint.pprint(error1)
print(f"{string2} error")
pprint.pprint(error2)
print("t-test")
print(f"confidence interval: {conf_int}")
print(f"p-value: {p_value}")
# print("Compare ANN and baseline")
#
# print("Lin reg results")
# print("Errors: ")
# pprint.pprint(Error_test_lin)
# print("11.4.1 analysis")
# print("\n Comparison 3 \n")
# baseline and ann
# Error_test_baseline,Error_test_ann,r_values = compare_ann_baseline()
#
# print("Compare ANN and Baseline")
# print("Baseline results")
# print("Errors: ")
# pprint.pprint(Error_test_baseline)
#
# print("ANN results")
# print("Errors: ")
# pprint.pprint(Error_test_ann)
# Latex table
# for index,res in enumerate(zip(Opt_h_ann, Error_test_ann, Opt_lambdas_lin, Error_test_lin, Error_test_baseline)):
# print(str(index)+" & {0:.3f} & {1:.3f} & {2:.3f} & {3:.3f} & {4:.3f}".format(*[i[0] for i in res])+r" \\")
# Draw neural net and learning curve for last layer
|
{"hexsha": "ce244ae54b99a26025b67cc72281d099a59bb89f", "size": 25875, "ext": "py", "lang": "Python", "max_stars_repo_path": "02_assignment/regression_part_b_comparrison.py", "max_stars_repo_name": "LukaAvbreht/ML_projects", "max_stars_repo_head_hexsha": "8b36acdeb017ce8a57959c609b96111968852d5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "02_assignment/regression_part_b_comparrison.py", "max_issues_repo_name": "LukaAvbreht/ML_projects", "max_issues_repo_head_hexsha": "8b36acdeb017ce8a57959c609b96111968852d5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "02_assignment/regression_part_b_comparrison.py", "max_forks_repo_name": "LukaAvbreht/ML_projects", "max_forks_repo_head_hexsha": "8b36acdeb017ce8a57959c609b96111968852d5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7883817427, "max_line_length": 157, "alphanum_fraction": 0.6283671498, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 6756}
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2019 HERE Europe B.V.
#
# SPDX-License-Identifier: MIT
#
###############################################################################
import json
import random
import numpy as np
from test.utils import (BaseTestAsync, format_long_args)
from qgis.core import QgsFields
from qgis.testing import unittest
from XYZHubConnector.xyz_qgis.layer import parser
# import unittest
# class TestParser(BaseTestAsync, unittest.TestCase):
class TestFieldsSimilarity(BaseTestAsync):
def _similarity_of_fields_names_and_props_keys(self, fields_names, props_keys):
props = dict((v, k) for k, v in enumerate(props_keys))
# from parser.prepare_fields
orig_props_names = [k for k, v in props.items()
if v is not None]
parser.rename_special_props(props) # rename fid in props
props_names = [k for k, v in props.items()
if v is not None]
return parser.fields_similarity(fields_names, orig_props_names, props_names)
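# Note (added; inferred from the expected scores in the tests below, not
# from parser internals): the score appears to be the fraction of existing
# non-special field names that also occur in the feature's property keys,
# with empty fields/props handled as special cases.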
def subtest_similarity_score(self, fields_names, props_keys, expected):
with self.subTest(fields_names=fields_names,props_keys=props_keys):
score = self._similarity_of_fields_names_and_props_keys(fields_names, props_keys)
self._log_debug("score", score)
self.assertEqual(score, expected)
return score
def test_simple(self):
fid = parser.QGS_ID
xid = parser.QGS_XYZ_ID
xyz_special_key = "@ns:com:here:xyz"
score = self.subtest_similarity_score([fid, "a", "b"], ["a", "b"], 1)
score = self.subtest_similarity_score([fid,"a"], ["a","b"], 1)
score = self.subtest_similarity_score([fid,"a"], ["b"], 0)
score = self.subtest_similarity_score([fid,"a","c"], ["a","b"], 0.5)
score = self.subtest_similarity_score([fid, xyz_special_key,"a","b","c"],
[xyz_special_key,"a"], 1)
def test_empty(self):
fid = parser.QGS_ID
xid = parser.QGS_XYZ_ID
xyz_special_key = "@ns:com:here:xyz"
# empty fields shall return merged fields (score 1)
score = self.subtest_similarity_score([fid], [], 1)
score = self.subtest_similarity_score([], [], 1)
score = self.subtest_similarity_score([xyz_special_key], [], 1)
score = self.subtest_similarity_score([xyz_special_key], [xyz_special_key], 1)
score = self.subtest_similarity_score([fid, xyz_special_key], [], 1)
score = self.subtest_similarity_score([fid, xyz_special_key], [xyz_special_key], 1)
score = self.subtest_similarity_score([fid], [], 1)
score = self.subtest_similarity_score([fid], [xyz_special_key], 1)
def test_empty_variant_1(self):
fid = parser.QGS_ID
xid = parser.QGS_XYZ_ID
xyz_special_key = "@ns:com:here:xyz"
# variant 1: empty props will be merged into any fields
# empty fields will be merged with any props
score = self.subtest_similarity_score([fid], ["a"], 1)
score = self.subtest_similarity_score([fid,"a"], [], 1)
score = self.subtest_similarity_score([fid,xyz_special_key], ["a",xyz_special_key], 1)
score = self.subtest_similarity_score([fid], [fid], 1)
score = self.subtest_similarity_score([fid, xyz_special_key], [fid], 1)
def test_empty_variant_2(self):
fid = parser.QGS_ID
xid = parser.QGS_XYZ_ID
xyz_special_key = "@ns:com:here:xyz"
# variant 2: empty props will be merged into empty fields only
# empty fields are reserved for empty props only
# non-empty fields shall return new fields (score 0)
score = self.subtest_similarity_score([fid], ["a"], 0)
score = self.subtest_similarity_score([fid,"a"], [], 0)
score = self.subtest_similarity_score([fid,xyz_special_key], ["a",xyz_special_key], 0)
score = self.subtest_similarity_score([fid], [fid], 0)
score = self.subtest_similarity_score([fid, xyz_special_key], [fid], 0)
def test_complex(self):
feat_json = dict(properties=dict(a=1,b=2))
lst_fields = list()
# prepare_fields
if __name__ == "__main__":
# unittest.main()
tests = [
"TestFieldsSimilarity.test_simple",
"TestFieldsSimilarity.test_empty",
# "TestFieldsSimilarity.test_empty_variant_1",
"TestFieldsSimilarity.test_empty_variant_2",
]
# unittest.main(defaultTest = tests, failfast=True) # will not run all subtest
unittest.main(defaultTest = tests)
|
{"hexsha": "f9299d570ebf1103970f4bef7454568557c9e8e7", "size": 4742, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_fields_similarity.py", "max_stars_repo_name": "deeplook/xyz-qgis-plugin", "max_stars_repo_head_hexsha": "37b7d84992155fe35d9578b58c9d74a198eccb40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-18T18:03:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T18:03:04.000Z", "max_issues_repo_path": "test/test_fields_similarity.py", "max_issues_repo_name": "deeplook/xyz-qgis-plugin", "max_issues_repo_head_hexsha": "37b7d84992155fe35d9578b58c9d74a198eccb40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_fields_similarity.py", "max_forks_repo_name": "deeplook/xyz-qgis-plugin", "max_forks_repo_head_hexsha": "37b7d84992155fe35d9578b58c9d74a198eccb40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9074074074, "max_line_length": 96, "alphanum_fraction": 0.6210459722, "include": true, "reason": "import numpy", "num_tokens": 1140}
|
import os
import random
import time
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from crossView import PVA_model, Argoverse
from opt import get_args
import tqdm
from datetime import datetime
from utils import mean_IU, mean_precision
import wandb
def readlines(filename):
"""Read all the lines in a text file and return as a list
"""
with open(filename, 'r') as f:
lines = f.read().splitlines()
return lines
class Trainer_argo:
def __init__(self):
self.opt = get_args()
self.models = {}
self.weight = {"static": self.opt.static_weight, "dynamic": self.opt.dynamic_weight}
self.seed = self.opt.global_seed
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.create_time = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
self.epoch = 0
self.start_epoch = 0
if self.seed != 0:
self.set_seed() # set seed
# Initializing models
self.model = PVA_model(self.opt, self.device)
#self.model.to(self.device)
# Optimization
self.optimizer = optim.Adam(self.model.parameters_to_train)
# Data Loaders
fpath = os.path.join(
os.path.dirname(__file__),
"splits",
"argo",
"{}_files.txt")
train_filenames = readlines(fpath.format("train"))
val_filenames = readlines(fpath.format("val"))
self.val_filenames = val_filenames
self.train_filenames = train_filenames
train_dataset = Argoverse(self.opt, train_filenames)
val_dataset = Argoverse(self.opt, val_filenames, is_train=False)
self.train_loader = DataLoader(
dataset = train_dataset,
batch_size = self.opt.batch_size,
shuffle = True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
self.val_loader = DataLoader(
dataset = val_dataset,
batch_size = 1,
shuffle = True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
if self.opt.load_weights_folder != "":
self.load_model()
# Save log and models path
now = datetime.now()
self.opt.save_path = os.path.join(self.opt.save_path, now.strftime("%Y%m%d-%H%M%S"))
wandb.init(project="cross-view", entity="zzx9636", config={"epochs": self.opt.num_epochs,
"batch_size": self.opt.batch_size})
wandb.define_metric("eval/*", step_metric="eval/step")
print(
"There are {:d} training items and {:d} validation items\n".format(
len(train_dataset),
len(val_dataset)))
def train(self):
#self.validation()
for self.epoch in range(self.start_epoch, self.opt.num_epochs + 1):
self.adjust_learning_rate(self.optimizer, self.epoch, self.opt.lr_steps)
self.run_epoch()
self.validation()
if (self.epoch%5)==0:
self.save_model()
def run_epoch(self):
for inputs in self.train_loader:
self.model.train()
self.optimizer.zero_grad()
for key, input in inputs.items():
if key != "filename":
inputs[key] = input.to(self.device)
_, losses = self.model(inputs)
losses["loss"].backward()
self.optimizer.step()
wandb.log({"loss": losses["loss"], "topview_loss": losses["topview_loss"],
"transform_loss": losses["transform_loss"]})
#"transform_topview_loss": losses["transform_topview_loss"]})
def validation(self):
iou, mAP = np.array([0., 0., 0.]), np.array([0., 0., 0.])
#trans_iou, trans_mAP = np.array([0., 0.]), np.array([0., 0.])
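# (added note, inferred from the logging at the end of this method: index 1
# is assumed to be the drivable-map class and index 2 the vehicle class;
# index 0 is presumably background and is not reported)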
with torch.no_grad():
for inputs in self.val_loader:
self.model.eval()
for key, input in inputs.items():
if key != "filename":
inputs[key] = input.to(self.device)
outputs, _ = self.model(inputs)
pred = np.squeeze(
torch.argmax(
outputs["topview"].detach(),
1).cpu().numpy())
true = np.squeeze(
inputs["combine"].detach().cpu().numpy())
#print(mean_IU(pred, true), mean_precision(pred, true))
iou += mean_IU(pred, true)
mAP += mean_precision(pred, true)
iou /= len(self.val_loader)
mAP /= len(self.val_loader)
print("Epoch: %d | Validation: mIOU: %.4f, %.4f mAP: %.4f, %.4f" % (self.epoch, iou[1], iou[2], mAP[1], mAP[2]))
log_dict = {"eval/step": self.epoch, "eval/map/mIOU": iou[1], "eval/map/mAP": mAP[1],
"eval/vehicle/mIOU": iou[2], "eval/vehicle/mAP": mAP[2]}
wandb.log(log_dict)
def save_model(self):
save_path = os.path.join(
self.opt.save_path,
"weights_{}".format(
self.epoch)
)
if not os.path.exists(save_path):
os.makedirs(save_path)
for model_name, model in self.model.models.items():
model_path = os.path.join(save_path, "{}.pth".format(model_name))
state_dict = model.state_dict()
state_dict['epoch'] = self.epoch
if model_name == "encoder":
state_dict["height"] = self.opt.height
state_dict["width"] = self.opt.width
torch.save(state_dict, model_path)
optim_path = os.path.join(save_path, "{}.pth".format("adam"))
torch.save(self.optimizer.state_dict(), optim_path)
print("Save models to ", save_path)
def load_model(self):
"""Load model(s) from disk
"""
self.opt.load_weights_folder = os.path.expanduser(
self.opt.load_weights_folder)
assert os.path.isdir(self.opt.load_weights_folder), \
"Cannot find folder {}".format(self.opt.load_weights_folder)
print(
"loading model from folder {}".format(
self.opt.load_weights_folder))
for key in self.model.models.keys():
if "discriminator" not in key:
print("Loading {} weights...".format(key))
path = os.path.join(
self.opt.load_weights_folder,
"{}.pth".format(key))
model_dict = self.model.models[key].state_dict()
pretrained_dict = torch.load(path)
if 'epoch' in pretrained_dict:
self.start_epoch = pretrained_dict['epoch']
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.model.models[key].load_state_dict(model_dict)
# loading adam state
if self.opt.load_weights_folder == "":
optimizer_load_path = os.path.join(
self.opt.load_weights_folder, "adam.pth")
if os.path.isfile(optimizer_load_path):
print("Loading Adam weights")
optimizer_dict = torch.load(optimizer_load_path)
self.optimizer.load_state_dict(optimizer_dict)
else:
print("Cannot find Adam weights so Adam is randomly initialized")
def adjust_learning_rate(self, optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by 10 every 25 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
decay = round(decay, 2)
lr = self.opt.lr * decay
lr_transform = self.opt.lr_transform * decay
weight_decay = self.opt.weight_decay
optimizer.param_groups[0]['lr'] = lr_transform
optimizer.param_groups[1]['lr'] = lr
optimizer.param_groups[0]['weight_decay'] = weight_decay
optimizer.param_groups[1]['weight_decay'] = weight_decay
wandb.log({"lr": lr, "lr_transform": lr_transform, "decay": weight_decay})
def set_seed(self):
seed = self.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
start_time = time.ctime()
print(start_time)
trainer = Trainer_argo()
trainer.train()
end_time = time.ctime()
print(end_time)
|
{"hexsha": "f75dad1a908cd9515ec0ceb89ed5c42182cd92e4", "size": 8725, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_argo.py", "max_stars_repo_name": "zzx9636/cross-view", "max_stars_repo_head_hexsha": "9a7e874be607eefa7bd34934e274cc376e99f65f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_argo.py", "max_issues_repo_name": "zzx9636/cross-view", "max_issues_repo_head_hexsha": "9a7e874be607eefa7bd34934e274cc376e99f65f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_argo.py", "max_forks_repo_name": "zzx9636/cross-view", "max_forks_repo_head_hexsha": "9a7e874be607eefa7bd34934e274cc376e99f65f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5062761506, "max_line_length": 120, "alphanum_fraction": 0.5645845272, "include": true, "reason": "import numpy", "num_tokens": 1879}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to plot the NN predictions
"""
from vrmslearn.Trainer import Trainer
from vrmslearn.SeismicGenerator import SeismicGenerator, mute_direct, random_static, random_noise, mute_nearoffset, random_filt
from vrmslearn.RCNN import RCNN
from vrmslearn.ModelParameters import ModelParameters
from semblance.nmo_correction import semblance_gather, nmo_correction
import argparse
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams.update({'font.size': 7})
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import numpy as np
import os
from shutil import rmtree
import h5py as h5
from scipy.signal import butter, lfilter
from scipy import ndimage, misc
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
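# Usage sketch (added; illustrative values, not taken from this script):
#   filtered = butter_bandpass_filter(trace, lowcut=5.0, highcut=60.0,
#                                     fs=1.0 / (pars.dt * pars.resampling))
# fs is the sampling rate in Hz; order trades roll-off steepness against
# passband ripple and ringing.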
def plot_predictions(modeled_data,
vp, vrms, vpred, tlabels, refpred, vint, vint_pred, pars):
"""
This method creates one example by generating a random velocity model,
modeling a shot record with it, and also computes the vrms. The three
results are displayed side by side in an window.
@params:
@returns:
"""
# Plot results
fig, ax = plt.subplots(1, 3, figsize=[16, 8])
im1 = ax[0].imshow(vp, cmap=plt.get_cmap('hot'), aspect='auto',
vmin=0.9 * pars.vp_min, vmax=1.1 * pars.vp_max)
ax[0].set_xlabel("X Cell Index," + " dh = " + str(pars.dh) + " m",
fontsize=12, fontweight='normal')
ax[0].set_ylabel("Z Cell Index," + " dh = " + str(pars.dh) + " m",
fontsize=12, fontweight='normal')
ax[0].set_title("P Interval Velocity", fontsize=16, fontweight='bold')
p = ax[0].get_position().get_points().flatten()
axis_cbar = fig.add_axes([p[0], 0.03, p[2] - p[0], 0.02])
plt.colorbar(im1, cax=axis_cbar, orientation='horizontal')
clip = 0.05
vmax = np.max(modeled_data) * clip
vmin = -vmax
ax[1].imshow(modeled_data,
interpolation='bilinear',
cmap=plt.get_cmap('Greys'),
vmin=vmin, vmax=vmax,
aspect='auto')
tlabels = [ii for ii, t in enumerate(tlabels) if t == 1]
toff = np.zeros(len(tlabels)) + int(modeled_data.shape[1]/2)+1
ax[1].plot(toff, tlabels, '*')
refpred = [ii for ii, t in enumerate(refpred) if t == 1]
toff = np.zeros(len(refpred)) + int(modeled_data.shape[1]/2)-2
ax[1].plot(toff, refpred, 'r*')
ax[1].set_xlabel("Receiver Index", fontsize=12, fontweight='normal')
ax[1].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
fontsize=12, fontweight='normal')
ax[1].set_title("Shot Gather", fontsize=16, fontweight='bold')
ax[2].plot(vrms * (pars.vp_max-pars.vp_min) + pars.vp_min,
np.arange(0, len(vrms)))
ax[2].plot(vpred * (pars.vp_max - pars.vp_min) + pars.vp_min,
np.arange(0, len(vpred)))
ax[2].plot(vint * (pars.vp_max-pars.vp_min) + pars.vp_min,
np.arange(0, len(vint)))
ax[2].plot(vint_pred * (pars.vp_max - pars.vp_min) + pars.vp_min,
np.arange(0, len(vint_pred)))
ax[2].invert_yaxis()
ax[2].set_ylim(top=0, bottom=len(vrms))
ax[2].set_xlim(0.9 * pars.vp_min, 1.1 * pars.vp_max)
ax[2].set_xlabel("RMS Velocity (m/s)", fontsize=12, fontweight='normal')
ax[2].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
fontsize=12, fontweight='normal')
ax[2].set_title("P RMS Velocity", fontsize=16, fontweight='bold')
plt.show()
def plot_predictions_semb3(modeled_data,
vrms, vpred,
tlabels, refpred,
vint, vint_pred,
masks,
pars, dv=30, vmin=None, vmax = None,
clip=0.05, clipsemb=1.0,
plot_semb = True,
with_nmo = False,
textlabels = None,
savefile=None,
vint_pred_std=None,
vpred_std=None, tmin=None, tmax=None):
"""
This method creates one example by generating a random velocity model,
modeling a shot record with it, and also computes the vrms. The three
results are displayed side by side in a window.
@params:
@returns:
"""
NT = modeled_data[0].shape[0]
ng = modeled_data[0].shape[1]
dt = pars.resampling * pars.dt
if vmin is None:
vmin = pars.vp_min
if vmax is None:
vmax = pars.vp_max
if pars.gmin ==-1 or pars.gmax ==-1:
offsets = (np.arange(0, ng) - (ng) / 2) * pars.dh * pars.dg
else:
offsets = (np.arange(pars.gmin, pars.gmax, pars.dg)) * pars.dh
times = np.reshape(np.arange(0, NT * dt, dt) - pars.tdelay, [-1])
vels = np.arange(vmin - 5*dv, vmax + 2*dv, dv)
if with_nmo:
fig, ax = plt.subplots(3, 3, figsize=[11 / 2.54, 18 / 2.54])
else:
fig, ax = plt.subplots(3, 2, figsize=[8 / 2.54, 18 / 2.54])
titles = [["a)", "b)", "c)"], ["d)", "e)", "f)"], ["g)", "h)", "i)"]]
labels = ["True", "Pred", "Vint true", "Vint pred", "Vrms true", "Vrms pred", "Vrms std", "Vint std"]
plots = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for ii in range(3):
if plot_semb:
semb = semblance_gather(modeled_data[ii], times, offsets, vels)
vmax = np.max(modeled_data[ii]) * clip
vmin = -vmax
ax[ii, 0].imshow(modeled_data[ii],
interpolation='bilinear',
cmap=plt.get_cmap('Greys'),
extent=[offsets[0] / 1000, offsets[-1] / 1000, times[-1], times[0]],
vmin=vmin, vmax=vmax,
aspect='auto')
ymin, ymax = ax[ii, 0].get_ylim()
if tmin is not None:
if type(tmin) is list:
ymax = tmin[ii]
else:
ymax = tmin
if tmax is not None:
if type(tmax) is list:
ymin = tmax[ii]
else:
ymin = tmax
xmin, xmax = ax[ii, 0].get_xlim()
if tlabels is not None:
tlabels[ii] = [jj * dt - pars.tdelay for jj, t in enumerate(tlabels[ii]) if t == 1]
refpred[ii] = [jj * dt - pars.tdelay for jj, t in enumerate(refpred[ii]) if t == 1]
if np.min(offsets) < 0:
if tlabels is not None:
tofflabels = np.zeros(len(tlabels[ii])) - 2 * pars.dh * pars.dg
toffpreds = np.zeros(len(refpred[ii])) + 2 * pars.dh * pars.dg
else:
if tlabels is not None:
tofflabels = np.zeros(len(tlabels[ii])) + np.min(np.abs(offsets)) + 1 * pars.dh * pars.dg
toffpreds = np.zeros(len(refpred[ii])) + np.min(np.abs(offsets)) + 3 * pars.dh * pars.dg
if tlabels is not None:
plots[0], = ax[ii, 0].plot(tofflabels / 1000, tlabels[ii], 'r*', markersize=3)
plots[1], = ax[ii, 0].plot(toffpreds / 1000, refpred[ii], 'b*', markersize=3)
ax[ii, 0].set_xlabel("Offset (km)")
ax[ii, 0].set_ylabel("Time (s)")
#ax[ii, 0].set_title(titles[0][0])
ax[ii, 0].text(xmin - 0.3 * (xmax-xmin), ymax + 0.1*(ymax-ymin),
titles[0][ii], fontsize="large")
# ax[ii, 2 * jj].xaxis.set_ticks(np.arange(-1, 1.5, 0.5))
if ii == 0:
ax[ii, 0].legend(plots[0:2], labels[0:2], loc='upper right',
bbox_to_anchor=(1.13, 1.29))
if plot_semb:
vmax = np.max(semb) * clipsemb
vmin = np.min(semb)
ax[ii, 1].imshow(semb,
extent=[(vels[0] - dv / 2) / 1000,
(vels[-1] - dv / 2) / 1000, times[-1], times[0]],
cmap=plt.get_cmap('YlOrRd'),
vmin=vmin, vmax=vmax,
interpolation='bilinear',
aspect='auto')
if masks is not None:
if vint is not None:
vint[ii][masks[ii] == 0] = np.NaN
if vrms is not None:
vrms[ii][masks[ii] == 0] = np.NaN
vint_pred[ii][masks[ii] == 0] = np.NaN
vpred[ii][masks[ii] == 0] = np.NaN
if vint is not None:
plots[2], = ax[ii, 1].plot(vint[ii] / 1000, times, '-', color='lightgray')
if vint_pred_std is not None:
plots[6], = ax[ii, 1].plot((vint_pred[ii] + vint_pred_std[ii]) / 1000, times, '-', color='lightgreen', alpha=0.4)
ax[ii, 1].plot((vint_pred[ii] - vint_pred_std[ii]) / 1000, times, '-', color='lightgreen', alpha=0.4)
if vrms is not None:
plots[4], = ax[ii, 1].plot(vrms[ii] / 1000, times, '-g', color='black')
plots[5], = ax[ii, 1].plot(vpred[ii] / 1000, times, '-b')
plots[3], = ax[ii, 1].plot(vint_pred[ii] / 1000, times, '-', color='lightgreen')
if vpred_std is not None:
plots[7], = ax[ii, 1].plot((vpred[ii] + vpred_std[ii]) / 1000, times, '-b', alpha=0.2)
ax[ii, 1].plot((vpred[ii] - vpred_std[ii]) / 1000, times, '-b', alpha=0.2)
ax[ii, 1].xaxis.set_ticks(np.arange(np.ceil(np.min(vels)/1000),
1+np.floor(np.max(vels)/1000)))
ax[ii, 1].set_ylim(bottom=ymin, top=ymax)
ax[ii, 0].set_ylim(bottom=ymin, top=ymax)
xmin, xmax = ax[ii, 1].get_xlim()
ax[ii, 1].set_xlabel("Velocity (km/s)")
ax[ii, 1].set_ylabel("Time (s)")
ax[ii, 1].text(xmin - 0.3 * (xmax - xmin), ymax + 0.1 * (ymax - ymin),
titles[1][ii], fontsize="large")
if textlabels:
ax[ii, 1].text(xmin + 0.94 * (xmax - xmin), ymax + - 0.03 * (ymax - ymin),
textlabels[ii], ha="right", va="top", fontsize="large")
if ii == 0:
ax[ii, 1].legend(plots[2:6], labels[2:6],
loc='upper right',
bbox_to_anchor=(1.15, 1.50),
handlelength=0.4)
if with_nmo:
vmax = np.max(modeled_data[ii]) * clip
vmin = -vmax
data_nmo = nmo_correction(modeled_data[ii], times, offsets, vpred[ii], stretch_mute=0.3)
ax[ii, 2].imshow(data_nmo,
interpolation='bilinear',
cmap=plt.get_cmap('Greys'),
extent=[offsets[0] / 1000, offsets[-1] / 1000, times[-1], times[0]],
vmin=vmin, vmax=vmax,
aspect='auto')
ax[ii, 2].set_ylim(bottom=ymin, top=ymax)
ax[ii, 2].set_xlabel("Offset (km)")
ax[ii, 2].set_ylabel("Time (s)")
xmin, xmax = ax[ii, 0].get_xlim()
ax[ii, 2].text(xmin - 0.3 * (xmax-xmin), ymax + 0.1*(ymax-ymin),
titles[2][ii], fontsize="large")
plt.tight_layout(rect=[0, 0, 1, 0.995])
if savefile:
plt.savefig(savefile, dpi=600)
plt.savefig(savefile+"_lowres", dpi=100)
plt.show()
if __name__ == "__main__":
# Set pref_device_type = 4
pref_device_type = 4
# Initialize argument parser
parser = argparse.ArgumentParser()
# Add arguments to parse for training
parser.add_argument(
"--logdir",
type=str,
default="logs",
help="name of the directory to save logs : str"
)
parser.add_argument(
"--filename",
type=str,
default="dataset_1/dhmin40_layer_num_min5/example_1_31891",
help="name of the directory to save logs : str"
)
parser.add_argument(
"--fileparam",
type=str,
default="dataset_1/dhmin40_layer_num_min5/example_1_31891",
help="name of the directory that contains the model parameters: str"
)
parser.add_argument(
"--niter",
type=int,
default=5000,
help="number of training iterations : int > 0"
)
parser.add_argument(
"--nbatch",
type=int,
default=10,
help="number of gathers in one batch : int > 0"
)
parser.add_argument(
"--nlayers",
type=int,
default=2,
help="number of layers in the model : int > 0"
)
parser.add_argument(
"--layer_num_min",
type=int,
default=5,
help="number of layers in the model : int > 0"
)
parser.add_argument("-d", "--device",
type=int,
default=4,
help="device type : int = 2 or 4, default = 2")
# Parse the input for training parameters
args, unparsed = parser.parse_known_args()
# Test for input errors
def print_usage_error_message():
print("\nUsage error.\n")
parser.print_help()
if args.niter < 0:
print_usage_error_message()
exit()
if args.nlayers <= -1:
print_usage_error_message()
exit()
if args.nbatch <= 0:
print_usage_error_message()
exit()
parameters = ModelParameters()
parameters.read_parameters_from_disk(args.fileparam)
parameters.device_type = args.device
parameters.num_layers = args.nlayers
#parameters.read_parameters_from_disk(filename='dataset_3/dhmin40_layer_num_min5/model_parameters.hdf5')
gen = SeismicGenerator(parameters)
parameters.mute_nearoffset = False
parameters.random_static = False
parameters.random_noise = False
data, vrms, vint, valid, tlabels = gen.read_example(".", filename=args.filename)
# data = mute_direct(data, 1500, parameters)
# #data = random_static(data, 2)
## data = random_noise(data, 0.01)
## data = mute_nearoffset(data, 10)
## data = random_filt(data, 9)
data = np.expand_dims(data, axis=-1)
data = np.expand_dims(data, axis=0)
vrms = np.expand_dims(vrms, axis=0)
vint = np.expand_dims(vint, axis=0)
valid = np.expand_dims(valid, axis=0)
tlabels = np.expand_dims(tlabels, axis=0)
f = h5.File(args.filename, "r")
vp = f['vp'][:]
f.close()
nn = RCNN(input_size=gen.image_size,
batch_size=1)
trainer = Trainer(NN=nn,
data_generator=gen,
totrain=False)
preds = trainer.evaluate(toeval=[nn.output_ref, nn.output_vint, nn.output_vrms],
niter=args.niter,
dir=args.logdir,
batch=[data, vrms, vint, valid, tlabels])
refpred = np.argmax(preds[0][0,:], axis=1)
vint_pred = preds[1]
vpred = preds[2]
vp = np.stack([vp] * vp.shape[0], axis=1)
plot_predictions(data[0, :, :, 0],
                 vp,
                 vrms[0, :],
                 vpred[0, :],
                 tlabels[0, :],
                 refpred, vint[0, :], vint_pred[0, :], parameters)
|
{"hexsha": "81cabe81b10de556097fda893d1927eec7c8c01e", "size": 15553, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_prediction.py", "max_stars_repo_name": "GeoCode-polymtl/Deep_1D_velocity", "max_stars_repo_head_hexsha": "8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-08-17T19:47:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:02:51.000Z", "max_issues_repo_path": "plot_prediction.py", "max_issues_repo_name": "GeoCode-polymtl/Deep_1D_velocity", "max_issues_repo_head_hexsha": "8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:17:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:31:59.000Z", "max_forks_repo_path": "plot_prediction.py", "max_forks_repo_name": "GeoCode-polymtl/Deep_1D_velocity", "max_forks_repo_head_hexsha": "8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-27T06:05:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T00:38:38.000Z", "avg_line_length": 37.6585956416, "max_line_length": 127, "alphanum_fraction": 0.5402173214, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4406}
|
[STATEMENT]
lemma weakPsiCongTransitive:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and R :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<doteq> Q"
and "\<Psi> \<rhd> Q \<doteq> R"
shows "\<Psi> \<rhd> P \<doteq> R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> R
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> R
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
[PROOF STEP]
have "\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> R
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> R
[PROOF STEP]
proof(induct rule: weakPsiCongSymI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> Qa \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> P
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
3. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
case(cSym P R)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (3 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> Qa \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> P
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
3. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> R \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> P
[PROOF STEP]
by(auto dest: weakPsiCongSym)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> R \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> P
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
case(cSim P R)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
hence "\<Psi> \<rhd> P \<doteq> Q" and "\<Psi> \<rhd> Q \<doteq> R"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<doteq> Q &&& \<Psi> \<rhd> Q \<doteq> R
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
from \<open>\<Psi> \<rhd> P \<doteq> Q\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<doteq> Q
[PROOF STEP]
have "\<Psi> \<rhd> P \<approx> Q"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<approx> Q
[PROOF STEP]
by(metis weakBisimE weakPsiCongE)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<approx> Q
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<approx> Q
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
from \<open>\<Psi> \<rhd> P \<doteq> Q\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<doteq> Q
[PROOF STEP]
have "\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
[PROOF STEP]
by(rule weakPsiCongE)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
from \<open>\<Psi> \<rhd> Q \<doteq> R\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> Q \<doteq> R
[PROOF STEP]
have "\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R"
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
[PROOF STEP]
by(rule weakPsiCongE)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
have "{(\<Psi>, P, R) | \<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {(\<Psi>, P, R) |\<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim
[PROOF STEP]
by(auto dest: weakBisimTransitive)
[PROOF STATE]
proof (state)
this:
{(\<Psi>, P, R) |\<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim
goal (2 subgoals):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
2. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Qa
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
\<Psi> \<rhd> P \<approx> Q
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
{(\<Psi>, P, R) |\<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
\<Psi> \<rhd> P \<approx> Q
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
{(\<Psi>, P, R) |\<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim
goal (1 subgoal):
1. \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
[PROOF STEP]
using weakBisimE(2)
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q
\<Psi> \<rhd> Q \<doteq> R
\<Psi> \<rhd> P \<approx> Q
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> Q
\<Psi> \<rhd> Q \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
{(\<Psi>, P, R) |\<Psi> P R. \<exists>Q. \<Psi> \<rhd> P \<approx> Q \<and> \<Psi> \<rhd> Q \<approx> R} \<subseteq> weakBisim
?\<Psi> \<rhd> ?P \<approx> ?Q \<Longrightarrow> ?\<Psi> \<rhd> ?P \<leadsto><weakBisim> ?Q
goal (1 subgoal):
1. \<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
[PROOF STEP]
by(rule_tac weakCongSimTransitive)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<leadsto>\<guillemotleft>weakBisim\<guillemotright> R
goal (1 subgoal):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
[PROOF STEP]
case(cWeakBisim P R)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<And>P Qa. \<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> Qa \<Longrightarrow> \<Psi> \<rhd> P \<approx> Qa
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<Psi> \<rhd> P \<doteq> Q \<and> \<Psi> \<rhd> Q \<doteq> R
goal (1 subgoal):
1. \<Psi> \<rhd> P \<approx> R
[PROOF STEP]
by(auto dest: weakBisimTransitive weakPsiCongE)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<approx> R
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> P \<doteq> R
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5674, "file": "Psi_Calculi_Weak_Psi_Congruence", "length": 38}
|
!! Helper Variables
INTEGER :: inner_counter, outer_counter
INTEGER :: elements_per_inner
INTEGER :: total_counter
CALL ConstructEmptyMatrix(dense_matrix, sparse_matrix%rows, &
& sparse_matrix%columns)
!! Loop over elements.
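  !! The sparse input is stored column-wise (CSC): outer_index holds the
  !! cumulative element count per column and inner_index the row of each
  !! value. The triplet `temporary` is declared by the including file.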
dense_matrix%DATA = 0
total_counter = 1
DO outer_counter = 1, sparse_matrix%columns
elements_per_inner = sparse_matrix%outer_index(outer_counter+1) - &
& sparse_matrix%outer_index(outer_counter)
temporary%index_column = outer_counter
DO inner_counter = 1, elements_per_inner
temporary%index_row = sparse_matrix%inner_index(total_counter)
temporary%point_value = sparse_matrix%values(total_counter)
dense_matrix%DATA(temporary%index_row, temporary%index_column) = &
& temporary%point_value
total_counter = total_counter + 1
END DO
END DO
|
{"hexsha": "860a5895eea9f97afa1fc94fe258ef075b39ba45", "size": 866, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/Fortran/dense_includes/ConstructMatrixDFromS.f90", "max_stars_repo_name": "Kokookster/NTPoly", "max_stars_repo_head_hexsha": "717b2e344e800ea6c2de7061b96dd51ffd089f36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2017-06-16T21:24:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T06:02:39.000Z", "max_issues_repo_path": "Source/Fortran/dense_includes/ConstructMatrixDFromS.f90", "max_issues_repo_name": "Kokookster/NTPoly", "max_issues_repo_head_hexsha": "717b2e344e800ea6c2de7061b96dd51ffd089f36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2017-06-16T01:33:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T04:52:13.000Z", "max_forks_repo_path": "Source/Fortran/dense_includes/ConstructMatrixDFromS.f90", "max_forks_repo_name": "Kokookster/NTPoly", "max_forks_repo_head_hexsha": "717b2e344e800ea6c2de7061b96dd51ffd089f36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-08-06T13:44:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T11:54:18.000Z", "avg_line_length": 36.0833333333, "max_line_length": 74, "alphanum_fraction": 0.7263279446, "num_tokens": 204}
|
import cv2
import torch
import random
import numpy as np
def flip_horizontal(img, mask):
    # copy() makes the flipped views contiguous (needed e.g. for torch.from_numpy)
    img = np.flip(img, axis=1).copy()
    mask = np.flip(mask, axis=1).copy()
    return img, mask
def rotate(img, mask, angle_abs=5):
h, w, _ = img.shape
angle = random.choice([angle_abs, -angle_abs])
    # getRotationMatrix2D expects the (x, y) rotation center and warpAffine the
    # (width, height) output size, not (h, w)
    M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    img = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC)
    mask = cv2.warpAffine(mask, M, (w, h), flags=cv2.INTER_CUBIC)
mask = np.expand_dims(mask, axis=-1)
return img, mask
class RandomAugmentation:
augmentations = [flip_horizontal, rotate]
def __init__(self, max_augment_count):
if max_augment_count <= len(self.augmentations):
self.max_augment_count = max_augment_count
else:
self.max_augment_count = len(self.augmentations)
def __call__(self, sample):
img, mask = sample['image'], sample['label']
augmentation_count = random.randint(0, self.max_augment_count)
selected_augmentations = random.sample(self.augmentations, k=augmentation_count)
for augmentation in selected_augmentations:
img, mask = augmentation(img, mask)
        return {'image': img, 'label': mask}
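if __name__ == "__main__":
    # Minimal self-check (sketch with random data; array shapes are assumptions):
    demo_img = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
    demo_mask = np.random.randint(0, 2, (64, 64, 1)).astype(np.uint8)
    aug = RandomAugmentation(max_augment_count=2)
    out = aug({'image': demo_img, 'label': demo_mask})
    print({key: value.shape for key, value in out.items()})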
|
{"hexsha": "7264f6b633427e9ea9d50f4a8b28c0358370ed27", "size": 1229, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/transforms.py", "max_stars_repo_name": "garvm7/transunet_pytorch", "max_stars_repo_head_hexsha": "277c42d182ab9606607b0db782f0d00b55f06760", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/transforms.py", "max_issues_repo_name": "garvm7/transunet_pytorch", "max_issues_repo_head_hexsha": "277c42d182ab9606607b0db782f0d00b55f06760", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/transforms.py", "max_forks_repo_name": "garvm7/transunet_pytorch", "max_forks_repo_head_hexsha": "277c42d182ab9606607b0db782f0d00b55f06760", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2619047619, "max_line_length": 88, "alphanum_fraction": 0.6647681041, "include": true, "reason": "import numpy", "num_tokens": 334}
|
#DISCLAIMER: THIS CODE IS A DIDACTIC EXAMPLE; IT CONTAINS NO ERROR HANDLING, NO SOPHISTICATION AND NO
#            PERFORMANCE IMPROVEMENTS. ALL USES OF EXTERNAL LIBRARIES CAN BE IMPROVED IN THEIR IMPLEMENTATION.
# ===================================================================================
import matplotlib.pyplot as plt
import numpy as np
import csv
import ee
# FILES TO USE
# ==================================================================================
workdir="/home/alfredo/Escritorio/desafiosAgTech2020/"
train_csv_name = workdir+"data_train_r.csv"
# OPEN THE GEE RASTER IMAGE
# ==================================================================================
ee.Initialize()
S2_collection = ee.ImageCollection("COPERNICUS/S2_SR") \
.filterBounds(ee.Geometry.Point(-61.9055,-33.6756)) \
.filterDate('2020-01-01', '2020-01-31') \
.sort('CLOUDY_PIXEL_PERCENTAGE') \
    .first()
S2_info = S2_collection.getInfo()['id']
imagen = ee.Image(S2_info)
# OPEN THE TRAINING AND TEST POINTS
# ==================================================================================
puntos_train=list()
print("Busco datos para los puntos de entrenamiento")
# Esta parte es lenta porque se busca de a un punto! Los invito a mejorarla.
with open(train_csv_name, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if (row['Campania']=='19/20'):
p = ee.Geometry.Point(float(row['Longitud']),float(row['Latitud']))
data = imagen.select("B2","B3","B4","B8","B11","B12").reduceRegion(ee.Reducer.first(),p,10).getInfo()
datos = np.asarray(list(data.values()))
puntos_train.append({'lat':row['Latitud'],'lon':row['Longitud'],
'cultivo':row['Cultivo'],'camp':row['Campania'],
                                 'datos':datos[[2,3,4,5,0,1]]}) # reorder the data because GEE returned the SWIR bands first
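# A faster alternative (untested sketch): build one ee.FeatureCollection from all
# the points and sample them in a single server-side call with
# ee.Image.sampleRegions instead of one reduceRegion call per point:
# fc = ee.FeatureCollection([ee.Feature(ee.Geometry.Point(lon, lat))
#                            for lon, lat in point_coordinates])  # point_coordinates is hypothetical
# samples = imagen.select("B2","B3","B4","B8","B11","B12") \
#     .sampleRegions(collection=fc, scale=10).getInfo()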
# GET THE PIXEL VALUES
# =================================================================================
valores_pixeles_entrenamiento = np.asarray([d['datos'] for d in puntos_train])
clase_entrenamiento = [d['cultivo'] for d in puntos_train]
# PLOT
# =================================================================================
plt.plot(np.array(np.transpose(valores_pixeles_entrenamiento[np.array(clase_entrenamiento)=='M',:])),'r',alpha=0.3)
plt.plot(np.array(np.transpose(valores_pixeles_entrenamiento[np.array(clase_entrenamiento)=='S',:])),'g',alpha=0.3)
plt.xticks(np.arange(6),("B","G","R","NIR","SWIR1","SWIR2"))
plt.show()
|
{"hexsha": "7d0e750d95b39baa0de1be2a2fa6fcd539a08ac8", "size": 2722, "ext": "py", "lang": "Python", "max_stars_repo_path": "ejemplo3.py", "max_stars_repo_name": "InoveAlumnos/desafiosAgTech2020", "max_stars_repo_head_hexsha": "f3cb21db12516dcf53b196ece5e40a3336d1a044", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-10T21:43:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T10:44:17.000Z", "max_issues_repo_path": "ejemplo3.py", "max_issues_repo_name": "camposalfredo/desafiosAgTech2020", "max_issues_repo_head_hexsha": "f3cb21db12516dcf53b196ece5e40a3336d1a044", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ejemplo3.py", "max_forks_repo_name": "camposalfredo/desafiosAgTech2020", "max_forks_repo_head_hexsha": "f3cb21db12516dcf53b196ece5e40a3336d1a044", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-11-10T13:55:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-04T04:14:10.000Z", "avg_line_length": 44.6229508197, "max_line_length": 123, "alphanum_fraction": 0.536002939, "include": true, "reason": "import numpy", "num_tokens": 661}
|
"""
This is an implementation of Neural Network Dynamics for Model-Based Deep Reinforcement Learning with Model-Free Fine-Tuning.
See https://arxiv.org/abs/1708.02596
"""
import torch
import torch.nn as nn
import numpy as np
from machina import loss_functional as lf
from machina.utils import detach_tensor_dict
from machina import logger
def update_dm(dm, optim_dm, batch, target='next_obs', td=True):
dm_loss = lf.dynamics(dm, batch, target=target, td=td)
optim_dm.zero_grad()
dm_loss.backward()
optim_dm.step()
return dm_loss.detach().cpu().numpy()
def train_dm(traj, dyn_model, optim_dm, epoch=60, batch_size=512, target='next_obs', td=True, num_epi_per_seq=1, log_enable=True):
"""
Train function for dynamics model.
Parameters
----------
traj : Traj
        On-policy trajectory.
dyn_model : Model
dynamics model.
optim_dm : torch.optim.Optimizer
Optimizer for dynamics model.
    epoch : int
        Number of iterations.
    batch_size : int
        Batch size, i.e. the number of samples per update.
    target : str
        Target of the prediction, either next_obs or rews.
    td : bool
        If True, dyn_model learns the temporal difference of the target.
num_epi_per_seq : int
Number of episodes in one sequence for rnn.
log_enable: bool
If True, enable logging
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
dm_losses = []
if log_enable:
logger.log("Optimizing...")
batch_size = min(batch_size, traj.num_epi)
if dyn_model.rnn:
iterator = traj.random_batch_rnn(
batch_size=batch_size, epoch=epoch)
else:
iterator = traj.random_batch(batch_size, epoch)
for batch in iterator:
dm_loss = update_dm(
dyn_model, optim_dm, batch, target=target, td=td)
dm_losses.append(dm_loss)
if log_enable:
logger.log("Optimization finished!")
return dict(DynModelLoss=dm_losses)
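# Minimal usage sketch (assumes an on-policy `traj` and a dynamics model
# `dyn_model` built elsewhere; the learning rate is an arbitrary choice):
# optim_dm = torch.optim.Adam(dyn_model.parameters(), lr=1e-3)
# result = train_dm(traj, dyn_model, optim_dm, epoch=10, batch_size=256)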
|
{"hexsha": "5a15c6ed1c7a81fe3b6c9b2516b047a39abe58e9", "size": 2066, "ext": "py", "lang": "Python", "max_stars_repo_path": "machina/algos/mpc.py", "max_stars_repo_name": "krish-dx/machina", "max_stars_repo_head_hexsha": "f93bb6f5aca1feccd71fc509bd6370d2015e2d85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 302, "max_stars_repo_stars_event_min_datetime": "2019-03-13T10:21:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:01:46.000Z", "max_issues_repo_path": "machina/algos/mpc.py", "max_issues_repo_name": "krish-dx/machina", "max_issues_repo_head_hexsha": "f93bb6f5aca1feccd71fc509bd6370d2015e2d85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2019-03-13T09:45:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T18:32:00.000Z", "max_forks_repo_path": "machina/algos/mpc.py", "max_forks_repo_name": "krish-dx/machina", "max_forks_repo_head_hexsha": "f93bb6f5aca1feccd71fc509bd6370d2015e2d85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2019-03-17T01:59:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T01:13:40.000Z", "avg_line_length": 27.9189189189, "max_line_length": 131, "alphanum_fraction": 0.6442400774, "include": true, "reason": "import numpy", "num_tokens": 470}
|
From iris.algebra Require Import frac.
From iris.proofmode Require Import tactics monpred.
From iris.base_logic Require Import base_logic lib.fancy_updates.
Section base_logic_tests.
Context {M : ucmra}.
Implicit Types P Q R : uPred M.
(* Test scopes for bupd *)
Definition use_bupd_uPred (n : nat) : uPred M :=
□ |==> ∃ m : nat , ⌜ n = 2 ⌝.
Definition use_plainly_uPred (n : nat) : uPred M :=
■ |==> ∃ m : nat , ⌜ n = 2 ⌝.
(* Test scopes inside big-ops *)
Definition big_op_scope_uPred_1 (xs : list nat) : uPred M :=
[∗ list] _ ↦ x ∈ xs, True.
Definition big_op_scope_uPred_2 (xs : list nat) : uPred M :=
[∗ list] x; y ∈ xs; xs, True.
Definition big_op_scope_uPred_3 (m : gmap nat nat) : uPred M :=
[∗ map] _ ↦ x ∈ m, True.
Definition big_op_scope_uPred_4 (m : gmap nat nat) : uPred M :=
[∗ map] x; y ∈ m; m, True.
End base_logic_tests.
Section iris_tests.
Context `{!invGS_gen hlc Σ}.
Implicit Types P Q R : iProp Σ.
(* Test scopes for bupd and fupd *)
Definition use_bupd_iProp (n : nat) : iProp Σ :=
□ |==> ∃ m : nat , ⌜ n = 2 ⌝.
Definition use_fupd_iProp (n : nat) : iProp Σ :=
□ |={⊤}=> ∃ m : nat , ⌜ n = 2 ⌝.
(* Test scopes inside big-ops *)
Definition big_op_scope_iProp_1 (xs : list nat) : iProp Σ :=
[∗ list] _ ↦ x ∈ xs, True.
Definition big_op_scope_iProp_2 (xs : list nat) : iProp Σ :=
[∗ list] x; y ∈ xs; xs, True.
Definition big_op_scope_iProp_3 (m : gmap nat nat) : iProp Σ :=
[∗ map] _ ↦ x ∈ m, True.
Definition big_op_scope_iProp_4 (m : gmap nat nat) : iProp Σ :=
[∗ map] x; y ∈ m; m, True.
End iris_tests.
|
{"author": "amintimany", "repo": "iris", "sha": "03eaffa3b28bffc561b93f30a3ba40bab8ae1fd1", "save_path": "github-repos/coq/amintimany-iris", "path": "github-repos/coq/amintimany-iris/iris-03eaffa3b28bffc561b93f30a3ba40bab8ae1fd1/tests/iris_notation.v"}
|
"""Toy environment for testing option learning."""
import logging
from typing import Callable, ClassVar, Dict, List, Optional, Sequence, Set
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gym.spaces import Box
from predicators.src import utils
from predicators.src.envs import BaseEnv
from predicators.src.settings import CFG
from predicators.src.structs import Action, Array, GroundAtom, Object, \
ParameterizedOption, Predicate, State, Task, Type
class TouchPointEnv(BaseEnv):
"""An environment where a 2D point mass robot must reach a static 2D point.
The action space is 1D, denoting the angle of movement. The
magnitude of the movement is constant. The point is considered
touched if the distance between the center of the robot and the
center of the target point is less than a certain threshold, which
is greater than the action magnitude.
"""
x_lb: ClassVar[float] = 0.0
x_ub: ClassVar[float] = 1.0
y_lb: ClassVar[float] = 0.0
y_ub: ClassVar[float] = 1.0
action_magnitude: ClassVar[float] = 0.1
# The target point is touched if the distance between the robot and target
# is less than action_magnitude * touch_multiplier.
touch_multiplier: ClassVar[float] = 1.5
def __init__(self) -> None:
super().__init__()
# Types
self._robot_type = Type("robot", ["x", "y"])
self._target_type = Type("target", ["x", "y"])
# Predicates
self._Touched = Predicate("Touched",
[self._robot_type, self._target_type],
self._Touched_holds)
# Options
self._MoveTo = ParameterizedOption(
"MoveTo",
types=[self._robot_type, self._target_type],
params_space=Box(0, 1, (0, )),
policy=self._MoveTo_policy,
initiable=lambda s, m, o, p: True,
terminal=self._MoveTo_terminal)
# Static objects (always exist no matter the settings).
self._robot = Object("robby", self._robot_type)
self._target = Object("target", self._target_type)
@classmethod
def get_name(cls) -> str:
return "touch_point"
def simulate(self, state: State, action: Action) -> State:
assert self.action_space.contains(action.arr)
rot, = action.arr
x = state.get(self._robot, "x")
y = state.get(self._robot, "y")
new_x = x + np.cos(rot) * self.action_magnitude
new_y = y + np.sin(rot) * self.action_magnitude
new_x = np.clip(new_x, self.x_lb, self.x_ub)
new_y = np.clip(new_y, self.y_lb, self.y_ub)
next_state = state.copy()
next_state.set(self._robot, "x", new_x)
next_state.set(self._robot, "y", new_y)
return next_state
def _generate_train_tasks(self) -> List[Task]:
return self._get_tasks(num=CFG.num_train_tasks, rng=self._train_rng)
def _generate_test_tasks(self) -> List[Task]:
return self._get_tasks(num=CFG.num_test_tasks, rng=self._test_rng)
@property
def predicates(self) -> Set[Predicate]:
return {self._Touched}
@property
def goal_predicates(self) -> Set[Predicate]:
return {self._Touched}
@property
def types(self) -> Set[Type]:
return {self._robot_type, self._target_type}
@property
def options(self) -> Set[ParameterizedOption]:
return {self._MoveTo}
@property
def action_space(self) -> Box:
# An angle in radians.
return Box(-np.pi, np.pi, (1, ))
def render_state_plt(
self,
state: State,
task: Task,
action: Optional[Action] = None,
caption: Optional[str] = None) -> matplotlib.figure.Figure:
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
robot_color = "red"
target_color = "blue"
rad = (self.touch_multiplier * self.action_magnitude) / 2
robot_x = state.get(self._robot, "x")
robot_y = state.get(self._robot, "y")
target_x = state.get(self._target, "x")
target_y = state.get(self._target, "y")
robot_circ = plt.Circle((robot_x, robot_y), rad, color=robot_color)
target_circ = plt.Circle((target_x, target_y), rad, color=target_color)
ax.add_patch(robot_circ)
ax.add_patch(target_circ)
ax.set_xlim(self.x_lb - rad, self.x_ub + rad)
ax.set_ylim(self.y_lb - rad, self.y_ub + rad)
title = f"{robot_color} = robot, {target_color} = target"
if caption is not None:
title += f";\n{caption}"
plt.suptitle(title, wrap=True)
plt.tight_layout()
return fig
def _get_tasks(self, num: int, rng: np.random.Generator) -> List[Task]:
# There is only one goal in this environment.
goal_atom = GroundAtom(self._Touched, [self._robot, self._target])
goal = {goal_atom}
        # The initial positions of the robot and the target vary. The only constraint
# is that the initial positions should be far enough away that the goal
# is not initially satisfied.
tasks: List[Task] = []
while len(tasks) < num:
state = utils.create_state_from_dict({
self._robot: {
"x": rng.uniform(self.x_lb, self.x_ub),
"y": rng.uniform(self.y_lb, self.y_ub),
},
self._target: {
"x": rng.uniform(self.x_lb, self.x_ub),
"y": rng.uniform(self.y_lb, self.y_ub),
},
})
# Make sure goal is not satisfied.
if not goal_atom.holds(state):
tasks.append(Task(state, goal))
return tasks
@staticmethod
def _MoveTo_policy(state: State, memory: Dict, objects: Sequence[Object],
params: Array) -> Action:
# Move in the direction of the target.
del memory, params # unused
robot, target = objects
rx = state.get(robot, "x")
ry = state.get(robot, "y")
tx = state.get(target, "x")
ty = state.get(target, "y")
dx = tx - rx
dy = ty - ry
rot = np.arctan2(dy, dx) # between -pi and pi
return Action(np.array([rot], dtype=np.float32))
def _MoveTo_terminal(self, state: State, memory: Dict,
objects: Sequence[Object], params: Array) -> bool:
del memory, params # unused
return self._Touched_holds(state, objects)
def _Touched_holds(self, state: State, objects: Sequence[Object]) -> bool:
robot, target = objects
rx = state.get(robot, "x")
ry = state.get(robot, "y")
tx = state.get(target, "x")
ty = state.get(target, "y")
dist = np.sqrt((rx - tx)**2 + (ry - ty)**2)
return dist < self.action_magnitude * self.touch_multiplier
def get_event_to_action_fn(
self) -> Callable[[State, matplotlib.backend_bases.Event], Action]:
logging.info("Controls: mouse click to move")
def _event_to_action(state: State,
event: matplotlib.backend_bases.Event) -> Action:
assert event.key is None, "Keyboard controls not allowed."
rx = state.get(self._robot, "x")
ry = state.get(self._robot, "y")
tx = event.xdata
ty = event.ydata
assert tx is not None and ty is not None, "Out-of-bounds click"
dx = tx - rx
dy = ty - ry
rot = np.arctan2(dy, dx) # between -pi and pi
return Action(np.array([rot], dtype=np.float32))
return _event_to_action
|
{"hexsha": "33bf3a4f8bb61b675a3837db54d9869796484ee5", "size": 7705, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/envs/touch_point.py", "max_stars_repo_name": "Learning-and-Intelligent-Systems/predicators", "max_stars_repo_head_hexsha": "0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-11-20T16:35:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:49:52.000Z", "max_issues_repo_path": "src/envs/touch_point.py", "max_issues_repo_name": "Learning-and-Intelligent-Systems/predicators", "max_issues_repo_head_hexsha": "0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 214, "max_issues_repo_issues_event_min_datetime": "2021-10-12T01:17:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:18:36.000Z", "max_forks_repo_path": "src/envs/touch_point.py", "max_forks_repo_name": "Learning-and-Intelligent-Systems/predicators", "max_forks_repo_head_hexsha": "0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-15T20:24:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T20:24:17.000Z", "avg_line_length": 38.525, "max_line_length": 79, "alphanum_fraction": 0.5976638546, "include": true, "reason": "import numpy", "num_tokens": 1894}
|
\section{Problem Statement}
This \textbf{Report 4} presents an essay on the literature that might support our research, evaluating several research works. The report focuses on the topic of \textbf{Interaction Methods} in the context of \textbf{Recommender Systems}. Both topics are of chief importance to our research work and should therefore be analysed.
|
{"hexsha": "a731d1fb30e6008864af4d9aa1edd1dfe3a11408", "size": 363, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "state-of-the-art/report_4/sections/problem_statement.tex", "max_stars_repo_name": "mida-project/reading-reports", "max_stars_repo_head_hexsha": "f65c20947ba85df1f75aa86eab2b622230d8eda7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-26T14:14:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T09:55:38.000Z", "max_issues_repo_path": "state-of-the-art/report_4/sections/problem_statement.tex", "max_issues_repo_name": "mida-project/reading-reports", "max_issues_repo_head_hexsha": "f65c20947ba85df1f75aa86eab2b622230d8eda7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "state-of-the-art/report_4/sections/problem_statement.tex", "max_forks_repo_name": "mida-project/reading-reports", "max_forks_repo_head_hexsha": "f65c20947ba85df1f75aa86eab2b622230d8eda7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 121.0, "max_line_length": 334, "alphanum_fraction": 0.8181818182, "num_tokens": 75}
|
#!/usr/bin/env python3
import sqlite3
import numpy as np
import altair as alt
import sys
import os
import pandas as pd
DIR_ENVVAR = 'TOPK_DIR'
try:
BASE_DIR = os.environ[DIR_ENVVAR]
except KeyError:
print("You should set the {} environment variable to a directory".format(DIR_ENVVAR))
sys.exit(1)
DATASET_DIR = os.path.join(BASE_DIR, "datasets")
RESULT_FILES_DIR = os.path.join(BASE_DIR, "output")
def get_db():
db = sqlite3.connect(os.path.join(BASE_DIR, "join-results.db"))
return db
def get_pareto():
def compute_pareto(gdata):
gdata = gdata.sort_values(['time_total_s'], ascending=True)
points = np.vstack(
(gdata['recall'], gdata['time_total_s'])
).transpose()
# now we seek the vertices of the pareto
# frontier to select from the `gdata` object
indices = []
last_r = 0
for i, (r, t) in enumerate(points):
if r > last_r:
last_r = r
indices.append(i)
return gdata[['recall', 'time_total_s', 'params']].iloc[indices]
data = pd.read_sql("select dataset, workload, k, algorithm, params, threads, recall, time_index_s, time_join_s, time_index_s + time_join_s as time_total_s from main;", get_db())
pareto = data.groupby(['dataset', 'workload', 'k', 'algorithm', 'threads']).apply(compute_pareto)
return pareto.reset_index()
def plot_local_topk():
db = get_db()
all = pd.read_sql("select dataset, workload, k, algorithm, params, threads, recall, time_index_s, time_join_s, time_index_s + time_join_s as time_total_s from main;", db)
data = get_pareto()
datasets = [
t[0]
for t in db.execute("select distinct dataset from main order by 1;").fetchall()
]
input_dropdown = alt.binding_select(options=datasets, name='Dataset: ')
selection = alt.selection_single(fields=['dataset'], bind=input_dropdown)
chart_pareto = alt.Chart(data).transform_filter(selection).mark_line(point=True).encode(
x=alt.X('recall', type='quantitative', scale=alt.Scale(domain=(0, 1))),
y=alt.Y('time_total_s', type='quantitative', scale=alt.Scale(type='log')),
color='algorithm:N',
tooltip=[
'algorithm:N',
'params:N',
'recall:Q',
'time_total_s:Q'
]
)
chart_all = alt.Chart(all).transform_filter(selection).mark_point().encode(
x=alt.X('recall', type='quantitative', scale=alt.Scale(domain=(0, 1))),
y=alt.Y('time_total_s', type='quantitative', scale=alt.Scale(type='log')),
color='algorithm:N',
tooltip=[
'algorithm:N',
'params:N',
'recall:Q',
'time_total_s:Q'
]
)
chart = alt.layer(chart_all, chart_pareto).properties(
width=1000,
height=600,
title="Recall vs. time"
).add_selection(selection)
chart.save(os.path.join(BASE_DIR, "plot.html"))
if __name__ == "__main__":
plot_local_topk()
|
{"hexsha": "497d3b3cd9f2446d25386e0d624716686e3c8dc8", "size": 3054, "ext": "py", "lang": "Python", "max_stars_repo_path": "join-experiments/plot.py", "max_stars_repo_name": "Cecca/puffinn", "max_stars_repo_head_hexsha": "c613cd2e82ae334b5553099496d075cc16796fbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "join-experiments/plot.py", "max_issues_repo_name": "Cecca/puffinn", "max_issues_repo_head_hexsha": "c613cd2e82ae334b5553099496d075cc16796fbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-18T06:49:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T05:10:10.000Z", "max_forks_repo_path": "join-experiments/plot.py", "max_forks_repo_name": "Cecca/puffinn", "max_forks_repo_head_hexsha": "c613cd2e82ae334b5553099496d075cc16796fbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8125, "max_line_length": 181, "alphanum_fraction": 0.6277013752, "include": true, "reason": "import numpy,from scipy", "num_tokens": 770}
|
[STATEMENT]
lemma ENR_delete:
fixes S :: "'a::euclidean_space set"
shows "ENR S \<Longrightarrow> ENR(S - {a})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ENR S \<Longrightarrow> ENR (S - {a})
[PROOF STEP]
by (blast intro: ENR_openin openin_delete openin_subtopology_self)
|
{"llama_tokens": 114, "file": null, "length": 1}
|
C @(#)gettrf.f 20.3 2/13/96
subroutine gettrf (jt, lt, nt, senstl, senstt, pout, dpovld,
1 comp, tx)
C
C This subroutine computes compensation COMP and transfer TX in
C three modes:
C
C       1.  JT = 0: No outage occurs, i.e., compute the base case
C                   transfer.
C 2. LT = 0: No overload occurs, i.e., compute the compensation
C COMP
C       3.  Normal: compute the compensation COMP and the transfer
C TX to alleviate overload DPOVLD.
C
C Input parameters:
C
C JT - outage index
C LT - overload index
C NT - transfer index
C SENSTL(2,*) - the Sensitivities G(x)**-1 * G(u) for outages
C        SENSTT(*)    - the Sensitivities G(x)**-1 * G(t) for transfer
C POUT - the base line flow of the outaged line
C DPOVLD - the required power excursion in the monitored line
C
C Output parameters:
C
C COMP - the outage compensation to simulate outage Pout
C TX - the transfer to alleviate the overload dPovld
C
C The general form is
C
C | L(l) - L(x)*SENSTL -L(X)*SENSTT || dl | | -L(0) |
C | || | = | |
C | - F(x)*SENSTL -F(x)*SENSTT || dt | | -dF(0) |
C
C where dL = COMP and dt = TX.
C
include 'ipfinc/ecvar.inc'
c Global variables used:
c idswb
include 'ipfinc/lfiles.inc'
c Global variables used:
c dbug
include 'ipfinc/transf.inc'
c Global variables used:
c fdata, ldata
double precision senstl(2,*)
c
real senstt(*)
c
if (jt .eq. 0) then
C
C Determine transfer without outage (base overload)
C
comp = 0.0
if (lt .eq. 0) then
tx = 0.0
else
k1 = kfdata(1,lt)
k2 = kfdata(2,lt)
x = -fdata(11,lt)*senstt(k1) - fdata(13,lt)*senstt(k2)
if (x .eq. 0.0) then
tx = sign (1.0e10,-dpovld)
else
tx = -dpovld / x
endif
endif
else if (nt .eq. 0) then
C
C Determine compensation without transfer (outage without
C corrective action)
C
tx = 0.0
if (jt .eq. 0) then
comp = 0.0
else
k1 = kldata(1,jt)
k2 = kldata(2,jt)
x = 1.0 - ldata(11,jt) * senstl(1,k1) -
1 ldata(13,jt) * senstl(1,k2)
if (x .eq. 0.0) then
comp = sign (1.0e10,-pout)
else
comp = -pout / x
endif
endif
else if (lt .eq. 0) then
comp = 0.0
tx = 0.0
else
C
C Determine compensation and transfer simultaneously
C
k1 = kldata(1,jt)
k2 = kldata(2,jt)
a11 = 1.0 - ldata(11,jt) * senstl(1,k1)
1 - ldata(13,jt) * senstl(1,k2)
a12 = -ldata(11,jt) * senstt(k1) - ldata(13,jt) * senstt(k2)
k1 = kfdata(1,lt)
k2 = kfdata(2,lt)
a21 = -fdata(11,lt) * senstl(1,k1)
1 - fdata(13,lt) * senstl(1,k2)
a22 = -fdata(11,lt) * senstt(k1) - fdata(13,lt) * senstt(k2)
denom = a11 * a22 - a21 * a12
if (abs (denom) .le. 1.0e-6) then
comp = sign (1.0e10,-pout)
tx = sign (1.0e10,-dpovld)
else
comp = (a22 * (-pout) - a12 * (-dpovld)) / denom
tx = (-a21 * (-pout) + a11 * (-dpovld)) / denom
endif
endif
if (idswb .gt. 0) then
write (dbug,100) jt, lt, nt, pout, dpovld, comp, tx
100 format (' GETTRF/ JT,LT,NT,POUT,DPOVLD,COMP,TX ',
1 3i5,4e12.5)
endif
return
end
|
{"hexsha": "3e22c19041cfb99371c39f2ab5cce83f6d91c383", "size": 3862, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/gettrf.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/gettrf.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/gettrf.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 30.4094488189, "max_line_length": 72, "alphanum_fraction": 0.4756602796, "num_tokens": 1280}
|
function [ar,e,dc]=v_lpccovar(s,p,t,w)
%V_LPCCOVAR performs covariance LPC analysis [AR,E,DC]=(S,P,T)
%
% Inputs: S(NS) is the input signal
% P is the order (default: 12)
% T(NF,:) specifies the frames size details: each row specifies one frame
% T can be a cell array if rows have unequal numbers of values
% T(:,1) gives the start of the analysis interval: must be >P
%              T(:,2) gives the end of the analysis interval [default: t(:+1,1)-1]
% subsequent pairs can be used to specify multiple disjoint segments
% If T is omitted, T(1,1)=P+1, T(1,2)=NS;
% The elements of t need not be integers.
% W(NS) The error at each sample is weighted by W^2 (default: 1)
%
% Outputs: AR(NF,P+1) are the AR coefficients with AR(:,1) = 1
% E(NF,4) each row is [Er Es Pr Ps] and gives the energy ("E") and power ("P")
% in the input signal window ("s") and in the LPC residual "r".
% The 'gain' of the LPC filter is g=sqrt(Pr); x=filter(g,ar,randn(:,1)) will
% generate noise with approximately the same power spectrum as the input s.
% DC is the DC component of the signal S. If this output is included,
% the LPC equations are modified to include a DC offset.
% Notes:
%
% (1a) If no DC output is specified AR(j,:)*S(n-(0:P)) ~ 0 or, equivalently,
% S(n) ~ -AR(j,2:P)*S(n-(1:P)) where T(j,1) <= n <= T(j,2).
% (1b) If a DC output is specified AR(j,:)*(S(n-(0:P))-DC) ~ 0 or, equivalently,
% S(n) ~ DC - AR(j,2:P)*(S(n-(1:P))-DC) = DC*sum(AR,j,:)) - AR(j,2:P)*S(n-(1:P))
% where T(j,1) <= n <= T(j,2).
%
% (2) For speech processing P should be at least 2*F*L/C where F is the sampling
% frequency, L the vocal tract length and C the speed of sound. For a typical
% male (l=17 cm) this gives f/1000.
%
% (3) Each analysis frame should contain at least 2P samples. If note (1) is followed
% this implies at least 2 ms of speech signal per frame.
%
% (4) It can be advantageous to restrict the analysis regions to time intervals
% when the glottis is closed (closed-phase analysis). This can be achieved by
% setting the T input parameter appropriately. If the closed-phase is shorter than
% 2 ms then two or more successive closed-phases should be used by defining 4 or more
% elements in the corresponding row of T.
%
% (5) A previous version of this routine allowed T() to have a single row which would
% be replicated for the entire file length. This has been removed because it gave rise
% to an ambiguity.
% Bugs: should really detect a singular matrix and reduce the order accordingly
% Copyright (C) Mike Brookes 1995
% Version: $Id: v_lpccovar.m 10865 2018-09-21 17:22:45Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
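% Example (sketch): 12th-order covariance LPC over samples 13..length(s) of a
% signal s (the analysis start must exceed the order P):
%    [ar,e]=v_lpccovar(s,12,[13 length(s)]);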
s = s(:); % make it a column vector
if nargin < 2 p=12; end;
if nargin < 3 t=[p+1 length(s)]; end;
wq = nargin>3;
[nf,ng]=size(t);
if iscell(t)
t{nf+1}=length(s)+1;
else
if rem(ng,2)
t(:,end+1)=[t(2:nf,1)-1; length(s)];
end
end
ar=zeros(nf,p+1);
ar(:,1)=1;
e=zeros(nf,4);
dc=zeros(nf,1);
d0=nargout >2;
rs=(1:p);
for jf=1:nf
if iscell(t)
tj=t{jf};
if rem(length(tj),2)
tj(end+1)=t{jf+1}(1)-1;
end
else
tj=t(jf,:);
end
ta = ceil(tj(1));
tb = floor(tj(2));
cs = (ta:tb).';
for js=3:2:length(tj)
ta = ceil(tj(js));
tb = floor(tj(js+1));
cs = [cs; (ta:tb).'];
end
%disp(cs([logical(1); (cs(2:end-1)~=cs(1:end-2)+1)|(cs(2:end-1)~=cs(3:end)-1); logical(1)])');
nc = length(cs);
pp=min(p,nc-d0);
dm=zeros(nc,pp); % predefine shape
dm(:) = s(cs(:,ones(1,pp))-rs(ones(nc,1),1:pp));
if nargout>2
if wq
dm = [ones(nc,1) dm].*w(cs(:,ones(1,1+pp)));
sc=(s(cs).*w(cs));
aa = (dm\sc).';
else
dm = [ones(nc,1) dm];
sc=s(cs);
aa = (dm\sc).';
end
ar(jf,2:pp+1) = -aa(2:pp+1);
e(jf,1)=sc.'*(sc - dm*aa.');
e(jf,2)=sc.'*sc;
e(jf,3:4)=e(jf,1:2)/nc;
dc(jf)=aa(1)/sum(ar(jf,:));
else
if wq
dm = dm.*w(cs(:,ones(1,pp)));
sc=(s(cs).*w(cs));
aa = (dm\sc).';
else
sc=s(cs);
aa = (dm\sc).';
end;
ar(jf,2:pp+1) = -aa;
if nargout~=1
e(jf,1)=real(sc'*(sc - dm*aa.'));
e(jf,2)=real(sc'*sc);
e(jf,3:4)=e(jf,1:2)/nc;
end
end
end
if ~nargout
v_lpcar2ff(repmat(sqrt(e(:,3).^(-1)),1,p+1).*ar,255);
ylabel('Power (dB)');
end
|
{"author": "ImperialCollegeLondon", "repo": "sap-voicebox", "sha": "28f2654b7584f724277ec81de533debe28ff51ac", "save_path": "github-repos/MATLAB/ImperialCollegeLondon-sap-voicebox", "path": "github-repos/MATLAB/ImperialCollegeLondon-sap-voicebox/sap-voicebox-28f2654b7584f724277ec81de533debe28ff51ac/voicebox/v_lpccovar.m"}
|
#Solving maze with morphological transformation
"""
usage:Solving maze with morphological transformation
needed module:cv2/numpy/sys
ref:
1.http://www.mazegenerator.net/
2.http://blog.leanote.com/post/leeyoung/539a629aab35bc44e2000000
@author:Robin Chen
"""
import cv2
import numpy as np
import sys
def SolvingMaze(image):
#load an image
    img = cv2.imread(image)
    # cv2.imread returns None on failure instead of raising an exception
    if img is None:
        print('Error: can not open the image!')
        sys.exit()
#show image
#cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('maze_image',img)
#convert to gray
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#show gray image
#cv2.imshow('gray_image',gray_image)
#convert to binary image
retval,binary_image = cv2.threshold(gray_image, 10,255, cv2.THRESH_BINARY_INV)
#cv2.imshow('binary_image',binary_image)
_, contours,hierarchy = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if len(contours) != 2:
sys.exit("This is not a 'perfect maze' with just 2 walls!")
h, w, d = img.shape
#The first wall
path = np.zeros((h,w),dtype = np.uint8)#cv2.CV_8UC1
cv2.drawContours(path, contours, 0, (255,255,255),-1)#cv2.FILLED
#cv2.imshow('The first wall',path)
#Dilate the wall by a few pixels
kernel = np.ones((19, 19), dtype = np.uint8)
path = cv2.dilate(path, kernel)
#cv2.imshow('Dilate the wall by a few pixels',path)
#Erode by the same amount of pixels
    path_erode = cv2.erode(path, kernel)
#cv2.imshow('Erode by the same amount of pixels',path_erode)
#absdiff
    path = cv2.absdiff(path, path_erode)
#cv2.imshow('absdiff',path)
#solution
    channels = list(cv2.split(img))  # ensure a mutable sequence before in-place edits
    channels[0] &= ~path
    channels[1] &= ~path
    channels[2] |= path
    dst = cv2.merge(channels)
    cv2.imwrite("solution.png", dst)
    cv2.imshow("solution", dst)
#waiting for any key to close windows
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
image = sys.argv[-1]
SolvingMaze(image)
|
{"hexsha": "12436bada3c6b095817ae7e409ad21a63f382b4e", "size": 2045, "ext": "py", "lang": "Python", "max_stars_repo_path": "mazesolvermorph.py", "max_stars_repo_name": "huseyince/Image-Processing-and-Maze-Solving", "max_stars_repo_head_hexsha": "627b0a90b30e58167198f514c574075a85b2430d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-07-28T12:37:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T07:22:12.000Z", "max_issues_repo_path": "mazesolvermorph.py", "max_issues_repo_name": "huseyince/Image-Processing-and-Maze-Solving", "max_issues_repo_head_hexsha": "627b0a90b30e58167198f514c574075a85b2430d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mazesolvermorph.py", "max_forks_repo_name": "huseyince/Image-Processing-and-Maze-Solving", "max_forks_repo_head_hexsha": "627b0a90b30e58167198f514c574075a85b2430d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-05T19:58:01.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-05T19:58:01.000Z", "avg_line_length": 28.0136986301, "max_line_length": 101, "alphanum_fraction": 0.6841075795, "include": true, "reason": "import numpy", "num_tokens": 607}
|
import sys
import pickle
import json
from pathlib import Path
from typing import Dict, List
from datetime import datetime
import h5py
import pandas as pd
import numpy as np
import scipy as sp
from tqdm import tqdm
from .datasets import LumpedBasin
from .datautils import store_static_attributes
def create_h5_files(data_root: Path,
out_file: Path,
basins: List,
dates: List,
forcing_vars: List,
seq_length: int,
allow_negative_target: bool):
"""Creates H5 training set.
Parameters
----------
data_root : Path
Path to the main directory of the data set
out_file : Path
Path of the location where the hdf5 file should be stored
basins : List
List containing the gauge ids
dates : List
List of start and end date of the discharge period to use, when combining the data.
forcing_vars : List
Names of forcing variables
seq_length : int
Length of the requested input sequences
allow_negative_target : bool, optional
If False, will remove samples with negative target value from the dataset.
Raises
------
FileExistsError
If file at this location already exists.
"""
if out_file.is_file():
raise FileExistsError(f"File already exists at {out_file}")
with h5py.File(out_file, 'w') as out_f:
input_data = out_f.create_dataset('input_data',
shape=(0, seq_length, len(forcing_vars)),
maxshape=(None, seq_length, len(forcing_vars)),
chunks=True,
dtype=np.float32,
compression='gzip')
target_data = out_f.create_dataset('target_data',
shape=(0, 1),
maxshape=(None, 1),
chunks=True,
dtype=np.float32,
compression='gzip')
q_stds = out_f.create_dataset('q_stds',
shape=(0, 1),
maxshape=(None, 1),
dtype=np.float32,
compression='gzip',
chunks=True)
sample_2_basin = out_f.create_dataset('sample_2_basin',
shape=(0, ),
maxshape=(None, ),
dtype="S10",
compression='gzip',
chunks=True)
scalers = None
for basin in tqdm(basins, file=sys.stdout):
dataset = LumpedBasin(data_root=data_root,
basin=basin,
forcing_vars=forcing_vars,
is_train=True,
train_basins=basins,
seq_length=seq_length,
dates=dates,
scalers=scalers,
allow_negative_target=allow_negative_target,
with_attributes=False)
if len(dataset) == 0:
print (f"No data for basin {basin}. Skipping it.")
continue
# Reuse scalers across datasets to save computation time
if scalers is None:
scalers = dataset.input_scalers, dataset.output_scalers, dataset.static_scalers
num_samples = len(dataset)
total_samples = input_data.shape[0] + num_samples
# store input and output samples
input_data.resize((total_samples, seq_length, len(forcing_vars)))
target_data.resize((total_samples, 1))
input_data[-num_samples:, :, :] = dataset.x
target_data[-num_samples:, :] = dataset.y
# additionally store std of discharge of this basin for each sample
q_stds.resize((total_samples, 1))
q_std_array = np.array([dataset.q_std] * num_samples, dtype=np.float32).reshape(-1, 1)
q_stds[-num_samples:, :] = q_std_array
sample_2_basin.resize((total_samples, ))
str_arr = np.array([basin.encode("ascii", "ignore")] * num_samples)
sample_2_basin[-num_samples:] = str_arr
out_f.flush()
def store_results(user_cfg: Dict, run_cfg: Dict, results: Dict):
"""Stores prediction results in a pickle file.
Parameters
----------
user_cfg : Dict
Dictionary containing the user entered evaluation config
run_cfg : Dict
Dictionary containing the run config loaded from the cfg.json file
    results : Dict
        Dictionary containing the observed and predicted discharge.
"""
if run_cfg["no_static"]:
file_name = user_cfg["run_dir"] / f"results_no_static_seed{run_cfg['seed']}.p"
else:
if run_cfg["concat_static"]:
file_name = user_cfg["run_dir"] / f"results_concat_static_seed{run_cfg['seed']}.p"
else:
file_name = user_cfg["run_dir"] / f"results_seed{run_cfg['seed']}.p"
with (file_name).open('wb') as fp:
pickle.dump(results, fp)
print(f"Successfully stored results at {file_name}")
def prepare_data(cfg: Dict, basins: List) -> Dict:
"""Pre-processes training data.
Parameters
----------
cfg : Dict
Dictionary containing the run config
basins : List
List containing the gauge ids
Returns
-------
Dict
Dictionary containing the updated run config.
"""
# create database file containing the static basin attributes
cfg["db_path"] = cfg["run_dir"] / "static_attributes.db"
store_static_attributes(cfg["data_root"], db_path=cfg["db_path"],
attribute_names=cfg["static_attributes"])
# create .h5 files for train and validation data
cfg["train_file"] = cfg["train_dir"] / 'train_data.h5'
create_h5_files(data_root=cfg["data_root"],
out_file=cfg["train_file"],
basins=basins,
dates=[cfg["start_date"], cfg["end_date"]],
forcing_vars=cfg["forcing_attributes"],
seq_length=cfg["seq_length"],
allow_negative_target=cfg["allow_negative_target"])
return cfg
def setup_run(cfg: Dict) -> Dict:
"""Creates the folder structure for the experiment.
Parameters
----------
cfg : Dict
Dictionary containing the run config
Returns
-------
Dict
Dictionary containing the updated run config
"""
cfg["start_time"] = str(datetime.now())
if not cfg["run_dir"].is_dir():
cfg["train_dir"] = cfg["run_dir"] / 'data' / 'train'
cfg["train_dir"].mkdir(parents=True)
cfg["val_dir"] = cfg["run_dir"] / 'data' / 'val'
cfg["val_dir"].mkdir(parents=True)
else:
raise RuntimeError('There is already a folder at {}'.format(cfg["run_dir"]))
# dump a copy of cfg to run directory
with (cfg["run_dir"] / 'cfg.json').open('w') as fp:
temp_cfg = {}
for key, val in cfg.items():
if isinstance(val, Path):
temp_cfg[key] = str(val)
elif isinstance(val, pd.Timestamp):
temp_cfg[key] = val.strftime(format="%d%m%Y")
elif isinstance(val, np.ndarray):
temp_cfg[key] = val.tolist() # np.ndarrays are not serializable
elif 'param_dist' in key:
temp_dict = {}
for k, v in val.items():
if isinstance(v, sp.stats._distn_infrastructure.rv_frozen):
temp_dict[k] = f"{v.dist.name}{v.args}, *kwds={v.kwds}"
else:
temp_dict[k] = str(v)
temp_cfg[key] = str(temp_dict)
else:
temp_cfg[key] = val
json.dump(temp_cfg, fp, sort_keys=True, indent=4)
return cfg
def nse(qsim: np.ndarray, qobs: np.ndarray) -> float:
"""Calculates NSE, ignoring NANs in ``qobs``.
.. math::
\\text{NSE} =
1 - \\frac{\\sum_{t=1}^T{(q_s^t - q_o^t)^2}}{\\sum_{t=1}^T{(q_o^t - \\bar{q}_o)^2}}
Parameters
----------
qsim : np.ndarray
Predicted streamflow
qobs : np.ndarray
Ground truth streamflow
Returns
-------
nse : float
The prediction's NSE
Raises
------
ValueError
        If lengths of qsim and qobs are not equal.
"""
if len(qsim) != len(qobs):
raise ValueError(f"Lenghts of qsim {len(qsim)} and qobs {len(qobs)} mismatch.")
qsim = qsim[~np.isnan(qobs)]
qobs = qobs[~np.isnan(qobs)]
return 1 - (np.sum(np.square(qsim - qobs)) / np.sum(np.square(qobs - np.mean(qobs))))
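# Worked example (synthetic numbers): a perfect prediction gives NSE = 1,
# and predicting the mean of the observed series gives NSE = 0.
#
# qobs = np.array([1.0, 2.0, 3.0, np.nan, 5.0])
# nse(qobs.copy(), qobs)        # 1.0 (the NaN in qobs is masked out)
# nse(np.full(5, 2.75), qobs)   # 0.0 (2.75 is the mean of the valid qobs)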
|
{"hexsha": "e50b001ad6c4a3b43bede31e6c087011638c023c", "size": 9212, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlstream/utils.py", "max_stars_repo_name": "gauchm/mlstream", "max_stars_repo_head_hexsha": "37cd59e48a6324f6f96f31416a1e25bab7645e64", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-15T03:51:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-12T07:35:19.000Z", "max_issues_repo_path": "mlstream/utils.py", "max_issues_repo_name": "gauchm/mlstream", "max_issues_repo_head_hexsha": "37cd59e48a6324f6f96f31416a1e25bab7645e64", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlstream/utils.py", "max_forks_repo_name": "gauchm/mlstream", "max_forks_repo_head_hexsha": "37cd59e48a6324f6f96f31416a1e25bab7645e64", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-15T03:54:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-15T03:54:41.000Z", "avg_line_length": 36.1254901961, "max_line_length": 98, "alphanum_fraction": 0.530829353, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1956}
|
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore
from acconeer_utils.clients.reg.client import RegClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = get_base_config()
config.sensor = args.sensors
client.setup_session(config)
pg_updater = PGUpdater(config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = PhaseTrackingProcessor(config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_base_config():
config = configs.IQServiceConfig()
config.range_interval = [0.3, 0.6]
config.sweep_rate = 80
config.gain = 0.7
return config
class PhaseTrackingProcessor:
def __init__(self, config):
self.f = config.sweep_rate
self.dt = 1 / self.f
num_hist_points = self.f * 3
self.lp_vel = 0
self.last_sweep = None
self.hist_vel = np.zeros(num_hist_points)
self.hist_pos = np.zeros(num_hist_points)
self.sweep_index = 0
def process(self, sweep):
n = len(sweep)
ampl = np.abs(sweep)
power = ampl*ampl
if np.sum(power) > 1e-6:
com = np.sum(np.arange(n)/n * power) / np.sum(power) # center of mass
else:
com = 0
if self.sweep_index == 0:
self.lp_ampl = ampl
self.lp_com = com
plot_data = None
else:
a = self.alpha(0.1, self.dt)
self.lp_ampl = a*ampl + (1 - a)*self.lp_ampl
a = self.alpha(0.25, self.dt)
self.lp_com = a*com + (1-a)*self.lp_com
com_idx = int(self.lp_com * n)
delta_angle = np.angle(sweep[com_idx] * np.conj(self.last_sweep[com_idx]))
vel = self.f * 2.5 * delta_angle / (2*np.pi)
a = self.alpha(0.1, self.dt)
self.lp_vel = a*vel + (1 - a)*self.lp_vel
self.hist_vel = np.roll(self.hist_vel, -1)
self.hist_vel[-1] = self.lp_vel
dp = self.lp_vel / self.f
self.hist_pos = np.roll(self.hist_pos, -1)
self.hist_pos[-1] = self.hist_pos[-2] + dp
hist_len = len(self.hist_pos)
plot_hist_pos = self.hist_pos - self.hist_pos.mean()
plot_hist_pos_zoom = self.hist_pos[hist_len//2:] - self.hist_pos[hist_len//2:].mean()
iq_val = np.exp(1j*np.angle(sweep[com_idx])) * self.lp_ampl[com_idx]
plot_data = {
"abs": self.lp_ampl,
"arg": np.angle(sweep),
"com": self.lp_com,
"hist_pos": plot_hist_pos,
"hist_pos_zoom": plot_hist_pos_zoom,
"iq_val": iq_val,
}
self.last_sweep = sweep
self.sweep_index += 1
return plot_data
def alpha(self, tau, dt):
return 1 - np.exp(-dt/tau)
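# alpha() above is the gain of a first-order IIR low-pass filter: for a unit
# step input, the filtered value reaches about 63% after `tau` seconds.
# A quick standalone check with assumed values (independent of the sensor):
#
# f = 80.0                          # sweep rate in Hz
# a = 1 - np.exp(-(1 / f) / 0.1)    # tau = 0.1 s
# y = 0.0
# for _ in range(8):                # 8 samples = 0.1 s of input
#     y = a * 1.0 + (1 - a) * y
# print(y)                          # ~0.63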
class PGUpdater:
def __init__(self, config):
self.config = config
self.interval = config.range_interval
def setup(self, win):
win.resize(800, 600)
win.setWindowTitle("Acconeer phase tracking example")
self.abs_plot = win.addPlot(row=0, col=0)
self.abs_plot.showGrid(x=True, y=True)
self.abs_plot.setLabel("left", "Amplitude")
self.abs_plot.setLabel("bottom", "Depth (m)")
self.abs_curve = self.abs_plot.plot(pen=example_utils.pg_pen_cycler(0))
pen = example_utils.pg_pen_cycler(1)
pen.setStyle(QtCore.Qt.DashLine)
self.abs_inf_line = pg.InfiniteLine(pen=pen)
self.abs_plot.addItem(self.abs_inf_line)
self.arg_plot = win.addPlot(row=1, col=0)
self.arg_plot.showGrid(x=True, y=True)
self.arg_plot.setLabel("bottom", "Depth (m)")
self.arg_plot.setLabel("left", "Phase")
self.arg_plot.setYRange(-np.pi, np.pi)
self.arg_plot.getAxis("left").setTicks(example_utils.pg_phase_ticks)
self.arg_curve = self.arg_plot.plot(pen=example_utils.pg_pen_cycler(0))
self.arg_inf_line = pg.InfiniteLine(pen=pen)
self.arg_plot.addItem(self.arg_inf_line)
self.iq_plot = win.addPlot(row=1, col=1, title="IQ at line")
example_utils.pg_setup_polar_plot(self.iq_plot, 0.5)
self.iq_curve = self.iq_plot.plot(pen=example_utils.pg_pen_cycler())
self.iq_scatter = pg.ScatterPlotItem(
brush=pg.mkBrush(example_utils.color_cycler()),
size=15,
)
self.iq_plot.addItem(self.iq_scatter)
self.hist_plot = win.addPlot(row=0, col=1, colspan=2)
self.hist_plot.showGrid(x=True, y=True)
self.hist_plot.setLabel("bottom", "Time (s)")
self.hist_plot.setLabel("left", "Tracking (mm)")
self.hist_curve = self.hist_plot.plot(pen=example_utils.pg_pen_cycler())
self.hist_plot.setYRange(-5, 5)
self.hist_zoom_plot = win.addPlot(row=1, col=2)
self.hist_zoom_plot.showGrid(x=True, y=True)
self.hist_zoom_plot.setLabel("bottom", "Time (s)")
self.hist_zoom_plot.setLabel("left", "Tracking (mm)")
self.hist_zoom_curve = self.hist_zoom_plot.plot(pen=example_utils.pg_pen_cycler())
self.hist_zoom_plot.setYRange(-0.5, 0.5)
self.smooth_max = example_utils.SmoothMax(self.config.sweep_rate)
self.first = True
def update(self, data):
if self.first:
self.xs = np.linspace(*self.interval, len(data["abs"]))
self.ts = np.linspace(-3, 0, len(data["hist_pos"]))
self.ts_zoom = np.linspace(-1.5, 0, len(data["hist_pos_zoom"]))
self.first = False
com_x = (1-data["com"])*self.interval[0] + data["com"]*self.interval[1]
self.abs_curve.setData(self.xs, data["abs"])
self.abs_plot.setYRange(0, self.smooth_max.update(np.amax(data["abs"])))
self.abs_inf_line.setValue(com_x)
self.arg_curve.setData(self.xs, data["arg"])
self.arg_inf_line.setValue(com_x)
self.hist_curve.setData(self.ts, data["hist_pos"])
self.hist_zoom_curve.setData(self.ts_zoom, data["hist_pos_zoom"])
self.iq_curve.setData([0, np.real(data["iq_val"])], [0, np.imag(data["iq_val"])])
self.iq_scatter.setData([np.real(data["iq_val"])], [np.imag(data["iq_val"])])
if __name__ == "__main__":
main()
|
{"hexsha": "76bc8253fe86de1fbb0e0eb1e842cef6a85202d0", "size": 7216, "ext": "py", "lang": "Python", "max_stars_repo_path": "acconeer-python-exploration-master/examples/processing/phase_tracking.py", "max_stars_repo_name": "Kandidatarbete-Chalmers-MCCX02-19-06/RaspberryPiRadarProgram", "max_stars_repo_head_hexsha": "f5d69d9084d37246aaf0e0061b3353b86e8d59e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-05-27T13:13:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T00:09:58.000Z", "max_issues_repo_path": "acconeer-python-exploration-master/examples/processing/phase_tracking.py", "max_issues_repo_name": "Kandidatarbete-Chalmers-MCCX02-19-06/Kandidatarbete-Chalmers-MCCX02-19-06", "max_issues_repo_head_hexsha": "f5d69d9084d37246aaf0e0061b3353b86e8d59e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-04T08:32:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-26T17:44:11.000Z", "max_forks_repo_path": "acconeer-python-exploration-master/examples/processing/phase_tracking.py", "max_forks_repo_name": "Kandidatarbete-Chalmers-MCCX02-19-06/Kandidatarbete-Chalmers-MCCX02-19-06", "max_forks_repo_head_hexsha": "f5d69d9084d37246aaf0e0061b3353b86e8d59e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-10T16:43:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-10T16:43:17.000Z", "avg_line_length": 34.6923076923, "max_line_length": 97, "alphanum_fraction": 0.6204268293, "include": true, "reason": "import numpy", "num_tokens": 1833}
|
[STATEMENT]
lemma chine_simps [simp]:
shows "arr chine" and "ide chine"
and "src chine = src r\<^sub>0" and "trg chine = src s\<^sub>0"
and "dom chine = chine" and "cod chine = chine"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (arr chine &&& ide chine &&& src chine = src r\<^sub>0) &&& trg chine = src s\<^sub>0 &&& local.dom chine = chine &&& cod chine = chine
[PROOF STEP]
using chine_in_hom
[PROOF STATE]
proof (prove)
using this:
\<guillemotleft>chine : src r\<^sub>0 \<rightarrow> src s\<^sub>0\<guillemotright>
\<guillemotleft>chine : chine \<Rightarrow> chine\<guillemotright>
goal (1 subgoal):
1. (arr chine &&& ide chine &&& src chine = src r\<^sub>0) &&& trg chine = src s\<^sub>0 &&& local.dom chine = chine &&& cod chine = chine
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<guillemotleft>chine : src r\<^sub>0 \<rightarrow> src s\<^sub>0\<guillemotright>; \<guillemotleft>chine : chine \<Rightarrow> chine\<guillemotright>\<rbrakk> \<Longrightarrow> ide chine
[PROOF STEP]
by (meson arrow_of_spans_of_maps.is_ide chine_is_induced_map)
|
{"llama_tokens": 444, "file": "Bicategory_BicategoryOfSpans", "length": 3}
|
# -*- coding: utf-8 -*-
"""
```
"""
# import standard libraries
import os
from itertools import product
# import third-party libraries
import numpy as np
from colour.utilities.array import tstack
from colour import XYZ_to_RGB, xy_to_XYZ, RGB_COLOURSPACES
# import my libraries
import plot_utility as pu
import color_space as cs
from create_gamut_booundary_lut import CIELAB_CHROMA_MAX, TyLchLut,\
create_jzazbz_gamut_boundary_lut_type2, is_out_of_gamut_rgb,\
JZAZBZ_CHROMA_MAX, make_jzazbz_gb_lut_fname_method_c,\
make_jzazbz_gb_lut_fname_methodb_b,\
create_cielab_gamut_boundary_lut_method_b,\
make_cielab_gb_lut_fname_method_b, make_cielab_gb_lut_fname_method_c
from jzazbz import large_xyz_to_jzazbz, jzazbz_to_large_xyz, jzczhz_to_jzazbz
from jzazbz_azbz_czhz_plot import debug_plot_jzazbz,\
plot_cj_plane_with_interpolation_core
from cielab_ab_cl_plot import debug_plot_cielab
from common import MeasureExecTime
import transfer_functions as tf
# information
__author__ = 'Toru Yoshihara'
__copyright__ = 'Copyright (C) 2021 - Toru Yoshihara'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Toru Yoshihara'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def calc_chroma_boundary_specific_ligheness_cielab_method_c(
lch, cs_name, c0):
"""
parameters
----------
lightness : float
lightness value(Jzazbz). range is 0.0 - 1.0.
hue_sample : int
Sample number of the Hue
cs_name : string
A color space name. ex. "ITU-R BT.709", "ITU-R BT.2020"
Examples
--------
>>> boundary_jch = calc_chroma_boundary_specific_ligheness_jzazbz(
... lightness=0.5, hue_sample=16, cs_name=cs.BT2020,
... peak_luminance=10000)
[[ 5.00000000e-01 2.72627831e-01 0.00000000e+00]
[ 5.00000000e-01 2.96944618e-01 2.40000000e+01]
[ 5.00000000e-01 3.19167137e-01 4.80000000e+01]
[ 5.00000000e-01 2.51322746e-01 7.20000000e+01]
[ 5.00000000e-01 2.41002083e-01 9.60000000e+01]
[ 5.00000000e-01 2.76854515e-01 1.20000000e+02]
[ 5.00000000e-01 3.99024010e-01 1.44000000e+02]
[ 5.00000000e-01 2.64456749e-01 1.68000000e+02]
[ 5.00000000e-01 2.32390404e-01 1.92000000e+02]
[ 5.00000000e-01 2.51740456e-01 2.16000000e+02]
[ 5.00000000e-01 3.38995934e-01 2.40000000e+02]
[ 5.00000000e-01 3.09918404e-01 2.64000000e+02]
[ 5.00000000e-01 2.71250725e-01 2.88000000e+02]
[ 5.00000000e-01 2.59991646e-01 3.12000000e+02]
[ 5.00000000e-01 2.63157845e-01 3.36000000e+02]
[ 5.00000000e-01 2.72627831e-01 3.60000000e+02]]
"""
# lch --> rgb
ll = lch[..., 0]
chroma_init = lch[..., 1]
hue = np.deg2rad(lch[..., 2])
trial_num = 20
r_val = chroma_init
for t_idx in range(trial_num):
aa = r_val * np.cos(hue)
bb = r_val * np.sin(hue)
lab = tstack((ll, aa, bb))
rgb = cs.lab_to_rgb(lab, cs_name)
ng_idx = is_out_of_gamut_rgb(rgb=rgb)
ok_idx = np.logical_not(ng_idx)
add_sub = c0 / (2 ** (t_idx))
r_val[ok_idx] = r_val[ok_idx] + add_sub
r_val[~ok_idx] = r_val[~ok_idx] - add_sub
zero_idx = (chroma_init <= 0)
r_val[zero_idx] = 0.0
lch_result = tstack([ll, r_val, np.rad2deg(hue)])
return lch_result
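# The loop above is a bisection on the chroma radius: each trial adds or
# subtracts c0 / 2**t_idx depending on whether the candidate color is still
# inside the RGB gamut. A dependency-free sketch of the same search against
# an arbitrary "inside" predicate (illustrative only):
#
# def bisect_boundary(r_init, inside, c0, trials=20):
#     r = r_init
#     for t in range(trials):
#         step = c0 / (2 ** t)
#         r = r + step if inside(r) else r - step
#     return r
#
# bisect_boundary(0.5, lambda r: r <= 0.7321, c0=0.25)  # -> ~0.7321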
def plot_d65_multi_luminance():
axis_range = 0.0003  # renamed to avoid shadowing the built-in range
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 10),
bg_color=(0.96, 0.96, 0.96),
graph_title="D65 in the az-bz plane",
graph_title_size=None,
xlabel="az", ylabel="bz",
axis_label_size=None,
legend_size=17,
xlim=[-axis_range, axis_range],
ylim=[-axis_range, axis_range],
xtick=None,
ytick=None,
xtick_size=None, ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None)
luminance_list = [0, 0.01, 0.1, 1, 10, 100, 1000, 10000]
for luminance in luminance_list:
d65_xyz = xy_to_XYZ(cs.D65) * luminance
jzazbz = large_xyz_to_jzazbz(d65_xyz)
az = jzazbz[..., 1]
bz = jzazbz[..., 2]
ax1.plot(az, bz, 'o', label=f"{luminance} nits")
fname = "./img/white_posi.png"
print(fname)
pu.show_and_save(
fig=fig, legend_loc='upper right', show=False, save_fname=fname)
def create_lab_gamut_boundary_method_c(
hue_sample=8, lightness_sample=8, chroma_sample=1024,
color_space_name=cs.BT709):
ll_num = lightness_sample
lut = create_cielab_gamut_boundary_lut_method_b(
lightness_sample=ll_num, chroma_sample=chroma_sample,
hue_sample=hue_sample, cs_name=color_space_name)
np.save(make_cielab_gb_lut_fname_method_b(
color_space_name=color_space_name, lightness_num=lightness_sample,
hue_num=hue_sample), lut)
# create 2d lut using method B
lut_b = np.load(
make_cielab_gb_lut_fname_method_b(
color_space_name=color_space_name, lightness_num=lightness_sample,
hue_num=hue_sample))
# create 2d lut using method C
c0 = CIELAB_CHROMA_MAX / (chroma_sample - 1)
lut_c = np.zeros_like(lut_b)
for l_idx in range(lightness_sample):
lch_init = lut_b[l_idx]
lch_result = calc_chroma_boundary_specific_ligheness_cielab_method_c(
lch=lch_init, cs_name=color_space_name, c0=c0)
lut_c[l_idx] = lch_result
fname = make_cielab_gb_lut_fname_method_c(
color_space_name=color_space_name,
lightness_num=lightness_sample, hue_num=hue_sample)
np.save(fname, np.float32(lut_c))
def calc_chroma_boundary_specific_ligheness_jzazbz_method_c(
lch, cs_name, peak_luminance, c0):
"""
Parameters
----------
lch : ndarray
    Array of (Jz, Cz, hz) values; the chroma entries are initial
    estimates that are refined here.
cs_name : string
    A color space name. ex. "ITU-R BT.709", "ITU-R BT.2020"
peak_luminance : float
    Peak luminance in nits used to normalize RGB for the gamut check.
c0 : float
    Base chroma step; trial t adds or subtracts c0 / 2**t.
Examples
--------
>>> boundary_jch = calc_chroma_boundary_specific_ligheness_jzazbz(
... lightness=0.5, hue_sample=16, cs_name=cs.BT2020,
... peak_luminance=10000)
[[ 5.00000000e-01 2.72627831e-01 0.00000000e+00]
[ 5.00000000e-01 2.96944618e-01 2.40000000e+01]
[ 5.00000000e-01 3.19167137e-01 4.80000000e+01]
[ 5.00000000e-01 2.51322746e-01 7.20000000e+01]
[ 5.00000000e-01 2.41002083e-01 9.60000000e+01]
[ 5.00000000e-01 2.76854515e-01 1.20000000e+02]
[ 5.00000000e-01 3.99024010e-01 1.44000000e+02]
[ 5.00000000e-01 2.64456749e-01 1.68000000e+02]
[ 5.00000000e-01 2.32390404e-01 1.92000000e+02]
[ 5.00000000e-01 2.51740456e-01 2.16000000e+02]
[ 5.00000000e-01 3.38995934e-01 2.40000000e+02]
[ 5.00000000e-01 3.09918404e-01 2.64000000e+02]
[ 5.00000000e-01 2.71250725e-01 2.88000000e+02]
[ 5.00000000e-01 2.59991646e-01 3.12000000e+02]
[ 5.00000000e-01 2.63157845e-01 3.36000000e+02]
[ 5.00000000e-01 2.72627831e-01 3.60000000e+02]]
"""
# lch --> rgb
jj = lch[..., 0]
chroma_init = lch[..., 1]
hue = np.deg2rad(lch[..., 2])
trial_num = 30
r_val = chroma_init
for t_idx in range(trial_num):
aa = r_val * np.cos(hue)
bb = r_val * np.sin(hue)
jzazbz = tstack((jj, aa, bb))
large_xyz = jzazbz_to_large_xyz(jzazbz)
rgb_luminance = XYZ_to_RGB(
large_xyz, cs.D65, cs.D65,
RGB_COLOURSPACES[cs_name].matrix_XYZ_to_RGB)
ng_idx = is_out_of_gamut_rgb(rgb=rgb_luminance/peak_luminance)
ok_idx = np.logical_not(ng_idx)
add_sub = c0 / (2 ** (t_idx))
r_val[ok_idx] = r_val[ok_idx] + add_sub
r_val[~ok_idx] = r_val[~ok_idx] - add_sub
zero_idx = (chroma_init <= 0)
r_val[zero_idx] = 0.0
jzczhz = tstack([jj, r_val, np.rad2deg(hue)])
return jzczhz
def create_jzazbz_gamut_boundary_method_c(
hue_sample=8, lightness_sample=8, chroma_sample=1024,
color_space_name=cs.BT709, luminance=100):
c0 = JZAZBZ_CHROMA_MAX / (chroma_sample - 1)
# create 2d lut using method B
create_jzazbz_gamut_boundary_lut_type2(
hue_sample=hue_sample, lightness_sample=lightness_sample,
chroma_sample=chroma_sample, color_space_name=color_space_name,
luminance=luminance)
lut_b = np.load(
make_jzazbz_gb_lut_fname_methodb_b(
color_space_name=color_space_name, luminance=luminance,
lightness_num=lightness_sample, hue_num=hue_sample))
# create 2d lut using method C
lut_c = np.zeros_like(lut_b)
for l_idx in range(lightness_sample):
jzczhz_init = lut_b[l_idx]
jzczhz = calc_chroma_boundary_specific_ligheness_jzazbz_method_c(
lch=jzczhz_init, cs_name=color_space_name,
peak_luminance=luminance, c0=c0)
lut_c[l_idx] = jzczhz
fname = make_jzazbz_gb_lut_fname_method_c(
color_space_name=color_space_name, luminance=luminance,
lightness_num=lightness_sample, hue_num=hue_sample)
np.save(fname, np.float32(lut_c))
def create_jzazbz_2dlut_using_method_c_and_plot(
luminance=1000, color_space_name=cs.BT709):
hue_num = 4096
lightness_sample = 1024
chroma_sample = 512
h_num_intp = 1200
j_num_intp = 1200
# create_jzazbz_gamut_boundary_method_c(
# hue_sample=hue_num, lightness_sample=lightness_sample,
# chroma_sample=chroma_sample, color_space_name=color_space_name,
# luminance=luminance)
debug_plot_jzazbz(
hue_sample=hue_num, lightness_sample=lightness_sample,
luminance=luminance, h_num_intp=h_num_intp, j_num_intp=j_num_intp,
color_space_name=color_space_name)
def create_cielab_2dlut_using_method_c_and_plot(color_space_name=cs.BT709):
chroma_sample = 512
hue_sample = 4096
lightness_sample = 1024
h_num_intp = 1200
l_num_intp = 1200
# create_lab_gamut_boundary_method_c(
# hue_sample=hue_sample, lightness_sample=lightness_sample,
# chroma_sample=chroma_sample,
# color_space_name=color_space_name)
debug_plot_cielab(
hue_sample=hue_sample, lightness_sample=lightness_sample,
h_num_intp=h_num_intp, l_num_intp=l_num_intp,
color_space_name=color_space_name)
def plot_plane_festival():
# create_cielab_2dlut_using_method_c_and_plot(color_space_name=cs.BT709)
# create_cielab_2dlut_using_method_c_and_plot(color_space_name=cs.P3_D65)
# create_cielab_2dlut_using_method_c_and_plot(color_space_name=cs.BT2020)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=100, color_space_name=cs.BT2020)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=1000, color_space_name=cs.BT2020)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=10000, color_space_name=cs.BT2020)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=100, color_space_name=cs.BT709)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=1000, color_space_name=cs.BT709)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=10000, color_space_name=cs.BT709)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=100, color_space_name=cs.P3_D65)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=1000, color_space_name=cs.P3_D65)
# create_jzazbz_2dlut_using_method_c_and_plot(
# luminance=10000, color_space_name=cs.P3_D65)
pass
def debug_ng_cusp():
bg_lut_name = make_jzazbz_gb_lut_fname_method_c(
color_space_name=cs.BT709, luminance=1000)
bg_lut = TyLchLut(lut=np.load(bg_lut_name))
hue_list = np.linspace(250, 260, 256)
for hue in hue_list:
cusp = bg_lut.get_cusp_without_intp(hue=hue)
rgb = cs.jzazbz_to_rgb(
jzazbz=jzczhz_to_jzazbz(cusp), color_space_name=cs.BT709,
luminance=1000)
print(f"hue={hue:.2f}, cusp={cusp}, rgb={rgb}")
def create_luts_all():
chroma_sample = 512
hue_sample = 4096
lightness_sample = 1024
# color_space_name_list = [cs.BT709, cs.BT2020, cs.P3_D65]
# luminance_list = [100, 300, 600, 1000, 2000, 4000, 10000]
color_space_name_list = [cs.P3_D65]
cv_list = [x * 16 for x in range(65)]
cv_list[-1] = cv_list[-1] - 1
luminance_list = [
int(round(tf.eotf_to_luminance(x/1023, tf.ST2084)))
for x in cv_list]
luminance_list = np.array(luminance_list, dtype=np.uint16)
luminance_list = [x for x in luminance_list if (x > 3) and (x < 100)]
print(luminance_list)
# luminance_list = [100 * x + 100 for x in range(33)]
# luminance_list = [1000]
# for color_space_name in color_space_name_list:
# create_lab_gamut_boundary_method_c(
# hue_sample=hue_sample, lightness_sample=lightness_sample,
# chroma_sample=chroma_sample,
# color_space_name=color_space_name)
met = MeasureExecTime()
met.start()
for color_space_name in color_space_name_list:
for luminance in luminance_list:
create_jzazbz_gamut_boundary_method_c(
hue_sample=hue_sample, lightness_sample=lightness_sample,
chroma_sample=chroma_sample, color_space_name=color_space_name,
luminance=luminance)
met.end()
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
create_luts_all()
# plot_plane_festival()
# debug_ng_cusp()
# debug plot hue angle 250 to 260
# bg_lut_name = make_jzazbz_gb_lut_fname_method_c(
# color_space_name=cs.BT709, luminance=1000)
# h_val_list = np.linspace(0, 360, 4096)
# h_val_list2_idx = (h_val_list > 252.5) & (h_val_list < 257.5)
# for h_idx, h_val in enumerate(h_val_list[h_val_list2_idx]):
# plot_cj_plane_with_interpolation_core(
# bg_lut_name=bg_lut_name, h_idx=h_idx, h_val=h_val,
# color_space_name=cs.BT709, maximum_luminance=1000)
# bg_lut_name = make_jzazbz_gb_lut_fname_method_c(
# color_space_name=cs.BT709, luminance=1000)
# h_val_list = np.linspace(0, 360, 4096)
# h_val_list2_idx = (h_val_list > 0) & (h_val_list < 5)
# for h_idx, h_val in enumerate(h_val_list[h_val_list2_idx]):
# plot_cj_plane_with_interpolation_core(
# bg_lut_name=bg_lut_name, h_idx=h_idx+1000, h_val=h_val,
# color_space_name=cs.BT709, maximum_luminance=1000)
|
{"hexsha": "4576e9e379392d2f0acc897604f87610a2ba4b26", "size": 14629, "ext": "py", "lang": "Python", "max_stars_repo_path": "2021/15_2-pass_gamut_boundary/debug_2_pass_lut.py", "max_stars_repo_name": "toru-ver4/sample_code", "max_stars_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-11-12T23:34:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T13:21:03.000Z", "max_issues_repo_path": "2021/15_2-pass_gamut_boundary/debug_2_pass_lut.py", "max_issues_repo_name": "toru-ver4/sample_code", "max_issues_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 101, "max_issues_repo_issues_event_min_datetime": "2019-08-12T01:20:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T12:17:01.000Z", "max_forks_repo_path": "2021/15_2-pass_gamut_boundary/debug_2_pass_lut.py", "max_forks_repo_name": "toru-ver4/sample_code", "max_forks_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-08T09:48:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T15:35:51.000Z", "avg_line_length": 36.1209876543, "max_line_length": 79, "alphanum_fraction": 0.6798140679, "include": true, "reason": "import numpy", "num_tokens": 4989}
|
import os
import pandas as pd
import snscrape
import re
from nltk.corpus import stopwords
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
from tqdm import tqdm
import math
import snscrape.modules.twitter as sntwitter
import itertools
def remove_Punctuations(x):
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
no_punct = ""
for letter in x:
if letter not in punctuations:
no_punct = no_punct + letter
return no_punct.strip(" ")
def deEmojify(text):
regrex_pattern = re.compile(pattern = "["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags = re.UNICODE)
return regrex_pattern.sub(r'',text)
def remove_url(x):
result = re.sub(r"http\S+", "", x)
return result
def remove_everything(x):
if "\n" in x:
x=str(x.split("\n"))
mn = re.sub("[^A-Za-z]", "", x)
return mn
def clean_up(x):
x=remove_Punctuations(x)
x=deEmojify(x)
x=remove_url(x)
x=remove_everything(x)
return x
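# Quick illustration (made-up input): punctuation, emoji and URLs are
# stripped, then every remaining non-letter character is removed.
#
# clean_up("Hello!!\U0001F600 http://t.co/xyz")  # -> "Hello"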
def pre_process_tweet(df):
    # Build the multilingual stop-word list once instead of once per token.
    sw = []
    for lang in stopwords.fileids():
        sw.extend(stopwords.words(lang))
    sw = set(sw)
    all_tweets = []
    for i in range(df.shape[0]):
        low = []
        tweet = df.iloc[i, 0]
        word_l = tweet.split(" ")
        # Iterate over a snapshot: splitting on "\n" mutates word_l in place,
        # and mutating a list while iterating over it skips elements.
        for j in list(word_l):
            if "\n" in j:
                word_l.extend(j.split("\n"))
                word_l.remove(j)
        for w in word_l:
            x = clean_up(w)
            for t in word_tokenize(x):
                if t.lower() not in sw:
                    low.append(t.lower())
        all_tweets.append(low)
    df["new"] = all_tweets
    return df
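# Example run (toy DataFrame; column 0 must hold the raw tweet text):
#
# df_demo = pd.DataFrame({"tweet": ["Check this out!\nAmazing RESULTS here"]})
# pre_process_tweet(df_demo)["new"][0]
# # e.g. ['check', 'amazing', 'results'] after stop-word removal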
|
{"hexsha": "86aabcb0e3fe2bd05b3afd0cff3383ce27d55247", "size": 1993, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/TLA/Data/Pre_Process_Tweets.py", "max_stars_repo_name": "tusharsarkar3/TLA", "max_stars_repo_head_hexsha": "86957502840218860ddb876643bd5acf76e8957f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2021-07-22T05:52:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T07:26:50.000Z", "max_issues_repo_path": "build/lib/TLA/Data/Pre_Process_Tweets.py", "max_issues_repo_name": "victorknox/TLA", "max_issues_repo_head_hexsha": "a898617765e2af8ce4f416d8430a8ee9c92aba94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-25T14:36:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-25T14:36:39.000Z", "max_forks_repo_path": "build/lib/TLA/Data/Pre_Process_Tweets.py", "max_forks_repo_name": "victorknox/TLA", "max_forks_repo_head_hexsha": "a898617765e2af8ce4f416d8430a8ee9c92aba94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-07-23T01:22:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T07:26:52.000Z", "avg_line_length": 21.6630434783, "max_line_length": 64, "alphanum_fraction": 0.5283492223, "include": true, "reason": "import numpy", "num_tokens": 522}
|
"""
Script reads the csv file describing the details of people requiring help.
"""
__author__ = "Shameer Sathar"
__license__ = "MIT"
__version__ = "1.0.1"
# imports
import pandas as pd
import numpy as np
class CampDataReader:
def __init__(self, filename):
self.filename = filename
self.df = self._read_file()
self.df_filtered = pd.DataFrame()
def _read_file(self):
df = pd.read_csv(self.filename)
df.drop_duplicates(inplace=True)
df = df[['district', 'name', 'location', 'taluk', 'village', 'total_people', 'total_males',
'total_females', 'total_infants']]
# We ignore location information that is more than 1000 meters off
return df
def get_all_data(self):
return self.df
def get_districts(self):
return self.df['district'].unique()
def get_plot_data(self,list_requirements):
df = self.df
return df.groupby('district').sum()['total_people']
def get_plot_per_dist(self,list_requirements, dist):
df = self.df
df = df[df.district == dist]
return df.groupby('taluk').sum()['total_people']
def get_all_dist_data(self,dist):
df = self.df
df = df[df.district == dist]
return df
def get_all_taluk_data(self,dist, taluk):
df = self.df
df = df[df.district == dist]
df = df[df.taluk == taluk]
return df
if __name__ == '__main__':
    # No main() is defined in this module; run a minimal smoke test instead
    # ('camp_details.csv' is a placeholder file name).
    reader = CampDataReader('camp_details.csv')
    print(reader.get_districts())
|
{"hexsha": "a2bb949d9543fd215792c05f2390b29c74a5b8d5", "size": 1465, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_reader/CampDataReader.py", "max_stars_repo_name": "ssat335/processkeralarescue", "max_stars_repo_head_hexsha": "c0c5a32fd3cf74c9487fcbff1192ef4bb82f3db8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-08-18T19:40:43.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-20T06:04:37.000Z", "max_issues_repo_path": "data_reader/CampDataReader.py", "max_issues_repo_name": "ssat335/processkeralarescue", "max_issues_repo_head_hexsha": "c0c5a32fd3cf74c9487fcbff1192ef4bb82f3db8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-08-18T17:14:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-05T17:21:09.000Z", "max_forks_repo_path": "data_reader/CampDataReader.py", "max_forks_repo_name": "ssat335/processkeralarescue", "max_forks_repo_head_hexsha": "c0c5a32fd3cf74c9487fcbff1192ef4bb82f3db8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-08-18T18:13:48.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-23T09:35:56.000Z", "avg_line_length": 26.6363636364, "max_line_length": 99, "alphanum_fraction": 0.6197952218, "include": true, "reason": "import numpy", "num_tokens": 362}
|
# Implementation of Blendenpik with Gaussian row mixing for the solution of least squares
# problem ||Ax - b||₂ where A has full column rank.
#
# This method, along with other row-mixing strategies, is described in
#
# Avron, Haim, Petar Maymounkov, and Sivan Toledo. "Blendenpik: Supercharging LAPACK's
# least-squares solver." SIAM Journal on Scientific Computing 32, no. 3 (2010): 1217-1236.
#
# January 2021
"""
blendenpick_gauss(A, b; r)
Solves the least squares problem with coefficient `A` and constant `b`, where `A` has full
column rank, using the blendenpick method with Gaussian row mixing. The number of sampled
rows, `r`, defaults to the number of columns.
"""
function blendenpick_gauss!(
x::Vector{T},
A::Matrix{T}, # Coefficient matrix of system
b::Vector{T}; # Constant vector of system
r::Int=size(A, 2), # Size of row sample
verbose::Bool=false # Show stats from the lsqr solver
) where T <: Real
m = size(A, 1) # Number of rows in A
# Mix rows of A with Gaussians to generate an r-by-size(A, 2) matrix
A_mixed = randn(r, m) * A
# Generate preconditioner from the R factor of the QR decomposition of the mixed matrix
_, R = qr(A_mixed)
Rinv = R \ I
# Run lsqr on transformed systems
y, stats = lsqr(A * Rinv, b)
verbose && show(stats)
# Recover the solution to the original system, writing it into x
x .= Rinv * y
return x
end
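# Usage sketch (assumes `lsqr` from Krylov.jl and LinearAlgebra are in scope,
# as in the surrounding package; the sizes below are illustrative only):
#
# A = randn(500, 20); b = randn(500)
# x = zeros(20)
# blendenpick_gauss!(x, A, b; r=40)
# norm(A' * (A * x - b))  # ~0 at a least-squares solution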
|
{"hexsha": "adcc8354f69a701a8b8c60962217c761ebf0edd7", "size": 1391, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/linear_solver_routines/blendenpik_gauss.jl", "max_stars_repo_name": "numlinalg/RLinearAlgebra.jl", "max_stars_repo_head_hexsha": "757cc7e581303c4fb6db228618f4be5caa02d3b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-05-28T17:10:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T05:23:14.000Z", "max_issues_repo_path": "src/linear_solver_routines/blendenpik_gauss.jl", "max_issues_repo_name": "numlinalg/RLinearAlgebra.jl", "max_issues_repo_head_hexsha": "757cc7e581303c4fb6db228618f4be5caa02d3b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-06-16T16:01:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T12:28:20.000Z", "max_forks_repo_path": "src/linear_solver_routines/blendenpik_gauss.jl", "max_forks_repo_name": "numlinalg/RLinearAlgebra.jl", "max_forks_repo_head_hexsha": "757cc7e581303c4fb6db228618f4be5caa02d3b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.119047619, "max_line_length": 90, "alphanum_fraction": 0.6808051761, "num_tokens": 400}
|
import sys
import numpy as np
import cv2
buffer_size = 10
def nothing(x):
pass
cv2.namedWindow('FUJII_algorithm_demo')
cv2.createTrackbar('FUJII_SCALE','FUJII_algorithm_demo',20,100,nothing)
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cv2.waitKey(1500)
if cap.isOpened() == False:
print("Unable to connect with selected capturing device")
cv2.destroyAllWindows()
sys.exit(0)
ret, current_frame = cap.read()
height = 0
width = 0
channels = 1
if len(current_frame.shape) == 2:
height, width = current_frame.shape
channels = 1
else:
height, width, channels = current_frame.shape
if channels > 1:
current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
current_frame = current_frame.astype(np.float32) * (1.0 / 255.0)
previous_frame = current_frame.copy()
fujii_buffer = []
total_fujii = np.zeros((height,width), np.float32)
for i in range(buffer_size):
ret, current_frame = cap.read()
if channels > 1:
current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
current_frame = current_frame.astype(np.float32) * (1.0 / 255.0)
abs_diff = cv2.absdiff(current_frame, previous_frame)
frame_sum = current_frame + previous_frame  # renamed to avoid shadowing built-in sum
frame_sum += (1.0 / 255.0)  # small offset to avoid division by zero
fujii = cv2.multiply(abs_diff, cv2.pow(frame_sum, -1.0))
fujii_buffer.append(fujii.copy())
total_fujii += fujii
previous_frame = current_frame.copy()
last_frame = buffer_size-1
while(True):
my_val = cv2.getTrackbarPos('FUJII_SCALE','FUJII_algorithm_demo')
max_fujii = 5.0 * ((my_val + 1.0) / 100.0)
scale_coeff = (1.0 / max_fujii) * 255.0
ret, current_frame = cap.read()
if np.shape(current_frame) != ():
if channels > 1:
current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
current_frame = current_frame.astype(np.float32) * (1.0 / 255.0)
abs_diff = cv2.absdiff(current_frame, previous_frame)
frame_sum = current_frame + previous_frame
frame_sum += (1.0 / 255.0)  # small offset to avoid division by zero
fujii = cv2.multiply(abs_diff, cv2.pow(frame_sum, -1.0))
total_fujii -= fujii_buffer[last_frame]
total_fujii += fujii
fujii_buffer[last_frame] = fujii.copy()
last_frame += 1
if last_frame == buffer_size:
last_frame = 0
previous_frame = current_frame.copy()
final = total_fujii * scale_coeff
ret, final = cv2.threshold(final, 255, 255,cv2.THRESH_TRUNC)
final = final.astype(np.uint8)
final = cv2.GaussianBlur(final,(5,5),0)
im_color = cv2.applyColorMap(final, cv2.COLORMAP_JET)
cv2.imshow('FUJII_algorithm_demo', im_color)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
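# The statistic accumulated above is the Fujii contrast
# |I_t - I_{t-1}| / (I_t + I_{t-1} + 1/255), summed over the last
# `buffer_size` frame pairs. A camera-free check on two synthetic frames:
#
# prev = np.full((2, 2), 0.30, np.float32)
# curr = np.array([[0.30, 0.50], [0.10, 0.30]], np.float32)
# np.abs(curr - prev) / (curr + prev + 1.0 / 255.0)
# # zero where nothing moved, large where the intensity changed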
|
{"hexsha": "09f2e95dc9eccb51fb0859be9c50a8e11f1584ac", "size": 2905, "ext": "py", "lang": "Python", "max_stars_repo_path": "FUJII.py", "max_stars_repo_name": "ppieczywek/SpecklePy", "max_stars_repo_head_hexsha": "4f7bcde7a8c38b5e2dda5d9e640ea4698ac0765b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-09T11:22:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:33:17.000Z", "max_issues_repo_path": "FUJII.py", "max_issues_repo_name": "ppieczywek/SpecklePy", "max_issues_repo_head_hexsha": "4f7bcde7a8c38b5e2dda5d9e640ea4698ac0765b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FUJII.py", "max_forks_repo_name": "ppieczywek/SpecklePy", "max_forks_repo_head_hexsha": "4f7bcde7a8c38b5e2dda5d9e640ea4698ac0765b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2608695652, "max_line_length": 76, "alphanum_fraction": 0.6254733219, "include": true, "reason": "import numpy", "num_tokens": 791}
|
import tensorflow as tf
import numpy as np
class seq2seq(object):
def __init__(self,emb_dim=16,vocab_size=101,encoder_size=5,decoder_size=5,lr=0.002,
forward_only=False,cell=tf.contrib.rnn.LSTMCell,num_units=128,name='seq2seq'):
self.name = name
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.decoder_size = decoder_size
self.encoder_size = encoder_size
cell = cell(num_units)
self.inputs = tf.placeholder(tf.int32, shape=[None, encoder_size + decoder_size + 1], name='inputs')
self.targets = tf.placeholder(tf.int32,shape=[None,decoder_size],name='targets')
with tf.variable_scope(self.name):
embeddings = tf.get_variable(name='embeddings', shape=[self.vocab_size,emb_dim],
initializer=tf.random_uniform_initializer())
w_proj = tf.get_variable(name='w_proj',shape=[num_units,vocab_size],initializer=tf.random_uniform_initializer())
b_proj = tf.get_variable(name='b_proj',shape=[vocab_size],initializer=tf.random_uniform_initializer())
print('inputs shape:',self.inputs.get_shape())
emb_inputs = tf.nn.embedding_lookup(embeddings,self.inputs)
print('emb_inputs shape:',emb_inputs.get_shape())
emb_inputs = tf.transpose(emb_inputs,[1,0,2])#[batch,step,emb_size]-->[step,batch,emb_size]
emb_inputs = tf.unstack(emb_inputs)
_outputs,_ = tf.contrib.rnn.static_rnn(cell,emb_inputs,dtype=tf.float32)
self.outputs = [tf.matmul(ele,w_proj)+b_proj for ele in _outputs[encoder_size+1:]]#step*[batch,emb_dims]
self.outputs = tf.concat(self.outputs, axis=0)  # [decoder_size*batch, vocab_size], step-major
#targets_one_hot = tf.one_hot(self.targets,vocab_size,1,0)
#print('toh shape:',targets_one_hot.get_shape())
#targets_ = tf.reshape(targets_one_hot,[-1,vocab_size])
targets_ = tf.reshape(tf.transpose(self.targets), [-1])  # step-major, matching the concatenated outputs
print('targets_ shape:',targets_.get_shape())
print('outputs shape:',self.outputs.get_shape())
print('targets shape:',self.targets.get_shape())
self.loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(targets_,self.outputs))
self.opt = tf.train.GradientDescentOptimizer(lr).minimize(self.loss)
def train(self,data,batch_size,max_epoch,save_step=10,display_step=10,save2='../model/model_seq2seq.ckpt'):
num_steps = data.data_size//batch_size
sess = tf.Session()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
try:
saver.restore(sess,save2)
print('model loaded from %s'%save2)
except:
print('create fresh model')
sess.run(init)
for i in range(max_epoch):
step = 0
while step<num_steps:
source,target = data.get_batch(step,batch_size)
#_inputs = np.column_stack((source,target))
feed_dict = {self.inputs.name:source,self.targets.name:target}
outp,cost,_ = sess.run([self.outputs,self.loss,self.opt],feed_dict=feed_dict)
if step%display_step == 0:
print('epoch:%d,step:%d,cost:%f'%(i,step,cost))
if step%save_step == 0:
saver.save(sess,save2)
print('model saved in %s'%save2)
step += 1
print(10*'*'+'Do pseudo test'+10*'*')
outp = np.reshape(np.argmax(outp, axis=1), [data.pad_size, -1]).T  # step-major -> [batch, pad_size]
if batch_size>10:
test_size = 10
else:
test_size = batch_size
query = data.logits2sentence(source[0:test_size,:])
real_resp = data.logits2sentence(target[0:test_size,:])
pred_resp = data.logits2sentence(outp[0:test_size,:])
for k in range(test_size):
print('Query:%s\nRResp:%s\nPResp:%s\n'%(query[k],real_resp[k],pred_resp[k]))
print(10*'*'+'Do real test'+10*'*')
source, target = data.get_testSet(test_size)
feed_dict = {self.inputs.name:source,self.targets.name:target}
outp, cost = sess.run([self.outputs,self.loss],feed_dict=feed_dict)
outp = np.argmax(outp, axis=1)
outp = np.reshape(outp, [data.pad_size, -1]).T  # step-major -> [batch, pad_size]
query = data.logits2sentence(source)
real_resp = data.logits2sentence(target)
pred_resp = data.logits2sentence(outp)
for k in range(test_size):
print('Query:%s\nRResp:%s\nPResp:%s\n'%(query[k],real_resp[k],pred_resp[k]))
data.shuffle_trainSet()
class test_data:
    def __init__(self, sources, targets):
        self.sources = sources
        self.targets = targets
        self.data_size = len(self.sources)  # attribute name expected by seq2seq.train
        self.pad_size = targets.shape[1]
    def get_batch(self, n, batch_size):
        return self.sources[n*batch_size:(n+1)*batch_size, :], self.targets[n*batch_size:(n+1)*batch_size, :]
    def get_testSet(self, test_size):
        return self.sources[:test_size, :], self.targets[:test_size, :]
    def logits2sentence(self, logits):
        # Render integer token ids as readable space-separated strings.
        return [' '.join(str(t) for t in row) for row in logits]
    def shuffle_trainSet(self):
        perm = np.random.permutation(self.data_size)
        self.sources, self.targets = self.sources[perm], self.targets[perm]
if __name__=='__main__':
np.random.seed(1)
n_samples = 100000
data_x = np.random.randint(1,101,[n_samples,10],np.int32)
print(data_x.shape)
data_y = data_x[:,5:]
print(data_y.shape)
data_x = np.column_stack((data_x,np.zeros([n_samples,1],np.int32)))
data = test_data(data_x,data_y)
model = seq2seq()
model.train(data,10,10)
|
{"hexsha": "6e1db7cd1f259ff1ea16f03d85825e3efcb43bc4", "size": 5330, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models.py", "max_stars_repo_name": "MaZhiyuanBUAA/textGeneration", "max_stars_repo_head_hexsha": "72986e5c478febadf8f8a4cb068bb4ca28ddc071", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models.py", "max_issues_repo_name": "MaZhiyuanBUAA/textGeneration", "max_issues_repo_head_hexsha": "72986e5c478febadf8f8a4cb068bb4ca28ddc071", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models.py", "max_forks_repo_name": "MaZhiyuanBUAA/textGeneration", "max_forks_repo_head_hexsha": "72986e5c478febadf8f8a4cb068bb4ca28ddc071", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.7619047619, "max_line_length": 124, "alphanum_fraction": 0.6198874296, "include": true, "reason": "import numpy", "num_tokens": 1283}
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from abc import ABC, abstractproperty
from typing import Callable, Iterable, NewType, Mapping, Any, Optional
import numpy as np
from graph.types import Parameters
from quantization.quantization_record_base import QuantizationRecordBase
KernelFunction = NewType('KernelFunction',
Callable[
[Parameters,
Iterable[np.ndarray],
QuantizationRecordBase,
Optional[Mapping[Any, Any]]],
Iterable[np.ndarray]
])
class KernelFunctionSetBase(ABC):
@abstractproperty
def graph_input(self) -> KernelFunction:
pass
@abstractproperty
def graph_output(self) -> KernelFunction:
pass
@abstractproperty
def constant_input(self) -> KernelFunction:
pass
@abstractproperty
def relu(self) -> KernelFunction:
pass
@abstractproperty
def leaky(self) -> KernelFunction:
pass
@abstractproperty
def hswish(self) -> KernelFunction:
pass
@abstractproperty
def hsigmoid(self) -> KernelFunction:
pass
@abstractproperty
def matadd(self) -> KernelFunction:
pass
@abstractproperty
def matsub(self) -> KernelFunction:
pass
@abstractproperty
def matdiv(self) -> KernelFunction:
pass
@abstractproperty
def matmul(self) -> KernelFunction:
pass
@abstractproperty
def matscale(self) -> KernelFunction:
pass
@abstractproperty
def conv2d(self) -> KernelFunction:
pass
@abstractproperty
def linear(self) -> KernelFunction:
pass
@abstractproperty
def softmax(self) -> KernelFunction:
pass
@abstractproperty
def reshape(self) -> KernelFunction:
pass
@abstractproperty
def transpose(self) -> KernelFunction:
pass
@abstractproperty
def concat(self) -> KernelFunction:
pass
@abstractproperty
def av_pool(self) -> KernelFunction:
pass
@abstractproperty
def av_global_pool(self) -> KernelFunction:
pass
@abstractproperty
def max_pool(self) -> KernelFunction:
pass
@abstractproperty
def max_global_pool(self) -> KernelFunction:
pass
@abstractproperty
def sum_global_pool(self) -> KernelFunction:
pass
@abstractproperty
def pad(self) -> KernelFunction:
pass
@abstractproperty
def image_format(self) -> KernelFunction:
pass
@abstractproperty
def rnn(self) -> KernelFunction:
pass
@abstractproperty
def strided_slice(self) -> KernelFunction:
pass
@abstractproperty
def cast(self) -> KernelFunction:
pass
@abstractproperty
def split(self) -> KernelFunction:
pass
@abstractproperty
def copy(self) -> KernelFunction:
pass
@abstractproperty
def resize_nearest_neighbor(self) -> KernelFunction:
pass
@abstractproperty
def expression(self) -> KernelFunction:
pass
@abstractproperty
def revert(self) -> KernelFunction:
pass
|
{"hexsha": "2c63eb3acaf6af38d8d5c9dfccb0021fb175e2c7", "size": 3931, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/nntool/quantization/kernels/kernel_function.py", "max_stars_repo_name": "coWorkr-InSights/gap_sdk", "max_stars_repo_head_hexsha": "a934747441481ea3d9c029719d721780cdff9e46", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/nntool/quantization/kernels/kernel_function.py", "max_issues_repo_name": "coWorkr-InSights/gap_sdk", "max_issues_repo_head_hexsha": "a934747441481ea3d9c029719d721780cdff9e46", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/nntool/quantization/kernels/kernel_function.py", "max_forks_repo_name": "coWorkr-InSights/gap_sdk", "max_forks_repo_head_hexsha": "a934747441481ea3d9c029719d721780cdff9e46", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5389221557, "max_line_length": 74, "alphanum_fraction": 0.6443653015, "include": true, "reason": "import numpy", "num_tokens": 846}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.