export DeterministicDistributionModel, get_states, get_actions
import ReinforcementLearningEnvironments: observation_space, action_space
"""
DeterministicDistributionModel(table::Array{Vector{NamedTuple{(:nextstate, :reward, :prob),Tuple{Int,Float64,Float64}}}, 2})
Store all the transitions in the `table` field.
"""
struct DeterministicDistributionModel <: AbstractDistributionBasedModel
table::Array{
Vector{NamedTuple{(:nextstate, :reward, :prob),Tuple{Int,Float64,Float64}}},
2,
}
end
observation_space(m::DeterministicDistributionModel) = DiscreteSpace(size(m.table, 1))
action_space(m::DeterministicDistributionModel) = DiscreteSpace(size(m.table, 2))
(m::DeterministicDistributionModel)(s::Int, a::Int) = m.table[s, a]
# ----- end of src/components/environment_models/deterministic_distribution_model.jl (ReinforcementLearning.jl, MIT) -----
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 12:45:00 2022
Author: Adam Coxson, PhD student, University of Liverpool
Department of Chemistry, Materials Innovation Factory, Levershulme Research Centre
Project: Delta ML Zindo
Module: FNN.py
Dependencies: scikit-learn, pandas, SciPy; all other libraries are standard.
This is demonstration code to obtain the results in the corresponding paper.
Running this will read and format all training and testing data. The network
trains on 10506 molecules and is tested on a further 524. It uses the Morgan
fingerprint and the Radial Distribution Function as inputs.
Note, on an i7 12700KF CPU, the training takes ~520 seconds (about 9 minutes) to run.
"""
import time, os, csv
import numpy as np
import pandas as pd
from math import sqrt
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.linear_model import LinearRegression
def preprocess(df,df_test,xcols,separate_test=True):
'''
Parameters
----------
    df: pd.DataFrame
        training data
    df_test: pd.DataFrame
        test data
xcols: list
list of features
separate_test: bool
whether to use test set or not
Returns
-------
X: np.array
features of training set
y: np.array
target property of training set
E_lin: np.array
Linear fit prediction for y from X
X_test: np.array
features of test set
y_test: np.array
target property of test set
E_lin_test: np.array
Linear fit prediction for y_test from X_test
'''
# Calculate y as Delta_E
E_zindo = df['S1_ZINDO'].values.reshape(-1, 1)
E_tddft = df['S1_TDDFT'].values.reshape(-1, 1)
linear_regressor = LinearRegression()
linear_regressor.fit(E_zindo, E_tddft)
E_lin = linear_regressor.predict(E_zindo)
y = []
for i in range(len(E_lin)):
Delta_E = E_tddft[i][0] - E_lin[i][0]
y.append(Delta_E)
y=np.array(y)
X = df[xcols].values
X_fp = []
X_RDF = []
for i in range(len(X)):
        # Assign features: the fingerprint and RDF columns are stored as
        # stringified Python lists, so eval() parses them back into lists
        fp = eval(X[i][0])
        RDF = eval(X[i][1])
X_fp.append(fp)
X_RDF.append(RDF)
X = np.c_[X_RDF,X_fp]
    if separate_test == False:
        X_test = None
        y_test = None
        E_lin_test = None
elif separate_test == True:
E_zindo_test = df_test['S1_ZINDO'].values.reshape(-1, 1)
E_tddft_test = df_test['S1_TDDFT'].values.reshape(-1, 1)
E_lin_test = linear_regressor.predict(E_zindo_test)
y_test = []
for i in range(len(E_tddft_test)):
Delta_E = E_tddft_test[i][0] - E_lin_test[i][0]
y_test.append(Delta_E)
y_test=np.array(y_test)
X_test = df_test[xcols].values
X_test_fp = []
X_test_RDF = []
for i in range(len(X_test)):
# Assign features
fp = eval(X_test[i][0])
x_RDF = eval(X_test[i][1])
X_test_fp.append(fp)
X_test_RDF.append(x_RDF)
X_test = np.c_[X_test_RDF,X_test_fp]
    E_lin = np.squeeze(E_lin)
    if E_lin_test is not None:
        E_lin_test = np.squeeze(E_lin_test)
return X, y, E_lin, X_test, y_test, E_lin_test
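# Hedged illustration (not part of the original script; function name and toy data
# are assumptions): the delta-ML target built by preprocess, reduced to synthetic arrays.
def _delta_target_toy_example():
    rng = np.random.RandomState(0)
    E_zindo = rng.uniform(2.0, 4.0, size=(50, 1))                   # fake ZINDO S1 energies
    E_tddft = 0.9 * E_zindo + 0.3 + rng.normal(0, 0.05, (50, 1))    # fake TDDFT S1 energies
    lin = LinearRegression().fit(E_zindo, E_tddft)                  # linear baseline
    return (E_tddft - lin.predict(E_zindo)).ravel()                 # residual the FNN learns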
def train_MLP(cfg, net_input, net_target, y_lin_pred, n_kfold):
"""
Function to train a Multi-layer Perceptron (Feed-Forward Neural Network).
Parameters
----------
cfg : List of tuples, strings, and floats.
Hyperparameters for the MLP architecture.
net_input : Array, size (N, M)
Input into network, N data points of M features each.
net_target : Array, size N
Target for network validation.
y_lin_pred : Array, size N
TDDFT energy prediction from linear fitting
n_kfold : int
number of folds in cross validation.
Returns
-------
ML_func : object neural_network.MLPRegressor
The optimised neural network model, can be used for further test data.
results : Array of float64 (N,2)
For N data points, y_real and y_pred are output.
metrics : list of floats
Metrics such as the rms and median from y_real, y_pred.
"""
t1=time.time()
ML_func = MLPRegressor(hidden_layer_sizes=cfg[0], max_iter=cfg[1], batch_size=cfg[2], learning_rate_init=cfg[3], activation=cfg[4],
random_state=1, learning_rate='adaptive',solver='adam',verbose=False, tol=1e-4).fit(net_input, net_target)
    cv = KFold(n_splits=n_kfold, shuffle=True, random_state=0)
    y_pred = cross_val_predict(estimator=ML_func, X=net_input, y=net_target, cv=cv, n_jobs=n_kfold)
t2=time.time()
y_real = net_target
rms = sqrt(mean_squared_error(y_real, y_pred))
rd,_ = pearsonr(y_real, y_pred)
r,_ = pearsonr(y_lin_pred+y_real, y_lin_pred+y_pred)
errors = abs(y_real - y_pred)
median_error = np.median(errors)
score = ML_func.score(net_input, net_target)
results = np.array([y_real,y_pred]).T
metrics = [rms, median_error, r, score]
print('\nTraining metrics:')
print('rms',rms)
print('median_error',median_error)
print("r",r)
print("r-\u0394",rd)
print("Score",score)
print('Process took %.3f seconds' %(t2-t1))
return ML_func, results, metrics
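# Minimal sketch (toy data, illustrative only): cross_val_predict clones and refits
# the estimator on each training fold, so the y_pred used above is an out-of-fold
# prediction rather than the output of the already-fitted ML_func.
def _cross_val_predict_toy_example():
    rng = np.random.RandomState(0)
    X_toy = rng.normal(size=(100, 3))
    y_toy = X_toy @ np.array([1.0, -2.0, 0.5]) + rng.normal(0, 0.1, 100)
    cv = KFold(n_splits=5, shuffle=True, random_state=0)
    est = MLPRegressor(hidden_layer_sizes=(16,), max_iter=2000, random_state=1)
    return cross_val_predict(est, X_toy, y_toy, cv=cv)              # out-of-fold predictions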
def test_MLP(ML_func, net_input, net_target, y_lin_pred, n_kfold):
"""
Function to test a Multi-layer Perceptron on unseen data after training.
Parameters
----------
ML_func : object neural_network.MLPRegressor
The optimised neural network model, obtained from previous training.
net_input : Array, size (N, M)
Input into network, N data points of M features each.
net_target : Array, size N
Target for network validation.
y_lin_pred : Array, size N
TDDFT energy prediction from linear fitting
    n_kfold : int
        Unused here; kept for call-signature symmetry with train_MLP.
Returns
-------
results : Array of float64 (N,2)
For N data points, y_real and y_pred are output.
metrics : list of floats
Metrics such as the rms and median from y_real, y_pred.
"""
t1=time.time()
y_pred = ML_func.predict(X=net_input)
t2=time.time()
y_real = net_target
rms = sqrt(mean_squared_error(y_real, y_pred))
rd,_ = pearsonr(y_real, y_pred)
r,_ = pearsonr(y_lin_pred+y_real, y_lin_pred+y_pred)
errors = abs(y_real - y_pred)
median_error = np.median(errors)
score = ML_func.score(net_input, net_target)
results = np.array([y_real, y_pred]).T
metrics = [rms, median_error, r, score]
print('\nTesting metrics:')
print('rms',rms)
print('median_error',median_error)
print("r",r)
print("r-\u0394",rd)
print("Score",score)
print('Process took %.6f seconds' %(t2-t1))
return results, metrics
def write_to_csv(df, E_ml, filename='data_save'):
"""
Parameters
----------
    df: pd.DataFrame
        training or testing data
    E_ml : np.array, 1D
        FNN prediction of TDDFT energy.
    filename : str, optional
        filepath to save data to. The default is 'data_save'.
Returns
-------
None.
"""
E_zindo = df['S1_ZINDO'].values
E_tddft = df['S1_TDDFT'].values
data = [E_zindo, E_tddft, E_ml]
data = np.asarray(data).T
    with open(filename, 'w') as f:
        writer = csv.writer(f, lineterminator="\n")
        writer.writerow(["S1_ZINDO", "S1_TDDFT", "S1_ML"])
        writer.writerows(data)
print("Written to",filename,"successfully.")
return None
################################################################################
if __name__ == '__main__':
# Set file names
train_csv_file = 'train_data.csv'
test_csv_file = 'test_data.csv'
train_results_csv = 'results_train.csv'
test_results_csv = 'results_test.csv'
folder_path = os.getcwd() + "/database/"
results_path = os.getcwd() + "/"
separate_test = True
xcols = ['fingerprint','RDF']
# [(neurons layer 1, neurons layer 2), iterations, batch size, learning rate, activation]
cfg = [(703, 312), 1000, 100, 0.001118462, 'relu'] # FNN hyperparameters
kfold=10
# Preprocess
print("Data processing")
df = pd.read_csv(folder_path+train_csv_file)
df_test = pd.read_csv(folder_path+test_csv_file)
X,y,E_lin, X_test,y_test,E_lin_test = preprocess(df,df_test,xcols,separate_test)
print('Processing done\nTraining Network')
# Network training on RDF and FP of 10506 molecules
optimised_network, results, metrics = train_MLP(cfg, net_input=X, net_target=y, y_lin_pred=E_lin, n_kfold=kfold)
# Optimised network tested on 524 molecules.
results_test, metrics_test = test_MLP(optimised_network, net_input=X_test,
net_target=y_test, y_lin_pred=E_lin_test, n_kfold=kfold)
print("\nSaving results to",results_path)
write_to_csv(df=df, E_ml=E_lin+results[:,1], filename=results_path+train_results_csv)
write_to_csv(df=df_test, E_ml=E_lin_test+results_test[:,1], filename=results_path+test_results_csv)
# ----- end of FFNN.py (marcosdelcueto/DeltaML_excited_states, MIT) -----
import math
import cmath
import warnings
#import scipy.constants as physical_constants # Container of physical constants which might be of use
import numpy as np
from Material import Material
class MaterialSolid(Material):
"""
Solid material class
"""
__name__ = 'solid'
    name = 'Solid'
description = 'A material in solid state.'
young = 0.0
shear = 0.0
poisson = 0.0
###class MaterialSolid(Material):
###"""
###Solid material class
###"""
#### Elastic moduli
###_young = None
###_bulk = None
###_shear = None
###_poisson = None
###_isotropic = True # Whether the material is isotropic or not.
####_tensile_strength_break = None
####_tensile_strength_yield = None
####_tensile_strength_ultimate = None
####_tensile_elongation_break = None
####_tensile_elongation_yield = None
####_tensile_elongation_ultimate = None
###def _get_isotropic(self):
###"""
###Return whether the solid is isotropic or not.
###"""
####if self._isotropic is not None:
###return self._isotropic
###def _set_isotropic(self, x):
###"""
###Set whether the solid is isotropic or not.
###:param x: is a boolean
###"""
###if type(x) is bool: # Perhaps use a try here?
###self._isotropic = x
###isotropic = property(fget=_get_isotropic, fset=_set_isotropic)
###"""
###Isotropic material or not.
###"""
###def elastic_moduli_given(self):
###"""
###Returns the amount of elastic module that were specified.
###"""
###return (bool(self._young) + bool(self._bulk) + bool(self._shear) + bool(self._poisson))
###def _del_elastic_moduli(attr):
###def del_attr(self):
###setattr(self, attr, None) # Check first whether it can actually be deleted? E.g. when it has not been set at all.
###return del_attr
###def _set_elastic_moduli(attr):
###def set_attr(self, x):
###if self.elastic_moduli_given() < 2: #and isinstance(x, float):
###setattr(self, attr, x)
###else:
###warnings.warn('Two elastic moduli have already been set. Please delete one before adding a new one.')
###return set_attr
###def _get_elastic_moduli(attr):
###"""Retrieve the value of the elastic modulus. Check first whether the value is stored. If not, calculate it from two given moduli."""
###def get_attr(self):
###if getattr(self, attr) is not None:
###return getattr(self, attr) # then we should return it instead of trying to calculate it.
###elif self.isotropic and self.elastic_moduli_given() >= 2: # But only when isotropic!!!
#### Calculate Bulk
###if bool(attr =='_bulk' and self._young and self._shear):
###return (self.young * self.shear) / (9.0 * self.shear - 3.0 * self.young)
###elif bool(attr =='_bulk' and self._young and self._poisson):
###return (self.young) / (3.0 - 6.0 * self.poisson)
###elif bool(attr =='_bulk' and self._shear and self._poisson):
###return 2.0 * self.shear * (1.0 + self.poisson) / (3.0 - 6.0 * self.poisson)
#### Calculate Young
###elif bool(attr =='_young' and self._bulk and self._shear):
###return 9.0 * self.bulk * self.shear / (3.0 * self.bulk + self.shear)
###elif bool(attr =='_young' and self._bulk and self._poisson):
###return 3.0 * self.bulk * (1.0 - 2.0 * self.poisson)
###elif bool(attr =='_young' and self._shear and self._poisson):
###return 2.0 * self.shear * (1.0 + self.poisson)
#### Calculate Shear
###elif bool(attr =='_shear' and self._bulk and self._young):
###return 3.0 * self.bulk * self.young / (9 * self.bulk - self.young)
###elif bool(attr =='_shear' and self._bulk and self._poisson):
###return 3.0 * self.bulk * (1.0 - 2.0 * self.poisson) / (2.0 + 2.0 * self.poisson)
###elif bool(attr =='_shear' and self._young and self._poisson):
###return self.young / (2.0 + 2.0 * self.poisson)
#### Calculate Poisson
###elif bool(attr =='_poisson' and self._bulk and self._young):
###return ( 3.0 * self.bulk - self.young) / (6.0 * self.bulk)
###elif bool(attr =='_poisson' and self._bulk and self._shear):
###return (3.0 * self.bulk - 2.0 * self.shear) / (6.0 * self.bulk + 2.0 * self.shear)
###elif bool(attr =='_poisson' and self._young and self._shear):
###return (self.young) / (2.0*self.shear) - 1.0
###else:
###raise ValueError
###else:
###warnings.warn('The modulus was not given for this material and could not be calculated either.')
###return get_attr
###young = property(fget=_get_elastic_moduli('_young'), fset=_set_elastic_moduli('_young'), fdel=_del_elastic_moduli('_young')) # Young's modulus, or Tensile modulus
###"""
###Young's modulus :math:`E`.
###The value can be set or calculated when the material is isotropic and elastic_moduli_given equals two.
###"""
###bulk = property(fget=_get_elastic_moduli('_bulk'), fset=_set_elastic_moduli('_bulk'), fdel=_del_elastic_moduli('_bulk')) # Bulk modulus
###"""
###Bulk modulus :math:`K`
###The value can be set or calculated when the material is isotropic and elastic_moduli_given equals two.
###"""
###shear = property(fget=_get_elastic_moduli('_shear'), fset=_set_elastic_moduli('_shear'), fdel=_del_elastic_moduli('_shear')) # Shear modulus
###"""
###Shear modulus :math:`G`
###The value can be set or calculated when the material is isotropic and elastic_moduli_given equals two.
###"""
###poisson = property(fget=_get_elastic_moduli('_poisson'), fset=_set_elastic_moduli('_poisson'), fdel=_del_elastic_moduli('_poisson')) # Poisson modulus
###"""
###Poisson ratio :math:`\\nu`
###The value can be set or calculated when the material is isotropic and elastic_moduli_given equals two.
###"""
# ----- end of Sea/model/materials/MaterialSolid.py (FRidh/Sea, BSD-3-Clause) -----
import numpy
from gensim import models, corpora
from gensim.similarities import MatrixSimilarity
from nltk.corpus import stopwords as pw
from nltk.tokenize import sent_tokenize
from nltk.tree import Tree
from spellchecker import SpellChecker
from stanfordcorenlp import StanfordCoreNLP
from src.config import STANFORDCORENLP_PATH
def spell_error(dataset):
"""
count the number of spelling errors in each essay
:param dataset: (list)
:return:
"""
print("spell_error")
spell = SpellChecker()
Max = 0
Min = 9999
ret = []
for data in dataset:
        essay = [token for token in data['essay_token'] if len(token) > 2 and token[0] != '@']
misspelled = spell.unknown(essay)
data['spell_error'] = len(misspelled)
ret.append(len(misspelled))
if len(misspelled) > Max:
Max = len(misspelled)
if len(misspelled) < Min:
Min = len(misspelled)
#
# x = [sample['spell_error'] for sample in dataset]
# y = [sample['domain1_score'] for sample in dataset]
# plt.plot(x, y, 'o')
# plt.show()
# return {'Max': Max, 'Min': Min}
return numpy.array(ret).reshape(-1, 1)
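# Minimal usage sketch (tokens are made up): pyspellchecker's `unknown` returns the
# subset of tokens not found in its dictionary, which is what spell_error counts.
def _spell_error_toy_example():
    spell = SpellChecker()
    return spell.unknown(['these', 'wrds', 'are', 'misspeled'])     # -> {'wrds', 'misspeled'}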
def count_tree_depth(root):
def travel(root, length, res):
if type(root) == str:
res.append(length)
return
l = len(root)
for i in range(l):
if root[i]:
travel(root[i], length + 1, res)
if root is None:
return []
l = []
travel(root, 1, l)
return l
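# Toy illustration (the parse string is invented): count_tree_depth returns one depth
# per leaf token of an nltk Tree, the same type produced by the CoreNLP parser below.
def _tree_depth_toy_example():
    tree = Tree.fromstring('(S (NP (PRP I)) (VP (VBP run)))')
    return count_tree_depth(tree)       # one entry per leaf; the max is the tree level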
def Mean_sentence_depth_level(dataset):
"""
    Mean sentence depth and maximum level in the constituency parse tree
    :param dataset: (list)
:return:
"""
print("Mean_sentence_depth_level")
# nlp = stanfordnlp.Pipeline()
nlp = StanfordCoreNLP(STANFORDCORENLP_PATH)
Max_depth = 0
Min_depth = 9999
Max_level = 0
Min_level = 9999
ret_depth = []
ret_level = []
for data in dataset:
essay = data['essay']
# doc = nlp(essay)
# for sentence in doc.sentences:
# print(sentence)
# print('')
sentences = sent_tokenize(essay)
depth_all = 0
level_all = 0
for sentence in sentences:
parse_tree = nlp.parse(sentence)
tree = Tree.fromstring(parse_tree)
distance = count_tree_depth(tree)
distance = numpy.array(distance)
depth = numpy.sum(distance)
level = numpy.amax(distance)
depth_all += depth
level_all += level
data['mean_sentence_depth'] = depth_all / len(sentences)
data['mean_sentence_level'] = level_all / len(sentences)
ret_depth.append(data['mean_sentence_depth'])
ret_level.append(data['mean_sentence_level'])
if data['mean_sentence_depth'] > Max_depth:
Max_depth = data['mean_sentence_depth']
if data['mean_sentence_depth'] < Min_depth:
Min_depth = data['mean_sentence_depth']
if data['mean_sentence_level'] > Max_level:
Max_level = data['mean_sentence_level']
if data['mean_sentence_level'] < Min_level:
Min_level = data['mean_sentence_level']
# return {'Max_depth': Max_depth, 'Min_depth': Min_depth, 'Max_level': Max_level, 'Min_level': Min_level}
return numpy.array(ret_depth).reshape(-1, 1), numpy.array(ret_level).reshape(-1, 1)
def essay_length(dataset):
"""
Fourth root of essay length in words
:param dataset: list
:return:
"""
Max = 0
Min = 9999
ret = []
print("essay_length")
for data in dataset:
essay = data['essay_token']
length = len(essay)
length = pow(length, 1.0 / 4)
data['essay_length'] = length
ret.append(length)
if length > Max:
Max = length
if length < Min:
Min = length
# x = [sample['essay_length'] for sample in dataset]
# y = [sample['domain1_score'] for sample in dataset]
# plt.plot(x, y, 'o')
# plt.show()
# return {'Max': Max, 'Min': Min}
return numpy.array(ret).reshape(-1, 1)
def semantic_vector_similarity(dataset, test_data):
"""
Mean cosine similarity to other essays’ semantic vector
    :param dataset: list
    :param test_data: list
:return:
"""
print("semantic_vector_similarity")
cacheStopWords = pw.words("english")
punc = ['.', ',', '?', '!', '@', '"', 'n\'t']
cacheStopWords.extend(punc)
token_sets = []
# print(cacheStopWords)
for data in dataset:
essay_token = [word for word in data['essay_token'] if word.lower() not in cacheStopWords]
token_sets.append(essay_token)
dictionary = corpora.Dictionary(token_sets)
corpus = [dictionary.doc2bow(tokens) for tokens in token_sets]
lsi_model = models.LsiModel(corpus, id2word=dictionary, num_topics=20)
documents = lsi_model[corpus]
topics = lsi_model.show_topics(num_words=5, log=0)
Min = 9999
Max = 0
scores = numpy.array([sample['domain1_score'] for sample in dataset])
index = MatrixSimilarity(documents)
predict_score_list = []
score_list = []
for sample, essay in zip(test_data, corpus):
query = essay
query_vec = lsi_model[query]
# print(query)
sim = index[query_vec]
idxs = sim.argsort()[-20:-1][::-1]
# print(idxs)
        # nearest-neighbour similarities/scores (inspected during development, not used below)
        _sim = [sim[idx] for idx in idxs]
        _scores = [scores[idx] for idx in idxs]
# print(sim)
predict_score = numpy.sum(numpy.multiply(scores, sim)) / len(dataset)
sample['semantic_vector_similarity'] = predict_score
# print(predict_score, sample['domain1_score'])
predict_score_list.append(predict_score)
# score_list.append(sample['domain1_score'])
# plt.plot(predict_score_list, score_list, 'o')
# plt.show()
return numpy.array(predict_score_list).reshape(-1, 1)
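# Hedged sketch (three-token "essays", illustrative only): the gensim LSI plus
# MatrixSimilarity machinery used above, reduced to a tiny corpus.
def _lsi_similarity_toy_example():
    docs = [['cat', 'sat', 'mat'], ['dog', 'sat', 'log'], ['cat', 'dog', 'pet']]
    dictionary = corpora.Dictionary(docs)
    corpus = [dictionary.doc2bow(d) for d in docs]
    lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
    index = MatrixSimilarity(lsi[corpus], num_features=2)
    return index[lsi[corpus[0]]]        # cosine similarity of doc 0 to every doc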
# ----- end of src/feature/iku.py (wangqi1996/Essay_Scoring, MIT) -----
'''
----------- Example_14 --------------
Load a turbine, tune a controller with open loop control commands
-------------------------------------
In this example:
- Load a turbine from OpenFAST
- Tune a controller
- Write open loop inputs
- Run simple simulation with open loop control
'''
# Python Modules
import yaml, os, platform
import numpy as np
import matplotlib.pyplot as plt
# ROSCO toolbox modules
from ROSCO_toolbox import controller as ROSCO_controller
from ROSCO_toolbox import turbine as ROSCO_turbine
from ROSCO_toolbox import utilities as ROSCO_utilities
from ROSCO_toolbox.ofTools.fast_io import output_processing
from ROSCO_toolbox.inputs.validation import load_rosco_yaml
from ROSCO_toolbox.ofTools.case_gen.CaseLibrary import set_channels
from ROSCO_toolbox.ofTools.case_gen.runFAST_pywrapper import runFAST_pywrapper, runFAST_pywrapper_batch
from ROSCO_toolbox.ofTools.case_gen.CaseGen_General import CaseGen_General
this_dir = os.path.dirname(os.path.abspath(__file__))
rosco_dir = os.path.dirname(this_dir)
example_out_dir = os.path.join(this_dir,'examples_out')
if not os.path.isdir(example_out_dir):
os.makedirs(example_out_dir)
# Load yaml file (Open Loop Case)
parameter_filename = os.path.join(rosco_dir,'Tune_Cases/IEA15MW_OL.yaml')
inps = load_rosco_yaml(parameter_filename)
path_params = inps['path_params']
turbine_params = inps['turbine_params']
controller_params = inps['controller_params']
# Set up open loop input
olc = ROSCO_controller.OpenLoopControl(t_max=20)
olc.interp_timeseries(
'blade_pitch',
[0,20],
    [0, 0.0873],
'sigma'
)
olc.const_timeseries(
'generator_torque',
19624046*.5
)
olc.sine_timeseries('nacelle_yaw', 0.0524, 60)
# Plot open loop timeseries
fig,ax = olc.plot_timeseries()
if False:  # set True to show the plot interactively instead of saving it
plt.show()
else:
fig.savefig(os.path.join(example_out_dir,'14_OL_Inputs.png'))
# Write open loop input, get OL indices
ol_filename = os.path.join(example_out_dir,'14_OL_Input.dat')
ol_dict = olc.write_input(ol_filename)
controller_params['open_loop'] = ol_dict
# Instantiate turbine, controller, and file processing classes
turbine = ROSCO_turbine.Turbine(turbine_params)
controller = ROSCO_controller.Controller(controller_params)
# Load turbine data from OpenFAST and rotor performance text file
turbine.load_from_fast(path_params['FAST_InputFile'], \
os.path.join(this_dir,path_params['FAST_directory']), \
dev_branch=True,rot_source='txt',\
txt_filename=os.path.join(this_dir,path_params['FAST_directory'],path_params['rotor_performance_filename']))
# Tune controller
controller.tune_controller(turbine)
# Write parameter input file
param_file = os.path.join(this_dir,'DISCON.IN') # This must be named DISCON.IN to be seen by the compiled controller binary.
ROSCO_utilities.write_DISCON(turbine,controller,param_file=param_file, txt_filename=path_params['rotor_performance_filename'])
### Run OpenFAST using aeroelasticse tools
# Set rosco_dll
if platform.system() == 'Windows':
rosco_dll = os.path.join(rosco_dir, 'ROSCO/build/libdiscon.dll')
elif platform.system() == 'Darwin':
rosco_dll = os.path.join(rosco_dir, 'ROSCO/build/libdiscon.dylib')
else:
rosco_dll = os.path.join(rosco_dir, 'ROSCO/build/libdiscon.so')
case_inputs = {}
case_inputs[('ServoDyn','DLL_FileName')] = {'vals': [rosco_dll], 'group': 0}
# Apply all discon variables as case inputs
discon_vt = ROSCO_utilities.DISCON_dict(
turbine,
controller,
txt_filename=os.path.join(this_dir,path_params['FAST_directory'],path_params['rotor_performance_filename'])
)
for discon_input in discon_vt:
case_inputs[('DISCON_in',discon_input)] = {'vals': [discon_vt[discon_input]], 'group': 0}
case_inputs[('Fst','TMax')] = {'vals': [20], 'group': 0}
case_inputs[('InflowWind','HWindSpeed')] = {'vals': [10], 'group': 0}
case_inputs[('ElastoDyn','HWindSpeed')] = {'vals': [5.], 'group': 0}
case_inputs[('DISCON_in','LoggingLevel')] = {'vals': [3], 'group': 0}
# Generate cases
run_dir = os.path.join(example_out_dir,'14_OL_Sim')
if not os.path.exists(run_dir):
os.makedirs(run_dir)
case_list, case_name_list = CaseGen_General(case_inputs, dir_matrix=run_dir, namebase='OL_Example')
channels = set_channels()
# Run FAST cases
fastBatch = runFAST_pywrapper_batch(FAST_ver='OpenFAST',dev_branch = True)
fastBatch.FAST_directory = os.path.realpath(os.path.join(rosco_dir,'Tune_Cases',path_params['FAST_directory']))
fastBatch.FAST_InputFile = path_params['FAST_InputFile']
fastBatch.channels = channels
fastBatch.FAST_runDirectory = run_dir
fastBatch.case_list = case_list
fastBatch.case_name_list = case_name_list
fastBatch.debug_level = 2
fastBatch.FAST_exe = 'openfast'
fastBatch.run_serial()
# # Define Plot cases
cases = {}
cases['Baseline'] = ['Wind1VelX', 'BldPitch1', 'GenTq', 'RotSpeed','NacYaw']
out_file = os.path.join(example_out_dir,'14_OL_Sim/OL_Example_0.outb')
op = output_processing.output_processing()
fastout = op.load_fast_out(out_file, tmin=0)
fig, ax = op.plot_fast_out(cases=cases,showplot=False)
if False:  # set True to show the plot interactively instead of saving it
plt.show()
else:
fig[0].savefig(os.path.join(example_out_dir,'14_OL_FAST_Out.png'))
# ----- end of Examples/example_14.py (ptrbortolotti/ROSCO, Apache-2.0) -----
## same as the analytic case but with the fft
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import cond
import cmath;
from scipy import linalg as LA
from numpy.linalg import solve as bslash
import time
from convolution_matrices.convmat1D import *
from RCWA_1D_functions.grating_fft.grating_conv import *
def nonHermitianEigenSorter(eigenvalues):
N = len(eigenvalues);
sorted_indices=[];
sorted_eigs = [];
for i in range(N):
eig = eigenvalues[i];
if(np.real(eig)>0 and np.imag(eig) == 0):
sorted_indices.append(i); sorted_eigs.append(eig);
elif(np.real(eig)==0 and np.imag(eig) > 0):
sorted_indices.append(i); sorted_eigs.append(eig);
elif(np.real(eig)>0 and abs(np.imag(eig)) > 0):
sorted_indices.append(i); sorted_eigs.append(eig);
return sorted_eigs, sorted_indices;
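# Quick sanity sketch (synthetic eigenvalues, not from a physical structure): the
# sorter keeps modes with positive real part, or purely imaginary eigenvalues
# with positive imaginary part.
def _eigen_sorter_toy_example():
    eigs = np.array([1.0 + 0j, -1.0 + 0j, 0 + 2j, 0 - 2j, 0.5 + 0.5j])
    return nonHermitianEigenSorter(eigs)    # keeps 1.0, 2j and 0.5+0.5j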
# Moharam et al. formulation for stable and efficient implementation of RCWA
plt.close("all")
'''
1D TM implementation of PLANAR DIFFRACTION
STILL NOT WORKING YET
Sign convention: exp(-ikr) is the positive propagating wave, so loss is + not -
source for the Fourier decompositions is the paper: Formulation for stable and efficient
implementation of the rigorous coupled-wave analysis of binary gratings by Moharam et al.
'''
# plt.plot(x, np.real(fourier_reconstruction(x, period, 1000, 1,np.sqrt(12), fill_factor = 0.1)));
# plt.title('check that the analytic fourier series works')
# #'note that the lattice constant tells you the length of the ridge'
# plt.show()
L0 = 1e-6;
e0 = 8.854e-12;
mu0 = 4*np.pi*1e-7;  # vacuum permeability, H/m
fill_factor = 0.3; # 30% of the unit cell is the ridge material
num_ord = 3; #INCREASING NUMBER OF ORDERS SEEMS TO CAUSE THIS THING TO FAIL; too many orders induce evanescence...particularly
# when there is a small fill factor
PQ = 2*num_ord+1;
indices = np.arange(-num_ord, num_ord+1)
n_ridge = 3.48; #3.48; # ridge
n_groove = 1; # groove (unit-less)
lattice_constant = 0.7; # SI units
# we need to be careful about what lattice constant means
# in the gaylord paper, lattice constant exactly means (0, L) is one unit cell
d = 0.46; # thickness, SI units
Nx = 2*256;
eps_r = n_groove**2*np.ones((2*Nx, 1)); #put in a lot of points in eps_r
border = int(2*Nx*fill_factor);
eps_r[0:border] = n_ridge**2;
fft_fourier_array = grating_fft(eps_r);
x = np.linspace(-lattice_constant,lattice_constant,1000);
period = lattice_constant;
## simulation parameters
theta = (0)*np.pi/180;
spectra = list();
spectra_T = list();
wavelength_scan = np.linspace(0.5, 2, 100)
## construct permittivity harmonic components E
#fill factor = 0 is complete dielectric, 1 is air
##construct convolution matrix
Ezz = np.zeros((2 * num_ord + 1, 2 * num_ord + 1)); Ezz = Ezz.astype('complex')
p0 = Nx; #int(Nx/2);
p_index = np.arange(-num_ord, num_ord + 1);
q_index = np.arange(-num_ord, num_ord + 1);
fourier_array = fft_fourier_array;#fourier_array_analytic;
detected_pffts = np.zeros_like(Ezz);
for prow in range(2 * num_ord + 1):
    # first term locates z plane, 2nd locates y column, prow locates x
row_index = p_index[prow];
for pcol in range(2 * num_ord + 1):
pfft = p_index[prow] - p_index[pcol];
detected_pffts[prow, pcol] = pfft;
Ezz[prow, pcol] = fourier_array[p0 + pfft]; # fill conv matrix from top left to top right
Exz = np.zeros_like(Ezz);
Ezx = -np.zeros_like(Ezz);
Exz = 0.2*np.eye(PQ)
Ezx = Exz;
print((Exz.shape, Ezx.shape, Ezz.shape))
## FFT of 1/e;
inv_fft_fourier_array = grating_fft(1/eps_r);
##construct convolution matrix
E_conv_inv = np.zeros((2 * num_ord + 1, 2 * num_ord + 1));
E_conv_inv = E_conv_inv.astype('complex')
p0 = Nx;
p_index = np.arange(-num_ord, num_ord + 1);
for prow in range(2 * num_ord + 1):
    # first term locates z plane, 2nd locates y column, prow locates x
for pcol in range(2 * num_ord + 1):
pfft = p_index[prow] - p_index[pcol];
E_conv_inv[prow, pcol] = inv_fft_fourier_array[p0 + pfft]; # fill conv matrix from top left to top right
## IMPORTANT TO NOTE: the indices for everything beyond this point are indexed from -num_ord to num_ord+1
## alternate construction of 1D convolution matrix
PQ =2*num_ord+1;
I = np.eye(PQ)
zeros = np.zeros((PQ, PQ))
# E is now the convolution of fourier amplitudes
for wvlen in wavelength_scan:
j = cmath.sqrt(-1);
lam0 = wvlen; k0 = 2 * np.pi / lam0; #free space wavelength in SI units
print('wavelength: ' + str(wvlen));
## =====================STRUCTURE======================##
## Region I: reflected region (half space)
n1 = 1;#cmath.sqrt(-1)*1e-12; #apparently small complex perturbations are bad in Region 1, these shouldn't be necessary
## Region 2; transmitted region
n2 = 1;
#from the kx_components given the indices and wvln
kx_array = k0*(n1*np.sin(theta) + indices*(lam0 / lattice_constant)); #0 is one of them, k0*lam0 = 2*pi
k_xi = kx_array;
## IMPLEMENT SCALING: these are the fourier orders of the x-direction decomposition.
KX = np.diag((k_xi/k0)); #singular since we have a n=0, m= 0 order and incidence is normal
# PQ_block = np.block([[zeros, np.linalg.inv(E_conv_inv)],[KX@bslash(E, KX) - I, zeros]])
# # plt.imshow(np.abs(PQ_block));
# # plt.show();
# print('condition of PQ block: '+str(np.linalg.cond(PQ_block)))
# big_eigenvals, bigW = LA.eig(PQ_block);
# print((bigW.shape, big_eigenvals.shape))
# Wp = bigW[0:PQ, PQ:]
# plt.imshow(abs(bigW))
# plt.show();
## construct matrix of Gamma^2 ('constant' term in ODE):
    ## note: bslash is a matrix solve (left division), not element-by-element division
B = (KX@bslash(Ezz, KX) - I);
bE = np.linalg.inv(E_conv_inv) + bslash(Ezz,(Exz@Ezx)); #/Ezz;
G = j*bslash(Ezz,Ezx) @ KX;
H = j*KX @bslash(Ezz, Exz);
#print((G,H))
print((bE.shape,G.shape, H.shape))
print((np.linalg.cond(B), np.linalg.cond(bE)))
M = np.linalg.inv(bE);
K = -(B + H@np.linalg.inv(bE)@G);
C = -np.linalg.inv(bE)@G - H@np.linalg.inv(bE);
Z = np.zeros_like(M);
I = np.eye(M.shape[0], M.shape[1]);
OA = np.block([[M, Z],[Z, I]])
OB = np.block(np.block([[C, K],[-I, Z]]))
## these matrices aren't poorly conditioned
print((np.linalg.cond(OA), np.linalg.cond(OB)))
    ## solve eigenvalues;
beigenvals, bigW = LA.eig(OB, OA); #W contains eigenmodes of the form (lambda x, x)
## AT THIS POINT, we have still extracted TWO times the number of eigenvalues...
#try rounding...
rounded_beigenvals = np.array([round(i,8) for i in beigenvals])
print(rounded_beigenvals)
#quadrant_sort = [1 if abs(np.real(i))>=0 and np.imag(i)>=0 else 0 for i in rounded_beigenvals];
sorted_eigs, sorted_indices = nonHermitianEigenSorter(rounded_beigenvals)
sorted_indices = np.nonzero(sorted_indices)[0];
#print(quadrant_sort)
# sorted_indices = np.nonzero(quadrant_sort)[0]
print(len(sorted_indices))
#sorted_indices = np.argsort(np.real(rounded_beigenvals))
sorted_eigenmodes = bigW[:, sorted_indices];
#print(sorted_eigenmodes)
#adding real and imaginary parts seems to work...
sorted_eigenvals = beigenvals[sorted_indices]
print(sorted_eigenvals)
W = sorted_eigenmodes[PQ:,:]
eigenvals_wp = (sorted_eigenvals[0:PQ]);
# plt.subplot(121)
# plt.plot(np.real(beigenvals), np.imag(beigenvals), '.', markersize = 20); plt.title('1st');
# plt.subplot(122)
# plt.plot(np.real(beigenvals), np.imag(beigenvals), '.', markersize = 20);
# plt.plot(np.real(eigenvals_wp), (np.imag(eigenvals_wp)), '.r', markersize = 10)
# plt.show();
# ##
Q = np.diag(eigenvals_wp); #eigenvalue problem is for kz, not kz^2
V = np.linalg.inv(bE)@(W @ Q + H @ W);
    X = np.diag(np.exp(-k0*np.diag(Q)*d)); #this is poorly conditioned because of the exponentiation
## pointwise exponentiation vs exponentiating a matrix
## observation: almost everything beyond this point is worse conditioned
k_I = k0**2*(n1**2 - (k_xi/k0)**2); #k_z in reflected region k_I,zi
k_II = k0**2*(n2**2 - (k_xi/k0)**2); #k_z in transmitted region
k_I = k_I.astype('complex'); k_I = np.sqrt(k_I);
k_II = k_II.astype('complex'); k_II = np.sqrt(k_II);
Z_I = np.diag(k_I / (n1**2 * k0 ));
Z_II = np.diag(k_II /(n2**2 * k0));
delta_i0 = np.zeros((len(kx_array),1));
delta_i0[num_ord] = 1;
n_delta_i0 = delta_i0*j*np.cos(theta)/n1;
## design auxiliary variables: SEE derivation in notebooks: RCWA_note.ipynb
# we want to design the computation to avoid operating with X, particularly with inverses
# since X is the worst conditioned thing
print((W.shape, V.shape))
#this appears to be worse and worse conditioned at higher orders...
O = np.block([
[W, W],
[V,-V]
]); #this is much better conditioned than S..
print('condition of O: '+str(np.linalg.cond(O)))
print((np.linalg.cond(W), np.linalg.cond(V)))
# plt.imshow(abs(O))
# plt.show();
f = I;
g = j * Z_II; #all matrices
fg = np.concatenate((f,g),axis = 0)
ab = np.matmul(np.linalg.inv(O),fg);
a = ab[0:PQ,:];
b = ab[PQ:,:];
term = X @ a @ np.linalg.inv(b) @ X;
f = W @ (I + term);
g = V@(-I+term);
T = np.linalg.inv(np.matmul(j*Z_I, f) + g);
T = np.dot(T, (np.dot(j*Z_I, delta_i0) + n_delta_i0));
R = np.dot(f,T)-delta_i0; #shouldn't change
T = np.dot(np.matmul(np.linalg.inv(b),X),T)
## calculate diffraction efficiencies
#I would expect this number to be real...
DE_ri = R*np.conj(R)*np.real(np.expand_dims(k_I,1))/(k0*n1*np.cos(theta));
DE_ti = T*np.conj(T)*np.real(np.expand_dims(k_II,1)/n2**2)/(k0*np.cos(theta)/n1);
print('R(lam)='+str(np.sum(DE_ri))+' T(lam) = '+str(np.sum(DE_ti)))
spectra.append(np.sum(DE_ri)); #spectra_T.append(T);
spectra_T.append(np.sum(DE_ti))
spectra = np.array(spectra);
spectra_T = np.array(spectra_T)
plt.figure();
plt.plot(wavelength_scan, spectra);
plt.plot(wavelength_scan, spectra_T)
plt.plot(wavelength_scan, spectra+spectra_T)
# plt.legend(['reflection', 'transmission'])
# plt.axhline(((3.48-1)/(3.48+1))**2,xmin=0, xmax = max(wavelength_scan))
# plt.axhline(((3.48-1)/(3.48+1)),xmin=0, xmax = max(wavelength_scan), color='r')
#
plt.show()
# ----- end of anisotropy_explorations/1D_Longitudinal_Anisotropy.py (zhaonat/RCWA, MIT) -----
import collections
import time
import numpy as np
from evalRnn import test, testCaptionedImages
from ReadCOCOUtil import ReadCOCOUtil
import gc
import ipdb
COCO = ReadCOCOUtil()
trainingImageIds = COCO.imgIdsTrain
validationImageIds = COCO.imgIdsVal
def validateModel(validationX, validationY, model, epoch, loss_acc):
metric = model.evaluate(validationX, validationY, batch_size=10, verbose=1)
loss_acc['acc'].append(metric[1])
loss_acc['loss'].append(metric[0])
loss_acc['iter'].append(epoch)
return loss_acc
def trainOnCaptionedImages(cocoVocab, model, batchSize, nbClasses,
weights=None, modelLabel='unlabelled', remind='Begining_Only'):
validationX, validationY = cocoVocab.captionActvToBatch(validationImageIds[:500], 'validation', 20, remind)
if weights is not None:
model.set_weights(weights)
#Save out our last best weights
model.save_weights(modelLabel + '.h5')
trainLen = .25*len(trainingImageIds)
miniBatchSize = 500
loss_acc = {'loss': [], 'acc':[], 'iter':[]}
for i in range(10):
print "Epoch " + str(i)
loss_acc = validateModel(validationX, validationY, model, i, loss_acc)
epoch = 0
while (miniBatchSize*epoch + miniBatchSize) <= trainLen:
            print('Percent complete: ' + str((float(miniBatchSize)*epoch)/trainLen * 100.0))
X, Y = cocoVocab.captionActvToBatch(trainingImageIds[(miniBatchSize*epoch):(miniBatchSize*epoch + miniBatchSize)], 'train', 20, remind)
model.fit(x=X, y=Y, batch_size=(10+50*i), nb_epoch=1, verbose=1)
model.save_weights(modelLabel + '.h5')
epoch += 1
X = []
Y = []
gc.collect()
weights_out = model.get_weights()
    print(loss_acc)
return weights_out, loss_acc
# ----- end of training/trainOnCaptionedImages.py (dwright37/generative-concatenative-image-captioner, MIT) -----
import numpy as np
from bolero.wrapper import CppBLLoader
from bolero.environment import ContextualEnvironment
from bolero.utils import check_random_state
from time import sleep
class ThrowEnvironment(ContextualEnvironment):
"""Extract the relevant feedbacks from the SpaceBot environment."""
def __init__(self, start=None, random_state=None, verbose=0):
env_name = "spacebot_throw_environment"
self.bll = CppBLLoader()
self.bll.load_library(env_name)
self.env = self.bll.acquire_contextual_environment(env_name)
self.start = start
self.random_state = check_random_state(random_state)
self.verbose = verbose
def init(self):
self.env.init()
def reset(self):
self.env.reset()
self.go_to_start()
def go_to_start(self):
if self.start is None:
return
n_joints = self.get_num_inputs()
inputs = np.copy(self.start)
outputs = np.empty(n_joints)
n_steps = 1000
for t in range(n_steps):
self.env.get_outputs(outputs)
if np.linalg.norm(self.start - outputs) < 0.01:
break
self.env.set_inputs(inputs)
self.env.step_action()
if self.verbose >= 1:
print("[COMPI] Start position has been initialized.")
sleep(0.1)
def get_num_inputs(self):
return self.env.get_num_inputs()
def get_num_outputs(self):
return self.env.get_num_outputs()
def get_outputs(self, values):
self.env.get_outputs(values)
def set_inputs(self, values):
self.env.set_inputs(values)
def step_action(self):
self.env.step_action()
def is_evaluation_done(self):
return self.env.is_evaluation_done()
def is_behavior_learning_done(self):
return self.env.is_behavior_learning_done()
def request_context(self, context=None):
if context is None:
context = self.random_state.uniform([1.0, -1.0], [2.5, 1.0])
return self.env.request_context(context)
def get_num_context_dims(self):
return self.env.get_num_context_dims()
def get_maximum_feedback(self, context):
return 0.0
def get_feedback(self):
feedbacks = self.env.get_feedback()
print("Ball hits the ground at %s" % np.round(feedbacks[1:3], 2))
return feedbacks[0]
# ----- end of evaluation/throw_environment.py (rock-learning/approxik, BSD-3-Clause) -----
module Lens
# Lens laws:
# get (put x y) == x
# put x (put y z) == put x z
# put (get x) x == x
export test_lens_put_get, test_lens_put_put, test_lens_get_put
function test_lens_put_get(get, put, containers, vals)
passed = true
for x in vals
for y in containers
passed &= get(put(x, y)) == x
end
end
passed
end
function test_lens_put_put(get, put, containers, vals)
passed = true
for x in vals
for y in vals
for z in containers
passed &= put(x, put(y, z)) == put(x, z)
end
end
end
passed
end
function test_lens_get_put(get, put, containers, vals)
passed = true
for x in containers
passed &= put(get(x), x) == x
end
passed
end
end
# ----- end of test/lens.jl (Omega.jl, MIT) -----
"""
Code to extract a box-like region, typically for another modeler to use
as a boundary contition. In cases where it gets velocity in addition to
the rho-grid variables the grid limits mimic the standard ROMS organization,
with the outermost corners being on the rho-grid.
Job definitions are in LO_user/extract/box/job_definitions.py
Testing:
run extract_box -gtx cas6_v3_lo8b -job sequim0 -test True
same but with all flags:
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.06 -lt daily -job sequim0 -test True
this command replicates what post/surface0 does
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
or
python extract_box.py -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
Performance: this is very fast, takes just a few seconds for three days on boiler (for yang_sequim).
"""
# imports
import sys
import argparse
from lo_tools import Lfun, zfun, zrfun
from subprocess import Popen as Po
from subprocess import PIPE as Pi
import os
from time import time
import numpy as np
import xarray as xr
pid = os.getpid()
print(' extract_box '.center(60,'='))
print('PID for this job = ' + str(pid))
# command line arguments
parser = argparse.ArgumentParser()
# which run to use
parser.add_argument('-gtx', '--gtagex', type=str) # e.g. cas6_v3_lo8b
parser.add_argument('-ro', '--roms_out_num', type=int) # 2 = Ldir['roms_out2'], etc.
# select time period and frequency
parser.add_argument('-0', '--ds0', type=str) # e.g. 2019.07.04
parser.add_argument('-1', '--ds1', type=str) # e.g. 2019.07.06
parser.add_argument('-lt', '--list_type', type=str) # list type: hourly, daily, weekly
# select job name
parser.add_argument('-job', type=str) # job name
# these flags get only surface or bottom fields if True
# - cannot have both True -
parser.add_argument('-surf', default=False, type=Lfun.boolean_string)
parser.add_argument('-bot', default=False, type=Lfun.boolean_string)
# set this to True to interpolate all u, and v fields to the rho-grid
parser.add_argument('-uv_to_rho', default=False, type=Lfun.boolean_string)
# Optional: set max number of subprocesses to run at any time
parser.add_argument('-Nproc', type=int, default=10)
# Optional: for testing
parser.add_argument('-test', '--testing', default=False, type=Lfun.boolean_string)
# get the args and put into Ldir
args = parser.parse_args()
# test that main required arguments were provided
argsd = args.__dict__
for a in ['gtagex']:
if argsd[a] == None:
print('*** Missing required argument: ' + a)
sys.exit()
gridname, tag, ex_name = args.gtagex.split('_')
# get the dict Ldir
Ldir = Lfun.Lstart(gridname=gridname, tag=tag, ex_name=ex_name)
# add more entries to Ldir
for a in argsd.keys():
if a not in Ldir.keys():
Ldir[a] = argsd[a]
# testing
if Ldir['testing']:
Ldir['roms_out_num'] = 2
Ldir['ds0'] = '2019.07.04'
Ldir['ds1'] = '2019.07.06'
Ldir['list_type'] = 'daily'
# set where to look for model output
if Ldir['roms_out_num'] == 0:
pass
elif Ldir['roms_out_num'] > 0:
Ldir['roms_out'] = Ldir['roms_out' + str(Ldir['roms_out_num'])]
# check for input conflicts:
if Ldir['surf'] and Ldir['bot']:
print('Error: cannot have surf and bot both True.')
sys.exit()
# output location
out_dir = Ldir['LOo'] / 'extract' / Ldir['gtagex'] / 'box'
Lfun.make_dir(out_dir)
if Ldir['surf']:
box_fn = out_dir / (Ldir['job'] + '_surf_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
elif Ldir['bot']:
box_fn = out_dir / (Ldir['job'] + '_bot_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
else:
box_fn = out_dir / (Ldir['job'] + '_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
box_fn.unlink(missing_ok=True)
# name the temp dir to accumulate individual extractions
temp_dir = out_dir / ('temp_' + Ldir['job'])
Lfun.make_dir(temp_dir, clean=True)
# get list of files to work on
fn_list = Lfun.get_fn_list(Ldir['list_type'], Ldir, Ldir['ds0'], Ldir['ds1'])
if Ldir['testing']:
fn_list = fn_list[:5]
G, S, T = zrfun.get_basic_info(fn_list[0])
Lon = G['lon_rho'][0,:]
Lat = G['lat_rho'][:,0]
def check_bounds(lon, lat):
# error checking
if (lon < Lon[0]) or (lon > Lon[-1]):
print('ERROR: lon out of bounds ')
sys.exit()
if (lat < Lat[0]) or (lat > Lat[-1]):
print('ERROR: lat out of bounds ')
sys.exit()
# get indices
ilon = zfun.find_nearest_ind(Lon, lon)
ilat = zfun.find_nearest_ind(Lat, lat)
return ilon, ilat
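# Hedged aside (zfun internals are not shown here): find_nearest_ind presumably
# reduces to a nearest-value argmin; a self-contained numpy equivalent would be:
def _find_nearest_ind_sketch(arr, val):
    return int(np.argmin(np.abs(np.asarray(arr) - val)))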
# get the indices and check that they are in the grid
pth = Ldir['LOu'] / 'extract' / 'box'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import job_definitions
from importlib import reload
reload(job_definitions)
aa, vn_list = job_definitions.get_box(Ldir['job'], Lon, Lat)
lon0, lon1, lat0, lat1 = aa
ilon0, ilat0 = check_bounds(lon0, lat0)
ilon1, ilat1 = check_bounds(lon1, lat1)
# NOTE: ncks indexing is zero-based but is INCLUSIVE of the last point.
# NOTE: ncks extractions retain singleton dimensions
# do the extractions
N = len(fn_list)
proc_list = []
tt0 = time()
print('Working on ' + box_fn.name + ' (' + str(N) + ' times)')
for ii in range(N):
fn = fn_list[ii]
sys.stdout.flush()
# extract one day at a time using ncks
count_str = ('000000' + str(ii))[-6:]
out_fn = temp_dir / ('box_' + count_str + '.nc')
cmd_list1 = ['ncks',
'-v', vn_list,
'-d', 'xi_rho,'+str(ilon0)+','+str(ilon1), '-d', 'eta_rho,'+str(ilat0)+','+str(ilat1),
'-d', 'xi_u,'+str(ilon0)+','+str(ilon1-1), '-d', 'eta_u,'+str(ilat0)+','+str(ilat1),
'-d', 'xi_v,'+str(ilon0)+','+str(ilon1), '-d', 'eta_v,'+str(ilat0)+','+str(ilat1-1)]
if Ldir['surf']:
cmd_list1 += ['-d','s_rho,'+str(S['N']-1)]
elif Ldir['bot']:
cmd_list1 += ['-d','s_rho,0']
cmd_list1 += ['-O', str(fn), str(out_fn)]
proc = Po(cmd_list1, stdout=Pi, stderr=Pi)
proc_list.append(proc)
# screen output about progress
if (np.mod(ii,10) == 0) and ii>0:
print(str(ii), end=', ')
sys.stdout.flush()
if (np.mod(ii,50) == 0) and (ii > 0):
print('') # line feed
sys.stdout.flush()
if (ii == N-1):
print(str(ii))
sys.stdout.flush()
# Nproc controls how many ncks subprocesses we allow to stack up
# before we require them all to finish.
if ((np.mod(ii,Ldir['Nproc']) == 0) and (ii > 0)) or (ii == N-1):
for proc in proc_list:
proc.communicate()
# make sure everyone is finished before continuing
proc_list = []
ii += 1
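# Illustrative alternative (assumption: plain xarray instead of ncks; slower for many
# files, and rho-grid dimensions only for brevity). It shows the same subsetting:
# ncks indexing is inclusive of the last point, so the exclusive xarray slice needs a +1.
def _xarray_box_subset_sketch(fn, ilon0, ilon1, ilat0, ilat1):
    ds = xr.open_dataset(fn)
    return ds.isel(xi_rho=slice(ilon0, ilon1 + 1), eta_rho=slice(ilat0, ilat1 + 1))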
# Ensure that all days have the same fill value. This was required for cas6_v3_lo8b
# when passing from 2021.10.31 to 2021.11.01 because they had inconsistent fill values,
# which leaks through the ncrcat call below.
tt1 = time()
enc_dict = {'_FillValue':1e20}
vn_List = vn_list.split(',')
Enc_dict = {vn:enc_dict for vn in vn_List}
for out_fn in list(temp_dir.glob('box_*.nc')):
ds = xr.load_dataset(out_fn) # need to load, not open, for overwrite
ds.to_netcdf(out_fn, encoding=Enc_dict)
ds.close()
print(' - Time for adding fill value = %0.2f sec' % (time()- tt1))
# concatenate the records into one file
# This bit of code is a nice example of how to replicate a bash pipe
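# (roughly: ls temp_dir | grep box | ncrcat -p temp_dir -O box_fn)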
pp1 = Po(['ls', str(temp_dir)], stdout=Pi)
pp2 = Po(['grep','box'], stdin=pp1.stdout, stdout=Pi)
cmd_list = ['ncrcat','-p', str(temp_dir), '-O', str(box_fn)]
proc = Po(cmd_list, stdin=pp2.stdout, stdout=Pi, stderr=Pi)
stdout, stderr = proc.communicate()
if Ldir['testing']:
if len(stdout) > 0:
print('\n'+stdout.decode())
if len(stderr) > 0:
print('\n'+stderr.decode())
print('Time for initial extraction = %0.2f sec' % (time()- tt0))
# add z variables
if (Ldir['surf']==False) and (Ldir['bot']==False):
tt0 = time()
ds = xr.load_dataset(box_fn) # have to load in order to add new variables
NT, N, NR, NC = ds.salt.shape
ds['z_rho'] = (('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N, NR, NC)))
ds['z_w'] = (('ocean_time', 's_w', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N+1, NR, NC)))
ds.z_rho.attrs = {'units':'m', 'long_name': 'vertical position on s_rho grid, positive up'}
ds.z_w.attrs = {'units':'m', 'long_name': 'vertical position on s_w grid, positive up'}
for ii in range(NT):
h = ds.h.values
zeta = ds.zeta[ii,:,:].values
z_rho, z_w = zrfun.get_z(h, zeta, S)
ds['z_rho'][ii,:,:,:] = z_rho
ds['z_w'][ii,:,:,:] = z_w
ds.to_netcdf(box_fn)
ds.close()
print('Time to add z variables = %0.2f sec' % (time()- tt0))
if Ldir['uv_to_rho']:
# interpolate anything on the u and v grids to the rho grid, assuming
# zero values where masked, and leaving a masked ring around the outermost edge
tt0 = time()
ds = xr.load_dataset(box_fn) # have to load in order to add new variables
Maskr = ds.mask_rho.values == 1 # True over water
NR, NC = Maskr.shape
for vn in ds.data_vars:
if ('xi_u' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
if len(ds[vn].dims) == 4:
uu = ds[vn].values
NT, N, NRu, NCu = uu.shape
uu[np.isnan(uu)] = 0
UU = (uu[:,:,1:-1,1:]+uu[:,:,1:-1,:-1])/2
uuu = np.nan * np.ones((NT, N, NR, NC))
uuu[:,:,1:-1,1:-1] = UU
Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
uuu[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), uuu)})
elif len(ds[vn].dims) == 3:
uu = ds[vn].values
NT, NRu, NCu = uu.shape
uu[np.isnan(uu)] = 0
UU = (uu[:,1:-1,1:]+uu[:,1:-1,:-1])/2
uuu = np.nan * np.ones((NT, NR, NC))
uuu[:,1:-1,1:-1] = UU
Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
uuu[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), uuu)})
elif ('xi_v' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
if len(ds[vn].dims) == 4:
vv = ds[vn].values
NT, N, NRv, NCv = vv.shape
vv[np.isnan(vv)] = 0
VV = (vv[:,:,1:,1:-1]+vv[:,:,:-1,1:-1])/2
vvv = np.nan * np.ones((NT, N, NR, NC))
vvv[:,:,1:-1,1:-1] = VV
Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
vvv[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), vvv)})
elif len(ds[vn].dims) == 3:
vv = ds[vn].values
NT, NRv, NCv = vv.shape
vv[np.isnan(vv)] = 0
VV = (vv[:,1:,1:-1]+vv[:,:-1,1:-1])/2
vvv = np.nan * np.ones((NT, NR, NC))
vvv[:,1:-1,1:-1] = VV
Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
vvv[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), vvv)})
ds.to_netcdf(box_fn)
ds.close()
print('Time to interpolate uv variables to rho grid = %0.2f sec' % (time()- tt0))
# squeeze and compress the resulting file
tt0 = time()
ds = xr.load_dataset(box_fn)
ds = ds.squeeze() # remove singleton dimensions
enc_dict = {'zlib':True, 'complevel':1, '_FillValue':1e20}
Enc_dict = {vn:enc_dict for vn in ds.data_vars if 'ocean_time' in ds[vn].dims}
ds.to_netcdf(box_fn, encoding=Enc_dict)
ds.close()
print('Time to compress = %0.2f sec' % (time()- tt0))
# clean up
Lfun.make_dir(temp_dir, clean=True)
temp_dir.rmdir()
print('Size of full rho-grid = %s' % (str(G['lon_rho'].shape)))
print(' Contents of extracted box file: '.center(60,'-'))
# check on the results
ds = xr.open_dataset(box_fn)
for vn in ds.data_vars:
print('%s %s max/min = %0.4f/%0.4f' % (vn, str(ds[vn].shape), ds[vn].max(), ds[vn].min()))
ds.close()
print('\nPath to file:\n%s' % (str(box_fn)))
|
{"hexsha": "5e03e280ee3d294abc4e24b53e89a4785246d493", "size": 12080, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract/box/extract_box.py", "max_stars_repo_name": "parkermac/LO", "max_stars_repo_head_hexsha": "09e0197de7f2166bfa835ec62018b7a8fbfa7379", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-31T23:12:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T23:12:22.000Z", "max_issues_repo_path": "extract/box/extract_box.py", "max_issues_repo_name": "parkermac/LO", "max_issues_repo_head_hexsha": "09e0197de7f2166bfa835ec62018b7a8fbfa7379", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-18T23:51:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T18:02:44.000Z", "max_forks_repo_path": "extract/box/extract_box.py", "max_forks_repo_name": "parkermac/LO", "max_forks_repo_head_hexsha": "09e0197de7f2166bfa835ec62018b7a8fbfa7379", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9677419355, "max_line_length": 125, "alphanum_fraction": 0.6050496689, "include": true, "reason": "import numpy", "num_tokens": 3831}
|
[STATEMENT]
lemma smc_Funct_Comp_vsv[intro]: "vsv (smc_Funct \<alpha> \<AA> \<BB>\<lparr>Comp\<rparr>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vsv (smc_Funct \<alpha> \<AA> \<BB>\<lparr>Comp\<rparr>)
[PROOF STEP]
unfolding smc_Funct_Comp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vsv (\<lambda>\<GG>\<FF>\<in>\<^sub>\<circ>composable_arrs (smc_Funct \<alpha> \<AA> \<BB>). \<GG>\<FF>\<lparr>[]\<^sub>\<circ>\<rparr> \<bullet>\<^sub>N\<^sub>T\<^sub>C\<^sub>F\<^bsub>\<AA>,\<BB>\<^esub> \<GG>\<FF>\<lparr>1\<^sub>\<nat>\<rparr>)
[PROOF STEP]
by simp
|
{"llama_tokens": 258, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_SMC_FUNCT", "length": 2}
|
import logging
log = logging.getLogger(__name__)
from math import ceil
from numpy import linspace
import struct
from spacq.interface.resources import Resource
from spacq.tool.box import Synchronized
from ..abstract_device import AbstractDevice, AbstractSubdevice
from ..tools import str_to_bool, quantity_wrapped, quantity_unwrapped, BlockData
"""
Tektronix DPO7104 Digital Phosphor Oscilloscope
Control the DPO's settings and input waveforms.
"""
class Channel(AbstractSubdevice):
"""
Input channel of the DPO.
"""
def _setup(self):
AbstractSubdevice._setup(self)
# Resources.
read_only = ['waveform']
for name in read_only:
self.resources[name] = Resource(self, name)
read_write = ['enabled']
for name in read_write:
self.resources[name] = Resource(self, name, name)
self.resources['waveform'].slow = True
self.resources['waveform'].display_units = 'V'
self.resources['enabled'].converter = str_to_bool
def __init__(self, device, channel, *args, **kwargs):
self.channel = channel
AbstractSubdevice.__init__(self, device, *args, **kwargs)
@property
def acquisition_window(self):
"""
The minimum and maximum obtainable values in V.
"""
# 10 divisions total.
max_value = 5 * self.scale.value
min_value = -max_value
offset = self.offset.value
return (min_value + offset, max_value + offset)
def transform_waveform(self, waveform):
"""
Transform some curve data onto the true amplitude interval in V, and intermix time values in s.
"""
value_min, value_max = self.device.value_range
value_diff = value_max - value_min
real_min, real_max = self.acquisition_window
real_diff = real_max - real_min
times = linspace(0, self.device.time_scale.value, len(waveform))
return [(time, real_diff * float(x - value_min) / value_diff + real_min) for time, x in zip(times, waveform)]
@property
def enabled(self):
"""
The input state (on/off) of the channel.
"""
result = self.device.ask('select:ch{0}?'.format(self.channel))
return bool(int(result))
@enabled.setter
def enabled(self, value):
self.device.write('select:ch{0} {1}'.format(self.channel, 'on' if value else 'off'))
@property
@Synchronized()
def waveform(self):
"""
A waveform acquired by the scope.
Values are returned in the format [(time1, value1), (time2, value2), ...].
"""
self.device.status.append('Getting waveform for channel {0}'.format(self.channel))
try:
self.device.data_source = self.channel
if self.device.fastframe:
# Only get the last frame.
frame = self.device.fastframe_count
else:
frame = 1
self.device.fastframe_start = frame
self.device.fastframe_stop = frame
# Receive in chunks.
num_data_points = self.device.record_length
num_transmissions = int(ceil(num_data_points / self.device.max_receive_samples))
curve = []
for i in xrange(num_transmissions):
self.device.data_start = int(i * self.device.max_receive_samples) + 1
self.device.data_stop = int((i + 1) * self.device.max_receive_samples)
curve_raw = self.device.ask_raw('curve?')
curve.append(BlockData.from_block_data(curve_raw))
curve = ''.join(curve)
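            # '!' in the struct format string selects network (big-endian) byte
            # order to match the scope's binary transfer; 'b'/'h' decode
            # 1-/2-byte signed samples (see byte_format_letters)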
format_code = self.device.byte_format_letters[self.device.waveform_bytes]
curve_data = struct.unpack('!{0}{1}'.format(num_data_points, format_code), curve)
return self.transform_waveform(curve_data)
finally:
self.device.status.pop()
@property
@quantity_wrapped('V')
def scale(self):
"""
Vertical scale for the channel, as a quantity in V.
Note: This is for a single division, of which there are 10.
"""
return float(self.device.ask('ch{0}:scale?'.format(self.channel)))
@scale.setter
@quantity_unwrapped('V')
def scale(self, value):
self.device.write('ch{0}:scale {1}'.format(self.channel, value))
@property
@quantity_wrapped('V')
def offset(self):
"""
Vertical offset for the channel, as a quantity in V.
"""
return float(self.device.ask('ch{0}:offset?'.format(self.channel)))
@offset.setter
@quantity_unwrapped('V')
def offset(self, value):
self.device.write('ch{0}:offset {1}'.format(self.channel, value))
class DPO7104(AbstractDevice):
"""
Interface for Tektronix DPO7104 DPO.
"""
byte_format_letters = [None, 'b', 'h']
# The upper limit to the number of samples to be received per transmission.
max_receive_samples = 1e7
allowed_stopafters = ['runstop', 'sequence']
allowed_waveform_bytes = [1, 2] # Channel data only.
allowed_fastframe_sums = set(['none', 'average', 'envelope'])
def _setup(self):
AbstractDevice._setup(self)
self.channels = [None] # There is no channel 0.
for chan in xrange(1, 5):
channel = Channel(self, chan)
self.channels.append(channel)
self.subdevices['channel{0}'.format(chan)] = channel
# Resources.
read_write = ['sample_rate', 'time_scale']
for name in read_write:
self.resources[name] = Resource(self, name, name)
self.resources['sample_rate'].units = 'Hz'
self.resources['time_scale'].units = 's'
@Synchronized()
def reset(self):
"""
Reset the device to its default state.
"""
log.info('Resetting "{0}".'.format(self.name))
self.write('*rst')
def autoset(self):
"""
Autoset the scaling.
"""
self.write('autoset execute')
@property
def stopafter(self):
"""
        The acquisition mode.
"""
value = self.ask('acquire:stopafter?').lower()
if value.startswith('runst'):
return 'runstop'
elif value.startswith('seq'):
return 'sequence'
@stopafter.setter
def stopafter(self, value):
if value not in self.allowed_stopafters:
raise ValueError('Invalid acquisition mode: {0}'.format(value))
self.write('acquire:stopafter {0}'.format(value))
@property
def waveform_bytes(self):
"""
Number of bytes per data point in the acquired waveforms.
"""
return int(self.ask('wfmoutpre:byt_nr?'))
@waveform_bytes.setter
def waveform_bytes(self, value):
self.write('wfmoutpre:byt_nr {0}'.format(value))
@property
def value_range(self):
"""
Range of values possible for each data point.
"""
# The returned values are signed.
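        # e.g. waveform_bytes == 2 gives bits == 15, so the range is
        # (-32768, 32767)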
bits = 8 * self.waveform_bytes - 1
max_val = 2 ** bits
return (-max_val, max_val - 1)
@property
def acquiring(self):
"""
Whether the device is currently acquiring data.
"""
result = self.ask('acquire:state?')
return bool(int(result))
@acquiring.setter
def acquiring(self, value):
self.write('acquire:state {0}'.format(str(int(value))))
@property
@quantity_wrapped('Hz')
def sample_rate(self):
"""
        The sample rate, as a quantity in Hz.
"""
return float(self.ask('horizontal:mode:samplerate?'))
@sample_rate.setter
@quantity_unwrapped('Hz')
def sample_rate(self, value):
self.write('horizontal:mode:samplerate {0}'.format(value))
@property
@quantity_wrapped('s')
def time_scale(self):
"""
The length for a waveform.
"""
return float(self.ask('horizontal:divisions?')) * float(self.ask('horizontal:mode:scale?'))
@time_scale.setter
@quantity_unwrapped('s')
def time_scale(self, value):
self.write('horizontal:mode:scale {0}'.format(value / float(self.ask('horizontal:divisions?'))))
@property
def data_source(self):
"""
The source from which to transfer data.
"""
result = self.ask('data:source?')
assert len(result) == 3 and result.startswith('CH')
return int(result[2])
@data_source.setter
def data_source(self, value):
self.write('data:source ch{0}'.format(value))
@property
def data_start(self):
"""
The first data point to transfer.
"""
return int(self.ask('data:start?'))
@data_start.setter
def data_start(self, value):
self.write('data:start {0}'.format(value))
@property
def data_stop(self):
"""
The last data point to transfer.
"""
return int(self.ask('data:stop?'))
@data_stop.setter
def data_stop(self, value):
self.write('data:stop {0}'.format(value))
@property
def record_length(self):
"""
The number of data points in a waveform.
"""
return int(self.ask('horizontal:mode:recordlength?'))
@Synchronized()
def acquire(self):
"""
Cause the DPO to acquire a single waveform.
"""
self.acquiring = True
@property
def fastframe(self):
"""
Whether fastframe is enabled.
"""
return bool(int(self.ask('horizontal:fastframe:state?')))
@fastframe.setter
def fastframe(self, value):
return self.write('horizontal:fastframe:state {0}'.format(int(value)))
@property
def fastframe_sum(self):
"""
The fastframe summary frame.
"""
result = self.ask('horizontal:fastframe:sumframe?').lower()
if result.startswith('non'):
return 'none'
elif result.startswith('env'):
return 'envelope'
elif result.startswith('ave'):
return 'average'
else:
            raise ValueError('Unknown summary mode: {0}'.format(result))
@fastframe_sum.setter
def fastframe_sum(self, value):
if value not in self.allowed_fastframe_sums:
raise ValueError('Invalid summary frame mode: {0}'.format(value))
return self.write('horizontal:fastframe:sumframe {0}'.format(value))
@property
def fastframe_count(self):
"""
The number of waveforms to acquire in fastframe mode.
"""
return int(self.ask('horizontal:fastframe:count?'))
@fastframe_count.setter
def fastframe_count(self, value):
if value <= 0:
raise ValueError('Must provide a positive integer, not "{0}"'.format(value))
self.write('horizontal:fastframe:count {0:d}'.format(value))
@property
def fastframe_start(self):
"""
The first frame to transfer.
"""
return int(self.ask('data:framestart?'))
@fastframe_start.setter
def fastframe_start(self, value):
self.write('data:framestart {0}'.format(value))
@property
def fastframe_stop(self):
"""
The last frame to transfer.
"""
return int(self.ask('data:framestop?'))
@fastframe_stop.setter
def fastframe_stop(self, value):
self.write('data:framestop {0}'.format(value))
@property
def acquisitions(self):
"""
The number of acquisitions made so far on the oscilloscope.
"""
return int(self.ask('acquire:numacq?'))
name = 'DPO7104'
implementation = DPO7104
|
{"hexsha": "45375e428bdc84b80771a9e6942484c260061e1c", "size": 10057, "ext": "py", "lang": "Python", "max_stars_repo_path": "spacq/devices/tektronix/dpo7104.py", "max_stars_repo_name": "zachparrott/SpanishAcquisitionIQC", "max_stars_repo_head_hexsha": "dd2e683c4cbc5fa420226d545077d94bf2dcb46b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-30T15:52:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-30T15:52:48.000Z", "max_issues_repo_path": "spacq/devices/tektronix/dpo7104.py", "max_issues_repo_name": "zachparrott/SpanishAcquisitionIQC", "max_issues_repo_head_hexsha": "dd2e683c4cbc5fa420226d545077d94bf2dcb46b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spacq/devices/tektronix/dpo7104.py", "max_forks_repo_name": "zachparrott/SpanishAcquisitionIQC", "max_forks_repo_head_hexsha": "dd2e683c4cbc5fa420226d545077d94bf2dcb46b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-06-13T20:59:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-07T03:23:11.000Z", "avg_line_length": 23.0665137615, "max_line_length": 111, "alphanum_fraction": 0.6989161778, "include": true, "reason": "from numpy", "num_tokens": 2659}
|
from misc import parallel, timing
import decomposition
from decomposition import NotDecomposableError
import networkx as nx
from datetime import datetime
# Analysis of graphs on a small number of vertices:
# we generate all possible graphs on n vertices
# and check how many are odd decomposable.
def powerset(iterable, chunk_size=10000):
"""
Builds the power set of iterable
:param iterable: base set
:param chunk_size: size of chunks
    :return: generator of chunk_size-sized lists consisting of subsets of the base iterable
"""
s = list(iterable)
n = len(s)
for i in range(0, 2 ** n, chunk_size):
yield [{s[k] for k, x in enumerate(reversed(bin(j)[2:])) if x == "1"} for j in
range(i, min(2 ** n, i + chunk_size))]
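# e.g. list(powerset('ab', chunk_size=4)) yields a single chunk
# [set(), {'a'}, {'b'}, {'a', 'b'}], following the binary counter above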
def create_graph(edges, nodes):
"""
Creates a graph from edges and nodes
"""
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
return G
@timing
def check(n, p):
chunk_size = 10000
all_edges = {(i, j) for i in range(n) for j in range(i + 1, n)} # all possible edges on n vertices
power = powerset(all_edges, chunk_size)
    total = 2 ** len(all_edges)  # number of elements in the powerset
    chunks = (total // chunk_size) + 1  # number of chunks in the powerset
    print(f"Running decomposition: N={n}, all={total}")
    def work(inp):
        count = 0
        graphs = map(lambda x: create_graph(x, range(n)), inp)
        for y in graphs:
            # count the graphs that admit an odd decomposition
            try:
                decomposition.odd_decomposition(y)
                count += 1
            except NotDecomposableError:
                pass
        return count
# compute the actual number of decomposable graphs
# we use the helper function from misc, which uses joblib to run work in multiple threads
# this also displays the progress bar using tqdm
ok = sum(parallel(work, power, chunks))
print(f"Results for N={n}: {ok} out of {all}, {100 * ok / all:.2f}%")
print()
return (ok, all)
import csv
if __name__ == "__main__":
with open('../results/small_graphs.csv', 'w', newline='') as csvfile:
columns = ["n", "decomposable", "all", "percentage", "time"]
writer = csv.DictWriter(csvfile, fieldnames=columns)
writer.writeheader()
for i in range(3, 9):
            (ok, total), elapsed = check(i, True)
            writer.writerow({"n": i, "decomposable": ok, "all": total, "percentage": ok / total * 100, "time": elapsed})
csvfile.flush()
|
{"hexsha": "34cf12a66c11af42e9ec904d77db10e59cb4cbc2", "size": 2522, "ext": "py", "lang": "Python", "max_stars_repo_path": "decompositon/small_graphs.py", "max_stars_repo_name": "lodrantl/odd-decomposition", "max_stars_repo_head_hexsha": "11b51f72689d9912b6c31585c4d26aff3e384703", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "decompositon/small_graphs.py", "max_issues_repo_name": "lodrantl/odd-decomposition", "max_issues_repo_head_hexsha": "11b51f72689d9912b6c31585c4d26aff3e384703", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "decompositon/small_graphs.py", "max_forks_repo_name": "lodrantl/odd-decomposition", "max_forks_repo_head_hexsha": "11b51f72689d9912b6c31585c4d26aff3e384703", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1358024691, "max_line_length": 113, "alphanum_fraction": 0.6181601903, "include": true, "reason": "import networkx", "num_tokens": 661}
|
import numpy as np
from sklearn.linear_model import LinearRegression
def add_trend_feature(arr, abs_values=False):
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
def classic_sta_lta(x, length_sta, length_lta):
sta = np.cumsum(x ** 2)
# Convert to float
    sta = np.require(sta, dtype=np.float64)
# Copy for LTA
lta = sta.copy()
# Compute the STA and the LTA
sta[length_sta:] = sta[length_sta:] - sta[:-length_sta]
sta /= length_sta
lta[length_lta:] = lta[length_lta:] - lta[:-length_lta]
lta /= length_lta
# Pad zeros
sta[:length_lta - 1] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
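# Minimal usage sketch (synthetic data, illustrative window lengths): the
# STA/LTA ratio rises where short-term energy exceeds the long-term average.
# x = np.random.randn(10000)
# ratio = classic_sta_lta(x, length_sta=50, length_lta=1000)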
def calc_change_rate(x):
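    # x is assumed to be a pandas Series (hence ``.values`` below); infs and
    # NaNs from zero denominators are filtered out before averaging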
change = (np.diff(x) / x[:-1]).values
change = change[np.nonzero(change)[0]]
change = change[~np.isnan(change)]
change = change[change != -np.inf]
change = change[change != np.inf]
return np.mean(change)
|
{"hexsha": "552802e00e5363994eded3df59e25a9984439767", "size": 1133, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/features/feature_utils.py", "max_stars_repo_name": "sbussmann/earthquake-prediction", "max_stars_repo_head_hexsha": "ba1e0f1a29cab40c1e659ed372f097b78e8f8483", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-10T16:43:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-10T16:43:42.000Z", "max_issues_repo_path": "src/features/feature_utils.py", "max_issues_repo_name": "sbussmann/earthquake-prediction", "max_issues_repo_head_hexsha": "ba1e0f1a29cab40c1e659ed372f097b78e8f8483", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-26T03:38:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-18T13:28:12.000Z", "max_forks_repo_path": "src/features/feature_utils.py", "max_forks_repo_name": "sbussmann/earthquake-prediction", "max_forks_repo_head_hexsha": "ba1e0f1a29cab40c1e659ed372f097b78e8f8483", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1063829787, "max_line_length": 65, "alphanum_fraction": 0.6310679612, "include": true, "reason": "import numpy", "num_tokens": 332}
|
# MTRN4230 Robotics
# Group 6 Assignment
# Robot Motion Module
#
# Authors: Samir Mustavi & Matthew Bourke
# Date: 27.07.2020
# Description: ROS module for providing actuation functions to the UR5 robot arm in the simulated Gazebo environment.
# Desired x, y, z coordinates are received from the Image Processing node where this script calculates the
# optimal joint angles.
#
import numpy as np
import cmath
from math import cos as cos
from math import sin as sin
from math import atan2 as atan2
from math import acos as acos
from math import asin as asin
from math import sqrt as sqrt
from math import pi as pi
#from std_msgs.msg import Header
#from trajectory_msgs.msg import JointTrajectory
#from trajectory_msgs.msg import JointTrajectoryPoint
#import rospy
a = np.array([0, -0.425, -0.39225, 0, 0, 0])
d = np.array([0.089159, 0, 0, 0.10915, 0.09465, 0.0823], dtype=float)
alpha = np.array([pi / 2, 0, 0, pi / 2, -pi / 2, 0], dtype=float)
# Helper function for returning transformation matrix of requested link frame
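# Each A_i is the standard Denavit-Hartenberg link transform:
# A_i = Rot_z(theta_i) * Trans_z(d_i) * Trans_x(a_i) * Rot_x(alpha_i)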
def calculate_link_t_matrix(n, th, c):
a_m = a[n-1]
d_m = d[n-1]
alpha_m = alpha[n-1]
theta_m = th[n-1, c]
A_i = np.matrix([[cos(theta_m), -sin(theta_m)*cos(alpha_m), sin(theta_m)*sin(alpha_m), a_m*cos(theta_m)],
[sin(theta_m), cos(theta_m)*cos(alpha_m), -cos(theta_m)*sin(alpha_m), a_m*sin(theta_m)],
[0, sin(alpha_m), cos(alpha_m), d_m],
[0, 0, 0, 1]])
return A_i
# Forward kinematics calculations
# Useful for generating desired end effector orientation matrix and validating inverse kinematics
def forward_kinematics(joint_angles):
T_01 = calculate_link_t_matrix(1, joint_angles, 0)
T_12 = calculate_link_t_matrix(2, joint_angles, 0)
T_23 = calculate_link_t_matrix(3, joint_angles, 0)
T_34 = calculate_link_t_matrix(4, joint_angles, 0)
T_45 = calculate_link_t_matrix(5, joint_angles, 0)
T_56 = calculate_link_t_matrix(6, joint_angles, 0)
T_06 = T_01 * T_12 * T_23 * T_34 * T_45 * T_56
return T_06
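# Round-trip sanity check, a sketch with hypothetical coordinates: feeding the
# IK solution back through FK should reproduce the requested position.
# angles = np.matrix(inverse_kinematics(0.4, 0.2, 0.3)).T
# print(forward_kinematics(angles)[0:3, 3])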
# Inverse kinematics solution
# Takes in x, y, z coordinates (in meters) and outputs desired joint angles (in degrees)
def inverse_kinematics(x, y, z):
# Rotation matrix obtained from forward kinematics using ideal joint orientations
T_06 = np.array([[-1, 0, 0, x],
[ 0, 1, 0, y],
[ 0, 0, -1, z],
[ 0, 0, 0, 1]])
theta = np.matrix(np.zeros((6, 8)))
p_05 = (T_06 * np.matrix([0, 0, -d[5], 1]).T - np.matrix([0, 0, 0, 1]).T)
# Theta 1
gamma = atan2(p_05[2 - 1, 0], p_05[1 - 1, 0])
phi = acos(d[3] / sqrt(p_05[2 - 1, 0] * p_05[2 - 1, 0] + p_05[1 - 1, 0] * p_05[1 - 1, 0]))
# The two solutions for theta 1 correspond to the shoulder being either left or right
# 8 solutions exist in total
# The first 4 consider left oriented shoulder, the last 4 consider right oriented shoulder
theta[0, 0:4] = pi / 2 + gamma + phi
theta[0, 4:8] = pi / 2 + gamma - phi
theta = theta.real
# Theta 5
    # Theta 5 determines whether the wrist orientation is 'up' or 'down'
# config_offset defines solutions offsets based on theta 1 (first 4 solutions vs last 4 solutions)
config_offset = [0, 4]
for i in range(len(config_offset)):
c = config_offset[i]
T_10 = np.linalg.inv(calculate_link_t_matrix(1, theta, c))
T_16 = T_10 * T_06
# For each theta 1, there exists 2 potential solutions for theta 5
theta[4, c:c+2] = + acos((T_16[2, 3] - d[3]) / d[5])
theta[4, c+2:c+4] = - acos((T_16[2, 3] - d[3]) / d[5])
theta = theta.real
# Theta 6
# Theta 6 is not well-defined when sin(theta5) = 0 or when T16(1,3), T16(2,3) = 0.
# config_offset redefined for solutions based on theta 1 and theta 5
config_offset = [0, 2, 4, 6]
for i in range(len(config_offset)):
c = config_offset[i]
T_10 = np.linalg.inv(calculate_link_t_matrix(1, theta, c))
T_16 = np.linalg.inv(T_10 * T_06)
# Based on theta 5, only one solutions exists for theta 6
theta[5, c:c+2] = atan2((-T_16[1, 2] / sin(theta[4, c])), (T_16[0, 2] / sin(theta[4, c])))
theta = theta.real
# Theta 3
# config_offset determined by theta 1 and theta 5 solutions
config_offset = [0, 2, 4, 6]
for i in range(0, len(config_offset)):
c = config_offset[i]
T_10 = np.linalg.inv(calculate_link_t_matrix(1, theta, c))
T_65 = calculate_link_t_matrix(6, theta, c)
T_54 = calculate_link_t_matrix(5, theta, c)
T_14 = (T_10 * T_06) * np.linalg.inv(T_54 * T_65)
P_13 = T_14 * np.matrix([0, -d[3], 0, 1]).T - np.matrix([0, 0, 0, 1]).T
t3 = cmath.acos((np.linalg.norm(P_13)**2-a[1]**2-a[2]**2)/(2*a[1]*a[2]))
# Two solutions exists, describing 'elbow up' or 'elbow down'
theta[2, c] = t3.real
theta[2, c+1] = -t3.real
# Theta 2 & 4
# config_offset redefined to present independent solutions for final joint angles theta 2 and theta 4
config_offset = [0, 1, 2, 3, 4, 5, 6, 7]
for i in range(0, len(config_offset)):
c = config_offset[i]
T_10 = np.linalg.inv(calculate_link_t_matrix(1, theta, c))
T_65 = np.linalg.inv(calculate_link_t_matrix(6, theta, c))
T_54 = np.linalg.inv(calculate_link_t_matrix(5, theta, c))
T_14 = (T_10 * T_06) * T_65 * T_54
P_13 = T_14 * np.matrix([0, -d[3], 0, 1]).T - np.matrix([0, 0, 0, 1]).T
# Theta 2
# Theta 2 has only one solution, depending on elbow orientation
theta[1, c] = -atan2(P_13[1], -P_13[0]) + asin(a[2] * sin(theta[2, c]) / np.linalg.norm(P_13))
# Theta 4
T_32 = np.linalg.inv(calculate_link_t_matrix(3, theta, c))
T_21 = np.linalg.inv(calculate_link_t_matrix(2, theta, c))
T_34 = T_32 * T_21 * T_14
# From all other defined joint angles, theta 4 only has one solution
theta[3, c] = atan2(T_34[1, 0], T_34[0, 0])
theta = theta.real
# Due to multiple joint solutions, joint 2 is given joint rotation limit to avoid unwanted solutions
joint2 = np.rad2deg(theta[1, :])
size = np.size(joint2)
index = []
for i in range(size):
# Joint 2 (shoulder joint) is chosen to be in the range -80 to 0 degrees
# This corresponds to 'elbow up' orientation
if 0 >= joint2[0, i] >= -80:
element = i
index.append(element)
    store_index = np.array(index)
    # filter the possible solutions down to the single most ideal one
    if len(store_index) == 0:
        # no solution within the joint limit: fall back to the solution with
        # the smallest absolute joint 2 angle
        element = int(np.argmin(np.abs(joint2)))
    else:
        min_value = np.abs(joint2[0, store_index[0]])
        element = store_index[0]
        for index in store_index:
            if np.abs(joint2[0, index]) <= min_value:
                min_value = np.abs(joint2[0, index])
                element = index
# ideal_angles = np.rad2deg(theta[:, element])
# return theta[:, element].flatten().tolist()[0]
ideal_angles = theta[:, element].flatten().tolist()[0]
for i in range(len(ideal_angles)):
angle = ideal_angles[i]
if angle > pi:
angle = -2*pi + angle
ideal_angles[i] = angle
if angle < -pi:
angle = 2*pi + angle
ideal_angles[i] = angle
return ideal_angles
#def sub_echo(data):
# # Callback function should maybe set a flag that enables robot movement to begin
# rospy.loginfo("I heard %s", data.data)
def handle_get_coordinates(request):
print("Received coordinates: " + request.x + ", " + request.y + ", " + request.z)
angles = inverse_kinematics(request.x, request.y, request.z)
return angles
if __name__ == "__main__":
# Test values
# Actual x, y, z will be received from image processing node
x = 1.0
y = 0.0
z = 0.0
# rospy.init_node('robot_motion')
# server = rospy.Service('motion/move_to_object', ReceiveCoordinates, handle_get_coordinates)
#server2 = rospy.Service('receive_coordinates', ReceiveCoordinates, handle_get_coordinates)
#server3 = rospy.Service('receive_coordinates', ReceiveCoordinates, handle_get_coordinates)
# Set up publisher and subscriber protocols
#pub = rospy.Publisher('joint_waypoints', JointTrajectory, queue_size=10)
# sub = rospy.Subscriber('image_processing', String, sub_echo)
# Define a 'home' position for the robot to return to if no commands are received
home_pos = np.matrix([[191.588279356832517], [-13.63151596868293], [-133.13413638498815],
[56.76565235367108], [-90.00000000000000], [-78.41172064316748]])
test_pos = np.matrix([[11.46], [22.91], [34.38], [45.84], [57.3], [68.75]])
# Might have to initialise robot position before entering control loop
# got_coordinates helps keep track of robot state. Once coordinates are received, robot actuation sequence begins
got_coordinates = False
# while not rospy.is_shutdown():
#
# # Subscribe to image processing node and wait for x, y, z coordinates
# # When coordinates are received, set 'got_coordinates' to True
#
# if got_coordinates:
# got_coordinates = False
# angles = inverse_kinematics(x, y, z)
#
# # Publish joint angles to Gazebo node
#
# # Once robot motion is complete, activate gripper
#
# # Move end effector to box position
#
# # Once robot motion is complete, deactivate gripper
#
# # Return to 'home' position
# Testing section
angles = inverse_kinematics(x, y, z)
print(type(angles))
print(np.array(angles)* 180 / pi)
# angles.reshape()
# print(angles)
# print("Joint angles are:")
# for angle in angles:
# print("\t", angle[(0, 0)]*pi/180.0)
# H = forward_kinematics(angles * pi/180.0)
# print("\nEnd effector position calculated from FK is:")
# print("\tx = ", H[(0, 3)], "m\n\ty = ", H[(1, 3)], "m\n\tz = ", H[(2, 3)], "m")
# print("\nError in position is:")
# print("\tx_error = ", (H[(0, 3)]-x), "m\n\ty_error = ", (H[(1, 3)]-y), "m\n\tz_error = ", (H[(2, 3)]-z), "m")
|
{"hexsha": "10483409d6a3cf67bcf83d24a1a0e5e5808aefe6", "size": 10234, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/kinematics.py", "max_stars_repo_name": "JimmeeX/ur5_t2_4230", "max_stars_repo_head_hexsha": "ae64c15a5c8040b5f3f5ba19710427c406607973", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-26T16:39:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T00:24:41.000Z", "max_issues_repo_path": "src/utils/kinematics.py", "max_issues_repo_name": "JimmeeX/ur5_t2_4230", "max_issues_repo_head_hexsha": "ae64c15a5c8040b5f3f5ba19710427c406607973", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/kinematics.py", "max_forks_repo_name": "JimmeeX/ur5_t2_4230", "max_forks_repo_head_hexsha": "ae64c15a5c8040b5f3f5ba19710427c406607973", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3615384615, "max_line_length": 119, "alphanum_fraction": 0.6202853234, "include": true, "reason": "import numpy", "num_tokens": 3161}
|
"""
TODO
----
More than one plot
"""
from string import Template
from os.path import join
import os
import pandas as pd
import numpy as np
def describe2latex(study_info, stats):
"""Function to translate the descriptions of the variables to latex.
TODO
----
    - create a plot folder
- get paths to save figures
"""
## 0. Needed variables
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
if not os.path.exists(join(study_info['path'], 'Plots')):
os.mkdir(join(study_info['path'], 'Plots'))
header = built_header()
title = built_title(study_info)
content = built_content(study_info, stats)
## 1. Applying to the template
templ_fl = join(this_dir, '../data/templates/tex/document_template.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(header=header, title=title,
content=content)
filetext = filetext.encode('utf-8')
return filetext
#file_ = open(join(study_info['path'], 'report.tex'), "w")
#file_.write(filetext)
#return filetext
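# Note on Template.safe_substitute: unknown placeholders are left intact, e.g.
# Template('$a $b').safe_substitute(a='x') gives 'x $b'; this is what lets
# substitute_plots() below fill $graphics1..$graphics4 over several passes.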
###############################################################################
############################## LEVEL 1 functions ##############################
###############################################################################
def built_content(study_info, stats):
intro = build_intro(study_info)
pages = []
for st in stats:
pages.append(page_builder(st, study_info))
content = '\\newpage\n'.join(pages)
content = '\\newpage\n'.join([intro, content])
content = content.decode('utf-8')
return content
def built_title(study_info):
## 0. Needed variables
title = study_info['title']
#summary = study_info['summary']
author = study_info['author']
#date = study_info['date']
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/portada.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(title=title, author=author,
date='')
filetext = filetext.decode('utf-8')
return filetext
def built_header():
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/header.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filecode = filecode.decode('utf-8')
return filecode
###############################################################################
############################## LEVEL 2 functions ##############################
###############################################################################
def build_intro(study_info):
global_stats = study_info['global_stats']
    text = '\\section{Variables}\n' + global_stats.to_latex()
text = text.decode('utf-8')
return text
def page_builder(info, study_info):
## 0. Needed variables
varname = info['variables_name'].decode('utf-8').encode('utf-8')
variables = info['variables']
vardesc = info['Description'].decode('utf-8').encode('utf-8')
typevar = info['type'].lower()
if typevar in ['discrete', 'categorical']:
tables = cat_tables(info)
plots = cat_plots(info, study_info)
elif typevar == 'continuous':
tables = cont_tables(info)
plots = cont_plots(info, study_info)
elif typevar == 'coordinates':
tables = coord_tables(info)
plots = coord_plots(info, study_info)
    elif typevar in ['time', 'temporal']:
        tables = temp_tables(info)
        plots = temp_plots(info, study_info)
    elif typevar == 'tmpdist':
        tables = tmpdist_tables(info)
        plots = tmpdist_plots(info, study_info)
    else:
        print typevar, info['variables']
        tables, plots = '', ''
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/page.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(varname=varname,
variables=variables,
vardesc=vardesc,
tables=tables,
plots=plots,
comments='',
artificialcomments='')
filetext = filetext.decode('utf-8')
return filetext
###############################################################################
############################## LEVEL 3 functions ##############################
###############################################################################
def cat_tables(info, max_rows=15):
# 0. Needed variables
if info['count_table'].shape[0] > max_rows:
table = info['count_table'][:max_rows]
else:
table = info['count_table']
table = pd.DataFrame(table, columns=[info['variables']])
tabular = table.to_latex()
caption = 'Counts of the most common values of %s.'
caption = caption % info['variables_name']
tablelabel = info['variables_name']+'_01'
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/table.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(tabular=tabular,
caption=caption,
tablelabel=tablelabel)
return filetext
def cat_plots(info, study_info):
# 0. Needed variables
    if 'plots' not in info:
return ''
# Save plots to computer
graphics, caption, imagelabel = plot_saving(info, study_info)
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
filename = get_filename_template(len(graphics))
templ_fl = join(this_dir, filename)
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(caption=caption,
imagelabel=imagelabel)
filetext = substitute_plots(graphics, filetext)
return filetext
def cont_tables(info, max_rows=15, nmax_cols=7):
## TODO: Number of nulls
# 0. Needed variables
# if info['hist_table'][0].shape[0] > max_rows:
# table = info['hist_table'][0][:max_rows]
# else:
# table = info['hist_table']
# 1
tabular = "\\begin{tabular}{lr}\n\\toprule\n\midrule\n\nmean"
tabular = tabular + " & %f \\\\\n\\bottomrule\n\end{tabular}\n"
tabular = tabular % info['mean']
caption = ''
tablelabel = info['variables_name']+'_mean'
# 2
aux = np.vstack([info['ranges'], info['quantiles']])
table = pd.DataFrame(aux.T, columns=['ranges', 'quantiles'])
# Formatting table to fit into a page
ni, na = table.index[0], table.shape[0]
nmax_cols = nmax_cols if na >= nmax_cols else na
idxs = np.linspace(ni, na-1, nmax_cols).round().astype(int)
table = table[['ranges', 'quantiles']]
table = table.transpose()
table = table[idxs]
tabular2 = table.to_latex(float_format=lambda x: '%.2f' % x)
caption2 = 'Comparative between quantiles and proportional segments of %s'
caption2 = caption2 % info['variables_name']
tablelabel2 = info['variables_name']+'_01'
# TODO counts
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/table.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext1 = Template(filecode).safe_substitute(tabular=tabular,
caption=caption,
tablelabel=tablelabel)
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/table.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext2 = Template(filecode).safe_substitute(tabular=tabular2,
caption=caption2,
tablelabel=tablelabel2)
filetext = '\n\n'.join([filetext1, filetext2])
return filetext
def cont_plots(info, study_info):
# 0. Needed variables
    if 'plots' not in info:
return ''
# Save plots to computer
graphics, caption, imagelabel = plot_saving(info, study_info)
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
filename = get_filename_template(len(graphics))
templ_fl = join(this_dir, filename)
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(caption=caption,
imagelabel=imagelabel)
filetext = substitute_plots(graphics, filetext)
return filetext
def coord_plots(info, study_info):
# 0. Needed variables
    if 'plots' not in info:
return ''
# Save plots to computer
graphics, caption, imagelabel = plot_saving(info, study_info)
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
filename = get_filename_template(len(graphics))
templ_fl = join(this_dir, filename)
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(caption=caption,
imagelabel=imagelabel)
filetext = substitute_plots(graphics, filetext)
return filetext
def coord_tables(info):
return ''
def temp_plots(info, study_info):
# 0. Needed variables
    if 'plots' not in info:
return ''
# Save plots to computer
graphics, caption, imagelabel = plot_saving(info, study_info)
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
filename = get_filename_template(len(graphics))
templ_fl = join(this_dir, filename)
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(caption=caption,
imagelabel=imagelabel)
filetext = substitute_plots(graphics, filetext)
return filetext
def temp_tables(info):
# 0. Needed variables
pre_post = info['pre_post']
cols = ['pre', 'through', 'post']
    table = pd.DataFrame(np.array([pre_post[e] for e in cols]).reshape((1, 3)),
                         columns=cols)
    tabular = table.to_latex()
caption = 'Counts of the most common values of %s.'
caption = caption % info['variables_name']
tablelabel = info['variables_name']+'_01'
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
templ_fl = join(this_dir, '../data/templates/tex/table.txt')
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(tabular=tabular,
caption=caption,
tablelabel=tablelabel)
return filetext
def tmpdist_plots(info, study_info):
# 0. Needed variables
    if 'plots' not in info:
return ''
# Save plots to computer
graphics, caption, imagelabel = plot_saving(info, study_info)
## 1. Applying to the template
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
filename = get_filename_template(len(graphics))
templ_fl = join(this_dir, filename)
file_ = open(templ_fl, "r")
filecode = file_.read()
filetext = Template(filecode).safe_substitute(caption=caption,
imagelabel=imagelabel)
filetext = substitute_plots(graphics, filetext)
return filetext
def tmpdist_tables(info):
return ''
###############################################################################
############################## Auxiliar functions #############################
###############################################################################
def get_filename_template(n):
"""Return the template file for plotting."""
if n == 1:
filename = '../data/templates/tex/image.txt'
elif n == 2:
filename = '../data/templates/tex/image2.txt'
    elif n == 4:
        filename = '../data/templates/tex/image4.txt'
    else:
        raise ValueError('No tex template for {0} plots'.format(n))
    return filename
def substitute_plots(graphics, filetext):
"""Substitute the plot directions into the latex text."""
filetext = Template(filetext).safe_substitute(graphics1=graphics[0])
    if len(graphics) > 1:
        filetext = Template(filetext).safe_substitute(graphics2=graphics[1])
    if len(graphics) > 2:
        filetext = Template(filetext).safe_substitute(graphics3=graphics[2])
        filetext = Template(filetext).safe_substitute(graphics4=graphics[3])
return filetext
def plot_saving(info, study_info):
"""This function save the plots stored in the stats info and return its
path directions and additional information stored in stats info.
"""
varname = info['variables_name'].replace(".", "_")
varname = varname.replace("-", "_")
fig = info['plots']
if type(fig) != list:
fname = '%s.png' % (varname+'_01')
fname = fname.replace(" ", "")
fig.savefig(join(study_info['path']+'/Plots/', fname))
graphics = ['Plots/'+fname]
elif type(fig) == list:
graphics = []
for i in range(len(fig)):
fname = '%s.png' % (varname+'_0'+str(i+1))
fname = fname.replace(" ", "")
fig[i].savefig(join(study_info['path']+'/Plots/', fname))
graphics.append('Plots/'+fname)
caption = 'Plot of the distribution of %s' % info['variables_name']
imagelabel = varname+'_01'
return graphics, caption, imagelabel
|
{"hexsha": "5f50840f37018377e238f96df6af5808875894a3", "size": 14276, "ext": "py", "lang": "Python", "max_stars_repo_path": "FirmsLocations/IO/output_to_latex.py", "max_stars_repo_name": "tgquintela/Firms_locations", "max_stars_repo_head_hexsha": "476680cbc3eb1308811633d24810049e215101a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FirmsLocations/IO/output_to_latex.py", "max_issues_repo_name": "tgquintela/Firms_locations", "max_issues_repo_head_hexsha": "476680cbc3eb1308811633d24810049e215101a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FirmsLocations/IO/output_to_latex.py", "max_forks_repo_name": "tgquintela/Firms_locations", "max_forks_repo_head_hexsha": "476680cbc3eb1308811633d24810049e215101a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2335025381, "max_line_length": 79, "alphanum_fraction": 0.5780330625, "include": true, "reason": "import numpy", "num_tokens": 3241}
|
Require Export Iron.Language.SimpleData.Ty.
(* Data Constructors *)
Inductive datacon : Type :=
| DataCon : nat -> datacon.
Hint Constructors datacon.
Fixpoint datacon_beq t1 t2 :=
match t1, t2 with
| DataCon n1, DataCon n2 => beq_nat n1 n2
end.
(* Definitions.
Carries meta information about type and data constructors. *)
Inductive def : Type :=
(* Definition of a data type constructor *)
| DefDataType
: tycon (* Name of data type constructor *)
-> list datacon (* Data constructors that belong to this type *)
-> def
(* Definition of a data constructor *)
| DefData
: datacon (* Name of data constructor *)
-> list ty (* Types of arguments *)
-> ty (* Type of constructed data *)
-> def.
Hint Constructors def.
(* Definition environment.
Holds the definitions of all current type and data constructors. *)
Definition defs := list def.
(* Lookup the def of a given type constructor.
Returns None if it's not in the list. *)
Fixpoint getTypeDef (tc: tycon) (ds: defs) : option def :=
match ds with
| ds' :> DefDataType tc' _ as d
=> if tycon_beq tc tc'
then Some d
else getTypeDef tc ds'
| ds' :> _ => getTypeDef tc ds'
| Empty => None
end.
(* Lookup the def of a given data constructor.
Returns None if it's not in the list. *)
Fixpoint getDataDef (dc: datacon) (ds: defs) : option def :=
match ds with
| ds' :> DefData dc' _ _ as d
=> if datacon_beq dc dc'
then Some d
else getDataDef dc ds'
| ds' :> _ => getDataDef dc ds'
| Empty => None
end.
(* Boolean equality for data constructors. *)
Lemma datacon_beq_eq
: forall dc dc'
, true = datacon_beq dc dc'
-> dc = dc'.
Proof.
intros.
destruct dc.
destruct dc'.
simpl in H.
apply beq_nat_eq in H.
auto.
Qed.
(* Boolean negation for data constructors. *)
Lemma datacon_beq_false
: forall dc
, false = datacon_beq dc dc
-> False.
Proof.
intro.
destruct dc.
simpl.
intros.
induction n.
simpl in H. false.
simpl in H. auto.
Qed.
|
{"author": "discus-lang", "repo": "iron", "sha": "75c007375eb62e1c0be4b8b8eb17a0fe66880039", "save_path": "github-repos/coq/discus-lang-iron", "path": "github-repos/coq/discus-lang-iron/iron-75c007375eb62e1c0be4b8b8eb17a0fe66880039/done/Iron/Language/SimpleData/Def.v"}
|
# -*- coding: utf-8 -*-
"""generate_attack_files.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1CDyCghmEMadl1NHbQvvXXFEsQHckKUtH
"""
# Commented out IPython magic to ensure Python compatibility.
# %cd /content/drive/MyDrive/attacks/
# !ls
# load function taken from https://github.com/itaygal/RS_TrueReputation/
import re
import copy
import os
"""load rating .csv file and save results to given data structures
Args:
dataset_path: path to movielens 100k rating file
user_movie_ratings: dic of user to a dic of movie to a rating. user_movie_ratings[user_id][movie_id] = rating
movie_user_ratings: dic of movie to a dic of user to a rating. movie_user_ratings[movie_id][user_id] = rating
movies: set of all movie names
Returns:
None.
"""
def load(dataset_path, user_movie_ratings, movie_user_ratings, movies):
# user id | item id | rating | timestamp
    rating_match = re.compile(r"\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)")
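    # e.g. the u.data line "196\t242\t3\t881250949" parses to
    # user_id=196, movie_id=242, rating=3, timestamp=881250949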
with open(dataset_path, 'r') as dataset_file:
for rating_line in dataset_file:
m = rating_match.match(rating_line)
if m:
user_id = m.group(1)
movie_id = m.group(2)
rating = m.group(3)
timestamp = m.group(4)
if user_id not in user_movie_ratings:
user_movie_ratings[user_id] = {}
user_movie_ratings[user_id][movie_id] = (int(rating), int(timestamp))
if movie_id not in movie_user_ratings:
movie_user_ratings[movie_id] = {}
movies.add(movie_id)
movie_user_ratings[movie_id][user_id] = (int(rating), int(timestamp))
user_movie_ratings = {} # dic of user to a dic of movie to a rating. user_movie_ratings[user_id][movie_id] = rating
movie_user_ratings = {} # dic of movie to a dic of user to a rating. movie_user_ratings[movie_id][user_id] = rating
movies = set() # set of all movie names
# load the rating file into the data structures
load("/content/drive/MyDrive/attacks/ml-100k/u.data", user_movie_ratings, movie_user_ratings, movies)
# build per-movie stats: rating count and average rating for each movie
from tqdm import tqdm
Movies={}
for m in tqdm(movie_user_ratings):
Movies[m]=[]
Movies[m].append(len(movie_user_ratings[m]))
avg_rating=0
for i in movie_user_ratings[m]:
avg_rating+=movie_user_ratings[m][i][0]
Movies[m].append(avg_rating/len(movie_user_ratings[m]))
from collections import defaultdict
import numpy as np
import random
userProfile=defaultdict(dict)
itemProfile = defaultdict(dict)
timeProfile= defaultdict(dict)
random_time=[]
for user in user_movie_ratings:
for item in user_movie_ratings[user]:
userProfile[int(user)][int(item)]=int(user_movie_ratings[user][item][0])
itemProfile[int(item)][int(user)]=int(user_movie_ratings[user][item][0])
timeProfile[int(user)][int(item)]=int(user_movie_ratings[user][item][1])
random_time.append(int(user_movie_ratings[user][item][1]))
# attack functions are modified from https://github.com/Coder-Yu/SDLib
############################################### config ###################################
outputDir = "/content/drive/MyDrive/attack_datasets/Movielens1M/bandwagon/"
attackSize = 0.05
fillerSize = 0.05
selectedSize = 0.005
targetCount = 100
targetScore = 4.0
threshold = 3.0
maxScore = 4.0
minScore = 1.0
minCount = 50
maxCount = 200
linkSize = 0.001
itemList = []
spamProfile = defaultdict(dict)
spamItem = defaultdict(list) # items rated by spammers
spamTimeProfile = defaultdict(dict)
targetItems = []
itemAverage = {}
startUserID = 0
def getAverageRating():
for itemID in itemProfile:
li = itemProfile[itemID].values()
itemAverage[itemID] = float(sum(li)) / len(li)
def selectTarget():
print('Selecting target items...')
for i in itemProfile.keys():
itemList.append(i)
itemList.sort()
while len(targetItems) < targetCount:
# generate a target order at random
target = np.random.randint(len(itemList))
if len(itemProfile[itemList[target]]) < maxCount and len(itemProfile[itemList[target]]) > minCount \
and itemList[target] not in targetItems \
and itemAverage[itemList[target]] <= threshold:
targetItems.append(itemList[target])
#print(itemList[target], ' ', itemAverage[itemList[target]])
############################################### config ###################################
def generateLabels(filename):
labels = []
path = outputDir + filename
with open(path, 'w') as f:
for user in spamProfile:
labels.append(str(user)+' 1\n')
for user in userProfile:
labels.append(str(user)+' 0\n')
f.writelines(labels)
    print('User labels have been output')
def generateProfiles(filename):
ratings = []
path = outputDir+filename
with open(path, 'w') as f:
for user in userProfile:
for item in userProfile[user]:
ratings.append(str(user)+' '+str(item)+' ' +
str(userProfile[user][item])+' '+str(timeProfile[user][item])+'\n')
for user in spamProfile:
for item in spamProfile[user]:
ratings.append(str(user) + ' ' + str(item) + ' ' +
str(spamProfile[user][item])+' '+str(spamTimeProfile[user][item])+'\n')
        print(len(spamProfile))
f.writelines(ratings)
    print('User profiles have been output')
############################################## average attack ##########################################
def average_attack(startID=0):
print('Modeling average attack...')
startUserID = len(userProfile) if startID == 0 else startID
for _ in range(int(len(userProfile)*attackSize)):
fillerItems = getFillerItems()
for item in fillerItems:
spamProfile[startUserID][itemList[item]] = round(itemAverage[itemList[item]])
spamTimeProfile[startUserID][itemList[item]]= random.sample(random_time,1)[0] # random time assigned
for _ in range(targetCount):
target = np.random.randint(len(targetItems))
spamProfile[startUserID][targetItems[target]] = targetScore
spamTimeProfile[startUserID][targetItems[target]]= random.sample(random_time,1)[0] # random time assigned
spamItem[startUserID].append(targetItems[target])
startUserID += 1
print(f"userid={startUserID}")
def getFillerItems():
mu = int(fillerSize*len(itemProfile))
sigma = int(0.1*mu)
markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
markedItems = np.random.randint(len(itemProfile), size=markedItemsCount)
return markedItems.tolist()
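# e.g. with MovieLens 100K (1682 items) and fillerSize=0.05, mu is about 84,
# so each spam profile rates roughly 84 +/- 8 randomly chosen filler items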
############################################## average attack ##########################################
outputDir = "/content/drive/MyDrive/attack_datasets/Movielens100K/average/"
for i in [0.1,0.15,0.2,0.25]:
attackSize = i
fillerSize = 0.05
if i>0.15:
fillerSize = 0.1
selectedSize = 0.005
targetCount = 100
targetScore = 4.0
threshold = 3.0
maxScore = 4.0
minScore = 1.0
minCount = 5
maxCount = 150
linkSize = 0.001
itemList = []
spamProfile = defaultdict(dict)
spamItem = defaultdict(list) # items rated by spammers
spamTimeProfile = defaultdict(dict)
targetItems = []
itemAverage = {}
startUserID = 0
getAverageRating()
selectTarget()
average_attack()
#attack.farmLink()
generateLabels(f'labels_{i*10}.txt')
generateProfiles(f'profiles_{i*10}.txt')
import pandas as pd
names = ['user_id', 'item_id', 'rating', 'timestamp']
a=pd.read_csv("/content/drive/MyDrive/attack_datasets/Movielens1M/bandwagon_profiles.txt",delim_whitespace=True,names=names)
a  # notebook cell: display the combined ratings dataframe
############################################## bandwagon attack ##########################################
hotItems = sorted(itemProfile.items(), key=lambda d: len(d[1]), reverse=True)[
: int(selectedSize * len(itemProfile))
]
def bandwagon_attack(startID=0):
print("Modeling bandwagon attack...")
startUserID = len(userProfile) if startID == 0 else startID
for _ in range(int(len(userProfile) * attackSize)):
fillerItems = getFillerItems()
for item in fillerItems:
            spamProfile[startUserID][itemList[item]] = random.randint(
                int(minScore), int(maxScore)
            )
spamTimeProfile[startUserID][itemList[item]]= random.sample(random_time,1)[0] # random time assigned
selectedItems = getSelectedItems()
for item in selectedItems:
spamProfile[startUserID][item] = targetScore
spamTimeProfile[startUserID][item]= random.sample(random_time,1)[0] # random time assigned
for _ in range(targetCount):
target = np.random.randint(len(targetItems))
spamProfile[startUserID][targetItems[target]] = targetScore
spamTimeProfile[startUserID][targetItems[target]]= random.sample(random_time,1)[0] # random time assigned
spamItem[startUserID].append(targetItems[target])
startUserID += 1
print(f"userid={startUserID}")
def getFillerItems():
mu = int(fillerSize * len(itemProfile))
sigma = int(0.1 * mu)
markedItemsCount = int(round(random.gauss(mu, sigma)))
markedItemsCount = max(markedItemsCount, 0)
return np.random.randint(len(itemProfile), size=markedItemsCount)
def getSelectedItems():
mu = int(selectedSize * len(itemProfile))
sigma = int(0.1 * mu)
markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
markedIndexes = np.random.randint(len(hotItems), size=markedItemsCount)
return [hotItems[index][0] for index in markedIndexes]
outputDir = "/content/drive/MyDrive/attack_datasets/Movielens100K/bandwagon/"
for i in [0.1, 0.15, 0.2, 0.25]:
    attackSize = i
    fillerSize = 0.05
    if i > 0.15:
        fillerSize = 0.1
    selectedSize = 0.005
    targetCount = 100
    targetScore = 4.0
    threshold = 3.0
    maxScore = 4.0
    minScore = 1.0
    minCount = 5
    maxCount = 150
    linkSize = 0.001
    itemList = []
    spamProfile = defaultdict(dict)
    spamItem = defaultdict(list)  # items rated by spammers
    spamTimeProfile = defaultdict(dict)
    targetItems = []
    itemAverage = {}
    startUserID = 0
    getAverageRating()
    selectTarget()
    bandwagon_attack()
    # attack.farmLink()
    generateLabels(f'bandwagon_labels_{i*10}.txt')
    generateProfiles(f'bandwagon_profiles_{i*10}.txt')
############################################## random attack ##########################################
def random_attack(startID=0):
    print('Modeling random attack...')
    startUserID = len(userProfile) if startID == 0 else startID
    for _ in range(int(len(userProfile)*attackSize)):
        fillerItems = getFillerItems()
        for item in fillerItems:
            # filler items get a uniformly random integer score between the limits
            spamProfile[startUserID][itemList[item]] = random.randint(int(minScore), int(maxScore))
            spamTimeProfile[startUserID][itemList[item]] = random.sample(random_time, 1)[0]  # random time assigned
        for _ in range(targetCount):
            target = np.random.randint(len(targetItems))
            spamProfile[startUserID][targetItems[target]] = targetScore
            spamTimeProfile[startUserID][targetItems[target]] = random.sample(random_time, 1)[0]  # random time assigned
            spamItem[startUserID].append(targetItems[target])
        startUserID += 1
    print(f"userid={startUserID}")

def getFillerItems():
    mu = int(fillerSize*len(itemProfile))
    sigma = int(0.1*mu)
    markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
    markedItems = np.random.randint(len(itemProfile), size=markedItemsCount)
    return markedItems.tolist()
############################################## random attack ##########################################
outputDir = "/content/drive/MyDrive/attack_datasets/Movielens100K/random/"
for i in [0.1, 0.15, 0.2, 0.25]:
    attackSize = i
    fillerSize = 0.05
    if i > 0.15:
        fillerSize = 0.1
    selectedSize = 0.005
    targetCount = 100
    targetScore = 4.0
    threshold = 3.0
    maxScore = 4.0
    minScore = 1.0
    minCount = 5
    maxCount = 150
    linkSize = 0.001
    itemList = []
    spamProfile = defaultdict(dict)
    spamItem = defaultdict(list)  # items rated by spammers
    spamTimeProfile = defaultdict(dict)
    targetItems = []
    itemAverage = {}
    startUserID = 0
    getAverageRating()
    selectTarget()
    random_attack()
    # attack.farmLink()
    generateLabels(f'random_labels_{i*10}.txt')
    generateProfiles(f'random_profiles_{i*10}.txt')
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv("/content/drive/MyDrive/attack_datasets/Netflix300K/random/random_profiles_1.0.txt",
                      delim_whitespace=True, names=names)
names1 = ['user_id', 'label']
labels = pd.read_csv("/content/drive/MyDrive/attack_datasets/Netflix300K/random/random_labels_1.0.txt",
                     delim_whitespace=True, names=names1)
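
# --- Hedged sanity check (our addition, not part of the original script):
# the profiles and labels written above should agree on the user set; the
# column names follow the two reads just before this block.
assert set(ratings['user_id']) == set(labels['user_id']), \
    "profiles and labels disagree on the user set"
print(f"{labels.shape[0]} labelled users, {ratings.shape[0]} ratings checked")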
|
{"hexsha": "5fd5b95dc55cdd354765c1cba41fc59ead706757", "size": 13008, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_attack_files.py", "max_stars_repo_name": "sert121/shilling_a_and_d", "max_stars_repo_head_hexsha": "48ab1f2e48c1e13f3b19ab897b3372638d9d4eb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_attack_files.py", "max_issues_repo_name": "sert121/shilling_a_and_d", "max_issues_repo_head_hexsha": "48ab1f2e48c1e13f3b19ab897b3372638d9d4eb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_attack_files.py", "max_forks_repo_name": "sert121/shilling_a_and_d", "max_forks_repo_head_hexsha": "48ab1f2e48c1e13f3b19ab897b3372638d9d4eb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-15T07:22:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T07:22:44.000Z", "avg_line_length": 36.4369747899, "max_line_length": 138, "alphanum_fraction": 0.6470633456, "include": true, "reason": "import numpy", "num_tokens": 3215}
|
module class_Stack

    type :: link
        integer :: i
        type (link), pointer :: previous
    end type link

    type, public :: StackIterator
        integer, private :: index
        type(link), pointer, private :: current
    contains
        procedure :: create => stack_iterator_create
        procedure :: next => stack_iterator_next
        procedure :: has_next => stack_iterator_has_next
        procedure :: get_index => stack_iterator_get_index
    end type StackIterator

    type, public :: Stack
        type (link), private, pointer :: current
        integer :: length
    contains
        procedure :: init => stack_init
        procedure :: push => stack_push
        procedure :: pop => stack_pop
        procedure :: dealloc => stack_dealloc
        procedure :: has_items => stack_has_items
        procedure :: peek => stack_peek
        procedure :: get_head => stack_get_head
    end type Stack

contains

    subroutine stack_iterator_create(self, in_stack)
        Class(StackIterator), intent(inout) :: self
        Class(Stack), intent(inout) :: in_stack
        self%current => in_stack%get_head()
        self%index = 0
    end subroutine stack_iterator_create

    function stack_iterator_next(self) result(out_vertex_label)
        Class(StackIterator), intent(inout) :: self
        integer :: out_vertex_label
        out_vertex_label = self%current%i
        self%current => self%current%previous
        self%index = self%index + 1
    end function stack_iterator_next

    function stack_iterator_has_next(self) result(out_bool)
        Class(StackIterator), intent(inout) :: self
        logical :: out_bool
        out_bool = associated(self%current%previous)
    end function stack_iterator_has_next

    function stack_iterator_get_index(self) result(current_index)
        Class(StackIterator), intent(inout) :: self
        integer :: current_index
        current_index = self%index
    end function stack_iterator_get_index

    function stack_get_head(self) result(out_pointer)
        Class(Stack), intent(inout) :: self
        type (link), pointer :: out_pointer
        out_pointer => self%current
    end function stack_get_head

    subroutine stack_init(self)
        Class(Stack), intent(inout) :: self
        nullify(self%current)
        self%length = 0
    end subroutine stack_init

    subroutine stack_dealloc(self)
        Class(Stack), intent(inout) :: self
        integer :: temp_datum
        do while(self%length > 0)
            temp_datum = self%pop()
        enddo
    end subroutine

    subroutine stack_push(self, datum)
        Class(Stack), intent(inout) :: self
        integer, intent(in) :: datum
        type(link), pointer :: new_leader, temp_link
        self%length = self%length + 1       ! increment length
        allocate(new_leader)                ! allocate a new leader link
        temp_link => self%current           ! save the current pointer as a temp pointer
        self%current => new_leader          ! set the current pointer as the new leader
        self%current%i = datum              ! set current to new datum
        self%current%previous => temp_link  ! restore the old current as the now previous link
    end subroutine stack_push

    function stack_pop(self) result(out_datum)
        use utility
        Class(Stack), intent(inout) :: self
        type(link), pointer :: temp_link
        character(len=31) :: message
        integer :: out_datum
        out_datum = 0
        if(self%length > 0) then
            out_datum = self%current%i             ! output the datum from the current link
            temp_link => self%current              ! save the current link as temp_link
            self%current => self%current%previous  ! set the current link to the previous
            deallocate(temp_link)                  ! deallocate the old leader
            self%length = self%length - 1          ! decrement the length
        else
            message = "Tried to pop empty stack!"  ! throw an error if empty
            call qstderr(message)
            stop -1                                ! die
        end if
    end function stack_pop

    function stack_has_items(self) result(peek_bool)
        Class(Stack), intent(inout) :: self
        logical :: peek_bool
        peek_bool = .false.
        if( self%length > 0 ) then
            peek_bool = .true.
        endif
    end function stack_has_items

    function stack_peek(self) result(out_datum)
        Class(Stack), intent(inout) :: self
        integer :: out_datum
        out_datum = self%current%i
    end function stack_peek

end module class_Stack
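
! --- Hedged usage sketch (ours, not part of the original source): a minimal
! driver showing the intended push/pop protocol. Assumes this module and the
! `utility` module (needed by stack_pop for qstderr) are compiled alongside.
program stack_demo
    use class_Stack
    implicit none
    type(Stack) :: s
    call s%init()
    call s%push(1)
    call s%push(2)
    print *, s%pop()   ! prints 2 (LIFO order)
    print *, s%pop()   ! prints 1
    call s%dealloc()   ! no-op on an empty stack
end program stack_demo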
|
{"hexsha": "61825ec248d8e888e8248cd09cf4a56712af8c35", "size": 4145, "ext": "f08", "lang": "FORTRAN", "max_stars_repo_path": "src/Stack.f08", "max_stars_repo_name": "ironmerchant/dfs_cycles", "max_stars_repo_head_hexsha": "ff2245934fab82f24d8d309d210d37f90f733c58", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Stack.f08", "max_issues_repo_name": "ironmerchant/dfs_cycles", "max_issues_repo_head_hexsha": "ff2245934fab82f24d8d309d210d37f90f733c58", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Stack.f08", "max_forks_repo_name": "ironmerchant/dfs_cycles", "max_forks_repo_head_hexsha": "ff2245934fab82f24d8d309d210d37f90f733c58", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6377952756, "max_line_length": 89, "alphanum_fraction": 0.6984318456, "num_tokens": 1020}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File:               Ampel-contrib-HU/ampel/contrib/hu/t0/XShooterFilter.py
# License:            BSD-3-Clause
# Author:             m. giomi <matteo.giomi@desy.de>
# Date:               28.08.2018
# Last Modified Date: 24.11.2021
# Last Modified By:   jnordin

from typing import Optional
from numpy import array

from ampel.protocol.AmpelAlertProtocol import AmpelAlertProtocol
from ampel.ztf.t0.DecentFilter import DecentFilter


class XShooterFilter(DecentFilter):
    """
    Filter derived from the DecentFilter which, in addition, selects very new
    transients that are visible from the South. In particular, a transient is
    accepted if it was detected during the last 6 h and has at least one
    non-detection (but no detection) during the preceding 5 days.
    """

    max_dec: float     # maximum allowed value for the declination
    det_within: float  # the transient must have been detected within the last 'DET_WITHIN' days
    ul_within: float   # the transient must have AT LEAST one upper limit within the last 'UL_WITHIN' days

    # Updated parameters based on infant detections spring 2021. Defaults conservative
    max_chipsf: float = 4    # Best guess value 2
    max_seeratio: float = 2  # Best guess value 1.3
    min_sumrat: float = 0.6  # Best guess value 0.8

    def post_init(self):
        super().post_init()
        self.keys_to_check += ("jd",)
        self.select_upper_limits = [{'attribute': 'magpsf', 'operator': 'is', 'value': None}]
        self.select_photopoints = [{'attribute': 'magpsf', 'operator': 'is not', 'value': None}]

    # Override
    def process(self, alert: AmpelAlertProtocol) -> None | bool | int:
        """
        run the decent filter on the alert
        """
        # cut on declination
        latest = alert.datapoints[0]
        if latest["dec"] > self.max_dec:
            self.logger.debug(
                f"Rejected: declination {latest['dec']:.2f} deg is "
                f"above maximum allowed of {self.max_dec:.2f} deg"
            )
            return None

        # CUT ON LATEST SUBTRACTION QUALITY
        ###################################
        if latest["chipsf"] > self.max_chipsf:
            self.logger.debug(
                f"Rejected: chipsf {latest['chipsf']:.2f} "
                f"above maximum allowed of {self.max_chipsf:.2f}"
            )
            return None
        if latest["seeratio"] > self.max_seeratio:
            self.logger.debug(
                f"Rejected: seeratio {latest['seeratio']:.2f} "
                f"above maximum allowed of {self.max_seeratio:.2f}"
            )
            return None
        if latest["sumrat"] < self.min_sumrat:
            self.logger.debug(
                f"Rejected: sumrat {latest['sumrat']:.2f} "
                f"below min allowed of {self.min_sumrat:.2f}"
            )
            return None

        # CUT ON THE HISTORY OF THE ALERT
        #################################
        now_jd = latest["jd"]
        self.logger.debug(f"Setting 'now' to JD {now_jd:.4f} to cut on alert history")

        # check on history 1: detected in the last 6h
        detection_jds = array(alert.get_values("jd", filters=self.select_photopoints))
        recent_detections = detection_jds > (now_jd - self.det_within)
        if not any(recent_detections):
            self.logger.debug(
                f"Rejected: no detection within the last {self.det_within:.3f} days "
                f"(latest one {(now_jd - max(detection_jds)):.3f} days ago)"
            )
            return None

        # check on history 2: at least one upper limit in the last 5 days
        ulim_jds = alert.get_values("jd", filters=self.select_upper_limits)
        if ulim_jds is None:
            self.logger.debug("Rejected: this alert has no upper limits")
            return None
        if not any(array(ulim_jds) > (now_jd - self.ul_within)):
            self.logger.debug(
                f"Rejected: no upper limit in the last {self.ul_within:.3f} days"
            )
            return None

        # check on history 3: no detection within the last 5 days
        not_so_recent_detections = detection_jds[~recent_detections]
        if any(not_so_recent_detections > (now_jd - self.ul_within)):
            self.logger.debug(
                f"Rejected: at least one detection within the last {self.ul_within:.3f} days "
                f"(but not within {self.det_within:.3f} days)."
            )
            return None

        # now apply the DecentFilter
        return super().process(alert)
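

# --- Hedged illustration (our addition, not part of the Ampel source): the
# history cuts above, distilled into a plain function over arrays of Julian
# dates. The function name and default windows (6 h = 0.25 d, 5 d) are ours.
def passes_history_cuts(det_jds, ulim_jds, now_jd, det_within=0.25, ul_within=5.0):
    det_jds = array(det_jds)
    recent = det_jds > (now_jd - det_within)
    if not recent.any():
        return False  # no detection within the last det_within days
    if not any(array(ulim_jds) > (now_jd - ul_within)):
        return False  # no upper limit within the last ul_within days
    if (det_jds[~recent] > (now_jd - ul_within)).any():
        return False  # an older detection falls inside the ul_within window
    return True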
|
{"hexsha": "e8b760f7e6323bfb4f58f8cce1f844bc6b5c59bf", "size": 4730, "ext": "py", "lang": "Python", "max_stars_repo_path": "ampel/contrib/hu/t0/XShooterFilter.py", "max_stars_repo_name": "mafn/Ampel-HU-astro", "max_stars_repo_head_hexsha": "93cf14874439c1f5d44622407fceff69eef7af2e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ampel/contrib/hu/t0/XShooterFilter.py", "max_issues_repo_name": "mafn/Ampel-HU-astro", "max_issues_repo_head_hexsha": "93cf14874439c1f5d44622407fceff69eef7af2e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-07T07:11:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T07:11:59.000Z", "max_forks_repo_path": "ampel/contrib/hu/t0/XShooterFilter.py", "max_forks_repo_name": "mafn/Ampel-HU-astro", "max_forks_repo_head_hexsha": "93cf14874439c1f5d44622407fceff69eef7af2e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-26T13:49:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T12:32:05.000Z", "avg_line_length": 41.1304347826, "max_line_length": 97, "alphanum_fraction": 0.5797040169, "include": true, "reason": "from numpy", "num_tokens": 1159}
|
(*  Title:  Environments.thy
    Author: Florian Kammuller and Henry Sudhof, 2008
*)

theory Environments imports Main begin

subsection {* Type Environments *}

text {* Some basic properties of our variable environments. *}
(* We use a wrapped map and an error element *)
datatype 'a environment =
    Env "(string ~=> 'a)"
  | Malformed

(* Adding an entry to an environment. Overwriting an entry switches to the error state *)
primrec
  add :: "('a environment) \<Rightarrow> string \<Rightarrow> 'a \<Rightarrow> 'a environment"
    ("_<_:_>" [90, 50, 0] 91)
where
  add_def: "(Env e)<x:a> =
    (if (x \<notin> dom e) then (Env (e(x \<mapsto> a))) else Malformed)"
| add_mal: "Malformed<x:a> = Malformed"

notation (xsymbols)
  add ("_\<lparr>_:_\<rparr>" [90, 0, 0] 91)

(* domains of environments, i.e. the set of used variable names *)
primrec
  env_dom :: "('a environment) \<Rightarrow> string set"
where
  env_dom_def: "env_dom (Env e) = dom e"
| env_dom_mal: "env_dom (Malformed) = {}"

(* Retrieving an entry from an environment *)
primrec
  env_get :: "('a environment) \<Rightarrow> string \<Rightarrow> 'a option" ("_!_")
where
  env_get_def: "env_get (Env e) x = e x"
| env_get_mal: "env_get (Malformed) x = None"

(* Environment well-formedness. For now weaker than usually recommended for LN;
   just finiteness and not being the error value *)
primrec ok :: "('a environment) \<Rightarrow> bool"
where
  OK_Env [intro]: "ok (Env e) = (finite (dom e))"
| OK_Mal [intro]: "ok Malformed = False"
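
(* Hedged example (our addition, not from the original theory): adding the
   same variable twice collapses the environment to the error state. *)
lemma "(Env empty)<''x'':a><''x'':b> = Malformed"
  by simp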
(* commutativity of add *)
lemma subst_add:
fixes x y
assumes "x \<noteq> y"
shows "e\<lparr>x:a\<rparr>\<lparr>y:b\<rparr> = e\<lparr>y:b\<rparr>\<lparr>x:a\<rparr>"
proof (cases e)
case Malformed thus ?thesis by simp
next
case (Env f) with assms show ?thesis
proof (cases "x \<in> dom f", simp)
case False with assms Env show ?thesis
proof (cases "y \<in> dom f", simp_all, intro ext)
fix xa :: string
case False with assms show "(f(x \<mapsto> a,y \<mapsto> b)) xa = (f(y \<mapsto> b,x \<mapsto> a)) xa"
proof (cases "xa = x", simp)
case False with assms show ?thesis
by (cases "xa = y", simp_all)
qed
qed
qed
qed
(* A well-formed environment is finite *)
lemma ok_finite[simp]: "ok e \<Longrightarrow> finite (env_dom e)"
by (cases e, simp+)
(* A well-formed environment is not malformed *)
lemma ok_ok[simp]: "ok e \<Longrightarrow> \<exists>x. e = (Env x)"
by (cases e, simp+)
(* If something is in the set of variable names, then it has a value assigned to it *)
lemma env_defined:
fixes x :: string and e :: "'a environment"
assumes "x \<in> env_dom e"
shows "\<exists>T . e!x = Some T"
proof (cases e)
case Malformed with assms show ?thesis by simp (* contradiction *)
next
case Env with assms show ?thesis by (simp, force)
qed
(* adding of new elements does not remove elements *)
lemma env_bigger: "\<lbrakk> a \<notin> env_dom e; x \<in> (env_dom e) \<rbrakk> \<Longrightarrow> x \<in> env_dom (e\<lparr>a:X\<rparr>)"
by (cases e, simp_all)
(* Added for convenience *)
lemma env_bigger2:
"\<lbrakk> a \<notin> env_dom e; b \<notin> (env_dom e); x \<in> (env_dom e); a \<noteq> b \<rbrakk>
\<Longrightarrow> x \<in> env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
by (cases e, simp_all)
(* If there is an entry, then the environment is sane. *)
lemma not_malformed: "x \<in> (env_dom e) \<Longrightarrow> \<exists>fun. e = Env fun"
by (cases e, simp_all)
(* Smaller environments are well formed. *)
lemma not_malformed_smaller:
fixes e :: "'a environment" and a :: string and X :: 'a
assumes "ok (e\<lparr>a:X\<rparr>)"
shows "ok e"
proof (cases e)
case Malformed with assms show ?thesis by simp (* contradiction *)
next
case (Env f) with ok_finite[OF assms] assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
(* Elements not in a bigger environment are not in a smaller one either *)
lemma not_in_smaller:
fixes e :: "'a environment" and a :: string and X :: 'a
assumes "ok (e\<lparr>a:X\<rparr>)"
shows "a \<notin> env_dom e"
proof (cases e)
case Malformed thus ?thesis by simp
next
case (Env f) with assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
(* A variable that got added is in the environment *)
lemma in_add:
fixes e :: "'a environment" and a :: string and X :: 'a
assumes "ok (e\<lparr>a:X\<rparr>)"
shows "a \<in> env_dom (e\<lparr>a:X\<rparr>)"
proof (cases e)
case Malformed with assms show ?thesis by simp (* contradiction *)
next
case (Env f) with assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
(* Similar to subst_add, but using a more convenient premise *)
lemma ok_add_reverse:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a
assumes "ok (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
shows "(e\<lparr>b:Y\<rparr>\<lparr>a:X\<rparr>) = (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
proof (cases e)
case Malformed with assms show ?thesis by simp (* contradiction *)
next
case (Env f)
with
not_in_smaller[OF `ok (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)`] in_add[OF assms]
not_in_smaller[OF not_malformed_smaller[OF assms]]
in_add[OF not_malformed_smaller[OF assms]]
show ?thesis
by (simp, intro conjI impI, elim conjE, auto simp: fun_upd_twist)
qed
lemma not_in_env_bigger:
fixes e :: "'a environment" and a :: string and X :: 'a and x :: string
assumes "x \<notin> (env_dom e)" and "x \<noteq> a"
shows "x \<notin> env_dom (e\<lparr>a:X\<rparr>)"
proof (cases e)
case Malformed thus ?thesis by simp
next
case (Env f) with assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
lemma not_in_env_bigger_2:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a and x :: string
assumes "x \<notin> (env_dom e)" and "x \<noteq> a" and "x \<noteq> b"
shows "x \<notin> env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
proof (cases e)
case Malformed thus ?thesis by simp
next
case (Env f) with assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
lemma not_in_env_smaller:
fixes e :: "'a environment" and a :: string and X :: 'a and x :: string
assumes "x \<notin> (env_dom (e\<lparr>a:X\<rparr>))" and "x \<noteq> a" and "ok (e\<lparr>a:X\<rparr>)"
shows "x \<notin> env_dom e"
proof (cases e)
case Malformed with assms(3) show ?thesis by simp (* contradiction *)
next
case (Env f) with assms show ?thesis
by (cases "a \<notin> dom f", simp_all)
qed
(* Conditions derivable from the well-formedness *)
lemma ok_add_2:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a
assumes "ok (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
shows "ok e \<and> a \<notin> env_dom e \<and> b \<notin> env_dom e \<and> a \<noteq> b"
proof -
{
assume "ok (e\<lparr>b:X\<rparr>\<lparr>b:Y\<rparr>)"
from not_in_smaller[OF this] in_add[OF not_malformed_smaller[OF this]]
have False by simp
} with assms have "a \<noteq> b" by auto
moreover
from assms ok_add_reverse[OF assms] have "ok (e\<lparr>b:Y\<rparr>\<lparr>a:X\<rparr>)" by simp
note not_in_smaller[OF not_malformed_smaller[OF this]]
ultimately
show ?thesis
using
not_malformed_smaller[OF not_malformed_smaller[OF assms]]
not_in_smaller[OF not_malformed_smaller[OF assms]]
by simp
qed
(* A variable that got added is in the environment *)
lemma in_add_2:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a
assumes "ok (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
shows "a \<in> env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>) \<and> b \<in> env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>)"
proof -
from ok_add_2[OF assms] show ?thesis
by (elim conjE, intro conjI, (cases e, simp_all)+)
qed
(* Convenience version *)
lemma ok_add_3:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a and c :: string and Z :: 'a
assumes "ok (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>\<lparr>c:Z\<rparr>)"
shows
"a \<notin> env_dom e \<and> b \<notin> env_dom e \<and> c \<notin> env_dom e \<and> a \<noteq> b \<and> b \<noteq> c \<and> a \<noteq> c"
proof -
{
assume "ok (e\<lparr>a:X\<rparr>\<lparr>c:Y\<rparr>\<lparr>c:Z\<rparr>)"
from not_in_smaller[OF this] in_add[OF not_malformed_smaller[OF this]]
have False by simp
} with assms have "b \<noteq> c" by auto
moreover
from assms ok_add_reverse[OF assms] have "ok (e\<lparr>a:X\<rparr>\<lparr>c:Z\<rparr>\<lparr>b:Y\<rparr>)" by simp
note ok_add_2[OF not_malformed_smaller[OF this]]
ultimately
show ?thesis using ok_add_2[OF not_malformed_smaller[OF assms]]
by simp
qed
lemma in_env_smaller:
fixes e :: "'a environment" and a :: string and X :: 'a and x :: string
assumes "x \<in> (env_dom (e\<lparr>a:X\<rparr>))" and "x \<noteq> a"
shows "x \<in> env_dom e"
proof -
from not_malformed[OF assms(1)] obtain f where f: "e\<lparr>a:X\<rparr> = Env f" by auto
with assms show ?thesis
proof (cases e)
case Malformed with `e\<lparr>a:X\<rparr> = Env f`
have False by simp
then show ?thesis ..
next
case (Env f') with assms f show ?thesis
by (simp, cases "a \<in> dom f'", simp_all, force)
qed
qed
lemma in_env_smaller2:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a and x :: string
assumes "x \<in> (env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>))" and "x \<noteq> a" and "x \<noteq> b"
shows "x \<in> env_dom e"
by (simp add: in_env_smaller[OF in_env_smaller[OF assms(1) assms(3)] assms(2)])
lemma get_env_bigger:
fixes e :: "'a environment" and a :: string and X :: 'a and x :: string
assumes "x \<in> (env_dom (e\<lparr>a:X\<rparr>))" and "x \<noteq> a"
shows "e!x = e\<lparr>a:X\<rparr>!x"
proof -
from not_malformed[OF assms(1)] obtain f where f: "e\<lparr>a:X\<rparr> = Env f" by auto
thus ?thesis proof (cases e)
case Malformed with `e\<lparr>a:X\<rparr> = Env f`
show ?thesis by simp (* contradiction *)
next
case (Env f') with assms f show ?thesis
by (cases "a \<notin> dom f'", auto)
qed
qed
lemma get_env_bigger2:
fixes
e :: "'a environment" and a :: string and X :: 'a and
b :: string and Y :: 'a and x :: string
assumes "x \<in> (env_dom (e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>))" and "x \<noteq> a" and "x \<noteq> b"
shows "e!x = e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>!x"
by (simp add: get_env_bigger[OF assms(1) assms(3)]
get_env_bigger[OF in_env_smaller[OF assms(1) assms(3)] assms(2)])
lemma get_env_smaller: "\<lbrakk> x \<in> env_dom e; a \<notin> env_dom e \<rbrakk> \<Longrightarrow> e\<lparr>a:X\<rparr>!x = e!x"
by (cases e, auto)
lemma get_env_smaller2:
"\<lbrakk> x \<in> env_dom e; a \<notin> env_dom e; b \<notin> env_dom e; a \<noteq> b \<rbrakk>
\<Longrightarrow> e\<lparr>a:X\<rparr>\<lparr>b:Y\<rparr>!x = e!x"
by (cases e, auto)
lemma add_get_eq: "\<lbrakk> xa \<notin> env_dom e; ok e; the e\<lparr>xa:U\<rparr>!xa = T \<rbrakk> \<Longrightarrow> U = T"
by (cases e, auto)
lemma add_get: "\<lbrakk> xa \<notin> env_dom e; ok e \<rbrakk> \<Longrightarrow> the e\<lparr>xa:U\<rparr>!xa = U"
by (cases e, auto)
lemma add_get2_1:
fixes e :: "'a environment" and x :: string and A :: 'a and y :: string and B :: 'a
assumes "ok (e\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>)"
shows "the e\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>!x = A"
proof -
from ok_add_2[OF assms] show ?thesis
by (cases e, elim conjE, simp_all)
qed
lemma add_get2_2:
fixes e :: "'a environment" and x :: string and A :: 'a and y :: string and B :: 'a
assumes "ok (e\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>)"
shows "the e\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>!y = B"
proof -
from ok_add_2[OF assms] show ?thesis
by (cases e, elim conjE, simp_all)
qed
lemma ok_add_ok: "\<lbrakk> ok e; x \<notin> env_dom e \<rbrakk> \<Longrightarrow> ok (e\<lparr>x:X\<rparr>)"
by (cases e, auto)
lemma env_add_dom:
fixes e :: "'a environment" and x :: string
assumes "ok e" and "x \<notin> env_dom e"
shows "env_dom (e\<lparr>x:X\<rparr>) = env_dom e \<union> {x}"
proof (auto simp: in_add[OF ok_add_ok[OF assms]], rule ccontr)
fix y assume "y \<in> env_dom (e<x:X>)" and "y \<notin> env_dom e" and "y \<noteq> x"
from in_env_smaller[OF this(1) this(3)] this(2) show False by simp
next
fix y assume "y \<in> env_dom e"
from env_bigger[OF not_in_smaller[OF ok_add_ok[OF assms]] this]
show "y \<in> env_dom (e\<lparr>x:X\<rparr>)" by assumption
qed
lemma env_add_dom_2:
fixes e :: "'a environment" and x :: string and y :: string
assumes "ok e" and "x \<notin> env_dom e" and "y \<notin> env_dom e" and "x \<noteq> y"
shows "env_dom (e\<lparr>x:X\<rparr>\<lparr>y:Y\<rparr>) = env_dom e \<union> {x,y}"
proof -
from env_add_dom[OF assms(1-2)] assms(3-4)
have "y \<notin> env_dom (e\<lparr>x:X\<rparr>)" by simp
from
env_add_dom[OF assms(1-2)]
env_add_dom[OF ok_add_ok[OF assms(1-2)] this]
show ?thesis by auto
qed
fun
env_app :: "('a environment) \<Rightarrow> ('a environment) \<Rightarrow> ('a environment)" ("_+_")
where
"env_app (Env a) (Env b) =
(if (ok (Env a) \<and> ok (Env b) \<and> env_dom (Env b) \<inter> env_dom (Env a) = {})
then Env (a ++ b) else Malformed )"
lemma env_app_dom:
fixes e1 :: "'a environment" and e2 :: "'a environment"
assumes "ok e1" and "env_dom e1 \<inter> env_dom e2 = {}" and "ok e2"
shows "env_dom (e1+e2) = env_dom e1 \<union> env_dom e2"
proof -
from ok_ok[OF `ok e1`] ok_ok[OF `ok e2`]
obtain f1 f2 where "e1 = Env f1" and "e2 = Env f2" by auto
with assms(2) ok_finite[OF `ok e1`] ok_finite[OF `ok e2`]
show ?thesis by auto
qed
lemma env_app_same[simp]:
fixes e1 :: "'a environment" and e2 :: "'a environment" and x :: string
assumes
"ok e1" and "x \<in> env_dom e1" and
"env_dom e1 \<inter> env_dom e2 = {}" and "ok e2"
shows "the (e1+e2!x) = the e1!x"
proof -
from ok_ok[OF `ok e1`] ok_ok[OF `ok e2`]
obtain f1 f2 where "e1 = Env f1" and "e2 = Env f2" by auto
with assms(2-3) ok_finite[OF `ok e1`] ok_finite[OF `ok e2`]
show ?thesis proof (auto)
fix y :: 'a assume "dom f1 \<inter> dom f2 = {}" and "f1 x = Some y"
from map_add_comm[OF this(1)] this(2) have "(f1 ++ f2) x = Some y"
by (simp add: map_add_Some_iff)
thus "the ((f1 ++ f2) x) = y" by auto
qed
qed
lemma env_app_ok[simp]:
fixes e1 :: "'a environment" and e2 :: "'a environment"
assumes "ok e1" and "env_dom e1 \<inter> env_dom e2 = {}" and "ok e2"
shows "ok (e1+e2)"
proof -
from ok_ok[OF `ok e1`] ok_ok[OF `ok e2`]
obtain f1 f2 where "e1 = Env f1" and "e2 = Env f2" by auto
with assms show ?thesis by (simp,force)
qed
lemma env_app_add[simp]:
fixes e1 :: "'a environment" and e2 :: "'a environment" and x :: string
assumes
"ok e1" and "env_dom e1 \<inter> env_dom e2 = {}" and "ok e2" and
"x \<notin> env_dom e1" and "x \<notin> env_dom e2"
shows "(e1+e2)\<lparr>x:X\<rparr> = e1\<lparr>x:X\<rparr>+e2"
proof -
from ok_ok[OF `ok e1`] ok_ok[OF `ok e2`]
obtain f1 f2 where "e1 = Env f1" and "e2 = Env f2" by auto
with assms show ?thesis proof (clarify, simp, intro impI ext)
fix xa :: string
assume "x \<notin> dom f1" and "x \<notin> dom f2"
thus "((f1 ++ f2)(x \<mapsto> X)) xa = (f1(x \<mapsto> X) ++ f2) xa"
proof (cases "x = xa", simp_all)
case False thus "(f1 ++ f2) xa = (f1(x \<mapsto> X) ++ f2) xa"
by (simp add: map_add_def split: option.split)
next
case True with `x \<notin> dom f1` `x \<notin> dom f2`
have "(f1(xa \<mapsto> X) ++ f2) xa = Some X"
by (auto simp: map_add_Some_iff)
thus "Some X = (f1(xa \<mapsto> X) ++ f2) xa" by simp
qed
qed
qed
lemma env_app_add2[simp]:
fixes
e1 :: "'a environment" and e2 :: "'a environment" and
x :: string and y :: string
assumes
"ok e1" and "env_dom e1 \<inter> env_dom e2 = {}" and "ok e2" and
"x \<notin> env_dom e1" and "x \<notin> env_dom e2" and "y \<notin> env_dom e1" and
"y \<notin> env_dom e2" and "x \<noteq> y"
shows "(e1+e2)\<lparr>x:X\<rparr>\<lparr>y:Y\<rparr> = e1\<lparr>x:X\<rparr>\<lparr>y:Y\<rparr>+e2"
proof -
from ok_ok[OF `ok e1`] ok_ok[OF `ok e2`]
obtain f1 f2 where "e1 = Env f1" and "e2 = Env f2" by auto
with assms show ?thesis proof (clarify, simp, intro impI ext)
fix xa :: string
assume "x \<notin> dom f1" and "x \<notin> dom f2" and "y \<notin> dom f1" and "y \<notin> dom f2"
with `x \<noteq> y`
show "((f1 ++ f2)(x \<mapsto> X, y \<mapsto> Y)) xa = (f1(x \<mapsto> X, y \<mapsto> Y) ++ f2) xa"
proof (cases "x = xa", simp)
case True
with `x \<noteq> y` `x \<notin> dom f1` `x \<notin> dom f2` `y \<notin> dom f1` `y \<notin> dom f2`
have "(f1(xa \<mapsto> X,y \<mapsto> Y) ++ f2) xa = Some X"
by (auto simp: map_add_Some_iff)
thus "Some X = (f1(xa \<mapsto> X,y \<mapsto> Y) ++ f2) xa" by simp
next
case False thus ?thesis
proof (cases "y = xa", simp_all)
case False with `x \<noteq> xa`
show "(f1 ++ f2) xa = (f1(x \<mapsto> X,y \<mapsto> Y) ++ f2) xa"
by (simp add: map_add_def split: option.split)
next
case True
with `x \<noteq> y` `x \<notin> dom f1` `x \<notin> dom f2` `y \<notin> dom f1` `y \<notin> dom f2`
have "(f1(x \<mapsto> X,xa \<mapsto> Y) ++ f2) xa = Some Y"
by (auto simp: map_add_Some_iff)
thus "Some Y = (f1(x \<mapsto> X, xa \<mapsto> Y) ++ f2) xa" by simp
qed
qed
qed
qed
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Locally-Nameless-Sigma/preliminary/Environments.thy"}
|
Some local artists of renown include: (listed alphabetically)
Jed Alexander Artist and illustrator. You can see his work at http://jedalexander.com
http://www.natsoulas.com/html/artists/robertArneson/robertArneson.html Robert Arneson Former Art Department Art Faculty at UCD
http://www.verisimilitudo.com/arneson Robert Arneson tribute site
Conrad Atkinson
http://www.chilltree.com/ Emily Barranco Earlenbaugh (Chill Tree Jewelry) Custom Art and Jewelry by Donation.
http://www.cuteware.net Heidi Bekebrede Ceramicist, vocalist, The Artery Artery member since 1988.
Kris Bell Artist, portrait and figure painter, kristhebell.blogspot.com
Donna Billick Tile and ceramics
Jill Bowlus Artist/Art Educator, Ceramics http://www.atheartart.com
http://www.marietheresebrown.com MarieTherese Brown painter The Artery Artery
Deborah Butterfield (B.A. 1971, M.F.A. 1973)
http://www.jamesharrisgallery.com/Artists/Squeak%20Carnwrath/carnwrath.htm Squeak Carnwath Former Art Department Art Faculty at UCD (now at UC Berkeley UC Berkeley)
http://www.melissachandon.com/ Melissa Chandon
http://www.tvcarrera.com/ Tracy Villa Carrera, http://www.carnahanfineart.com/products.cfm?ArtistID6&CatID3 profile
http://www.lynnecunningham.com Lynne Cunningham painter, public art, instructor also here:Users/LynneCunningham Lynne Cunningham
Users/DianaJahns Diana Jahns painter and photographer
http://modernsculpture.com/deforest.php Roy DeForest Former Art Department Art Faculty at UCD
Users/TorreyaCummings Torreya Cummings Not famous, but damn good.
Users/JasonDayne Jason Dayne Jeeba Jewelry http://www.jeeba.com Handcrafted jewelry in sterling silver with gemstones.
Jesse Drew Media artist. Associate Director of Technocultural Studies at UCD.
Arlen FeldwickJones
Henna by Funo Funo henna artist from Sudan
Users/PhilGeck Tattoo Artist/ artists, diverse in Oil, Acrylic and water color. works at http://daviswiki.org/Primary_Concepts_Tattooing?actionshow&redirectPrimary+Concepts/ Primary Concepts in downtown Davis.
http://www.davidgilhooly.com/ David Gilhooly Famous for his funk ceramics. Now does paintings.
Users/DanGlendening Daniel J. Glendening famous. nuff said.
http://www.philgross.net/ Phil Gross Painter of Sacramento valley and Northern California Landscapes
Users/GeraldHeffernon Gerald Heffernon created the Davis Food Coop Tomato
http://hollowell.ucdavis.edu/ David Hollowell Current Art Department Art Faculty at UCD
http://www.rasehallstudios.com/blog/archives/000970.html Steve Kaltenbach Conceptualist MFA UC Davis studied with Wiley and Nauman (also http://www.lawrencemarkey.com/kaltenbach_w.htm click here)
http://www.krop.com/katykhotography/ Katy Karns Photographer, also starts EC Garden Art Walks http://www.facebook.com/profile.php?refprofile&id100000620017970
Tim Lane contemporary artist painterhttp://www.timothylaneart.com/ http://www.myspace.com/flowersinspace
http://www.natsoulas.com/html/artists/williamMaul/williamMaul.html William Maul Artist
http://www.tonynatsoulas.com Tony Natsoulas Raised in Davis and went to UCD for BA and MFA. Made sculptures of The Joggers Joggers Downtown (and much more).
http://www.urbangoddessdesigns.com/ Lisa Novotny Jewelry designer
Jeffrey Blake Palmer artist, filmmaker, radio & media guy http://flickerpictures.com
http://paradox.rambisyouth.com Local artist who did a large Burroughs style cut up collage.
http://archivesofamericanart.si.edu/oralhist/peters02.htm Roland Petersen
http://www.lucypuls.com/ Lucy Puls Art Department Chair at UCD
Users/Pxlated Urban Art and http://www.flickr.com/photos/benignpxl political collage.
http://www.urbangoddessdesigns.com/ Misty Reed Jewelry, lampwork & graphic designer
http://nomanrileyphotography.com Norman E. Riley Large Format Photography
Mark Rivera Tile and ceramics
Kerry RowlandAvrech Painter and muralist
http://www.transitlounge.com/hassel_smith/ Hassel Smith
http://wwwenglish.ucdavis.edu/faculty/snyder/snyder.htm Gary Snyder
Users/ConnieTaxiera Connie Taxiera Piece of Mind Fabrications http://fabricartbyconnie.com Fabric art bowls by Connie.
Wayne Thiebaud Emeritus Art Department Art Faculty at UCD
Jean Van Keuren Ceramics, Bronzes, and Cement sculptures http://www.jeanvankeuren.com Jean Van Keuren Made Greenbelt Dogs, Senior Center Bronze, Cement Giraffe by Hallmark Inn
Daryl R. Vasquez Owner of Aesthetic Reflections: commissioned portraits
Tom Dotan and John King created The UC Davis Show, a Davis-centric sitcom. There appears to have only been a pilot episode.
http://www.facebook.com/sotoodeh.yarmahmoudi Sotoodeh Yarmahmoudi Artist, Illustrator and Character Designer. You can see her artworks at her http://www.sotoodehyari.com/ website.
Kirsten Elise Young users/Kirstenelise http://kirsteneyoung.viewbook.com/
Some of these open their studios for the Davis Artist Studio Tour.
|
{"hexsha": "3e49db84a31432d784bf90e21bb801303862efc0", "size": 4958, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Local_Artists.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Local_Artists.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Local_Artists.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 86.9824561404, "max_line_length": 212, "alphanum_fraction": 0.7993142396, "num_tokens": 1342}
|
# In order to understand how the code works, it is a good idea to check the
# final section of the file that starts with
#   if __name__ == '__main__'
#
# Your task is essentially to replace all the parts marked as TODO or as
# instructed through comments in the functions, as well as the filenames
# and labels in the main part of the code which can be found at the end of
# this file.
#
# The raise command is used to help you out in finding where you still need to
# write your own code. When you successfully modified the code in that part,
# remove the `raise` command.

import sys

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

# ====================== FUNCTIONS USED BY THE MAIN CODE ===================
#
# ====================== FOR THE MAIN CODE SCROLL TO THE BOTTOM ============

def ER_properties(n, p):
    '''
    This function builds 100 ER networks with the given parameters and averages
    over them to estimate the expected values of average clustering coefficient,
    average degree and diameter of an ER network with the given parameters. The
    diameter is always computed for the largest ("giant") connected component.

    Parameters
    ----------
    n : int
        Number of nodes
    p : float
        the probability that a pair of nodes are linked is p.

    Returns
    -------
    expected_c: float
        expected value of average clustering coefficient
    expected_k: float
        expected value of average degree
    expected_d: float
        expected value of d*

    Hints:
    The following functions might be useful:
        nx.fast_gnp_random_graph(n, p),
        nx.average_clustering(graph, count_zeros=True),
        nx.connected_component_subgraphs(graph),
        nx.diameter(giant)
    nx.connected_component_subgraphs gives you a list of subgraphs.
    To pick the largest, the fastest way is to use max with a key: max(x, key=len)
    returns the longest/largest element of the list x.
    For computing averages over realizations, you can e.g. collect your
    values to three lists, c, k, d, and use np.mean to get the average.
    '''
    # YOUR CODE HERE
    average_degree = []
    average_cluster = []
    diameter = []
    for i in range(100):
        g = nx.fast_gnp_random_graph(n, p)
        # average degree
        nodes = g.nodes()
        degrees = []
        for node in nodes:
            degrees.append(len(list(g.neighbors(node))))
        ave_deg = np.mean(degrees)
        average_degree.append(ave_deg)
        # clustering coefficient
        ave_cc = nx.average_clustering(g)
        average_cluster.append(ave_cc)
        # diameter of the giant component
        # (note: nx.connected_component_subgraphs was removed in networkx 2.4;
        # on newer versions use max((g.subgraph(c) for c in
        # nx.connected_components(g)), key=len) instead)
        longest_subgraph = max(nx.connected_component_subgraphs(g), key=len)
        dm = nx.diameter(longest_subgraph)
        diameter.append(dm)
    expected_c = np.mean(average_cluster)
    expected_k = np.mean(average_degree)
    expected_d = np.mean(diameter)
    return expected_c, expected_k, expected_d
def ER_properties_theoretical(p):
    '''
    This function calculates the theoretical values of the clustering coefficient,
    average degree, and diameter for ER networks of size 3 and link probability p.
    The theoretical values can be viewed as expectations, or ensemble averages.
    Therefore, e.g., the expected diameter doesn't have to be an integer, although
    it of course always is for a single ER network.

    Parameters
    ----------
    p : float
        the probability that a pair of nodes are linked is p.

    Returns
    -------
    c_theory: float
        theoretical value of average clustering coefficient
    k_theory: float
        theoretical value of average degree
    d_theory: float
        theoretical value of diameter
    '''
    # Calculate the theoretical values for an ER network with parameters n, p
    # YOUR CODE HERE
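    # --- Derivation sketch (our addition) for n = 3 nodes ---
    # <k>: each node has n - 1 = 2 potential neighbours, each present with
    #      probability p, so E[<k>] = 2p.
    # <C>: a node gets a non-zero clustering value only if both of its possible
    #      links exist (probability p^2) and the link closing the triangle
    #      exists too (probability p), so E[<C>] = p^3.
    # <d*>: over the four graph classes on 3 nodes,
    #       E[d*] = 1*p^3 + 2*3p^2(1-p) + 1*3p(1-p)^2 + 0*(1-p)^3 = 3p - 2p^3.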
    c_theory = p**3
    k_theory = 2*p
    d_theory = 3*p - 2*p**3
    return c_theory, k_theory, d_theory
def plot_er_values(n, p_list):
    '''
    This function calculates the theoretical clustering coefficient, average
    degree and diameter for an ER network with parameters n and p and plots them
    against the expected values from an ensemble of 100 realizations.

    Parameters
    ----------
    n : int
        Number of nodes
    p_list : list of floats
        where each member is the probability that a pair of nodes are linked.

    Returns
    -------
    fig: matplotlib Figure
        plots of expected values against theoretical values
    '''
    k_list = []  # list for degrees
    c_list = []  # list for clustering coeff values
    d_list = []  # list for diameter values
    c_list_theory = []
    k_list_theory = []
    d_list_theory = []
    for p in p_list:
        print("calculating for n=%d p=%f" % (n, p), file=sys.stderr)
        average_c, average_k, average_d = ER_properties(n, p)
        k_list.append(average_k)
        c_list.append(average_c)
        d_list.append(average_d)
        # use `==` here: `is` tests object identity, not numeric equality
        if n == 3:
            c_theory, k_theory, d_theory = ER_properties_theoretical(p)
            c_list_theory.append(c_theory)
            k_list_theory.append(k_theory)
            d_list_theory.append(d_theory)
    fig = plt.figure(figsize=(8, 5))
    # Three subplots for <K>, <C> and <d*> ((1, 3, 1) means 1 row, three columns, first subplot)
    ax = fig.add_subplot(1, 3, 1)
    ax.plot(p_list, c_list, 'r-', label="Simulated", marker='o')
    if len(c_list_theory) > 0 and c_list_theory[0] is not None:
        ax.plot(p_list, c_list_theory, 'b-', label="Theoretical", marker='o')
    ax.set_xlabel('p')
    ax.set_ylabel('Clustering coefficient')
    ax.legend(loc=0)
    ax.set_title('N = %d' % n, size=18)

    ax2 = fig.add_subplot(1, 3, 2)
    ax2.plot(p_list, k_list, 'r-', label="Simulated", marker='o')
    if len(k_list_theory) > 0 and k_list_theory[0] is not None:
        ax2.plot(p_list, k_list_theory, 'b-', label="Theoretical", marker='o')
    ax2.set_xlabel('p')
    ax2.set_ylabel('Average degree')
    ax2.legend(loc=0)

    ax3 = fig.add_subplot(1, 3, 3)
    ax3.plot(p_list, d_list, 'r-', label="Simulated", marker='o')
    if len(d_list_theory) > 0 and d_list_theory[0] is not None:
        ax3.plot(p_list, d_list_theory, 'b-', label="Theoretical", marker='o')
    ax3.set_xlabel('p')
    ax3.set_ylabel('Diameter')
    ax3.legend(loc=0)

    fig.tight_layout()
    return fig
# =========================== MAIN CODE BELOW ==============================

if __name__ == "__main__":
    ps = np.arange(0, 1.01, 0.05)
    fig = plot_er_values(n=3, p_list=ps)
    figure_filename = 'ER_properties_3_nodes.pdf'
    fig.savefig(figure_filename)
    # or just use plt.show() and save manually

    # Do the same steps (calculation and visualization) for n=100
    fig = plot_er_values(n=100, p_list=ps)
    figure_filename = 'ER_properties_100_nodes.pdf'
    fig.savefig(figure_filename)
    # or just use plt.show() and save manually
|
{"hexsha": "06b82761abbad1af601d433894a876efe7428959", "size": 6988, "ext": "py", "lang": "Python", "max_stars_repo_path": "Exercice 2/properties_of_er_networks.py", "max_stars_repo_name": "Yanko96/CS-E5740-Complex-Networks", "max_stars_repo_head_hexsha": "708af24230218b77f1196c1a0ec5885165491a85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Exercice 2/properties_of_er_networks.py", "max_issues_repo_name": "Yanko96/CS-E5740-Complex-Networks", "max_issues_repo_head_hexsha": "708af24230218b77f1196c1a0ec5885165491a85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Exercice 2/properties_of_er_networks.py", "max_forks_repo_name": "Yanko96/CS-E5740-Complex-Networks", "max_forks_repo_head_hexsha": "708af24230218b77f1196c1a0ec5885165491a85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7584541063, "max_line_length": 124, "alphanum_fraction": 0.6489696623, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1719}
|
from deepface import DeepFace
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import os
import logging

logging.basicConfig(filename='example.log', level=logging.DEBUG)

df = pd.read_excel('second/sample_identity_gallery_probe.xlsx')
galleries = df[df['proposed Gallery/Probe'] == 'G']
probes = df[df['proposed Gallery/Probe'] == 'P']
# note: DataFrame.size counts cells (rows x columns), not rows
print(galleries.size)
print(probes.size)


def get_file_name(df, id):
    """Return the file names belonging to the given identity id."""
    name = df[df['id'] == id]['file name']
    return name.tolist()


print(get_file_name(galleries, 3))
print(get_file_name(probes, 3))

# should be general
here = os.path.dirname(os.path.realpath(__file__))
path = here + "/second/images"

for i in range(1, 121):
    # gallery = get_file_name(galleries, i)[0]
    for img in get_file_name(probes, i):
        # gpath = osp.join(path, gallery)
        ppath = osp.join(path, img)
        # print(gpath, ppath, "pgpath")
        # note: mainline deepface names this backend 'retinaface'; this fork
        # may accept 'retina'
        res = DeepFace.find(ppath, db_path=here + '/second/gallery',
                            enforce_detection=False, detector_backend='retina')
        # print(img, res)
        logging.info(img)
        logging.info(res)
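
# --- Hedged note (ours, not from this repository): in mainline deepface,
# DeepFace.find returns a pandas DataFrame of gallery hits with an 'identity'
# column, so the logged `res` can be post-processed afterwards to pick the
# closest gallery match for each probe image.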
|
{"hexsha": "fbcefad986174bedba148e89951ddf84b21bf42a", "size": 1182, "ext": "py", "lang": "Python", "max_stars_repo_path": "find.py", "max_stars_repo_name": "milad-4274/deepface", "max_stars_repo_head_hexsha": "bde16b2b79946f93c2934d2259daa6444defa248", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "find.py", "max_issues_repo_name": "milad-4274/deepface", "max_issues_repo_head_hexsha": "bde16b2b79946f93c2934d2259daa6444defa248", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "find.py", "max_forks_repo_name": "milad-4274/deepface", "max_forks_repo_head_hexsha": "bde16b2b79946f93c2934d2259daa6444defa248", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8636363636, "max_line_length": 79, "alphanum_fraction": 0.6751269036, "include": true, "reason": "import numpy", "num_tokens": 297}
|
% !TEX root = ../thesis.tex
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "thesis"
%%% End:
\chapter{Chapter Title}\label{chap:chapter_name}
Chapter introduction goes here.
\section{Section heading}\label{section:section_name}
Section description goes here.
And if I want to include a graphic, I can use the code below and reference it \ref{graphics:sample_graphic}.
\begin{figure}
  \centering
  \includegraphics[width=\textwidth]{graphics/logos/SFI.png}
  \caption{Sample image for display}
  \label{graphics:sample_graphic}
\end{figure}
Note, however, that figures, tables and equations will not necessarily appear where they are placed in the .tex file.
\subsection{Subsection Heading}\label{subsection:subsection_name}
Subsection description goes here.
\subsubsection{Subsubsection Heading}\label{subsubsection:subsubsection_name}
Subsubsection description goes here. And if needed, I can include paragraphs, like so.
\paragraph{Paragraph heading}
Paragraph text (if needed). I can also include equations, and reference them \ref{equation:tiling_operation}.
% Comment: Change this to allow overlapping windows
\begin{equation}\label{equation:tiling_operation}
\alpha \cap \omega
\end{equation}
And I can also use tables, and reference them \ref{table:sample_table}.
\begin{table}
  \begin{center}
    \begin{tabular}{r r} \hline \hline
      Column 1 & Column 2 \\
      \hline
      A & B \\
      C & E \\
      D & F \\
    \end{tabular}
  \end{center}
  \caption{Sample table}
  \label{table:sample_table}
\end{table}
\subsection{Subsection Heading 2}\label{subsection:subsection_name_2}
And so on ...
|
{"hexsha": "56181e8f8457b2fa0acb259133c9c20d59e77ae4", "size": 1641, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis-template/template/chapters/chapter1.tex", "max_stars_repo_name": "igorbrigadir/insight-templates", "max_stars_repo_head_hexsha": "8722fc7741181fae9fcfa45bce3fc28382729108", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-01-29T23:23:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T01:31:29.000Z", "max_issues_repo_path": "thesis-template/template/chapters/chapter1.tex", "max_issues_repo_name": "igorbrigadir/insight-templates", "max_issues_repo_head_hexsha": "8722fc7741181fae9fcfa45bce3fc28382729108", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2015-03-12T17:34:39.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-27T14:41:31.000Z", "max_forks_repo_path": "thesis-template/template/chapters/chapter1.tex", "max_forks_repo_name": "igorbrigadir/insight-templates", "max_forks_repo_head_hexsha": "8722fc7741181fae9fcfa45bce3fc28382729108", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-08-26T12:28:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-27T19:41:28.000Z", "avg_line_length": 26.4677419355, "max_line_length": 109, "alphanum_fraction": 0.7471054235, "num_tokens": 443}
|
#include <string>
#include <gtest/gtest.h>

#include "ros/ros.h"
#include "nav_msgs/Odometry.h"
#include "geometry_msgs/PoseWithCovarianceStamped.h"
#include <boost/thread.hpp>

using namespace ros;

int g_argc;
char** g_argv;

typedef boost::shared_ptr<geometry_msgs::PoseWithCovarianceStamped const> EkfConstPtr;

class TestEKF : public testing::Test
{
public:
  NodeHandle node_;
  ros::Subscriber ekf_sub_;
  double ekf_counter_;

  void EKFCallback(const EkfConstPtr& ekf)
  {
    // count number of callbacks
    ekf_counter_++;
  }

protected:
  /// constructor
  TestEKF()
  {
    ekf_counter_ = 0;
  }

  /// Destructor
  ~TestEKF()
  {
  }
};


TEST_F(TestEKF, test)
{
  ROS_INFO("Subscribing to robot_pose_ekf/odom_combined");
  ekf_sub_ = node_.subscribe("/robot_pose_ekf/odom_combined", 10, &TestEKF::EKFCallback, (TestEKF*)this);

  // wait for 20 seconds
  ROS_INFO("Waiting for 20 seconds while bag is playing");
  ros::Duration(20).sleep();
  ROS_INFO("End time reached");

  EXPECT_EQ(ekf_counter_, 0);
  SUCCEED();
}


int main(int argc, char** argv)
{
  testing::InitGoogleTest(&argc, argv);
  g_argc = argc;
  g_argv = argv;
  init(g_argc, g_argv, "testEKF");
  boost::thread spinner(boost::bind(&ros::spin));

  int res = RUN_ALL_TESTS();
  spinner.interrupt();
  spinner.join();

  return res;
}
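
// --- Note (our reading, not from the original source): with zero covariance
// on the input topics the EKF node is expected to reject the measurements, so
// this test passes only if no combined-odometry message is published during
// the 20 s window, i.e. ekf_counter_ stays 0.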
|
{"hexsha": "ba7866f69e65ebcc765f9a83758f995ee4255d2c", "size": 1331, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "robot_pose_ekf/test/test_robot_pose_ekf_zero_covariance.cpp", "max_stars_repo_name": "SNU-SF4/viwo", "max_stars_repo_head_hexsha": "8ce0757617b4204e1a367552be7fe6a98ff9363f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "robot_pose_ekf/test/test_robot_pose_ekf_zero_covariance.cpp", "max_issues_repo_name": "SNU-SF4/viwo", "max_issues_repo_head_hexsha": "8ce0757617b4204e1a367552be7fe6a98ff9363f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "robot_pose_ekf/test/test_robot_pose_ekf_zero_covariance.cpp", "max_forks_repo_name": "SNU-SF4/viwo", "max_forks_repo_head_hexsha": "8ce0757617b4204e1a367552be7fe6a98ff9363f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-03-28T13:34:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:31:48.000Z", "avg_line_length": 15.8452380952, "max_line_length": 105, "alphanum_fraction": 0.690458302, "num_tokens": 388}
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import torch as t
import torch.nn as nn
import torch.nn.functional as F

from pysot.core.config import cfg
from pysot.models.utile.loss import select_cross_entropy_loss, IOULoss, DISCLE
from pysot.models.backbone.temporalbackbone import TemporalAlexNet
from pysot.models.utile.utile import TCT
from pysot.models.utile.utiletest import TCTtest

import matplotlib.pyplot as plt
import numpy as np


class ModelBuilder(nn.Module):
    def __init__(self, label):
        super(ModelBuilder, self).__init__()

        self.backbone = TemporalAlexNet().cuda()

        # use the test-time head when tracking, the full head when training
        if label == 'test':
            self.grader = TCTtest(cfg).cuda()
        else:
            self.grader = TCT(cfg).cuda()

        self.cls3loss = nn.BCEWithLogitsLoss()
        self.IOULOSS = IOULoss()

    def template(self, z, x):
        with t.no_grad():
            zf, _, _ = self.backbone.init(z)
            self.zf = zf
            xf, xfeat1, xfeat2 = self.backbone.init(x)
            ppres = self.grader.conv1(self.xcorr_depthwise(xf, zf))

            self.memory = ppres
            self.featset1 = xfeat1
            self.featset2 = xfeat2

    def xcorr_depthwise(self, x, kernel):
        """depthwise cross correlation
        """
        batch = kernel.size(0)
        channel = kernel.size(1)
        x = x.view(1, batch*channel, x.size(2), x.size(3))
        kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3))
        out = F.conv2d(x, kernel, groups=batch*channel)
        out = out.view(batch, channel, out.size(2), out.size(3))
        return out

    def track(self, x):
        with t.no_grad():
            xf, xfeat1, xfeat2 = self.backbone.eachtest(x, self.featset1, self.featset2)
            loc, cls2, cls3, memory = self.grader(xf, self.zf, self.memory)

            self.memory = memory
            self.featset1 = xfeat1
            self.featset2 = xfeat2

            return {
                'cls2': cls2,
                'cls3': cls3,
                'loc': loc
            }

    def log_softmax(self, cls):
        b, a2, h, w = cls.size()
        cls = cls.view(b, 2, a2//2, h, w)
        cls = cls.permute(0, 2, 3, 4, 1).contiguous()
        cls = F.log_softmax(cls, dim=4)
        return cls

    def getcentercuda(self, mapp):

        def dcon(x):
            # inverse tanh (artanh), with inputs clamped away from +-1 to stay finite
            x[t.where(x <= -1)] = -0.99
            x[t.where(x >= 1)] = 0.99
            return (t.log(1+x) - t.log(1-x)) / 2

        size = mapp.size()[3]

        # location
        x = t.Tensor(np.tile((16*(np.linspace(0, size-1, size))+63) - cfg.TRAIN.SEARCH_SIZE//2, size).reshape(-1)).cuda()
        y = t.Tensor(np.tile((16*(np.linspace(0, size-1, size))+63).reshape(-1, 1) - cfg.TRAIN.SEARCH_SIZE//2, size).reshape(-1)).cuda()

        shap = dcon(mapp) * (cfg.TRAIN.SEARCH_SIZE//2)

        xx = np.int16(np.tile(np.linspace(0, size-1, size), size).reshape(-1))
        yy = np.int16(np.tile(np.linspace(0, size-1, size).reshape(-1, 1), size).reshape(-1))

        w = shap[:, 0, yy, xx] + shap[:, 1, yy, xx]
        h = shap[:, 2, yy, xx] + shap[:, 3, yy, xx]
        x = x - shap[:, 0, yy, xx] + w/2 + cfg.TRAIN.SEARCH_SIZE//2
        y = y - shap[:, 2, yy, xx] + h/2 + cfg.TRAIN.SEARCH_SIZE//2

        anchor = t.zeros((cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.NUM_GPU, size**2, 4)).cuda()
        anchor[:, :, 0] = x - w/2
        anchor[:, :, 1] = y - h/2
        anchor[:, :, 2] = x + w/2
        anchor[:, :, 3] = y + h/2

        return anchor
def forward(self,data):
""" only used in training
"""
presearch=data['pre_search'].cuda()
template = data['template'].cuda()
search =data['search'].cuda()
bbox=data['bbox'].cuda()
labelcls2=data['label_cls2'].cuda()
labelxff=data['labelxff'].cuda()
labelcls3=data['labelcls3'].cuda()
weightxff=data['weightxff'].cuda()
presearch=t.cat((presearch,search.unsqueeze(1)),1)
zf = self.backbone(template.unsqueeze(1))
xf = self.backbone(presearch) ###b l c w h
xf=xf.view(cfg.TRAIN.BATCH_SIZE//cfg.TRAIN.NUM_GPU,cfg.TRAIN.videorange+1,xf.size(-3),xf.size(-2),xf.size(-1))
loc,cls2,cls3=self.grader(xf[:,-1,:,:,:],zf,xf[:,:-1,:,:,:].permute(1,0,2,3,4))
cls2 = self.log_softmax(cls2)
cls_loss2 = select_cross_entropy_loss(cls2, labelcls2)
cls_loss3 = self.cls3loss(cls3, labelcls3)
pre_bbox=self.getcentercuda(loc)
bbo=self.getcentercuda(labelxff)
loc_loss1=self.IOULOSS(pre_bbox,bbo,weightxff)
loc_loss2=DISCLE(pre_bbox,bbo,weightxff)
loc_loss=cfg.TRAIN.w2*loc_loss1+cfg.TRAIN.w3*loc_loss2
cls_loss=cfg.TRAIN.w4*cls_loss2+cfg.TRAIN.w5*cls_loss3
outputs = {}
outputs['total_loss'] =\
cfg.TRAIN.LOC_WEIGHT*loc_loss\
+cfg.TRAIN.CLS_WEIGHT*cls_loss
outputs['cls_loss'] = cls_loss
outputs['loc_loss1'] = loc_loss1
outputs['loc_loss2'] = loc_loss2
        # 2, 4, 1 all use loss2
return outputs
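# Minimal usage sketch (hypothetical data dict, for illustration only; the
# real training loop lives outside this file):
#   model = ModelBuilder('train')
#   outputs = model(data)  # data: dict of cuda tensors with the keys read above
#   outputs['total_loss'].backward()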
|
{"hexsha": "9c635f05bc5375071108ee16f8720c395bc52567", "size": 5347, "ext": "py", "lang": "Python", "max_stars_repo_path": "pysot/models/utile/model_builder.py", "max_stars_repo_name": "vision4robotics/TCTrack", "max_stars_repo_head_hexsha": "1a094f108e09b40b84e6fa0fa06fc6ae0f53ae54", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2022-03-03T16:35:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:13:26.000Z", "max_issues_repo_path": "pysot/models/utile/model_builder.py", "max_issues_repo_name": "vision4robotics/TCTrack", "max_issues_repo_head_hexsha": "1a094f108e09b40b84e6fa0fa06fc6ae0f53ae54", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-07T12:02:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T08:16:09.000Z", "max_forks_repo_path": "pysot/models/utile/model_builder.py", "max_forks_repo_name": "vision4robotics/TCTrack", "max_forks_repo_head_hexsha": "1a094f108e09b40b84e6fa0fa06fc6ae0f53ae54", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2022-03-04T01:52:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:57:42.000Z", "avg_line_length": 30.9075144509, "max_line_length": 128, "alphanum_fraction": 0.5631195063, "include": true, "reason": "import numpy", "num_tokens": 1489}
|
##
## A kind of meta-loader to import data if not already in the workspace
## This lets you run each part of the analysis separately rather than all in a batch, should you prefer
##
if (!exists("dyads"))
source("init.r")
if (!exists("common_theme"))
source("init plots.r")
|
{"hexsha": "7d2aab0273a46e2dc547fd7eee694460b788cc27", "size": 278, "ext": "r", "lang": "R", "max_stars_repo_path": "init if necessary.r", "max_stars_repo_name": "matthewgthomas/hierarchies-gifts", "max_stars_repo_head_hexsha": "7855e25e974b50c4c42d966ccd8ca75b3002f241", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "init if necessary.r", "max_issues_repo_name": "matthewgthomas/hierarchies-gifts", "max_issues_repo_head_hexsha": "7855e25e974b50c4c42d966ccd8ca75b3002f241", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "init if necessary.r", "max_forks_repo_name": "matthewgthomas/hierarchies-gifts", "max_forks_repo_head_hexsha": "7855e25e974b50c4c42d966ccd8ca75b3002f241", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8, "max_line_length": 103, "alphanum_fraction": 0.7050359712, "num_tokens": 67}
|
import argparse
import joblib
import json
import numpy as np
import os
import pandas as pd
import warnings
from itertools import chain
from scipy.io import mmread
from sklearn.pipeline import Pipeline
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.model_selection._validation import _score
from sklearn.utils import indexable, _safe_indexing
from galaxy_ml.model_validations import train_test_split
from galaxy_ml.keras_galaxy_models import (_predict_generator,
KerasGBatchClassifier)
from galaxy_ml.model_persist import load_model_from_h5, dump_model_to_h5
from galaxy_ml.utils import (SafeEval, clean_params, gen_compute_scores,
get_main_estimator, get_scoring, get_module,
read_columns)
N_JOBS = int(os.environ.get('GALAXY_SLOTS', 1))
CACHE_DIR = os.path.join(os.getcwd(), 'cached')
del os
NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path', '_dir',
'nthread', 'callbacks')
ALLOWED_CALLBACKS = ('EarlyStopping', 'TerminateOnNaN', 'ReduceLROnPlateau',
'CSVLogger', 'None')
def _eval_swap_params(params_builder):
swap_params = {}
for p in params_builder['param_set']:
swap_value = p['sp_value'].strip()
if swap_value == '':
continue
param_name = p['sp_name']
if param_name.lower().endswith(NON_SEARCHABLE):
warnings.warn("Warning: `%s` is not eligible for search and was "
"omitted!" % param_name)
continue
if not swap_value.startswith(':'):
safe_eval = SafeEval(load_scipy=True, load_numpy=True)
ev = safe_eval(swap_value)
else:
            # A ':' prefix before the search list asks for estimator evaluation
safe_eval_es = SafeEval(load_estimators=True)
swap_value = swap_value[1:].strip()
# TODO maybe add regular express check
ev = safe_eval_es(swap_value)
swap_params[param_name] = ev
return swap_params
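# Example (hypothetical Galaxy tool input, for illustration only): a param_set
# entry {'sp_name': 'C', 'sp_value': '[0.1, 1.0, 10.0]'} is parsed by SafeEval
# into swap_params == {'C': [0.1, 1.0, 10.0]}.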
def train_test_split_none(*arrays, **kwargs):
"""extend train_test_split to take None arrays
and support split by group names.
"""
nones = []
new_arrays = []
for idx, arr in enumerate(arrays):
if arr is None:
nones.append(idx)
else:
new_arrays.append(arr)
if kwargs['shuffle'] == 'None':
kwargs['shuffle'] = None
group_names = kwargs.pop('group_names', None)
if group_names is not None and group_names.strip():
group_names = [name.strip() for name in
group_names.split(',')]
new_arrays = indexable(*new_arrays)
groups = kwargs['labels']
n_samples = new_arrays[0].shape[0]
index_arr = np.arange(n_samples)
test = index_arr[np.isin(groups, group_names)]
train = index_arr[~np.isin(groups, group_names)]
rval = list(chain.from_iterable(
(_safe_indexing(a, train),
_safe_indexing(a, test)) for a in new_arrays))
else:
rval = train_test_split(*new_arrays, **kwargs)
for pos in nones:
        rval[pos * 2: pos * 2] = [None, None]  # re-insert Nones at their original positions
return rval
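# Usage sketch (illustrative kwargs; the heavy lifting is galaxy_ml's
# train_test_split): passing None arrays keeps placeholder Nones aligned in
# the returned list, e.g.
#   X_tr, X_te, y_tr, y_te, g_tr, g_te = \
#       train_test_split_none(X, y, None, test_size=0.2, shuffle=None)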
def _evaluate_keras_and_sklearn_scores(estimator, data_generator, X,
y=None, sk_scoring=None,
steps=None, batch_size=32,
return_predictions=False):
"""output scores for bother keras and sklearn metrics
Parameters
    ----------
estimator : object
Fitted `galaxy_ml.keras_galaxy_models.KerasGBatchClassifier`.
data_generator : object
From `galaxy_ml.preprocessors.ImageDataFrameBatchGenerator`.
X : 2-D array
        Contains indices of images that need to be evaluated.
y : None
Target value.
sk_scoring : dict
Galaxy tool input parameters.
steps : integer or None
Evaluation/prediction steps before stop.
batch_size : integer
Number of samples in a batch
return_predictions : bool, default is False
Whether to return predictions and true labels.
"""
scores = {}
generator = data_generator.flow(X, y=y, batch_size=batch_size)
# keras metrics evaluation
# handle scorer, convert to scorer dict
generator.reset()
score_results = estimator.model_.evaluate_generator(generator,
steps=steps)
metrics_names = estimator.model_.metrics_names
if not isinstance(metrics_names, list):
scores[metrics_names] = score_results
else:
scores = dict(zip(metrics_names, score_results))
if sk_scoring['primary_scoring'] == 'default' and\
not return_predictions:
        return scores, None, None  # keep the 3-tuple contract for callers
generator.reset()
predictions, y_true = _predict_generator(estimator.model_,
generator,
steps=steps)
# for sklearn metrics
if sk_scoring['primary_scoring'] != 'default':
scorer = get_scoring(sk_scoring)
if not isinstance(scorer, (dict, list)):
scorer = [sk_scoring['primary_scoring']]
scorer = _check_multimetric_scoring(estimator, scoring=scorer)
sk_scores = gen_compute_scores(y_true, predictions, scorer)
scores.update(sk_scores)
if return_predictions:
return scores, predictions, y_true
else:
return scores, None, None
def main(inputs, infile_estimator, infile1, infile2,
outfile_result, outfile_object=None,
outfile_y_true=None,
outfile_y_preds=None, groups=None,
ref_seq=None, intervals=None, targets=None,
fasta_path=None):
"""
    Parameters
    ----------
inputs : str
File path to galaxy tool parameter.
infile_estimator : str
File path to estimator.
infile1 : str
File path to dataset containing features.
infile2 : str
File path to dataset containing target values.
outfile_result : str
File path to save the results, either cv_results or test result.
outfile_object : str, optional
File path to save searchCV object.
outfile_y_true : str, optional
File path to target values for prediction.
outfile_y_preds : str, optional
File path to save predictions.
groups : str
File path to dataset containing groups labels.
ref_seq : str
File path to dataset containing genome sequence file.
intervals : str
File path to dataset containing interval file.
targets : str
File path to dataset compressed target bed file.
fasta_path : str
File path to dataset containing fasta file.
"""
warnings.simplefilter('ignore')
with open(inputs, 'r') as param_handler:
params = json.load(param_handler)
# load estimator
estimator = load_model_from_h5(infile_estimator)
estimator = clean_params(estimator)
# swap hyperparameter
swapping = params['experiment_schemes']['hyperparams_swapping']
swap_params = _eval_swap_params(swapping)
estimator.set_params(**swap_params)
estimator_params = estimator.get_params()
# store read dataframe object
loaded_df = {}
input_type = params['input_options']['selected_input']
# tabular input
if input_type == 'tabular':
header = 'infer' if params['input_options']['header1'] else None
column_option = (params['input_options']['column_selector_options_1']
['selected_column_selector_option'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = params['input_options']['column_selector_options_1']['col1']
else:
c = None
df_key = infile1 + repr(header)
df = pd.read_csv(infile1, sep='\t', header=header,
parse_dates=True)
loaded_df[df_key] = df
X = read_columns(df, c=c, c_option=column_option).astype(float)
# sparse input
elif input_type == 'sparse':
X = mmread(open(infile1, 'r'))
# fasta_file input
elif input_type == 'seq_fasta':
pyfaidx = get_module('pyfaidx')
sequences = pyfaidx.Fasta(fasta_path)
n_seqs = len(sequences.keys())
X = np.arange(n_seqs)[:, np.newaxis]
for param in estimator_params.keys():
if param.endswith('fasta_path'):
estimator.set_params(
**{param: fasta_path})
break
else:
raise ValueError(
"The selected estimator doesn't support "
"fasta file input! Please consider using "
"KerasGBatchClassifier with "
"FastaDNABatchGenerator/FastaProteinBatchGenerator "
"or having GenomeOneHotEncoder/ProteinOneHotEncoder "
"in pipeline!")
elif input_type == 'refseq_and_interval':
path_params = {
'data_batch_generator__ref_genome_path': ref_seq,
'data_batch_generator__intervals_path': intervals,
'data_batch_generator__target_path': targets
}
estimator.set_params(**path_params)
n_intervals = sum(1 for line in open(intervals))
X = np.arange(n_intervals)[:, np.newaxis]
# Get target y
header = 'infer' if params['input_options']['header2'] else None
column_option = (params['input_options']['column_selector_options_2']
['selected_column_selector_option2'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = params['input_options']['column_selector_options_2']['col2']
else:
c = None
df_key = infile2 + repr(header)
if df_key in loaded_df:
infile2 = loaded_df[df_key]
else:
infile2 = pd.read_csv(infile2, sep='\t',
header=header, parse_dates=True)
loaded_df[df_key] = infile2
y = read_columns(
infile2,
c=c,
c_option=column_option,
sep='\t',
header=header,
parse_dates=True)
if len(y.shape) == 2 and y.shape[1] == 1:
y = y.ravel()
if input_type == 'refseq_and_interval':
estimator.set_params(
data_batch_generator__features=y.ravel().tolist())
y = None
# end y
# load groups
if groups:
groups_selector = (params['experiment_schemes']['test_split']
['split_algos']).pop('groups_selector')
header = 'infer' if groups_selector['header_g'] else None
column_option = \
(groups_selector['column_selector_options_g']
['selected_column_selector_option_g'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = groups_selector['column_selector_options_g']['col_g']
else:
c = None
df_key = groups + repr(header)
if df_key in loaded_df:
groups = loaded_df[df_key]
groups = read_columns(
groups,
c=c,
c_option=column_option,
sep='\t',
header=header,
parse_dates=True)
groups = groups.ravel()
# del loaded_df
del loaded_df
# cache iraps_core fits could increase search speed significantly
memory = joblib.Memory(location=CACHE_DIR, verbose=0)
main_est = get_main_estimator(estimator)
if main_est.__class__.__name__ == 'IRAPSClassifier':
main_est.set_params(memory=memory)
# handle scorer, convert to scorer dict
scoring = params['experiment_schemes']['metrics']['scoring']
scorer = get_scoring(scoring)
if not isinstance(scorer, (dict, list)):
scorer = [scoring['primary_scoring']]
scorer = _check_multimetric_scoring(estimator, scoring=scorer)
# handle test (first) split
test_split_options = (params['experiment_schemes']
['test_split']['split_algos'])
if test_split_options['shuffle'] == 'group':
test_split_options['labels'] = groups
if test_split_options['shuffle'] == 'stratified':
if y is not None:
test_split_options['labels'] = y
else:
raise ValueError("Stratified shuffle split is not "
"applicable on empty target values!")
X_train, X_test, y_train, y_test, groups_train, groups_test = \
train_test_split_none(X, y, groups, **test_split_options)
exp_scheme = params['experiment_schemes']['selected_exp_scheme']
# handle validation (second) split
if exp_scheme == 'train_val_test':
val_split_options = (params['experiment_schemes']
['val_split']['split_algos'])
if val_split_options['shuffle'] == 'group':
val_split_options['labels'] = groups_train
if val_split_options['shuffle'] == 'stratified':
if y_train is not None:
val_split_options['labels'] = y_train
else:
raise ValueError("Stratified shuffle split is not "
"applicable on empty target values!")
X_train, X_val, y_train, y_val, groups_train, groups_val = \
train_test_split_none(X_train, y_train, groups_train,
**val_split_options)
# train and eval
if hasattr(estimator, 'config') and hasattr(estimator, 'model_type'):
if exp_scheme == 'train_val_test':
estimator.fit(X_train, y_train,
validation_data=(X_val, y_val))
else:
estimator.fit(X_train, y_train,
validation_data=(X_test, y_test))
else:
estimator.fit(X_train, y_train)
if isinstance(estimator, KerasGBatchClassifier):
scores = {}
steps = estimator.prediction_steps
batch_size = estimator.batch_size
data_generator = estimator.data_generator_
scores, predictions, y_true = _evaluate_keras_and_sklearn_scores(
estimator, data_generator, X_test, y=y_test,
sk_scoring=scoring, steps=steps, batch_size=batch_size,
return_predictions=bool(outfile_y_true))
else:
scores = {}
if hasattr(estimator, 'model_') \
and hasattr(estimator.model_, 'metrics_names'):
batch_size = estimator.batch_size
score_results = estimator.model_.evaluate(X_test, y=y_test,
batch_size=batch_size,
verbose=0)
metrics_names = estimator.model_.metrics_names
if not isinstance(metrics_names, list):
scores[metrics_names] = score_results
else:
scores = dict(zip(metrics_names, score_results))
if hasattr(estimator, 'predict_proba'):
predictions = estimator.predict_proba(X_test)
else:
predictions = estimator.predict(X_test)
y_true = y_test
sk_scores = _score(estimator, X_test, y_test, scorer)
scores.update(sk_scores)
# handle output
if outfile_y_true:
try:
pd.DataFrame(y_true).to_csv(outfile_y_true, sep='\t',
index=False)
pd.DataFrame(predictions).astype(np.float32).to_csv(
outfile_y_preds, sep='\t', index=False,
float_format='%g', chunksize=10000)
except Exception as e:
print("Error in saving predictions: %s" % e)
    # write the scores table
for name, score in scores.items():
scores[name] = [score]
df = pd.DataFrame(scores)
df = df[sorted(df.columns)]
df.to_csv(path_or_buf=outfile_result, sep='\t',
header=True, index=False)
memory.clear(warn=False)
if outfile_object:
dump_model_to_h5(estimator, outfile_object)
if __name__ == '__main__':
aparser = argparse.ArgumentParser()
aparser.add_argument("-i", "--inputs", dest="inputs", required=True)
aparser.add_argument("-e", "--estimator", dest="infile_estimator")
aparser.add_argument("-X", "--infile1", dest="infile1")
aparser.add_argument("-y", "--infile2", dest="infile2")
aparser.add_argument("-O", "--outfile_result", dest="outfile_result")
aparser.add_argument("-o", "--outfile_object", dest="outfile_object")
aparser.add_argument("-l", "--outfile_y_true", dest="outfile_y_true")
aparser.add_argument("-p", "--outfile_y_preds", dest="outfile_y_preds")
aparser.add_argument("-g", "--groups", dest="groups")
aparser.add_argument("-r", "--ref_seq", dest="ref_seq")
aparser.add_argument("-b", "--intervals", dest="intervals")
aparser.add_argument("-t", "--targets", dest="targets")
aparser.add_argument("-f", "--fasta_path", dest="fasta_path")
args = aparser.parse_args()
main(args.inputs, args.infile_estimator, args.infile1, args.infile2,
args.outfile_result, outfile_object=args.outfile_object,
outfile_y_true=args.outfile_y_true,
outfile_y_preds=args.outfile_y_preds,
groups=args.groups,
ref_seq=args.ref_seq, intervals=args.intervals,
targets=args.targets, fasta_path=args.fasta_path)
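# Example invocation (hypothetical file names, for illustration only):
#   python keras_train_and_eval.py -i inputs.json -e estimator.h5 \
#       -X features.tsv -y targets.tsv -O results.tsv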
|
{"hexsha": "e0962b6c57e7fd2d8b3c35d12081ec1680b1b765", "size": 17734, "ext": "py", "lang": "Python", "max_stars_repo_path": "galaxy_ml/tools/keras_train_and_eval.py", "max_stars_repo_name": "bgruening/Galaxy-ML-1", "max_stars_repo_head_hexsha": "47514940c7ac39d6ca1d595b58b5d1311b3f3840", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "galaxy_ml/tools/keras_train_and_eval.py", "max_issues_repo_name": "bgruening/Galaxy-ML-1", "max_issues_repo_head_hexsha": "47514940c7ac39d6ca1d595b58b5d1311b3f3840", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "galaxy_ml/tools/keras_train_and_eval.py", "max_forks_repo_name": "bgruening/Galaxy-ML-1", "max_forks_repo_head_hexsha": "47514940c7ac39d6ca1d595b58b5d1311b3f3840", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2658486708, "max_line_length": 77, "alphanum_fraction": 0.614412992, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3789}
|
from Results import Results
from Channel import Channel
from Message import Image_message
from CorectionCodes import CorectionCodes
from Generator import Generator
import numpy as np
import komm as komm
test_image_file_name = "image.jpg"
saved_test_image = "save_image.jpeg"
results = Results()
results_file_name = 'hamming_10.csv'
def main():
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(3)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 3)
# results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_7-4',num_of_err)
# results.print_results()
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(4)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 4)
# results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_15-11',num_of_err)
# results.print_results()
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(5)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 5)
# results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_31-26',num_of_err)
# results.print_results()
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(6)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 6)
# results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_63-57',num_of_err)
# results.print_results()
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(7)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 7)
# results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_127-120',num_of_err)
# results.print_results()
for num_of_err in range(100000,600000,100000):
image = Image_message(test_image_file_name)
corectionCodes = CorectionCodes(image.image_bits)
encoded_message = corectionCodes.hamming_encode(8)
encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 8)
results.add_result(image.image_bits,np.concatenate(encoded_message),decoded_message,'hamming_255-247',num_of_err)
results.print_results()
results.save_to_file("hamming_test.csv")
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# image_with_errors = Channel.random_error_number(image.image_bits, 100000)
# image.image_bits = image_with_errors
# image.save("grafika 100k bledow.jpg")
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.bits_trippling_2()
# encoded_message_with_errors = Channel.random_error_number(encoded_message, 100000)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.decode_trippled_bits(encoded_meassage_with_error_to_decode,'C')
# image.image_bits = decoded_message
# image.save("grafika 100k bledow tripling.jpg")
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.hamming_encode(3)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), 100000)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 3)
# image.image_bits = decoded_message
# image.save("grafika 100k bledow hamming.jpg")
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.bits_trippling_1()
# encoded_message_with_errors = Channel.random_error_number(encoded_message, num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.decode_trippled_bits(encoded_meassage_with_error_to_decode,'F')
# results.add_result(image.image_bits,encoded_message,decoded_message,'tripling_abc',num_of_err)
# results.print_results()
# for num_of_err in range(10000,100001,10000):
# image = Image_message(test_image_file_name)
# corectionCodes = CorectionCodes(image.image_bits)
# encoded_message = corectionCodes.bits_trippling_2()
# encoded_message_with_errors = Channel.random_error_number(encoded_message, num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.decode_trippled_bits(encoded_meassage_with_error_to_decode,'C')
# results.add_result(image.image_bits,encoded_message,decoded_message,'tripling_aaa',num_of_err)
# results.print_results()
# results.save_to_file("tripling.csv")
# for num_of_err in range(100,1001,100):
# bits = Generator.generate_bits(3000)
# corectionCodes = CorectionCodes(bits)
# encoded_message = corectionCodes.BCH_encode(5,3)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), num_of_err)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.BCH_decode(encoded_meassage_with_error_to_decode,5,3)
# results.add_result(bits,np.concatenate(encoded_message),decoded_message,'bch 5-3',num_of_err)
# results.print_results()
# results.save_to_file("bch.csv")
    #encoded_message = corectionCodes.BCH_encode(10,2) # terribly slow, hence commented out
#encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), 100000)
#encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
#decoded_message = corectionCodes.BCH_decode(encoded_meassage_with_error_to_decode,10,2)
#image.image_bits = decoded_message
#image.save(saved_test_image)
# encoded_message = corectionCodes.hamming_encode(10)
# encoded_message_with_errors = Channel.random_error_number(np.concatenate(encoded_message), 100000)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.hamming_decode(encoded_meassage_with_error_to_decode, 10)
# image.image_bits = decoded_message
# #image.save(saved_test_image)
# results.add_result(image.image_bits,encoded_message,decoded_message,'Hamming10',100000)
# encoded_message = corectionCodes.bits_trippling_1()
# encoded_message_with_errors = Channel.random_error_number(encoded_message, 100000)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.decode_trippled_bits(encoded_message_with_errors,'F')
# image.image_bits = decoded_message
# #image.save(saved_test_image)
# results.add_result(image.image_bits,encoded_message,decoded_message,'triplingaabbcc',100000)
# encoded_message = corectionCodes.bits_trippling_2()
# encoded_message_with_errors = Channel.random_error_number(encoded_message, 100000)
# encoded_meassage_with_error_to_decode = corectionCodes.array_to_decode(len(encoded_message),encoded_message_with_errors)
# decoded_message = corectionCodes.decode_trippled_bits(encoded_message_with_errors,'c')
# image.image_bits = decoded_message
# #image.save(saved_test_image)
# results.add_result(image.image_bits,encoded_message,decoded_message,'triplingabcabcabc',100000)
# encoded_message = CorectionCodes.encode_hamming(image.image_bits)
# encoded_message_with_errors = Channel.random_error_number(encoded_message, 100000)
# decoded__message = CorectionCodes.decode_hamming(encoded_message_with_errors)
# image.image_bits = decoded__message
# image.save(saved_test_image)
# results.add_result(image.image_bits,encoded_message,decoded_message,'hamming(7,4)',100000)
# results.print_results()
# results.save_to_file(results_file_name)
main()
|
{"hexsha": "2e7282ba60f28cf4921ffd16ef7556b5ef0de3fc", "size": 11003, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "tomekrzymyszkiewicz/forward-error-correction", "max_stars_repo_head_hexsha": "fba72896b77cd4f5dee79648e3ecbcc1c827e95c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-09T15:34:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T15:34:44.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "tomekrzymyszkiewicz/forward-error-correction", "max_issues_repo_head_hexsha": "fba72896b77cd4f5dee79648e3ecbcc1c827e95c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-20T12:09:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-20T12:09:26.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "tomekrzymyszkiewicz/forward-error-correction", "max_forks_repo_head_hexsha": "fba72896b77cd4f5dee79648e3ecbcc1c827e95c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.6011560694, "max_line_length": 130, "alphanum_fraction": 0.7845133146, "include": true, "reason": "import numpy", "num_tokens": 2479}
|
from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
ad = genfromtxt("../financial_data/BPI-copy.csv", delimiter=",", dtype=str)
pd = ad
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
def convolve_sma(array, period):
return np.convolve(array, np.ones((period,)) / period, mode="valid")
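# Example (illustrative): convolve_sma([1, 2, 3, 4, 5], 3) returns
# array([2., 3., 4.]), the 3-period simple moving average.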
def graphwerk(start, finish):
open = []
high = []
low = []
close = []
volume = []
decision = []
date = []
for x in range(finish - start):
        # The column indexing below is valid for the eurusd.csv file. Other
        # financial data files order their columns differently, so check which
        # columns hold the open, high, low and close values before using them.
open.append(float(pd[start][1]))
high.append(float(pd[start][2]))
low.append(float(pd[start][3]))
close.append(float(pd[start][4]))
volume.append(float(pd[start][5]))
decision.append(str(pd[start][6]))
date.append(pd[start][0])
start = start + 1
close_next = float(pd[finish][4])
# sma = convolve_sma(close, 5)
# smb = list(sma)
# diff = sma[-1] - sma[-2]
# for x in range(len(close)-len(smb)):
# smb.append(smb[-1]+diff)
fig = plt.figure(num=1, figsize=(1, 1), dpi=50, facecolor="w", edgecolor="k")
# fig2 = plt.figure(num=1, figsize=(1, 1), dpi=50, facecolor='w', edgecolor='k')
dx = fig.add_subplot(111)
# dx.axis("off")
# dx2 = fig.add_subplot(414)
# dx2.axis("off")
mpl_finance.candlestick2_ochl(
dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5
)
# pad = 0.60
# yl = dx.get_ylim()
# print(yl)
# dx.set_ylim(yl[0]-(yl[1]-yl[0])*pad,yl[1])
# print(dx.get_ylim())
# mpl_finance.volume_overlay(dx2, open, close, volume, width=0.4, alpha=1)
plt.autoscale()
# plt.plot(smb, color="blue", linewidth=10, alpha=0.5)
plt.axis("off")
comp_ratio = close_next / close[-1]
print(comp_ratio)
    # Save the chart into the buy/ or sell/ class folder according to the
    # decision label in the data (the commented block below shows the older
    # price-comparison variant).
    if decision[-1] == "sell":
        print("last value: " + str(close[-1]))
        print("next value: " + str(close_next))
        print("sell")
        plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
    else:
        print("last value: " + str(close[-1]))
        print("next value: " + str(close_next))
        print("buy")
        plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
# if close[-1] >= close_next:
# print('previous value is bigger')
# print('last value: ' + str(close[-1]))
# print('next value: ' + str(close_next))
# print('sell')
# plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')
# else:
# print('previous value is smaller')
# print('last value: '+ str(close[-1]))
# print('next value: ' + str(close_next))
# print('buy')
# plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')
# plt.show()
open.clear()
close.clear()
volume.clear()
high.clear()
low.clear()
plt.cla()
plt.clf()
iter_count = len(pd)
print(iter_count)
# Stop one row early: graphwerk reads the row at index `finish` for close_next.
for start_idx in range(iter_count - 1):
    graphwerk(start_idx, start_idx + 1)
|
{"hexsha": "2b0ccf0474eece94bb0ca3f7dd102441ee1350a8", "size": 3479, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/backupSingle.py", "max_stars_repo_name": "accordinglyto/dferte", "max_stars_repo_head_hexsha": "d4b8449c1633973dc538c9e72aca5d37802a4ee4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/backupSingle.py", "max_issues_repo_name": "accordinglyto/dferte", "max_issues_repo_head_hexsha": "d4b8449c1633973dc538c9e72aca5d37802a4ee4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:55:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:34:40.000Z", "max_forks_repo_path": "src/backupSingle.py", "max_forks_repo_name": "accordinglyto/dferte", "max_forks_repo_head_hexsha": "d4b8449c1633973dc538c9e72aca5d37802a4ee4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7520661157, "max_line_length": 128, "alphanum_fraction": 0.5771773498, "include": true, "reason": "import numpy,from numpy", "num_tokens": 979}
|
(* This program is free software; you can redistribute it and/or *)
(* modify it under the terms of the GNU Lesser General Public License *)
(* as published by the Free Software Foundation; either version 2.1 *)
(* of the License, or (at your option) any later version. *)
(* *)
(* This program is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU General Public License for more details. *)
(* *)
(* You should have received a copy of the GNU Lesser General Public *)
(* License along with this program; if not, write to the Free *)
(* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA *)
(* 02110-1301 USA *)
Set Implicit Arguments.
Unset Strict Implicit.
Require Import Arith.
Require Export colimits.
Require Export little_cat.
Require Export fc_limits.
Require Export cardinal.
Module Fiprod.
Export Finite_Cat.
Export Limit.
Export Vee_Cat.
Definition fiprod_hypothesis a u v y1 y2 :=
vee_hypothesis a u v &
mor a y1 & mor a y2
& source u = target y1 &
source v = target y2 &
source y1 = source y2 &
comp a u y1 = comp a v y2.
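(* Informally: (u, v) is a cospan and (y1, y2) completes it to a commutative
square, i.e. comp a u y1 = comp a v y2 with the evident matching of sources
and targets. *)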
Definition fiprod_cone a u v y1 y2 :=
cone_create (source y1) (vee_nt a
(id a (source y1)) (id a (source y2)) u v y1
(comp a u y1) y2).
Lemma vertex_fiprod_cone : forall a u v y1 y2,
vertex (fiprod_cone a u v y1 y2) = source y1.
Proof.
ir. uf fiprod_cone.
rw vertex_cone_create. tv.
Qed.
Lemma edge_nt_fiprod_cone : forall a u v y1 y2,
edge_nt (fiprod_cone a u v y1 y2) =
(vee_nt a
(id a (source y1)) (id a (source y2)) u v
y1
(comp a u y1) y2).
Proof.
ir. uf fiprod_cone.
rw edge_nt_cone_create. tv.
Qed.
Lemma edge_fiprod_cone_R : forall a u v y1 y2 (x:obsy vee_data),
edge (fiprod_cone a u v y1 y2) (R x) =
match x with
o1 => y1 | o2 => (comp a u y1) | o3 => y2
end.
Proof.
ir.
uf edge.
rw edge_nt_fiprod_cone.
rw ntrans_vee_nt_R. tv.
Qed.
Lemma socle_fiprod_cone : forall a u v y1 y2,
socle (fiprod_cone a u v y1 y2) = vee_functor a u v.
Proof.
ir. uf socle.
rw edge_nt_fiprod_cone.
rw target_vee_nt. tv.
Qed.
Lemma cone_source_fiprod_cone : forall a u v y1 y2,
cone_source (fiprod_cone a u v y1 y2) = vee_cat.
Proof.
ir. uf cone_source.
rw socle_fiprod_cone. rww source_vee_functor.
Qed.
Lemma cone_target_fiprod_cone : forall a u v y1 y2,
cone_target (fiprod_cone a u v y1 y2) = a.
Proof.
ir. uf cone_target.
rw socle_fiprod_cone. rww target_vee_functor.
Qed.
Lemma fiprod_vee_nt_hypothesis : forall a u v y1 y2,
fiprod_hypothesis a u v y1 y2 ->
vee_nt_hypothesis a (id a (source y1))
(id a (source y2)) u v y1
(comp a u y1) y2.
Proof.
ir. uh H; ee.
uh H; ee.
assert (ob a (source y1)). rww ob_source.
assert (ob a (target y1)). rww ob_target.
assert (ob a (source y2)). rww ob_source.
assert (ob a (target y2)). rww ob_target.
uhg; ee.
uh H; ee; am.
app mor_id.
app mor_id. am. am. am.
rww mor_comp. am. rww source_id. sy; am.
rww source_comp. rww target_id.
rww target_comp. rww source_id. sy; am.
rww H4. am.
rww assoc.
rww right_id. app mor_id. rww target_id.
rww assoc. rww right_id. app mor_id.
rww target_id.
Qed.
Lemma is_cone_fiprod_cone : forall a u v y1 y2,
fiprod_hypothesis a u v y1 y2 ->
is_cone (fiprod_cone a u v y1 y2).
Proof.
ir. uhg; ee.
uf fiprod_cone.
ap cone_like_cone_create.
rw edge_nt_fiprod_cone.
ap vee_nt_axioms.
app fiprod_vee_nt_hypothesis.
rw cone_target_fiprod_cone.
rw vertex_fiprod_cone. uh H; ee. rww ob_source.
rw edge_nt_fiprod_cone.
rw source_vee_nt.
rw cone_source_fiprod_cone.
rw cone_target_fiprod_cone.
rw vertex_fiprod_cone. sy;
rw constant_functor_vee_functor.
uh H; ee. rww H4.
tv.
uh H; ee. rww ob_source.
Qed.
Definition fimap1 c := edge c (R o1').
Definition fimap2 c := edge c (R o3').
Lemma source_fimap1 : forall c,
is_cone c -> cone_source c = vee_cat ->
source (fimap1 c) = vertex c.
Proof.
ir. uf fimap1. rww source_edge.
rw H0. app ob_vee_cat.
Qed.
Lemma source_fimap2 : forall c,
is_cone c -> cone_source c = vee_cat ->
source (fimap2 c) = vertex c.
Proof.
ir. uf fimap2. rww source_edge.
rw H0. app ob_vee_cat.
Qed.
Lemma fiprod_hypothesis_fimaps :
forall a u v c, vee_hypothesis a u v ->
is_cone c -> socle c = vee_functor a u v ->
fiprod_hypothesis a u v (fimap1 c) (fimap2 c).
Proof.
ir.
assert (a = cone_target c).
uf cone_target. rw H1. rw target_vee_functor.
tv.
assert (cone_source c = vee_cat).
uf cone_source. rw H1.
rww source_vee_functor.
uhg. ee. am.
uf fimap1.
rw H2. ap mor_edge.
rw H3. ap ob_vee_cat.
am.
uf fimap2.
rw H2. ap mor_edge.
rw H3. ap ob_vee_cat.
am.
uf fimap1. rw target_edge.
rw H1.
rw fob_vee_functor_R. tv. am.
rw H3. ap ob_vee_cat. am.
uf fimap2. rw target_edge.
rw H1.
rw fob_vee_functor_R. tv. am.
rw H3. ap ob_vee_cat. am.
uf fimap1. uf fimap2.
rw source_edge. rw source_edge. tv.
rw H3. ap ob_vee_cat. am.
rw H3. ap ob_vee_cat. am.
set (k:= edge_nt c).
util (vee_nt_ntrans (u:=k)).
uf k. app edge_nt_axioms.
uf k. rww osource_edge_nt.
assert (otarget k = a).
uf k. rww otarget_edge_nt. sy; am.
assert
(fmor (source k) (catyd_arrow (d:=vee_data) a12') =
id a (vertex c)).
uf k.
rw source_edge_nt.
rw fmor_constant_functor. wr H2. tv.
rw H3. ap mor_vee_cat. am.
assert
(fmor (source k) (catyd_arrow (d:=vee_data) a32') =
id a (vertex c)).
uf k.
rw source_edge_nt.
rw fmor_constant_functor. wr H2. tv.
rw H3. ap mor_vee_cat. am.
assert
(fmor (target k) (catyd_arrow (d:=vee_data) a12') = u).
uf k.
rw target_edge_nt.
rw H1. rw fmor_vee_functor_catyd_arrow.
tv. am.
assert
(fmor (target k) (catyd_arrow (d:=vee_data) a32') = v).
uf k.
rw target_edge_nt.
rw H1. rw fmor_vee_functor_catyd_arrow.
tv. am.
util (vee_nt_hypothesis_ntrans (u:=k)).
uf k. app edge_nt_axioms. uf k.
rw osource_edge_nt. am. am.
rwi H6 H10.
rwi H5 H10. rwi H7 H10. rwi H8 H10.
rwi H9 H10.
assert (ntrans k (R o1') = (fimap1 c)).
uf fimap1. uf edge.
tv.
assert (ntrans k (R o3') = (fimap2 c)).
uf fimap2. uf edge.
tv.
rwi H11 H10.
set (m:= ntrans k (R o2')).
assert (m = ntrans k (R o2')). tv. rwi H12 H10.
wri H13 H10. uh H10; ee.
wr H29. wr H30.
reflexivity.
Qed.
Lemma fiprod_cone_eq : forall a u v c,
vee_hypothesis a u v ->
is_cone c -> socle c = vee_functor a u v ->
c = fiprod_cone a u v (fimap1 c) (fimap2 c).
Proof.
ir.
assert (a = cone_target c).
uf cone_target. rw H1. rw target_vee_functor.
tv.
assert (cone_source c = vee_cat).
uf cone_source. rw H1.
rww source_vee_functor.
set (k:= edge_nt c).
util (vee_nt_ntrans (u:=k)).
uf k. app edge_nt_axioms.
uf k. rww osource_edge_nt.
assert (otarget k = a).
uf k. rww otarget_edge_nt. sy; am.
assert
(fmor (source k) (catyd_arrow (d:=vee_data) a12') =
id a (vertex c)).
uf k.
rw source_edge_nt.
rw fmor_constant_functor. wr H2. tv.
rw H3. ap mor_vee_cat. am.
assert
(fmor (source k) (catyd_arrow (d:=vee_data) a32') =
id a (vertex c)).
uf k.
rw source_edge_nt.
rw fmor_constant_functor. wr H2. tv.
rw H3. ap mor_vee_cat. am.
assert
(fmor (target k) (catyd_arrow (d:=vee_data) a12') = u).
uf k.
rw target_edge_nt.
rw H1. rw fmor_vee_functor_catyd_arrow.
tv. am.
assert
(fmor (target k) (catyd_arrow (d:=vee_data) a32') = v).
uf k.
rw target_edge_nt.
rw H1. rw fmor_vee_functor_catyd_arrow.
tv. am.
rwi H5 H4. rwi H6 H4. rwi H7 H4. rwi H8 H4.
rwi H9 H4.
transitivity (cone_create (vertex c) k).
uh H0; ee. uh H0; ee.
sy; am.
uf fiprod_cone.
rw source_fimap1.
rw source_fimap2.
ap uneq. wr H4.
assert (lem1 : ntrans k (R o1') = (fimap1 c)).
uf fimap1. uf edge.
tv.
assert (lem2 : ntrans k (R o3') = (fimap2 c)).
uf fimap2. uf edge.
tv.
assert (ntrans k (R o2') = comp a u (fimap1 c)).
uf fimap1.
util (vee_nt_hypothesis_ntrans (u:=k)).
uf k. app edge_nt_axioms. uf k.
rw osource_edge_nt. am. am.
rwi H6 H10.
rwi H5 H10. rwi H7 H10. rwi H8 H10.
rwi H9 H10.
rwi lem1 H10. rwi lem2 H10.
set (m:= ntrans k (R o2')).
assert (m = ntrans k (R o2')). tv. wri H11 H10.
uh H10; ee. uf edge.
change (m=comp a u (ntrans k (R o1'))).
rw lem1. wr H27.
rw right_id. tv. rw H2. app ob_vertex.
am. rw H21. rw target_id. tv.
rw H2; app ob_vertex. tv.
rw lem1. rw lem2. rw H10. reflexivity. am.
am. am. am.
Qed.
Lemma fimap1_fiprod_cone : forall a u v y1 y2,
fimap1 (fiprod_cone a u v y1 y2) = y1.
Proof.
ir.
uf fimap1.
rw edge_fiprod_cone_R. tv.
Qed.
Lemma fimap2_fiprod_cone : forall a u v y1 y2,
fimap2 (fiprod_cone a u v y1 y2) = y2.
Proof.
ir.
uf fimap2.
rw edge_fiprod_cone_R. tv.
Qed.
Lemma fimap1_cone_compose : forall c z,
is_cone c -> cone_source c = vee_cat ->
cone_composable c z ->
fimap1 (cone_compose c z) =
(comp (cone_target c) (fimap1 c) z).
Proof.
ir.
uf fimap1.
rw edge_cone_compose. tv.
rw H0. ap ob_vee_cat. am.
Qed.
Lemma fimap2_cone_compose : forall c z,
is_cone c -> cone_source c = vee_cat ->
cone_composable c z ->
fimap2 (cone_compose c z) =
(comp (cone_target c) (fimap2 c) z).
Proof.
ir.
uf fimap2.
rw edge_cone_compose. tv.
rw H0. ap ob_vee_cat. am.
Qed.
Definition veefirst c :=
(fmor (socle c) (catyd_arrow (d:=vee_data) a12')).
Definition veesecond c :=
(fmor (socle c) (catyd_arrow (d:=vee_data) a32')).
Lemma fiprod_hypothesis_vee_fimaps : forall c,
is_cone c -> cone_source c = vee_cat ->
fiprod_hypothesis (cone_target c) (veefirst c) (veesecond c)
(fimap1 c) (fimap2 c).
Proof.
ir. util (fiprod_hypothesis_fimaps (a:= cone_target c)
(u:= veefirst c) (v:= veesecond c) (c:=c)).
uf veefirst. uf veesecond.
uf cone_target. ap functor_vee_hypothesis.
app socle_axioms. am. am.
uf cone_target.
uf veefirst. uf veesecond. ap functor_vee_eq.
app socle_axioms. am.
am.
Qed.
Lemma fiprod_cone_eq2 : forall c,
is_cone c -> cone_source c = vee_cat ->
c = fiprod_cone (cone_target c) (veefirst c) (veesecond c)
(fimap1 c) (fimap2 c).
Proof.
ir.
util (fiprod_cone_eq (a:= cone_target c)
(u:= veefirst c) (v:= veesecond c) (c:=c)).
uf veefirst. uf veesecond.
uf cone_target. ap functor_vee_hypothesis.
app socle_axioms. am. am.
uf cone_target.
uf veefirst. uf veesecond. ap functor_vee_eq.
app socle_axioms. am.
am.
Qed.
Lemma fimaps_extensionality : forall a b,
is_cone a -> is_cone b -> cone_source a = vee_cat
-> socle a = socle b -> fimap1 a = fimap1 b ->
fimap2 a = fimap2 b -> a = b.
Proof.
ir.
assert (cone_source b = vee_cat).
uf cone_source. wr H2. am.
util (fiprod_cone_eq2 (c:= a)).
am. am.
util (fiprod_cone_eq2 (c:= b)).
am. am. rw H6; rw H7.
assert (veefirst a = veefirst b).
uf veefirst. rww H2.
assert (veesecond a = veesecond b).
uf veesecond. rw H2. reflexivity.
rw H3; rw H8; rw H9.
assert (cone_target a = cone_target b).
uf cone_target. rww H2. rw H10. rw H4.
reflexivity.
Qed.
Lemma cone_compose_fiprod_cone : forall a u v y1 y2 z,
fiprod_hypothesis a u v y1 y2 ->
mor a z -> source y1 = target z ->
cone_compose (fiprod_cone a u v y1 y2) z =
fiprod_cone a u v (comp a y1 z) (comp a y2 z).
Proof.
ir.
assert (lem1: fiprod_hypothesis a u v
(comp a y1 z) (comp a y2 z)).
uh H; uhg; ee. am. rww mor_comp.
rww mor_comp. wrr H6. rww target_comp.
rww target_comp. wrr H6.
rww source_comp. rww source_comp. wrr H6.
wr assoc.
rw H7. rww assoc. uh H; ee; am. wrr H6.
uh H; ee; am. am. am. am. am. tv.
assert (lem2 : cone_composable (fiprod_cone a u v y1 y2) z).
uhg; ee.
app is_cone_fiprod_cone.
rww cone_target_fiprod_cone.
rww vertex_fiprod_cone. sy; am.
ap fimaps_extensionality.
rww is_cone_cone_compose.
app is_cone_fiprod_cone.
rw cone_source_cone_compose. rww cone_source_fiprod_cone.
rw socle_cone_compose.
rww socle_fiprod_cone. rww socle_fiprod_cone.
rww fimap1_cone_compose.
rww cone_target_fiprod_cone.
rww fimap1_fiprod_cone.
rww fimap1_fiprod_cone.
app is_cone_fiprod_cone.
rww cone_source_fiprod_cone.
rww fimap2_cone_compose.
rww cone_target_fiprod_cone.
rww fimap2_fiprod_cone.
rww fimap2_fiprod_cone.
app is_cone_fiprod_cone.
rww cone_source_fiprod_cone.
Qed.
Lemma cone_composable_fiprod_cone : forall a u v y1 y2 z,
fiprod_hypothesis a u v y1 y2 -> mor a z ->
source y1 = target z ->
cone_composable (fiprod_cone a u v y1 y2) z.
Proof.
ir. uhg; ee. app is_cone_fiprod_cone.
rww cone_target_fiprod_cone.
rww vertex_fiprod_cone. sy; am.
Qed.
Lemma is_uni_fiprod_cone : forall a u v y1 y2,
fiprod_hypothesis a u v y1 y2 ->
is_uni (fiprod_cone a u v y1 y2) =
(forall z z', mor a z -> mor a z' ->
source y1 = target z -> source y1 = target z' ->
comp a y1 z = comp a y1 z' ->
comp a y2 z = comp a y2 z'-> z = z').
Proof.
ir. ap iff_eq; ir.
uh H0. ee. ap H7.
app cone_composable_fiprod_cone.
app cone_composable_fiprod_cone.
rw cone_compose_fiprod_cone.
sy; rw cone_compose_fiprod_cone.
sy. rw H5. rw H6. tv. am. am. am. am. am. am.
uhg. ee. app is_cone_fiprod_cone.
ir.
cp H1; cp H2. uh H4; uh H5; ee.
rwi cone_target_fiprod_cone H8.
rwi cone_target_fiprod_cone H6.
rwi vertex_fiprod_cone H9.
rwi vertex_fiprod_cone H7.
app H0. sy; am. sy; am.
rwi cone_compose_fiprod_cone H3.
tv.
transitivity
(fimap1 (fiprod_cone a u v (comp a y1 u0) (comp a y2 u0))).
rw fimap1_fiprod_cone. tv.
rw H3.
rw fimap1_cone_compose.
rw cone_target_fiprod_cone.
rw fimap1_fiprod_cone. tv.
am. rww cone_source_fiprod_cone.
am. am. am. sy; am.
transitivity
(fimap2 (cone_compose (fiprod_cone a u v y1 y2) u0)).
rw cone_compose_fiprod_cone.
rw fimap2_fiprod_cone. tv. am. am. sy; am.
rw H3.
rw fimap2_cone_compose.
rw cone_target_fiprod_cone.
rw fimap2_fiprod_cone. tv.
am. rww cone_source_fiprod_cone.
am.
Qed.
Lemma is_versal_fiprod_cone : forall a u v y1 y2,
fiprod_hypothesis a u v y1 y2 ->
is_versal (fiprod_cone a u v y1 y2) =
(forall z1 z2, fiprod_hypothesis a u v z1 z2 ->
(exists w, (mor a w & source y1 = target w &
comp a y1 w = z1 & comp a y2 w = z2))).
Proof.
ir. ap iff_eq; ir.
uh H0; ee.
util (H2 (fiprod_cone a u v z1 z2)).
app is_cone_fiprod_cone.
rww socle_fiprod_cone; sy;
rww socle_fiprod_cone.
nin H3. ee.
cp H3. uh H5; ee.
rwi cone_target_fiprod_cone H6.
rwi vertex_fiprod_cone H7.
rwi cone_compose_fiprod_cone H4.
sh x; ee. am. sy; am.
transitivity
(fimap1 (fiprod_cone a u v (comp a y1 x) (comp a y2 x))).
rw fimap1_fiprod_cone. tv.
rw H4. rw fimap1_fiprod_cone. tv.
transitivity
(fimap2 (fiprod_cone a u v (comp a y1 x) (comp a y2 x))).
rw fimap2_fiprod_cone. tv.
rw H4. rw fimap2_fiprod_cone. tv. am. am. sy; am.
assert (lem1: fiprod_hypothesis a u v y1 y2).
am.
uh H; uhg; ee; try am. app is_cone_fiprod_cone.
ir.
rwi socle_fiprod_cone H8.
cp (fiprod_cone_eq H H7 H8).
cp (fiprod_hypothesis_fimaps H H7 H8).
util (H0 (fimap1 b) (fimap2 b)). am.
nin H11. ee. sh x; ee.
app cone_composable_fiprod_cone.
rww cone_compose_fiprod_cone. rw H13.
rw H14. sy; am.
Qed.
Definition is_fiprod a u v y1 y2 :=
fiprod_hypothesis a u v y1 y2 &
is_limit (fiprod_cone a u v y1 y2).
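(* A fiber product is thus a commutative square over (u, v) whose associated
cone is a limit, i.e. universal among all such squares. *)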
Lemma show_is_fiprod : forall a u v y1 y2 ,
fiprod_hypothesis a u v y1 y2 ->
(forall z z', mor a z -> mor a z' ->
source y1 = target z -> source y1 = target z' ->
comp a y1 z = comp a y1 z' -> comp a y2 z = comp a y2 z'
-> z = z') ->
(forall z1 z2, fiprod_hypothesis a u v z1 z2 ->
(exists w, (mor a w & source y1 = target w &
comp a y1 w = z1 & comp a y2 w = z2))) ->
is_fiprod a u v y1 y2.
Proof.
ir. uhg; ee. am.
uhg. ee. rww is_uni_fiprod_cone.
rww is_versal_fiprod_cone.
Qed.
Definition has_fiprod a u v :=
vee_hypothesis a u v &
has_limit (vee_functor a u v).
Lemma has_fiprod_rw : forall a u v,
has_fiprod a u v = (exists y1, exists y2, is_fiprod a u v y1 y2).
Proof.
ir. ap iff_eq; ir.
uh H; ee. uh H0.
nin H0. uh H0; ee. sh (fimap1 x).
sh (fimap2 x). uhg; ee.
ap fiprod_hypothesis_fimaps. am.
app is_limit_is_cone. am.
assert (is_cone x).
app is_limit_is_cone.
util (fiprod_cone_eq (a:=a) (u:=u) (v:=v) (c:=x)).
am. am. am.
wr H3. am.
nin H. nin H. uhg; ee.
uh H; ee. uh H; ee; am.
uh H; ee.
uhg.
sh (fiprod_cone a u v x x0).
uhg; ee. am.
rww socle_fiprod_cone.
Qed.
Definition has_fiprods a :=
Category.axioms a &
(forall u v, vee_hypothesis a u v ->
has_fiprod a u v).
Definition fipr1 a u v :=
fimap1 (limit (vee_functor a u v)).
Definition fipr2 a u v :=
fimap2 (limit (vee_functor a u v)).
Lemma fiprod_hypothesis_fiprod : forall a u v,
has_fiprod a u v ->
fiprod_hypothesis a u v (fipr1 a u v) (fipr2 a u v).
Proof.
ir. uf fipr1; uf fipr2.
app fiprod_hypothesis_fimaps.
uh H; ee; am.
ap is_limit_is_cone. ap is_limit_limit.
uh H; ee; am.
rw socle_limit. tv. uh H; ee; am.
Qed.
Lemma fiprod_hypothesis_comp : forall a u v y1 y2 z,
fiprod_hypothesis a u v y1 y2 -> mor a z ->
source y1 = target z ->
fiprod_hypothesis a u v (comp a y1 z) (comp a y2 z).
Proof.
ir. uh H; uhg; ee. am. rww mor_comp.
rww mor_comp. wrr H6. rww target_comp.
rww target_comp. wrr H6.
rww source_comp. rww source_comp. wrr H6.
wrr assoc. rw H7. rww assoc.
uh H; ee. am. wrr H6. uh H; ee; am.
Qed.
Lemma is_fiprod_fiprod : forall a u v,
has_fiprod a u v ->
is_fiprod a u v (fipr1 a u v) (fipr2 a u v).
Proof.
ir. uhg; ee.
app fiprod_hypothesis_fiprod.
uf fipr1; uf fipr2.
wr fiprod_cone_eq.
ap is_limit_limit. uh H; ee; am.
uh H; ee. am.
ap is_limit_is_cone. ap is_limit_limit. uh H; ee; am.
rw socle_limit. tv. uh H; ee; am.
Qed.
Lemma has_fiprods_has_fiprod : forall a u v,
has_fiprods a -> vee_hypothesis a u v->
has_fiprod a u v.
Proof.
ir. uh H; ee. ap H1. am.
Qed.
Lemma has_fiprods_has_limits_over : forall a,
Category.axioms a ->
has_fiprods a = has_limits_over vee_cat a.
Proof.
ir. ap iff_eq; ir.
uhg. ir. uh H0. ee.
util (H4 (fmor f (catyd_arrow a12')) (fmor f (catyd_arrow a32'))).
wr H3. ap functor_vee_hypothesis. am.
am.
uh H5. ee. wri H3 H6.
rww functor_vee_eq.
uh H0. uhg; ee. am.
ir. uhg; ee; try am.
ap H0.
app vee_functor_axioms.
rww source_vee_functor.
rww target_vee_functor.
Qed.
Lemma mor_fipr1 : forall a u v,
has_fiprod a u v ->
mor a (fipr1 a u v).
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. am.
Qed.
Lemma mor_fipr2 : forall a u v,
has_fiprod a u v ->
mor a (fipr2 a u v).
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. am.
Qed.
Lemma target_fipr1 : forall a u v,
has_fiprod a u v ->
target (fipr1 a u v) = source u.
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. sy; am.
Qed.
Lemma target_fipr2 : forall a u v,
has_fiprod a u v ->
target (fipr2 a u v) = source v.
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. sy; am.
Qed.
Lemma source_fipr2 : forall a u v,
has_fiprod a u v ->
source (fipr2 a u v) = source (fipr1 a u v).
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. sy; am.
Qed.
Lemma comp_fiprod_eq : forall a u v,
has_fiprod a u v ->
comp a u (fipr1 a u v) = comp a v (fipr2 a u v).
Proof.
ir. cp (is_fiprod_fiprod H).
uh H0; ee.
uh H0; ee. am.
Qed.
Definition fipr_dotted a u v y1 y2 :=
dotted (fiprod_cone a u v y1 y2).
Lemma fiprod_cone_fiprod : forall a u v,
has_fiprod a u v ->
fiprod_cone a u v (fipr1 a u v) (fipr2 a u v) =
limit (vee_functor a u v).
Proof.
ir.
uf fipr1; uf fipr2.
wr fiprod_cone_eq. tv. uh H; ee; am.
ap is_limit_is_cone. ap is_limit_limit.
uh H; ee; am.
rw socle_limit. tv. uh H; ee; am.
Qed.
Lemma cone_composable_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
cone_composable (fiprod_cone a u v (fipr1 a u v) (fipr2 a u v))
(fipr_dotted a u v y1 y2).
Proof.
ir.
uf fipr_dotted.
rw fiprod_cone_fiprod.
assert (vee_functor a u v =
socle (fiprod_cone a u v y1 y2)).
rww socle_fiprod_cone. rw H1.
ap cone_composable_dotted.
app is_cone_fiprod_cone.
rw socle_fiprod_cone. uh H; ee; am.
am.
Qed.
Lemma cone_compose_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
cone_compose (fiprod_cone a u v (fipr1 a u v) (fipr2 a u v))
(fipr_dotted a u v y1 y2) = fiprod_cone a u v y1 y2.
Proof.
ir.
uf fipr_dotted.
rw fiprod_cone_fiprod.
assert (vee_functor a u v =
socle (fiprod_cone a u v y1 y2)).
rww socle_fiprod_cone. rw H1.
ap cone_compose_dotted.
app is_cone_fiprod_cone.
rw socle_fiprod_cone. uh H; ee; am. am.
Qed.
Lemma fipr_dotted_uni : forall a u v y1 y2 r,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
cone_composable (fiprod_cone a u v (fipr1 a u v) (fipr2 a u v)) r ->
cone_compose (fiprod_cone a u v (fipr1 a u v) (fipr2 a u v)) r =
fiprod_cone a u v y1 y2 ->
fipr_dotted a u v y1 y2 = r.
Proof.
ir. uf fipr_dotted.
wr H2.
rw fiprod_cone_fiprod. ap dotted_unique.
ap vee_functor_axioms. uh H0; ee; am.
uh H; ee; am.
wr fiprod_cone_fiprod. am. am. am.
Qed.
Lemma mor_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
mor a (fipr_dotted a u v y1 y2).
Proof.
ir.
cp (cone_composable_fipr_dotted H H0).
uh H1; ee. rwi cone_target_fiprod_cone H2. am.
Qed.
Lemma target_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
target (fipr_dotted a u v y1 y2) = source (fipr1 a u v).
Proof.
ir.
cp (cone_composable_fipr_dotted H H0).
uh H1; ee. rwi vertex_fiprod_cone H3. am.
Qed.
Lemma comp_fipr1_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
comp a (fipr1 a u v) (fipr_dotted a u v y1 y2) = y1.
Proof.
ir.
cp (cone_compose_fipr_dotted H H0).
transitivity (fimap1 (fiprod_cone a u v y1 y2)).
wr H1.
rw fimap1_cone_compose.
rw cone_target_fiprod_cone.
rw fimap1_fiprod_cone. tv.
ap is_cone_fiprod_cone.
ap fiprod_hypothesis_fiprod. am.
rww cone_source_fiprod_cone.
app cone_composable_fipr_dotted.
rww fimap1_fiprod_cone.
Qed.
Lemma comp_fipr2_fipr_dotted : forall a u v y1 y2,
has_fiprod a u v -> fiprod_hypothesis a u v y1 y2 ->
comp a (fipr2 a u v) (fipr_dotted a u v y1 y2) = y2.
Proof.
ir.
cp (cone_compose_fipr_dotted H H0).
transitivity (fimap2 (fiprod_cone a u v y1 y2)).
wr H1.
rw fimap2_cone_compose.
rw cone_target_fiprod_cone.
rw fimap2_fiprod_cone. tv.
ap is_cone_fiprod_cone.
app fiprod_hypothesis_fiprod.
rww cone_source_fiprod_cone.
app cone_composable_fipr_dotted.
rww fimap2_fiprod_cone.
Qed.
Lemma fipr_dotted_comp : forall a u v z,
has_fiprod a u v -> mor a z ->
source (fipr1 a u v) = target z ->
fipr_dotted a u v (comp a (fipr1 a u v) z)
(comp a (fipr2 a u v) z) = z.
Proof.
ir.
assert (fiprod_hypothesis a u v (fipr1 a u v) (fipr2 a u v)).
app fiprod_hypothesis_fiprod.
ap fipr_dotted_uni. am.
app fiprod_hypothesis_comp.
uhg; ee. app is_cone_fiprod_cone.
rww cone_target_fiprod_cone.
rww vertex_fiprod_cone. sy; am.
rw cone_compose_fiprod_cone. tv.
am. am. am.
Qed.
(*** now we apply the result of fc_limits to
the case of fiprods ***********************)
Lemma has_fiprods_functor_cat : forall a b,
Category.axioms a -> has_fiprods b ->
has_fiprods (functor_cat a b).
Proof.
ir. rw has_fiprods_has_limits_over.
cp H0.
uh H1; ee.
rwi has_fiprods_has_limits_over H0.
ap has_limits_functor_cat. am. am.
ap vee_cat_axioms. am. am.
ap functor_cat_axioms. am.
uh H0; ee; am.
Qed.
Lemma has_finite_limits_has_fiprods : forall a,
has_finite_limits a -> has_fiprods a.
Proof.
ir. rw has_fiprods_has_limits_over.
uh H; ee. ap H0.
ap is_finite_vee_cat. uh H; ee; am.
Qed.
End Fiprod.
Export Fiprod.
(*** we should be able to more or less recopy Fiprod to
get a module for fiber products; then also
dualize to get cofiprods and cofiber products. The other
main types of limits and colimits we still need to do are direct
products and coproducts ... *****************************)
Module Cofiprod.
(**** we do this module by dualizing the module
fiprod, rather than by relying on colimits *******)
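(*** concretely: an arrow u of a is identified with the flipped
arrow (flip u) of (opp a), so each cofiprod notion below is the
corresponding fiprod notion taken in the opposite category; see
e.g. has_cofiprod, cofipr1, cofipr2 and cofipr_dotted *********)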
Definition covee_hypothesis a u v :=
mor a u & mor a v &
source u = source v.
Definition cofiprod_hypothesis a u v y1 y2 :=
covee_hypothesis a u v &
mor a y1 & mor a y2 & source y1 = target u &
source y2 = target v &
comp a y1 u = comp a y2 v.
Lemma cofi_hyp_fi_hyp : forall a u v y1 y2,
Category.axioms a ->
cofiprod_hypothesis a u v y1 y2 =
fiprod_hypothesis (opp a) (flip u) (flip v) (flip y1) (flip y2).
Proof.
ir. ap iff_eq; ir.
uh H0; uhg; ee.
uh H0; uhg; ee.
rww mor_opp. rww flip_flip.
rww mor_opp. rww flip_flip.
rw target_flip. rw target_flip.
am. alike. alike.
rww mor_opp. rww flip_flip.
rw mor_opp; rww flip_flip.
rw target_flip; try alike.
rw source_flip; try alike. sy; am. uh H0; ee; alike.
rw target_flip; try alike.
rw source_flip; try alike. sy; am. uh H0; ee; alike.
rw source_flip; try alike.
rw source_flip; try alike.
transitivity (target (comp a y1 u)).
rw target_comp. tv. am. uh H0; ee; am. am.
rw H5. rww target_comp. uh H0; ee; am.
rw comp_opp.
rw comp_opp. rw flip_flip.
rw flip_flip. rw flip_flip. rw flip_flip. ap uneq. am.
rww mor_opp; rww flip_flip.
uh H0; ee; am.
rww mor_opp; rww flip_flip.
rww target_flip; try alike.
rww source_flip; try alike.
uh H0; ee. sy; am.
uh H0; ee; alike.
rww mor_opp; rww flip_flip.
uh H0; ee; am.
rww mor_opp; rww flip_flip.
rww target_flip; try alike.
rww source_flip; try alike.
sy; am. uh H0; ee; alike.
uh H0; ee. uh H0; ee.
uhg; dj. uhg; dj. rwi mor_opp H0.
rwi flip_flip H0. am.
rwi mor_opp H7.
rwi flip_flip H7. am.
wr target_flip; try alike.
rw H8. rw target_flip; try alike. tv.
rwi mor_opp H1. rwi flip_flip H1; am.
rwi mor_opp H2. rwi flip_flip H2; am.
wr source_flip; try alike. rw H3.
rww target_flip; try alike.
rwi mor_opp H0.
rwi flip_flip H0. alike. wr target_flip; try alike.
wr H4. uh H9; ee.
rww source_flip; try alike.
rwi comp_opp H6. rwi flip_flip H6.
rwi flip_flip H6. ap flip_eq.
rw H6.
rw comp_opp. rw flip_flip. rw flip_flip. tv.
am. am. am. am. am. am.
Qed.
Definition is_cofiprod a u v y1 y2 :=
Category.axioms a &
is_fiprod (opp a) (flip u) (flip v) (flip y1) (flip y2).
Lemma show_is_cofiprod : forall a u v y1 y2,
Category.axioms a ->
cofiprod_hypothesis a u v y1 y2 ->
(forall z z', mor a z -> mor a z' ->
source z = target y1 -> source z' = target y1 ->
comp a z y1 = comp a z' y1 -> comp a z y2 = comp a z' y2
-> z = z') ->
(forall z1 z2, cofiprod_hypothesis a u v z1 z2 ->
(exists w, (mor a w & source w = target y1 &
comp a w y1 = z1 & comp a w y2 = z2))) ->
is_cofiprod a u v y1 y2.
Proof.
ir.
assert (lem1 : target y1 = target y2).
uh H0. ee. transitivity (target (comp a y1 u)).
rww target_comp. uh H0; ee; am.
rw H7. rww target_comp. uh H0; ee; am.
uhg; ee. am.
ap show_is_fiprod.
wr cofi_hyp_fi_hyp. am. am.
ir. set (z1:=flip z). set (z2 := flip z').
assert (z1 = z2). ap H1.
uf z1. rwi mor_opp H3. am.
rwi mor_opp H4. am.
uf z1. rw source_flip; try alike. wr H5.
rww source_flip; try alike. uh H0; ee.
alike.
uf z2. rw source_flip; try alike. wr H6.
rww source_flip; try alike. uh H0; ee.
alike. uf z1; uf z2.
rwi comp_opp H7.
rwi flip_flip H7.
rwi comp_opp H7. rwi flip_flip H7.
rwi comp_opp H8.
rwi flip_flip H8.
rwi comp_opp H8. rwi flip_flip H8.
ap flip_eq.
am.
rww mor_opp. rw flip_flip.
uh H0; ee; am.
am.
uh H0; ee.
rw source_flip.
wr lem1.
rwi source_flip H6. am. alike. alike.
rww mor_opp. rw flip_flip.
uh H0; ee; am. am.
rw source_flip; try alike. wr lem1.
rwi source_flip H5. am. uh H0; ee; alike.
uh H0; ee; alike.
rw mor_opp; rww flip_flip. uh H0; ee; am.
am. am. rw mor_opp; rw flip_flip; uh H0; ee; am.
am. am.
uf z1; uf z2.
rwi comp_opp H7.
rwi flip_flip H7.
rwi comp_opp H7. rwi flip_flip H7.
rwi comp_opp H8.
rwi flip_flip H8.
rwi comp_opp H8. rwi flip_flip H8.
ap flip_eq.
am.
rww mor_opp. rw flip_flip.
uh H0; ee; am.
am.
uh H0; ee.
rw source_flip.
wr lem1.
rwi source_flip H6. am. alike. alike.
rww mor_opp. rw flip_flip.
uh H0; ee; am. am.
rw source_flip; try alike. wr lem1.
rwi source_flip H5. am. uh H0; ee; alike.
uh H0; ee; alike.
rw mor_opp; rww flip_flip. uh H0; ee; am.
am. am. rw mor_opp; rw flip_flip; uh H0; ee; am.
am. am. ap flip_eq. am.
ir.
cp H3. set (z1' := flip z1).
assert (z1= flip z1').
uf z1'; rww flip_flip. rwi H5 H4.
set (z2' := flip z2).
assert (z2= flip z2').
uf z2'; rww flip_flip. rwi H6 H4.
wri cofi_hyp_fi_hyp H4.
nin (H2 z1' z2' H4). ee.
sh (flip x). ee. rww mor_opp.
rww flip_flip.
rw source_flip. rw target_flip.
sy; am. alike. uh H0; ee; alike.
rw comp_opp.
rw flip_flip. rw flip_flip. ap flip_eq.
rw flip_flip. exact H9.
rww mor_opp. rw flip_flip. uh H0; ee; am.
rww mor_opp. rww flip_flip.
rw source_flip. rw target_flip. sy; am.
alike. uh H0; ee; alike.
rw comp_opp. rw flip_flip. rw flip_flip.
ap flip_eq. rw flip_flip. exact H10.
rw mor_opp; rw flip_flip. uh H0; ee; am.
rw mor_opp; rww flip_flip.
rw source_flip. rw target_flip.
rww H8. sy; am. alike. uh H0; ee; alike.
am.
Qed.
Definition has_cofiprod a u v :=
covee_hypothesis a u v &
has_fiprod (opp a) (flip u) (flip v).
Lemma has_cofiprod_rw : forall a u v,
has_cofiprod a u v =
(exists y1, exists y2, is_cofiprod a u v y1 y2).
Proof.
ir. ap iff_eq; ir.
uh H; ee.
rwi has_fiprod_rw H0.
nin H0. nin H0. sh (flip x). sh (flip x0).
uhg; ee. uh H; ee. uh H; ee; am. rw flip_flip.
rw flip_flip. am.
nin H. nin H.
assert (cofiprod_hypothesis a u v x x0).
rw cofi_hyp_fi_hyp.
uh H; ee. uh H0; ee.
am. uh H; ee. am.
uhg; ee. uh H0; ee; am.
uh H; ee.
rw has_fiprod_rw. sh (flip x).
sh (flip x0). am.
Qed.
Definition has_cofiprods a :=
Category.axioms a &
(forall u v, covee_hypothesis a u v ->
has_cofiprod a u v).
Definition cofipr1 a u v :=
flip (fipr1 (opp a) (flip u) (flip v)).
Definition cofipr2 a u v :=
flip (fipr2 (opp a) (flip u) (flip v)).
Lemma cofiprod_hypothesis_cofiprod : forall a u v,
has_cofiprod a u v ->
cofiprod_hypothesis a u v (cofipr1 a u v) (cofipr2 a u v).
Proof.
ir. uf cofipr1; uf cofipr2.
rw cofi_hyp_fi_hyp. rw flip_flip.
rw flip_flip.
ap fiprod_hypothesis_fiprod.
uh H. ee; am. uh H; ee. uh H; ee. uh H; ee; am.
Qed.
Lemma cofiprod_hypothesis_comp : forall a u v y1 y2 z,
cofiprod_hypothesis a u v y1 y2 -> mor a z ->
source z = target y1 ->
cofiprod_hypothesis a u v (comp a z y1) (comp a z y2).
Proof.
ir.
rw cofi_hyp_fi_hyp.
rwi cofi_hyp_fi_hyp H.
assert (flip (comp a z y1) =
comp (opp a) (flip y1) (flip z)).
rw comp_opp.
rw flip_flip. rww flip_flip.
uh H; ee; am.
rww mor_opp.
rww flip_flip.
rw source_flip; try alike. rw target_flip.
sy; am. alike. uh H; ee.
rwi mor_opp H2. rwi flip_flip H2; alike.
assert (flip (comp a z y2) =
comp (opp a) (flip y2) (flip z)).
rw comp_opp.
rw flip_flip. rww flip_flip.
uh H; ee; am.
rww mor_opp.
rww flip_flip.
uh H; ee. wr H7.
rw source_flip; try alike. rw target_flip.
sy; am. alike.
rwi mor_opp H3. rwi flip_flip H3; alike.
rw H2; rw H3. ap fiprod_hypothesis_comp. am.
rww mor_opp. rww flip_flip.
rw source_flip; try alike. rw target_flip.
sy; am. alike. uh H; ee.
rwi mor_opp H4. rwi flip_flip H4; alike.
uh H0; ee; am. uh H0; ee; am.
Qed.
Lemma is_cofiprod_cofiprod : forall a u v,
has_cofiprod a u v ->
is_cofiprod a u v (cofipr1 a u v) (cofipr2 a u v).
Proof.
ir. assert (Category.axioms a).
uh H; ee. uh H; ee. uh H; ee; am.
uhg; ee; try am.
uf cofipr1; uf cofipr2. rw flip_flip.
rw flip_flip.
ap is_fiprod_fiprod. uh H; ee. am.
Qed.
Lemma has_cofiprods_has_cofiprod : forall a u v,
has_cofiprods a -> covee_hypothesis a u v->
has_cofiprod a u v.
Proof.
ir. uh H; ee. ap H1. am.
Qed.
Lemma covee_hypothesis_opp1 : forall a u v,
Category.axioms a ->
covee_hypothesis a u v ->
vee_hypothesis (opp a) (flip u) (flip v).
Proof.
ir. uh H0; ee. uhg; ee.
rww mor_opp. rww flip_flip.
rww mor_opp. rww flip_flip.
rw target_flip; try alike.
rw target_flip; try alike. am.
Qed.
Lemma covee_hypothesis_opp : forall a u v,
Category.axioms a ->
covee_hypothesis (opp a) u v =
vee_hypothesis a (flip u) (flip v).
Proof.
ir. ap iff_eq; ir.
uh H0; uhg; ee. rwi mor_opp H0. am.
wrr mor_opp.
rw target_flip; try alike. rw target_flip; try alike.
am.
uh H0; ee. uhg; ee.
rw mor_opp; am. rw mor_opp; am.
rwi target_flip H2; try alike. rwi target_flip H2; try alike.
am.
wri mor_opp H1; alike. wri mor_opp H0; alike.
Qed.
Lemma vee_hypothesis_opp_flip : forall a u v,
Category.axioms a ->
vee_hypothesis (opp a) (flip u) (flip v) =
covee_hypothesis a u v.
Proof.
ir.
transitivity (covee_hypothesis (opp (opp a))
u v). rw covee_hypothesis_opp. tv.
app opp_axioms. rww opp_opp.
Qed.
Lemma has_cofipr_has_fipr : forall a u v,
Category.axioms a ->
has_cofiprod a u v = has_fiprod (opp a) (flip u) (flip v).
Proof.
ir. ap iff_eq; ir.
uh H0; ee. am. uhg; ee. uh H0; ee.
rwi vee_hypothesis_opp_flip H0.
am. am. am.
Qed.
Lemma has_fipr_has_cofipr : forall a u v,
Category.axioms a ->
has_fiprod a u v = has_cofiprod (opp a) (flip u) (flip v).
Proof.
ir.
rw has_cofipr_has_fipr. rw flip_flip. rw flip_flip.
rw opp_opp. tv. app opp_axioms.
Qed.
Lemma has_cofiprods_opp : forall a,
has_cofiprods (opp a) = has_fiprods a.
Proof.
ir. ap iff_eq; ir.
uh H; ee. uhg; ee. wrr axioms_opp.
ir. rw has_fipr_has_cofipr.
ap H0.
rw covee_hypothesis_opp. rw flip_flip.
rw flip_flip. am.
wrr axioms_opp. wrr axioms_opp.
uh H; ee. uhg; ee.
app opp_axioms. ir.
rw has_cofipr_has_fipr.
rw opp_opp. ap H0.
wr covee_hypothesis_opp. am. am. app opp_axioms.
Qed.
Lemma has_fiprods_opp : forall a,
has_fiprods (opp a) = has_cofiprods a.
Proof.
ir. wr has_cofiprods_opp.
rww opp_opp.
Qed.
Lemma has_cofiprods_has_colimits_over_opp : forall a,
Category.axioms a ->
has_cofiprods a = has_colimits_over (opp vee_cat) a.
Proof.
ir. ap iff_eq; ir. assert (a = opp (opp a)).
rww opp_opp. rw H1.
ap has_colimits_over_opp. app opp_axioms.
ap vee_cat_axioms.
wr has_fiprods_has_limits_over.
rw has_fiprods_opp. am. app opp_axioms.
wr has_fiprods_opp. rw has_fiprods_has_limits_over.
assert (vee_cat = opp (opp vee_cat)).
rww opp_opp. rw H1.
ap has_limits_over_opp. am.
ap opp_axioms. ap vee_cat_axioms. am.
app opp_axioms.
Qed.
Lemma mor_cofipr1 : forall a u v,
has_cofiprod a u v ->
mor a (cofipr1 a u v).
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee. am.
Qed.
Lemma mor_cofipr2 : forall a u v,
has_cofiprod a u v ->
mor a (cofipr2 a u v).
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee. am.
Qed.
Lemma source_cofipr1 : forall a u v,
has_cofiprod a u v ->
source (cofipr1 a u v) = target u.
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee. am.
Qed.
Lemma source_cofipr2 : forall a u v,
has_cofiprod a u v ->
source (cofipr2 a u v) = target v.
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee. am.
Qed.
Lemma target_cofipr2 : forall a u v,
has_cofiprod a u v ->
target (cofipr2 a u v) = target (cofipr1 a u v).
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee.
uh H0; ee.
transitivity (target (comp a (cofipr1 a u v) u)).
rw H5. rw target_comp. tv.
ap mor_cofipr2. am. am.
rw source_cofipr2. tv. am.
rw target_comp. tv.
app mor_cofipr1. am.
rww source_cofipr1.
Qed.
Lemma comp_cofiprod_eq : forall a u v,
has_cofiprod a u v ->
comp a (cofipr1 a u v) u = comp a (cofipr2 a u v) v.
Proof.
ir. cp (cofiprod_hypothesis_cofiprod H).
uh H0; ee. am.
Qed.
Definition cofipr_dotted a u v y1 y2 :=
flip (fipr_dotted (opp a) (flip u) (flip v) (flip y1) (flip y2)).
Lemma mor_cofipr_dotted : forall a u v y1 y2,
has_cofiprod a u v -> cofiprod_hypothesis a u v y1 y2 ->
mor a (cofipr_dotted a u v y1 y2).
Proof.
ir.
rwi has_cofipr_has_fipr H.
rwi cofi_hyp_fi_hyp H0.
uf cofipr_dotted.
wr mor_opp. app mor_fipr_dotted.
uh H0; ee.
uh H1; ee; am. uh H0; ee.
uh H1; ee; am.
Qed.
Lemma source_fipr_dotted : forall a u v y1 y2,
has_cofiprod a u v -> cofiprod_hypothesis a u v y1 y2 ->
source (cofipr_dotted a u v y1 y2) = target (cofipr1 a u v).
Proof.
ir.
rwi has_cofipr_has_fipr H.
rwi cofi_hyp_fi_hyp H0.
uf cofipr_dotted.
rw source_flip.
rw target_fipr_dotted.
uf cofipr1. rw target_flip.
tv.
apply mor_arrow_like with (opp a). app mor_fipr1.
am. am.
apply mor_arrow_like with (opp a).
app mor_fipr_dotted.
uh H0; ee.
uh H1; ee; am. uh H0; ee.
uh H1; ee; am.
Qed.
Lemma comp_cofipr1_cofipr_dotted : forall a u v y1 y2,
has_cofiprod a u v -> cofiprod_hypothesis a u v y1 y2 ->
comp a (cofipr_dotted a u v y1 y2) (cofipr1 a u v) = y1.
Proof.
ir.
rwi has_cofipr_has_fipr H.
rwi cofi_hyp_fi_hyp H0.
uf cofipr_dotted.
uf cofipr1. ap flip_eq. wr comp_opp.
ap comp_fipr1_fipr_dotted. am. am.
app mor_fipr1. app mor_fipr_dotted.
rw target_fipr_dotted. tv. am. am.
uh H0; ee. uh H1; ee; am.
uh H0; ee. uh H1; ee; am.
Qed.
Lemma comp_cofipr2_cofipr_dotted : forall a u v y1 y2,
has_cofiprod a u v -> cofiprod_hypothesis a u v y1 y2 ->
comp a (cofipr_dotted a u v y1 y2) (cofipr2 a u v) = y2.
Proof.
ir.
rwi has_cofipr_has_fipr H.
rwi cofi_hyp_fi_hyp H0.
uf cofipr_dotted.
uf cofipr2. ap flip_eq. wr comp_opp.
ap comp_fipr2_fipr_dotted. am. am.
app mor_fipr2. app mor_fipr_dotted.
rw target_fipr_dotted. tv.
rw source_fipr2. tv. am. am.
am.
uh H0; ee. uh H1; ee; am.
uh H0; ee. uh H1; ee; am.
Qed.
Lemma cofipr_dotted_comp : forall a u v z,
has_cofiprod a u v -> mor a z ->
source z = target (cofipr1 a u v) ->
cofipr_dotted a u v (comp a z (cofipr1 a u v))
(comp a z (cofipr2 a u v)) = z.
Proof.
ir. cp H. rwi has_cofipr_has_fipr H.
assert (cofiprod_hypothesis a u v
(comp a z (cofipr1 a u v)) (comp a z (cofipr2 a u v))).
app cofiprod_hypothesis_comp.
app cofiprod_hypothesis_cofiprod.
rwi cofi_hyp_fi_hyp H3.
uf cofipr_dotted.
assert (flip (comp a z (cofipr1 a u v)) =
comp (opp a) (fipr1 (opp a) (flip u) (flip v)) (flip z)).
rw comp_opp. sy; rw flip_flip.
uf cofipr1. tv.
app mor_fipr1. rww mor_opp. rww flip_flip.
ufi cofipr1 H1.
rwi target_flip H1. wr H1. rww target_flip.
alike.
apply mor_arrow_like with (opp a).
app mor_fipr1.
assert (flip (comp a z (cofipr2 a u v)) =
comp (opp a) (fipr2 (opp a) (flip u) (flip v)) (flip z)).
rw comp_opp. sy; rw flip_flip.
uf cofipr2. tv.
app mor_fipr2. rww mor_opp. rww flip_flip.
assert (lem1 : source z = target (cofipr2 a u v)).
rw target_cofipr2. am. am.
ufi cofipr2 lem1.
rwi target_flip lem1. wr lem1. rww target_flip.
alike.
apply mor_arrow_like with (opp a).
app mor_fipr2.
rw H4. rw H5. rw fipr_dotted_comp. rww flip_flip. am.
rw mor_opp. rww flip_flip.
ufi cofipr1 H1.
rwi target_flip H1. wr H1. rww target_flip.
alike.
apply mor_arrow_like with (opp a).
app mor_fipr1.
uh H0; ee; am. uh H0; ee; am.
Qed.
(*** now we apply the result of fc_limits to
the case of cofiprods ***********************)
Lemma has_cofiprods_functor_cat : forall a b,
Category.axioms a -> has_cofiprods b ->
has_cofiprods (functor_cat a b).
Proof.
ir. rw has_cofiprods_has_colimits_over_opp.
cp H0.
uh H1; ee.
rwi has_cofiprods_has_colimits_over_opp H0.
ap has_colimits_functor_cat. am. am.
ap opp_axioms. ap vee_cat_axioms. am. am.
ap functor_cat_axioms. am.
uh H0; ee; am.
Qed.
Lemma has_finite_limits_has_cofiprods : forall a,
has_finite_colimits a -> has_cofiprods a.
Proof.
ir. rw has_cofiprods_has_colimits_over_opp.
uh H; ee. ap H0.
ap is_finite_cat_opp.
ap is_finite_vee_cat. uh H; ee; am.
Qed.
End Cofiprod.
Export Cofiprod.
|
{"author": "coq-contribs", "repo": "cats-in-zfc", "sha": "aa7067a8d0a243caec7288dffd1e0a86c65ece0e", "save_path": "github-repos/coq/coq-contribs-cats-in-zfc", "path": "github-repos/coq/coq-contribs-cats-in-zfc/cats-in-zfc-aa7067a8d0a243caec7288dffd1e0a86c65ece0e/fiprod.v"}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
from matplotlib import style
from Points import BuildPath
from std_msgs.msg import String
import rospy
import tf
import time
import datetime
import numpy as np
from numpy.linalg import inv,pinv
from threading import Thread, Lock
from geometry_msgs.msg import TransformStamped
from line_following.srv import *
import Path
import copy
isRun = True
TestActive = True
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
#t = 0
xs0,ys0,xs1,ys1,xc0,yc0,xc1,yc1,xm,ym = BuildPath(0.05)
posx = [0]*36
posy = [0]*36
index1=20
name1="zumoTest20"
index2=29
name2="zumoTest29"
index3=17
name3="zumoTest17"
index4=23
name4="zumoTest23"
index5=25
name5="zumoTest25"
index6=4
name6="zumoTest4"
index7=10
name7="zumoTest10"
index8=24
name8="zumotest24"
index9 = 11
name9 = "zumotest11"
index10 = 26
name10 = "zumotest26"
index11 = 30
name11 = "zumotest30"
index12 = 35
name12 = "zumotest35"
inds = [name1,name2,name3, name4, name5, name6, name7, name8, name9, name10, name11, name12]
inds2 = [index1, index2, index3, index4, index5, index6, index7, index8, index9, index10, index11, index12]
#road 1 upper start
posx[index1] = 1.32
posy[index1] = 0.756802
posx[index2] = 1.0
posy[index2] = 0.76406
#road 2 lower start
posx[index3] = 0.7
posy[index3] = 0.76406
posx[index4] = 0.4
posy[index4] = 0.76406
#right roundabouts
posx[index5] = -2.64291
posy[index5] = 0.30423
posx[index6] = -2.64291
posy[index6] = 0.30423
posx[index7] = -2.51
posy[index7] = 0.038
posx[index8] = -2.5
posy[index8] = 0.73
#extra cars
posx[index9] = 0
posy[index9] = 0.756802
posx[index10] = -0.381052
posy[index10] = 0.756802
posx[index11] = 0.5
posy[index11] = 1.22389
posx[index12] = 1
posy[index12] = 1.22389
def talker(index,x,y,drx,dry,sp):
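# publish this car's index, position, direction vector and speed as a
# comma-separated String on the /ZumoRefs topic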
global pub
pub.publish(str(index)+","+str(x)+","+str(y)+","+str(drx)+","+str(dry)+","+str(sp))
def RT_control(to,tmi,xi,xf,v,vf):
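# Fit a cubic position profile x(t) = a*t^3/6 + b*t^2/2 + c*t + d to the
# boundary conditions (position xi, speed v) at time to and (position xf,
# speed vf) at time tmi. The rows of A_mtx evaluate the cubic's position and
# velocity at those two times; solving the normal equations with a
# pseudo-inverse returns the coefficient vector [a, b, c, d].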
A_mtx = np.matrix([[to**3/6,to**2/2,to,1],[to**2/2,to,1,0],[tmi**3/6,tmi**2/2,tmi,1],[tmi**2/2,tmi,1,0]])
Y_mtx = np.matrix([[xi],[v],[xf],[vf]])
A_aux = np.transpose(A_mtx)*A_mtx
X_mtx = pinv(A_aux)*np.transpose(A_mtx)*Y_mtx
return X_mtx
def getViconPos(frameName):
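# Average the car's pose as seen from the four static corner reference
# frames (component-wise, including the quaternion), then reduce the
# averaged quaternion to a yaw angle theta.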
global tfl
t = tfl.getLatestCommonTime("/static_corner_1", "/vicon/"+frameName+"/"+frameName)
position1, quaternion1 = tfl.lookupTransform("/static_corner_1", "/vicon/"+frameName+"/"+frameName, t)
t = tfl.getLatestCommonTime("/static_corner_2", "/vicon/"+frameName+"/"+frameName)
position2, quaternion2 = tfl.lookupTransform("/static_corner_2", "/vicon/"+frameName+"/"+frameName, t)
t = tfl.getLatestCommonTime("/static_corner_3", "/vicon/"+frameName+"/"+frameName)
position3, quaternion3 = tfl.lookupTransform("/static_corner_3", "/vicon/"+frameName+"/"+frameName, t)
t = tfl.getLatestCommonTime("/static_corner_4", "/vicon/"+frameName+"/"+frameName)
position4, quaternion4 = tfl.lookupTransform("/static_corner_4", "/vicon/"+frameName+"/"+frameName, t)
x = (position1[0]+position2[0]+position3[0]+position4[0])/4
y = (position1[1]+position2[1]+position3[1]+position4[1])/4
q = ((quaternion1[0]+quaternion2[0]+quaternion3[0]+quaternion4[0])/4,(quaternion1[1]+quaternion2[1]+quaternion3[1]+quaternion4[1])/4,(quaternion1[2]+quaternion2[2]+quaternion3[2]+quaternion4[2])/4,(quaternion1[3]+quaternion2[3]+quaternion3[3]+quaternion4[3])/4)
euler = tf.transformations.euler_from_quaternion(q)
theta = euler[2]
return x,y,theta
def animate(i):
global xs0#long road
global ys0#Long road
global xs1
global ys1
ax1.clear()
#the dot at 0,0 is caused by having extra indexes here that don't correspond to an actual car in the sim.
ax1.scatter(posx[index1],posy[index1],c='r',s=300)
ax1.scatter(posx[index2],posy[index2],c='b',s=300)
ax1.scatter(posx[index3],posy[index3],c='g',s=300)
ax1.scatter(posx[index4],posy[index4],c='y',s=300)
# ax1.scatter(posx[index5],posy[index5],c='r',s=300)
ax1.scatter(posx[index6],posy[index6],c='b',s=300)
ax1.scatter(posx[index7],posy[index7],c='g',s=300)
ax1.scatter(posx[index8],posy[index8],c='y',s=300)
#ax1.scatter(posx[index9],posy[index9],c='r',s=300)
#ax1.scatter(posx[index10],posy[index10],c='b',s=300)
#ax1.scatter(posx[index11],posy[index11],c='g',s=300)
#ax1.scatter(posx[index12],posy[index12],c='y',s=300)
ax1.plot(xs0,ys0,'g')
ax1.plot(xs1,ys1,'b')
ax1.plot(xc0,yc0,'y')
ax1.plot(xc1,yc1,'y')
ax1.plot(xm,ym,'k')
ax1.set_xlim([3.048,-3.048])
ax1.set_ylim([1.524,-4.5])
def zumoThread(index, frameName, controlRate, path):
#zumoThread inputs: index (car number), frameName (car name in Vicon), controlRate (loop sleep interval in seconds), path (car's path from Path.py)
global posx
global posy
global TestActive
x = posx[index]
y = posy[index]
status = path.GetSegmentIDNoCheck()#return the Path segment number
road_speed = 0.2 #speed limit on the road unless overridden by the controller
speed = road_speed #*1.145 #linear scaling factor 1.145 to achieve 0.30 actual
nf = rospy.ServiceProxy('lf_grad', LineFollowing) #nf is a proxy for the 'lf_grad' LineFollowing service; see the srv folder for its inputs and outputs
nf_m = rospy.ServiceProxy('lf_merge', MergeControl)
cur_t = time.time()
dx = 0.0 #initialize gradient direction vectors
dy = 0.0
isControlled = False #initially not in the control region
abcd = np.matrix([[0],[0],[0],[0]]) #initialize an RT_control matrix
tinit = 0
controlInfoPre = (None,None)
while isRun:
temp_t = time.time()
x = x + dx*speed*(temp_t-cur_t) #Note temp_t and cur_t names seem to be backward
y = y + dy*speed*(temp_t-cur_t)
posx[index] = x #index refers to the car number, update that cars x position
posy[index] = y
talker(index,x,y,dx,dy,speed) #publish the cars number, its position, the desired vector to the next position and its desired speed
"""
if TestActive:
try:
x,y,t=getViconPos(frameName)
except:
if DEBUG:
print("VICON ERROR, Vicon cannot pass positions of vehicle back")
posx[index]=x
posy[index]=y
streetindex=findMe(index,headwayManager.nodes[paths[j]],k[j],1)
#Snap vehicle position to node
try:
x=headwayManager.nodes[paths[j]][streetindex][0]
y=headwayManager.nodes[paths[j]][streetindex][1]
except:
if DEBUG:
print("Projected x and y cannot be found from headway")
"""
cur_t = temp_t
rospy.wait_for_service('lf_grad')
try:
resp = nf(status, x, y) # call the LineFollowing service; resp is the service response object
res = [resp.res, resp.dx, resp.dy] # unpack the response fields into a plain list
status = path.GetSegmentID(x,y)
if res[0] == 0:
dx = res[1]
dy = res[2]
elif res[0]==2:
pass
else:
print "Zumo "+str(index)+" Cannot Run NF."
except rospy.ServiceException, e:
print "Service call failed: %s"%e
controlInfo = path.CheckControl(x, y) #controlInfo[0] can be 0, 1 or 2: 0 means in the control region (of length L), 1 means in the merge region, and 2 means exiting the merge region. CheckControl tests whether the position has passed the specified transition conditions
if (controlInfo is not None) and (controlInfo != controlInfoPre): #if the position falls in the control region
if controlInfo[0] == 0:
rospy.wait_for_service('lf_merge')
mf = nf_m(index,controlInfo[1],controlInfo[2],0,road_speed)
if mf.isFirst:
isControlled = False
else:
isControlled = True
print(isControlled)
abcd = RT_control(time.time()-mf.tinit,mf.tmi-mf.tinit,0,mf.L,road_speed,road_speed)
tinit = mf.tinit
#print "Robot "+str(index)+": to->"+str(time.time()-mf.tinit)+" tmi->"+str(mf.tmi-mf.tinit)+" xi->0 xf->"+str(mf.L)+" v->"+str(cur_speed)+" vf->"+str(cur_speed)
#print "ABCD: "+str(abcd)
elif controlInfo[0] == 2:
isControlled = False
rospy.wait_for_service('lf_merge')
mf = nf_m(index,controlInfo[1],controlInfo[2],1,road_speed)
elif controlInfo[0] == 1:
isControlled = False
controlInfoPre = controlInfo
if not isControlled:
speed = road_speed #*1.145 #this correction is applied on the real robots and is removed here because the simulated cars do not need the linear scaling to reach the commanded speed
else:
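# evaluate the fitted cubic's velocity a*t^2/2 + b*t + c at the elapsed
# time since tinit to get the commanded merge speed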
temps = 0.5*abcd[0]*(time.time()-tinit)*(time.time()-tinit)+abcd[1]*(time.time()-tinit)+abcd[2]
ttemps = temps.item(0)
speed = ttemps #*1.145
#if speed >0.7:
#speed = 0.7
time.sleep(controlRate)
'''
if TestActive:
for i in range(12):
x,y,t=getViconPos(inds[i])
posx[inds2[i]]=x
posy[inds2[i]]=y
'''
rospy.init_node('zumo_go', anonymous=True)#zumo_go is a node
global tfl
tfl = tf.TransformListener()
pub = rospy.Publisher('/ZumoRefs', String, queue_size=1000)#ZumoRefs is a topic name
#one zipper merge starting positions
t1 = Thread(target = zumoThread, args = (index1, name1, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t1.start()
t2 = Thread(target = zumoThread, args = (index2, name2, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t2.start()
t3 = Thread(target = zumoThread, args = (index3, name3, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t3.start()
t4 = Thread(target = zumoThread, args = (index4, name4, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
t4.start()
#t5 = Thread(target = zumoThread, args = (index5, name5, 0.05, copy.deepcopy(Path.GetDefaultPath(45))))
#t5.start()
t5 = Thread(target = zumoThread, args = (index6, name6, 0.05, copy.deepcopy(Path.GetDefaultPath(48))))
t5.start()
t6 = Thread(target = zumoThread, args = (index7, name7, 0.05, copy.deepcopy(Path.GetDefaultPath(48))))
t6.start()
t7 = Thread(target = zumoThread, args = (index8, name8, 0.05, copy.deepcopy(Path.GetDefaultPath(48))))
t7.start()
#t8 = Thread(target = zumoThread, args = (index9, name9, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
#t8.start()
#t9 = Thread(target = zumoThread, args = (index10, name10, 0.05, copy.deepcopy(Path.GetDefaultPath(39))))
#t9.start()
#t10 = Thread(target = zumoThread, args = (index11, name11, 0.05, copy.deepcopy(Path.GetDefaultPath(44))))
#t10.start()
#t11 = Thread(target = zumoThread, args = (index12, name12, 0.05, copy.deepcopy(Path.GetDefaultPath(44))))
#t11.start()
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.axis('equal')
plt.show()
isRun = False
|
{"hexsha": "7cda57294d9f20248bceb65d906c2f537eae0803", "size": 11144, "ext": "py", "lang": "Python", "max_stars_repo_path": "Roundabouts/DynamicMerging.py", "max_stars_repo_name": "NKdeveloper/Autonomous-Vehicle-Research", "max_stars_repo_head_hexsha": "93c262a9ae9e6246b7f0d74023cfaadb4ee71eb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Roundabouts/DynamicMerging.py", "max_issues_repo_name": "NKdeveloper/Autonomous-Vehicle-Research", "max_issues_repo_head_hexsha": "93c262a9ae9e6246b7f0d74023cfaadb4ee71eb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Roundabouts/DynamicMerging.py", "max_forks_repo_name": "NKdeveloper/Autonomous-Vehicle-Research", "max_forks_repo_head_hexsha": "93c262a9ae9e6246b7f0d74023cfaadb4ee71eb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0647249191, "max_line_length": 265, "alphanum_fraction": 0.6444723618, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3500}
|
import os
import sys
from statsmodels.datasets import get_rdataset
from numpy.testing import assert_
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
if sys.version_info[0] >= 3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
pass
#duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
else:
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
|
{"hexsha": "f3e13fbd2429cb132d89ef667c1533479840b149", "size": 655, "ext": "py", "lang": "Python", "max_stars_repo_path": "statsmodels/datasets/tests/test_utils.py", "max_stars_repo_name": "toobaz/statsmodels", "max_stars_repo_head_hexsha": "5286dd713a809b0630232508bf9ad5104aae1980", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-05-18T11:46:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-23T04:52:27.000Z", "max_issues_repo_path": "statsmodels/datasets/tests/test_utils.py", "max_issues_repo_name": "AnaMP/statsmodels", "max_issues_repo_head_hexsha": "2d4aad9a14619ce0c84d4c7bca9dacd66b2be566", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-11T14:30:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-11T14:30:32.000Z", "max_forks_repo_path": "statsmodels/datasets/tests/test_utils.py", "max_forks_repo_name": "AnaMP/statsmodels", "max_forks_repo_head_hexsha": "2d4aad9a14619ce0c84d4c7bca9dacd66b2be566", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-04-01T08:26:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-14T14:34:10.000Z", "avg_line_length": 32.75, "max_line_length": 71, "alphanum_fraction": 0.6778625954, "include": true, "reason": "from numpy,from statsmodels", "num_tokens": 172}
|
# -*- coding: utf-8 -*-
'''
A Convolutional Neural Network implementation example using Tensorflow Library.
The example uses the MNIST data from the Kaggle digit-recognizer competition
https://www.kaggle.com/c/digit-recognizer
Author:sfailsthy
'''
# Libraries
import tensorflow as tf
import numpy as np
import csv
# setting parameters
learning_rate=1e-4
training_iterations=20000
dropout=0.5 # note: used below as keep_prob, i.e. the probability of *keeping* a unit, not the drop rate
batch_size = 50
validation_size = 2000
# Import all the training and testing data
test_data = np.genfromtxt('data/test.csv', skip_header=1, delimiter=',')
train_raw = np.genfromtxt('data/train.csv', skip_header=1, delimiter=',')
train_label = train_raw[:,0]
train_label_soft = np.zeros((len(train_label), 10))
for i in range( len(train_label) ):
train_label_soft[i, int(train_label[i])] = 1
train_data = train_raw[:,1:train_raw.shape[1]]/255.0
train_label=train_label_soft
validation_images=train_data[0:validation_size]
validation_labels=train_label[0:validation_size]
train_images=train_data[validation_size:]
train_labels=train_label[validation_size:]
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Create the model
x = tf.placeholder(tf.float32, [None, train_data.shape[1]])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Set up first layer
'''
The first layer is a convolution, followed by max pooling.
The convolution computes 32 features for each 5x5 patch.
Its weight tensor has a shape of [5, 5, 1, 32].
The first two dimensions are the patch size,
the next is the number of input channels (1 means that images are grayscale),
and the last is the number of output channels.
There is also a bias vector with a component for each output channel.
To apply the layer, we reshape the input data to a 4d tensor,
with the first dimension corresponding to the number of images,
second and third - to image width and height,
and the final dimension - to the number of colour channels.
After the convolution, pooling reduces the size of the output from 28x28 to 14x14.
'''
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
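# shapes: x_image [batch,28,28,1] -> h_conv1 [batch,28,28,32] -> h_pool1 [batch,14,14,32]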
# Set up the second layer
'''
The second layer has 64 features for each 5x5 patch.
Its weight tensor has a shape of [5, 5, 32, 64].
The first two dimensions are the patch size,
the next is the number of input channels (32 channels correspond to 32 featured that we got from previous convolutional layer),
and the last is the number of output channels.
There is also a bias vector with a component for each output channel.
Because the image is down-sampled by pooling to 14x14,
the second convolutional layer picks up more general
characteristics of the images.
Filters cover more space of the picture.
Therefore, it is adjusted for more generic features while the first layer finds smaller details.
'''
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
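# shapes: h_pool1 [batch,14,14,32] -> h_conv2 [batch,14,14,64] -> h_pool2 [batch,7,7,64]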
# Densely Connected Layer
'''
Now that the image size is reduced to 7x7,
we add a fully-connected layer with 1024 neurons
to allow processing on the entire image
(each of the neurons of the fully connected layer is connected to all the activations/outputs of the previous layer)
'''
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
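# h_pool2 is flattened to [batch, 7*7*64] = [batch, 3136] and mapped to [batch, 1024]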
# Dropout
'''
To prevent overfitting, we apply dropout before the readout layer.
Dropout removes some nodes from the network at each training stage.
Each of the nodes is either kept in the network with probability keep_prob or dropped with probability 1 - keep_prob.
After the training stage is over the nodes are returned to the NN with their original weights.
'''
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
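# keep_prob is fed as `dropout` (0.5) during training and as 1.0 for accuracy
# evaluation and test-set prediction, disabling dropout at inference time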
# Readout Layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_ = tf.placeholder(tf.float32, [None, 10])
# => (40000, 10)
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
epochs_completed = 0
index_in_epoch = 0
num_examples = train_images.shape[0]
# serve data by batches
def next_batch(batch_size):
global train_images
global train_labels
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
# when all training data have been used, they are reordered randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
train_images = train_images[perm]
train_labels = train_labels[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return train_images[start:end], train_labels[start:end]
# start TensorFlow Session
# Initializing the variables
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(training_iterations):
# get new batch
batch_xs,batch_ys=next_batch(batch_size)
# train on batch
train_step.run(feed_dict={x:batch_xs,y_:batch_ys,keep_prob:dropout})
if i%100==0 or (i+1)==training_iterations:
train_accuracy=accuracy.eval(feed_dict={x:batch_xs,y_:batch_ys,keep_prob:1.0})
validation_accuracy=accuracy.eval(feed_dict={x:validation_images[:batch_size],y_:validation_labels[:batch_size],keep_prob:1.0})
print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d'%(train_accuracy, validation_accuracy, i))
# Batch the test data and write to csv
with open('data/submission.csv','w', newline='') as f:
writer = csv.writer(f)
writer.writerow(('ImageId', 'Label'))
test_data=test_data/255.0
for start in range(0, test_data.shape[0], batch_size):
# Ensure we stay within boundaries
end = np.minimum(start + batch_size, test_data.shape[0])
prediction = tf.argmax(y_conv,1)
labels = prediction.eval(feed_dict={x: test_data[start:end,:], keep_prob: 1.0})
for i in range(end - start):
writer.writerow((start + i + 1, labels[i]))
sess.close()
|
{"hexsha": "566fdbe5189bccef6c52203fb6ecceb50215e0f2", "size": 7036, "ext": "py", "lang": "Python", "max_stars_repo_path": "mnist/mnist_with_cnn.py", "max_stars_repo_name": "sfailsthy/kaggle", "max_stars_repo_head_hexsha": "4e03042278e891a99e774a0e0c5a404d62cdfc13", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mnist/mnist_with_cnn.py", "max_issues_repo_name": "sfailsthy/kaggle", "max_issues_repo_head_hexsha": "4e03042278e891a99e774a0e0c5a404d62cdfc13", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mnist/mnist_with_cnn.py", "max_forks_repo_name": "sfailsthy/kaggle", "max_forks_repo_head_hexsha": "4e03042278e891a99e774a0e0c5a404d62cdfc13", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8316831683, "max_line_length": 139, "alphanum_fraction": 0.7283968164, "include": true, "reason": "import numpy", "num_tokens": 1814}
|
# Test script to test solver with JuMP on a closest correlation matrix problem
using COSMO, JuMP, LinearAlgebra, SparseArrays, Test, Random
rng = Random.MersenneTwister(12345);
# Original problem has the following format:
# min_X 1/2 ||X-C||^2
# s.t. Xii = 1
# X ⪴ 0
# create a random test matrix C
n = 8
C = -1 .+ rand(rng, n, n) .* 2;
c = vec(C);
# define problem in JuMP
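# expanding 0.5*||X - C||_F^2 with x = vec(X) and c = vec(C) gives
# 0.5*x'x - c'x + 0.5*c'c, hence the linear term q = -c and the constant r below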
q = -c;
r = 0.5 * c' * c;
m = JuMP.Model(with_optimizer(COSMO.Optimizer, verbose=true, eps_abs = 1e-4));
@variable(m, X[1:n, 1:n], PSD);
x = vec(X);
@objective(m, Min, 0.5 * x' * x + q' * x + r)
for i = 1:n
@constraint(m, X[i, i] == 1.)
end
# solve and get results
status = JuMP.optimize!(m)
obj_val = JuMP.objective_value(m)
X_sol = JuMP.value.(X)
known_opt_val = 12.5406
known_solution = [
1.0 0.732562 -0.319491 -0.359985 -0.287543 -0.15578 0.0264044 -0.271438;
0.732562 1.0 0.0913246 -0.0386357 0.299199 -0.122733 0.126612 -0.187489;
-0.319491 0.0913246 1.0 -0.0863377 0.432948 0.461783 -0.248641 -0.395299;
-0.359985 -0.0386357 -0.0863377 1.0 0.503379 0.250601 0.141151 0.286088;
-0.287543 0.299199 0.432948 0.503379 1.0 -0.0875199 0.137518 0.0262425;
-0.15578 -0.122733 0.461783 0.250601 -0.0875199 1.0 -0.731556 0.0841783;
0.0264044 0.126612 -0.248641 0.141151 0.137518 -0.731556 1.0 -0.436274;
-0.271438 -0.187489 -0.395299 0.286088 0.0262425 0.0841783 -0.436274 1.0 ];
@testset "Closest correlation matrix example" begin
@test isapprox(obj_val, known_opt_val , atol=1e-3)
@test norm(X_sol - known_solution, Inf) < 1e-3
end
nothing
|
{"hexsha": "e296832d7de356c125a9ee029226a13454bae3d4", "size": 1739, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/closest_correlation_matrix.jl", "max_stars_repo_name": "UnofficialJuliaMirror/COSMO.jl-1e616198-aa4e-51ec-90a2-23f7fbd31d8d", "max_stars_repo_head_hexsha": "f90cc6218d86db2fcd47b7ca533df2aa1c51f7ce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/closest_correlation_matrix.jl", "max_issues_repo_name": "UnofficialJuliaMirror/COSMO.jl-1e616198-aa4e-51ec-90a2-23f7fbd31d8d", "max_issues_repo_head_hexsha": "f90cc6218d86db2fcd47b7ca533df2aa1c51f7ce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/closest_correlation_matrix.jl", "max_forks_repo_name": "UnofficialJuliaMirror/COSMO.jl-1e616198-aa4e-51ec-90a2-23f7fbd31d8d", "max_forks_repo_head_hexsha": "f90cc6218d86db2fcd47b7ca533df2aa1c51f7ce", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2291666667, "max_line_length": 96, "alphanum_fraction": 0.6072455434, "num_tokens": 740}
|
using OpenVR
# include(joinpath((@__DIR__),"..","src","OpenVR_C.jl"))
# using Main.OpenVR_C
# const OpenVR = OpenVR_C
# unfortunately the C-API is not maintained by Valve … and the currently distributed version does not provide all necessary function pointers in the C-function-tables
# https://github.com/ValveSoftware/openvr/issues/89
# https://steamcommunity.com/app/358720/discussions/0/405692758722144628/
# https://github.com/ValveSoftware/openvr/issues/133
include("hellovr_opengl_structs.jl")
ActionManifestPath = "/home/christianl/src/openvr/samples/bin/hellovr_actions.json"
strFullPath = "/home/christianl/src/openvr/samples/bin/cube_texture.png"
# https://stackoverflow.com/questions/39399187/case-insensitive-string-comparison-in-julia
using Unicode # Unicode.normalize
stricmp(a,b) = Unicode.normalize(a; casefold=true) == Unicode.normalize(b; casefold=true)
function mkCMainApplication( argv::Array{String} = String[]) :: CMainApplication
this = CMainApplication()
this.m_pCompanionWindow = C_NULL
this.m_pContext = C_NULL
this.m_nCompanionWindowWidth = 640
this.m_nCompanionWindowHeight = 320
this.m_unSceneProgramID = 0
this.m_unCompanionWindowProgramID = 0
this.m_unControllerTransformProgramID = 0
this.m_unRenderModelProgramID = 0
this.m_pHMD = C_NULL
this.m_bDebugOpenGL = false
this.m_bVerbose = false
this.m_bPerf = false
this.m_bVblank = false
this.m_bGlFinishHack = true
this.m_glControllerVertBuffer = 0
this.m_unControllerVAO = 0
this.m_unSceneVAO = 0
this.m_nSceneMatrixLocation = -1
this.m_nControllerMatrixLocation = -1
this.m_nRenderModelMatrixLocation = -1
this.m_iTrackedControllerCount = 0
this.m_iTrackedControllerCount_Last = -1
this.m_iValidPoseCount = 0
this.m_iValidPoseCount_Last = -1
this.m_iSceneVolumeInit = 20
this.m_strPoseClasses = ""
this.m_bShowCubes = true
this.m_rHand = [ControllerInfo_t(),ControllerInfo_t()] # must initialize non-isbits (else they become #undef)
this.m_vecRenderModels = [] # must initialize non-isbits (else they become #undef)
this.m_vecRenderModelsr = [] # must initialize non-isbits (else they become #undef)
argc = length(argv)
# note: the C original tests !stricmp(...) where stricmp returns 0 on equality;
# the Julia stricmp above returns a Bool, so the tests must not be negated
# (the previous ~stricmp inverted the logic). A while-loop is used because
# mutating a for-loop variable has no effect in Julia, so the value consumed
# by -cubevolume could never be skipped. g_bPrintf mirrors the C sample's global.
i = 0
while i < argc
if ( stricmp( argv[i+1], "-gldebug" ) )
this.m_bDebugOpenGL = true;
elseif ( stricmp( argv[i+1], "-verbose" ) )
this.m_bVerbose = true;
elseif ( stricmp( argv[i+1], "-novblank" ) )
this.m_bVblank = false;
elseif ( stricmp( argv[i+1], "-noglfinishhack" ) )
this.m_bGlFinishHack = false;
elseif ( stricmp( argv[i+1], "-noprintf" ) )
global g_bPrintf = false;
elseif ( stricmp( argv[i+1], "-cubevolume" ) && ( argc > i + 1 ) && ( argv[ i + 1 +1][1] != '-' ) )
this.m_iSceneVolumeInit = parse( Int, argv[ i + 1 +1] );
i += 1;
end
i += 1;
end
# other initialization tasks are done in BInit
this.m_rDevClassChar = zeros(SArray{Tuple{64},Int8,1,64})
return this
end
using MemoryMutate # provides @mem
Base.fieldoffset(T :: DataType, field :: Symbol) = fieldoffset(T,findfirst(x -> x == field,fieldnames(T)))
fieldpointer(this,field) = pointer_from_objref(this) + fieldoffset(typeof(this),field)
using LinearAlgebra # inv
# all this just to load PNG
# https://github.com/JuliaIO/ImageMagick.jl
using FileIO
using ImageMagick
using ColorTypes
using FixedPointNumbers
# compatibility for CxxWrap wrapped library
cpp_object(x::Ptr{T}) where T = x
cpp_object(x::T) where T = x.cpp_object
Base.unsafe_string(x::String) = x
using CxxWrap
CxxWrap.isnull(x::Ptr{T}) where T = x == C_NULL
using SimpleDirectMediaLayer
const SDL = SimpleDirectMediaLayer
SDLText = Dict(vcat(map(M -> map(s -> getproperty(M,s) => s,filter(s -> typeof(getproperty(M,s)) == UInt32,names(M;all=true,imported=false))),[SDL])...))
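# reverse lookup table: maps SDL's UInt32 constants (e.g. event types) back to
# their symbol names, which is handy when printing/debugging SDL events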
# using CxxWrap
# module_functions = CxxWrap.get_module_functions(OpenVR)
# glmain() = ccall((:main, "/home/christianl/src/openvr/samples/bin/linux64/libhellovr_julia.so"), Int32, (Cint, Ref{Cstring}),0,[])
# https://discourse.julialang.org/t/initializing-an-array-of-mutable-objects-is-surprisingly-slow/11780
# Indeed, isbits types are stored inline in the array; non-isbits types are stored as pointers
# allocate typetagged memory within julia
# jMainApplication = CMainApplication()
jMainApplication = mkCMainApplication()
this = pMainApplication = jMainApplication
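# note: the render functions below take an `app` argument but, like the C++
# member functions they were ported from, actually read their state through
# the global `this` bound here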
# pMainApplication = CMainApplication()
# app = pMainApplication = unsafe_load(convert(Ptr{CMainApplicationAllocated},pointer_from_objref(Ref(pointer_from_objref(jMainApplication)))))
app = jMainApplication
# use C++ placement new to put the object there
# VR.placeCMainApplication(pointer_from_objref(jMainApplication))
using ModernGL
ModernGL.glShaderSource(shader::GLuint,source::String) = glShaderSource(shader,1,[source],[length(source)])
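# convenience overload: wraps the raw C signature
# glShaderSource(shader, count, strings, lengths) for a single Julia String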
# https://gamedev.stackexchange.com/questions/70829/why-is-gl-texture-max-anisotropy-ext-undefined
const GL_TEXTURE_MAX_ANISOTROPY_EXT = 0x84FE
const GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT = 0x84FF
frame = 0
# -----------------------------------------------------------------------------
# Purpose: Draws the render model
# -----------------------------------------------------------------------------
function Draw(ptr::Ref{CGLRenderModel})
this = unsafe_load(ptr)
glBindVertexArray( this.m_glVertArray );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, this.m_glTexture );
glDrawElements( GL_TRIANGLES, this.m_unVertexCount, GL_UNSIGNED_SHORT, C_NULL );
glBindVertexArray( 0 );
end
# -----------------------------------------------------------------------------
# Purpose: Gets a Current View Projection Matrix with respect to nEye,
# which may be an Eye_Left or an Eye_Right.
# -----------------------------------------------------------------------------
function GetCurrentViewProjectionMatrix( app :: CMainApplication, nEye::OpenVR.Hmd_Eye ) :: Matrix4
return Matrix4(
( nEye == OpenVR.Eye_Left
? this.m_mat4ProjectionLeft.m * this.m_mat4eyePosLeft.m * this.m_mat4HMDPose.m
: nEye == OpenVR.Eye_Right
? this.m_mat4ProjectionRight.m * this.m_mat4eyePosRight.m * this.m_mat4HMDPose.m
: zeros(4,4)
)
, zeros(4,4)
)
end
function RenderScene( app :: CMainApplication, nEye :: OpenVR.Hmd_Eye )
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
if ( this.m_bShowCubes )
glUseProgram( this.m_unSceneProgramID );
vpm = GetCurrentViewProjectionMatrix( app, nEye )
vpm_arr = Array(vpm.m[:])
# Effect:
# Idiag = [0,5,10] .+ 1
# vpm_arr[Idiag] .*= 1.0 + 0.1*cos(frame/50.0)
glUniformMatrix4fv( this.m_nSceneMatrixLocation, 1, GL_FALSE, vpm_arr);
glBindVertexArray( this.m_unSceneVAO );
glBindTexture( GL_TEXTURE_2D, this.m_iTexture );
glDrawArrays( GL_TRIANGLES, 0, this.m_uiVertcount );
glBindVertexArray( 0 );
end
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD))))
bIsInputAvailable = OpenVR.IsInputAvailable(m_pHMD);
if ( bIsInputAvailable )
# draw the controller axis lines
glUseProgram( this.m_unControllerTransformProgramID );
cvpm = GetCurrentViewProjectionMatrix( app, nEye )
cvpm_arr = Array(cvpm.m[:])
glUniformMatrix4fv( this.m_nControllerMatrixLocation, 1, GL_FALSE, convert(Array{Float32},cvpm_arr) );
glBindVertexArray( this.m_unControllerVAO );
glDrawArrays( GL_LINES, 0, this.m_uiControllerVertcount );
glBindVertexArray( 0 );
end
# ----- Render Model rendering -----
glUseProgram( this.m_unRenderModelProgramID );
Left = 0 # TODO: use CxxWrap.jl enum here instead
Right = 1 # TODO: use CxxWrap.jl enum here instead
for eHand = Left:Right
m_rHand = this.m_rHand[eHand+1]
# if ( !VR.get_m_bShowController(m_rHand) || !VR.get_m_pRenderModel(m_rHand) )
if ( ~m_rHand.m_bShowController || m_rHand.m_pRenderModel == C_NULL )
continue;
end
matDeviceToTracking = m_rHand.m_rmat4Pose;
# TODO
mvp = GetCurrentViewProjectionMatrix( app, nEye )
# @GC.preserve mvpAllocated mvp = unsafe_load(reinterpret(Ptr{Matrix4},mvpAllocated.cpp_object))
matMVP = mvp.m * matDeviceToTracking.m; # Julia!
matMVP_arr = Array(matMVP[:])
glUniformMatrix4fv( this.m_nRenderModelMatrixLocation, 1, GL_FALSE, convert(Array{Float32},matMVP_arr) );
# glUniformMatrix4fv( this.m_nRenderModelMatrixLocation, 1, GL_FALSE, [matMVP.data...] );
# m_pRenderModel = Ref(m_rHand.m_pRenderModel) # TODO
# @GC.preserve m_pRenderModel m_pRenderModelRef = unsafe_load(reinterpret(Ptr{CGLRenderModelRef},pointer_from_objref(m_pRenderModel)))
# VR.Draw(m_pRenderModelRef);
Draw(m_rHand.m_pRenderModel);
end
glUseProgram( 0 );
end
function RenderStereoTargets(app :: CMainApplication)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glEnable( GL_MULTISAMPLE );
# Left Eye
leftEyeDesc = this.leftEyeDesc
glBindFramebuffer( GL_FRAMEBUFFER, leftEyeDesc.m_nRenderFramebufferId );
glViewport(0, 0, this.m_nRenderWidth, this.m_nRenderHeight );
# VR.RenderScene(app, VR.Eye_Left );
RenderScene(app, OpenVR.Eye_Left );
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
glDisable( GL_MULTISAMPLE );
glBindFramebuffer(GL_READ_FRAMEBUFFER, leftEyeDesc.m_nRenderFramebufferId);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, leftEyeDesc.m_nResolveFramebufferId );
glBlitFramebuffer( 0, 0, this.m_nRenderWidth, this.m_nRenderHeight, 0, 0, this.m_nRenderWidth, this.m_nRenderHeight,
GL_COLOR_BUFFER_BIT,
GL_LINEAR );
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0 );
glEnable( GL_MULTISAMPLE );
# Right Eye
rightEyeDesc = this.rightEyeDesc
glBindFramebuffer( GL_FRAMEBUFFER, rightEyeDesc.m_nRenderFramebufferId );
glViewport(0, 0, this.m_nRenderWidth, this.m_nRenderHeight );
# VR.RenderScene(app, VR.Eye_Right );
RenderScene(app, OpenVR.Eye_Right );
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
glDisable( GL_MULTISAMPLE );
glBindFramebuffer(GL_READ_FRAMEBUFFER, rightEyeDesc.m_nRenderFramebufferId );
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, rightEyeDesc.m_nResolveFramebufferId );
glBlitFramebuffer( 0, 0, this.m_nRenderWidth, this.m_nRenderHeight, 0, 0, this.m_nRenderWidth, this.m_nRenderHeight,
GL_COLOR_BUFFER_BIT,
GL_LINEAR );
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0 );
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function UpdateHMDMatrixPose(app :: CMainApplication)
if ( this.m_pHMD == C_NULL )
return;
end
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD)))) # TODO
vrcompositor = OpenVR.VRCompositor()
m_rTrackedDevicePosePtr = @ptr this.m_rTrackedDevicePose[1]
OpenVR.WaitGetPoses(vrcompositor,m_rTrackedDevicePosePtr, OpenVR.k_unMaxTrackedDeviceCount, C_NULL, 0 );
this.m_iValidPoseCount = 0;
# this.m_strPoseClassesJ = "";
m_strPoseClassesJ = "";
for nDevice = 0:OpenVR.k_unMaxTrackedDeviceCount-1
if ( this.m_rTrackedDevicePose[nDevice+1].bPoseIsValid )
this.m_iValidPoseCount += 1;
@mem this.m_rmat4DevicePose[nDevice+1] = ConvertSteamVRMatrixToMatrix4( this.m_rTrackedDevicePose[nDevice+1].mDeviceToAbsoluteTracking );
if (this.m_rDevClassChar[nDevice+1]==0)
tdc = OpenVR.GetTrackedDeviceClass(m_pHMD,nDevice)
if tdc == OpenVR.TrackedDeviceClass_Controller ; @mem this.m_rDevClassChar[nDevice+1] = 'C'
elseif tdc == OpenVR.TrackedDeviceClass_HMD ; @mem this.m_rDevClassChar[nDevice+1] = 'H'
elseif tdc == OpenVR.TrackedDeviceClass_Invalid ; @mem this.m_rDevClassChar[nDevice+1] = 'I'
elseif tdc == OpenVR.TrackedDeviceClass_GenericTracker ; @mem this.m_rDevClassChar[nDevice+1] = 'G'
elseif tdc == OpenVR.TrackedDeviceClass_TrackingReference; @mem this.m_rDevClassChar[nDevice+1] = 'T'
else @mem this.m_rDevClassChar[nDevice+1] = '?'
end
end
m_strPoseClassesJ *= Char(this.m_rDevClassChar[nDevice+1]);
end
end
# VR.setJuliaStringTostdstring(m_strPoseClassesJ,@voidptr this.m_strPoseClasses); # TODO
this.m_strPoseClasses = m_strPoseClassesJ
if ( this.m_rTrackedDevicePose[OpenVR.k_unTrackedDeviceIndex_Hmd+1].bPoseIsValid )
this.m_mat4HMDPose = this.m_rmat4DevicePose[OpenVR.k_unTrackedDeviceIndex_Hmd+1];
@mem this.m_mat4HMDPose.m = inv(this.m_mat4HMDPose.m);
end
end
function RenderCompanionWindow(app :: CMainApplication)
glDisable(GL_DEPTH_TEST);
glViewport( 0, 0, this.m_nCompanionWindowWidth, this.m_nCompanionWindowHeight );
glBindVertexArray( this.m_unCompanionWindowVAO );
glUseProgram( this.m_unCompanionWindowProgramID );
# render left eye (first half of index array )
glBindTexture(GL_TEXTURE_2D, this.leftEyeDesc.m_nResolveTextureId );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glDrawElements( GL_TRIANGLES, this.m_uiCompanionWindowIndexSize ÷ 2, GL_UNSIGNED_SHORT, C_NULL );
# render right eye (second half of index array )
glBindTexture(GL_TEXTURE_2D, this.rightEyeDesc.m_nResolveTextureId );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glDrawElements( GL_TRIANGLES, this.m_uiCompanionWindowIndexSize ÷ 2, GL_UNSIGNED_SHORT, C_NULL + this.m_uiCompanionWindowIndexSize );
glBindVertexArray( 0 );
glUseProgram( 0 );
end
#-----------------------------------------------------------------------------
# Purpose: Draw all of the controllers as X/Y/Z lines
#-----------------------------------------------------------------------------
function RenderControllerAxes(app :: CMainApplication)
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD)))) # TODO
# Don't attempt to update controllers if input is not available
if ( !OpenVR.IsInputAvailable(m_pHMD) )
return;
end
vertdataarray = Float32[];
this.m_uiControllerVertcount = 0;
this.m_iTrackedControllerCount = 0;
Left = 0 # TODO
Right = 1 # TODO
for eHand = Left:Right
if ( !this.m_rHand[eHand+1].m_bShowController )
continue;
end
mat = this.m_rHand[eHand+1].m_rmat4Pose.m;
center = mat * Float32[ 0, 0, 0, 1 ];
for i = 0:3-1
color = Float32[ 0, 0, 0 ];
point = Float32[ 0, 0, 0, 1 ];
point[i+1] += 0.05f0; # offset in X, Y, Z
color[i+1] = 1.0; # R, G, B
point = mat * point;
push!(vertdataarray, center[1] );
push!(vertdataarray, center[2] );
push!(vertdataarray, center[3] );
push!(vertdataarray, color[1] );
push!(vertdataarray, color[2] );
push!(vertdataarray, color[3] );
push!(vertdataarray, point[1] );
push!(vertdataarray, point[2] );
push!(vertdataarray, point[3] );
push!(vertdataarray, color[1] );
push!(vertdataarray, color[2] );
push!(vertdataarray, color[3] );
this.m_uiControllerVertcount += 2;
end
start = mat * Float32[ 0, 0, -0.02f0, 1 ];
ende = mat * Float32[ 0, 0, -39.0f0, 1 ];
color = Float32[ .92f0, .92f0, .71f0 ];
push!(vertdataarray, start[1] );push!(vertdataarray, start[2] );push!(vertdataarray, start[3] );
push!(vertdataarray, color[1] );push!(vertdataarray, color[2] );push!(vertdataarray, color[3] );
push!(vertdataarray, ende[1] );push!(vertdataarray, ende[2] );push!(vertdataarray, ende[3] );
push!(vertdataarray, color[1] );push!(vertdataarray, color[2] );push!(vertdataarray, color[3] );
this.m_uiControllerVertcount += 2;
end
# Setup the VAO the first time through.
if ( this.m_unControllerVAO == 0 )
m_unControllerVAOptr = @ptr this.m_unControllerVAO
glGenVertexArrays( 1, m_unControllerVAOptr );
glBindVertexArray( this.m_unControllerVAO );
m_glControllerVertBufferptr = @ptr this.m_glControllerVertBuffer
glGenBuffers( 1, m_glControllerVertBufferptr );
glBindBuffer( GL_ARRAY_BUFFER, this.m_glControllerVertBuffer );
stride = GLuint(2 * 3 * sizeof( Float32 ));
offset = 0; # GLuint
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, stride, C_NULL + offset);
offset += 3*sizeof( Float32 );
glEnableVertexAttribArray( 1 );
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, stride, C_NULL + offset);
glBindVertexArray( 0 );
end
glBindBuffer( GL_ARRAY_BUFFER, this.m_glControllerVertBuffer );
# set vertex data if we have some
if ( length(vertdataarray) > 0 )
#$ TODO: Use glBufferSubData for this...
glBufferData( GL_ARRAY_BUFFER, sizeof(Float32) * length(vertdataarray), vertdataarray, GL_STREAM_DRAW );
end
end
function RenderFrame(app :: CMainApplication)
global frame += 1
# for now as fast as possible
if ( this.m_pHMD != C_NULL )
RenderControllerAxes(app)
RenderStereoTargets(app)
RenderCompanionWindow(app)
# bounds_null = Main.VR.VRTextureBounds_t()
# bounds_null.cpp_object = C_NULL
# bounds_null = nullptr(Main.VR.VRTextureBounds_t) # like so?
glFlush();
glFinish();
comp = OpenVR.VRCompositor()
leftEyeDesc = this.leftEyeDesc
leftEyeTexture = OpenVR.Texture_t(UInt64(leftEyeDesc.m_nResolveTextureId), OpenVR.TextureType_OpenGL, OpenVR.ColorSpace_Gamma)
OpenVR.Submit(comp,OpenVR.Eye_Left,leftEyeTexture,C_NULL,OpenVR.Submit_Default)
rightEyeDesc = this.rightEyeDesc
rightEyeTexture = OpenVR.Texture_t(UInt64(rightEyeDesc.m_nResolveTextureId), OpenVR.TextureType_OpenGL, OpenVR.ColorSpace_Gamma)
OpenVR.Submit(comp,OpenVR.Eye_Right,rightEyeTexture,C_NULL,OpenVR.Submit_Default)
end
if ( this.m_bVblank && this.m_bGlFinishHack )
# $ HACKHACK. From gpuview profiling, it looks like there is a bug where two renders and a present
# happen right before and after the vsync causing all kinds of jittering issues. This glFinish()
# appears to clear that up. Temporary fix while I try to get nvidia to investigate this problem.
# 1/29/2014 mikesart
glFinish();
end
# SwapWindow
SDL.GL_SwapWindow( this.m_pCompanionWindow );
# Clear
# We want to make sure the glFinish waits for the entire present to complete, not just the submission
# of the command. So, we do a clear here right here so the glFinish will wait fully for the swap.
glClearColor( 0, 0, 0, 1 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
# Flush and wait for swap.
if ( this.m_bVblank )
glFlush();
glFinish();
end
# Spew out the controller and pose count whenever they change.
if ( this.m_iTrackedControllerCount != this.m_iTrackedControllerCount_Last || this.m_iValidPoseCount != this.m_iValidPoseCount_Last )
this.m_iValidPoseCount_Last = this.m_iValidPoseCount
this.m_iTrackedControllerCount_Last = this.m_iTrackedControllerCount
# println( "PoseCount:$(this.m_iValidPoseCount)($(this.m_strPoseClassesJ)) Controllers:$(this.m_iTrackedControllerCount)");
println( "PoseCount:$(this.m_iValidPoseCount)($(this.m_strPoseClasses)) Controllers:$(this.m_iTrackedControllerCount)");
end
UpdateHMDMatrixPose(app);
end
# VR.HmdMatrix34_t is backed by a row-major, two dimensional C-array
# so this is in Julia a transposed 3×4 matrix
# Matrix4 is backed by a column-major, one dimensional C-array
# ConvertSteamVRMatrixToMatrix4(matPose::VR.HmdMatrix34_t) = Matrix4([ (r < 4 ? matPose.m[c,r] : c < 4 ? 0f0 : 1f0) for r in 1:4, c in 1:4 ],zeros(Float32,4,4))
ConvertSteamVRMatrixToMatrix4(matPose::OpenVR.HmdMatrix34_t) = Matrix4(transpose(hcat(matPose.m,[0,0,0,1])),zeros(Float32,4,4))
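# A worked sketch of the conversion above (assuming matPose.m arrives in Julia as a
# 4×3 matrix, i.e. the transposed view of the row-major 3×4 C array): hcat appends
# the homogeneous [0,0,0,1] column and the transpose restores row orientation.
#   m = Float32[ 1 0 0 ; 0 1 0 ; 0 0 1 ; 10 20 30 ]  # pose with translation (10,20,30), as loaded in Julia
#   transpose(hcat(m, [0, 0, 0, 1]))                  # 4×4: [1 0 0 10; 0 1 0 20; 0 0 1 30; 0 0 0 1]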
# -----------------------------------------------------------------------------
# Purpose: Gets a Matrix Projection Eye with respect to nEye.
# -----------------------------------------------------------------------------
function GetHMDMatrixProjectionEye(app::CMainApplication, nEye::OpenVR.Hmd_Eye )::Matrix4
if (this.m_pHMD == C_NULL )
return Matrix4(Matrix{Float32}(I,4,4),zeros(4,4));
end
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD)))) # TODO
mat = OpenVR.GetProjectionMatrix(m_pHMD, nEye, this.m_fNearClip, this.m_fFarClip ); # OpenVR uses row-major, two dimensional C-Arrays, where we use column-major two-dimensional Julia StaticArrays
# that is why we "receive" the transposed matrix in Julia
# @GC.preserve matr mat = unsafe_load(reinterpret(Ptr{SArray{Tuple{4,4},Float32,2,16}}, matr.cpp_object))
return Matrix4(transpose(mat.m),zeros(Float32,4,4));
end
# -----------------------------------------------------------------------------
# Purpose: Gets an HMDMatrixPoseEye with respect to nEye.
# -----------------------------------------------------------------------------
function GetHMDMatrixPoseEye(app::CMainApplication, nEye::OpenVR.Hmd_Eye )::Matrix4
if (this.m_pHMD == C_NULL )
return Matrix4(Matrix{Float32}(I,4,4),zeros(4,4));
end
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD)))) # TODO
mat = OpenVR.GetEyeToHeadTransform(m_pHMD, nEye ); # OpenVR uses row-major, two dimensional C-Arrays, where we use column-major two-dimensional Julia StaticArrays
# that is why we "receive" the transposed matrix in Julia
# @GC.preserve matr mat = unsafe_load(reinterpret(Ptr{SArray{Tuple{4,4},Float32,2,16}}, matr.cpp_object))
return Matrix4(inv(transpose(hcat(mat.m,[0,0,0,1]))),zeros(Float32,4,4));
end
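# In the original hellovr sample these two matrices are composed per eye each frame,
# MVP = projection_eye * eyePos_eye * hmdPose (see GetCurrentViewProjectionMatrix there).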
# sudo pacman -S sdl2_ttf sdl2_mixer
# m_bShowCubes = true
# Left = 0
# Right = 1
# -----------------------------------------------------------------------------
# Purpose: Helper to get a string from a tracked device property and turn it
# into a std::string
# -----------------------------------------------------------------------------
function GetTrackedDeviceString( unDevice::OpenVR.TrackedDeviceIndex_t, prop::OpenVR.TrackedDeviceProperty) :: String
vrsystem = OpenVR.VRSystem()
return isnull(vrsystem) ? "" : OpenVR.GetStringTrackedDeviceProperty(vrsystem,unDevice,prop)
end
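# Usage sketch (assumes an initialized VR system), e.g. to read the HMD's tracking-system name:
#   GetTrackedDeviceString( OpenVR.k_unTrackedDeviceIndex_Hmd, OpenVR.Prop_TrackingSystemName_String )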
# ---------------------------------------------------------------------------------------------------------------------
# Purpose: Returns true if the action is active and had a rising edge
# ---------------------------------------------------------------------------------------------------------------------
function GetDigitalActionRisingEdge(action :: OpenVR.VRActionHandle_t, pDevicePath::Ptr{OpenVR.VRInputValueHandle_t} = C_NULL ) :: Bool
actionData = Ref{OpenVR.InputDigitalActionData_t}();
vrinput = OpenVR.VRInput()
OpenVR.GetDigitalActionData(vrinput,action,actionData, sizeof(actionData), OpenVR.k_ulInvalidInputValueHandle );
if (pDevicePath != C_NULL)
unsafe_store!(pDevicePath, OpenVR.k_ulInvalidInputValueHandle);
if (actionData[].bActive)
originInfo = Ref{OpenVR.InputOriginInfo_t}();
if (OpenVR.VRInputError_None == OpenVR.GetOriginTrackedDeviceInfo(vrinput,actionData[].activeOrigin, originInfo, sizeof(originInfo)))
unsafe_store!(pDevicePath,originInfo[].devicePath)
end
end
end
return actionData[].bActive && actionData[].bChanged && actionData[].bState;
end
# ---------------------------------------------------------------------------------------------------------------------
# Purpose: Returns true if the action is active and had a falling edge
# ---------------------------------------------------------------------------------------------------------------------
function GetDigitalActionFallingEdge(action::OpenVR.VRActionHandle_t, pDevicePath::Ptr{OpenVR.VRInputValueHandle_t} = C_NULL )::Bool
actionData = Ref{OpenVR.InputDigitalActionData_t}();
vrinput = OpenVR.VRInput()
OpenVR.GetDigitalActionData(vrinput,action,actionData, sizeof(actionData), OpenVR.k_ulInvalidInputValueHandle );
if (pDevicePath != C_NULL)
unsafe_store!(pDevicePath, OpenVR.k_ulInvalidInputValueHandle)
if (actionData[].bActive)
originInfo = Ref{OpenVR.InputOriginInfo_t}();
if (OpenVR.VRInputError_None == OpenVR.GetOriginTrackedDeviceInfo(vrinput,actionData[].activeOrigin, originInfo, sizeof(originInfo)))
unsafe_store!(pDevicePath, originInfo[].devicePath);
end
end
end
return actionData[].bActive && actionData[].bChanged && ~actionData[].bState;
end
# ---------------------------------------------------------------------------------------------------------------------
# Purpose: Returns true if the action is active and its state is true
# ---------------------------------------------------------------------------------------------------------------------
function GetDigitalActionState(action::OpenVR.VRActionHandle_t, pDevicePath::Ptr{OpenVR.VRInputValueHandle_t} = C_NULL ) :: Bool
actionData = Ref{OpenVR.InputDigitalActionData_t}();
vrinput = OpenVR.VRInput()
OpenVR.GetDigitalActionData(vrinput,action, actionData, sizeof(actionData), OpenVR.k_ulInvalidInputValueHandle );
if (pDevicePath != C_NULL)
unsafe_store!(pDevicePath, OpenVR.k_ulInvalidInputValueHandle);
if (actionData[].bActive)
originInfo = Ref{OpenVR.InputOriginInfo_t}();
if (OpenVR.VRInputError_None == OpenVR.GetOriginTrackedDeviceInfo(vrinput,actionData[].activeOrigin, originInfo, sizeof(originInfo)))
unsafe_store!(pDevicePath, originInfo[].devicePath);
end
end
end
return actionData[].bActive && actionData[].bState;
end
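# Summary of the three digital-action predicates above, given an active action:
#   rising edge : bChanged && bState   (just pressed)
#   falling edge: bChanged && !bState  (just released)
#   state       : bState               (currently held)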
# -----------------------------------------------------------------------------
# Purpose: Processes a single VR event
# -----------------------------------------------------------------------------
function ProcessVREvent( event::OpenVR.VREvent_t )
event.eventType == OpenVR.VREvent_TrackedDeviceDeactivated && println("Device $(event.trackedDeviceIndex) detached")
event.eventType == OpenVR.VREvent_TrackedDeviceUpdated && println("Device $(event.trackedDeviceIndex) updated")
end
# -----------------------------------------------------------------------------
# Purpose: Allocates and populates the GL resources for a render model
# -----------------------------------------------------------------------------
function BInit(this::Ref{CGLRenderModel}, vrModel::OpenVR.RenderModel_t, vrDiffuseTexture::OpenVR.RenderModel_TextureMap_t )::Bool
# create and bind a VAO to hold state for this model
glGenVertexArrays( 1, @ptr this[].m_glVertArray );
glBindVertexArray( this[].m_glVertArray );
# Populate a vertex buffer
glGenBuffers( 1, @ptr this[].m_glVertBuffer );
glBindBuffer( GL_ARRAY_BUFFER, this[].m_glVertBuffer );
glBufferData( GL_ARRAY_BUFFER, sizeof( OpenVR.RenderModel_Vertex_t ) * vrModel.unVertexCount, vrModel.rVertexData, GL_STATIC_DRAW );
# Identify the components in the vertex buffer
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, sizeof( OpenVR.RenderModel_Vertex_t ), Ptr{Nothing}(fieldoffset( OpenVR.RenderModel_Vertex_t, :vPosition )) );
glEnableVertexAttribArray( 1 );
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, sizeof( OpenVR.RenderModel_Vertex_t ), Ptr{Nothing}(fieldoffset( OpenVR.RenderModel_Vertex_t, :vNormal )) );
glEnableVertexAttribArray( 2 );
glVertexAttribPointer( 2, 2, GL_FLOAT, GL_FALSE, sizeof( OpenVR.RenderModel_Vertex_t ), Ptr{Nothing}(fieldoffset( OpenVR.RenderModel_Vertex_t, :rfTextureCoord )) );
# Create and populate the index buffer
glGenBuffers( 1, @ptr this[].m_glIndexBuffer );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, this[].m_glIndexBuffer );
rIndexData = Ptr{Nothing}(UInt64(vrModel.rIndexData1) + (UInt64(vrModel.rIndexData2) << 32)) # TODO: this is a manual conversion due to different alignment of Julia structs; we might provide a function for this (or use getindex)
glBufferData( GL_ELEMENT_ARRAY_BUFFER, sizeof( UInt16 ) * vrModel.unTriangleCount * 3, rIndexData, GL_STATIC_DRAW );
glBindVertexArray( 0 );
# create and populate the texture
glGenTextures(1, @ptr this[].m_glTexture );
glBindTexture( GL_TEXTURE_2D, this[].m_glTexture );
rubTextureMapData = Ptr{Nothing}(UInt64(vrDiffuseTexture.rubTextureMapData1) + UInt64(vrDiffuseTexture.rubTextureMapData2) << 32) # TODO: same
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, vrDiffuseTexture.unWidth, vrDiffuseTexture.unHeight,
0, GL_RGBA, GL_UNSIGNED_BYTE, rubTextureMapData);
# If this renders black ask McJohn what's wrong.
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
fLargest = Ref{GLfloat}();
glGetFloatv( GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, fLargest );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, fLargest[] );
glBindTexture( GL_TEXTURE_2D, 0 );
@yolo this[].m_unVertexCount = vrModel.unTriangleCount * 3;
return true;
end
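# Helper sketch (an addition, not in the original sample) for the manual pointer
# reassembly above: the wrapped structs split a 64-bit C pointer into two 32-bit
# fields, low half first.
reassemble_ptr(lo::Integer, hi::Integer) = Ptr{Nothing}(UInt64(lo) + (UInt64(hi) << 32))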
# -----------------------------------------------------------------------------
# Purpose: Finds a render model we've already loaded or loads a new one
# -----------------------------------------------------------------------------
function FindOrLoadRenderModel(app::CMainApplication, pchRenderModelName::String ) :: Ptr{CGLRenderModel}
pRenderModel = Ptr{CGLRenderModel}(C_NULL);
for i in this.m_vecRenderModels
# r = unsafe_pointer_to_objref(i)
r = unsafe_load(i)
# r = i
# str = VR.stdstringToJuliaString(@ptr r[].m_sModelName)
str = r.m_sModelName
if ( str == pchRenderModelName ) # reuse the cached model when its name matches
pRenderModel = i;
break;
end
end
# load the model if we didn't find one
if ( pRenderModel == C_NULL )
pModel = Ref{Ptr{OpenVR.RenderModel_t}}(C_NULL);
# pModel = Ref{VR.RenderModel_t}();
err = OpenVR.VRRenderModelError_None;
vrrendermodels = OpenVR.VRRenderModels()
while ( true )
err = OpenVR.LoadRenderModel_Async(vrrendermodels, pchRenderModelName, pModel );
if ( err != OpenVR.VRRenderModelError_Loading )
break;
end
sleep( 1e-1 );
end
if ( err != OpenVR.VRRenderModelError_None )
println( "Unable to load render model $(pchRenderModelName) - $(OpenVR.GetRenderModelErrorNameFromEnum( vrrendermodels,err ))");
return Ptr{CGLRenderModel}(C_NULL); # move on to the next tracked device
end
# renderModel = `vr_controller_vive_1_5`
# pModel->rVertexData = 0x7fec3411ee80
# pModel->unVertexCount = 12199
# pModel->rIndexData = 0x7fec3417e370
# pModel->unTriangleCount = 17326
# pModel->diffuseTextureId = 0
# pchRenderModelName = vr_controller_vive_1_5
# pModelr[].rVertexData = Ptr{Main.VR.RenderModel_Vertex_t} @0x00007fec3411eea0
# pModelr[].unVertexCount = 12199
# pModelr[].rIndexData = Ptr{UInt16} @0x000043ae00007fec
# pModelr[].unTriangleCount = 0
# pModelr[].diffuseTextureId = 300
# pModelr = unsafe_pointer_to_objref(pModel[]) # NO!
# pModelr = pModel
pModelr = Ref(unsafe_load(pModel[]))
pTexture = Ref{Ptr{OpenVR.RenderModel_TextureMap_t}}();
while ( true )
@GC.preserve pModelr err = OpenVR.LoadTexture_Async( vrrendermodels, pModelr[].diffuseTextureId, pTexture );
if ( err != OpenVR.VRRenderModelError_Loading )
break;
end
sleep( 1e-3 );
end
println("done")
# Unable to load render texture id:300 for render model vr_controller_vive_1_5
# renderModel = `vr_controller_vive_1_5`
# pModel->diffuseTextureId = 0
if ( err != OpenVR.VRRenderModelError_None )
println( "Unable to load render texture id:$(pModelr[].diffuseTextureId) for render model $(pchRenderModelName)");
OpenVR.FreeRenderModel( vrrendermodels, pModelr );
return Ptr{CGLRenderModel}(C_NULL); # move on to the next tracked device
end
println("== init pRenderModelr ==")
pRenderModelr = Ref(CGLRenderModel(pchRenderModelName))
# @mem pRenderModelr[].m_glVertBuffer = 0
# @mem pRenderModelr[].m_glIndexBuffer = 0
# @mem pRenderModelr[].m_glVertArray = 0
# @mem pRenderModelr[].m_glTexture = 0
# @mem pRenderModelr[].m_unVertexCount = 0
# @mem pRenderModelr[].m_sModelName = VR.std_string(C_NULL,0,0,0)
# VR.setJuliaStringTostdstring(pchRenderModelName,@voidptr pRenderModelr[].m_sModelName)
# pRenderModelr[].m_sModelName = pchRenderModelName
# @mem pRenderModelr[].m_sModelNameJ = pchRenderModelName
# if ( ~VR.BInit(pRenderModelr2, pModelr[], unsafe_load(pTexture[]) ) )
if ( ~BInit(pRenderModelr, pModelr[], unsafe_load(pTexture[]) ) )
println( "Unable to create GL model from render model $(pchRenderModelName)" );
# delete pRenderModelr;
pRenderModel = Ptr{CGLRenderModel}(C_NULL); # drop the failed model and move on
else
# pRenderModelptr = unsafe_load(reinterpret(Ptr{CGLRenderModel},pointer_from_objref(Ref(pRenderModelr))))
println("pRenderModelr = ")
display(pRenderModelr)
pRenderModelptr = unsafe_load(reinterpret(Ptr{Ptr{CGLRenderModel}},pointer_from_objref(pRenderModelr)))
push!(this.m_vecRenderModels, pRenderModelptr );
push!(this.m_vecRenderModelsr, pRenderModelr );
pRenderModel = pRenderModelptr
end
println("== FreeRenderModel ==")
OpenVR.FreeRenderModel(vrrendermodels, pModelr );
println("== FreeTexture ==")
OpenVR.FreeTexture(vrrendermodels, pTexture[] );
# pRenderModel = pRenderModelr
end
return pRenderModel;
end
function HandleInput(app::CMainApplication) :: Bool
buf = zeros(UInt8,56); # 56 == sizeof(SDL_Event); the union's first UInt32 is the event type
bRet = false;
while ( @GC.preserve buf SDL.PollEvent( pointer(buf) ) != 0 ) # WARNING: the object backing the pointer might get garbage collected. We need to prevent that manually (here it is the case)
event_type = unsafe_load(reinterpret(Ptr{UInt32},pointer(buf)))
# unsafe_wrap(Array,reinterpret(Ptr{UInt32},pointer(buf)),(1,); own=false)[1]
println("event_type = $(SDLText[event_type]) $(event_type)")
if ( event_type == SDL.QUIT )
bRet = true;
elseif ( event_type == SDL.KEYDOWN )
local sdlEvent = unsafe_load(reinterpret(Ptr{SDL.event_type_to_event[SDL.KEYDOWN]},pointer(buf)))
if ( sdlEvent.keysym.sym == SDL.SDLK_ESCAPE
|| sdlEvent.keysym.sym == SDL.SDLK_q )
bRet = true;
end
if ( sdlEvent.keysym.sym == SDL.SDLK_c )
this.m_bShowCubes = !this.m_bShowCubes;
end
end
end
# Process SteamVR events
event = Ref{OpenVR.VREvent_t}();
m_pHMD = unsafe_load(convert(Ptr{Main.OpenVR.IVRSystemRef},pointer_from_objref(Ref(this.m_pHMD)))) # TODO
while ( OpenVR.PollNextEvent(m_pHMD, event, sizeof( event ) ) )
ProcessVREvent( event[] );
end
# Process SteamVR action state
# UpdateActionState is called each frame to update the state of the actions themselves. The application
# controls which action sets are active with the provided array of VRActiveActionSet_t structs.
actionSet = Ref(OpenVR.VRActiveActionSet_t(0,0,0,0,0))
@mem actionSet[].ulActionSet = this.m_actionsetDemo;
vrinput = OpenVR.VRInput()
OpenVR.UpdateActionState(vrinput, actionSet, sizeof(actionSet), 1 );
# m_actionHideCubesRef = unsafe_wrap(Array,convert(Ptr{UInt64},fieldpointer(this,:m_actionHideCubes)),(1,);own=false)
# this.m_bShowCubes = ~VR.GetDigitalActionState( this.m_actionHideCubes , Ptr{UInt64}(C_NULL) );
this.m_bShowCubes = ~GetDigitalActionState( this.m_actionHideCubes , Ptr{UInt64}(C_NULL) );
Left = 0 # TODO: CxxWrap.jl enum to Uint
Right = 1 # TODO: CxxWrap.jl enum to Uint
ulHapticDevice = Ref{OpenVR.VRInputValueHandle_t}();
if ( GetDigitalActionRisingEdge( this.m_actionTriggerHaptic, convert(Ptr{UInt64},pointer_from_objref(ulHapticDevice) )) )
if ( ulHapticDevice[] == this.m_rHand[Left+1].m_source )
OpenVR.TriggerHapticVibrationAction(vrinput, this.m_rHand[Left+1].m_actionHaptic, 0f0, 1f0, 4.0f0, 1.0f0, OpenVR.k_ulInvalidInputValueHandle );
end
if ( ulHapticDevice[] == this.m_rHand[Right+1].m_source )
OpenVR.TriggerHapticVibrationAction(vrinput, this.m_rHand[Right+1].m_actionHaptic, 0f0, 1f0, 4.0f0, 1.0f0, OpenVR.k_ulInvalidInputValueHandle );
end
end
analogData = Ref{OpenVR.InputAnalogActionData_t}();
if ( OpenVR.GetAnalogActionData(vrinput, this.m_actionAnalongInput, analogData, sizeof( analogData ), OpenVR.k_ulInvalidInputValueHandle ) == OpenVR.VRInputError_None && analogData[].bActive )
@mem this.m_vAnalogValue[0+1] = analogData[].x;
@mem this.m_vAnalogValue[1+1] = analogData[].y;
end
@yolo this.m_rHand[Left+1].m_bShowController = true
@yolo this.m_rHand[Right+1].m_bShowController = true
ulHideDevice = Ref{OpenVR.VRInputValueHandle_t}();
# if ( @GC.preserve ulHideDevice VR.GetDigitalActionState( this.m_actionHideThisController, reinterpret(Ptr{UInt64},pointer_from_objref(ulHideDevice)) ) )
if ( GetDigitalActionState( this.m_actionHideThisController, reinterpret(Ptr{UInt64},pointer_from_objref(ulHideDevice)) ) )
if ( ulHideDevice[] == this.m_rHand[Left+1].m_source )
@yolo this.m_rHand[Left+1].m_bShowController = false;
end
if ( ulHideDevice[] == this.m_rHand[Right+1].m_source )
@yolo this.m_rHand[Right+1].m_bShowController = false;
end
end
for eHand = Left:Right
poseData = Ref{OpenVR.InputPoseActionData_t}();
ret_code = OpenVR.GetPoseActionData(vrinput, this.m_rHand[eHand+1].m_actionPose, OpenVR.TrackingUniverseStanding, 0.0f0, poseData, sizeof( poseData ), OpenVR.k_ulInvalidInputValueHandle )
if ( ret_code != OpenVR.VRInputError_None
|| ~poseData[].bActive || ~poseData[].pose.bPoseIsValid )
@yolo this.m_rHand[eHand+1].m_bShowController = false;
else
@yolo this.m_rHand[eHand+1].m_rmat4Pose = ConvertSteamVRMatrixToMatrix4(poseData[].pose.mDeviceToAbsoluteTracking);
originInfo = Ref{OpenVR.InputOriginInfo_t}();
if ( OpenVR.GetOriginTrackedDeviceInfo(vrinput, poseData[].activeOrigin, originInfo, sizeof( originInfo ) ) == OpenVR.VRInputError_None
&& originInfo[].trackedDeviceIndex != OpenVR.k_unTrackedDeviceIndexInvalid )
# sRenderModelName = VR.GetTrackedDeviceString( originInfo[].trackedDeviceIndex, VR.Prop_RenderModelName_String , C_NULL );
sRenderModelName = GetTrackedDeviceString(originInfo[].trackedDeviceIndex, OpenVR.Prop_RenderModelName_String)
# m_sRenderModelName = VR.stdstringToJuliaString(@voidptr this.m_rHand[eHand+1].m_sRenderModelName) # TODO
m_sRenderModelName = this.m_rHand[eHand+1].m_sRenderModelName
if ( sRenderModelName != m_sRenderModelName )
# @yolo this.m_rHand[eHand+1].m_pRenderModel = VR.FindOrLoadRenderModel(app, sRenderModelName ).cpp_object;
# @yolo this.m_rHand[eHand+1].m_pRenderModel = FindOrLoadRenderModel(app, sRenderModelName ).cpp_object;
@yolo this.m_rHand[eHand+1].m_pRenderModel = FindOrLoadRenderModel(app, sRenderModelName );
# VR.setJuliaStringTostdstring(sRenderModelName,@voidptr this.m_rHand[eHand+1].m_sRenderModelName) # TODO
@yolo this.m_rHand[eHand+1].m_sRenderModelName = sRenderModelName
end
end
end
end
return bRet;
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function Shutdown(app :: CMainApplication)
if ( this.m_pHMD != C_NULL )
OpenVR.VR_Shutdown();
this.m_pHMD = C_NULL;
end
# for( std::vector< CGLRenderModel * >::iterator i = m_vecRenderModels.begin(); i != m_vecRenderModels.end(); i++ )
# delete (*i); # Julia GC!
# end
# m_vecRenderModels.clear(); # Julia GC!
if ( this.m_pContext != C_NULL )
if ( this.m_bDebugOpenGL )
glDebugMessageControl( GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, nullptr, GL_FALSE );
glDebugMessageCallback(C_NULL, C_NULL);
end
m_glSceneVertBufferptr = @ptr this.m_glSceneVertBuffer
glDeleteBuffers(1, m_glSceneVertBufferptr);
if ( this.m_unSceneProgramID != 0 )
glDeleteProgram( this.m_unSceneProgramID );
end
if ( this.m_unControllerTransformProgramID != 0 )
glDeleteProgram( this.m_unControllerTransformProgramID );
end
if ( this.m_unRenderModelProgramID != 0 )
glDeleteProgram( this.m_unRenderModelProgramID );
end
if ( this.m_unCompanionWindowProgramID != 0 )
glDeleteProgram( this.m_unCompanionWindowProgramID );
end
glDeleteRenderbuffers( 1, (@ptr this.leftEyeDesc.m_nDepthBufferId ));
glDeleteTextures( 1, (@ptr this.leftEyeDesc.m_nRenderTextureId ));
glDeleteFramebuffers( 1, (@ptr this.leftEyeDesc.m_nRenderFramebufferId ));
glDeleteTextures( 1, (@ptr this.leftEyeDesc.m_nResolveTextureId ));
glDeleteFramebuffers( 1, (@ptr this.leftEyeDesc.m_nResolveFramebufferId ));
glDeleteRenderbuffers( 1, (@ptr this.rightEyeDesc.m_nDepthBufferId ));
glDeleteTextures( 1, (@ptr this.rightEyeDesc.m_nRenderTextureId ));
glDeleteFramebuffers( 1, (@ptr this.rightEyeDesc.m_nRenderFramebufferId ));
glDeleteTextures( 1, (@ptr this.rightEyeDesc.m_nResolveTextureId ));
glDeleteFramebuffers( 1, (@ptr this.rightEyeDesc.m_nResolveFramebufferId ));
if ( this.m_unCompanionWindowVAO != 0 )
glDeleteVertexArrays( 1, (@ptr this.m_unCompanionWindowVAO) );
end
if ( this.m_unSceneVAO != 0 )
glDeleteVertexArrays( 1, (@ptr this.m_unSceneVAO) );
end
if ( this.m_unControllerVAO != 0 )
glDeleteVertexArrays( 1, (@ptr this.m_unControllerVAO) );
end
end
if ( this.m_pCompanionWindow != C_NULL )
SDL.DestroyWindow(this.m_pCompanionWindow);
this.m_pCompanionWindow = C_NULL;
end
SDL.Quit();
end
function RunMainLoop(app :: CMainApplication)
bQuit = false;
# VR.SDL_StartTextInput();
SDL.StartTextInput();
# VR.SDL_ShowCursor( SDL.DISABLE );
SDL.ShowCursor( Int32(SDL.DISABLE) );
while ~bQuit
# bQuit = VR.HandleInput(app);
bQuit = HandleInput(app);
# VR.RenderFrame(app);
RenderFrame(app)
end
# VR.SDL_StopTextInput();
SDL.StopTextInput();
end
# -----------------------------------------------------------------------------
# Purpose: Compiles a GL shader program and returns the handle. Returns 0 if
# the shader couldn't be compiled for some reason.
# -----------------------------------------------------------------------------
function CompileGLShader( pchShaderName::String, pchVertexShader::String, pchFragmentShader::String )::GLuint
unProgramID = glCreateProgram();
nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource( nSceneVertexShader, pchVertexShader);
glCompileShader( nSceneVertexShader );
vShaderCompiled = Ref(GL_FALSE);
@GC.preserve vShaderCompiled glGetShaderiv( nSceneVertexShader, GL_COMPILE_STATUS, @ptr vShaderCompiled);
if ( vShaderCompiled[] != GL_TRUE)
println(pchShaderName, " - Unable to compile vertex shader ", nSceneVertexShader);
glDeleteProgram( unProgramID );
glDeleteShader( nSceneVertexShader );
return 0;
end
glAttachShader( unProgramID, nSceneVertexShader);
glDeleteShader( nSceneVertexShader ); # the program hangs onto this once it's attached
nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource( nSceneFragmentShader, pchFragmentShader);
glCompileShader( nSceneFragmentShader );
fShaderCompiled = Ref(GL_FALSE);
@GC.preserve fShaderCompiled glGetShaderiv( nSceneFragmentShader, GL_COMPILE_STATUS, @ptr fShaderCompiled);
if (fShaderCompiled[] != GL_TRUE)
println(pchShaderName, " - Unable to compile fragment shader ", nSceneFragmentShader );
glDeleteProgram( unProgramID );
glDeleteShader( nSceneFragmentShader );
return 0;
end
glAttachShader( unProgramID, nSceneFragmentShader );
glDeleteShader( nSceneFragmentShader ); # the program hangs onto this once it's attached
glLinkProgram( unProgramID );
programSuccess = Ref(GL_TRUE);
@GC.preserve programSuccess glGetProgramiv( unProgramID, GL_LINK_STATUS, @ptr programSuccess);
if ( programSuccess[] != GL_TRUE )
println(pchShaderName, " - Error linking program ", unProgramID);
glDeleteProgram( unProgramID );
return 0;
end
glUseProgram( unProgramID );
glUseProgram( 0 );
return unProgramID;
end
# -----------------------------------------------------------------------------
# Purpose: Creates all the shaders used by HelloVR SDL
# -----------------------------------------------------------------------------
function CreateAllShaders(app :: CMainApplication)::Bool
this.m_unSceneProgramID = CompileGLShader(
"Scene",
# Vertex Shader
"""#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec2 v2UVcoordsIn;
layout(location = 2) in vec3 v3NormalIn;
out vec2 v2UVcoords;
void main()
{
v2UVcoords = v2UVcoordsIn;
gl_Position = matrix * position;
}
""",
# Fragment Shader
"""#version 410 core
uniform sampler2D mytexture;
in vec2 v2UVcoords;
out vec4 outputColor;
void main()
{
outputColor = texture(mytexture, v2UVcoords);
}
"""
);
this.m_nSceneMatrixLocation = glGetUniformLocation( this.m_unSceneProgramID, "matrix" );
if ( this.m_nSceneMatrixLocation == -1 )
println( "Unable to find matrix uniform in scene shader" );
return false;
end
this.m_unControllerTransformProgramID = CompileGLShader(
"Controller",
# vertex shader
"""#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 v3ColorIn;
out vec4 v4Color;
void main()
{
v4Color.xyz = v3ColorIn; v4Color.a = 1.0;
gl_Position = matrix * position;
}
""",
# fragment shader
"""#version 410
in vec4 v4Color;
out vec4 outputColor;
void main()
{
outputColor = v4Color;
}
"""
);
this.m_nControllerMatrixLocation = glGetUniformLocation( this.m_unControllerTransformProgramID, "matrix" );
if ( this.m_nControllerMatrixLocation == -1 )
println( "Unable to find matrix uniform in controller shader" );
return false;
end
this.m_unRenderModelProgramID = CompileGLShader(
"render model",
# vertex shader
"""#version 410
uniform mat4 matrix;
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 v3NormalIn;
layout(location = 2) in vec2 v2TexCoordsIn;
out vec2 v2TexCoord;
void main()
{
v2TexCoord = v2TexCoordsIn;
gl_Position = matrix * vec4(position.xyz, 1);
}
""",
#fragment shader
"""#version 410 core
uniform sampler2D diffuse;
in vec2 v2TexCoord;
out vec4 outputColor;
void main()
{
outputColor = texture( diffuse, v2TexCoord);
}
"""
);
this.m_nRenderModelMatrixLocation = glGetUniformLocation( this.m_unRenderModelProgramID, "matrix" );
if ( this.m_nRenderModelMatrixLocation == -1 )
println( "Unable to find matrix uniform in render model shader" );
return false;
end
this.m_unCompanionWindowProgramID = CompileGLShader(
"CompanionWindow",
# vertex shader
"""#version 410 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec2 v2UVIn;
noperspective out vec2 v2UV;
void main()
{
v2UV = v2UVIn;
gl_Position = position;
}
""",
# fragment shader
"""#version 410 core
uniform sampler2D mytexture;
noperspective in vec2 v2UV;
out vec4 outputColor;
void main()
{
outputColor = texture(mytexture, v2UV);
}
"""
);
return (this.m_unSceneProgramID != 0
&& this.m_unControllerTransformProgramID != 0
&& this.m_unRenderModelProgramID != 0
&& this.m_unCompanionWindowProgramID != 0);
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function SetupTexturemaps(app :: CMainApplication)::Bool
# std::string sWorkingDirectory = Path_StripFilename( Path_GetWorkingDirectory() );
# std::string strFullPath = Path_MakeAbsolute( "../cube_texture.png", sWorkingDirectory );
imageRGB = Array{ColorTypes.RGB{FixedPointNumbers.Normed{UInt8,8}},2}[]; # NOTE: we load RGB, where lodepng::decode loads RGBA
# unsigned nError = lodepng::decode( imageRGBA, nImageWidth, nImageHeight, strFullPath.c_str() );
nError = 0
nImageWidth, nImageHeight = 0, 0
try
imageRGB = load(strFullPath); # NOTE: strFullPath (the cube texture path) must be defined elsewhere, cf. the commented C++ lines above
nImageWidth, nImageHeight = size(imageRGB)
catch e
println(e)
nError = 1
end
if ( nError != 0 )
return false;
end
glGenTextures(1, @ptr this.m_iTexture );
glBindTexture( GL_TEXTURE_2D, this.m_iTexture );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, nImageWidth, nImageHeight,
0, GL_RGB, GL_UNSIGNED_BYTE, imageRGB );
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
fLargest = Ref{GLfloat}();
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, fLargest);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, fLargest[]);
glBindTexture( GL_TEXTURE_2D, 0 );
return ( this.m_iTexture != 0 );
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function AddCubeVertex( fl0::Float32, fl1::Float32, fl2::Float32, fl3::Float32, fl4::Float32, vertdata::Array{Float32,1})
push!(vertdata, fl0 );
push!(vertdata, fl1 );
push!(vertdata, fl2 );
push!(vertdata, fl3 );
push!(vertdata, fl4 );
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function AddCubeToScene( mat::Matrix4, vertdata::Array{Float32,1} )
# Matrix4 mat( outermat.data() );
A = mat.m * [ 0, 0, 0, 1 ];
B = mat.m * [ 1, 0, 0, 1 ];
C = mat.m * [ 1, 1, 0, 1 ];
D = mat.m * [ 0, 1, 0, 1 ];
E = mat.m * [ 0, 0, 1, 1 ];
F = mat.m * [ 1, 0, 1, 1 ];
G = mat.m * [ 1, 1, 1, 1 ];
H = mat.m * [ 0, 1, 1, 1 ];
# triangles instead of quads
AddCubeVertex( E[1], E[2], E[3], 0f0, 1f0, vertdata ); # Front
AddCubeVertex( F[1], F[2], F[3], 1f0, 1f0, vertdata );
AddCubeVertex( G[1], G[2], G[3], 1f0, 0f0, vertdata );
AddCubeVertex( G[1], G[2], G[3], 1f0, 0f0, vertdata );
AddCubeVertex( H[1], H[2], H[3], 0f0, 0f0, vertdata );
AddCubeVertex( E[1], E[2], E[3], 0f0, 1f0, vertdata );
AddCubeVertex( B[1], B[2], B[3], 0f0, 1f0, vertdata ); # Back
AddCubeVertex( A[1], A[2], A[3], 1f0, 1f0, vertdata );
AddCubeVertex( D[1], D[2], D[3], 1f0, 0f0, vertdata );
AddCubeVertex( D[1], D[2], D[3], 1f0, 0f0, vertdata );
AddCubeVertex( C[1], C[2], C[3], 0f0, 0f0, vertdata );
AddCubeVertex( B[1], B[2], B[3], 0f0, 1f0, vertdata );
AddCubeVertex( H[1], H[2], H[3], 0f0, 1f0, vertdata ); # Top
AddCubeVertex( G[1], G[2], G[3], 1f0, 1f0, vertdata );
AddCubeVertex( C[1], C[2], C[3], 1f0, 0f0, vertdata );
AddCubeVertex( C[1], C[2], C[3], 1f0, 0f0, vertdata );
AddCubeVertex( D[1], D[2], D[3], 0f0, 0f0, vertdata );
AddCubeVertex( H[1], H[2], H[3], 0f0, 1f0, vertdata );
AddCubeVertex( A[1], A[2], A[3], 0f0, 1f0, vertdata ); # Bottom
AddCubeVertex( B[1], B[2], B[3], 1f0, 1f0, vertdata );
AddCubeVertex( F[1], F[2], F[3], 1f0, 0f0, vertdata );
AddCubeVertex( F[1], F[2], F[3], 1f0, 0f0, vertdata );
AddCubeVertex( E[1], E[2], E[3], 0f0, 0f0, vertdata );
AddCubeVertex( A[1], A[2], A[3], 0f0, 1f0, vertdata );
AddCubeVertex( A[1], A[2], A[3], 0f0, 1f0, vertdata ); # Left
AddCubeVertex( E[1], E[2], E[3], 1f0, 1f0, vertdata );
AddCubeVertex( H[1], H[2], H[3], 1f0, 0f0, vertdata );
AddCubeVertex( H[1], H[2], H[3], 1f0, 0f0, vertdata );
AddCubeVertex( D[1], D[2], D[3], 0f0, 0f0, vertdata );
AddCubeVertex( A[1], A[2], A[3], 0f0, 1f0, vertdata );
AddCubeVertex( F[1], F[2], F[3], 0f0, 1f0, vertdata ); # Right
AddCubeVertex( B[1], B[2], B[3], 1f0, 1f0, vertdata );
AddCubeVertex( C[1], C[2], C[3], 1f0, 0f0, vertdata );
AddCubeVertex( C[1], C[2], C[3], 1f0, 0f0, vertdata );
AddCubeVertex( G[1], G[2], G[3], 0f0, 0f0, vertdata );
AddCubeVertex( F[1], F[2], F[3], 0f0, 1f0, vertdata );
end
scale( mat::Matrix4,x::Float32,y::Float32,z::Float32)::Matrix4 = Matrix4( vcat(map(i -> transpose(mat.m[i,:].*[x,y,z,1][i]),1:4)...),mat.tm)
translate(mat::Matrix4,x::Float32,y::Float32,z::Float32)::Matrix4 = Matrix4(mat.m .+ vcat(map(i -> transpose(mat.m[4,:].*[x,y,z,0][i]),1:4)...),mat.tm)
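# Sketch of the two helpers above applied to the identity:
#   ID = Matrix4(Matrix{Float32}(I,4,4), zeros(Float32,4,4))
#   scale(ID, 2f0, 2f0, 2f0).m       # diagonal (2, 2, 2, 1)
#   translate(ID, 1f0, 2f0, 3f0).m   # identity with the offset (1,2,3) in its fourth column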
# -----------------------------------------------------------------------------
# Purpose: create a sea of cubes
# -----------------------------------------------------------------------------
function SetupScene(app :: CMainApplication)
if ( this.m_pHMD == C_NULL )
return;
end
vertdataarray = Float32[];
ID = Matrix4(Matrix{Float32}(I,4,4),zeros(4,4))
matScale = ID
matScale = scale(matScale, this.m_fScale, this.m_fScale, this.m_fScale );
matTransform = ID
matTransform = translate(matTransform,
-( Float32(this.m_iSceneVolumeWidth) * this.m_fScaleSpacing ) / 2f0,
-( Float32(this.m_iSceneVolumeHeight) * this.m_fScaleSpacing ) / 2f0,
-( Float32(this.m_iSceneVolumeDepth) * this.m_fScaleSpacing ) / 2f0)
mat = Matrix4(matScale.m * matTransform.m, zeros(4,4));
for z = 0:this.m_iSceneVolumeDepth-1
for y = 0:this.m_iSceneVolumeHeight-1
for x = 0:this.m_iSceneVolumeWidth-1
AddCubeToScene( mat, vertdataarray );
mat = Matrix4(mat.m * translate(ID, this.m_fScaleSpacing, 0f0, 0f0 ).m, zeros(4,4));
end
mat = Matrix4(mat.m * translate(ID, -(Float32(this.m_iSceneVolumeWidth)) * this.m_fScaleSpacing, this.m_fScaleSpacing, 0f0 ).m, zeros(4,4));
end
mat = Matrix4(mat.m * translate(ID, 0f0, -(Float32(this.m_iSceneVolumeHeight)) * this.m_fScaleSpacing, this.m_fScaleSpacing ).m, zeros(4,4));
end
this.m_uiVertcount = length(vertdataarray) ÷ 5; # 5 floats per vertex: position (3) + texcoord (2)
glGenVertexArrays( 1, @ptr this.m_unSceneVAO );
glBindVertexArray( this.m_unSceneVAO );
glGenBuffers( 1, @ptr this.m_glSceneVertBuffer );
glBindBuffer( GL_ARRAY_BUFFER, this.m_glSceneVertBuffer );
glBufferData( GL_ARRAY_BUFFER, sizeof(Float32) * length(vertdataarray), vertdataarray, GL_STATIC_DRAW);
stride = GLsizei(sizeof(VertexDataScene));
offset = C_NULL;
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, stride , offset);
offset += 3*sizeof(Float32) # sizeof(VR.Vector3);
glEnableVertexAttribArray( 1 );
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, stride, offset);
glBindVertexArray( 0 );
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function SetupCameras(app :: CMainApplication)
# m_mat4ProjectionLeft = VR.GetHMDMatrixProjectionEye(app, VR.Eye_Left ); # TODO
# @mem this.m_mat4ProjectionLeft = unsafe_load(reinterpret(Ptr{Matrix4},m_mat4ProjectionLeft.cpp_object))
@mem this.m_mat4ProjectionLeft = GetHMDMatrixProjectionEye(app, OpenVR.Eye_Left );
# m_mat4ProjectionRight = VR.GetHMDMatrixProjectionEye(app, VR.Eye_Right ); # TODO
# @mem this.m_mat4ProjectionRight = unsafe_load(reinterpret(Ptr{Matrix4},m_mat4ProjectionRight.cpp_object))
@mem this.m_mat4ProjectionRight = GetHMDMatrixProjectionEye(app, OpenVR.Eye_Right );
# m_mat4eyePosLeft = VR.GetHMDMatrixPoseEye(app, VR.Eye_Left ); # TODO
# @mem this.m_mat4eyePosLeft = unsafe_load(reinterpret(Ptr{Matrix4},m_mat4eyePosLeft.cpp_object))
@mem this.m_mat4eyePosLeft = GetHMDMatrixPoseEye(app, OpenVR.Eye_Left );
# m_mat4eyePosRight = VR.GetHMDMatrixPoseEye(app, VR.Eye_Right ); # TODO
# @mem this.m_mat4eyePosRight = unsafe_load(reinterpret(Ptr{Matrix4},m_mat4eyePosRight.cpp_object))
@mem this.m_mat4eyePosRight = GetHMDMatrixPoseEye(app, OpenVR.Eye_Right );
end
# -----------------------------------------------------------------------------
# Purpose: Creates a frame buffer. Returns true if the buffer was set up.
# Returns false if the setup failed.
# -----------------------------------------------------------------------------
function CreateFrameBuffer( nWidth::Int32, nHeight::Int32, framebufferDescp::Ptr{FramebufferDesc} )::Bool
# here we unsafe_load an (immutable) bitstype "into" a RefValue, mutate it from within (OpenGL-)C via pointers and then unsafe_store! it back into the given pointer
# TODO: if we'd support dereferencing in MemoryMutate.jl this could be just
# @ptr framebufferDesc->m_nRenderFramebufferId
framebufferDesc = Ref(unsafe_load(framebufferDescp))
glGenFramebuffers(1, @ptr framebufferDesc[].m_nRenderFramebufferId );
glBindFramebuffer(GL_FRAMEBUFFER, framebufferDesc[].m_nRenderFramebufferId);
glGenRenderbuffers(1, @ptr framebufferDesc[].m_nDepthBufferId);
glBindRenderbuffer(GL_RENDERBUFFER, framebufferDesc[].m_nDepthBufferId);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT, nWidth, nHeight );
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, framebufferDesc[].m_nDepthBufferId );
glGenTextures(1, @ptr framebufferDesc[].m_nRenderTextureId );
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, framebufferDesc[].m_nRenderTextureId );
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA8, nWidth, nHeight, true);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, framebufferDesc[].m_nRenderTextureId, 0);
glGenFramebuffers(1, @ptr framebufferDesc[].m_nResolveFramebufferId );
glBindFramebuffer(GL_FRAMEBUFFER, framebufferDesc[].m_nResolveFramebufferId);
glGenTextures(1, @ptr framebufferDesc[].m_nResolveTextureId );
glBindTexture(GL_TEXTURE_2D, framebufferDesc[].m_nResolveTextureId );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nWidth, nHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, C_NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, framebufferDesc[].m_nResolveTextureId, 0);
unsafe_store!(framebufferDescp,framebufferDesc[]) # TODO see above
# check FBO status
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
return false;
end
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
return true;
end
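# A generic sketch (an addition, not part of the original sample) of the
# load/mutate/store round-trip used above: copy an isbits struct out of C memory,
# let the caller mutate the copy, then persist it back through the pointer.
function with_struct_copy(f, p::Ptr{T}) where T
    r = Ref(unsafe_load(p))   # copy the C struct into GC-tracked memory
    f(r)                      # mutate the copy (e.g. via @ptr field pointers)
    unsafe_store!(p, r[])     # write the mutated copy back
    return r[]
end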
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function SetupStereoRenderTargets(app :: CMainApplication)::Bool
if ( this.m_pHMD == C_NULL )
return false;
end
vrsystem = OpenVR.VRSystem() # TODO: the original uses m_pHMD, which currently is a Ptr{Nothing}; so we might use VR.IVRSystemRef and replace all "this.m_pHMD == C_NULL" with the CxxWrap provided "isnull(this.m_pHMD)"
OpenVR.GetRecommendedRenderTargetSize(vrsystem, (@typedptr UInt32 this.m_nRenderWidth), @typedptr UInt32 this.m_nRenderHeight );
CreateFrameBuffer(this.m_nRenderWidth, this.m_nRenderHeight, @ptr this.leftEyeDesc );
CreateFrameBuffer(this.m_nRenderWidth, this.m_nRenderHeight, @ptr this.rightEyeDesc );
return true;
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function SetupCompanionWindow(app :: CMainApplication)
if ( this.m_pHMD == C_NULL )
return;
end
vVerts = VertexDataWindow[]
# left eye verts
push!(vVerts, VertexDataWindow( [-1, -1], [0, 1] ) );
push!(vVerts, VertexDataWindow( [0, -1], [1, 1] ) );
push!(vVerts, VertexDataWindow( [-1, 1], [0, 0] ) );
push!(vVerts, VertexDataWindow( [0, 1], [1, 0] ) );
# right eye verts
push!(vVerts, VertexDataWindow( [0, -1], [0, 1] ) );
push!(vVerts, VertexDataWindow( [1, -1], [1, 1] ) );
push!(vVerts, VertexDataWindow( [0, 1], [0, 0] ) );
push!(vVerts, VertexDataWindow( [1, 1], [1, 0] ) );
vIndices = GLushort[ 0, 1, 3, 0, 3, 2, 4, 5, 7, 4, 7, 6]; # two quads, one per eye (verts 0-3 left, 4-7 right), each split into two triangles
this.m_uiCompanionWindowIndexSize = length(vIndices);
glGenVertexArrays( 1, @ptr this.m_unCompanionWindowVAO );
glBindVertexArray( this.m_unCompanionWindowVAO );
glGenBuffers( 1, @ptr this.m_glCompanionWindowIDVertBuffer );
glBindBuffer( GL_ARRAY_BUFFER, this.m_glCompanionWindowIDVertBuffer );
glBufferData( GL_ARRAY_BUFFER, length(vVerts)*sizeof(VertexDataWindow), vVerts, GL_STATIC_DRAW );
glGenBuffers( 1, @ptr this.m_glCompanionWindowIDIndexBuffer );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, this.m_glCompanionWindowIDIndexBuffer );
glBufferData( GL_ELEMENT_ARRAY_BUFFER, this.m_uiCompanionWindowIndexSize*sizeof(GLushort), vIndices, GL_STATIC_DRAW );
glEnableVertexAttribArray( 0 );
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VertexDataWindow), Ptr{Nothing}(fieldoffset( VertexDataWindow, :position )) );
glEnableVertexAttribArray( 1 );
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(VertexDataWindow), Ptr{Nothing}(fieldoffset( VertexDataWindow, :texCoord )) );
glBindVertexArray( 0 );
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
end
# -----------------------------------------------------------------------------
# Purpose: Initialize OpenGL. Returns true if OpenGL has been successfully
# initialized, false if shaders could not be created.
# If failure occurred in a module other than shaders, the function
# may return true or throw an error.
# -----------------------------------------------------------------------------
function BInitGL(app :: CMainApplication)::Bool
if ( this.m_bDebugOpenGL )
# glDebugMessageCallback( (GLDEBUGPROC)DebugCallback, nullptr); # TODO
glDebugMessageControl( GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, C_NULL, GL_TRUE );
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
end
if ( ~CreateAllShaders(app) )
return false;
end
SetupTexturemaps(app);
SetupScene(app);
SetupCameras(app);
SetupStereoRenderTargets(app);
SetupCompanionWindow(app);
return true;
end
# -----------------------------------------------------------------------------
# Purpose: Initialize Compositor. Returns true if the compositor was
# successfully initialized, false otherwise.
# -----------------------------------------------------------------------------
function BInitCompositor(app :: CMainApplication)::Bool
peError = OpenVR.VRInitError_None;
if ( cpp_object(OpenVR.VRCompositor()) == C_NULL )
println( "Compositor initialization failed. See log file for details\n" );
return false;
end
return true;
end
# -----------------------------------------------------------------------------
# Purpose:
# -----------------------------------------------------------------------------
function BInit(app :: CMainApplication)::Bool
if ( SDL.Init( SDL.INIT_VIDEO | SDL.INIT_TIMER ) < 0 )
println("BInit - SDL could not initialize! SDL Error: ", SDL.GetError());
return false;
end
# Loading the SteamVR Runtime
eError = Ref(OpenVR.VRInitError_None);
m_pHMD = OpenVR.VR_Init( eError, OpenVR.VRApplication_Scene , ""); # the last parameter, pStartupInfo, is reserved for future use.
this.m_pHMD = cpp_object(m_pHMD)
# this.m_pHMD = OpenVR.VRSystem()
if ( eError[] != OpenVR.VRInitError_None )
this.m_pHMD = C_NULL;
# char buf[1024];
# sprintf_s( buf, sizeof( buf ), "Unable to init VR runtime: %s", VR.VR_GetVRInitErrorAsEnglishDescription( eError ) );
# SDL.ShowSimpleMessageBox( SDL.MESSAGEBOX_ERROR, "VR_Init Failed", buf, C_NULL );
# SDL.ShowSimpleMessageBox( SDL.MESSAGEBOX_ERROR, "VR_Init Failed", "TODO: VR_GetVRInitErrorAsEnglishDescription", C_NULL ); # TODO
# SDL.ShowSimpleMessageBox( SDL.MESSAGEBOX_ERROR, "VR_Init Failed", unsafe_string(OpenVR.VR_GetVRInitErrorAsEnglishDescription(eError[])), C_NULL ); # TODo
return false;
end
nWindowPosX = Int32(700);
nWindowPosY = Int32(100);
unWindowFlags = SDL.WINDOW_OPENGL | SDL.WINDOW_SHOWN;
SDL.GL_SetAttribute( SDL.GL_CONTEXT_MAJOR_VERSION, 4 );
SDL.GL_SetAttribute( SDL.GL_CONTEXT_MINOR_VERSION, 1 );
#SDL.GL_SetAttribute( SDL.GL_CONTEXT_PROFILE_MASK, SDL.GL_CONTEXT_PROFILE_COMPATIBILITY );
SDL.GL_SetAttribute( SDL.GL_CONTEXT_PROFILE_MASK, SDL.GL_CONTEXT_PROFILE_CORE );
SDL.GL_SetAttribute( SDL.GL_MULTISAMPLEBUFFERS, 0 );
SDL.GL_SetAttribute( SDL.GL_MULTISAMPLESAMPLES, 0 );
if ( this.m_bDebugOpenGL )
SDL.GL_SetAttribute( SDL.GL_CONTEXT_FLAGS, SDL.GL_CONTEXT_DEBUG_FLAG );
end
# SDL.SetHint(SDL.HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "1"); # defaults to "1" anyway?
this.m_pCompanionWindow = SDL.CreateWindow( "hellovr", nWindowPosX, nWindowPosY, this.m_nCompanionWindowWidth, this.m_nCompanionWindowHeight, unWindowFlags );
if (this.m_pCompanionWindow == C_NULL)
println( "BInit - Window could not be created! SDL Error: ", SDL.GetError() );
return false;
end
this.m_pContext = SDL.GL_CreateContext(this.m_pCompanionWindow);
if (this.m_pContext == C_NULL)
println( "BInit - OpenGL context could not be created! SDL Error: ", SDL.GetError() );
return false;
end
# # NOTE: this seems to work without GLEW apparently
# VR.setglewExperimental(UInt8(GL_TRUE)); # TODO
# nGlewError = VR.glewInit(); # TODO
# if (nGlewError != GLEW_OK)
# printf( "BInit - Error initializing GLEW! ", VR.glewGetErrorString( nGlewError ) ); # TODO
# return false;
# end
glGetError(); # to clear the error caused deep in GLEW
if ( SDL.GL_SetSwapInterval( Int32(this.m_bVblank ? 1 : 0) ) < 0 )
printf( "BInit - Warning: Unable to set VSync! SDL Error: ", SDL.GetError() );
return false;
end
this.m_strDriver = "No Driver";
# VR.setJuliaStringTostdstring(m_strDriver,@voidptr this.m_strDriver) # TODO
this.m_strDisplay = "No Display";
# VR.setJuliaStringTostdstring(m_strDisplay,@voidptr this.m_strDisplay) # TODO
this.m_strDriver = GetTrackedDeviceString( OpenVR.k_unTrackedDeviceIndex_Hmd, OpenVR.Prop_TrackingSystemName_String );
# VR.setJuliaStringTostdstring(m_strDriver,@voidptr this.m_strDriver) # TODO
this.m_strDisplay = GetTrackedDeviceString( OpenVR.k_unTrackedDeviceIndex_Hmd, OpenVR.Prop_SerialNumber_String );
# VR.setJuliaStringTostdstring(m_strDisplay,@voidptr this.m_strDisplay) # TODO
strWindowTitle = "hellovr - " * this.m_strDriver * " " * this.m_strDisplay;
SDL.SetWindowTitle( this.m_pCompanionWindow, strWindowTitle );
# cube array
this.m_iSceneVolumeWidth = this.m_iSceneVolumeInit;
this.m_iSceneVolumeHeight = this.m_iSceneVolumeInit;
this.m_iSceneVolumeDepth = this.m_iSceneVolumeInit;
this.m_fScale = 0.3f0;
this.m_fScaleSpacing = 4.0f0;
this.m_fNearClip = 0.1f0;
this.m_fFarClip = 30.0f0;
this.m_iTexture = 0;
this.m_uiVertcount = 0;
# m_MillisecondsTimer.start(1, this);
# m_SecondsTimer.start(1000, this);
if (~BInitGL(app))
println("BInit - Unable to initialize OpenGL!");
return false;
end
if (~BInitCompositor(app))
printf("BInit - Failed to initialize VR Compositor!");
return false;
end
# VR.VRInput()->SetActionManifestPath( Path_MakeAbsolute( "../hellovr_actions.json", Path_StripFilename( Path_GetExecutablePath() ) ).c_str() );
println("SetActionManifestPath of $ActionManifestPath");
vrinput = OpenVR.VRInput()
OpenVR.SetActionManifestPath(vrinput, ActionManifestPath );
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/HideCubes", @ptr this.m_actionHideCubes);
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/HideThisController", @ptr this.m_actionHideThisController);
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/TriggerHaptic", @ptr this.m_actionTriggerHaptic);
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/AnalogInput", @ptr this.m_actionAnalongInput);
OpenVR.GetActionSetHandle(vrinput, "/actions/demo", @ptr this.m_actionsetDemo);
Left = 0 # TODO
Right = 1 # TODO
OpenVR.GetActionHandle(vrinput, "/actions/demo/out/Haptic_Left", @ptr this.m_rHand[Left+1].m_actionHaptic);
OpenVR.GetInputSourceHandle(vrinput, "/user/hand/left", @ptr this.m_rHand[Left+1].m_source);
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/Hand_Left", @ptr this.m_rHand[Left+1].m_actionPose);
OpenVR.GetActionHandle(vrinput, "/actions/demo/out/Haptic_Right", @ptr this.m_rHand[Right+1].m_actionHaptic);
OpenVR.GetInputSourceHandle(vrinput, "/user/hand/right", @ptr this.m_rHand[Right+1].m_source);
OpenVR.GetActionHandle(vrinput, "/actions/demo/in/Hand_Right", @ptr this.m_rHand[Right+1].m_actionPose);
return true;
end
if BInit(jMainApplication)
# VR.RunMainLoop(jMainApplication)
vrsystem = OpenVR.VRSystem()
println("TrackingSystemName = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_TrackingSystemName_String))")
println("SerialNumber = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_SerialNumber_String))")
println("ModelNumber = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_ModelNumber_String))")
println("RenderModelName = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_RenderModelName_String))")
println("ManufacturerName = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_ManufacturerName_String))")
println("TrackingFirmwareVersion = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_TrackingFirmwareVersion_String))")
println("HardwareRevision = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_HardwareRevision_String))")
println("AllWirelessDongleDescriptions = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_AllWirelessDongleDescriptions_String))")
println("ConnectedWirelessDongle = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_ConnectedWirelessDongle_String))")
println("Firmware_ManualUpdateURL = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_Firmware_ManualUpdateURL_String))")
println("Firmware_ProgrammingTarget = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_Firmware_ProgrammingTarget_String))")
println("DriverVersion = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_DriverVersion_String))")
println("ResourceRoot = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_ResourceRoot_String))")
println("RegisteredDeviceType = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_RegisteredDeviceType_String))")
println("InputProfilePath = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_InputProfilePath_String))")
println("AdditionalDeviceSettingsPath = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_AdditionalDeviceSettingsPath_String))")
println("DisplayMCImageLeft = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_DisplayMCImageLeft_String))")
println("DisplayMCImageRight = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_DisplayMCImageRight_String))")
println("DisplayGCImage = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_DisplayGCImage_String))")
println("CameraFirmwareDescription = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_CameraFirmwareDescription_String))")
println("DriverProvidedChaperonePath = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_DriverProvidedChaperonePath_String))")
println("NamedIconPathControllerLeftDeviceOff = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_NamedIconPathControllerLeftDeviceOff_String))")
println("NamedIconPathControllerRightDeviceOff = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_NamedIconPathControllerRightDeviceOff_String))")
println("NamedIconPathTrackingReferenceDeviceOff = $(OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd,OpenVR.Prop_NamedIconPathTrackingReferenceDeviceOff_String))")
RunMainLoop(jMainApplication)
end
Shutdown(jMainApplication)
# VR.HandleInput(jMainApplication)
# RenderFrame(jMainApplication)
# RenderFrame(jMainApplication)
# RenderFrame(jMainApplication)
# eError = Ref(OpenVR.VRInitError_None);
# m_pHMD = OpenVR.VR_Init( eError, OpenVR.VRApplication_Scene , "");
# vrsystem = OpenVR.VRSystem()
# err = Ref(OpenVR.TrackedProp_Success)
# OpenVR.GetStringTrackedDeviceProperty(vrsystem,OpenVR.k_unTrackedDeviceIndex_Hmd, OpenVR.Prop_TrackingSystemName_String,Cstring(C_NULL),0,err)
#
# fntable = unsafe_load(reinterpret(Ptr{OpenVR.VR_IVRSystem_FnTable},vrsystem))
# fntable.GetStringTrackedDeviceProperty
# ccall(unsafe_load(reinterpret(Ptr{VR_IVRSystem_FnTable},this)).GetStringTrackedDeviceProperty, UInt32, (TrackedDeviceIndex_t, ETrackedDeviceProperty, Cstring, UInt32, Ptr{ETrackedPropertyError},), unDeviceIndex, prop, pchValue, unBufferSize, pError)
# GetRecommendedRenderTargetSize = Ptr{Nothing} @0x00007f6ee221e958
# GetProjectionMatrix = Ptr{Nothing} @0x00007f6ee221eba8
# GetProjectionRaw = Ptr{Nothing} @0x00007f6ee221ec68
# ComputeDistortion = Ptr{Nothing} @0x00007f6ee2245220
# GetEyeToHeadTransform = Ptr{Nothing} @0x00007f6ee2227918
# GetTimeSinceLastVsync = Ptr{Nothing} @0x0000000000000000
# GetD3D9AdapterIndex = Ptr{Nothing} @0x00007f6ee2245220
# GetDXGIOutputInfo = Ptr{Nothing} @0x0000000000000000
# GetOutputDevice = Ptr{Nothing} @0x0000000000000000
# IsDisplayOnDesktop = Ptr{Nothing} @0x0000000000000001
# SetDisplayVisibility = Ptr{Nothing} @0x0000000000000000
# GetDeviceToAbsoluteTrackingPose = Ptr{Nothing} @0x0000000000000000
# ResetSeatedZeroPose = Ptr{Nothing} @0x0000000000000001
# GetSeatedZeroPoseToStandingAbsoluteTrackingPose = Ptr{Nothing} @0x0000000000000000
# GetRawZeroPoseToStandingAbsoluteTrackingPose = Ptr{Nothing} @0x0000000000000000
# GetSortedTrackedDeviceIndicesOfClass = Ptr{Nothing} @0x0000000000000000
# GetTrackedDeviceActivityLevel = Ptr{Nothing} @0x0000000000000000
# ApplyTransform = Ptr{Nothing} @0x0000000000000000
# GetTrackedDeviceIndexForControllerRole = Ptr{Nothing} @0x00007f6ee2243040
# GetControllerRoleForTrackedDeviceIndex = Ptr{Nothing} @0x00007f6ee2242318
# GetTrackedDeviceClass = Ptr{Nothing} @0x00007f6ee2244a38
# IsTrackedDeviceConnected = Ptr{Nothing} @0x00007f6ee2242308
# GetBoolTrackedDeviceProperty = Ptr{Nothing} @0x00007f6ee2244b90
# GetFloatTrackedDeviceProperty = Ptr{Nothing} @0x0000000000000001
# GetInt32TrackedDeviceProperty = Ptr{Nothing} @0x0000561697392920
# GetUint64TrackedDeviceProperty = Ptr{Nothing} @0x00005616963f9850
# GetMatrix34TrackedDeviceProperty = Ptr{Nothing} @0x000000000000007c
# GetArrayTrackedDeviceProperty = Ptr{Nothing} @0x0000000000000000
# GetStringTrackedDeviceProperty = Ptr{Nothing} @0x0000000000000000
# GetPropErrorNameFromEnum = Ptr{Nothing} @0x0000000000000000
# PollNextEvent = Ptr{Nothing} @0x0000007c0000007c
# PollNextEventWithPose = Ptr{Nothing} @0x0000007c0000007c
# GetEventTypeNameFromEnum = Ptr{Nothing} @0x0000007c0000007c
# GetHiddenAreaMesh = Ptr{Nothing} @0x0000007c0000007c
# GetControllerState = Ptr{Nothing} @0x0000007c0000007c
# GetControllerStateWithPose = Ptr{Nothing} @0x0000007c0000007c
# TriggerHapticPulse = Ptr{Nothing} @0x0000007c0000007c
# GetButtonIdNameFromEnum = Ptr{Nothing} @0x0000007c0000007c
# GetControllerAxisTypeNameFromEnum = Ptr{Nothing} @0x0000007c0000007c
# IsInputAvailable = Ptr{Nothing} @0x0000007c0000007c
# IsSteamVRDrawingControllers = Ptr{Nothing} @0x0000007c0000007c
# ShouldApplicationPause = Ptr{Nothing} @0x0000007c0000007c
# ShouldApplicationReduceRenderingWork = Ptr{Nothing} @0x0000007c0000007c
# DriverDebugRequest = Ptr{Nothing} @0x0000007c0000007c
# PerformFirmwareUpdate = Ptr{Nothing} @0x0000007c0000007c
# AcknowledgeQuit_Exiting = Ptr{Nothing} @0x0000007c0000007c
# AcknowledgeQuit_UserPrompt = Ptr{Nothing} @0x0000007c0000007c
|
{"hexsha": "781623a02822a973ce7dbfa8a54606673866023f", "size": 79804, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/hellovr_opengl_julia.jl", "max_stars_repo_name": "mchristianl/OpenVR.jl", "max_stars_repo_head_hexsha": "55c1a115e518d1176ab885478d20e3dbb324a199", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-09T16:46:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-29T06:02:45.000Z", "max_issues_repo_path": "test/hellovr_opengl_julia.jl", "max_issues_repo_name": "mchristianl/OpenVR.jl", "max_issues_repo_head_hexsha": "55c1a115e518d1176ab885478d20e3dbb324a199", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/hellovr_opengl_julia.jl", "max_forks_repo_name": "mchristianl/OpenVR.jl", "max_forks_repo_head_hexsha": "55c1a115e518d1176ab885478d20e3dbb324a199", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8117106774, "max_line_length": 252, "alphanum_fraction": 0.679151421, "num_tokens": 22053}
|
import torchvision
import torch
import numpy as np
def get_mnist_batcher(batch_size):
"""Downloads MNIST and stores in the data folder in case it is not available, and
builds a data loader based batcher of the specified size
Args:
batch_size (int): size of the minibatch
Returns:
torch data_loader: iterator that builds minibatches of the specified batch
"""
def transform_mnist(x):
return np.array(x).astype(np.float32).reshape(-1) / 255
data = torchvision.datasets.MNIST("data", download=True, transform=transform_mnist)
data_loader = torch.utils.data.DataLoader(
data, batch_size=batch_size, shuffle=True, drop_last=True
)
return data_loader
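if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): pull one minibatch and
    # confirm the flattened 28 * 28 = 784 feature shape produced by
    # transform_mnist.
    loader = get_mnist_batcher(batch_size=64)
    images, labels = next(iter(loader))
    print(images.shape)  # torch.Size([64, 784])
    print(labels.shape)  # torch.Size([64])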
|
{"hexsha": "bca06256f758a8ba579feb9b3d8f79516f70bbc7", "size": 723, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data.py", "max_stars_repo_name": "ivallesp/VAE", "max_stars_repo_head_hexsha": "57340b02ee7b07e7e1e236c8a48aa7bd7fab364b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data.py", "max_issues_repo_name": "ivallesp/VAE", "max_issues_repo_head_hexsha": "57340b02ee7b07e7e1e236c8a48aa7bd7fab364b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data.py", "max_forks_repo_name": "ivallesp/VAE", "max_forks_repo_head_hexsha": "57340b02ee7b07e7e1e236c8a48aa7bd7fab364b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.92, "max_line_length": 87, "alphanum_fraction": 0.7136929461, "include": true, "reason": "import numpy", "num_tokens": 166}
|
# coding: utf-8
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module is a wrapper for AntechamberRunner which generates force field files
or a specified molecule using gaussian output file as input. Currently, the AntechamberRunner
class does not work properly.
"""
import shlex
import subprocess
import tempfile
import numpy as np
import parmed as pmd
from collections import namedtuple, OrderedDict
from monty.dev import requires
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Molecule
from pymatgen.io.lammps.data import Topology, ForceField
__author__ = 'Navnidhi Rajput, Kiran Mathew, Matthew Bliss'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Matthew Bliss'
__email__ = 'mbliss01@tufts.edu'
__date__ = '1/29/20'
Gaff_dict = {'c': '12.01', 'c1': '12.01', 'c2': '12.01', 'c3': '12.01', 'ca': '12.01', 'cp': '12.01', 'cq': '12.01',
'cc': '12.01', 'cd': '12.01', 'ce': '12.01', 'cf': '12.01', 'cg': '12.01', 'ch': '12.01', 'cx': '12.01',
'cy': '12.01', 'cu': '12.01', 'cv': '12.01', 'cz': '12.01', 'h1': '1.008', 'h2': '1.008', 'h3': '1.008',
'h4': '1.008', 'h5': '1.008', 'ha': '1.008', 'hc': '1.008', 'hn': '1.008', 'ho': '1.008', 'hp': '1.008',
'hs': '1.008', 'hw': '1.008', 'hx': '1.008', 'f': '19.00', 'cl': '35.45', 'br': '79.90', 'i': '126.9',
'n': '14.01', 'n1': '14.01', 'n2': '14.01', 'n3': '14.01', 'n4': '14.01', 'na': '14.01', 'nb': '14.01',
'nc': '14.01', 'nd': '14.01', 'ne': '14.01', 'nf': '14.01', 'nh': '14.01', 'no': '14.01', 'ni': '14.01',
'nj': '14.01', 'nk': '14.01', 'nl': '14.01', 'nm': '14.01', 'nn': '14.01', 'np': '14.01', 'nq': '14.01',
'o': '16.00', 'oh': '16.00', 'os': '16.00', 'op': '16.00', 'oq': '16.00', 'ow': '16.00', 'p2': '30.97',
'p3': '30.97', 'p4': '30.97', 'p5': '30.97', 'pb': '30.97', 'pc': '30.97', 'pd': '30.97', 'pe': '30.97',
'pf': '30.97', 'px': '30.97', 'py': '30.97', 's': '32.06', 's2': '32.06', 's4': '32.06', 's6': '32.06',
'sh': '32.06', 'ss': '32.06', 'sp': '32.06', 'sq': '32.06', 'sx': '32.06', 'sy': '32.06', 'cs': '12.01',
'ns': '14.01', 'nt': '14.01', 'nx': '14.01', 'ny': '14.01', 'nz': '14.01', 'n+': '14.01', 'nu': '14.01',
'nv': '14.01', 'n7': '14.01', 'n8': '14.01', 'n9': '14.01', 'n5': '14.01', 'n6': '14.01'}
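# Masses above are stored as strings keyed by GAFF atom type and converted
# at lookup time, e.g. float(Gaff_dict['ca']) == 12.01.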
class AntechamberRunner(object):
"""
A wrapper for AntechamberRunner software
"""
@requires((which('parmchk') or which('parmchk2')), "Requires the binary parmchk."
"Install AmberTools from http://ambermd.org/#AmberTools")
@requires(which('antechamber'), "Requires the binary antechamber."
"Install AmberTools from http://ambermd.org/#AmberTools")
@requires(which('tleap'), "Requires the binary tleap."
"Install AmberTools from http://ambermd.org/#AmberTools")
def __init__(self, mols):
"""
Args:
mols: List of molecules
"""
self.mols = mols
if which('parmchk'):
self.parmchk_version = 'parmchk'
else:
self.parmchk_version = 'parmchk2'
def _run_parmchk(self, filename="mol.mol2", format="mol2", outfile_name="mol.frcmod",
print_improper_dihedrals="Y"):
"""
run parmchk
"""
command = self.parmchk_version + " -i {} -f {} -o {} -w {}".format(filename, format, outfile_name,
print_improper_dihedrals)
exit_code = subprocess.call(shlex.split(command))
return exit_code
def _run_antechamber(self, filename, infile_format="gout", outfile_name="mol",
outfile_format="mol2", charge_method="resp", status_info=2):
"""
run antechamber using the provided gaussian output file
"""
command = "antechamber -i {} -fi {} -o {}.{} -fo {} -c {} -s {}".format(filename,
infile_format,
outfile_name,
outfile_format,
outfile_format,
charge_method,
status_info)
        # don't think 'charmm' is even an option for -fo
        # GeneralizedForceField tries to read in a *.ac (antechamber format) file and Topology
        # is trying to read in a *.rtf (charmm format topology) file !!! WHY?!!
# command = 'antechamber -i ' + filename + " -fi gout -o mol -fo charmm -c resp -s 2"
exit_code = subprocess.call(shlex.split(command))
return exit_code
def _run_tleap(self, mol_name='mol'):
'''
run tleap
'''
lines = []
lines.append('source leaprc.gaff')
lines.append('{} = loadmol2 {}.mol2'.format(mol_name,mol_name))
lines.append('check {}'.format(mol_name))
lines.append('loadamberparams {}.frcmod'.format(mol_name))
# lines.append('saveoff {} {}.lib'.format(mol_name,mol_name))
lines.append('saveamberparm {} {}.prmtop {}.inpcrd'.format(mol_name,mol_name,mol_name))
lines.append('quit')
text = '\n'.join(lines)
with open('tleap.in', 'w') as file:
file.write(text)
file.close()
command = 'tleap -f tleap.in'
exit_code = subprocess.call(shlex.split(command))
return exit_code
def _run_tleap_existing_param(self, file_resname, ff_resname, sources = ['leaprc.ff14SB'], mol_name=None):
if not mol_name:
lines = []
for source in sources:
lines.append('source ' + source)
lines.append('{} = {}'.format(file_resname, ff_resname))
def _get_gaussian_ff_top_single(self, filename=None):
"""
run antechamber using gaussian output file, then run parmchk
to generate missing force field parameters. Store and return
the force field and topology information in ff_mol.
Args:
filename: gaussian output file of the molecule
Returns:
Amberff namedtuple object that contains information on force field and
topology
"""
pass
# scratch = tempfile.gettempdir()
# Amberff = namedtuple("Amberff", ["force_field", "topology"])
# with ScratchDir(scratch, copy_from_current_on_enter=True,
# copy_to_current_on_exit=True) as d:
# # self._convert_to_pdb(mol, 'mol.pdb')
# # self.molname = filename.split('.')[0]
# self._run_antechamber(filename)
# self._run_parmchk()
# # if antechamber can't find parameters go to gaff_example.dat
# try:
# mol = Molecule.from_file('mol.rtf')
# print('mol.rtf file exists')
# except TopCorruptionException:
# correct_corrupted_top_files('mol.rtf', 'gaff_data.txt')
# top = Topology.from_file('mol.rtf')
# print('mol.rtf file does not exist')
# try:
# gff = ForceField.from_file('mol.frcmod')
# except FFCorruptionException:
# correct_corrupted_frcmod_files('ANTECHAMBER.FRCMOD', 'gaff_data.txt')
# gff = ForceField.from_file('ANTECHAMBER.FRCMOD')
# gff.set_atom_mappings('ANTECHAMBER_AC.AC')
# gff.read_charges()
# decorate the molecule with the sire property "atomname"
#mol.add_site_property("atomname", (list(gff.atom_index.values())))
# return Amberff(gff, top)
def get_gaussian_ff_top(self, filenames):
"""
return a list of amber force field and topology for the list of
gaussian output filenames corresponding to each molecule in mols list.
Args:
filenames (list): list of gaussian output files for each type of molecule
Returns:
list of Amberff namedtuples
"""
pass
# amber_ffs = []
# for fname in filenames:
# amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))
# return amber_ffs
def get_bond_param(amberparm,ff_label):
'''
Reads bond force field parameters and outputs list of dictionaries in proper format for instantiating PyMatGen ForceField object.
Removes duplicate bond parameters even when atom order is reversed.
:param amberparm: (parmed.amber._amberparm.AmberParm) Recommended to get from parmed.load_file(filename.prmtop) method.
The *.prmtop file should only include one molecule from running some of the methods for the Rubicon AntechamberRunner class.
:param ff_label: (str) String that will be appended to the Amber atom type strings such that
the same Amber atomtype from different molecules can have different force field parameters associated with them.
    Recommended to use the molecule's name.
:return ff_bond_types: (list) List of Dicts containing the bond force field parameters for the molecule in the proper format
for the PyMatGen ForceField object.
'''
ff_bond_types = []
for bond in amberparm.bonds:
add_bond = True
coeffs = [bond.type.k, bond.type.req]
atom_types = (bond.atom1.type + ff_label,
bond.atom2.type + ff_label)
for old_type in ff_bond_types:
if coeffs == old_type['coeffs']:
if atom_types not in old_type['types'] and atom_types[::-1] not in old_type['types']:
old_type['types'].append(atom_types)
add_bond = False
break
if add_bond:
ff_bond_types.append({'coeffs': coeffs, 'types': [atom_types]})
return ff_bond_types
def get_angle_param(amberparm,ff_label):
'''
Reads angle force field parameters and outputs list of Dicts that can be used for creating PyMatGen ForceField object.
Similar to get_bond_param() function.
:param amberparm: (parmed.amber._amberparm.AmberParm) Recommended to get from parmed.load_file(filename.prmtop) method.
The *.prmtop file should only include one molecule from running some of the methods for the Rubicon AntechamberRunner class.
:param ff_label: (str) String that will be appended to the Amber atom type strings such that
the same Amber atomtype from different molecules can have different force field parameters associated with them.
    Recommended to use the molecule's name.
:return ff_bond_types: (list) List of Dicts containing the angle force field parameters for the molecule in the proper format
for the PyMatGen ForceField object.
'''
ff_angle_types = []
for angle in amberparm.angles:
add_angle = True
coeffs = [angle.type.k, angle.type.theteq]
atom_types = (angle.atom1.type + ff_label,
angle.atom2.type + ff_label,
angle.atom3.type + ff_label)
for old_type in ff_angle_types:
if coeffs == old_type['coeffs']:
if atom_types not in old_type['types'] and atom_types[::-1] not in old_type['types']:
old_type['types'].append(atom_types)
add_angle = False
break
if add_angle:
ff_angle_types.append({'coeffs': coeffs, 'types': [atom_types]})
return ff_angle_types
def get_dihedral_param(amberparm,ff_label):
'''
Reads dihedral force field parameters and outputs list of Dicts that can be used for creating PyMatGen ForceField objects.
Similar to get_bond_param() function. Removes duplicate proper dihedrals even when bond order is reversed. Removes duplicate
improper dihedrals without reversing bond order.
:param amberparm: (parmed.amber._amberparm.AmberParm) Recommended to get from parmed.load_file(filename.prmtop) method.
The *.prmtop file should only include one molecule from running some of the methods for the Rubicon AntechamberRunner class.
:param ff_label: (str) String that will be appended to the Amber atom type strings such that the same Amber atomtype from
different molecules can have different force field parameters associated with them.
:return (ff_dihedral_types,ff_improper_types): (tuple) contains two lists of Dicts containing the proper dihedral and improper
dihedral force field parameters for the molecule, respectively, in the proper format for the PyMatGen ForceField object.
'''
ff_dihedral_types = []
ff_improper_types = []
for dihedral in amberparm.dihedrals:
add_dihedral = True
atom_types = (dihedral.atom1.type + ff_label,
dihedral.atom2.type + ff_label,
dihedral.atom3.type + ff_label,
dihedral.atom4.type + ff_label)
if int(dihedral.type.phase) % 360 == 180:
coeffs = [dihedral.type.phi_k, -1, dihedral.type.per]
elif int(dihedral.type.phase) % 360 == 0:
coeffs = [dihedral.type.phi_k, 1, dihedral.type.per]
else:
raise ValueError('The phase of the dihedral was a value other than 0 or 180 mod(360)')
if not dihedral.improper:
for old_d_type in ff_dihedral_types:
if coeffs == old_d_type['coeffs']:
if atom_types not in old_d_type['types'] and atom_types[::-1] not in old_d_type['types']:
old_d_type['types'].append(atom_types)
add_dihedral = False
break
if add_dihedral:
ff_dihedral_types.append({'coeffs': coeffs, 'types': [atom_types]})
else:
for old_i_type in ff_improper_types:
if coeffs == old_i_type['coeffs']:
if atom_types not in old_i_type['types']:
old_i_type['types'].append(atom_types)
add_dihedral = False
break
if add_dihedral:
ff_improper_types.append({'coeffs': coeffs, 'types': [atom_types]})
return (ff_dihedral_types,ff_improper_types)
def make_ff_bonded_type_list(types_dict, mol_name=''):
'''
Makes list for use as value in topo_coeffs input when instantiating PyMatGen ForceField object.
Should work for bond, angle, dihedral, and improper parameters.
:param types_dict [dict]: keys are ff parameters, values are sets of tuples of atom types involved. Intended to be obtained from OTHER_FUNCTION
:param mol_name [str]: molecule name to use for making ff_labels (see PyMatGen Topology object)
:return: list of dictionaries containing bonded coefficients and atom types involved
'''
bonded_type = []
for params in types_dict.keys():
bonded_type.append({'coeffs': [param for param in params], 'types': []})
for atom_types in types_dict[params]:
atom_labels = [atom_type + mol_name for atom_type in atom_types]
bonded_type[-1]['types'].append(tuple(atom_labels))
return bonded_type
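# Micro-demo (illustrative) of make_ff_bonded_type_list's input/output shape:
#   make_ff_bonded_type_list({(340.0, 1.09): {('c3', 'hc')}}, mol_name='_h2')
#   -> [{'coeffs': [340.0, 1.09], 'types': [('c3_h2', 'hc_h2')]}]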
def get_nonbond_param(amberparm,molecule_name):
'''
Reads mass and LJ parameters from the parmed.amberparm object and stores in the format required by the
pymatgen.ForceField object.
:param amberparm: (parmed.amber._amberparm.AmberParm) Recommended to get from parmed.load_file(filename.prmtop) method.
The *.prmtop file should only include one molecule from running some of the methods for the Rubicon AntechamberRunner class.
:param molecule_name: (str) molecule name to use for making ff_labels (see PyMatGen Topology object)
:return: tuple of OrderedDict and list of lists. The OrderedDict contains the atom labels as keys and the masses as values.
The list of lists contains LJ parameters that correspond to the ordering of the keys in the OrderedDict.
'''
Masses_ordered_dict = OrderedDict()
Nonbond_param_list = []
Label_list = [type + molecule_name for type in amberparm.LJ_types.keys()]
    for type in Label_list:
        # Strip the appended molecule_name only when one was given; with an
        # empty name, type[:-0] would evaluate to '' and break the lookup.
        gaff_type = type[:-len(molecule_name)] if molecule_name else type
        Masses_ordered_dict[type] = float(Gaff_dict[gaff_type])
    for type in Label_list:
        amber_type = type[:-len(molecule_name)] if molecule_name else type
        index = amberparm.LJ_types[amber_type] - 1
        Nonbond_param_list.append([amberparm.LJ_depth[index], amberparm.LJ_radius[index] * 2 ** (5/6)])
return Masses_ordered_dict, Nonbond_param_list
def check_partial_charge_sum(partial_charges, net_charge=0, tolerance=10**-16):
'''
Compares the sum of the partial charges of atoms in a molecule to the net charge of the molecule.
    If the sum differs from the net charge by more than the tolerance, then all partial charges are
    shifted by the same amount such that the new sum of partial charges is within the tolerance of the net charge.
:param partial_charges: [array-like] the partial charges of the atoms in a molecule
:param net_charge: [int] the net charge of the molecule; defaults to 0
:param tolerance: [float] the desired tolerance for the difference; defaults to 10**-16
:return p_charges_array: [np.array] the new partial charges such that the difference is within the tolerance
'''
p_charges_array = np.asarray(partial_charges)
charge_difference = net_charge - np.sum(partial_charges)
correction = charge_difference / len(partial_charges)
    # Apply the uniform shift whenever the difference exceeds the tolerance
    # in either direction.
    if abs(charge_difference) > tolerance:
        p_charges_array += correction
return p_charges_array
def prmtop_to_python(file_name, pmg_molecule, ff_label, tolerance=10**-16):
'''
Extracts relevant parameters from *.prmtop file containing single molecule from tleap and stores them
in a dictionary. The partial charges are corrected such that their sum is within a tolerance of the net
charge.
:param file_name: [str] the filename of the *.prmtop file
:param pmg_molecule: [pymatgen Molecule] Intended to be obtained from the same GaussianOutput object used
to get the *.prmtop file. Make sure the net charge is set correctly.
:param ff_label: [str] the label used to differentiate atoms with the same atomtypes, but from different
molecules in the pymatgen.io.lammps.data ForceField object.
:return PyParm: [dict] Contains all the relevant force field parameters for the molecule as follows:
{'Molecule': pymatgen Molecule
'Masses': OrderedDict([('atom_1' + ff_label, mass), ...]),
'Nonbond': [[sigma_1, epsilon_1], ...],
'Bonds': [{'coeffs': [k_1, r_eq_1], 'types': [('atom_a' + ff_label, 'atom_b' + ff_label), ...]}, ...],
'Angles': [{'coeffs': [k_1, theta_eq_1], 'types': [('atom_a' + ff_label, 'atom_b' + ff_label, 'atom_c' + ff_label), ...]}, ...],
'Dihedrals': [{'coeffs': [phi_k_1, phase_1, per_1], 'types': [('atom_a' + ff_label, 'atom_b' + ff_label, 'atom_c' + ff_label, 'atom_d' + ff_label), ...]}, ...],
'Impropers': [{'coeffs': [phi_k_1, phase_1, per_1], 'types': [('atom_a' + ff_label, 'atom_b' + ff_label, 'atom_c' + ff_label, 'atom_d' + ff_label), ...]}, ...],
'Charges': [charge_1, ...]
'''
Amberparm = pmd.load_file(file_name)
Bond_parm = get_bond_param(Amberparm, ff_label)
Angle_parm = get_angle_param(Amberparm, ff_label)
Dihedral_parm, Improper_parm = get_dihedral_param(Amberparm, ff_label)
Masses, Nonbond_parm = get_nonbond_param(Amberparm, ff_label)
Charges = np.asarray(Amberparm.parm_data[Amberparm.charge_flag])
Corrected_Charges = list(check_partial_charge_sum(Charges,
pmg_molecule.charge,
tolerance))
PyParm = {'Molecule': pmg_molecule,
'Masses': Masses,
'Nonbond': Nonbond_parm,
'Bonds': Bond_parm,
'Angles': Angle_parm,
'Dihedrals': Dihedral_parm,
'Impropers': Improper_parm,
'Charges': Corrected_Charges}
return PyParm
class PrmtopParser:
'''
Object for parsing information necessary for LammpsDataWrapper from *.prmtop files containing a single molecule
using the ParmEd package.
'''
def __init__(self,prmtop_file_name,pmg_molecule,unique_molecule_name,check_partial_charges=True,tolerance=10**-16):
        '''
        :param prmtop_file_name: [str] path to a *.prmtop file containing a single molecule
        :param pmg_molecule: [pymatgen Molecule] molecule corresponding to the *.prmtop file
        :param unique_molecule_name: [str] label appended to atom types so identical Amber
            atomtypes from different molecules stay distinguishable
        :param check_partial_charges: [bool] whether to shift partial charges so their sum
            matches the molecule's net charge
        :param tolerance: [float] allowed difference between the summed partial charges and
            the net charge
        '''
self._prmtop_file_name = prmtop_file_name
self._mol_name = unique_molecule_name
self._molecule = pmg_molecule
self._check_partial_charges = check_partial_charges
self._tolerance = tolerance
self._amberparm = pmd.load_file(self._prmtop_file_name)
@property
def LabelTypeList(self):
'''
        List of force field labels, one for each Lennard-Jones atom type in the molecule.
'''
Label_list = [type + self._mol_name for type in self._amberparm.LJ_types.keys()]
return Label_list
@property
def Masses(self):
'''
OrderedDict of Masses in the format of OrderedDict({'atom_label_a':mass_a,...}), where 'atom_label' is a string of
amber atomtype concatenated with the unique_molecule_label.
'''
Masses_ordered_dict = OrderedDict()
for type in self.LabelTypeList:
if self._mol_name:
Masses_ordered_dict[type] = float(Gaff_dict[type[:-len(self._mol_name)].lower()])
else:
Masses_ordered_dict[type] = float(Gaff_dict[type.lower()])
return Masses_ordered_dict
@property
def LJParam(self):
'''
List of lists for the Lennard Jones parameters in the format of [[epsilon_a, sigma_a],...].
:return:
'''
LJ_param_list = []
for type in self.LabelTypeList:
if self._mol_name:
index = self._amberparm.LJ_types[type[:-len(self._mol_name)]] - 1
else:
index = self._amberparm.LJ_types[type] - 1
LJ_param_list.append([self._amberparm.LJ_depth[index],self._amberparm.LJ_radius[index] * 2 ** (5/6)])
return LJ_param_list
@property
def BondParam(self):
'''
List of dicts for the bond parameters in the format of [{'coeffs':coeffs_1,'types':[(i,j),...]},...].
Automatically filters out any duplicates.
'''
Bond_param_list = []
for bond in self._amberparm.bonds:
add_bond = True
coeffs = [bond.type.k, bond.type.req]
atom_types = (bond.atom1.type + self._mol_name,
bond.atom2.type + self._mol_name)
for old_type in Bond_param_list:
if coeffs == old_type['coeffs']:
if atom_types not in old_type['types'] and atom_types[::-1] not in old_type['types']:
old_type['types'].append(atom_types)
add_bond = False
break
if add_bond:
Bond_param_list.append({'coeffs':coeffs,'types':[atom_types]})
return Bond_param_list
@property
def AngleParam(self):
'''
List of dicts for the angle parameters in the format of [{'coeffs':coeffs_1,'types':[(i,j,k),...]},...].
'''
Angle_param_list = []
for angle in self._amberparm.angles:
add_angle = True
coeffs = [angle.type.k, angle.type.theteq]
atom_types = (angle.atom1.type + self._mol_name,
angle.atom2.type + self._mol_name,
angle.atom3.type + self._mol_name)
for old_type in Angle_param_list:
if coeffs == old_type['coeffs']:
if atom_types not in old_type['types'] and atom_types[::-1] not in old_type['types']:
old_type['types'].append(atom_types)
add_angle = False
break
if add_angle:
Angle_param_list.append({'coeffs':coeffs,'types':[atom_types]})
return Angle_param_list
@property
def DihedralParam(self):
'''
List of dicts for the proper dihedral parameters in the format of [{'coeffs':coeffs_1,'types':[(i,j,k,l),...]},...].
'''
Dihedral_param_list = []
for index, dihedral in enumerate(self._amberparm.dihedrals):
add_dihedral = True
atom_types = (dihedral.atom1.type + self._mol_name,
dihedral.atom2.type + self._mol_name,
dihedral.atom3.type + self._mol_name,
dihedral.atom4.type + self._mol_name)
if int(dihedral.type.phase) % 360 == 180:
coeffs = [dihedral.type.phi_k, -1, dihedral.type.per]
elif int(dihedral.type.phase) % 360 == 0:
coeffs = [dihedral.type.phi_k, 1, dihedral.type.per]
else:
raise ValueError('The phase of the dihedral at index ' + str(index) + ' was a value other than 0 or 180 mod(360).')
if not dihedral.improper:
for old_d_type in Dihedral_param_list:
if coeffs == old_d_type['coeffs']:
if atom_types not in old_d_type['types'] and atom_types[::-1] not in old_d_type['types']:
old_d_type['types'].append(atom_types)
add_dihedral = False
break
if add_dihedral:
Dihedral_param_list.append({'coeffs':coeffs,'types':[atom_types]})
return Dihedral_param_list
@property
def ImproperParam(self):
'''
List of dicts for the improper dihedral parameters in the format of [{'coeffs':coeffs_1,'types':[(i,j,k,l),...]},...].
'''
Improper_param_list = []
for index, dihedral in enumerate(self._amberparm.dihedrals):
add_dihedral = True
atom_types = (dihedral.atom1.type + self._mol_name,
dihedral.atom2.type + self._mol_name,
dihedral.atom3.type + self._mol_name,
dihedral.atom4.type + self._mol_name)
if int(dihedral.type.phase) % 360 == 180:
coeffs = [dihedral.type.phi_k, -1, dihedral.type.per]
elif int(dihedral.type.phase) % 360 == 0:
coeffs = [dihedral.type.phi_k, 1, dihedral.type.per]
else:
raise ValueError('The phase of the dihedral at index ' + str(index) + ' was a value other than 0 or 180 mod(360).')
if dihedral.improper:
for old_i_type in Improper_param_list:
if coeffs == old_i_type['coeffs']:
if atom_types not in old_i_type['types']:
old_i_type['types'].append(atom_types)
add_dihedral = False
break
if add_dihedral:
Improper_param_list.append({'coeffs':coeffs,'types':[atom_types]})
return Improper_param_list
@property
def ImproperTopologies(self):
'''
List of lists for the improper topologies in the format of [[index_i,index_j,index_k,index_l],...].
:return:
'''
impropers = []
for dihedral in self._amberparm.dihedrals:
if dihedral.improper:
impropers.append(dihedral)
Improper_topology_list = []
if impropers:
for improper in impropers:
atom1_index = self._amberparm.atoms.index(improper.atom1)
atom2_index = self._amberparm.atoms.index(improper.atom2)
atom3_index = self._amberparm.atoms.index(improper.atom3)
atom4_index = self._amberparm.atoms.index(improper.atom4)
Improper_topology_list.append([atom1_index,atom2_index,atom3_index,atom4_index])
else:
Improper_topology_list = None
return Improper_topology_list
@property
def Charges(self):
'''
np.array of partial charges. Checks to make sure that the sum of the partial charges is within a tolerance of
the net charge of the molecule.
:return:
'''
Charges_array = np.asarray(self._amberparm.parm_data[self._amberparm.charge_flag])
if self._check_partial_charges:
charge_difference = self._molecule.charge - np.sum(Charges_array)
charge_correction = charge_difference / len(Charges_array)
            # Apply the uniform shift whenever the difference exceeds the
            # tolerance in either direction.
            if abs(charge_difference) > self._tolerance:
                Charges_array += charge_correction
return list(Charges_array)
def to_dict(self):
        '''
        Collect all parsed force field parameters into a single dictionary,
        keyed like the output of prmtop_to_python().
        '''
Labels = [atom.type + self._mol_name for atom in self._amberparm.atoms]
Param_dict = {'Molecule':self._molecule,
'Labels':Labels,
'Masses':self.Masses,
'Nonbond':self.LJParam,
'Bonds':self.BondParam,
'Angles':self.AngleParam,
'Dihedrals':self.DihedralParam,
'Impropers':self.ImproperParam,
'Improper Topologies':self.ImproperTopologies,
'Charges':self.Charges}
return Param_dict
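if __name__ == '__main__':
    # Self-contained check (added for illustration) of check_partial_charge_sum:
    # partial charges summing to +0.03 on a neutral molecule are shifted
    # uniformly so the corrected sum returns to ~0 within the tolerance.
    demo_charges = [0.41, -0.82, 0.44]
    corrected = check_partial_charge_sum(demo_charges, net_charge=0, tolerance=1e-12)
    print(corrected, corrected.sum())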
|
{"hexsha": "22dc7c7f0b08a9282f0e2a7cc968ae8b17e75583", "size": 29914, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymatgen/io/ambertools.py", "max_stars_repo_name": "mmbliss/pymatgen", "max_stars_repo_head_hexsha": "0d2e39bb6406d934c03e08919f2cd4dedb41bc22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-28T19:19:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-30T18:10:32.000Z", "max_issues_repo_path": "pymatgen/io/ambertools.py", "max_issues_repo_name": "mmbliss/pymatgen", "max_issues_repo_head_hexsha": "0d2e39bb6406d934c03e08919f2cd4dedb41bc22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-03T17:59:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-12T00:43:59.000Z", "max_forks_repo_path": "pymatgen/io/ambertools.py", "max_forks_repo_name": "mmbliss/pymatgen", "max_forks_repo_head_hexsha": "0d2e39bb6406d934c03e08919f2cd4dedb41bc22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-27T15:30:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-27T15:30:10.000Z", "avg_line_length": 48.4045307443, "max_line_length": 168, "alphanum_fraction": 0.6005883533, "include": true, "reason": "import numpy", "num_tokens": 7343}
|
[STATEMENT]
lemma swap_rule [hoare_triple]:
"i < length xs \<Longrightarrow> j < length xs \<Longrightarrow>
<p \<mapsto>\<^sub>a xs>
swap p i j
<\<lambda>_. p \<mapsto>\<^sub>a list_swap xs i j>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>i < length xs; j < length xs\<rbrakk> \<Longrightarrow> <p \<mapsto>\<^sub>a xs> swap p i j <\<lambda>_. p \<mapsto>\<^sub>a list_swap xs i j>
[PROOF STEP]
by auto2
|
{"llama_tokens": 163, "file": "Auto2_Imperative_HOL_Imperative_Arrays_Impl", "length": 1}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 13:48:41 2020
@author: Amir Moradi
"""
import cv2
import numpy as np
def undistortion(img_1, img_2):
    """Undistort a stereo image pair with the stored calibration matrices and
    crop each result to its valid region of interest."""
    h, w = img_1.shape[:2]
    Camera_L_Matrix = np.load("SmartCar/Calibration/matrices/Camera_L_Matrix.npy")
Camera_R_Matrix = np.load("SmartCar/Calibration/matrices/Camera_R_Matrix.npy")
distorsionL = np.load("SmartCar/Calibration/matrices/distorsionL.npy")
distorsionR = np.load("SmartCar/Calibration/matrices/distorsionR.npy")
#Get optimal camera matrix for better undistortion
new_cameraL_matrix, roi_l = cv2.getOptimalNewCameraMatrix(Camera_L_Matrix, distorsionL, (w,h), 1, (w,h))
new_cameraR_matrix, roi_r = cv2.getOptimalNewCameraMatrix(Camera_R_Matrix, distorsionR, (w,h), 1, (w,h))
img_1_undistorted = cv2.undistort(img_1, Camera_L_Matrix, distorsionL, None, new_cameraL_matrix)
img_2_undistorted = cv2.undistort(img_2, Camera_R_Matrix, distorsionR, None, new_cameraR_matrix)
roi_x, roi_y, roi_w, roi_h = roi_l
img_1_undistorted = img_1_undistorted[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
roi_x_r, roi_y_r, roi_w_r, roi_h_r = roi_r
img_2_undistorted = img_2_undistorted[roi_y_r : roi_y_r + roi_h_r, roi_x_r : roi_x_r + roi_w_r]
return img_1_undistorted, img_2_undistorted
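# Usage sketch (illustrative; the image paths are assumptions). The stereo
# pair must come from the cameras described by the stored .npy calibration:
#   left = cv2.imread("left.png")
#   right = cv2.imread("right.png")
#   und_left, und_right = undistortion(left, right)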
|
{"hexsha": "f3ea4be195565fa932710c2775cd60dc9db1b2ef", "size": 1346, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/undistortion.py", "max_stars_repo_name": "Amirmoradi94/SmartCar", "max_stars_repo_head_hexsha": "4c0f17a6a98e6db46769787dc95d11e48b335488", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-15T04:33:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-15T18:20:15.000Z", "max_issues_repo_path": "Utils/undistortion.py", "max_issues_repo_name": "Amirmoradi94/SmartCar", "max_issues_repo_head_hexsha": "4c0f17a6a98e6db46769787dc95d11e48b335488", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Utils/undistortion.py", "max_forks_repo_name": "Amirmoradi94/SmartCar", "max_forks_repo_head_hexsha": "4c0f17a6a98e6db46769787dc95d11e48b335488", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-07T15:38:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T15:38:47.000Z", "avg_line_length": 34.5128205128, "max_line_length": 108, "alphanum_fraction": 0.7265973254, "include": true, "reason": "import numpy", "num_tokens": 447}
|
import inspect
import numpy as np
from numpy import testing
# def PrintFrame():
# callerframerecord = inspect.stack() #[1] # 0 represents this line
# print(callerframerecord[1]) # 1 represents line at caller
# frame = callerframerecord[1][0]
# print(frame)
# info = inspect.getframeinfo(frame)
# print(info.filename) # __FILE__ -> Test.py
# print(info.function) # __FUNCTION__ -> Main
# print(info.lineno) # __LINE__ -> 13
# def Main():
# PrintFrame() # for this line
# Main()
class Test(np.ndarray):
def __new__(cls, input_array):
obj = np.asarray(input_array).view(cls)
obj.cat = 1
return obj
def __array_finalize__(self, obj):
self.cat = getattr(obj, 'cat', 1)
def _cat(self):
self.cat = 0
test = Test(np.random.rand(2, 2))
print(id(test))
test._cat()
test_ = test.view(np.ndarray)
print(id(test_))
#test = test_.view(Test)
print(test.cat)
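# Round-trip demo (added): viewing the plain ndarray back as Test goes
# through __array_finalize__ with a source object that has no .cat, so the
# attribute falls back to its default of 1 -- the earlier _cat() mutation
# does not survive the plain-ndarray detour.
test_roundtrip = test_.view(Test)
print(test_roundtrip.cat)  # 1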
|
{"hexsha": "3c70f63edfd8e3e9bde13db6243a84cde60a9f04", "size": 1068, "ext": "py", "lang": "Python", "max_stars_repo_path": "ds_clean/test.py", "max_stars_repo_name": "swjz/DSClean", "max_stars_repo_head_hexsha": "1311ea373049f089b0b96e9913c02fe0c339dee4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ds_clean/test.py", "max_issues_repo_name": "swjz/DSClean", "max_issues_repo_head_hexsha": "1311ea373049f089b0b96e9913c02fe0c339dee4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ds_clean/test.py", "max_forks_repo_name": "swjz/DSClean", "max_forks_repo_head_hexsha": "1311ea373049f089b0b96e9913c02fe0c339dee4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-12T06:37:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-12T06:37:25.000Z", "avg_line_length": 28.8648648649, "max_line_length": 102, "alphanum_fraction": 0.5627340824, "include": true, "reason": "import numpy,from numpy", "num_tokens": 270}
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.constants import golden
mpl.rc("text", usetex=True)
mpl.rc("font", family="serif")
x = np.array([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
t = np.array([-4.9, -3.5, -2.8, 0.8, 0.3, -1.6, -1.3, 0.5, 2.1, 2.9, 5.6])
def f(x):
return 3*np.sin((1/2)*np.pi * x) - 2*np.sin((3/2) * np.pi * x)
M = 4
N = len(x)
X = np.zeros((N, M+1))
for m in range(M+1):
X[:, m] = x**m
beta = np.linalg.inv(X.T @ X) @ X.T @ t
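# Cross-check (added): np.linalg.lstsq avoids forming the normal equations
# explicitly and should agree with the closed-form beta above.
beta_lstsq, *_ = np.linalg.lstsq(X, t, rcond=None)
assert np.allclose(beta, beta_lstsq)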
h = np.poly1d(np.flip(beta, 0))
x_ = np.linspace(x.min()-0.025, x.max()+0.025, 250)
t_ = h(x_)
fig = plt.figure(figsize=(8, 8/golden))
ax = fig.add_subplot()
ax.scatter(x, t,
edgecolors = "magenta",
c = "None",
s = 12.5,
marker = "o"
)
ax.plot(x_, t_,
color="turquoise",
linewidth = 1,
label = "Predicted"
)
true = np.linspace(x.min()-0.025, x.max()+0.025, 250)
ax.plot(
true, f(true),
color="magenta",
linewidth = 1,
label = "True"
)
ax.set_xlim(x.min()-0.025, x.max()+0.025)
ax.set_xticks([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_xticklabels(["$-1.0$", "$-0.8$", "$-0.6$", "$-0.4$", "$-0.2$", "$0.0$", "$0.2$", "$0.4$", "$0.6$", "$0.8$", "$1.0$"])
ax.legend(frameon=False, fontsize=14)
plt.tight_layout()
plt.savefig("poly_reg.png")
plt.show()
|
{"hexsha": "8d7be49f3795ab46f2fc0c42edbf5728487d9d6f", "size": 1339, "ext": "py", "lang": "Python", "max_stars_repo_path": "extra/bsmalea-notes-1a/polynomial_regression.py", "max_stars_repo_name": "cookieblues/cookieblues.github.io", "max_stars_repo_head_hexsha": "9b570d83887eb2d6f92cfaa927a1adf136124a90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extra/bsmalea-notes-1a/polynomial_regression.py", "max_issues_repo_name": "cookieblues/cookieblues.github.io", "max_issues_repo_head_hexsha": "9b570d83887eb2d6f92cfaa927a1adf136124a90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-30T14:58:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-10T15:15:06.000Z", "max_forks_repo_path": "extra/bsmalea-notes-1a/polynomial_regression.py", "max_forks_repo_name": "cookieblues/cookieblues.github.io", "max_forks_repo_head_hexsha": "9b570d83887eb2d6f92cfaa927a1adf136124a90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.253968254, "max_line_length": 124, "alphanum_fraction": 0.5384615385, "include": true, "reason": "import numpy,from scipy", "num_tokens": 578}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrift/test/reflection/gen-cpp2/reflection_types.h>
#include <typeindex>
#include <boost/mp11.hpp>
#include <folly/portability/GTest.h>
#include <thrift/lib/cpp2/type/Tag.h>
using apache::thrift::detail::st::struct_private_access;
namespace apache::thrift::type {
static_assert(
std::is_same_v<
struct_private_access::fields<test_cpp2::cpp_reflection::struct3>,
fields<
field_t<FieldId{2}, i32_t>,
field_t<FieldId{1}, string_t>,
field_t<FieldId{3}, enum_t<::test_cpp2::cpp_reflection::enum1>>,
field_t<FieldId{4}, enum_t<::test_cpp2::cpp_reflection::enum2>>,
field_t<FieldId{5}, union_t<::test_cpp2::cpp_reflection::union1>>,
field_t<FieldId{6}, union_t<::test_cpp2::cpp_reflection::union2>>,
field_t<FieldId{7}, struct_t<::test_cpp2::cpp_reflection::struct1>>,
field_t<FieldId{8}, union_t<::test_cpp2::cpp_reflection::union2>>,
field_t<FieldId{9}, list<i32_t>>,
field_t<FieldId{10}, list<string_t>>,
field_t<FieldId{11}, list<string_t>>,
field_t<
FieldId{12},
list<struct_t<::test_cpp2::cpp_reflection::structA>>>,
field_t<FieldId{13}, set<i32_t>>,
field_t<FieldId{14}, set<string_t>>,
field_t<FieldId{15}, set<string_t>>,
field_t<
FieldId{16},
set<struct_t<::test_cpp2::cpp_reflection::structB>>>,
field_t<
FieldId{17},
map<string_t, struct_t<::test_cpp2::cpp_reflection::structA>>>,
field_t<
FieldId{18},
map<string_t, struct_t<::test_cpp2::cpp_reflection::structB>>>,
field_t<FieldId{19}, map<binary_t, binary_t>>>>);
struct ExtractFieldsInfo {
std::vector<int> ids;
std::vector<std::type_index> tags;
template <FieldId Id, class Tag>
void operator()(field_t<Id, Tag>) {
ids.push_back(int(Id));
tags.emplace_back(typeid(Tag));
}
};
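// boost::mp11::mp_for_each invokes the functor above once per field_t,
// recording each field's id and the typeid of its type tag in declaration order.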
TEST(Fields, for_each) {
const std::vector<int> expectedIds = {
2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19};
const std::vector<std::type_index> expectedTags = {
typeid(i32_t),
typeid(string_t),
typeid(enum_t<::test_cpp2::cpp_reflection::enum1>),
typeid(enum_t<::test_cpp2::cpp_reflection::enum2>),
typeid(union_t<::test_cpp2::cpp_reflection::union1>),
typeid(union_t<::test_cpp2::cpp_reflection::union2>),
typeid(struct_t<::test_cpp2::cpp_reflection::struct1>),
typeid(union_t<::test_cpp2::cpp_reflection::union2>),
typeid(list<i32_t>),
typeid(list<string_t>),
typeid(list<string_t>),
typeid(list<struct_t<::test_cpp2::cpp_reflection::structA>>),
typeid(set<i32_t>),
typeid(set<string_t>),
typeid(set<string_t>),
typeid(set<struct_t<::test_cpp2::cpp_reflection::structB>>),
typeid(map<string_t, struct_t<::test_cpp2::cpp_reflection::structA>>),
typeid(map<string_t, struct_t<::test_cpp2::cpp_reflection::structB>>),
typeid(map<binary_t, binary_t>)};
ExtractFieldsInfo info;
boost::mp11::mp_for_each<
struct_private_access::fields<test_cpp2::cpp_reflection::struct3>>(info);
EXPECT_EQ(info.ids, expectedIds);
EXPECT_EQ(info.tags, expectedTags);
}
struct Emplacer {
test_cpp2::cpp_reflection::struct3& s;
template <FieldId Id, class Tag>
void operator()(field_t<Id, Tag>) {
op::get<Id>(s).emplace();
}
};
TEST(Fields, get) {
test_cpp2::cpp_reflection::struct3 s;
s.fieldA() = 10;
EXPECT_EQ(op::get<FieldId{2}>(s), 10);
op::get<FieldId{2}>(s) = 20;
EXPECT_EQ(*s.fieldA(), 20);
EXPECT_TRUE(
(std::is_same_v<decltype(s.fieldA()), decltype(op::get<FieldId{2}>(s))>));
s.fieldE()->ui_ref() = 10;
EXPECT_EQ(op::get<FieldId{5}>(s)->ui_ref(), 10);
op::get<FieldId{5}>(s)->us_ref() = "20";
EXPECT_EQ(s.fieldE()->us_ref(), "20");
EXPECT_TRUE(
(std::is_same_v<decltype(s.fieldE()), decltype(op::get<FieldId{5}>(s))>));
boost::mp11::mp_for_each<
struct_private_access::fields<test_cpp2::cpp_reflection::struct3>>(
Emplacer{s});
EXPECT_EQ(*s.fieldA(), 0);
EXPECT_FALSE(s.fieldE()->us_ref());
}
} // namespace apache::thrift::type
|
{"hexsha": "a43e9c36bcf1b21dc3533c4ca7443e9c3f941d43", "size": 4915, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "thrift/test/reflection/FieldsTest.cpp", "max_stars_repo_name": "ahornby/fbthrift", "max_stars_repo_head_hexsha": "59dd614960da745e6a7b89c69c7aac77e0adf9b5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thrift/test/reflection/FieldsTest.cpp", "max_issues_repo_name": "ahornby/fbthrift", "max_issues_repo_head_hexsha": "59dd614960da745e6a7b89c69c7aac77e0adf9b5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thrift/test/reflection/FieldsTest.cpp", "max_forks_repo_name": "ahornby/fbthrift", "max_forks_repo_head_hexsha": "59dd614960da745e6a7b89c69c7aac77e0adf9b5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1397058824, "max_line_length": 80, "alphanum_fraction": 0.6427263479, "num_tokens": 1377}
|
from quchem_ibm.IBM_experiment_functions import *
import pickle
import os
import argparse
import numpy as np
def main(method_name):
molecule_name='H2'
## Load input data
base_dir = os.getcwd()
data_dir = os.path.join(base_dir, 'Input_data')
input_file = os.path.join(data_dir, 'H2_bravyi_kitaev_2_qubit_experiment_time=2020Sep21-162239536536.pickle')
with open(input_file, 'rb') as handle:
input_data = pickle.load(handle)
## Get IBM account
my_provider = load_IBM_provider()
# IBM_backend = Get_IBM_backends(my_provider, show_least_busy=False)
IBM_backend = 'ibmqx2'
# IBM_backend = None
# # goes up to 4914 for standard VQE and 8190 for unitary partitioning!
# shot_experiment_list = np.arange(1190 * 3, (8190 * 3) + 1, 2625, dtype=int)
# if method_name == 'standard_VQE':
# shot_list=shot_experiment_list/len(input_data['standard_VQE_circuits'])
# else:
# shot_list = shot_experiment_list/len(input_data['Seq_Rot_VQE_circuits'])
## array([1190., 2065., 2940., 3815., 4690., 5565., 6440., 7315., 8190.])
## array([ 714., 1239., 1764., 2289., 2814., 3339., 3864., 4389., 4914.])
if method_name == 'standard_VQE':
shot_list = [714 for _ in range(10)]
# shot_list = [*[2814 for _ in range(10)], *[2289 for _ in range(10)],*[1764 for _ in range(10)]]#,
# *[1239 for _ in range(10)], *[714 for _ in range(10)]]
# shot_list=[4914 for _ in range(10)] # max for standard vqe
else:
shot_list=[1190 for _ in range(5)]
# shot_list = [*[4690 for _ in range(10)], *[3815 for _ in range(10)],*[2940 for _ in range(10)]]#,
# *[2065 for _ in range(10)], *[1190 for _ in range(10)]]
# shot_list=[8190 for _ in range(10)] # max for unitary part
n_system_qubits = input_data['n_system_qubits']
run_experiment_exp_loop(molecule_name,
method_name,
my_provider,
IBM_backend,
input_data,
shot_list,
n_system_qubits,
optimization_level=3)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("method", type=str, help="VQE method (standard_VQE, LCU, seq_rot_VQE")
# parser.add_argument("IBMQ_backend", type=str, help="name of IBMQ backend device")
# parser.add_argument("n_shots", type=int, help="number of circuit shots")
args = parser.parse_args()
main(args.method)
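# Example invocation (illustrative):
#   python H2_2_qubit_exp.py standard_VQE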
|
{"hexsha": "67251399861fd603b6553705254c7783c93c3540", "size": 2637, "ext": "py", "lang": "Python", "max_stars_repo_path": "old_projects/quchem_ibm/Experiments/H2_2_qubit_exp.py", "max_stars_repo_name": "AlexisRalli/VQE-code", "max_stars_repo_head_hexsha": "4112d2bba4c327360e95dfd7cb6120b2ce67bf29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-01T14:01:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T14:01:46.000Z", "max_issues_repo_path": "old_projects/quchem_ibm/Experiments/H2_2_qubit_exp.py", "max_issues_repo_name": "AlexisRalli/VQE-code", "max_issues_repo_head_hexsha": "4112d2bba4c327360e95dfd7cb6120b2ce67bf29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-11-13T16:23:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-07T11:03:06.000Z", "max_forks_repo_path": "old_projects/quchem_ibm/Experiments/H2_2_qubit_exp.py", "max_forks_repo_name": "AlexisRalli/VQE-code", "max_forks_repo_head_hexsha": "4112d2bba4c327360e95dfd7cb6120b2ce67bf29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7794117647, "max_line_length": 113, "alphanum_fraction": 0.6052332196, "include": true, "reason": "import numpy", "num_tokens": 721}
|
import os
import csv
import cv2
import numpy as np
from sklearn.utils import shuffle
def load_csv_data(log_file, data_dir, steering_correction = 0.25):
image_filepaths = []
measurements = []
with open(log_file, 'r') as f:
r = csv.reader(f)
next(r) # skip the header
for line in r:
image_filepaths.append(os.path.join(data_dir, line[0]))
measurements.append(float(line[3]))
image_filepaths.append(os.path.join(data_dir, line[1].strip()))
measurements.append(float(line[3]) + steering_correction)
image_filepaths.append(os.path.join(data_dir, line[2].strip()))
measurements.append(float(line[3]) - steering_correction)
return image_filepaths, measurements
def load_x_data(X_data, Y_data, flip=False):
out_x = []
out_y = []
for x, y in zip(X_data, Y_data):
img = cv2.cvtColor(cv2.imread(x), cv2.COLOR_BGR2RGB)
out_x.append(img)
out_y.append(y)
if flip:
out_x.append(cv2.flip(img, flipCode=1))
out_y.append(y * -1.0)
return np.array(out_x), np.array(out_y)
def load_batch_generator(X_data, Y_data, batch_size=32):
num_samples = len(X_data)
while 1: # Loop forever so the generator never terminates
X_data, Y_data = shuffle(X_data, Y_data)
for offset in range(0, num_samples, batch_size):
batch_x = X_data[offset:offset+batch_size]
batch_y = Y_data[offset:offset+batch_size]
images = []
angles = []
for x, y in zip(batch_x, batch_y):
image = cv2.cvtColor(cv2.imread(x), cv2.COLOR_BGR2RGB)
angle = float(y)
# load normal
images.append(image)
angles.append(angle)
# load flipped
images.append(cv2.flip(image, 1))
angles.append(angle * -1)
yield np.array(images), np.array(angles)
#yield shuffle(X_train, y_train)
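# Usage sketch (illustrative; the file paths are assumptions). Each yielded
# batch holds 2 * batch_size samples because every image is also flipped:
#   paths, angles = load_csv_data("driving_log.csv", "data/IMG")
#   gen = load_batch_generator(paths, angles, batch_size=32)
#   X_batch, y_batch = next(gen)  # X_batch.shape[0] == 64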
|
{"hexsha": "2b1748f4d2509c2159bcc7932408343a6e367c96", "size": 2143, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/data.py", "max_stars_repo_name": "tnweiss/CarND-Behavioral-Cloning-P3", "max_stars_repo_head_hexsha": "dafff3f62f691954012a4a84364dee3a0706414a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/data.py", "max_issues_repo_name": "tnweiss/CarND-Behavioral-Cloning-P3", "max_issues_repo_head_hexsha": "dafff3f62f691954012a4a84364dee3a0706414a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/data.py", "max_forks_repo_name": "tnweiss/CarND-Behavioral-Cloning-P3", "max_forks_repo_head_hexsha": "dafff3f62f691954012a4a84364dee3a0706414a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6142857143, "max_line_length": 75, "alphanum_fraction": 0.5674288381, "include": true, "reason": "import numpy", "num_tokens": 486}
|
module NeuroMetadata
using NeuroCore.AnatomicalAPI
using FieldProperties
export
EncodingDirection,
encoding_names,
freqdim,
freqdim!,
phasedim,
phasedim!,
slice_start,
slice_start!,
slice_end,
slice_end!,
slicedim,
slicedim!,
slice_duration,
slice_duration!,
phase_encoding_direction,
phase_encoding_direction!,
slice_encoding_direction,
slice_encoding_direction!,
InstitutionInformation,
HardwareMetadata,
EncodingDirectionMetadata
"""
ContrastIngredient
An enumerable type with the following possible values:
* `IODINE`
* `GADOLINIUM`
* `CARBON`
* `DIOXIDE`
* `BARIUM`
* `XENON`
* `UnkownContrast`
"""
@enum ContrastIngredient begin
IODINE
GADOLINIUM
CARBON
DIOXIDE
BARIUM
XENON
UnkownContrast
end
ContrastIngredient(i::AbstractString) = ContrastIngredient(Symbol(i))
function ContrastIngredient(i::Symbol)
if i === :IODINE
return IODINE
elseif i === :GADOLINIUM
return GADOLINIUM
elseif i === :CARBON
return CARBON
elseif i === :DIOXIDE
return DIOXIDE
elseif i === :BARIUM
return BARIUM
elseif i === :XENON
return XENON
else
return UnkownContrast
end
end
Base.String(i::ContrastIngredient) = String(Symbol(i))
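# Round-trip sketch (illustrative): the constructors accept strings or
# symbols, and unknown names map to `UnkownContrast`:
#   ContrastIngredient("GADOLINIUM") === GADOLINIUM
#   String(BARIUM) == "BARIUM"
#   ContrastIngredient(:no_such_agent) === UnkownContrast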
include("mri.jl")
include("encoding_direction.jl")
include("magnetization_transfer.jl")
include("sequence.jl")
include("spatial_encoding.jl")
include("spoiling.jl")
include("time.jl")
include("electrophysiology.jl")
include("institution.jl")
include("hardware.jl")
include("task.jl")
end
|
{"hexsha": "ae44195abbfe6601a232c9a16b2d745e3dbe3b65", "size": 1615, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/NeuroMetadata/NeuroMetadata.jl", "max_stars_repo_name": "SimonDanisch/NeuroCore.jl", "max_stars_repo_head_hexsha": "b5d9a85eec4817732bda9bfff87910fae6c7049b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-12-18T22:45:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T06:52:04.000Z", "max_issues_repo_path": "src/NeuroMetadata/NeuroMetadata.jl", "max_issues_repo_name": "SimonDanisch/NeuroCore.jl", "max_issues_repo_head_hexsha": "b5d9a85eec4817732bda9bfff87910fae6c7049b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2019-12-17T05:06:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T04:50:33.000Z", "max_forks_repo_path": "src/NeuroMetadata/NeuroMetadata.jl", "max_forks_repo_name": "SimonDanisch/NeuroCore.jl", "max_forks_repo_head_hexsha": "b5d9a85eec4817732bda9bfff87910fae6c7049b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-17T08:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-14T15:29:36.000Z", "avg_line_length": 18.5632183908, "max_line_length": 69, "alphanum_fraction": 0.6910216718, "num_tokens": 440}
|
#include "VMController.h"
#include "CollabVM.h"
#include "Database/VMSettings.h"
#include <boost/asio.hpp>
VMController::VMController(CollabVMServer& server, boost::asio::io_service& service, const std::shared_ptr<VMSettings>& settings) :
server_(server),
io_service_(service),
settings_(settings),
turn_timer_(service),
vote_state_(VoteState::kIdle),
vote_count_yes_(0),
vote_count_no_(0),
vote_timer_(service),
current_turn_(nullptr),
connected_users_(0),
stop_reason_(StopReason::kNormal),
thumbnail_str_(nullptr),
agent_timer_(service),
agent_connected_(false)
{
}
void VMController::InitAgent(const VMSettings& settings, boost::asio::io_service& service)
{
if (settings.AgentEnabled)
{
if (settings_->AgentSocketType == VMSettings::SocketType::kTCP)
{
agent_address_ = settings_->AgentAddress;
// Port number doesn't need to be appended because
// agent_address_ isn't used by QEMUController for TCP
//std::string port = std::to_string(settings_->AgentPort);
//agent_address_.reserve(agent_address_.length() + 1 + port.length());
//agent_address_ += ':';
//agent_address_ += port;
agent_ = std::make_shared<AgentTCPClient>(service, settings_->AgentAddress, settings_->AgentPort);
}
else if (settings_->AgentSocketType == VMSettings::SocketType::kLocal)
{
#ifdef _WIN32
agent_address_ = R"(\\.\pipe\collab-vm-agent-)";
#else
// Unix domain sockets need to have a valid file path
agent_address_ = P_tmpdir "/collab-vm-agent-";
#endif
agent_address_ += settings.Name;
agent_ = std::make_shared<AgentLocalClient>(service, agent_address_);
}
#ifndef _WIN32
agent_->Init("collab-vm-agent.dll", /*settings.HeartbeatTimeout*/ 5);
#else
#if _DEBUG
agent_->Init(R"(..\..\collab-vm-agent\Debug\collab-vm-agent.dll)", /*settings.HeartbeatTimeout*/ 5);
#else
#endif
#endif
}
else if (agent_)
{
agent_.reset();
}
}
void VMController::ChangeSettings(const std::shared_ptr<VMSettings>& settings)
{
if (settings->TurnsEnabled != settings_->TurnsEnabled ||
settings->VotesEnabled != settings_->VotesEnabled ||
settings->UploadsEnabled != settings_->UploadsEnabled)
{
server_.ActionsChanged(*this, *settings);
}
else if (settings->MOTD != settings_->MOTD &&
!settings->MOTD.empty())
{
server_.BroadcastMOTD(*this, *settings);
}
}
void VMController::Stop(StopReason reason)
{
boost::system::error_code ec;
turn_timer_.cancel(ec);
vote_timer_.cancel(ec);
agent_timer_.cancel(ec);
if (thumbnail_str_)
{
delete thumbnail_str_;
thumbnail_str_ = nullptr;
}
server_.OnVMControllerStateChange(shared_from_this(), VMController::ControllerState::kStopping);
}
void VMController::Vote(CollabVMUser& user, bool vote)
{
if (settings_->VotesEnabled)
{
switch (vote_state_)
{
case VoteState::kCoolingdown:
{
int32_t time_remaining = std::chrono::duration_cast<std::chrono::duration<int32_t>>(vote_timer_.expires_from_now()).count();
// Earlier I was attempting to get staff to bypass this but this will need more work, come back to it later
if (time_remaining > 0)
{
server_.VoteCoolingDown(user, time_remaining);
//break;
}
// Could fall through when the timer has expired,
// although this may create a race condition with the
// timer's callback
break;
}
case VoteState::kIdle:
if (vote)
{
// Start a new vote
vote_state_ = VoteState::kVoting;
vote_count_yes_ = 1;
vote_count_no_ = 0;
boost::system::error_code ec;
vote_timer_.expires_from_now(std::chrono::seconds(settings_->VoteTime), ec);
vote_timer_.async_wait(std::bind(&VMController::VoteEndedCallback, shared_from_this(), std::placeholders::_1));
server_.UserStartedVote(*this, users_, user);
server_.BroadcastVoteInfo(*this, users_, true, std::chrono::duration_cast<millisecs_t>(vote_timer_.expires_from_now()).count(), vote_count_yes_, vote_count_no_);
}
break;
case VoteState::kVoting:
{
int32_t time_remaining = std::chrono::duration_cast<millisecs_t>(vote_timer_.expires_from_now()).count();
if (time_remaining > 0)
{
IPData::VoteDecision prev_vote = user.ip_data.votes[this];
bool changed = false;
				if (user.voted_limit == false) {
					if (user.voted_amount >= 5) {
						// The user hit the vote-change limit; lock out further changes.
						user.voted_limit = true;
					}
					// A vote is already in progress so count the user's vote unless they've hit the limit
					else if (vote && prev_vote != IPData::VoteDecision::kYes)
					{
						if (prev_vote == IPData::VoteDecision::kNo)
							vote_count_no_--, user.voted_amount++;
						vote_count_yes_++;
						user.voted_amount++;
						changed = true;
					}
					else if (!vote && prev_vote != IPData::VoteDecision::kNo)
					{
						if (prev_vote == IPData::VoteDecision::kYes)
							vote_count_yes_--, user.voted_amount++;
						vote_count_no_++;
						user.voted_amount++;
						changed = true;
					}
				}
if (changed)
{
server_.UserVoted(*this, users_, user, vote);
server_.BroadcastVoteInfo(*this, users_, false, time_remaining, vote_count_yes_, vote_count_no_);
}
}
break;
}
}
}
}
void VMController::VoteEndedCallback(const boost::system::error_code& ec)
{
if (ec)
return;
server_.OnVMControllerVoteEnded(shared_from_this());
}
void VMController::EndVote()
{
if (vote_state_ != VoteState::kVoting) return;
bool vote_succeeded = (vote_count_yes_ >= vote_count_no_);
server_.BroadcastVoteEnded(*this, users_, vote_succeeded);
if (settings_->VoteCooldownTime)
{
vote_state_ = VoteState::kCoolingdown;
boost::system::error_code ec;
vote_timer_.expires_from_now(std::chrono::seconds(settings_->VoteCooldownTime), ec);
std::shared_ptr<VMController> self = shared_from_this();
vote_timer_.async_wait([this, self](const boost::system::error_code& ec)
{
if (!ec)
vote_state_ = VoteState::kIdle;
});
}
else
{
vote_state_ = VoteState::kIdle;
}
if (vote_succeeded)
RestoreVMSnapshot();
}
void VMController::SkipVote(bool vote_succeeded)
{
if (vote_state_ != VoteState::kVoting) return;
server_.BroadcastVoteEnded(*this, users_, vote_succeeded);
if (settings_->VoteCooldownTime)
{
vote_state_ = VoteState::kCoolingdown;
boost::system::error_code ec;
vote_timer_.expires_from_now(std::chrono::seconds(settings_->VoteCooldownTime), ec);
std::shared_ptr<VMController> self = shared_from_this();
vote_timer_.async_wait([this, self](const boost::system::error_code& ec)
{
if (!ec)
vote_state_ = VoteState::kIdle;
});
}
else
{
vote_state_ = VoteState::kIdle;
}
if (vote_succeeded) RestoreVMSnapshot();
}
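// Turn-jack semantics for the request handler below: a privileged request
// preempts the current turn holder, who is pushed to the front of the
// queue, and the turn timer restarts for the new holder.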
void VMController::TurnRequest(const std::shared_ptr<CollabVMUser>& user, bool turnJack, bool isStaff)
{
// If the user is already in the queue or they are already
// in control don't allow them to make another turn request
if (GetState() != ControllerState::kRunning || (!settings_->TurnsEnabled && !isStaff) ||
(user->waiting_turn && !turnJack) || current_turn_ == user)
return;
if (user->waiting_turn) {
for (auto it = turn_queue_.begin(); it != turn_queue_.end(); it++) {
if (*it == user) {
turn_queue_.erase(it);
user->waiting_turn = false;
break;
}
}
};
if (!current_turn_) {
// If no one currently has a turn then give the requesting user control
current_turn_ = user;
// Start the turn timer
boost::system::error_code ec;
turn_timer_.expires_from_now(std::chrono::seconds(settings_->TurnTime), ec);
turn_timer_.async_wait(std::bind(&VMController::TurnTimerCallback, shared_from_this(), std::placeholders::_1));
} else {
if (!turnJack) {
// Otherwise add them to the queue
turn_queue_.push_back(user);
user->waiting_turn = true;
} else {
// Turn-jack
turn_queue_.push_front(current_turn_);
current_turn_->waiting_turn = true;
current_turn_ = user;
boost::system::error_code ec;
turn_timer_.cancel(ec);
turn_timer_.expires_from_now(std::chrono::seconds(settings_->TurnTime), ec);
turn_timer_.async_wait(std::bind(&VMController::TurnTimerCallback, shared_from_this(), std::placeholders::_1));
}
	}
server_.BroadcastTurnInfo(*this, users_, turn_queue_, current_turn_.get(),
std::chrono::duration_cast<millisecs_t>(turn_timer_.expires_from_now()).count());
}
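// Promotes the next queued user (if any) to the current turn and restarts the turn timer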
void VMController::NextTurn()
{
int32_t time_remaining;
if (!turn_queue_.empty())
{
current_turn_ = turn_queue_.front();
current_turn_->waiting_turn = false;
turn_queue_.pop_front();
// Set up the turn timer
boost::system::error_code ec;
turn_timer_.expires_from_now(std::chrono::seconds(settings_->TurnTime), ec);
turn_timer_.async_wait(std::bind(&VMController::TurnTimerCallback, shared_from_this(), std::placeholders::_1));
time_remaining = std::chrono::duration_cast<millisecs_t>(turn_timer_.expires_from_now()).count();
}
else
{
current_turn_ = nullptr;
time_remaining = 0;
}
server_.BroadcastTurnInfo(*this, users_, turn_queue_, current_turn_.get(), time_remaining);
}
void VMController::ClearTurnQueue()
{
auto it = turn_queue_.begin();
while (it != turn_queue_.end())
{
(*it)->waiting_turn = false;
it = turn_queue_.erase(it);
}
current_turn_ = nullptr;
server_.BroadcastTurnInfo(*this, users_, turn_queue_, current_turn_.get(), 0);
}
void VMController::TurnTimerCallback(const boost::system::error_code& ec)
{
if (ec)
return;
server_.OnVMControllerTurnChange(shared_from_this());
}
void VMController::OnAgentConnect(const std::string& os_name, const std::string& service_pack,
const std::string& pc_name, const std::string& username, uint32_t max_filename)
{
server_.OnAgentConnect(shared_from_this(), os_name, service_pack, pc_name, username, max_filename);
}
void VMController::OnAgentDisconnect(bool protocol_error)
{
server_.OnAgentDisconnect(shared_from_this());
}
void VMController::OnAgentHeartbeatTimeout()
{
if (settings_->RestoreHeartbeat)
RestoreVMSnapshot();
else
agent_->Disconnect();
//server_.OnAgentHeartbeatTimeout(shared_from_this());
}
void VMController::OnFileUploadStarted(const std::shared_ptr<UploadInfo>& info, std::string* filename)
{
// TODO: Report new filename
}
void VMController::OnFileUploadFailed(const std::shared_ptr<UploadInfo>& info)
{
server_.OnFileUploadFailed(shared_from_this(), info);
}
void VMController::OnFileUploadFinished(const std::shared_ptr<UploadInfo>& info)
{
server_.OnFileUploadFinished(shared_from_this(), info);
}
void VMController::OnFileUploadExecFinished(const std::shared_ptr<UploadInfo>& info, bool exec_success)
{
server_.OnFileUploadExecFinished(shared_from_this(), info, exec_success);
}
void VMController::EndTurn(const std::shared_ptr<CollabVMUser>& user)
{
bool turn_change = false;
// Check if they are in the turn queue and remove them if they are
if (user->waiting_turn)
{
for (auto it = turn_queue_.begin(); it != turn_queue_.end(); it++)
{
if (*it == user)
{
turn_change = true;
turn_queue_.erase(it);
user->waiting_turn = false;
// The client should not be in the queue more than once
break;
}
}
}
// Check if it is currently their turn
if (current_turn_ == user)
{
// Cancel the pending timer callback
boost::system::error_code ec;
turn_timer_.cancel(ec);
if (!turn_queue_.empty())
{
current_turn_ = turn_queue_.front();
current_turn_->waiting_turn = false;
turn_queue_.pop_front();
// Set up the turn timer
turn_timer_.expires_from_now(std::chrono::seconds(settings_->TurnTime), ec);
turn_timer_.async_wait(std::bind(&VMController::TurnTimerCallback, shared_from_this(), std::placeholders::_1));
}
else
current_turn_ = nullptr;
turn_change = true;
}
if (turn_change)
server_.BroadcastTurnInfo(*this, users_, turn_queue_, current_turn_.get(),
current_turn_ ? std::chrono::duration_cast<millisecs_t>(turn_timer_.expires_from_now()).count() : 0);
}
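// Registers a new user with this VM and sends them the current turn and vote state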
void VMController::AddUser(const std::shared_ptr<CollabVMUser>& user)
{
users_.AddUser(*user, [this](CollabVMUser& user) { OnAddUser(user); });
int32_t time_remaining;
if (current_turn_)
{
time_remaining = std::chrono::duration_cast<millisecs_t>(turn_timer_.expires_from_now()).count();
if (time_remaining > 0)
server_.SendTurnInfo(*user, time_remaining, *current_turn_->username, turn_queue_);
}
if (vote_state_ == VoteState::kVoting)
{
time_remaining = std::chrono::duration_cast<millisecs_t>(vote_timer_.expires_from_now()).count();
if (time_remaining > 0)
server_.SendVoteInfo(*this, *user, time_remaining, vote_count_yes_, vote_count_no_);
}
}
void VMController::RemoveUser(const std::shared_ptr<CollabVMUser>& user)
{
// Remove the user's vote
IPData::VoteDecision prev_vote = user->ip_data.votes[this];
int32_t time_remaining = std::chrono::duration_cast<millisecs_t>(vote_timer_.expires_from_now()).count();
if (vote_state_ == VoteState::kVoting && time_remaining > 0 && prev_vote != IPData::VoteDecision::kNotVoted)
{
if (prev_vote == IPData::VoteDecision::kYes)
vote_count_yes_--;
else if (prev_vote == IPData::VoteDecision::kNo)
vote_count_no_--;
user->ip_data.votes[this] = IPData::VoteDecision::kNotVoted;
server_.BroadcastVoteInfo(*this, users_, false, time_remaining, vote_count_yes_, vote_count_no_);
}
EndTurn(user);
users_.RemoveUser(*user, [this](CollabVMUser& user) { OnRemoveUser(user); });
}
void VMController::NewThumbnail(std::string* str)
{
server_.OnVMControllerThumbnailUpdate(shared_from_this(), str);
}
bool VMController::IsFileUploadValid(const std::shared_ptr<CollabVMUser>& user, const std::string& filename, size_t file_size, bool run_file)
{
return file_size >= 1 && file_size <= settings_->MaxUploadSize &&
AgentClient::IsFilenameValid(agent_max_filename_, filename);
}
VMController::~VMController()
{
}
|
{"hexsha": "64956660c24a1dfda6656bb32a8c67e6c6884bce", "size": 13779, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/VMControllers/VMController.cpp", "max_stars_repo_name": "FurryFan2003/collab-vm-server", "max_stars_repo_head_hexsha": "1b4b2e602dfe61a4502ab3cadcecc4106b5e766e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 74.0, "max_stars_repo_stars_event_min_datetime": "2020-12-20T19:29:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T14:59:29.000Z", "max_issues_repo_path": "src/VMControllers/VMController.cpp", "max_issues_repo_name": "FurryFan2003/collab-vm-server", "max_issues_repo_head_hexsha": "1b4b2e602dfe61a4502ab3cadcecc4106b5e766e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-12-27T12:10:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-24T12:38:24.000Z", "max_forks_repo_path": "src/VMControllers/VMController.cpp", "max_forks_repo_name": "FurryFan2003/collab-vm-server", "max_forks_repo_head_hexsha": "1b4b2e602dfe61a4502ab3cadcecc4106b5e766e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2020-12-20T14:28:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-20T17:01:11.000Z", "avg_line_length": 29.1927966102, "max_line_length": 165, "alphanum_fraction": 0.721097322, "num_tokens": 3752}
|
import isopy
import pytest
import isopy.toolbox as toolbox
import numpy as np
class Test_Inversion:
def test_one(self):
spike = isopy.array(pd104=1, pd106=0, pd108=1, pd110=0)
spike = spike.normalise(1)
self.compare_rudge_siebert('pd', spike, 1.6, 0.1, 0.5)
self.compare_rudge_siebert('pd', spike, -1.6, -0.1, 0.85)
self.compare_rudge_siebert('pd', spike, 1.6, -0.1, 0.15)
def compare_rudge_siebert(self, element, spike, fins, fnat, spike_fraction):
# Default reference values
measured = isopy.tb.make_ms_sample(element, fnat=fnat, fins=fins,
spike=spike, spike_fraction=spike_fraction,
random_seed=46)
result_rudge = isopy.tb.ds_correction(measured, spike)
result_siebert = isopy.tb.ds_correction(measured, spike, method='siebert')
assert type(result_rudge) is toolbox.doublespike.DSResult
assert result_rudge.method == 'rudge'
np.testing.assert_allclose(result_rudge.alpha, fnat*-1, rtol=0.1)
np.testing.assert_allclose(result_rudge.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_rudge.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_rudge.sample_fraction, (1-spike_fraction), rtol=1E-3)
np.testing.assert_allclose(result_rudge.Q, (1 - spike_fraction)/spike_fraction, rtol=1E-3)
assert type(result_siebert) is toolbox.doublespike.DSResult
assert result_siebert.method == 'siebert'
np.testing.assert_allclose(result_siebert.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_siebert.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_siebert.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_siebert.sample_fraction, (1 - spike_fraction), rtol=1E-3)
np.testing.assert_allclose(result_siebert.Q, (1 - spike_fraction) / spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_siebert.alpha, result_rudge.alpha, rtol=1E-6)
np.testing.assert_allclose(result_siebert.beta, result_rudge.beta, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fnat, result_rudge.fnat, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fins, result_rudge.fins, rtol=1E-6)
np.testing.assert_allclose(result_siebert.spike_fraction, result_rudge.spike_fraction)
np.testing.assert_allclose(result_siebert.sample_fraction, result_rudge.sample_fraction)
np.testing.assert_allclose(result_siebert.Q, result_rudge.Q)
np.testing.assert_allclose(result_siebert.lambda_, result_rudge.lambda_)
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
measured = isopy.tb.make_ms_sample(element, fnat=fnat, fins=fins,
spike=spike, spike_fraction=spike_fraction,
random_seed=46, isotope_masses=mass_ref,
isotope_fractions=fraction_ref)
        result_rudge = isopy.tb.ds_correction(measured, spike, isotope_fractions=fraction_ref, isotope_masses=mass_ref)
        result_siebert = isopy.tb.ds_correction(measured, spike, isotope_fractions=fraction_ref, isotope_masses=mass_ref, method='siebert')
assert type(result_rudge) is toolbox.doublespike.DSResult
assert result_rudge.method == 'rudge'
np.testing.assert_allclose(result_rudge.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_rudge.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_rudge.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_rudge.sample_fraction, (1 - spike_fraction), rtol=1E-3)
np.testing.assert_allclose(result_rudge.Q, (1 - spike_fraction) / spike_fraction, rtol=1E-3)
assert type(result_siebert) is toolbox.doublespike.DSResult
assert result_siebert.method == 'siebert'
np.testing.assert_allclose(result_siebert.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_siebert.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_siebert.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_siebert.sample_fraction, (1 - spike_fraction), rtol=1E-3)
np.testing.assert_allclose(result_siebert.Q, (1 - spike_fraction) / spike_fraction,
rtol=1E-3)
np.testing.assert_allclose(result_siebert.alpha, result_rudge.alpha, rtol=1E-6)
np.testing.assert_allclose(result_siebert.beta, result_rudge.beta, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fnat, result_rudge.fnat, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fins, result_rudge.fins, rtol=1E-6)
np.testing.assert_allclose(result_siebert.spike_fraction, result_rudge.spike_fraction)
np.testing.assert_allclose(result_siebert.sample_fraction, result_rudge.sample_fraction)
np.testing.assert_allclose(result_siebert.Q, result_rudge.Q)
np.testing.assert_allclose(result_siebert.lambda_, result_rudge.lambda_)
def test_result(self):
spike = isopy.array(pd104=1, pd106=0, pd108=1, pd110=0)
spike = spike.normalise(1)
measured = isopy.tb.make_ms_sample('pd', fnat=0.1, fins=1.6,
spike=spike, spike_fraction=0.5,
random_seed=46)
result = isopy.tb.ds_correction(measured, spike)
assert type(result) is toolbox.doublespike.DSResult
attrs = 'alpha beta lambda_ fnat fins spike_fraction sample_fraction Q'.split()
assert list(result.keys()) == attrs
values = list(result.values())
mean = np.mean(result)
assert type(mean) is toolbox.doublespike.DSResult
for i, (name, value) in enumerate(result.items()):
assert name == attrs[i]
assert result[name] is value
assert value is getattr(result, name)
assert value is values[i]
assert getattr(mean, name) == np.mean(getattr(result, name))
repr(result)
class Test_Correction:
def test_one(self):
spike = isopy.array(pd104=1, pd106=0, pd108=1, pd110=0)
spike = spike.normalise(1)
self.compare_rudge_siebert('pd', spike, 1.6, 0.1, 0.5)
self.compare_rudge_siebert('pd', spike, -1.6, -0.1, 0.85)
self.compare_rudge_siebert('pd', spike, 1.6, -0.1, 0.15)
self.compare_rudge_siebert('pd', spike, 1.6, 0.1, 0.5, ru=0.01)
self.compare_rudge_siebert('pd', spike, -1.6, -0.1, 0.85, cd=0.01)
self.compare_rudge_siebert('pd', spike, 1.6, -0.1, 0.15, ru=0.01, cd=0.01)
def compare_rudge_siebert(self, element, spike, fins, fnat, spike_fraction, **interferences):
# Default reference values
measured = isopy.tb.make_ms_sample(element, fnat=fnat, fins=fins,
spike=spike, spike_fraction=spike_fraction,
random_seed=46, **interferences)
result_rudge = isopy.tb.ds_correction(measured, spike)
result_siebert = isopy.tb.ds_correction(measured, spike, method='siebert')
assert type(result_rudge) is toolbox.doublespike.DSResult
assert result_rudge.method == 'rudge'
np.testing.assert_allclose(result_rudge.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_rudge.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_rudge.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_rudge.sample_fraction, (1 - spike_fraction),
rtol=1E-3)
np.testing.assert_allclose(result_rudge.Q, (1 - spike_fraction) / spike_fraction,
rtol=1E-3)
assert type(result_siebert) is toolbox.doublespike.DSResult
assert result_siebert.method == 'siebert'
np.testing.assert_allclose(result_siebert.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_siebert.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_siebert.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_siebert.sample_fraction, (1 - spike_fraction),
rtol=1E-3)
np.testing.assert_allclose(result_siebert.Q, (1 - spike_fraction) / spike_fraction,
rtol=1E-3)
np.testing.assert_allclose(result_siebert.alpha, result_rudge.alpha, rtol=1E-6)
np.testing.assert_allclose(result_siebert.beta, result_rudge.beta, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fnat, result_rudge.fnat, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fins, result_rudge.fins, rtol=1E-6)
np.testing.assert_allclose(result_siebert.spike_fraction, result_rudge.spike_fraction)
np.testing.assert_allclose(result_siebert.sample_fraction, result_rudge.sample_fraction)
np.testing.assert_allclose(result_siebert.Q, result_rudge.Q)
np.testing.assert_allclose(result_siebert.lambda_, result_rudge.lambda_)
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
measured = isopy.tb.make_ms_sample(element, fnat=fnat, fins=fins,
spike=spike, spike_fraction=spike_fraction,
random_seed=46, isotope_masses=mass_ref,
isotope_fractions=fraction_ref, **interferences)
result_rudge = isopy.tb.ds_correction(measured, spike, isotope_fractions=fraction_ref, isotope_masses=mass_ref)
result_siebert = isopy.tb.ds_correction(measured, spike, isotope_fractions=fraction_ref, isotope_masses=mass_ref,
method='siebert')
assert type(result_rudge) is toolbox.doublespike.DSResult
assert result_rudge.method == 'rudge'
np.testing.assert_allclose(result_rudge.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_rudge.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_rudge.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_rudge.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_rudge.sample_fraction, (1 - spike_fraction),
rtol=1E-3)
np.testing.assert_allclose(result_rudge.Q, (1 - spike_fraction) / spike_fraction,
rtol=1E-3)
assert type(result_siebert) is toolbox.doublespike.DSResult
assert result_siebert.method == 'siebert'
np.testing.assert_allclose(result_siebert.alpha, fnat * -1, rtol=0.1)
np.testing.assert_allclose(result_siebert.beta, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.fnat, fnat, rtol=0.1)
np.testing.assert_allclose(result_siebert.fins, fins, rtol=0.1)
np.testing.assert_allclose(result_siebert.spike_fraction, spike_fraction, rtol=1E-3)
np.testing.assert_allclose(result_siebert.sample_fraction, (1 - spike_fraction),
rtol=1E-3)
np.testing.assert_allclose(result_siebert.Q, (1 - spike_fraction) / spike_fraction,
rtol=1E-3)
np.testing.assert_allclose(result_siebert.alpha, result_rudge.alpha, rtol=1E-6)
np.testing.assert_allclose(result_siebert.beta, result_rudge.beta, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fnat, result_rudge.fnat, rtol=1E-6)
np.testing.assert_allclose(result_siebert.fins, result_rudge.fins, rtol=1E-6)
np.testing.assert_allclose(result_siebert.spike_fraction, result_rudge.spike_fraction)
np.testing.assert_allclose(result_siebert.sample_fraction, result_rudge.sample_fraction)
np.testing.assert_allclose(result_siebert.Q, result_rudge.Q)
np.testing.assert_allclose(result_siebert.lambda_, result_rudge.lambda_)
class Test_Delta:
def test_delta(self):
spike = isopy.array(pd104=1, pd106=0, pd108=1, pd110=0)
spike = spike.normalise(1)
measured = isopy.tb.make_ms_sample('pd', fnat=0.1, fins=1.6,
spike=spike, spike_fraction=0.5,
random_seed=46)
result = isopy.tb.ds_correction(measured, spike)
mass_ratio1 = '108pd/105pd'
mass_ratio2 = isopy.refval.isotope.mass_W17.get(mass_ratio1)
correct = result.fnat
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, result)
self.compare(correct1, correct2, mass_ratio2, result)
correct = result.fnat
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, result)
self.compare(correct1, correct2, mass_ratio2, result)
correct = result.fnat
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, result.fnat)
self.compare(correct1, correct2, mass_ratio2, result.fnat)
correct = 0 - np.mean(result.fnat)
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, 0, result.fnat)
self.compare(correct1, correct2, mass_ratio2, 0, result.fnat)
correct = 0 - np.mean(result.fnat)
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, 0, result)
self.compare(correct1, correct2, mass_ratio2, 0, result)
correct = 0 - (np.mean(result.fnat)/2 + 0.5)
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, 0, (result.fnat, 1))
self.compare(correct1, correct2, mass_ratio2, 0, (result.fnat, 1))
correct = 0 - (np.mean(result.fnat) / 2 + 0.5)
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, 0, (result, 1))
self.compare(correct1, correct2, mass_ratio2, 0, (result, 1))
mass_ratio1 = '108pd/105pd'
mass_ratio2 = isopy.refval.isotope.mass_number.get(mass_ratio1)
correct = result.fnat
correct1 = (np.power(mass_ratio2, correct) - 1)
correct2 = np.log(mass_ratio2) * correct
self.compare(correct1, correct2, mass_ratio1, result, mass_ref=isopy.refval.isotope.mass_number)
self.compare(correct1, correct2, mass_ratio2, result, mass_ref=isopy.refval.isotope.mass_number)
def compare(self, correct, correct_prime, mass_ratio, fnat, reference_fnat=0, factor=1, mass_ref=None):
result = isopy.tb.ds_Delta(mass_ratio, fnat, reference_fnat, factor=factor, isotope_masses=mass_ref)
np.testing.assert_allclose(result, correct)
result = isopy.tb.ds_Delta_prime(mass_ratio, fnat, reference_fnat, factor=factor, isotope_masses=mass_ref)
np.testing.assert_allclose(result, correct_prime)
|
{"hexsha": "b756ad875f33a35b83ff6ca0ea7fc5ed870f249c", "size": 16629, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_tb_doublespike.py", "max_stars_repo_name": "mattias-ek/isopy", "max_stars_repo_head_hexsha": "96d5530034655c7f9559568ab9b0879b978ef566", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_tb_doublespike.py", "max_issues_repo_name": "mattias-ek/isopy", "max_issues_repo_head_hexsha": "96d5530034655c7f9559568ab9b0879b978ef566", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-23T08:48:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-23T08:48:04.000Z", "max_forks_repo_path": "tests/test_tb_doublespike.py", "max_forks_repo_name": "mattias-ek/isopy", "max_forks_repo_head_hexsha": "96d5530034655c7f9559568ab9b0879b978ef566", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.7542662116, "max_line_length": 141, "alphanum_fraction": 0.6739431114, "include": true, "reason": "import numpy", "num_tokens": 4422}
|
using Documenter
using StatsBase
using MixedModels
makedocs(
root = joinpath(dirname(pathof(MixedModels)), "..", "docs"),
sitename = "MixedModels",
pages = [
"index.md",
"constructors.md",
"optimization.md",
"GaussHermite.md",
"bootstrap.md",
# "SimpleLMM.md",
# "MultipleTerms.md",
# "SingularCovariance.md",
# "SubjectItem.md",
# "benchmarks.md"
],
)
deploydocs(repo = "github.com/JuliaStats/MixedModels.jl.git", push_preview = true)
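# Typical local invocation (assuming the docs environment has been instantiated):
#   julia --project=docs docs/make.jl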
|
{"hexsha": "24197b4f39899c285bcfa383cf12be422231c5fc", "size": 534, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "Nosferican/MixedModels.jl", "max_stars_repo_head_hexsha": "dec030a95103158aa738616f9dc72b9d0563fb26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "Nosferican/MixedModels.jl", "max_issues_repo_head_hexsha": "dec030a95103158aa738616f9dc72b9d0563fb26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "Nosferican/MixedModels.jl", "max_forks_repo_head_hexsha": "dec030a95103158aa738616f9dc72b9d0563fb26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2173913043, "max_line_length": 82, "alphanum_fraction": 0.5861423221, "num_tokens": 141}
|
"""
Evaluate co-ocurrence between `regions` and `query`
"""
from __future__ import print_function
import sys
from argparse import ArgumentParser
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
import random
import copy
from interlap import InterLap
from peddy import Ped
import toolshed as ts
try:
from itertools import izip
zip = izip
range = xrange
except ImportError:
pass
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
def main(args=sys.argv[1:]):
p = ArgumentParser(__doc__)
p.add_argument("--region-key", default="sample_id", help="column-header from regions to use for sample")
p.add_argument("--query-key", default="sample_id", help="column-header from query to user for sample")
p.add_argument("--figure", default="xodn.png", help="path indicating where to save the figure")
p.add_argument("--simulations", type=int, default=1000, help="number of shufflings to compare to observed")
p.add_argument("--extend", type=int, default=10000, help="extend regions on either size by this amount before checking for overlaps.")
p.add_argument("--size-cutoff", type=int, default=80000, help="exclude regions greater than this length")
p.add_argument("--print", default=False, action="store_true", help="print the sample and family_id of overlapping regions.")
p.add_argument("regions", metavar="REGIONS")
p.add_argument("query", metavar="QUERY")
a = p.parse_args(args)
return run(a)
def mktree(iterable, sample_key, size_cutoff):
T = defaultdict(InterLap)
for d in iterable:
#if 'parent_sex' in d and d['parent_sex'] != 'female': continue
if d['chrom'] in ('X', 'Y'): continue
s, e = int(d['start']), int(d['end'])
if e - s > size_cutoff: continue
T[d['chrom']].add([s, e, d[sample_key], d])
return T
def get_overlap_counts(Q, R, extend=10000, do_print=False):
n = 0
a_pairs, b_pairs = [], []
for chrom in R:
for start, end, sample_id, dr in R[chrom]:
q_hits = list(Q[chrom].find((start - extend, end + extend)))
q_samples = [x[2] for x in q_hits]
a_pairs.extend([sample_id] * len(q_samples))
b_pairs.extend(q_samples)
if do_print:
for st, en, smp, dq in q_hits:
if smp != sample_id: continue
print("%s\t%d\t%d\t%s\t%s\t%d\t%d" % (chrom, st, en, dq.get('sample_id', dq.get('parent_id')), dq['family_id'], start, end))
n += int(sample_id in set(q_samples))
return n, a_pairs, b_pairs
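# Permutation test: the observed count of same-sample overlaps is compared to a
# null distribution obtained by repeatedly shuffling one side of the sample pairs.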
def enrichment(regions, query, region_key, query_key, extend=10000,
simulations=1000,
size_cutoff=180000,
figpath=None,
do_print=False):
R = mktree(regions, region_key, size_cutoff)
Q = mktree(query, query_key, size_cutoff)
    obs, aa_pairs, bb_pairs = get_overlap_counts(Q, R, extend, do_print=do_print)
print("observed:", obs, file=sys.stderr)
print("n-pairs:", len(aa_pairs), file=sys.stderr)
print("shared:", sum(a == b for a, b in zip(aa_pairs, bb_pairs)),
file=sys.stderr)
res = []
import time
import numpy as np
lookup = {p: i for i, p in enumerate(set(aa_pairs + bb_pairs))}
    # faster to compare ints than strings so we convert here.
    a_pairs = np.array([lookup[p] for p in aa_pairs], dtype=np.int64)
    b_pairs = np.array([lookup[p] for p in bb_pairs], dtype=np.int64)
obs = (a_pairs == b_pairs).sum()
print("obs2:", obs, file=sys.stderr)
t0, t1 = time.time(), time.time()
for i in range(simulations):
if i > 0 and i % 10000 == 0:
print(i, time.time() - t1, res[-1], file=sys.stderr)
t1 = time.time()
np.random.shuffle(a_pairs)
res.append((a_pairs == b_pairs).sum())
ngt = sum(r >= obs for r in res)
    p5, p50, p95 = np.percentile(res, [5, 50, 95])
    p = (1.0 + ngt) / float(1 + len(res))
    print("ngt, p", ngt, p, file=sys.stderr)
    print("time:", time.time() - t0, file=sys.stderr)
    colors = sns.color_palette()
    enriched = obs / float(p50)
    e5, e95 = obs / float(p5), obs / float(p95)
    fig, ax = plt.subplots(1)
    plt.title("Co-occurrence")
    ax.hist(res, min(max(res), 25), label="expected")
    ax.axvline(x=obs, label="observed", color=colors[1], lw=3)
    #ax.text(0.66, 0.92, "p: %.3g (1 + %d) / (1 + %d)" % (p, ngt, len(res)), transform=ax.transAxes)
    ax.text(0.60, 0.92, "p: %.3g\nFC:%.2f (%.2f-%.2f)"
            % (p, enriched, e95, e5), transform=ax.transAxes)
ax.set_xlabel("Number of overlaps")
ax.set_ylabel("Count")
plt.legend(loc='upper left')
plt.savefig(figpath)
def run(args):
r = ts.reader(args.regions)
q = ts.reader(args.query)
enrichment(r, q, args.region_key, args.query_key,
simulations=args.simulations,
extend=args.extend,
size_cutoff=args.size_cutoff,
figpath=args.figure, do_print=args.print)
if __name__ == "__main__":
sys.exit(main())
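# Example invocation (file names are illustrative):
#   python enrichment.py --simulations 1000 regions.bed query.bed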
|
{"hexsha": "962665e1bb23ab5f88c149a661b4e3eb29c555a1", "size": 5127, "ext": "py", "lang": "Python", "max_stars_repo_path": "recombinator/enrichment.py", "max_stars_repo_name": "quinlan-lab/recombinator", "max_stars_repo_head_hexsha": "a164c2ea5e91debacbe658e85fa38e89ebafad05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-10-18T23:03:29.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-06T03:55:54.000Z", "max_issues_repo_path": "recombinator/enrichment.py", "max_issues_repo_name": "quinlan-lab/recombinator", "max_issues_repo_head_hexsha": "a164c2ea5e91debacbe658e85fa38e89ebafad05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recombinator/enrichment.py", "max_forks_repo_name": "quinlan-lab/recombinator", "max_forks_repo_head_hexsha": "a164c2ea5e91debacbe658e85fa38e89ebafad05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3617021277, "max_line_length": 144, "alphanum_fraction": 0.6272674078, "include": true, "reason": "import numpy", "num_tokens": 1415}
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
import numpy as np
from edutorch.typing import NPArray
from ..nn.module import Module
from .optimizer import Optimizer
@dataclass
class RMSProp(Optimizer):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
    Fields:
    - lr: scalar learning rate.
    - decay_rate: scalar between 0 and 1 giving the decay rate for the squared
      gradient cache.
    - eps: small scalar used for smoothing to avoid dividing by zero.
    The optimizer context stores the moving average of squared gradients.
    """
model: Module
lr: float = 1e-2
decay_rate: float = 0.99
eps: float = 1e-8
def init_context(self, w: NPArray) -> tuple[Any, ...]:
"""Initialize context using weights."""
v = np.zeros_like(w)
return (v,)
def update(
self, context: tuple[Any, ...], w: NPArray, dw: NPArray
) -> tuple[NPArray, tuple[NPArray, ...]]:
(v,) = context
v = self.decay_rate * v + (1 - self.decay_rate) * dw ** 2
w -= self.lr * dw / (np.sqrt(v) + self.eps)
return w, (v,)
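# Usage sketch (hypothetical model and shapes; `model` must be a Module):
#   opt = RMSProp(model, lr=1e-3)
#   ctx = opt.init_context(w)        # w: np.ndarray of weights
#   w, ctx = opt.update(ctx, w, dw)  # dw: gradient of the loss w.r.t. w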
|
{"hexsha": "ed20393995b03c32cd0e561458b0de4d96911689", "size": 1250, "ext": "py", "lang": "Python", "max_stars_repo_path": "edutorch/optim/rmsprop.py", "max_stars_repo_name": "TylerYep/edutorch", "max_stars_repo_head_hexsha": "6a4a425cbfd7fcdcd851b010816d29c3b5bae8bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-14T01:17:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T09:34:32.000Z", "max_issues_repo_path": "edutorch/optim/rmsprop.py", "max_issues_repo_name": "TylerYep/edutorch", "max_issues_repo_head_hexsha": "6a4a425cbfd7fcdcd851b010816d29c3b5bae8bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "edutorch/optim/rmsprop.py", "max_forks_repo_name": "TylerYep/edutorch", "max_forks_repo_head_hexsha": "6a4a425cbfd7fcdcd851b010816d29c3b5bae8bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5957446809, "max_line_length": 78, "alphanum_fraction": 0.6456, "include": true, "reason": "import numpy", "num_tokens": 324}
|
using LinearAlgebra
function loss(w, s=4.0)
return (w[1]*w[2] - s)^2/2
end
function grad_loss(w, s=4.0)
term = w[1]*w[2] - s
return term*[w[2], w[1]]
end
function hess_loss(w, s=4.0)
term = w[1]*w[2] - s
dterm = [w[2] w[1]]
return term*[0 1;1 0] .+
[w[2]*dterm; w[1]*dterm]
end
next(w, η, s=4.0) = w - η*grad_loss(w, s)
dnext(w, η, s=4.0) = I(2) - η*hess_loss(w, s)
function d2next(w, η, s=4.0)
    dterm = [w[2] w[1]]
    dfirst = [0 0; dterm; dterm; 0 0]
    dsecond = [0 2*w[2]; w[2] w[1];
               w[2] w[1]; 2*w[1] 0]
    return -η*(dfirst .+ dsecond)
end
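# Minimal usage sketch (step size and start point are arbitrary choices):
#   w = [1.0, 3.0]
#   for _ in 1:200
#       w = next(w, 0.05)
#   end
#   loss(w)  # approaches 0 as w[1]*w[2] converges to s = 4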
|
{"hexsha": "356878cdaa964160f932040f309427205b1d74a8", "size": 620, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/nonconvex_smooth_noncompact.jl", "max_stars_repo_name": "nishaChandramoorthy/neuralOMET", "max_stars_repo_head_hexsha": "4be74e94b6ebe3c103e7dee7daada94b8252984c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/nonconvex_smooth_noncompact.jl", "max_issues_repo_name": "nishaChandramoorthy/neuralOMET", "max_issues_repo_head_hexsha": "4be74e94b6ebe3c103e7dee7daada94b8252984c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/nonconvex_smooth_noncompact.jl", "max_forks_repo_name": "nishaChandramoorthy/neuralOMET", "max_forks_repo_head_hexsha": "4be74e94b6ebe3c103e7dee7daada94b8252984c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8461538462, "max_line_length": 49, "alphanum_fraction": 0.5258064516, "num_tokens": 325}
|
classdef SOP_F21 < PROBLEM
% <single> <real> <expensive/none>
% Shekel's family
%------------------------------- Reference --------------------------------
% X. Yao, Y. Liu, and G. Lin, Evolutionary programming made faster, IEEE
% Transactions on Evolutionary Computation, 1999, 3(2): 82-102.
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
methods
%% Default settings of the problem
function Setting(obj)
obj.M = 1;
obj.D = 4;
obj.lower = zeros(1,obj.D);
obj.upper = zeros(1,obj.D) + 10;
obj.encoding = ones(1,obj.D);
end
%% Calculate objective values
function PopObj = CalObj(obj,PopDec)
a = [4 4 4 4;1 1 1 1;8 8 8 8;6 6 6 6;3 7 3 7];
c = [0.1;0.2;0.2;0.4;0.4];
PopObj = zeros(size(PopDec,1),1);
for i = 1 : size(PopDec,1)
PopObj(i) = -sum(1./(sum((repmat(PopDec(i,:),5,1)-a).^2,2)+c));
end
end
%% Generate the minimum objective value
function R = GetOptimum(obj,N)
R = -10.16;
end
end
end
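% Usage sketch (assumes the PlatEMO PROBLEM framework is on the path; values illustrative):
%   pro = SOP_F21();
%   f = pro.CalObj([4 4 4 4]);  % close to the optimum reported by GetOptimum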
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Problems/Single-objective optimization/Simple SOPs/SOP_F21.m"}
|
[STATEMENT]
lemma Trans: assumes "H \<turnstile> x EQ y" "H \<turnstile> y EQ z" shows "H \<turnstile> x EQ z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
have "\<And>H. H \<turnstile> (x EQ x AND y EQ z) IMP (x EQ y IMP x EQ z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>H. H \<turnstile> x EQ x AND y EQ z IMP x EQ y IMP x EQ z
[PROOF STEP]
by (metis eql_cong1 bot_least thin)
[PROOF STATE]
proof (state)
this:
?H \<turnstile> x EQ x AND y EQ z IMP x EQ y IMP x EQ z
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
?H \<turnstile> x EQ x AND y EQ z IMP x EQ y IMP x EQ z
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
have "{x EQ y, y EQ z} \<turnstile> x EQ x AND y EQ z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {x EQ y, y EQ z} \<turnstile> x EQ x AND y EQ z
[PROOF STEP]
by (metis Assume cnj_I Refl thin1)
[PROOF STATE]
proof (state)
this:
{x EQ y, y EQ z} \<turnstile> x EQ x AND y EQ z
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
?H \<turnstile> x EQ x AND y EQ z IMP x EQ y IMP x EQ z
{x EQ y, y EQ z} \<turnstile> x EQ x AND y EQ z
[PROOF STEP]
have "{x EQ y, y EQ z} \<turnstile> x EQ z"
[PROOF STATE]
proof (prove)
using this:
?H \<turnstile> x EQ x AND y EQ z IMP x EQ y IMP x EQ z
{x EQ y, y EQ z} \<turnstile> x EQ x AND y EQ z
goal (1 subgoal):
1. {x EQ y, y EQ z} \<turnstile> x EQ z
[PROOF STEP]
by (metis Hyp MP_same insertI1)
[PROOF STATE]
proof (state)
this:
{x EQ y, y EQ z} \<turnstile> x EQ z
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
{x EQ y, y EQ z} \<turnstile> x EQ z
goal (1 subgoal):
1. H \<turnstile> x EQ z
[PROOF STEP]
by (metis assms cut2)
[PROOF STATE]
proof (state)
this:
H \<turnstile> x EQ z
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1000, "file": "Robinson_Arithmetic_Robinson_Arithmetic", "length": 12}
|
import numpy as np
from utils import *
def sort_pixels(dnn, layer_functions, image, nc_layer, pos, gran=2):
sort_list=np.linspace(0, 1, gran)
image_batch = np.kron(np.ones((gran, 1, 1, 1)), image)
images=[]
(row, col, chl) = image.shape
dim_row, dim_col=row, col
if row>DIM: dim_row=DIM
if col>DIM: dim_col=DIM
selected_rows=np.random.choice(row, dim_row)
selected_cols=np.random.choice(col, dim_col)
for i in selected_rows:
for j in selected_cols:
new_image_batch = image_batch.copy()
for g in range(0, gran):
new_image_batch[g, i, j, :] = sort_list[g]
images.append(new_image_batch)
images=np.asarray(images)
images = images.reshape(dim_row * dim_col * gran, row, col, chl)
activations = eval_batch(layer_functions, images)
target_list = activations[nc_layer.layer_index]
osp=activations[nc_layer.layer_index].shape
index=np.unravel_index(pos, osp)
target_change=None
if nc_layer.is_conv:
target_change = target_list[:, index[1], index[2], index[3]].reshape(-1, gran).transpose()
else:
target_change = target_list[:, index[1]].reshape(-1, gran).transpose()
    # argmax/amax pick the pixel value that maximally activates the target neuron
    max_indices = np.argmax(target_change, axis=0)
    max_values = np.amax(target_change, axis=0)
    max_idx_values = max_indices.astype('float32') / (gran - 1)
    [x, y] = np.meshgrid(np.arange(dim_row), np.arange(dim_col))
    x = x.flatten('F')  # to flatten in column-major order
    y = y.flatten('F')  # to flatten in column-major order
    target_list = np.hstack((np.split(x, len(x)),
                             np.split(y, len(y)),
                             np.split(max_values, len(max_values)),
                             np.split(max_idx_values, len(max_idx_values))))
sorted_map = target_list[(target_list[:, 2]).argsort()]
sorted_map = np.flip(sorted_map, 0)
for i in range(0, len(sorted_map)):
sorted_map[i][0]=selected_rows[int(sorted_map[i][0])]
sorted_map[i][1]=selected_cols[int(sorted_map[i][1])]
return sorted_map
def accumulate(dnn, layer_functions, image, nc_layer, pos, sorted_pixels, mani_range):
images=[]
mani_image=image.copy()
for i in range(0, mani_range):
pixel_row = sorted_pixels[i, 0].astype('int')
pixel_col = sorted_pixels[i, 1].astype('int')
pixel_value = sorted_pixels[i, 3]
mani_image[pixel_row][pixel_col] = pixel_value
images.append(mani_image.copy())
images = np.asarray(images)
(row, col, chl) = image.shape
activations = eval_batch(layer_functions, images.reshape(len(images), row, col, chl))
osp=activations[nc_layer.layer_index].shape
index=np.unravel_index(pos, osp)
nc_acts=None
if nc_layer.is_conv:
nc_acts = activations[nc_layer.layer_index][:, index[1], index[2], index[3]]
else:
nc_acts = activations[nc_layer.layer_index][:, index[1]]
adversarial_images = images[nc_acts> 0, :, :]
if adversarial_images.any():
success_flag=True
idx_first=np.amin((nc_acts>0).nonzero(), axis=1)
else:
success_flag=False
idx_first=np.nan
return adversarial_images, idx_first, success_flag
def refine_act_image(dnn, layer_functions, image, nc_layer, pos, sorted_pixels, act_image_first, idx_first):
(row, col, chl) = image.shape
refined_act_image=act_image_first.copy()
total_idx=0
idx_range=np.arange(idx_first)
while True:
length=len(idx_range)
#print ('idx_first: ', idx_first)
for i in range(0, idx_first[0]):
pixel_row = sorted_pixels[i, 0].astype('int')
pixel_col = sorted_pixels[i, 1].astype('int')
refined_act_image[pixel_row, pixel_col] = image[pixel_row, pixel_col]
activations = eval_batch(layer_functions, refined_act_image.reshape(1, row, col, chl))
osp=activations[nc_layer.layer_index].shape
index=np.unravel_index(pos, osp)
refined_activation=None
if nc_layer.is_conv:
refined_activation = activations[nc_layer.layer_index][0][index[1]][index[2]][index[3]]
else:
refined_activation = activations[nc_layer.layer_index][0][index[1]]
if refined_activation < 0: # == label:
refined_act_image[pixel_row, pixel_col] = sorted_pixels[i, 3]
else:
total_idx = total_idx + 1
idx_range = idx_range[~(idx_range == i)]
if len(idx_range) == length:
break
return refined_act_image
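# Pipeline sketch (names are illustrative; dnn and layer_functions come from utils):
#   sorted_pixels = sort_pixels(dnn, layer_functions, image, nc_layer, pos)
#   advs, idx_first, ok = accumulate(dnn, layer_functions, image, nc_layer, pos,
#                                    sorted_pixels, mani_range=100)
#   if ok:
#       refined = refine_act_image(dnn, layer_functions, image, nc_layer, pos,
#                                  sorted_pixels, advs[0], idx_first)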
|
{"hexsha": "b5cf0ef05b7d4a54460d6ba20b1ae32ce7027cd7", "size": 4326, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/l0_encoding.py", "max_stars_repo_name": "853108389/DeepConcolic", "max_stars_repo_head_hexsha": "2fb4bee11a07dcf39d9df9b2534f377336257def", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-07T10:34:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-28T22:56:05.000Z", "max_issues_repo_path": "src/l0_encoding.py", "max_issues_repo_name": "853108389/DeepConcolic", "max_issues_repo_head_hexsha": "2fb4bee11a07dcf39d9df9b2534f377336257def", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/l0_encoding.py", "max_forks_repo_name": "853108389/DeepConcolic", "max_forks_repo_head_hexsha": "2fb4bee11a07dcf39d9df9b2534f377336257def", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-07T15:04:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-25T18:56:22.000Z", "avg_line_length": 34.8870967742, "max_line_length": 108, "alphanum_fraction": 0.6805362922, "include": true, "reason": "import numpy", "num_tokens": 1180}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: shirui <shirui816@gmail.com>
import numpy as np
class Evaluation(object):
r"""Evaluation of model."""
def __init__(self, model):
r"""Initialize with model.
Arguments:
model: a fit object
"""
self.model = model
@classmethod
def make_sample(cls, n, x, pdf):
r"""Make random sample taken from x.
Arguments:
n: int, sample size
x: np.ndarray
pdf: np.ndarray
Returns:
sample
"""
pdf /= np.sum(pdf)
return np.random.choice(x, size=n, p=pdf)
# This func is already a sum of functions
def _log_prob(self, x):
r"""Calculate log probability.
Arguments:
x: samples of (n_samples, n_features)
Returns:
probability: np.ndarray
"""
return np.log(self.model.Function(x, *self.model.Parameters) /
self.model.NormalFactor)
def aic(self, x):
r"""Calculate AIC.
Aho, K.; Derryberry, D.; Peterson, T. (2014), "Model selection for
ecologists: the worldviews of AIC and BIC", Ecology, 95: 631–636,
doi:10.1890/13-1452.1.
AIC = 2k - 2\ln{\hat{\mathcal{L}}}, \hat{\mathcal{{L}}} is Likelihood.
Arguments:
samples: samples of (n_samples, n_features)
Returns:
aic: np.ndarray
"""
return 2 * self.model.N_ -\
2 * self.score(x) * x.shape[0]
def bic(self, x):
r"""Calculate BIC.
Schwarz, Gideon E. (1978), "Estimating the dimension of a model",
Annals of Statistics, 6 (2): 461–464, doi:10.1214/aos/1176344136,
MR 0468014.
BIC = \ln{N}k - 2\ln{\hat{\mathcal{L}}}
Arguments:
samples: samples of (n_samples, n_features)
Returns:
bic: np.ndarray
"""
return self.model.N_ * np.log(x.shape[0]) -\
2 * self.score(x) * x.shape[0]
def aicc(self, x):
r"""Calculate AICc.
deLeeuw, J. (1992), "Introduction to Akaike (1973) information theory
and an extension of the maximum likelihood principle" (PDF),
in Kotz, S.; Johnson, N.L., Breakthroughs in Statistics I, Springer,
pp. 599–609.
AICc = AIC + \frac{2k^2+2k}{N-k-1}
Arguments:
samples: samples of (n_samples, n_features)
Returns:
bic: np.ndarray
"""
        return self.aic(x) +\
            2 * (self.model.N_ ** 2 + self.model.N_) /\
            (x.shape[0] - self.model.N_ - 1)
def score(self, x):
r"""Calculate Likelyhood.
Arguments:
samples: samples of (n_samples, n_features)
Returns:
likelihood: np.ndarray
"""
return self._log_prob(x).mean()
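# Usage sketch (hypothetical fitted `model` exposing Function, Parameters,
# NormalFactor and N_; `samples` is an array of observations):
#   ev = Evaluation(model)
#   ev.aic(samples), ev.bic(samples)  # lower values indicate a better model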
|
{"hexsha": "603abed99b992abc7946158668d8ec5e715dc3bb", "size": 2904, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/Evaluate.py", "max_stars_repo_name": "Shirui816/MultipleDistributionFitting", "max_stars_repo_head_hexsha": "5d3a51383fb8057f725468a5da6bdbc75dc40b99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/Evaluate.py", "max_issues_repo_name": "Shirui816/MultipleDistributionFitting", "max_issues_repo_head_hexsha": "5d3a51383fb8057f725468a5da6bdbc75dc40b99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/Evaluate.py", "max_forks_repo_name": "Shirui816/MultipleDistributionFitting", "max_forks_repo_head_hexsha": "5d3a51383fb8057f725468a5da6bdbc75dc40b99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4736842105, "max_line_length": 78, "alphanum_fraction": 0.541322314, "include": true, "reason": "import numpy", "num_tokens": 796}
|
# Data Preprocessing
# Importing the libraries
# import matplotlib.pyplot as plt
# library pandas offers data structures and operations for manipulating numerical tables and time series
import numpy as np
import pandas as pd
# Importing the dataset
df = pd.read_csv('Data.csv')
X = df.iloc[:, :-1].values
y = df.iloc[:, 3].values
# Taking care of missing data
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values = np.nan, strategy = 'mean')
imp = imp.fit(X[:, 1:3])
X[:, 1:3] = imp.transform(X[:, 1:3])
# GroupBy of DataFrame test
# dfGroup = df.groupby('Purchased')
# for Purchased in dfGroup:
# print(Purchased)
|
{"hexsha": "ee4e13d85aa42d8c27a08cf542b2cf0cd58d1690", "size": 648, "ext": "py", "lang": "Python", "max_stars_repo_path": "Machine Learning A-Z/Part 1 - Data Preprocessing/missing_data.py", "max_stars_repo_name": "SenonLi/LearnPython", "max_stars_repo_head_hexsha": "0d37ed625c623a79daa9c4407751050e683fa3ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Machine Learning A-Z/Part 1 - Data Preprocessing/missing_data.py", "max_issues_repo_name": "SenonLi/LearnPython", "max_issues_repo_head_hexsha": "0d37ed625c623a79daa9c4407751050e683fa3ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Machine Learning A-Z/Part 1 - Data Preprocessing/missing_data.py", "max_forks_repo_name": "SenonLi/LearnPython", "max_forks_repo_head_hexsha": "0d37ed625c623a79daa9c4407751050e683fa3ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 105, "alphanum_fraction": 0.7237654321, "include": true, "reason": "import numpy", "num_tokens": 172}
|
from unittest import TestCase, SkipTest
import sys
from parameterized import parameterized
import numpy as np
import pandas as pd
from holoviews.core import GridMatrix, NdOverlay
from holoviews.element import (
Bivariate,
Distribution,
HexTiles,
Histogram,
Scatter,
)
from hvplot import scatter_matrix
class TestScatterMatrix(TestCase):
def setUp(self):
self.df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd'])
def test_returns_gridmatrix(self):
sm = scatter_matrix(self.df)
self.assertIsInstance(sm, GridMatrix)
def test_wrong_diagonal(self):
with self.assertRaises(ValueError):
scatter_matrix(self.df, diagonal='wrong')
def test_wrong_chart(self):
with self.assertRaises(ValueError):
scatter_matrix(self.df, chart='wrong')
def test_diagonal_default(self):
sm = scatter_matrix(self.df)
self.assertIsInstance(sm['a', 'a'], Histogram)
def test_offdiagonal_default(self):
sm = scatter_matrix(self.df)
self.assertIsInstance(sm['a', 'b'], Scatter)
def test_diagonal_kde(self):
sm = scatter_matrix(self.df, diagonal='kde')
self.assertIsInstance(sm['a', 'a'], Distribution)
def test_offdiagonal_bivariate(self):
sm = scatter_matrix(self.df, chart='bivariate')
self.assertIsInstance(sm['a', 'b'], Bivariate)
def test_offdiagonal_hexbin(self):
sm = scatter_matrix(self.df, chart='hexbin')
self.assertIsInstance(sm['a', 'b'], HexTiles)
def test_diagonal_kwargs_mutually_exclusive(self):
with self.assertRaises(TypeError):
scatter_matrix(self.df, diagonal_kwds=dict(a=1), hist_kwds=dict(a=1))
with self.assertRaises(TypeError):
scatter_matrix(self.df, diagonal_kwds=dict(a=1), density_kwds=dict(a=1))
with self.assertRaises(TypeError):
scatter_matrix(self.df, density_kwds=dict(a=1), hist_kwds=dict(a=1))
def test_diagonal_kwargs(self):
sm = scatter_matrix(self.df, diagonal_kwds=dict(line_color='red'))
self.assertEqual(sm['a', 'a'].opts.get().kwargs['line_color'], 'red')
def test_c(self):
df = self.df.copy(deep=True)
df['e'] = np.random.choice(list('xyz'), size=len(df))
sm = scatter_matrix(df, c='e')
self.assertIsInstance(sm['a', 'a'], NdOverlay)
diag_kdims = sm['a', 'a'].kdims
self.assertEqual(len(diag_kdims), 1)
self.assertEqual(diag_kdims[0].name, 'e')
self.assertIsInstance(sm['a', 'b'], Scatter)
offdiag_vdims = sm['a', 'b'].vdims
self.assertTrue('e' in (d.name for d in offdiag_vdims))
class TestDatashader(TestCase):
def setUp(self):
try:
import datashader # noqa
        except ImportError:
raise SkipTest('Datashader not available')
if sys.maxsize < 2**32:
raise SkipTest('Datashader does not support 32-bit systems')
self.df = pd.DataFrame(np.random.randn(1000, 3), columns=['a', 'b', 'c'])
def test_rasterize_datashade_mutually_exclusive(self):
with self.assertRaises(ValueError):
scatter_matrix(self.df, rasterize=True, datashade=True)
def test_spread_but_no_rasterize_or_datashade(self):
with self.assertRaises(ValueError):
scatter_matrix(self.df, dynspread=True)
with self.assertRaises(ValueError):
scatter_matrix(self.df, spread=True)
with self.assertRaises(ValueError):
scatter_matrix(self.df, dynspread=True, spread=True)
@parameterized.expand([('rasterize',), ('datashade',)])
def test_rasterization(self, operation):
sm = scatter_matrix(self.df, **{operation: True})
dm = sm['a', 'b']
self.assertEqual(dm.callback.operation.name, operation)
dm[()]
self.assertEqual(len(dm.last.pipeline.operations), 3)
@parameterized.expand([('rasterize',), ('datashade',)])
def test_datashade_aggregator(self, operation):
sm = scatter_matrix(self.df, aggregator='mean', **{operation: True})
dm = sm['a', 'b']
dm[()]
self.assertEqual(dm.last.pipeline.operations[-1].aggregator, 'mean')
@parameterized.expand([('spread',), ('dynspread',)])
def test_spread_rasterize(self, operation):
sm = scatter_matrix(self.df, rasterize=True, **{operation: True})
dm = sm['a', 'b']
dm[()]
self.assertEqual(len(dm.last.pipeline.operations), 4)
@parameterized.expand([('spread',), ('dynspread',)])
def test_spread_datashade(self, operation):
sm = scatter_matrix(self.df, datashade=True, **{operation: True})
dm = sm['a', 'b']
dm[()]
self.assertEqual(len(dm.last.pipeline.operations), 4)
@parameterized.expand([('spread',), ('dynspread',)])
def test_spread_kwargs(self, operation):
sm = scatter_matrix(self.df, datashade=True, **{operation: True, 'shape': 'circle'})
dm = sm['a', 'b']
dm[()]
self.assertEqual(dm.last.pipeline.operations[-1].args[0].keywords['shape'], 'circle')
|
{"hexsha": "e11854296f5437afb6c864388ab500aa8ec27cd4", "size": 5153, "ext": "py", "lang": "Python", "max_stars_repo_path": "hvplot/tests/plotting/testscattermatrix.py", "max_stars_repo_name": "vishalbelsare/hvplot", "max_stars_repo_head_hexsha": "e0767f2533daf0ba8ed5b6ea2f28000803d99b91", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 338, "max_stars_repo_stars_event_min_datetime": "2019-11-13T17:17:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:28:54.000Z", "max_issues_repo_path": "hvplot/tests/plotting/testscattermatrix.py", "max_issues_repo_name": "vishalbelsare/hvplot", "max_issues_repo_head_hexsha": "e0767f2533daf0ba8ed5b6ea2f28000803d99b91", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 368, "max_issues_repo_issues_event_min_datetime": "2019-11-13T15:43:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:06:06.000Z", "max_forks_repo_path": "hvplot/tests/plotting/testscattermatrix.py", "max_forks_repo_name": "vishalbelsare/hvplot", "max_forks_repo_head_hexsha": "e0767f2533daf0ba8ed5b6ea2f28000803d99b91", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2019-12-01T17:17:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:53:40.000Z", "avg_line_length": 36.8071428571, "max_line_length": 93, "alphanum_fraction": 0.6367164758, "include": true, "reason": "import numpy", "num_tokens": 1212}
|
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from dateutil.relativedelta import relativedelta
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False})
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
def calc_temps(start_date, end_date):
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
@app.route("/")
def Home():
return (
f"Welcome to the Hawaii Weather API!<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<'start'><br/>"
f"/api/v1.0/<'start'>/<'end'>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
LastDate=session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date)))
LastDate=LastDate[0][0]
LastDate = dt.datetime.strptime(LastDate, '%Y-%m-%d').date()
YearAgo=(LastDate-relativedelta(years=1)).strftime('%Y-%m-%d')
TwelveMonths=session.query(func.strftime("%Y-%m-%d", Measurement.date), Measurement.prcp).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= func.strftime("%Y-%m-%d", YearAgo)).all()
results_dict = {}
for result in TwelveMonths:
results_dict[result[0]] = result[1]
return jsonify(results_dict)
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.name).all()
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
LastDate=session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date)))
LastDate=LastDate[0][0]
LastDate = dt.datetime.strptime(LastDate, '%Y-%m-%d').date()
YearAgo=(LastDate-relativedelta(years=1)).strftime('%Y-%m-%d')
TobsQuery=session.query(Measurement).filter(func.strftime("%Y-%m-%d", Measurement.date) >= func.strftime("%Y-%m-%d", YearAgo)).all()
TobsData = []
for result in TobsQuery:
TobsDict = {}
TobsDict["date"] = result.date
TobsDict["station"] = result.station
TobsDict["tobs"] = result.tobs
TobsData.append(TobsDict)
return jsonify(TobsData)
@app.route("/api/v1.0/<start>")
def start(start):
LastDate=session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date)))
LastDate=LastDate[0][0]
TempData = calc_temps(start, LastDate)
DateRange = []
    date_dict = {'start_date': start, 'end_date': LastDate}
DateRange.append(date_dict)
DateRange.append({'DataPoint': 'TMIN', 'Temperature': TempData[0][0]})
DateRange.append({'DataPoint': 'TAVG', 'Temperature': TempData[0][1]})
DateRange.append({'DataPoint': 'TMAX', 'Temperature': TempData[0][2]})
return jsonify(DateRange)
@app.route("/api/v1.0/<start>/<end>")
def daterange(start, end):
TempData = calc_temps(start, end)
DateRange = []
date_dict = {'start_date': start, 'end_date': end}
DateRange.append(date_dict)
DateRange.append({'DataPoint': 'TMIN', 'Temperature': TempData[0][0]})
DateRange.append({'DataPoint': 'TAVG', 'Temperature': TempData[0][1]})
DateRange.append({'DataPoint': 'TMAX', 'Temperature': TempData[0][2]})
return jsonify(DateRange)
if __name__ == "__main__":
app.run(debug=True)
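# Example requests against the local dev server (illustrative; assumes the
# default Flask port 5000 and dates that exist in hawaii.sqlite):
#   curl http://127.0.0.1:5000/api/v1.0/precipitation
#   curl http://127.0.0.1:5000/api/v1.0/2016-08-01
#   curl http://127.0.0.1:5000/api/v1.0/2016-08-01/2017-08-01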
|
{"hexsha": "6f06a440dd502c3d59e1cd2921303cdcb6a1b0dd", "size": 3733, "ext": "py", "lang": "Python", "max_stars_repo_path": "appHW.py", "max_stars_repo_name": "kristine848/SQL-Alchemy-challenge", "max_stars_repo_head_hexsha": "3cca93b8c46532c3900da6c8013902fb451dae99", "max_stars_repo_licenses": ["ADSL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "appHW.py", "max_issues_repo_name": "kristine848/SQL-Alchemy-challenge", "max_issues_repo_head_hexsha": "3cca93b8c46532c3900da6c8013902fb451dae99", "max_issues_repo_licenses": ["ADSL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appHW.py", "max_forks_repo_name": "kristine848/SQL-Alchemy-challenge", "max_forks_repo_head_hexsha": "3cca93b8c46532c3900da6c8013902fb451dae99", "max_forks_repo_licenses": ["ADSL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7456140351, "max_line_length": 136, "alphanum_fraction": 0.6672917225, "include": true, "reason": "import numpy", "num_tokens": 1001}
|
(******************************************************************************)
(* Project: The Isabelle/UTP Proof System *)
(* File: Time.thy *)
(* Authors: Frank Zeyda and Simon Foster (University of York, UK) *)
(* Emails: frank.zeyda@york.ac.uk and simon.foster@york.ac.uk *)
(******************************************************************************)
(* LAST REVIEWED: 14 Sept 2017 *)
section {* Time Model *}
theory Time
imports
"../optics/Two"
"../continuum/Continuum"
"../utils/Positive"
begin
text {*
The rationale for this theory is to define an abstract model of time that
identifies reasonable assumptions that are sufficient for reasoning about
time, namely without having to specify in detail the notion of time that
  we are dealing with, such as discrete, continuous or super-dense time.
*}
subsection {* Abstract Time *}
text {*
We introduce permissible time domains abstractly as a type class. Clearly,
the elements of the type have to be linearly ordered while membership to
the class @{class semidom_divide} entails many key properties of addition,
subtraction, multiplication and division. Note that we cannot require time
to form a field as there may not be an additive inverse i.e.~if we confine
ourselves to positive time instants. Lastly, we also assume that time does
not stop, meaning that the ordering is unbounded (class @{class no_top});
there may be a bottom though which, if so, must be the same as @{term 0}.
*}
class time = linorder + semidom_divide + no_top
text {*
Positive time makes the additional assumption that the ordering has a bottom
which must be @{const zero}.
*}
class pos_time = time + zero + order_bot +
assumes zero_is_bot: "0 = \<bottom>"
text {*
I wonder if we can get away with weaker assumptions below. It would mean that
we may also be able to instantiate @{typ "int pos"} as both @{class time} and
@{class pos_time} (note that integers do not form a field). If not, this is
not an issue of course, since we can otherwise always use @{typ nat} in place
of the type @{typ "int pos"}.
*}
instance pos :: (linordered_field) time
apply (intro_classes)
done
instantiation pos :: (linordered_field) pos_time
begin
lift_definition bot_pos :: "'a pos"
is "0" ..
instance
apply (intro_classes)
apply (transfer; simp)
apply (transfer; simp)
done
end
subsection {* Discrete Time *}
text {* Naturals, integers and rationals are used to model discrete time. *}
instance nat :: time
apply (intro_classes)
done
instance int :: time
apply (intro_classes)
done
instance rat :: time
apply (intro_classes)
done
instantiation nat :: pos_time
begin
instance
apply (intro_classes)
apply (unfold bot_nat_def)
apply (rule refl)
done
end
subsection {* Continuous Time *}
text {* Reals and positive reals are used to model continuous time. *}
type_notation real ("\<real>")
type_synonym pos_real = "real pos" ("\<real>\<^sup>+")
translations (type) "\<real>\<^sup>+" \<leftharpoondown> (type) "real pos"
instance real :: time
apply (intro_classes)
done
text {*
Membership of @{typ pos_real} to the sort @{class time} follows from the
earlier instantiation of @{typ "'a pos"} as class @{class time}, provided
the type parameter @{typ "'a"} constitutes a @{class linordered_field}.
*}
subsection {* Instantiations *}
text {* Instantiation of class @{class time}. *}
lemma "OFCLASS(nat, time_class)" ..
lemma "OFCLASS(int, time_class)" ..
lemma "OFCLASS(rat, time_class)" ..
lemma "OFCLASS(real, time_class)" ..
lemma "OFCLASS(rat pos, time_class)" ..
lemma "OFCLASS(real pos, time_class)" ..
text {* Instantiation of class @{class pos_time}. *}
lemma "OFCLASS(nat, pos_time_class)" ..
lemma "OFCLASS(rat pos, pos_time_class)" ..
lemma "OFCLASS(real pos, pos_time_class)" ..
text {* Instantiation of class @{class two}. *}
lemma "OFCLASS(nat, two_class)" ..
lemma "OFCLASS(int, two_class)" ..
lemma "OFCLASS(rat, two_class)" ..
lemma "OFCLASS(real, two_class)" ..
lemma "OFCLASS(int pos, two_class)" ..
lemma "OFCLASS(rat pos, two_class)" ..
lemma "OFCLASS(real pos, two_class)" ..
text {* Instantiation of class @{class continuum}. *}
lemma "OFCLASS(nat, continuum_class)" ..
lemma "OFCLASS(int, continuum_class)" ..
lemma "OFCLASS(rat, continuum_class)" ..
lemma "OFCLASS(real, continuum_class)" ..
lemma "OFCLASS(int pos, continuum_class)" ..
lemma "OFCLASS(rat pos, continuum_class)" ..
lemma "OFCLASS(real pos, continuum_class)" ..
end
|
{"author": "isabelle-utp", "repo": "utp-main", "sha": "27bdf3aee6d4fc00c8fe4d53283d0101857e0d41", "save_path": "github-repos/isabelle/isabelle-utp-utp-main", "path": "github-repos/isabelle/isabelle-utp-utp-main/utp-main-27bdf3aee6d4fc00c8fe4d53283d0101857e0d41/fmi/Time.thy"}
|
import numpy as np
from rapt import Re, B0
from scipy.interpolate import RegularGridInterpolator
class _Field:
"""
The superclass for fields. Not used directly, but subclassed. All field-
related data and methods are defined in field objects.
Attributes
----------
gradientstepsize : float
Step size to evaluate spatial derivatives with central differences.
timederivstepsize : float
Step size to evaluate time derivatives with central differences.
static : bool
True if the electric field is zero and the magnetic field is static,
i.e., the fields do not change the speed of the particle.
Notes
-----
    The electric and magnetic fields are accessed with the `E` and `B` methods,
respectively. When subclassing, these need to be overridden. Other methods
defined here are usually extended by subclasses.
All methods take a 4-element array consisting of time and coordinates
(t,x,y,z) as parameter.
All coordinates are Cartesian. SI units are used throughout.
"""
# Matrix to calculate the curl with central differences
_M1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,-1, 0,-1, 0, 0, 1, 0],
[0, 0,-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0,-1, 0, 0],
[0, 1, 0, 0,-1, 0,-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
])
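    # Layout assumed by _M1 (see curlb): beta is the 18-vector stacking the
    # unit field b evaluated at x+d, x-d, y+d, y-d, z+d, z-d (3 components
    # each). Each row of _M1 picks out the central differences for one
    # component of curl b; e.g. row 0 yields
    # b_z(y+d) - b_z(y-d) - b_y(z+d) + b_y(z-d) = 2d*(db_z/dy - db_y/dz).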
def __init__(self):
self.gradientstepsize = 1e-6 # step size to evaluate spatial derivatives with central differences
self.timederivstepsize = 1e-3 # step size to evaluate time derivatives with central differences
        self.static = True # True if dB/dt=0 and E=0, False otherwise. Essentially, True if the particle's speed stays constant (static magnetic field, no electric field), and False otherwise.
def B(self, tpos):
"""
Return the magnetic field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Bx, By, Bz)
"""
return np.zeros(3)
def E(self, tpos):
"""
Return the electric field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Ex, Ey, Ez)
"""
# tpos : 4-element array of time, x, y, z
return np.zeros(3)
def unitb(self, tpos):
"""
Return the direction of the magnetic field.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element unit vector B / |B|.
"""
Bvec = self.B(tpos)
return Bvec / np.sqrt(np.dot(Bvec, Bvec))
def magB(self,tpos):
"""
Return the magnitude of the magnetic field.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
float
The magnetic field strength |B|.
"""
Bvec = self.B(tpos)
return np.sqrt(np.dot(Bvec, Bvec))
def gradB(self,tpos):
"""
Return the gradient of the magnetic field strength.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
            3-element vector :math:`\nabla |B|`
"""
d=self.gradientstepsize
return np.array([
( self.magB(tpos + (0,d,0,0)) - self.magB(tpos - (0,d,0,0)) ) / (2*d),
( self.magB(tpos + (0,0,d,0)) - self.magB(tpos - (0,0,d,0)) ) / (2*d),
( self.magB(tpos + (0,0,0,d)) - self.magB(tpos - (0,0,0,d)) ) / (2*d)
])
def jacobianB(self,tpos):
"""
Return the Jacobian matrix of the magnetic field.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-by-3 array with element (i,j) equal to dB_i / dx_j
"""
d=self.gradientstepsize
result = np.zeros((3,3))
result[:,0] = (self.B(tpos + (0,d,0,0)) - self.B(tpos - (0,d,0,0)) ) / (2*d)
result[:,1] = (self.B(tpos + (0,0,d,0)) - self.B(tpos - (0,0,d,0)) ) / (2*d)
result[:,2] = (self.B(tpos + (0,0,0,d)) - self.B(tpos - (0,0,0,d)) ) / (2*d)
return result
def curvature(self, tpos):
"""
Return the magnetic field line curvature.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
float
            The local field line curvature :math:`|\nabla_\perp B|/|B|`
"""
Bvec = self.B(tpos)
B = np.sqrt(np.dot(Bvec, Bvec))
gB = self.gradB(tpos)
        gBperp = gB - (np.dot(gB, Bvec)/B**2) * Bvec
return np.sqrt(np.dot(gBperp, gBperp))/B
def curlb(self,tpos):
"""
Return the curl of the magnetic field direction.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
            3-element vector :math:`\nabla\times b`
"""
d=self.gradientstepsize
beta = np.concatenate((
self.unitb(tpos + (0,d,0,0)),
self.unitb(tpos - (0,d,0,0)),
self.unitb(tpos + (0,0,d,0)),
self.unitb(tpos - (0,0,d,0)),
self.unitb(tpos + (0,0,0,d)),
self.unitb(tpos - (0,0,0,d))
))
return np.dot(self._M1, beta) / (2*d)
def dBdt(self, tpos): # time derivative of the magnetic field magnitude.
"""
        Return the time derivative of the magnetic field magnitude.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
float
The time derivative d|B|/dt
"""
if self.static:
return 0
else:
d = self.timederivstepsize
B1 = self.magB(tpos - [d,0,0,0])
B2 = self.magB(tpos + [d,0,0,0])
return (B2-B1)/d/2
def dbdt(self, tpos):
"""
Return the time derivative of the magnetic field direction.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element vector db/dt
"""
if self.static:
            return np.zeros(3)
else:
d = self.timederivstepsize
b1 = self.unitb(tpos - [d,0,0,0])
b2 = self.unitb(tpos + [d,0,0,0])
return (b2-b1)/d/2
def lengthscale(self, tpos):
"""
Return the length scale of the change of the magnetic field strength.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
float
            The length scale, |B| divided by the largest absolute entry of the Jacobian of B.
"""
return self.magB(tpos) / np.max(abs(self.jacobianB(tpos)))
def timescale(self, tpos):
"""
Return the time scale of the change of the magnetic field strength.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
float
            The time scale, |B| / |d|B|/dt|; None for static fields.
"""
if self.static:
return None
else:
return self.magB(tpos) / abs(self.dBdt(tpos))
class EarthDipole(_Field):
"""
The class representing the Earth's static dipole with zero tilt angle.
Subclasses `_Field`. Overrides ``gradientstepsize`` and ``B()``.
Parameters
----------
B0 : float, optional
The equatorial field strength at 1 Earth radius.
"""
def __init__(self,B0=B0):
"""
Initialize superclass and override the `gradientstepsize` attribute.
"""
_Field.__init__(self)
self.gradientstepsize = Re*1e-6
self._coeff = -3*B0*Re**3
def B(self,tpos):
"""
Return the magnetic field vector of the Earth's dipole.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Bx, By, Bz)
"""
t,x,y,z = tpos
r2 = x*x+y*y+z*z
return self._coeff / pow(r2, 2.5) * np.array([x*z, y*z, (z*z-r2/3)])
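# Example usage (illustrative sketch, not part of the library): evaluate the
# dipole field on the equatorial plane at r = 5 Re, where the field is purely
# in z with magnitude B0*(Re/r)**3 = B0/125.
#   dipole = EarthDipole()
#   print(dipole.B([0.0, 5*Re, 0.0, 0.0]))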
class DoubleDipole(_Field):
"""
Field of two Earth dipoles with parallel magnetic moments.
The dipole at x=y=0 represents Earth, and the dipole at x = distance is an
"image dipole", whose field compresses the dipole at origin, simulating
the dayside compression of the magnetosphere.
Parameters
----------
B0 : float, optional
Dipole field strength at the equator (1 Re).
distance : float, optional
The distance between the two dipoles. Default 20 Re.
imagestrength : float, optional
The relative strength of the image dipole. Must be >=1. Default 1.
"""
def __init__(self, B0=B0, distance=20*Re, imagestrength=1):
_Field.__init__(self)
self.gradientstepsize = Re/1000
self._dd = distance # distance between two dipoles
assert imagestrength >= 1
self._k = imagestrength # >=1. Relative strength of the image dipole
self._coeff = -B0*Re**3
def B(self, tpos):
"""
Return the magnetic field vector of the double-dipole model.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Bx, By, Bz)
"""
t,x,y,z = tpos
B1 = np.array([3*x*z, 3*y*z, (2*z*z -x*x- y*y)]) / pow(x*x+y*y+z*z, 5.0/2.0)
x -= self._dd
B2 = self._k * np.array([3*x*z, 3*y*z, (2*z*z -x*x- y*y)]) / pow(x*x+y*y+z*z, 5.0/2.0)
return self._coeff*(B1+B2)
class UniformBz(_Field):
"""
Uniform static magnetic field in the z-direction, B = (0,0,Bz).
Parameters
----------
Bz : float, optional
The constant field strength value in the z-direction. Default 1 T.
"""
def __init__(self, Bz=1):
_Field.__init__(self)
self.Bz = Bz
def B(self,tpos):
"""
Return the uniform magnetic field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (0, 0, Bz)
"""
return np.array((0,0,self.Bz))
class UniformCrossedEB(UniformBz):
"""
Perpendicular uniform static electric and magnetic fields.
E = (0,Ey,0), B = (0,0,Bz)
Extends `UniformBz`. Sets `static` to ``False``.
Parameters
----------
Ey : float, optional
The constant electric field value in the y-direction (V/m). Default 1.
Bz : float, optional
        The constant magnetic field value in the z-direction (T). Default 1.
"""
# Uniform electric field in y-direction and uniform magnetic field in z-direction.
def __init__(self, Ey=1, Bz=1):
UniformBz.__init__(self)
self.static = False
self.Ey = Ey
self.Bz = Bz
def E(self,tpos):
"""
Return the uniform electric field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (0, Ey, 0)
"""
return np.array((0,self.Ey,0))
class VarEarthDipole(_Field):
"""
Time-varying Earth dipole.
    The magnetic moment oscillates sinusoidally around the nominal value.
Illustrates time-dependent field setup.
The induced electric field is ignored.
Extends `_Field`. Sets ``static`` to False.
Parameters
----------
amp : float, optional
The relative amplitude of the oscillations. Default 0.1. Unitless.
period : float, optional
The period of oscillations, in seconds. Default 10.
"""
# Variable Earth dipole, as an example of time-dependent field.
# Strength sinusoidally oscillating in time around the nominal value.
def __init__(self,amp=0.1,period=10):
_Field.__init__(self)
self.gradientstepsize = Re/1000
self.static = False
self._amp = amp
self._period = period
def B(self,tpos):
"""
Return the variable Earth dipole magnetic field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Bx, By, Bz) at time t.
"""
t,x,y,z = tpos
return -B0*Re**3 * (1+self._amp*np.sin(2*np.pi*t/self._period)) * np.array([3*x*z, 3*y*z, (2*z*z -x*x- y*y)]) / pow(x*x+y*y+z*z, 5.0/2.0)
class Parabolic(_Field):
"""
The parabolic magnetic field model imitating the current sheet in the
magnetotail.
Parameters
----------
B0 : float, optional
The scale of the x-component of the field. Default 10.
Bn : float, optional
The z-component of the field. Default 1.
d : float, optional
The length scale, default 0.2. The field x-component increases by B0
when we move by d in z-direction.
Notes
-----
    This model has the form :math:`B = (B0 z/d, 0, Bn)` if :math:`|z| <= 1` and
    :math:`B = (sign(z) B0/d, 0, Bn)` otherwise. The field lines have a parabolic shape.
Particles exhibit Speiser orbits, cucumber orbits and serpentine orbits.
The parabolic model is well suited to testing the `Adaptive` mode because of
the localized nonadiabaticity near z=0.
"""
# A parabolic model field imitating the tail.
def __init__(self, B0=10.0, Bn=1.0, d=0.2):
_Field.__init__(self)
self.B0 = B0
self.Bn = Bn
self.d = d
def B(self, tpos):
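        """
        Return the parabolic-model magnetic field vector.
        Parameters
        ----------
        tpos : array-like
            4-element array of time and x,y,z coordinates.
        Returns
        -------
        array
            3-element array (Bx, By, Bz)
        """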
z = tpos[3]
if abs(z)<=1.0:
return np.array([self.B0*z/self.d, 0, self.Bn])
else:
            return np.array([np.sign(z)*self.B0/self.d, 0, self.Bn])
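# Example (illustrative sketch): the x-component reverses across the current
# sheet at z = 0. With the defaults B0=10, Bn=1, d=0.2:
#   tail = Parabolic()
#   tail.B([0, 0, 0, 0.1])    # -> [ 5., 0., 1.]   (inside the sheet, z/d = 0.5)
#   tail.B([0, 0, 0, -2.0])   # -> [-50., 0., 1.]  (saturated below the sheet)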
class Grid(_Field):
"""
A superclass for using fields sampled on a Cartesian grid.
Extends the `_Field` class.
Not for direct use; should be subclassed. The derived class should override
the `parsefile`, `E`, and `B` methods.
Parameters
----------
filelist : list of str
A list of file names storing the field grid data, one for each time
instant. The list must be ordered with respect to time.
Raises
------
ValueError
If the requested time or coordinates are out of bounds.
Notes
-----
Each file in the given list contains the electric and magnetic field data on
discrete grid points. The details must be handled by the `parsefile` method.
When a new model is implemented, users must override this method using
the details and storage format of the data file they use.
The `parsefile` method must return a dictionary with the following keys:
* "time" : The time of the data, float.
* "x" : 1D array of grid x-coordinates
* "y" : 1D array of grid y-coordinates
* "z" : 1D array of grid z-coordinates
* "Bx" : 3D array of Bx values (similarly "By", "Bz")
* "Ex" : 3D array of Ex values (similarly "Ey", "Ez")
In a given `Grid` instance, the grid point coordinates must be the same for
all files in the list. However, uniform spacing is not required.
The interpolated field vectors are accessed with `Egrid` and `Bgrid` methods. The `E` and `B` methods can be overridden for further tweaking; such as adding a dipole component, or handling missing regions using the field symmetry.
The class creates a 4-D linear interpolation for each of the six field components (Ex, Ey, Ez, Bx, By, Bz). Usually each data file is big, and MHD models are evaluated over a long period of time, resulting in many big data files. Loading the entire data set at once could be impossible for users with only several GB of memory. So the `Grid` object loads only the first three files when initialized, and then updates the interpolation as the tracer moves. There is always at most three time points in the interpolator.
.. warning:: Once the field interpolator is updated, it forgets about earlier times. So after a tracer has advanced sufficiently, if we initialize another tracer, we will get a `ValueError` because the time for the new tracer is out of bounds.
If the list contains only two files, a linear interpolation is done between two time points. Updates are not applicable.
If the input list contains a single file, the field is considered independent of time. The interpolation is only 3-dimensional. The `_time_indep` attribute is set to ``True``. The methods `Egrid` and `Bgrid` adjust their behavior accordingly.
"""
def __init__(self, filelist):
"""
Grid constructor.
Parameters
----------
filelist: list of str
The list of files where grid data is stored, one for each time point, in order of time. Length at least one.
Parses the first three files and sets up the interpolator.
"""
assert len(filelist)>0
_Field.__init__(self)
self.gradientstepsize = 1e-3*Re
self.files = filelist[:]
self._time_indep = False
if len(self.files) >= 3:
# parse the first three files and interpolate
g0 = self.parsefile(self.files[0])
self.g1 = self.parsefile(self.files[1]) # save for later use
self.g2 = self.parsefile(self.files[2]) # save for later use
self._set_interpolator(g0, self.g1, self.g2)
del self.files[:3]
elif len(self.files) == 2:
# parse the first two files and interpolate
g0 = self.parsefile(self.files[0])
g1 = self.parsefile(self.files[1])
self._set_interpolator(g0,g1)
del self.files[:2]
elif len(self.files)==1:
# Parse the file and set up time-independent fields.
            self._time_indep = True # used in Bgrid() and Egrid()
g0 = self.parsefile(self.files[0])
self._set_interpolator(g0)
def parsefile(self, filename):
"""
Parse one data file that stores the field data at one time point.
Parameters
----------
filename : str
The name of the file storing the grid data.
Notes
-----
        The code of this method depends on the details of how the data is stored. When `Grid` is subclassed, users should override this method as appropriate.
The method should return a dictionary with at least the following keys:
* "time" : The time of the data, float.
* "x" : 1D array of grid x-coordinates
* "y" : 1D array of grid y-coordinates
* "z" : 1D array of grid z-coordinates
* "Bx", "By", "Bz" : 3D arrays of Bx, By, Bz values
* "Ex", "Ey", "Ez" : 3D arrays of Ex, Ey, Ez values
All values should be in SI units.
"""
g = dict()
return g
def _set_interpolator(self, *glist):
"""
Set up the interpolators for field components, given parsed data.
Takes 1,2 or 3 data dictionaries generated by `parsefile`.
"""
# Sets the interpolators for field components.
assert 1<=len(glist)<=3
g0 = glist[0]
self.t0 = g0["time"]
tlist = [self.t0]
xg, yg, zg = g0["x"], g0["y"], g0["z"]
nx, ny, nz = g0["Bx"].shape
if len(glist) == 1: # One data file, time-independent, interpolation on 3D.
Bx = g0["Bx"][:,:,:]
By = g0["By"][:,:,:]
Bz = g0["Bz"][:,:,:]
Ex = g0["Ex"][:,:,:]
Ey = g0["Ey"][:,:,:]
Ez = g0["Ez"][:,:,:]
# The following are called with three arguments only: Bxt_interp(x,y,z)
self.Bxt_interp = RegularGridInterpolator( (xg,yg,zg), Bx)
self.Byt_interp = RegularGridInterpolator( (xg,yg,zg), By)
self.Bzt_interp = RegularGridInterpolator( (xg,yg,zg), Bz)
self.Ext_interp = RegularGridInterpolator( (xg,yg,zg), Ex)
self.Eyt_interp = RegularGridInterpolator( (xg,yg,zg), Ey)
self.Ezt_interp = RegularGridInterpolator( (xg,yg,zg), Ez)
else: # Two or three data files given. Time-dependent, 4D interpolation.
Bxt = np.zeros(( len(glist), nx,ny,nz))
Byt = np.zeros(( len(glist), nx,ny,nz))
Bzt = np.zeros(( len(glist), nx,ny,nz))
Ext = np.zeros(( len(glist), nx,ny,nz))
Eyt = np.zeros(( len(glist), nx,ny,nz))
Ezt = np.zeros(( len(glist), nx,ny,nz))
Bxt[0,:,:,:] = g0["Bx"][:,:,:]
Byt[0,:,:,:] = g0["By"][:,:,:]
Bzt[0,:,:,:] = g0["Bz"][:,:,:]
Ext[0,:,:,:] = g0["Ex"][:,:,:]
Eyt[0,:,:,:] = g0["Ey"][:,:,:]
Ezt[0,:,:,:] = g0["Ez"][:,:,:]
if len(glist) >= 2:
g1 = glist[1]
self.t1 = g1["time"]
tlist.append(self.t1)
Bxt[1,:,:,:] = g1["Bx"][:,:,:]
Byt[1,:,:,:] = g1["By"][:,:,:]
Bzt[1,:,:,:] = g1["Bz"][:,:,:]
Ext[1,:,:,:] = g1["Ex"][:,:,:]
Eyt[1,:,:,:] = g1["Ey"][:,:,:]
Ezt[1,:,:,:] = g1["Ez"][:,:,:]
if len(glist) == 3:
g2 = glist[2]
self.t2 = g2["time"]
tlist.append(self.t2)
Bxt[2,:,:,:] = g2["Bx"][:,:,:]
Byt[2,:,:,:] = g2["By"][:,:,:]
Bzt[2,:,:,:] = g2["Bz"][:,:,:]
Ext[2,:,:,:] = g2["Ex"][:,:,:]
Eyt[2,:,:,:] = g2["Ey"][:,:,:]
Ezt[2,:,:,:] = g2["Ez"][:,:,:]
# The following are called with 4 arguments: Bxt_interp(t,x,y,z)
self.Bxt_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Bxt)
self.Byt_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Byt)
self.Bzt_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Bzt)
self.Ext_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Ext)
self.Eyt_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Eyt)
self.Ezt_interp = RegularGridInterpolator( (tlist,xg,yg,zg), Ezt)
def _update_interpolator(self):
"""
Parses the next grid data and repeats interpolation with the last triplet of data.
Removes the first file name from the list (oldest time).
"""
assert len(self.files) >= 1
g0 = self.g1
self.g1 = self.g2
self.g2 = self.parsefile(self.files[0])
self._set_interpolator(g0, self.g1, self.g2)
del self.files[0]
def Bgrid(self, tpos):
"""
Return the interpolated magnetic field vector.
Parameters
----------
tpos : array-like
4-element vector of time and position x,y,z.
Returns
-------
array
3-element array (Bx,By,Bz) of the magnetic field at the specified time and position.
Notes
-----
When called with time > (t1+t2)/2, where t0,t1,t2 are three time interpolation points,
calls `_update_interpolator`. The interpolation is redone with grids at times t1,t2,t3.
The `B` method is a wrapper around `Bgrid`. If further processing is required, override `B` when subclassing.
"""
if self._time_indep:
Bx = self.Bxt_interp(tpos[1:])[0]
By = self.Byt_interp(tpos[1:])[0]
Bz = self.Bzt_interp(tpos[1:])[0]
else:
if self.files and tpos[0] > (self.t1 + self.t2)/2:
self._update_interpolator()
Bx = self.Bxt_interp(tpos)[0]
By = self.Byt_interp(tpos)[0]
Bz = self.Bzt_interp(tpos)[0]
return np.array([Bx,By,Bz])
def Egrid(self, tpos):
"""
Return the interpolated electric field vector.
Parameters
----------
tpos : array-like
4-element vector of time and position x,y,z.
Returns
-------
array
3-element array (Ex,Ey,Ez) of the electric field at the specified time and position.
Notes
-----
When called with time > (t1+t2)/2, where t0,t1,t2 are three time interpolation points,
calls `_update_interpolator`. The interpolation is redone with grids at times t1,t2,t3.
The `E` method is a wrapper around `Egrid`. If further processing is required, override `E` when subclassing.
"""
if self._time_indep:
Ex = self.Ext_interp(tpos[1:])[0]
Ey = self.Eyt_interp(tpos[1:])[0]
Ez = self.Ezt_interp(tpos[1:])[0]
else:
if self.files and tpos[0] > (self.t1 + self.t2)/2:
self._update_interpolator()
Ex = self.Ext_interp(tpos)[0]
Ey = self.Eyt_interp(tpos)[0]
Ez = self.Ezt_interp(tpos)[0]
return np.array([Ex,Ey,Ez])
def B(self,tpos): # Override when subclassing
"""
Return the magnetic field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Bx, By, Bz)
Notes
-----
Currently a wrapper around Bgrid. When subclassing, add further processing in this method as needed.
"""
res = self.Bgrid(tpos)
# You can add further processing here; e.g. add a dipole component if necessary.
return res
def E(self,tpos): # Override when subclassing
"""
Return the electric field vector.
Parameters
----------
tpos : array-like
4-element array of time and x,y,z coordinates.
Returns
-------
array
3-element array (Ex, Ey, Ez)
Notes
-----
Currently a wrapper around Egrid. When subclassing, add further processing in this method as needed.
"""
res = self.Egrid(tpos)
return res
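# Minimal sketch of subclassing Grid (illustrative only; the .npz layout and
# file names below are hypothetical assumptions, not part of rapt):
#
# class MyGrid(Grid):
#     def parsefile(self, filename):
#         # Assume each snapshot is an .npz archive already in SI units,
#         # holding exactly the keys that Grid requires.
#         data = np.load(filename)
#         g = {key: data[key] for key in
#              ("x", "y", "z", "Bx", "By", "Bz", "Ex", "Ey", "Ez")}
#         g["time"] = float(data["time"])
#         return g
#
# field = MyGrid(["snap_0000.npz", "snap_0001.npz", "snap_0002.npz"])
# field.B([0.0, 2*Re, 0.0, 0.0])   # interpolated B at t=0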
|
{"hexsha": "79eff2c7fcd843cee60c244c17ac5535481bd75e", "size": 27621, "ext": "py", "lang": "Python", "max_stars_repo_path": "rapt/fields.py", "max_stars_repo_name": "mkozturk/rapt", "max_stars_repo_head_hexsha": "cb293ac98d2d7707baf822b4e0efe18b2355f35c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-12T09:44:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T09:44:56.000Z", "max_issues_repo_path": "rapt/fields.py", "max_issues_repo_name": "mkozturk/rapt", "max_issues_repo_head_hexsha": "cb293ac98d2d7707baf822b4e0efe18b2355f35c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rapt/fields.py", "max_forks_repo_name": "mkozturk/rapt", "max_forks_repo_head_hexsha": "cb293ac98d2d7707baf822b4e0efe18b2355f35c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.890797546, "max_line_length": 522, "alphanum_fraction": 0.5362586438, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7180}
|
SUBROUTINE ECCMOD(I,ITERM)
*
*
* Eccentricity modulation of hierarchical binary.
* -----------------------------------------------
*
INCLUDE 'common6.h'
COMMON/BINARY/ CM(4,MMAX),XREL(3,MMAX),VREL(3,MMAX),
& HM(MMAX),UM(4,MMAX),UMDOT(4,MMAX),TMDIS(MMAX),
& NAMEM(MMAX),NAMEG(MMAX),KSTARM(MMAX),IFLAGM(MMAX)
REAL*8 BODYI(2),W(2)
DATA ITIME,IDELAY /1,0/
SAVE ITIME,IDELAY
*
*
* Determine merger & ghost index.
ITERM = 0
CALL FINDJ(I,IGHOST,IM)
*
* Quit for tidal dissipation or circular orbit (TMDIS set by IMPACT).
IF (KSTARM(IM).EQ.-2.OR.KSTARM(IM).GE.10) THEN
GO TO 50
END IF
*
* Initialize delay indicator and pair index.
IQ = 0
IPAIR = I - N
*
* Skip on hyperbolic outer orbit, double merger or circularized orbit.
IF (H(IPAIR).GT.0.0.OR.NAME(I).LT.-2*NZERO.OR.
& KSTARM(IM).EQ.10) THEN
TMDIS(IM) = TIME + 10.0/TSTAR
GO TO 50
END IF
*
* Resolve coordinates & velocities (first time only).
CALL RESOLV(IPAIR,1)
*
* Form inner distance, square KS velocity and radial velocity term.
RI = 0.0
V20 = 0.0
TD2 = 0.0
DO 5 K = 1,4
RI = RI + UM(K,IM)**2
V20 = V20 + UMDOT(K,IM)**2
TD2 = TD2 + 2.0*UM(K,IM)*UMDOT(K,IM)
5 CONTINUE
*
* Evaluate inner semi-major axis and eccentricity.
ZMB = CM(1,IM) + CM(2,IM)
SEMI = -0.5*ZMB/HM(IM)
IF (SEMI.LE.0.0) THEN
TMDIS(IM) = TIME + 1.0
          WRITE(3,*)' ECCMOD ERROR ',SEMI,H(IPAIR),HM(IM),ZMB
GO TO 50
END IF
ECC2 = (1.0 - RI/SEMI)**2 + TD2**2/(ZMB*SEMI)
ECC = SQRT(ECC2)
*
* Obtain growth time and modify KS elements from de/dt & dh/dt.
I1 = 2*IPAIR - 1
CALL HIGROW(I1,IGHOST,IM,ECC,SEMI,EMAX,EMIN,TG,EDAV,ZI,IQ)
*
* Check termination for new CHAOS & SPIRAL or collision.
IF (IQ.LT.0) THEN
ITERM = 1
ITIME = 1
GO TO 50
END IF
*
* Delay for long growth times or aged active SPIRAL.
IF ((KSTARM(IM).GE.0.AND.TG.GT.20.0).OR.IQ.GT.0.OR.
& (KSTARM(IM).EQ.-2.AND.MAX(ECC,EMAX).LT.0.9).OR.
& (KSTARM(IM).LT.0.AND.ECC.LT.0.1)) THEN
RM = MAX(RADIUS(I1),RADIUS(IGHOST),1.0D-20)
IDELAY = IDELAY + 1
IF (EMAX.GT.0.99.AND.MOD(IDELAY,10).EQ.0) THEN
ALPH = 360.0*ZI/TWOPI
WRITE (6,10) NAME(I1), IQ, KSTARM(IM), LIST(1,I1), ECC,
& EMAX, TG, SEMI*(1.0-ECC)/RM, ALPH
END IF
10 FORMAT (' ECCMOD DELAY NAM IQ K* NP E EMAX TG QP/R* IN ',
& I6,3I4,2F8.4,F8.3,2F7.1)
DT = 10.0
IF (LIST(1,I1).GT.0.AND.SEMI*(1.0-ECC).LT.5.0*RM) THEN
DT = 1.0
END IF
TMDIS(IM) = TIME + MAX(TG,DT)/TSTAR
ITIME = 1
GO TO 50
END IF
*
* Estimate current t_{circ} and de/dt for relevant condition.
PMIN = SEMI*(1.0 - ECC)
RM = MAX(RADIUS(I1),RADIUS(IGHOST),1.0D-20)
IF (PMIN.LT.50.0*RM) THEN
BODYI(1) = CM(1,IM)
BODYI(2) = CM(2,IM)
CALL HICIRC(PMIN,ECC,I1,IGHOST,BODYI,TG,TC,EC,EDT,W)
ELSE
TC = 1.0D+10
EDT = 1.0D-10
END IF
*
* Include diagnostics every 5000th time.
IF (ITIME.EQ.1.OR.MOD(ITIME,5000).EQ.0) THEN
A1 = -0.5*BODY(I)/H(IPAIR)
E2 = (1.0 - R(IPAIR)/A1)**2 + TDOT2(IPAIR)**2/(A1*BODY(I))
ECC1 = SQRT(E2)
ZID = 360.0*ZI/TWOPI
NP = LIST(1,I1)
YC = R0(IPAIR)/SEMI
WRITE (6,15) NAME(I1), NP, TTOT, ECC, EMAX, ECC1, PMIN/RM,
& YC, TG, TC, EDAV, ZID
15 FORMAT (' ECCMOD NM NP T E EX E1 QP/R* PC/A TG TC EDA IN ',
& I6,I4,F11.4,3F8.4,2F7.1,1P,3E9.1,0P,F9.3)
CALL FLUSH(3)
END IF
*
NEMOD = NEMOD + 1
ITIME = ITIME + 1
*
* Include termination for expected short circularization time.
IF (KSTARM(IM).GE.0.AND.PMIN.LT.3.0*RM) THEN
ZID = 360.0*ZI/TWOPI
WRITE (6,40) ITIME, EMAX, ECC, SEMI*(1.0 - ECC)/RM, ZID, TC
40 FORMAT (' ECCMOD TERM IT EX E QP/R IN TC ',
& I5,2F9.5,F6.2,F8.2,F8.1)
CALL FLUSH(3)
ITERM = 1
ITIME = 1
END IF
*
50 RETURN
*
END
|
{"hexsha": "6d9aea6f43a8754d4077ea81665bfd6cb3ca5c7e", "size": 4471, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/amuse/community/nbody6xx/src/eccmod.f", "max_stars_repo_name": "rknop/amuse", "max_stars_repo_head_hexsha": "85d5bdcc29cfc87dc69d91c264101fafd6658aec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 131, "max_stars_repo_stars_event_min_datetime": "2015-06-04T09:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T12:11:29.000Z", "max_issues_repo_path": "src/amuse/community/nbody6xx/src/eccmod.f", "max_issues_repo_name": "rknop/amuse", "max_issues_repo_head_hexsha": "85d5bdcc29cfc87dc69d91c264101fafd6658aec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 690, "max_issues_repo_issues_event_min_datetime": "2015-10-17T12:18:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:15:58.000Z", "max_forks_repo_path": "src/amuse/community/nbody6xx/src/eccmod.f", "max_forks_repo_name": "rieder/amuse", "max_forks_repo_head_hexsha": "3ac3b6b8f922643657279ddee5c8ab3fc0440d5e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 102, "max_forks_repo_forks_event_min_datetime": "2015-01-22T10:00:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T13:29:43.000Z", "avg_line_length": 32.6350364964, "max_line_length": 76, "alphanum_fraction": 0.5001118318, "num_tokens": 1687}
|
import numpy as np
import pytest
import tinynn as tn
tn.seeder.random_seed(31)
@pytest.fixture(name="mock_dataset")
def fixture_mock_dataset():
X = np.random.normal(size=(100, 5))
y = np.random.uniform(size=(100, 1))
return X, y
@pytest.fixture(name="mock_img_dataset")
def fixture_mock_img_dataset():
X = np.random.normal(size=(100, 8, 8, 1))
y = np.random.uniform(size=(100, 1))
return X, y
@pytest.fixture(name="dense_model")
def fixture_dense_model():
net = tn.net.Net([tn.layer.Dense(10), tn.layer.Dense(1)])
loss = tn.loss.MSE()
opt = tn.optimizer.SGD()
return tn.model.Model(net, loss, opt)
@pytest.fixture(name="conv_model")
def fixture_conv_model():
net = tn.net.Net([
tn.layer.Conv2D(kernel=[3, 3, 1, 2]),
tn.layer.MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
tn.layer.Conv2D(kernel=[3, 3, 2, 4]),
tn.layer.MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
tn.layer.Flatten(),
tn.layer.Dense(1)
])
loss = tn.loss.MSE()
opt = tn.optimizer.SGD()
return tn.model.Model(net, loss, opt)
def _test_parameter_change(model, X, y):
pred = model.forward(X)
loss, grads = model.backward(pred, y)
    # make sure the parameters do change after applying gradients
params_before = model.net.params.values
model.apply_grads(grads)
params_after = model.net.params.values
for p1, p2 in zip(params_before, params_after):
assert np.all(p1 != p2)
def test_parameters_change_dense_model(dense_model, mock_dataset):
_test_parameter_change(dense_model, *mock_dataset)
def test_parameter_change_conv_model(conv_model, mock_img_dataset):
_test_parameter_change(conv_model, *mock_img_dataset)
def _test_backprop(model, X, y):
previous_loss = np.inf
for _ in range(50):
pred = model.forward(X)
loss, grads = model.backward(pred, y)
model.apply_grads(grads)
# loss should decrease monotonically
assert loss < previous_loss
previous_loss = loss
def test_backprop_dense(dense_model, mock_dataset):
_test_backprop(dense_model, *mock_dataset)
def test_backprop_conv(conv_model, mock_img_dataset):
_test_backprop(conv_model, *mock_img_dataset)
|
{"hexsha": "8d117c26d6ddf50bc882f2e094b9562629305b38", "size": 2246, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/others/test_functionality.py", "max_stars_repo_name": "lx120/tinynn", "max_stars_repo_head_hexsha": "88b941a706700ca7f6b1cc4ae7f271df7049348c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/others/test_functionality.py", "max_issues_repo_name": "lx120/tinynn", "max_issues_repo_head_hexsha": "88b941a706700ca7f6b1cc4ae7f271df7049348c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/others/test_functionality.py", "max_forks_repo_name": "lx120/tinynn", "max_forks_repo_head_hexsha": "88b941a706700ca7f6b1cc4ae7f271df7049348c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7283950617, "max_line_length": 67, "alphanum_fraction": 0.6821015138, "include": true, "reason": "import numpy", "num_tokens": 587}
|
# Use the Dask executor scheduler with a threadpool executor
import dask.array as da
import numpy as np
from dask_executor_scheduler import executor_scheduler
if __name__ == '__main__':
x = da.random.random((10000, 1000), chunks=(1000, 1000))
y = np.sum(x, axis=1)
z = y.compute(scheduler=executor_scheduler)
print(z)
print(z.shape)
|
{"hexsha": "59229956eab17f48f99e5bd917f53c715d242643", "size": 357, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/threadpool_executor.py", "max_stars_repo_name": "arontsang/dask-executor-scheduler", "max_stars_repo_head_hexsha": "c502f128cbad7421493b1dc2a70ee2ce723afaa9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-19T03:25:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-04T18:59:06.000Z", "max_issues_repo_path": "examples/threadpool_executor.py", "max_issues_repo_name": "arontsang/dask-executor-scheduler", "max_issues_repo_head_hexsha": "c502f128cbad7421493b1dc2a70ee2ce723afaa9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-08T07:10:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-08T07:10:23.000Z", "max_forks_repo_path": "examples/threadpool_executor.py", "max_forks_repo_name": "arontsang/dask-executor-scheduler", "max_forks_repo_head_hexsha": "c502f128cbad7421493b1dc2a70ee2ce723afaa9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-17T03:01:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-24T19:45:35.000Z", "avg_line_length": 23.8, "max_line_length": 60, "alphanum_fraction": 0.7170868347, "include": true, "reason": "import numpy", "num_tokens": 97}
|
"""flat.py
Provides alternative functions to hdbscan.HDBSCAN and others to
1. Allow prediction on a flat clustering by specifying 'n_clusters'.
This is done by choosing the best cluster_selection_epsilon that produces
the required number of clusters without adding unnecessary outliers.
2. Makes approximate_predict, membership_vector, and
all_points_membership_vectors consistent with cluster_selection_epsilon
Provides the following functions:
==================================
HDBSCAN_flat: trained HDBSCAN instance with 'n_clusters' clusters
The attributes (labels, probabilities, prediction_data) are tuned to
produce 'n_clusters' clusters.
approximate_predict_flat: labels and probabilities for novel points
Allows selecting n_clusters for novel points, or using the
original clustering (potentially specified using cluster_selection_epsilon)
membership_vector_flat: Soft-clustering probabilities for novel points
Similar to approximate_predict_flat, but for soft-clustering.
**Use with caution**
all_points_membership_vectors_flat: Soft-clustering probabilities
Similar to membership_vector_flat, but for points in training set
**Use with caution**
"""
import copy
from warnings import warn
import numpy as np
from ._hdbscan_tree import compute_stability, get_cluster_tree_leaves
from .hdbscan_ import HDBSCAN, _tree_to_labels
from .plots import _bfs_from_cluster_tree
from .prediction import (PredictionData,
_find_cluster_and_probability,
_find_neighbor_and_lambda)
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
def HDBSCAN_flat(X, n_clusters=None,
cluster_selection_epsilon=0.,
clusterer=None, inplace=False, **kwargs):
"""
Train a HDBSCAN clusterer by specifying n_clusters.
Or, modify a trained clusterer to return specific n_clusters.
Parameters
----------
X: array-like
Data to be passed to HDBSCAN for training.
n_clusters: int, default=None
Number of clusters to produce.
If None, revert to default HDBSCAN
cluster_selection_epsilon: float, default=0.
core-distance below which to stop splitting clusters.
This can indirectly impose n_clusters.
This argument is ignored if n_clusters is supplied.
clusterer: HDBSCAN, default=None
If supplied, modify this clusterer to produce n_clusters clusters.
inplace: bool, default=False
If 'clusterer' parameter is supplied, and inplace is True,
modify the previous clusterer inplace.
If False, return a modified copy of the previous clusterer.
**kwargs: keyword arguments
All init arguments for HDBSCAN
Returns
-------
new_clusterer: HDBSCAN
New HDBSCAN instance; returned irrespective of inplace=True or False
Usage
-----
# Extract flat clustering from HDBSCAN's hierarchy for 7 clusters
clusterer = HDBSCAN_flat(X_train, n_clusters=7,
min_cluster_size=12, min_samples=8)
labels = clusterer.labels_
proba = clusterer.probabilities_
# Use a previously initialized/trained HDBSCAN
old_clusterer = HDBSCAN(min_cluster_size=12, min_samples=8)
clusterer = HDBSCAN_flat(X_train, n_clusters=7,
clusterer=old_clusterer, inplace=True)
labels = clusterer.labels_
proba = clusterer.probabilities_
See Also
---------
:py:func:`hdbscan.HDBSCAN`
:py:func:`re_init`
"""
# Handle the trivial case first.
if (n_clusters is None) and (cluster_selection_epsilon == 0.):
if (not isinstance(clusterer, HDBSCAN)) or (not inplace):
# Always generate prediction_data to avoid later woes
kwargs['prediction_data'] = True
new_clusterer = HDBSCAN(**kwargs)
else:
new_clusterer = clusterer
new_clusterer.prediction_data = True
new_clusterer.fit(X)
return new_clusterer
if (n_clusters is not None) and (cluster_selection_epsilon != 0.):
warn(f"'cluster_selection_epsilon' (={cluster_selection_epsilon})"
f" is ignored when 'n_clusters' is supplied.")
cluster_selection_epsilon = 0.
# This will later be chosen according to n_clusters
if not isinstance(clusterer, HDBSCAN):
# Initialize and train clusterer if one was not previously supplied.
# Always generate prediction data
kwargs['prediction_data'] = True
new_clusterer = HDBSCAN(**kwargs)
# We do not pass cluster_selection_epsilon here.
# While this adds unnecessary computation, it makes the code
# easier to read and debug.
new_clusterer.fit(X)
else:
if inplace:
new_clusterer = clusterer
else:
new_clusterer = copy.deepcopy(clusterer)
new_clusterer.prediction_data = True
# Train on 'X'. Do this even if the supplied clusterer was trained,
# because we want to make sure it fits 'X'.
new_clusterer.prediction_data = True
new_clusterer.fit(X)
if new_clusterer.cluster_selection_method == 'eom':
max_eom_clusters = len(
new_clusterer.condensed_tree_._select_clusters())
# Pick an epsilon value right after a split produces n_clusters,
    # and then don't split further for smaller epsilon (larger lambda)
if n_clusters is not None:
if ((new_clusterer.cluster_selection_method == 'eom') and
(n_clusters > max_eom_clusters)):
warn(f"Cannot predict more than {max_eom_clusters} with cluster "
"selection method 'eom'. Changing to method 'leaf'...")
new_clusterer.cluster_selection_method = 'leaf'
epsilon = select_epsilon(new_clusterer.condensed_tree_, n_clusters)
else:
# Or use the specified cluster_selection_epsilon
epsilon = cluster_selection_epsilon
new_clusterer.cluster_selection_epsilon = float(epsilon)
# Extract tree related stuff, in order to re-assign labels
single_linkage_tree = new_clusterer.single_linkage_tree_
single_linkage_tree = single_linkage_tree.to_numpy()
min_cluster_size = new_clusterer.min_cluster_size
cluster_selection_method = new_clusterer.cluster_selection_method
allow_single_cluster = new_clusterer.allow_single_cluster
match_reference_implementation = False
# Get labels according to the required cluster_selection_epsilon
output = _tree_to_labels(
None,
single_linkage_tree, min_cluster_size,
cluster_selection_method,
allow_single_cluster,
match_reference_implementation,
cluster_selection_epsilon=epsilon)
# Reflect the related changes in HDBSCAN.
(new_clusterer.labels_,
new_clusterer.probabilities_,
new_clusterer.cluster_persistence_,
new_clusterer._condensed_tree,
new_clusterer._single_linkage_tree) = output
# PredictionData attached to HDBSCAN should also change.
# A function re_init is defined in this module to handle this.
re_init(new_clusterer.prediction_data_,
new_clusterer.condensed_tree_,
cluster_selection_epsilon=epsilon)
return new_clusterer
def approximate_predict_flat(clusterer,
points_to_predict,
n_clusters=None,
cluster_selection_epsilon=None,
prediction_data=None,
return_prediction_data=False):
"""
Predict the cluster label of new points at a particular flat clustering,
specified by n_clusters. This is a modified version of
hdbscan.approximate_predict to allow selection of n_clusters.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
n_clusters: int, default=None
The number of clusters to have in the flat clustering
(over the training data, not points_to_predict)
Ignored when prediction_data is supplied.
cluster_selection_epsilon: float, default=None
core-distance below which to stop splitting clusters.
This can indirectly impose n_clusters.
This argument is ignored if n_clusters is supplied.
prediction_data: PredictionData, default=None
If supplied, use this to predict clusters for points_to_predict.
This allows predicting on multiple datasets without corrupting
prediction data associated with clusterer.
If neither n_clusters, nor prediction_data are supplied,
then the prediction_data associated with clusterer is used.
return_prediction_data: bool, default=False
If True, return prediction_data along with labels and proba.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
prediction_data: PredictionData, optional
prediction_data used to predict.
        Returned if return_prediction_data is set to True.
Usage
-----
# From a fitted HDBSCAN model, predict for n_clusters=5
labels, proba = approximate_predict_flat(
clusterer, X_predict, n_clusters=5)
    # Store prediction data for later use.
labels, proba, pred_data = approximate_predict_flat(
clusterer, X_predict, n_clusters=5,
return_prediction_data=True)
# and use this prediction data to predict on new points
labels1, proba1 = approximate_predict_flat(
clusterer, X_pred1,
prediction_data=pred_data)
See Also
---------
:py:func:`hdbscan.prediction.approximate_predict`
"""
# Get number of fitted clusters for later use.
n_clusters_fit = np.sum(np.unique(clusterer.labels_) >= 0)
if n_clusters is not None:
n_clusters = int(n_clusters) # Ensure n_clusters is int
# We'll need the condensed tree later...
condensed_tree = clusterer.condensed_tree_
# If none of the three arguments: prediction_data, n_clusters,
# and cluster_selection_epsilon are supplied,
    # then use clusterer's prediction data directly
if ((prediction_data is None) and
((n_clusters is None) or (n_clusters == n_clusters_fit)) and
(cluster_selection_epsilon is None)):
prediction_data = clusterer.prediction_data_
# If either of n_clusters or cluster_selection_epsilon were supplied,
# then build prediction data from these by modifying clusterer's
if not isinstance(prediction_data, PredictionData):
if clusterer.prediction_data_ is None:
raise ValueError(
'Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
# Get prediction data from clusterer
prediction_data = clusterer.prediction_data_
# Modify prediction_data to reflect new n_clusters
# First, make a copy of prediction data to avoid modifying source
prediction_data = copy.deepcopy(prediction_data)
# Cluster selection method is hold by condensed_tree.
# Change from 'eom' to 'leaf' if n_clusters is too large.
if ((condensed_tree.cluster_selection_method == 'eom') and (
(n_clusters is not None) and (n_clusters > n_clusters_fit))):
warn(f"Cannot predict more than {n_clusters_fit} with cluster "
"selection method 'eom'. Changing to method 'leaf'...")
condensed_tree.cluster_selection_method = 'leaf'
# This change does not affect the tree associated with 'clusterer'
# Re-initialize prediction_data for the specified n_clusters or epsilon
re_init(prediction_data, condensed_tree,
n_clusters=n_clusters,
cluster_selection_epsilon=cluster_selection_epsilon)
# ============================================================
# Now we're ready to use prediction_data
# The rest of the code is copied from HDBSCAN's approximate_predict,
# but modified to use prediction_data instead of clusterer's attribute
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != prediction_data.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if prediction_data.cluster_tree.shape[0] == 0:
warn('Prediction data does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
if return_prediction_data:
return labels, probabilities, prediction_data
else:
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = prediction_data.tree.query(
points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
condensed_tree,
prediction_data.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
prediction_data.core_distances,
prediction_data.cluster_map,
prediction_data.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
if return_prediction_data:
return labels, probabilities, prediction_data
else:
return labels, probabilities
def membership_vector_flat(
clusterer, points_to_predict,
prediction_data=None, n_clusters=None,
cluster_selection_epsilon=0.):
"""
(Adaptation of hdbscan's membership_vector for n_clusters, epsilon)
Predict soft cluster membership probabilities;
a vector for each point in ``points_to_predict`` that gives
a probability that the given point is a member of a cluster
for each of the selected clusters of the ``clusterer``.
Parameters
----------
clusterer: HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict: array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
prediction_data: PredictionData, default=None
Prediction data associated with HDBSCAN for some flat clustering
n_clusters: int, default=None
Number of clusters over which to compute membership probabilities.
These clusters are obtained as a flat clustering at some
cluster_selection_epsilon.
cluster_selection_epsilon: float, default=0.
core-distance below which to stop splitting clusters.
This can indirectly impose n_clusters.
This argument is ignored if n_clusters is supplied.
Note: If neither n_clusters nor cluster_selection_epsilon are supplied,
the clusterer's original clustering is used.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
points_to_predict = points_to_predict.astype(np.float64)
# Extract condensed tree for later use
condensed_tree = clusterer.condensed_tree_
# Choose flat clustering based on cluster_selection_epsilon or n_clusters.
# If neither is specified, use clusterer's cluster_selection_epsilon
if ((n_clusters is None) and
(cluster_selection_epsilon == 0.) and
(prediction_data is None)):
epsilon = clusterer.cluster_selection_epsilon
# Use the same prediction_data as clusterer's
prediction_data = clusterer.prediction_data_
elif prediction_data is None:
if n_clusters is not None:
# Compute cluster_selection_epsilon so that a flat clustering
# produces a specified number of n_clusters
# With method 'eom', we may fail to get 'n_clusters' clusters. So,
try:
epsilon = select_epsilon(condensed_tree, n_clusters)
except AssertionError:
warn(f"Failed to predict {n_clusters} clusters with "
"cluster selection method 'eom'. Switching to 'leaf'...")
condensed_tree.cluster_selection_method = 'leaf'
epsilon = select_epsilon(condensed_tree, n_clusters)
else:
epsilon = cluster_selection_epsilon
# Create another instance of prediction_data that is consistent
# with the selected value of epsilon.
prediction_data = copy.deepcopy(clusterer.prediction_data_)
re_init(prediction_data, condensed_tree,
cluster_selection_epsilon=epsilon)
# Flat clustering from prediction data
clusters = clusters_from_prediction_data(prediction_data)
# Initialize probabilities
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
    # k-NN from prediction points to the training set
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
prediction_data.tree.query(points_to_predict,
k=2*min_samples)
# Loop over prediction points to compute probabilities
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
prediction_data.core_distances,
min_samples)
# Find row in tree where nearest neighbor drops out,
# so we can get a lambda value for the nearest neighbor
neighbor_tree_row = get_tree_row_with_child(
condensed_tree._raw_tree, nearest_neighbor)
# Assign lambda as min(lambda-to-neighbor, neighbor's-lambda-to-tree)
# Equivalently, this assigns core distance for prediction point as
# max(dist-to-neighbor, neighbor's-dist-to-tree)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
# Probabilities based on distance to closest exemplar in each cluster:
# Use new prediction_data that points to exemplars that are specific
# to the choice of n_clusters
distance_vec = dist_membership_vector(
points_to_predict[i],
prediction_data.exemplars,
prediction_data.dist_metric)
# Probabilities based on how long the nearest exemplar persists in
# each cluster (with respect to most persistent exemplar)
# Use new clusters that are defined by the choice of n_clusters.
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
condensed_tree._raw_tree,
prediction_data.leaf_max_lambdas,
prediction_data.cluster_tree)
# Merge the two probabilities to produce a single set of probabilities
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
# Include probability that the nearest neighbor belongs to a cluster
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
condensed_tree._raw_tree,
prediction_data.leaf_max_lambdas,
prediction_data.cluster_tree)
# Rename variable so it's easy to understand what's being returned
membership_vectors = result
return membership_vectors
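# Illustrative usage of the function above (a sketch, not executed here;
# the name membership_vector_flat is assumed by analogy with the all-points
# variant below, and X_train/X_new are placeholder names):
#
#     import hdbscan
#     clusterer = hdbscan.HDBSCAN(prediction_data=True).fit(X_train)
#     probs = membership_vector_flat(clusterer, X_new, n_clusters=4)
#     # probs.shape == (len(X_new), 4); each row sums to at most 1
#     # because of the final prob_in_some_cluster scaling.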
def all_points_membership_vectors_flat(
clusterer, prediction_data=None,
n_clusters=None, cluster_selection_epsilon=None):
"""
(Adaptation of hdbscan's all_points_membership_vector
for n_clusters, epsilon)
Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient by making use of the fact that all points are already in the
condensed tree, and processing in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
prediction_data: PredictionData, default=None
Prediction data associated with HDBSCAN for some flat clustering
n_clusters: int, optional, default=None
Number of clusters over which to compute membership probabilities.
These clusters are obtained as a flat clustering at some
cluster_selection_epsilon.
cluster_selection_epsilon: float, optional, default=None
core-distance below which to stop splitting clusters.
This can indirectly impose n_clusters.
This argument is ignored if n_clusters is supplied.
Note: If neither n_clusters nor cluster_selection_epsilon are supplied,
the clusterer's original clustering is used.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.prediction.all_points_membership_vectors`
:py:func:`hdbscan.prediction.membership_vector`
"""
# Extract condensed tree for later use
condensed_tree = clusterer.condensed_tree_
# Choose flat clustering based on cluster_selection_epsilon or n_clusters.
# If neither is specified, use clusterer's cluster_selection_epsilon
    if ((n_clusters is None) and
            (cluster_selection_epsilon is None) and
            (prediction_data is None)):
epsilon = clusterer.cluster_selection_epsilon
# Use the same prediction_data as clusterer's
prediction_data = clusterer.prediction_data_
elif prediction_data is None:
if n_clusters is not None:
# Compute cluster_selection_epsilon so that a flat clustering
# produces a specified number of n_clusters
# With method 'eom', we may fail to get 'n_clusters' clusters. So,
try:
epsilon = select_epsilon(condensed_tree, n_clusters)
except AssertionError:
warn(f"Failed to predict {n_clusters} clusters with "
"cluster selection method 'eom'. Switching to 'leaf'...")
condensed_tree.cluster_selection_method = 'leaf'
epsilon = select_epsilon(condensed_tree, n_clusters)
else:
epsilon = cluster_selection_epsilon
# Create another instance of prediction_data that is consistent
# with the selected value of epsilon.
prediction_data = copy.deepcopy(clusterer.prediction_data_)
re_init(prediction_data, condensed_tree,
cluster_selection_epsilon=epsilon)
# Flat clustering at the chosen epsilon from prediction_data
clusters = clusters_from_prediction_data(prediction_data)
all_points = prediction_data.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
# Probabilities based on distance to closest exemplar in each cluster:
# Use new prediction_data that points to exemplars that are specific
# to the choice of n_clusters
distance_vecs = all_points_dist_membership_vector(
all_points,
prediction_data.exemplars,
prediction_data.dist_metric)
# Probabilities based on how long the point persists in
# each cluster (with respect to most persistent exemplar)
# Use new clusters that are defined by the choice of n_clusters.
outlier_vecs = all_points_outlier_membership_vector(
clusters,
condensed_tree._raw_tree,
prediction_data.leaf_max_lambdas,
prediction_data.cluster_tree)
# Include probability that the point belongs to a cluster
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
condensed_tree._raw_tree,
prediction_data.leaf_max_lambdas,
prediction_data.cluster_tree)
# Aggregate the three probabilities to produce membership vectors
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
# Re-name variable to clarify what's being returned.
membership_vectors = result
return membership_vectors
def select_epsilon(condensed_tree, n_clusters):
"""
Pick optimal epsilon from condensed tree based on n_clusters,
calls functions specific to 'eom' or 'leaf' selection methods
"""
cluster_selection_method = condensed_tree.cluster_selection_method
if cluster_selection_method == 'eom':
return select_epsilon_eom(condensed_tree, n_clusters)
if cluster_selection_method == 'leaf':
return select_epsilon_leaf(condensed_tree, n_clusters)
    raise ValueError('Invalid Cluster Selection Method: %s\n'
                     'Should be one of: "eom", "leaf"\n'
                     % cluster_selection_method)
def select_epsilon_eom(condensed_tree, n_clusters):
"""
Select epsilon so that persistence-based clustering,
after truncating the tree at the above epsilon,
has exactly 'n_clusters' clusters
"""
    # With method 'eom', the maximum number of clusters is produced at
    # epsilon=0, as computed by:
    eom_base_clusters = condensed_tree._select_clusters()
    max_clusters = len(eom_base_clusters)
    # Increasing epsilon can only reduce the number of output clusters.
    assert n_clusters <= max_clusters, (
        f"Cannot produce more than {max_clusters} clusters with method "
        "'eom'. Use method 'leaf' instead to extract flat clustering.")
tree = condensed_tree._raw_tree
# To select epsilon, consider all values where clusters are split
cluster_lambdas = tree['lambda_val'][tree['child_size'] > 1]
candidate_epsilons = 1./np.unique(cluster_lambdas) - 1.e-12
# Subtract the extra e-12 to avoid numerical errors in comparison
# Then, we avoid splitting for all epsilon below this.
candidate_epsilons = np.sort(candidate_epsilons)[::-1]
for epsilon in candidate_epsilons:
sel_clusters = _new_select_clusters(condensed_tree, epsilon)
if len(sel_clusters) == n_clusters:
break
else:
raise RuntimeError("Could not find epsilon")
return epsilon
def select_epsilon_leaf(condensed_tree, n_clusters):
"""
Select epsilon so that the leaves of condensed tree,
after truncating at the above epsilon,
has exactly 'n_clusters' clusters
"""
# Use an epsilon value that produces the right number of clusters.
# The condensed tree of HDBSCAN has this information.
# Extract the lambda levels (=1/distance) from the condensed tree
lambdas = condensed_tree._raw_tree['lambda_val']
# We don't want values that produce a large cluster and
# just one or two individual points.
child_sizes = condensed_tree._raw_tree['child_size']
child_sizes = child_sizes.astype(int)
# Keep only those lambda values corresponding to cluster separation;
# i.e., with child_sizes > 1
lambdas = lambdas[child_sizes > 1]
# Get the unique values, because when two clusters fall out of one,
# the entry with lambda is repeated.
lambdas = np.unique(lambdas.astype(float))
if n_clusters > len(lambdas) + 1:
warn(f"HDBSCAN can only compute {len(lambdas)+1} clusters. "
f"Setting n_clusters to {len(lambdas)+1}...")
n_clusters = len(lambdas) + 1
# lambda values are sorted by np.unique.
# Now, get epsilon (distance threshold) as 1/lambda
epsilon = 1./lambdas[n_clusters-2]
# At this epsilon, n_clusters have been split.
# Stop splits at epsilons smaller than this.
# To allow for numerical errors,
return epsilon - 1.e-12
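# Worked example for select_epsilon_leaf (illustrative): with unique split
# lambdas [0.5, 1.0, 2.0], n_clusters=3 picks lambdas[1] = 1.0, so epsilon
# is just under 1.0; the splits at distances 2.0 and 1.0 survive, giving
# exactly 3 leaves, while the finer split at distance 0.5 is suppressed.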
def re_init(predData, condensed_tree,
n_clusters=None, cluster_selection_epsilon=0.):
"""
Modify PredictionData of HDBSCAN to account for epsilon.
epsilon is the cluster_selection_epsilon that controls granularity
of clusters; Large epsilon => More clusters
Parameters
----------
predData: PredictionData
Contains data to use for predicting novel points.
Defined in the HDBSCAN module
condensed_tree: CondensedTree
Tree structure that contains hierarchical clustering.
Defined in the HDBSCAN module
n_clusters: int, optional, default=None
If specified, use this to obtain cluster_selection_epsilon
from CondensedTree; Overrides cluster_selection_epsilon parameter
cluster_selection_epsilon: float, default=0.
In cluster tree, nodes are not split further beyond (>=) this value.
epsilon is the inverse of core distance.
Returns
-------
None
"""
# predData must be a pre-trained PredictionData instance from hdbscan
# If n_clusters is specified, compute cluster_selection_epsilon;
if (n_clusters is not None):
cluster_selection_epsilon = select_epsilon(condensed_tree, n_clusters)
# This is the key modification:
# Select clusters according to selection method and epsilon.
selected_clusters = _new_select_clusters(condensed_tree,
cluster_selection_epsilon)
# _new_select_clusters is a modification of get_clusters
# from hdbscan._hdbscan_tree
# raw tree, used later to get exemplars and lambda values
raw_condensed_tree = condensed_tree._raw_tree
# Re-do the cluster map: Map cluster numbers in tree (N, N+1, ..)
# to the cluster labels produced as output
predData.cluster_map = {int(c): n for n, c in
enumerate(sorted(list(selected_clusters)))}
predData.reverse_cluster_map = {n: c for c, n in
predData.cluster_map.items()}
# Re-compute lambdas and exemplars for selected clusters;
predData.max_lambdas = {}
predData.exemplars = []
for cluster in selected_clusters:
# max_lambda <=> smallest distance <=> most persistent point(s)
predData.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
# Map all sub-clusters of selected cluster to the selected cluster's
# label in output.
# Map lambdas too...
for sub_cluster in predData._clusters_below(cluster):
predData.cluster_map[sub_cluster] = predData.cluster_map[cluster]
predData.max_lambdas[sub_cluster] = predData.max_lambdas[cluster]
# Create set of exemplar points for later use.
# Novel points are assigned based on cluster of closest exemplar.
cluster_exemplars = np.array([], dtype=np.int64)
# For each selected cluster, get all of its leaves,
# and leaves of leaves, and so on...
for leaf in predData._recurse_leaf_dfs(cluster):
# Largest lambda => Most persistent points
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
# Get the most persistent points
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf)
& (raw_condensed_tree['lambda_val'] == leaf_max_lambda)
]
# Add most persistent points as exemplars
cluster_exemplars = np.hstack([cluster_exemplars, points])
# Add exemplars for each leaf of each selected cluster.
predData.exemplars.append(predData.raw_data[cluster_exemplars])
return
def _new_select_clusters(condensed_tree,
cluster_selection_epsilon,
allow_single_cluster=False,
match_reference_implementation=False):
"""
Adaptation of get_clusters from hdbscan._hdbscan_tree.
Avoids the label and proba computation at the end,
and returns only the selected clusters instead.
"""
tree = condensed_tree._raw_tree
cluster_selection_method = condensed_tree.cluster_selection_method
stability = compute_stability(tree)
if allow_single_cluster:
node_list = sorted(stability.keys(), reverse=True)
else:
node_list = sorted(stability.keys(), reverse=True)[:-1]
# (exclude root)
cluster_tree = tree[tree['child_size'] > 1]
is_cluster = {cluster: True for cluster in node_list}
if cluster_selection_method == 'eom':
for node in node_list:
child_selection = (cluster_tree['parent'] == node)
subtree_stability = np.sum([
stability[child] for
child in cluster_tree['child'][child_selection]])
if subtree_stability > stability[node]:
is_cluster[node] = False
stability[node] = subtree_stability
else:
for sub_node in _bfs_from_cluster_tree(cluster_tree, node):
if sub_node != node:
is_cluster[sub_node] = False
if cluster_selection_epsilon != 0.0:
eom_clusters = set([c for c in is_cluster if is_cluster[c]])
selected_clusters = epsilon_search(eom_clusters, cluster_tree,
cluster_selection_epsilon,
allow_single_cluster)
for c in is_cluster:
if c in selected_clusters:
is_cluster[c] = True
else:
is_cluster[c] = False
elif cluster_selection_method == 'leaf':
leaves = set(get_cluster_tree_leaves(cluster_tree))
if len(leaves) == 0:
for c in is_cluster:
is_cluster[c] = False
is_cluster[tree['parent'].min()] = True
if cluster_selection_epsilon != 0.0:
selected_clusters = epsilon_search(leaves, cluster_tree,
cluster_selection_epsilon,
allow_single_cluster)
else:
selected_clusters = leaves
for c in is_cluster:
if c in selected_clusters:
is_cluster[c] = True
else:
is_cluster[c] = False
else:
        raise ValueError('Invalid Cluster Selection Method: %s\n'
                         'Should be one of: "eom", "leaf"\n'
                         % cluster_selection_method)
clusters = set([int(c) for c in is_cluster if is_cluster[c]])
return clusters
def epsilon_search(leaves, cluster_tree, cluster_selection_epsilon,
allow_single_cluster):
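    """
    Starting from the candidate ``leaves``, climb the cluster tree and
    collect, for each leaf born closer than ``cluster_selection_epsilon``,
    the shallowest ancestor that persists beyond that distance; leaves
    already satisfying the threshold are kept as-is.
    """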
selected_clusters = []
processed = []
for leaf in leaves:
eps = 1/cluster_tree['lambda_val'][cluster_tree['child'] == leaf][0]
if eps < cluster_selection_epsilon:
if leaf not in processed:
epsilon_child = traverse_upwards(
cluster_tree, cluster_selection_epsilon,
leaf, allow_single_cluster)
if hasattr(epsilon_child, '__len__'):
epsilon_child = epsilon_child[0]
selected_clusters.append(epsilon_child)
for sub_node in _bfs_from_cluster_tree(cluster_tree,
epsilon_child):
if sub_node != epsilon_child:
processed.append(sub_node)
else:
selected_clusters.append(leaf)
return set(selected_clusters)
def traverse_upwards(cluster_tree, cluster_selection_epsilon,
leaf, allow_single_cluster):
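    """
    Walk from ``leaf`` toward the root until a node that persists beyond
    ``cluster_selection_epsilon`` is found; if the root is reached first,
    return it only when ``allow_single_cluster`` is set, otherwise return
    the child closest to the root.
    """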
root = cluster_tree['parent'].min()
parent = cluster_tree[cluster_tree['child'] == leaf]['parent']
if parent == root:
if allow_single_cluster:
return parent
else:
return leaf # return node closest to root
parent_eps = 1/cluster_tree[cluster_tree['child'] == parent]['lambda_val']
if parent_eps > cluster_selection_epsilon:
return parent
else:
return traverse_upwards(cluster_tree, cluster_selection_epsilon,
parent, allow_single_cluster)
def clusters_from_prediction_data(prediction_data):
"""
Extract selected clusters from PredictionData instance.
"""
return np.array(
sorted(list(prediction_data.reverse_cluster_map.values()))
).astype(np.intp)
|
{"hexsha": "1bbbb5acfb11a629736d380607967ae74a621771", "size": 38864, "ext": "py", "lang": "Python", "max_stars_repo_path": "hdbscan/flat.py", "max_stars_repo_name": "ainkov/hdbscan", "max_stars_repo_head_hexsha": "29dbaedfd281addc86cdae69a65798b9230a2c6e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-21T13:45:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-21T13:45:50.000Z", "max_issues_repo_path": "hdbscan/flat.py", "max_issues_repo_name": "ainkov/hdbscan", "max_issues_repo_head_hexsha": "29dbaedfd281addc86cdae69a65798b9230a2c6e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hdbscan/flat.py", "max_forks_repo_name": "ainkov/hdbscan", "max_forks_repo_head_hexsha": "29dbaedfd281addc86cdae69a65798b9230a2c6e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-18T04:55:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-18T04:55:17.000Z", "avg_line_length": 41.6995708155, "max_line_length": 79, "alphanum_fraction": 0.6632873611, "include": true, "reason": "import numpy", "num_tokens": 7811}
|
function boys_function(n::Int, x::Float64)::Float64
x < 1e-6 && return 1/(2n+1)
n == 0 && return 0.5*√(π/x)*erf(√x)
return ((2n-1)*boys_function(n-1, x) - exp(-x))/(2x)
end
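# The recursion above is the standard upward relation for the Boys function
#   F_n(x) = ∫₀¹ t^(2n) exp(-x t²) dt,
#   F_n(x) = ((2n-1) F_{n-1}(x) - exp(-x)) / (2x),  F_0(x) = (1/2)√(π/x) erf(√x).
# Illustrative sanity check (QuadGK is an assumed extra dependency, not part
# of this package):
#
#   using QuadGK
#   F(n, x) = quadgk(t -> t^(2n) * exp(-x * t^2), 0, 1; rtol=1e-12)[1]
#   @assert isapprox(boys_function(2, 0.7), F(2, 0.7); atol=1e-10)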
"S(gs, gs)"
function overlap_integral(g1::Gaussian_s, g2::Gaussian_s)::Float64
p = (g1.α + g2.α)
μ = (g1.α * g2.α) / p
γ = 4μ / p
return γ^(3/4) * exp(-μ * abs2(g1.R - g2.R))
end
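# For unit-normalized s primitives this is the textbook result
#   S = (2√(α₁α₂)/(α₁+α₂))^(3/2) * exp(-μ |R₁-R₂|²),
# which equals γ^(3/4) exp(-μ |R₁-R₂|²) since γ = 4μ/p = 4α₁α₂/(α₁+α₂)².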
"S(gs, gp)"
function overlap_integral(g1::Gaussian_s, g2::Gaussian_p)::Float64
p = g1.α + g2.α
μ = (g1.α * g2.α) / p
γ = 4μ / p
Rab = g1.R - g2.R
return √(g1.α) * γ^(5/4) * exp(-μ * abs2(Rab)) * (Rab * g2.n)
end
"S(gp, gs)"
function overlap_integral(g1::Gaussian_p, g2::Gaussian_s)::Float64
overlap_integral(g2, g1)
end
"S(gp, gp)"
function overlap_integral(g1::Gaussian_p, g2::Gaussian_p)::Float64
p = g1.α + g2.α
μ = (g1.α * g2.α) / p
γ = 4μ / p
Rab = g1.R - g2.R
return γ^(5/4) * ((g1.n * g2.n) -√(g1.α*g2.α*γ) * (Rab * g1.n) * (Rab * g2.n)) * exp(-μ * abs2(Rab))
end
"T(gs, gs)"
function kinetic_integral(g1::Gaussian_s, g2::Gaussian_s)::Float64
p = g1.α + g2.α
μ = (g1.α * g2.α) / p
γ = 4μ / p
Rab2 = abs2(g1.R - g2.R)
return γ^(3/4) * μ * (3-2μ*Rab2) * exp(-μ * Rab2)
end
"T(gp, gs)"
function kinetic_integral(g1::Gaussian_p, g2::Gaussian_s)::Float64
p = g1.α + g2.α
μ = g1.α * g2.α / p
γ = 4μ / p
Rab = g1.R - g2.R
Rab2 = abs2(Rab)
return γ^(5/4) * exp(-μ * Rab2) * μ * (2μ*Rab2-5) * √g2.α * (Rab * g1.n)
end
"T(gs, gp)"
function kinetic_integral(g1::Gaussian_s, g2::Gaussian_p)::Float64
kinetic_integral(g2, g1)
end
"T(gp, gp)"
function kinetic_integral(g1::Gaussian_p, g2::Gaussian_p)::Float64
p = g1.α + g2.α
μ = g1.α * g2.α / p
γ = 4μ / p
Rab = g1.R - g2.R
dotnab = g1.n * g2.n
μRab2 = μ * abs2(Rab)
M = (g1.n * Rab) * (g2.n * Rab)
return γ^(5/4) * exp(-μRab2) * μ * (5dotnab - 14μ * M - 2μRab2 * dotnab + 4μ * μRab2 * M)
end
"Vn(gs, gs)"
function nuclear_acctaction_integral(Zc::Int, Rc::Vec3, g1::Gaussian_s, g2::Gaussian_s)::Float64
p = g1.α + g2.α
μ = (g1.α * g2.α) / p
γ = 4μ / p
Rμ = (g1.α / p) * g1.R + (g2.α / p) * g2.R
return -2Zc * γ^(3/4) * √(p/π) * exp(-μ * abs2(g1.R - g2.R)) * boys_function(0, p*abs2(Rμ-Rc))
end
"Vn(gp,gs)"
function nuclear_acctaction_integral(Zc::Int, Rc::Vec3, g1::Gaussian_p, g2::Gaussian_s)::Float64
p = g1.α + g2.α
μ = (g1.α * g2.α) / p
γ = 4μ / p
Rμ = (g1.α / p) * g1.R + (g2.α / p) * g2.R
pR2 = p*abs2(Rμ - Rc)
return -4Zc * γ^(3/4) * √(g1.α * p / π) * exp(-μ * abs2(g1.R-g2.R)) *
(-(g1.n * (Rμ-Rc)) * boys_function(1, pR2) + (g1.n * (Rμ-g1.R)) * boys_function(0, pR2))
end
"Vn(gs,gp)"
function nuclear_acctaction_integral(Zc::Int, Rc::Vec3, g1::Gaussian_s, g2::Gaussian_p)::Float64
nuclear_acctaction_integral(Zc, Rc, g2, g1)
end
"Vn(gp, gp)"
function nuclear_acctaction_integral(Zc::Int, Rc::Vec3, g1::Gaussian_p, g2::Gaussian_p)::Float64
p = g1.α + g2.α
μ = g1.α * g2.α / p
γ = 4μ / p
Rab = g1.R - g2.R
Rμ = (g1.α / p) * g1.R + (g2.α / p) * g2.R
Rμc = Rμ - Rc
Dac = g1.n * Rμc
Dbc = g2.n * Rμc
Daa = g1.n * (Rμ - g1.R)
Dbb = g2.n * (Rμ - g2.R)
x = p * abs2(Rμc)
F0x = boys_function(0, x)
F1x = boys_function(1, x)
F2x = boys_function(2, x)
return -2Zc * γ^(5/4) * √(p/π) * exp(-μ*abs2(g1.R - g2.R)) * (
(g1.n * g2.n) * (F0x - F1x) + 2p * Dac * Dbc * F2x -
2p * (Dac * Dbb + Dbc * Daa) * F1x + 2p * Daa * Dbb * F0x
)
end
function electron_repulsion_integral(g1::Gaussian_s, g2::Gaussian_s, g3::Gaussian_s, g4::Gaussian_s)
p1 = g1.α + g4.α
p2 = g2.α + g3.α
μ1 = (g1.α * g4.α) / p1
μ2 = (g2.α * g3.α) / p2
γ1 = 4μ1 / p1
γ2 = 4μ2 / p2
Rμ1 = (g1.α / p1) * g1.R + (g4.α / p1) * g4.R
Rμ2 = (g2.α / p2) * g2.R + (g3.α / p2) * g3.R
λ = p1 * p2 / (p1 + p2)
R = abs(Rμ1 - Rμ2)
if R < 1e-6
return (γ1*γ2)^(3/4) * exp(-μ1 * abs2(g1.R-g4.R)-μ2 * abs2(g2.R-g3.R)) * 2 * √(λ / π)
else
return (γ1*γ2)^(3/4) * exp(-μ1 * abs2(g1.R-g4.R)-μ2 * abs2(g2.R-g3.R)) * erf(√λ * R) / R
end
end
function two_center_integral(s1::STONG{N}, s2::STONG{N}, integral::Function) where N
total = 0.0
for i = 1:N
for j = 1:N
total += s1.d[i] * s2.d[j] * integral(s1.g[i], s2.g[j])
end
end
return total
end
function four_center_integral(s1::STONG{N}, s2::STONG{N}, s3::STONG{N}, s4::STONG{N}, integral::Function) where N
total = 0.0
for i = 1:N
for j = 1:N
for k = 1:N
for l = 1:N
total += s1.d[i]*s2.d[j]*s3.d[k]*s4.d[l] * integral(s1.g[i], s2.g[j], s3.g[k], s4.g[l])
end
end
end
end
return total
end
overlap_integral(s1::STONG, s2::STONG) = two_center_integral(s1, s2, overlap_integral)
kinetic_integral(s1::STONG, s2::STONG) = two_center_integral(s1, s2, kinetic_integral)
nuclear_acctaction_integral(Zc::Int, Rc::Vec3, s1::STONG, s2::STONG) = two_center_integral(
s1, s2,
(g1,g2)->nuclear_acctaction_integral(Zc, Rc, g1, g2)
)
electron_repulsion_integral(s1::STONG, s2::STONG, s3::STONG, s4::STONG) = four_center_integral(
s1, s2, s3, s4,
electron_repulsion_integral
)
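# Illustrative usage (a sketch; STONG values s1..s4 are constructed elsewhere
# in this package, so only the fields used above are assumed):
#
#   S   = overlap_integral(s1, s2)                          # contracted overlap
#   T   = kinetic_integral(s1, s2)                          # contracted kinetic energy
#   V   = nuclear_acctaction_integral(1, s1.g[1].R, s1, s2) # Z = 1 at s1's center
#   ERI = electron_repulsion_integral(s1, s2, s3, s4)       # two-electron integral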
|
{"hexsha": "cde3a813e5be713001edaa82c8cd80b44521cde1", "size": 5278, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/gto_integral.jl", "max_stars_repo_name": "0382/HartreeFock.jl", "max_stars_repo_head_hexsha": "1cf2c3eb52c84a23ada62196ae5e8739d02027a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gto_integral.jl", "max_issues_repo_name": "0382/HartreeFock.jl", "max_issues_repo_head_hexsha": "1cf2c3eb52c84a23ada62196ae5e8739d02027a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gto_integral.jl", "max_forks_repo_name": "0382/HartreeFock.jl", "max_forks_repo_head_hexsha": "1cf2c3eb52c84a23ada62196ae5e8739d02027a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6860465116, "max_line_length": 113, "alphanum_fraction": 0.5424403183, "num_tokens": 2431}
|
```python
"""Purely numerical 1D finite element program."""
import sys, os, time
sys.path.insert(
0, os.path.join(os.pardir, os.pardir, 'approx', 'src-approx'))
from numint import GaussLegendre, NewtonCotes
#from fe_approx1D_numint import u_glob
import sympy as sym
import numpy as np
def Lagrange_polynomial(x, i, points):
"""
Return the Lagrange polynomial no. i.
points are the interpolation points, and x can be a number or
a sympy.Symbol object (for symbolic representation of the
polynomial). When x is a sympy.Symbol object, it is
normally desirable (for nice output of polynomial expressions)
to let points consist of integers or rational numbers in sympy.
"""
p = 1
for k in range(len(points)):
if k != i:
p *= (x - points[k])/(points[i] - points[k])
return p
def Chebyshev_nodes(a, b, N):
"""Return N+1 Chebyshev nodes (for interpolation) on [a, b]."""
from math import cos, pi
nodes = [0.5*(a+b) + 0.5*(b-a)*cos(float(2*i+1)/(2*(N+1))*pi)
for i in range(N+1)]
return nodes
def basis(d, point_distribution='uniform', symbolic=False):
"""
Return all local basis function phi and their derivatives,
in physical coordinates, as functions of the local point
X in a 1D element with d+1 nodes.
If symbolic=True, return symbolic expressions, else
return Python functions of X.
point_distribution can be 'uniform' or 'Chebyshev'.
>>> phi = basis(d=1, symbolic=False)
>>> phi[0][0](0) # basis func 0 at X=0
0.5
>>> phi[1][0](0, h=0.5) # 1st x-derivative at X=0
    -2.0
"""
X, h = sym.symbols('X h')
phi_sym = {}
phi_num = {}
if d == 0:
phi_sym[0] = [1]
phi_sym[1] = [0]
else:
if point_distribution == 'uniform':
nodes = np.linspace(-1, 1, d+1)
elif point_distribution == 'Chebyshev':
nodes = Chebyshev_nodes(-1, 1, d)
phi_sym[0] = [Lagrange_polynomial(X, r, nodes)
for r in range(d+1)]
phi_sym[1] = [sym.simplify(sym.diff(phi_sym[0][r], X)*2/h)
for r in range(d+1)]
# Transform to Python functions
phi_num[0] = [sym.lambdify([X], phi_sym[0][r])
for r in range(d+1)]
phi_num[1] = [sym.lambdify([X, h], phi_sym[1][r])
for r in range(d+1)]
return phi_sym if symbolic else phi_num
def affine_mapping(X, Omega_e):
x_L, x_R = Omega_e
return 0.5*(x_L + x_R) + 0.5*(x_R - x_L)*X
def finite_element1D_naive(
vertices, cells, dof_map, # mesh
essbc, # essbc[globdof]=value
ilhs,
irhs,
blhs=lambda e, phi, r, s, X, x, h: 0,
brhs=lambda e, phi, r, X, x, h: 0,
intrule='GaussLegendre',
verbose=False,
):
N_e = len(cells)
N_n = np.array(dof_map).max() + 1
A = np.zeros((N_n, N_n))
b = np.zeros(N_n)
timing = {}
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
for e in range(N_e):
Omega_e = [vertices[cells[e][0]], vertices[cells[e][1]]]
h = Omega_e[1] - Omega_e[0]
d = len(dof_map[e]) - 1 # Polynomial degree
# Compute all element basis functions and their derivatives
phi = basis(d)
if verbose:
            print('e=%2d: [%g,%g] h=%g d=%d'
                  % (e, Omega_e[0], Omega_e[1], h, d))
# Element matrix and vector
n = d+1 # No of dofs per element
A_e = np.zeros((n, n))
b_e = np.zeros(n)
# Integrate over the reference cell
if intrule == 'GaussLegendre':
points, weights = GaussLegendre(d+1)
elif intrule == 'NewtonCotes':
points, weights = NewtonCotes(d+1)
for X, w in zip(points, weights):
detJ = h/2
x = affine_mapping(X, Omega_e)
dX = detJ*w
# Compute contribution to element matrix and vector
for r in range(n):
for s in range(n):
A_e[r,s] += ilhs(e, phi, r, s, X, x, h)*dX
b_e[r] += irhs(e, phi, r, X, x, h)*dX
# Add boundary terms
for r in range(n):
for s in range(n):
A_e[r,s] += blhs(e, phi, r, s, X, x, h)
b_e[r] += brhs(e, phi, r, X, x, h)
if verbose:
            print('A^(%d):\n' % e, A_e); print('b^(%d):' % e, b_e)
# Incorporate essential boundary conditions
modified = False
for r in range(n):
global_dof = dof_map[e][r]
if global_dof in essbc:
# dof r is subject to an essential condition
value = essbc[global_dof]
# Symmetric modification
b_e -= value*A_e[:,r]
A_e[r,:] = 0
A_e[:,r] = 0
A_e[r,r] = 1
b_e[r] = value
modified = True
if verbose and modified:
print('after essential boundary conditions:')
            print('A^(%d):\n' % e, A_e); print('b^(%d):' % e, b_e)
# Assemble
for r in range(n):
for s in range(n):
A[dof_map[e][r], dof_map[e][s]] += A_e[r,s]
b[dof_map[e][r]] += b_e[r]
    timing['assemble'] = time.perf_counter() - t0
    t1 = time.perf_counter()
    c = np.linalg.solve(A, b)
    timing['solve'] = time.perf_counter() - t1
if verbose:
        print('Global A:\n', A); print('Global b:\n', b)
        print('Solution c:\n', c)
return c, A, b, timing
def finite_element1D(
vertices, cells, dof_map, # mesh
essbc, # essbc[globdof]=value
ilhs,
irhs,
blhs=lambda e, phi, r, s, X, x, h: 0,
brhs=lambda e, phi, r, X, x, h: 0,
intrule='GaussLegendre',
verbose=False,
):
N_e = len(cells)
N_n = np.array(dof_map).max() + 1
import scipy.sparse
A = scipy.sparse.dok_matrix((N_n, N_n))
b = np.zeros(N_n)
timing = {}
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
for e in range(N_e):
Omega_e = [vertices[cells[e][0]], vertices[cells[e][1]]]
h = Omega_e[1] - Omega_e[0]
d = len(dof_map[e]) - 1 # Polynomial degree
# Compute all element basis functions and their derivatives
phi = basis(d)
if verbose:
            print('e=%2d: [%g,%g] h=%g d=%d'
                  % (e, Omega_e[0], Omega_e[1], h, d))
# Element matrix and vector
n = d+1 # No of dofs per element
A_e = np.zeros((n, n))
b_e = np.zeros(n)
# Integrate over the reference cell
if intrule == 'GaussLegendre':
points, weights = GaussLegendre(d+1)
elif intrule == 'NewtonCotes':
points, weights = NewtonCotes(d+1)
for X, w in zip(points, weights):
detJ = h/2
x = affine_mapping(X, Omega_e)
dX = detJ*w
# Compute contribution to element matrix and vector
for r in range(n):
for s in range(n):
A_e[r,s] += ilhs(e, phi, r, s, X, x, h)*dX
b_e[r] += irhs(e, phi, r, X, x, h)*dX
# Add boundary terms
for r in range(n):
for s in range(n):
A_e[r,s] += blhs(e, phi, r, s, X, x, h)
b_e[r] += brhs(e, phi, r, X, x, h)
if verbose:
            print('A^(%d):\n' % e, A_e); print('b^(%d):' % e, b_e)
# Incorporate essential boundary conditions
modified = False
for r in range(n):
global_dof = dof_map[e][r]
if global_dof in essbc:
# local dof r is subject to an essential condition
value = essbc[global_dof]
# Symmetric modification
b_e -= value*A_e[:,r]
A_e[r,:] = 0
A_e[:,r] = 0
A_e[r,r] = 1
b_e[r] = value
modified = True
if verbose and modified:
print('after essential boundary conditions:')
            print('A^(%d):\n' % e, A_e); print('b^(%d):' % e, b_e)
# Assemble
for r in range(n):
for s in range(n):
A[dof_map[e][r], dof_map[e][s]] += A_e[r,s]
b[dof_map[e][r]] += b_e[r]
import scipy.sparse.linalg
    t1 = time.perf_counter()
    timing['assemble'] = t1 - t0
    c = scipy.sparse.linalg.spsolve(A.tocsr(), b, use_umfpack=True)
    timing['solve'] = time.perf_counter() - t1
if verbose:
        print('Global A:\n', A)
        print('Nonzero (i,j) in A:', list(A.keys()))
        print('Global b:\n', b); print('Solution c:\n', c)
return c, A, b, timing
#print basis(d=1, symbolic=True)
def mesh_uniform(N_e, d, Omega=[0,1], symbolic=False):
"""
Return a 1D finite element mesh on Omega with N_e elements of
the polynomial degree d. The elements have uniform length.
Return vertices (vertices), local vertex to global
vertex mapping (cells), and local to global degree of freedom
mapping (dof_map).
    The symbolic argument is accepted for compatibility with a symbolic
    version of this function; this purely numerical version always
    returns float vertices.
"""
vertices = np.linspace(Omega[0], Omega[1], N_e + 1).tolist()
if d == 0:
dof_map = [[e] for e in range(N_e)]
else:
dof_map = [[e*d + i for i in range(d+1)] for e in range(N_e)]
cells = [[e, e+1] for e in range(N_e)]
return vertices, cells, dof_map
def define_cases(name=None):
C = 0.5; D = 2; L = 4 # constants for case 'cubic'
cases = {
# u''=0 on (0,1), u(0)=0, u(1)=1 => u(x)=x
'linear': {
'Omega': [0,1],
'ilhs': lambda e, phi, r, s, X, x, h:
phi[1][r](X, h)*phi[1][s](X, h),
'irhs': lambda e, phi, r, X, x, h: 0,
'blhs': lambda e, phi, r, s, X, x, h: 0,
'brhs': lambda e, phi, r, X, x, h: 0,
'u_L': 0,
'u_R': 1,
'u_exact': lambda x: x,
},
# -u''=2 on (0,1), u(0)=0, u(1)=0 => u(x)=x(1-x)
'quadratic': {
'Omega': [0,1],
'ilhs': lambda e, phi, r, s, X, x, h:
phi[1][r](X, h)*phi[1][s](X, h),
'irhs': lambda e, phi, r, X, x, h:
2*phi[0][r](X),
'blhs': lambda e, phi, r, s, X, x, h: 0,
'brhs': lambda e, phi, r, X, x, h: 0,
'u_L': 0,
'u_R': 0,
'u_exact': lambda x: x*(1-x),
},
# -u''=f(x) on (0,L), u'(0)=C, u(L)=D
# f(x)=x: u = D + C*(x-L) + (1./6)*(L**3 - x**3)
'cubic': {
'Omega': [0,L],
'ilhs': lambda e, phi, r, s, X, x, h:
phi[1][r](X, h)*phi[1][s](X, h),
'irhs': lambda e, phi, r, X, x, h:
x*phi[0][r](X),
'blhs': lambda e, phi, r, s, X, x, h: 0,
'brhs': lambda e, phi, r, X, x, h:
-C*phi[0][r](-1) if e == 0 else 0,
'u_R': D,
'u_exact': lambda x: D + C*(x-L) + (1./6)*(L**3 - x**3),
'min_d': 1, # min d for exact finite element solution
},
}
if name is None:
return cases
else:
return {name: cases[name]}
def test_finite_element1D():
"""Solve 1D test problems."""
cases = define_cases()
verbose = False
for name in cases:
case = cases[name]
for N_e in [3]:
for d in [1, 2, 3, 4]:
# Do we need a minimum d to get exact
# numerical solution?
if d < case.get('min_d', 0):
continue
vertices, cells, dof_map = \
mesh_uniform(N_e=N_e, d=d, Omega=case['Omega'],
symbolic=False)
N_n = np.array(dof_map).max() + 1
# Assume uniform mesh
x = np.linspace(
case['Omega'][0], case['Omega'][1], N_n)
essbc = {}
if 'u_L' in case:
essbc[0] = case['u_L']
if 'u_R' in case:
essbc[dof_map[-1][-1]] = case['u_R']
c, A, b, timing = finite_element1D_naive(
vertices, cells, dof_map, essbc,
case['ilhs'], case['irhs'],
case['blhs'], case['brhs'],
intrule='GaussLegendre',
verbose=verbose)
# Compare with exact solution
tol = 1E-12
diff = (case['u_exact'](x) - c).max()
msg = 'naive: case "%s", N_e=%d, d=%d, diff=%g' % \
(name, N_e, d, diff)
                print(msg, 'assemble: %.2f' % timing['assemble'],
                      'solve: %.2f' % timing['solve'])
assert diff < tol, msg
c, A, b, timing = finite_element1D(
vertices, cells, dof_map, essbc,
case['ilhs'], case['irhs'],
case['blhs'], case['brhs'],
intrule='GaussLegendre',
verbose=verbose)
# Compare with exact solution
diff = (case['u_exact'](x) - c).max()
msg = 'sparse: case "%s", N_e=%d, d=%d, diff=%g' % \
(name, N_e, d, diff)
                print(msg, 'assemble: %.2f' % timing['assemble'],
                      'solve: %.2f' % timing['solve'])
assert diff < tol, msg
def investigate_efficiency():
"""Compare sparse and dense matrix versions of the FE algorithm."""
    case = define_cases('linear')['linear']
for N_e in [300000, 1000000]:
for d in [1, 2, 3]:
vertices, cells, dof_map = \
                mesh_uniform(N_e=N_e, d=d, Omega=[0,1],
symbolic=False)
N_n = np.array(dof_map).max() + 1
x = np.linspace(0, 1, N_n)
essbc = {}
essbc[0] = case['u_L']
essbc[dof_map[-1][-1]] = case['u_R']
c, A, b, timing = finite_element1D_naive(
vertices, cells, dof_map, essbc,
case['ilhs'], case['irhs'],
case['blhs'], case['brhs'],
intrule='GaussLegendre',
verbose=False)
msg = 'naive: N_e=%d, d=%d: assemble=%.2e solve=%.2e' % \
(N_e, d, timing['assemble'], timing['solve'])
print(msg)
c, A, b, timing = finite_element1D(
vertices, cells, dof_map, essbc,
case['ilhs'], case['irhs'],
case['blhs'], case['brhs'],
intrule='GaussLegendre',
verbose=False)
msg = 'sparse: N_e=%d, d=%d: assemble=%.2e solve=%.2e' % \
(N_e, d, timing['assemble'], timing['solve'])
print(msg)
if __name__ == '__main__':
test_finite_element1D()
#investigate_efficiency()
```
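As a quick sanity run of the assembly routines above (a hedged sketch: it assumes this cell executes after the definitions, with `numint` importable as in the first cell), solve -u'' = 2 on (0,1) with u(0) = u(1) = 0; the exact solution u(x) = x(1-x) is reproduced at the nodes by P1 elements.

```python
# Illustrative check of finite_element1D_naive on the 'quadratic' case.
import numpy as np

vertices, cells, dof_map = mesh_uniform(N_e=3, d=1, Omega=[0, 1])
essbc = {0: 0.0, dof_map[-1][-1]: 0.0}      # u(0) = u(1) = 0
ilhs = lambda e, phi, r, s, X, x, h: phi[1][r](X, h)*phi[1][s](X, h)
irhs = lambda e, phi, r, X, x, h: 2*phi[0][r](X)
c, A, b, timing = finite_element1D_naive(
    vertices, cells, dof_map, essbc, ilhs, irhs)
x = np.linspace(0, 1, len(c))
print(np.abs(c - x*(1 - x)).max())          # ~1e-15: nodal values are exact
```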
|
{"hexsha": "339cdbe704d161afe5e7a1c14307646a0a8c1bf1", "size": 19576, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/FINITE_ELEMENTS/INTRO/SRC/19_FE1D.ipynb", "max_stars_repo_name": "okara83/Becoming-a-Data-Scientist", "max_stars_repo_head_hexsha": "f09a15f7f239b96b77a2f080c403b2f3e95c9650", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/FINITE_ELEMENTS/INTRO/SRC/19_FE1D.ipynb", "max_issues_repo_name": "okara83/Becoming-a-Data-Scientist", "max_issues_repo_head_hexsha": "f09a15f7f239b96b77a2f080c403b2f3e95c9650", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/FINITE_ELEMENTS/INTRO/SRC/19_FE1D.ipynb", "max_forks_repo_name": "okara83/Becoming-a-Data-Scientist", "max_forks_repo_head_hexsha": "f09a15f7f239b96b77a2f080c403b2f3e95c9650", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-09T15:41:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T07:47:40.000Z", "avg_line_length": 40.8684759916, "max_line_length": 86, "alphanum_fraction": 0.4064671026, "converted": true, "num_tokens": 4417}
|
import os
from collections import OrderedDict
import numpy as np
import scipy.io
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class GenericSpikeExporter:
    def __call__(self, spikes, catalogue, seg_num, chan_grp, export_path,
split_by_cluster=False,
use_cell_label=True,
#~ use_index=True,
):
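        """Write spike indexes and labels for one segment/channel group.

        When split_by_cluster is set, one file is written per cluster (or
        per cell when use_cell_label is True); otherwise a single file is
        written. The concrete format is delegated to write_out_data.
        """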
if not os.path.exists(export_path):
os.makedirs(export_path)
#~ print('export', spikes.size, seg_num, export_path)
#~ print('split_by_cluster', split_by_cluster, 'use_cell_label', use_cell_label)
clusters = catalogue['clusters']
spike_labels = spikes['cluster_label']
if use_cell_label:
spike_labels = spikes['cluster_label'].copy()
for l in clusters:
mask = spike_labels==l['cluster_label']
spike_labels[mask] = l['cell_label']
spike_indexes = spikes['index']
out_data = OrderedDict()
if split_by_cluster:
if use_cell_label:
possible_labels = np.unique(clusters['cell_label'])
label_name = 'cell'
else:
possible_labels = clusters['cluster_label']
label_name = 'cluster'
for k in possible_labels:
keep = k == spike_labels
out_data[label_name + '#'+ str(k)] = (spike_indexes[keep], spike_labels[keep])
else:
out_data['cell#all'] = (spike_indexes, spike_labels)
name = 'spikes - segNum {} - chanGrp {}'.format(seg_num, chan_grp)
filename = os.path.join(export_path, name)
self.write_out_data(out_data, filename)
class CsvSpikeExporter(GenericSpikeExporter):
ext = 'csv'
def write_out_data(self, out_data, filename):
for key, (spike_indexes, spike_labels) in out_data.items():
filename2 = filename +' - '+key+'.csv'
self._write_one_file(filename2, spike_indexes, spike_labels)
    def _write_one_file(self, filename, indexes, labels):
        rows = [''] * len(indexes)
        for i in range(len(indexes)):
            rows[i] = '{},{}\n'.format(indexes[i], labels[i])
with open(filename, 'w') as out:
out.writelines(rows)
export_csv = CsvSpikeExporter()
class MatlabSpikeExporter(GenericSpikeExporter):
ext = 'mat'
def write_out_data(self, out_data, filename):
mdict = {}
for key, (spike_indexes, spike_labels) in out_data.items():
mdict['index_'+key] = spike_indexes
            mdict['label_'+key] = spike_labels
scipy.io.savemat(filename+'.mat', mdict)
export_matlab = MatlabSpikeExporter()
class ExcelSpikeExporter(GenericSpikeExporter):
ext = 'xslx'
def write_out_data(self, out_data, filename):
assert HAS_PANDAS
writer = pd.ExcelWriter(filename+'.xlsx')
for key, (spike_indexes, spike_labels) in out_data.items():
df = pd.DataFrame()
df['index'] = spike_indexes
df['label'] = spike_labels
df.to_excel(writer, sheet_name=key, index=False)
writer.save()
export_excel = ExcelSpikeExporter()
# list
export_list = [export_csv, export_matlab, ]
if HAS_PANDAS:
export_list.append(export_excel)
export_dict = {e.ext:e for e in export_list}
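# Illustrative usage (a sketch; `spikes` and `catalogue` are the structured
# containers assumed by GenericSpikeExporter above):
#
#     exporter = export_dict['csv']
#     exporter(spikes, catalogue, seg_num=0, chan_grp=0,
#              export_path='/tmp/tdc_export', split_by_cluster=True)
#
# This writes one "spikes - segNum 0 - chanGrp 0 - cell#<label>.csv" file
# per cell label, each row being "spike_index,cell_label".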
|
{"hexsha": "b48bae98bb56abf765c039c658f5e90dccfa7b28", "size": 3407, "ext": "py", "lang": "Python", "max_stars_repo_path": "tridesclous/export.py", "max_stars_repo_name": "remi-pr/tridesclous", "max_stars_repo_head_hexsha": "074f425fd40f1fb76f619f74cc024dd9817b7ee7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-13T15:10:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-13T15:10:13.000Z", "max_issues_repo_path": "tridesclous/export.py", "max_issues_repo_name": "remi-pr/tridesclous", "max_issues_repo_head_hexsha": "074f425fd40f1fb76f619f74cc024dd9817b7ee7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tridesclous/export.py", "max_forks_repo_name": "remi-pr/tridesclous", "max_forks_repo_head_hexsha": "074f425fd40f1fb76f619f74cc024dd9817b7ee7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.141509434, "max_line_length": 94, "alphanum_fraction": 0.6116818315, "include": true, "reason": "import numpy,import scipy", "num_tokens": 772}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import json
import logging
import numpy as np
import pytest
import vineyard
from vineyard.core import default_builder_context
from vineyard.core import default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
logger = logging.getLogger('vineyard')
@pytest.mark.skip_without_migration()
def test_migration(vineyard_ipc_sockets):
vineyard_ipc_sockets = list(
itertools.islice(itertools.cycle(vineyard_ipc_sockets), 2)
)
client1 = vineyard.connect(vineyard_ipc_sockets[0])
client2 = vineyard.connect(vineyard_ipc_sockets[1])
# test if metadata of remote object available
data = np.ones((1, 2, 3, 4, 5))
o = client1.put(data)
client1.persist(o)
meta = client2.get_meta(o, sync_remote=True)
assert data.shape == tuple(json.loads(meta['shape_']))
# migrate local to local: do nothing.
o1 = client1.migrate(o)
assert o == o1
logger.info('------- finish migrate local --------')
    # migrate remote to local: a new object is created on the local instance.
o2 = client2.migrate(o)
assert o != o2
np.testing.assert_allclose(client1.get(o1), client2.get(o2))
logger.info('------- finish migrate remote --------')
@pytest.mark.skip_without_migration()
def test_migration_and_deletion(vineyard_ipc_sockets):
vineyard_ipc_sockets = list(
itertools.islice(itertools.cycle(vineyard_ipc_sockets), 2)
)
client1 = vineyard.connect(vineyard_ipc_sockets[0])
client2 = vineyard.connect(vineyard_ipc_sockets[1])
data1 = np.ones((1, 2, 3, 4, 5))
o1 = client1.put(data1)
client1.persist(o1)
meta1 = client2.get_meta(o1, sync_remote=True)
assert data1.shape == tuple(json.loads(meta1['shape_']))
data2 = np.zeros((1, 2, 3, 4, 5))
o2 = client2.put(data2)
client2.persist(o2)
meta2 = client1.get_meta(o2, sync_remote=True)
assert data2.shape == tuple(json.loads(meta2['shape_']))
# make the global object
o = client1.put((o1, o2), global_=True)
gmeta = client1.get_meta(o, sync_remote=True)
client1.persist(o)
assert not gmeta.islocal
assert gmeta.isglobal
# migrate o2 to h1, as o3
o3 = client1.migrate(o2)
assert o3 != o1
assert o3 != o2
logger.info('------- finish migrate remote --------')
# delete the o2
client1.sync_meta()
client1.delete(o2, force=False, deep=True)
logger.info('------- finish delete original chunk --------')
client1.sync_meta()
assert client1.exists(o)
assert client1.exists(o1)
assert client1.exists(o3)
assert not client1.exists(o2)
with pytest.raises(vineyard.ObjectNotExistsException):
print(client1.get_meta(o2))
client2.sync_meta()
assert client2.exists(o)
assert client2.exists(o1)
assert client2.exists(o3)
assert not client2.exists(o2)
with pytest.raises(vineyard.ObjectNotExistsException):
print(client2.get_meta(o2))
# delete the o3
client2.sync_meta()
client2.delete(o3, force=False, deep=True)
logger.info('------- finish delete migrated chunk --------')
client1.sync_meta()
assert client1.exists(o)
assert client1.exists(o1)
assert client1.exists(o3)
assert not client1.exists(o2)
with pytest.raises(vineyard.ObjectNotExistsException):
print(client1.get_meta(o2))
client2.sync_meta()
assert client2.exists(o)
assert client2.exists(o1)
assert client2.exists(o3)
assert not client2.exists(o2)
with pytest.raises(vineyard.ObjectNotExistsException):
print(client2.get_meta(o2))
|
{"hexsha": "17f6d65a930270abe03f3a20dceced43683cb5ee", "size": 4285, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/vineyard/deploy/tests/test_migration.py", "max_stars_repo_name": "v6d-io/v6d", "max_stars_repo_head_hexsha": "8f692c9bd95dad06c304a0020d4f946a5756c1e0", "max_stars_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_stars_count": 117, "max_stars_repo_stars_event_min_datetime": "2021-05-30T05:21:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:35:50.000Z", "max_issues_repo_path": "python/vineyard/deploy/tests/test_migration.py", "max_issues_repo_name": "v6d-io/v6d", "max_issues_repo_head_hexsha": "8f692c9bd95dad06c304a0020d4f946a5756c1e0", "max_issues_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_issues_count": 294, "max_issues_repo_issues_event_min_datetime": "2021-05-28T03:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:09:45.000Z", "max_forks_repo_path": "python/vineyard/deploy/tests/test_migration.py", "max_forks_repo_name": "v6d-io/v6d", "max_forks_repo_head_hexsha": "8f692c9bd95dad06c304a0020d4f946a5756c1e0", "max_forks_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2021-05-31T13:34:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:39:47.000Z", "avg_line_length": 30.176056338, "max_line_length": 74, "alphanum_fraction": 0.6980163361, "include": true, "reason": "import numpy", "num_tokens": 1089}
|
[STATEMENT]
lemma step_induction[consumes 2, case_names app\<^sub>1 app\<^sub>2 thunk lamvar var\<^sub>2 let\<^sub>1 if\<^sub>1 if\<^sub>2 refl trans]:
assumes "c \<Rightarrow>\<^sup>* c'"
assumes "\<not> boring_step c'"
assumes app\<^sub>1: "\<And> \<Gamma> e x S . P (\<Gamma>, App e x, S) (\<Gamma>, e , Arg x # S)"
assumes app\<^sub>2: "\<And> \<Gamma> y e x S . P (\<Gamma>, Lam [y]. e, Arg x # S) (\<Gamma>, e[y ::= x] , S)"
assumes thunk: "\<And> \<Gamma> x e S . map_of \<Gamma> x = Some e \<Longrightarrow> \<not> isVal e \<Longrightarrow> P (\<Gamma>, Var x, S) (delete x \<Gamma>, e , Upd x # S)"
assumes lamvar: "\<And> \<Gamma> x e S . map_of \<Gamma> x = Some e \<Longrightarrow> isVal e \<Longrightarrow> P (\<Gamma>, Var x, S) ((x,e) # delete x \<Gamma>, e , S)"
assumes var\<^sub>2: "\<And> \<Gamma> x e S . x \<notin> domA \<Gamma> \<Longrightarrow> isVal e \<Longrightarrow> P (\<Gamma>, e, Upd x # S) ((x,e)# \<Gamma>, e , S)"
assumes let\<^sub>1: "\<And> \<Delta> \<Gamma> e S . atom ` domA \<Delta> \<sharp>* \<Gamma> \<Longrightarrow> atom ` domA \<Delta> \<sharp>* S \<Longrightarrow> P (\<Gamma>, Let \<Delta> e, S) (\<Delta>@\<Gamma>, e, S)"
assumes if\<^sub>1: "\<And>\<Gamma> scrut e1 e2 S. P (\<Gamma>, scrut ? e1 : e2, S) (\<Gamma>, scrut, Alts e1 e2 # S)"
assumes if\<^sub>2: "\<And>\<Gamma> b e1 e2 S. P (\<Gamma>, Bool b, Alts e1 e2 # S) (\<Gamma>, if b then e1 else e2, S)"
assumes refl: "\<And> c. P c c"
assumes trans[trans]: "\<And> c c' c''. c \<Rightarrow>\<^sup>* c' \<Longrightarrow> c' \<Rightarrow>\<^sup>* c'' \<Longrightarrow> P c c' \<Longrightarrow> P c' c'' \<Longrightarrow> P c c''"
shows "P c c'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P c c'
[PROOF STEP]
by (rule step_invariant_induction[OF _ _ invariant_True, simplified, OF assms])
|
{"llama_tokens": 686, "file": "Call_Arity_Sestoft", "length": 1}
|
c.......subroutine bfilter
c
c Written by: David R. Russell, AFTAC/TT 10 December 2004
c
c Subroutine bfilter executes a fast, stable zero phase butterworth
c bandpass filter of order (m), which is optimized for narrow band
c applications. The method produces a complex time series output,
c of which the real portion is used to calculate the filtered time
c series, and the modulus is used to calculate the envelope function.
c Stability of the method is achieved by reducing the bandpass
c filter calculations to simple cascaded first order filters,
c which are forward and reverse filtered for zero phase. The method
c also does a linear shift of a butterworth lowpass filter
c to an equivalent bandpass, without going through a standard
c non-linear translation (Kanasewich, E.R., 1975) to bandpass. An option
c is included to remove the signal mean initially to compensate for
c large DC offsets
c
c INPUT:
c
c m: Order of Butterworth filter
c n: Number of input, output time series points
c mzer: Integer flag to remove mean (0 no, 1 yes)
c xr(n): Input (real) time series
c f0: Center value of bandpass filter
c fc: Corners of filter [ flow= f0-fc; fhigh= f0+fc ]
c dt: Time series sampling interval
c
c OUTPUT:
c
c yr(n): Output filtered (real) time series
c er(n): Output envelope function for filtered time series
c ermx: Maximum value of envelope function er.
c
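c.......Example call (illustrative; the caller dimensions xr, yr, er
c       to n points with n <= nmax = 65000):
c
c           call bfilter(xr,yr,er,ermx,1.0,0.25,0.01,4,npts,1)
c
c       filters npts samples of xr (dt = 0.01 s) through a 4th order,
c       zero-phase 0.75-1.25 Hz bandpass, removing the mean first.
c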
c****************************
subroutine bfilter(xr,yr,er,ermx,f0,fc,dt,m,n,mzer)
c****************************
c
c.......nmax = total possible number of time series points
c mmax = highest possible order of butterworth filter
c
parameter (nmax=65000,mmax=10)
c
double complex z1(nmax),z2(nmax),a1(mmax),a2(mmax),
& a1c(mmax),a2c(mmax),p,s,ctemp
double precision pi,w0,wc,w1,w2,dtemp,dtt
dimension xr(n),yr(n),er(n)
c
c.......error check on frequencies
c
fnyq=1.0/(2.0*dt)
if ((f0-fc).le.0.0) then
write(6,*)'low corner frequency (f0-fc) <= 0.0'
stop
endif
if ((f0+fc).ge.fnyq) then
write(6,*)'high corner frequency (f0+fc) >= nyquist'
stop
endif
c
c.......initialize double precision pi, dtt, angular frequencies w0,wc
c
pi=3.14159265358979d0
w0=2.0d0*pi*dble(f0)
wc=2.0d0*pi*dble(fc)
dtt=dble(dt)
c
c.......prewarp frequencies for bilinear z-transform
c
w1=w0-wc
w2=w0+wc
w1=2.0d0/dtt*dtan(w1*dtt/2.0d0)
w2=2.0d0/dtt*dtan(w2*dtt/2.0d0)
w0=(w1+w2)/2.0d0
wc=(w2-w1)/2.0d0
c
c.......calculate (m) prototype lowpass poles (p), translate into bandpass
c poles (s), calculate bilinear recursive coefficients (a1,a2),
c conjugates of coefficients (a1c,a2c)
c
do j=1,m
dtemp=pi*(2.0d0*dble(j)-1.0d0+dble(m))/(2.0d0*dble(m))
ctemp=dcmplx(0.0d0,dtemp)
p=cdexp(ctemp)
s=p*wc+dcmplx(0.0d0,w0)
a1(j)=wc*dtt/(2.0d0-s*dtt)
a2(j)=(2.0d0+s*dtt)/(2.0d0-s*dtt)
a1c(j)=dconjg(a1(j))
a2c(j)=dconjg(a2(j))
enddo
c
c.......put real time series xr into complex series z1 and remove mean
c if mzer set to 1
c
xmean=0.0
if(mzer.eq.1) then
do k=1,n
xmean=xmean+xr(k)
enddo
xmean=xmean/float(n)
endif
do k=1,n
z1(k)=xr(k)-xmean
enddo
c
c.......calculate (m) cascaded first order complex filters
c
do j=1,m
do k=1,n
z2(k)=z1(k)
enddo
z1(1)=a1(j)*z2(1)
do k=2,n
z1(k)=a1(j)*(z2(k)+z2(k-1))+a2(j)*z1(k-1)
enddo
enddo
c
c.......reverse filtered time series
c
do k=1,n
z2(k)=z1(n-k+1)
enddo
c
c.......calculate (m) cascaded first order complex filters on
c reversed series - note conjugate bilinear coefficients
c for complex conjugate of filter
c
do j=1,m
do k=1,n
z1(k)=z2(k)
enddo
z2(1)=a1c(j)*z1(1)
do k=2,n
z2(k)=a1c(j)*(z1(k)+z1(k-1))+a2c(j)*z2(k-1)
enddo
enddo
c
c.......reverse filtered time series
c
do k=1,n
z1(k)=z2(n-k+1)
enddo
c
c.......calculate real output time series (yr), envelope (er),
c envelope maximum (ermx)
c
ermx=0.0
do k=1,n
yr(k)=2.0*dreal(z1(k))
er(k)=2.0*cdabs(z1(k))
ermx=amax1(er(k),ermx)
enddo
return
end
|
{"hexsha": "ebb29972c24598ceecba0f5d7e9aec9fceb1041b", "size": 4541, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ni/src/lib/nfpfort/bfilter1.f", "max_stars_repo_name": "jlost/ncl_ncarg", "max_stars_repo_head_hexsha": "2206367f1887732bc7745bfb5ca56f6543f77948", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ni/src/lib/nfpfort/bfilter1.f", "max_issues_repo_name": "jlost/ncl_ncarg", "max_issues_repo_head_hexsha": "2206367f1887732bc7745bfb5ca56f6543f77948", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-09-27T18:49:30.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-27T18:49:30.000Z", "max_forks_repo_path": "ni/src/lib/nfpfort/bfilter1.f", "max_forks_repo_name": "jlost/ncl_ncarg", "max_forks_repo_head_hexsha": "2206367f1887732bc7745bfb5ca56f6543f77948", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2967741935, "max_line_length": 78, "alphanum_fraction": 0.5892975116, "num_tokens": 1519}
|
[STATEMENT]
lemma (in dist_execution) recv_insert_once:
"event_at (i,j) (Receive s (Insert m)) \<Longrightarrow> event_at (i,k) (Receive t (Insert m)) \<Longrightarrow> j = k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>event_at (i, j) (Receive s (Insert m)); event_at (i, k) (Receive t (Insert m))\<rbrakk> \<Longrightarrow> j = k
[PROOF STEP]
using no_data_corruption send_insert_id at_most_once
[PROOF STATE]
proof (prove)
using this:
event_at ?i (Receive ?s ?m) \<Longrightarrow> event_at ?s (Send ?m)
event_at ?i (Send (Insert ?m)) \<Longrightarrow> I ?m = ?i
\<lbrakk>event_at ?i (Receive ?s ?m); event_at ?j (Receive ?s ?m); fst ?i = fst ?j\<rbrakk> \<Longrightarrow> ?i = ?j
goal (1 subgoal):
1. \<lbrakk>event_at (i, j) (Receive s (Insert m)); event_at (i, k) (Receive t (Insert m))\<rbrakk> \<Longrightarrow> j = k
[PROOF STEP]
by (simp, metis (mono_tags) Pair_inject event_pred.simps fst_conv is_valid_event_id.simps)
|
{"llama_tokens": 379, "file": "WOOT_Strong_Eventual_Consistency_StrongConvergence", "length": 2}
|
[STATEMENT]
lemma strategy_attracts_irrelevant_override:
assumes "strategy_attracts p \<sigma> A W" "strategy p \<sigma>" "strategy p \<sigma>'"
shows "strategy_attracts p (override_on \<sigma>' \<sigma> (A - W)) A W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. strategy_attracts p (override_on \<sigma>' \<sigma> (A - W)) A W
[PROOF STEP]
proof (rule strategy_attractsI, rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
fix P v
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
let ?\<sigma> = "override_on \<sigma>' \<sigma> (A - W)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume "vmc_path G P v p ?\<sigma>"
[PROOF STATE]
proof (state)
this:
vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W))
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W))
[PROOF STEP]
interpret vmc_path G P v p ?\<sigma>
[PROOF STATE]
proof (prove)
using this:
vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W))
goal (1 subgoal):
1. vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume "v \<in> A"
[PROOF STATE]
proof (state)
this:
v \<in> A
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
hence "P $ 0 \<in> A"
[PROOF STATE]
proof (prove)
using this:
v \<in> A
goal (1 subgoal):
1. P $ 0 \<in> A
[PROOF STEP]
using \<open>v \<in> A\<close>
[PROOF STATE]
proof (prove)
using this:
v \<in> A
v \<in> A
goal (1 subgoal):
1. P $ 0 \<in> A
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P $ 0 \<in> A
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P $ 0 \<in> A
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume contra: "\<not>visits_via P A W"
[PROOF STATE]
proof (state)
this:
\<not> visits_via P A W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P $ 0 \<in> A
\<not> visits_via P A W
[PROOF STEP]
have "P $ 0 \<in> A - W"
[PROOF STATE]
proof (prove)
using this:
P $ 0 \<in> A
\<not> visits_via P A W
goal (1 subgoal):
1. P $ 0 \<in> A - W
[PROOF STEP]
unfolding visits_via_def
[PROOF STATE]
proof (prove)
using this:
P $ 0 \<in> A
\<nexists>n. enat n < llength P \<and> P $ n \<in> W \<and> lset (ltake (enat n) P) \<subseteq> A
goal (1 subgoal):
1. P $ 0 \<in> A - W
[PROOF STEP]
by (meson DiffI P_len not_less0 lset_ltake)
[PROOF STATE]
proof (state)
this:
P $ 0 \<in> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "\<not>lset P \<subseteq> A - W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> lset P \<subseteq> A - W
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. lset P \<subseteq> A - W \<Longrightarrow> False
[PROOF STEP]
assume "lset P \<subseteq> A - W"
[PROOF STATE]
proof (state)
this:
lset P \<subseteq> A - W
goal (1 subgoal):
1. lset P \<subseteq> A - W \<Longrightarrow> False
[PROOF STEP]
hence "\<And>v. v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) v = \<sigma> v"
[PROOF STATE]
proof (prove)
using this:
lset P \<subseteq> A - W
goal (1 subgoal):
1. \<And>v. v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) v = \<sigma> v
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) ?v = \<sigma> ?v
goal (1 subgoal):
1. lset P \<subseteq> A - W \<Longrightarrow> False
[PROOF STEP]
hence "path_conforms_with_strategy p P \<sigma>"
[PROOF STATE]
proof (prove)
using this:
?v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) ?v = \<sigma> ?v
goal (1 subgoal):
1. path_conforms_with_strategy p P \<sigma>
[PROOF STEP]
using path_conforms_with_strategy_irrelevant_updates[OF P_conforms]
[PROOF STATE]
proof (prove)
using this:
?v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) ?v = \<sigma> ?v
(\<And>v. v \<in> lset P \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) v = ?\<sigma>' v) \<Longrightarrow> path_conforms_with_strategy p P ?\<sigma>'
goal (1 subgoal):
1. path_conforms_with_strategy p P \<sigma>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
path_conforms_with_strategy p P \<sigma>
goal (1 subgoal):
1. lset P \<subseteq> A - W \<Longrightarrow> False
[PROOF STEP]
hence "vmc_path G P (P $ 0) p \<sigma>"
[PROOF STATE]
proof (prove)
using this:
path_conforms_with_strategy p P \<sigma>
goal (1 subgoal):
1. vmc_path G P (P $ 0) p \<sigma>
[PROOF STEP]
using conforms_to_another_strategy P_0
[PROOF STATE]
proof (prove)
using this:
path_conforms_with_strategy p P \<sigma>
path_conforms_with_strategy p P ?\<sigma>' \<Longrightarrow> vmc_path G P v p ?\<sigma>'
P $ 0 = v
goal (1 subgoal):
1. vmc_path G P (P $ 0) p \<sigma>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
vmc_path G P (P $ 0) p \<sigma>
goal (1 subgoal):
1. lset P \<subseteq> A - W \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
vmc_path G P (P $ 0) p \<sigma>
goal (1 subgoal):
1. False
[PROOF STEP]
using contra \<open>P $ 0 \<in> A\<close> assms(1)
[PROOF STATE]
proof (prove)
using this:
vmc_path G P (P $ 0) p \<sigma>
\<not> visits_via P A W
P $ 0 \<in> A
strategy_attracts p \<sigma> A W
goal (1 subgoal):
1. False
[PROOF STEP]
by (meson vmc_path.strategy_attractsE)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<not> lset P \<subseteq> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
hence "\<exists>n. enat n < llength P \<and> P $ n \<notin> A - W"
[PROOF STATE]
proof (prove)
using this:
\<not> lset P \<subseteq> A - W
goal (1 subgoal):
1. \<exists>n. enat n < llength P \<and> P $ n \<notin> A - W
[PROOF STEP]
by (meson lset_subset)
[PROOF STATE]
proof (state)
this:
\<exists>n. enat n < llength P \<and> P $ n \<notin> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>n. enat n < llength P \<and> P $ n \<notin> A - W
[PROOF STEP]
obtain n where n: "enat n < llength P \<and> P $ n \<notin> A - W"
"\<And>i. i < n \<Longrightarrow> \<not>(enat i < llength P \<and> P $ i \<notin> A - W)"
[PROOF STATE]
proof (prove)
using this:
\<exists>n. enat n < llength P \<and> P $ n \<notin> A - W
goal (1 subgoal):
1. (\<And>n. \<lbrakk>enat n < llength P \<and> P $ n \<notin> A - W; \<And>i. i < n \<Longrightarrow> \<not> (enat i < llength P \<and> P $ i \<notin> A - W)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ex_least_nat_le[of "\<lambda>n. enat n < llength P \<and> P $ n \<notin> A - W"]
[PROOF STATE]
proof (prove)
using this:
\<exists>n. enat n < llength P \<and> P $ n \<notin> A - W
\<lbrakk>enat ?n < llength P \<and> P $ ?n \<notin> A - W; \<not> (enat 0 < llength P \<and> P $ 0 \<notin> A - W)\<rbrakk> \<Longrightarrow> \<exists>k\<le>?n. (\<forall>i<k. \<not> (enat i < llength P \<and> P $ i \<notin> A - W)) \<and> enat k < llength P \<and> P $ k \<notin> A - W
goal (1 subgoal):
1. (\<And>n. \<lbrakk>enat n < llength P \<and> P $ n \<notin> A - W; \<And>i. i < n \<Longrightarrow> \<not> (enat i < llength P \<and> P $ i \<notin> A - W)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
enat n < llength P \<and> P $ n \<notin> A - W
?i < n \<Longrightarrow> \<not> (enat ?i < llength P \<and> P $ ?i \<notin> A - W)
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
hence n_min: "\<And>i. i < n \<Longrightarrow> P $ i \<in> A - W"
[PROOF STATE]
proof (prove)
using this:
enat n < llength P \<and> P $ n \<notin> A - W
?i < n \<Longrightarrow> \<not> (enat ?i < llength P \<and> P $ ?i \<notin> A - W)
goal (1 subgoal):
1. \<And>i. i < n \<Longrightarrow> P $ i \<in> A - W
[PROOF STEP]
using dual_order.strict_trans enat_ord_simps(2)
[PROOF STATE]
proof (prove)
using this:
enat n < llength P \<and> P $ n \<notin> A - W
?i < n \<Longrightarrow> \<not> (enat ?i < llength P \<and> P $ ?i \<notin> A - W)
\<lbrakk>?b < ?a; ?c < ?b\<rbrakk> \<Longrightarrow> ?c < ?a
(enat ?m < enat ?n) = (?m < ?n)
goal (1 subgoal):
1. \<And>i. i < n \<Longrightarrow> P $ i \<in> A - W
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?i < n \<Longrightarrow> P $ ?i \<in> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "n \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n \<noteq> 0
[PROOF STEP]
using \<open>P $ 0 \<in> A - W\<close> n(1)
[PROOF STATE]
proof (prove)
using this:
P $ 0 \<in> A - W
enat n < llength P \<and> P $ n \<notin> A - W
goal (1 subgoal):
1. n \<noteq> 0
[PROOF STEP]
by meson
[PROOF STATE]
proof (state)
this:
n \<noteq> 0
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
n \<noteq> 0
[PROOF STEP]
obtain n' where n': "Suc n' = n"
[PROOF STATE]
proof (prove)
using this:
n \<noteq> 0
goal (1 subgoal):
1. (\<And>n'. Suc n' = n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using not0_implies_Suc
[PROOF STATE]
proof (prove)
using this:
n \<noteq> 0
?n \<noteq> 0 \<Longrightarrow> \<exists>m. ?n = Suc m
goal (1 subgoal):
1. (\<And>n'. Suc n' = n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Suc n' = n
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
hence "P $ n' \<in> A - W"
[PROOF STATE]
proof (prove)
using this:
Suc n' = n
goal (1 subgoal):
1. P $ n' \<in> A - W
[PROOF STEP]
using n_min
[PROOF STATE]
proof (prove)
using this:
Suc n' = n
?i < n \<Longrightarrow> P $ ?i \<in> A - W
goal (1 subgoal):
1. P $ n' \<in> A - W
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P $ n' \<in> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P $ n' \<in> A - W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "P $ n' \<rightarrow> P $ Suc n'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P $ n' \<rightarrow> P $ Suc n'
[PROOF STEP]
using P_valid n(1) n' valid_path_edges
[PROOF STATE]
proof (prove)
using this:
valid_path P
enat n < llength P \<and> P $ n \<notin> A - W
Suc n' = n
\<lbrakk>valid_path ?P; enat (Suc ?n) < llength ?P\<rbrakk> \<Longrightarrow> ?P $ ?n \<rightarrow> ?P $ Suc ?n
goal (1 subgoal):
1. P $ n' \<rightarrow> P $ Suc n'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P $ n' \<rightarrow> P $ Suc n'
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P $ n' \<rightarrow> P $ Suc n'
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "P $ Suc n' \<notin> A \<union> W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
have "P $ n \<notin> W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P $ n \<notin> W
[PROOF STEP]
using contra n(1) n_min
[PROOF STATE]
proof (prove)
using this:
\<not> visits_via P A W
enat n < llength P \<and> P $ n \<notin> A - W
?i < n \<Longrightarrow> P $ ?i \<in> A - W
goal (1 subgoal):
1. P $ n \<notin> W
[PROOF STEP]
unfolding visits_via_def
[PROOF STATE]
proof (prove)
using this:
\<nexists>n. enat n < llength P \<and> P $ n \<in> W \<and> lset (ltake (enat n) P) \<subseteq> A
enat n < llength P \<and> P $ n \<notin> A - W
?i < n \<Longrightarrow> P $ ?i \<in> A - W
goal (1 subgoal):
1. P $ n \<notin> W
[PROOF STEP]
by (meson Diff_subset lset_ltake subsetCE)
[PROOF STATE]
proof (state)
this:
P $ n \<notin> W
goal (1 subgoal):
1. P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
P $ n \<notin> W
goal (1 subgoal):
1. P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
using n(1) n'
[PROOF STATE]
proof (prove)
using this:
P $ n \<notin> W
enat n < llength P \<and> P $ n \<notin> A - W
Suc n' = n
goal (1 subgoal):
1. P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P $ Suc n' \<notin> A \<union> W
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P $ Suc n' \<notin> A \<union> W
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P $ n' \<in> A - W
P $ n' \<rightarrow> P $ Suc n'
P $ Suc n' \<notin> A \<union> W
[PROOF STEP]
have "P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'"
[PROOF STATE]
proof (prove)
using this:
P $ n' \<in> A - W
P $ n' \<rightarrow> P $ Suc n'
P $ Suc n' \<notin> A \<union> W
goal (1 subgoal):
1. P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
[PROOF STEP]
using strategy_attracts_does_not_leave[of p \<sigma> A W "P $ n'" "P $ Suc n'"]
assms(1,2)
[PROOF STATE]
proof (prove)
using this:
P $ n' \<in> A - W
P $ n' \<rightarrow> P $ Suc n'
P $ Suc n' \<notin> A \<union> W
\<lbrakk>strategy_attracts p \<sigma> A W; strategy p \<sigma>; P $ n' \<rightarrow> P $ Suc n'; P $ n' \<in> A - W; P $ Suc n' \<notin> A \<union> W\<rbrakk> \<Longrightarrow> P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
strategy_attracts p \<sigma> A W
strategy p \<sigma>
goal (1 subgoal):
1. P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
goal (1 subgoal):
1. \<And>P v. \<lbrakk>v \<in> A; vmc_path G P v p (override_on \<sigma>' \<sigma> (A - W)); \<not> visits_via P A W\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
goal (1 subgoal):
1. False
[PROOF STEP]
using n(1) n' vmc_path_conforms \<open>P $ n' \<in> A - W\<close>
[PROOF STATE]
proof (prove)
using this:
P $ n' \<in> VV p \<and> \<sigma> (P $ n') \<noteq> P $ Suc n'
enat n < llength P \<and> P $ n \<notin> A - W
Suc n' = n
\<lbrakk>enat (Suc ?n) < llength P; P $ ?n \<in> VV p\<rbrakk> \<Longrightarrow> override_on \<sigma>' \<sigma> (A - W) (P $ ?n) = P $ Suc ?n
P $ n' \<in> A - W
goal (1 subgoal):
1. False
[PROOF STEP]
by (metis override_on_apply_in)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 7579, "file": "Parity_Game_AttractingStrategy", "length": 74}
|
import random
from enum import Enum
import numpy as np
class LocalSearch(Enum):
PER_VARIABLE_LOCAL_SEARCH = 1
def local_search_gene(population, fitness_function, method, config):
new_population = None
    if method == LocalSearch.PER_VARIABLE_LOCAL_SEARCH:
new_population = _per_variable_local_search_gene(population, fitness_function, config)
return new_population
def _per_variable_local_search_gene(population, fitness_func, args):
    # TODO(andre:2018-07-26): Adapt the local search to the genetic algorithm
return np.copy(population)
# for gene in population:
# best_local_fitness = fitness_func(gene)
#
# for name in gene.variables:
# best_variable_value = gene.get_value(name)
#
# variable_value = best_variable_value
# variable_value -= (args['local_search_step'] * args['local_search_quant'] * 0.5)
#
# for i in range(args['local_search_quant']):
# gene.set_value(name, variable_value)
#
# new_local_fitness = fitness_func(gene)
# if new_local_fitness < best_local_fitness:
# best_variable_value = variable_value
# best_local_fitness = new_local_fitness
#
# variable_value += args['local_search_step'] * i
#
# gene.set_value(name, best_variable_value)
# def _multi_variable_local_search_gene(population, fitness_func, args):
# def _adaptive_local_search_gene(population, fitness_func, args):
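# Hedged usage sketch (added for illustration, not part of the original module);
# the fitness function and config values below are assumptions.
if __name__ == '__main__':
    population = np.random.rand(10, 2)
    result = local_search_gene(population, lambda gene: np.sum(gene ** 2),
                               LocalSearch.PER_VARIABLE_LOCAL_SEARCH,
                               {'local_search_step': 0.1, 'local_search_quant': 4})
    print(result.shape)  # currently just a copy of the population: (10, 2)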
|
{"hexsha": "eb7bec4bad80bfd22abd0bd13810b0e1543a3177", "size": 1548, "ext": "py", "lang": "Python", "max_stars_repo_path": "ga/local_search.py", "max_stars_repo_name": "YannHyaric/evolutionary-computation", "max_stars_repo_head_hexsha": "af7778fd1b5d60a1e5630b483b55257adac0bbc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ga/local_search.py", "max_issues_repo_name": "YannHyaric/evolutionary-computation", "max_issues_repo_head_hexsha": "af7778fd1b5d60a1e5630b483b55257adac0bbc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ga/local_search.py", "max_forks_repo_name": "YannHyaric/evolutionary-computation", "max_forks_repo_head_hexsha": "af7778fd1b5d60a1e5630b483b55257adac0bbc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.25, "max_line_length": 94, "alphanum_fraction": 0.673126615, "include": true, "reason": "import numpy", "num_tokens": 346}
|
using DashBootstrapComponents, DashHtmlComponents
toast = dbc_toast(
[html_p("This is the content of the toast", className = "mb-0")],
header = "This is the header",
);
|
{"hexsha": "f1969c81c018f8d211dce4d4c3c38f847e73ce25", "size": 178, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/components_page/components/toast/simple.jl", "max_stars_repo_name": "glsdown/dash-bootstrap-components", "max_stars_repo_head_hexsha": "0ebea4f7de43975f6e3a2958359c4480ae1d4927", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 776, "max_stars_repo_stars_event_min_datetime": "2019-02-07T19:36:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:53:04.000Z", "max_issues_repo_path": "docs/components_page/components/toast/simple.jl", "max_issues_repo_name": "glsdown/dash-bootstrap-components", "max_issues_repo_head_hexsha": "0ebea4f7de43975f6e3a2958359c4480ae1d4927", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 350, "max_issues_repo_issues_event_min_datetime": "2019-02-05T10:42:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:23:35.000Z", "max_forks_repo_path": "docs/components_page/components/toast/simple.jl", "max_forks_repo_name": "glsdown/dash-bootstrap-components", "max_forks_repo_head_hexsha": "0ebea4f7de43975f6e3a2958359c4480ae1d4927", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 219, "max_forks_repo_forks_event_min_datetime": "2019-02-10T13:46:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T17:03:39.000Z", "avg_line_length": 25.4285714286, "max_line_length": 69, "alphanum_fraction": 0.702247191, "num_tokens": 46}
|
import pandas as pd
import json
import scipy.stats
import random
import numpy as np
from yattag import Doc
import itertools
from collections import defaultdict
import argparse
import os
random.seed(1)
import hashlib
def hashhex(s):
"""Returns a heximal formated SHA1 hash of the input string."""
h = hashlib.sha1()
h.update(s.encode('utf-8'))
return h.hexdigest()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-input", required=True, type=str)
parser.add_argument("-out", required=True, type=str)
args = parser.parse_args()
with open(args.input) as f:
data = json.load(f)
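    # Expected structure of `data`, inferred from the field accesses below (illustrative):
    #   [{"query": "...", "ids": [0, 2], "text": ["first sentence", "second sentence", ...]}, ...]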
for article in data:
doc, tag, text = Doc().tagtext()
doc.stag('link', rel='stylesheet', href='style.css')
ids = set(article['ids'])
with tag('body'):
with tag('p'):
with tag('u'):
text("query tokens")
doc.stag('br')
text(f"{article['query']}")
with tag('p'):
with tag('u'):
text("prediction")
doc.stag('br')
for i, sentence in enumerate(article['text']):
if i in ids:
with tag('span', klass="topic-2"):
text(sentence.capitalize())
else:
with tag('span'):
text(sentence.capitalize())
text(" ")
with open(os.path.join(args.out, hashhex(" ".join(article['text'][0])) + '.html'), 'w') as w:
w.write(doc.getvalue())
|
{"hexsha": "9ea60931f32fbe346e8f347edcb80e6b554e2979", "size": 1747, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/generate_html_from_outputs.py", "max_stars_repo_name": "oja/qfsumm", "max_stars_repo_head_hexsha": "dfa3541cfad928df412c86888ef0354ea97e8382", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-12T02:25:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T02:25:57.000Z", "max_issues_repo_path": "scripts/generate_html_from_outputs.py", "max_issues_repo_name": "oja/aosumm", "max_issues_repo_head_hexsha": "dfa3541cfad928df412c86888ef0354ea97e8382", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/generate_html_from_outputs.py", "max_forks_repo_name": "oja/aosumm", "max_forks_repo_head_hexsha": "dfa3541cfad928df412c86888ef0354ea97e8382", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7636363636, "max_line_length": 105, "alphanum_fraction": 0.4928448769, "include": true, "reason": "import numpy,import scipy", "num_tokens": 350}
|
# !/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Shengjia Yan
# Date: 2017-10-26
# Email: i@yanshengjia.com
import logging
import numpy as np
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def load_confusion_matrix(ref_score, pred_score):
ref_file = open('../../nea/output/emb/rnn/prompt_1/fold_0/preds/dev_ref.txt', 'r')
pred_file = open('../../nea/output/emb/rnn/prompt_1/fold_0/preds/dev_pred_49.txt', 'r')
for ref in ref_file.readlines():
ref = ref.strip('\n')
ref = int(ref)
ref_score.append(ref)
for pred in pred_file.readlines():
pred = pred.strip('\n')
pred = float(pred)
pred = round(pred)
pred_score.append(pred)
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
# 1. find out how many samples per class have received their correct label
        # i.e. normalize each row: the fraction of samples whose true class is k that are predicted as each class
        # e.g. if 25 samples have true label 6 and 10 of them are predicted as class 7, then the
        # confusion-matrix cell with true label = 6 and predicted label = 7 holds 10/25 = 0.4
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# 2. get the precision (fraction of class-k predictions that have ground truth label k)
        # i.e. the fraction of class-k predictions whose ground-truth label is indeed k
        # e.g. if 12 samples are predicted as class k but only 9 of them truly belong to class k, the precision is 0.75
# cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def main():
ref_score = [] # true label
pred_score = [] # predicted label
load_confusion_matrix(ref_score, pred_score)
nea_matrix = confusion_matrix(ref_score, pred_score)
np.set_printoptions(precision=2)
class_names = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(nea_matrix, classes=class_names, title='Confusion matrix, without normalization')
plt.savefig('./unnormalized_cm.png')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(nea_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix')
plt.savefig('./normalized_cm.png')
# plt.show()
if __name__ == '__main__':
main()
|
{"hexsha": "04c463c4e66719daeda64853ef761423a9ff054f", "size": 3128, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/plot/plot_confusion_matrix.py", "max_stars_repo_name": "yanshengjia/nlp", "max_stars_repo_head_hexsha": "43398652b2cab9b85fd042f60e6f68c7b48697bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-12T07:48:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-12T07:48:10.000Z", "max_issues_repo_path": "utils/plot/plot_confusion_matrix.py", "max_issues_repo_name": "yanshengjia/nlp", "max_issues_repo_head_hexsha": "43398652b2cab9b85fd042f60e6f68c7b48697bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/plot/plot_confusion_matrix.py", "max_forks_repo_name": "yanshengjia/nlp", "max_forks_repo_head_hexsha": "43398652b2cab9b85fd042f60e6f68c7b48697bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-05-02T06:53:29.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-02T06:53:29.000Z", "avg_line_length": 31.595959596, "max_line_length": 113, "alphanum_fraction": 0.6524936061, "include": true, "reason": "import numpy", "num_tokens": 894}
|
# TODO: just for test ===================
import numpy
from PIL import Image
import torch
'''
a= numpy.array(Image.open('/home/leejeyeol/Datasets/Avenue/training_videos/15/output_00118.png'),dtype=numpy.float)
a2 =Image.fromarray(a)
a2.show()
print(a)
b= numpy.array(Image.open('/home/leejeyeol/Datasets/Avenue/mean_image.png'),dtype=numpy.float)
b2 = Image.fromarray(b)
b2.show()
print(b)
c= a-b
c2 = Image.fromarray(c)
c2.show()
print(c)
d = numpy.array(numpy.round(c),numpy.uint8)
d2 = Image.fromarray(d)
d2.show()
print(d)
'''
'''
for i in range(0, 4259):
testfalse = torch.load("/mnt/fastdataset/centering_test_false/%05d.t7" % i)
testtrue = torch.load("/mnt/fastdataset/centering_test_false/%05d.t7" % i)
print(i," : ",sum(sum(sum(testfalse-testtrue))))
'''
#=========================================
|
{"hexsha": "ef77b90621ee2b166cbfc39f6553617928c719a8", "size": 821, "ext": "py", "lang": "Python", "max_stars_repo_path": "legacy/data_generation/NIPS2017_evaluation.py", "max_stars_repo_name": "neohanju/AutoencodingTheWorld", "max_stars_repo_head_hexsha": "23f8a89bb7399df63cd7a0cb1b5a750214a44072", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-09-22T02:34:32.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-10T02:59:57.000Z", "max_issues_repo_path": "legacy/data_generation/NIPS2017_evaluation.py", "max_issues_repo_name": "neohanju/AutoencodingTheWorld", "max_issues_repo_head_hexsha": "23f8a89bb7399df63cd7a0cb1b5a750214a44072", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "legacy/data_generation/NIPS2017_evaluation.py", "max_forks_repo_name": "neohanju/AutoencodingTheWorld", "max_forks_repo_head_hexsha": "23f8a89bb7399df63cd7a0cb1b5a750214a44072", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.65625, "max_line_length": 115, "alphanum_fraction": 0.6552984166, "include": true, "reason": "import numpy", "num_tokens": 236}
|
import numpy as np
class Features:
def __init__(self):
self._horest_features = []
self._texture_feature = []
self._sift_SDS = []
self._sift_SOH = []
@property
def horest_features(self):
return self._horest_features
@property
def texture_feature(self):
return self._texture_feature
@property
def sift_SDS(self):
return self._sift_SDS
@property
def sift_SOH(self):
return self._sift_SOH
@horest_features.setter
def horest_features(self, value):
self._horest_features = value
@texture_feature.setter
def texture_feature(self, value):
self._texture_feature = value
@sift_SDS.setter
def sift_SDS(self, value):
self._sift_SDS = value
@sift_SOH.setter
def sift_SOH(self, value):
self._sift_SOH = value
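# Minimal usage sketch (added for illustration; the array shapes are placeholders).
if __name__ == '__main__':
    feats = Features()
    feats.horest_features = np.zeros(18)
    feats.texture_feature = np.ones(5)
    print(len(feats.horest_features), feats.texture_feature.sum())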
|
{"hexsha": "2f1158442a599aa1246e5addf62f2e6ec608158f", "size": 866, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/models/features.py", "max_stars_repo_name": "Shaalan31/LIWI", "max_stars_repo_head_hexsha": "b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-16T07:37:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-04T10:31:02.000Z", "max_issues_repo_path": "server/models/features.py", "max_issues_repo_name": "Shaalan31/LIWI", "max_issues_repo_head_hexsha": "b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-19T00:22:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T01:12:35.000Z", "max_forks_repo_path": "server/models/features.py", "max_forks_repo_name": "Shaalan31/LIWI", "max_forks_repo_head_hexsha": "b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-04T10:58:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-06T18:52:01.000Z", "avg_line_length": 22.2051282051, "max_line_length": 37, "alphanum_fraction": 0.6408775982, "include": true, "reason": "import numpy", "num_tokens": 224}
|
import numpy as np
import matplotlib.pyplot as plt
import imageio
import scipy, scipy.misc, scipy.signal
import cv2
import sys
import PIL
from PIL import Image
windowName = ''
threshold = 11
size = 5
# path to input image is specified and
# image is loaded with imread command
image1 = cv2.imread('0136ns.png')
# image1 = cv2.imread('02.jpg')
# cv2.cvtColor is applied over the
# image input with applied parameters
# to convert the image in grayscale
img = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
# applying different thresholding
# techniques on the input image
thresh1 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 199, 5)
thresh2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 11, 5)
# the window showing output images
# with the corresponding thresholding
# techniques applied to the input image
cv2.namedWindow( windowName )
def update():
thresh2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, threshold, size)
cv2.imshow(windowName, thresh2)
def onTrackbarChange1(x):
global threshold
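    # adaptiveThreshold's blockSize must be an odd integer >= 3, hence the 2*x + 3 mapping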
threshold = x*2+3
update()
def onTrackbarChange2(x):
global size
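    # despite its name, this trackbar controls C, the constant subtracted from the local weighted mean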
size = x + 1
update()
cv2.createTrackbar(
'threshold',
windowName,
4,
99,
onTrackbarChange1,
)
cv2.createTrackbar(
'size',
windowName,
4,
99,
onTrackbarChange2,
)
# cv2.setTrackbarMin( 'min2', windowName, 0 )
# cv2.imshow('Adaptive Mean', thresh1)
# cv2.imshow('Adaptive Gaussian', thresh2)
cv2.imshow(windowName, thresh2)
while True:
print("Press [q] or [esc] to close the window.")
k = cv2.waitKey() & 0xFF
if k in (ord("s"), ord("S")):
# print("SAVE IMAGE")
# print( thresh2.ndim )
# cv2.cvtColor(thresh2, cv2.COLOR_RGB2GRAY)
cv2.imwrite( "output.bmp", thresh2 )
if k in (ord("q"), ord("\x1b")):
        cv2.destroyWindow(windowName)
break
# # De-allocate any associated memory usage
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
|
{"hexsha": "49a2ab22e8192297490d8fa6636222f77a77ec93", "size": 2229, "ext": "py", "lang": "Python", "max_stars_repo_path": "adaptative_threshold.py", "max_stars_repo_name": "fthernan/interferometry-processing-tools", "max_stars_repo_head_hexsha": "84420990410e117af08675247078734050079a22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "adaptative_threshold.py", "max_issues_repo_name": "fthernan/interferometry-processing-tools", "max_issues_repo_head_hexsha": "84420990410e117af08675247078734050079a22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "adaptative_threshold.py", "max_forks_repo_name": "fthernan/interferometry-processing-tools", "max_forks_repo_head_hexsha": "84420990410e117af08675247078734050079a22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3295454545, "max_line_length": 78, "alphanum_fraction": 0.6451323463, "include": true, "reason": "import numpy,import scipy", "num_tokens": 614}
|
import argparse
from numpy.random import default_rng
NUMBER_OF_SAMPLES = 1
def generate_normal_random_numbers(samples=1):
rng = default_rng()
return rng.standard_normal(samples)
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(
description="Generate random numbers from the standard normal distribution")
parser.add_argument("samples", type=int, nargs="?",
help="Number of samples")
args = parser.parse_args()
if args.samples:
samples = args.samples
else:
samples = NUMBER_OF_SAMPLES
# generate random numbers
numbers = generate_normal_random_numbers(samples)
    if len(numbers) < 10:
print(numbers)
else:
print("Done.")
|
{"hexsha": "c13616ee0118d701f5ff3c365755a4bd428ecf94", "size": 770, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/normal_numbers.py", "max_stars_repo_name": "sernamar/random-numbers", "max_stars_repo_head_hexsha": "9117a59b246ced3d82803cfd7b82b05f8a456d97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/normal_numbers.py", "max_issues_repo_name": "sernamar/random-numbers", "max_issues_repo_head_hexsha": "9117a59b246ced3d82803cfd7b82b05f8a456d97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/normal_numbers.py", "max_forks_repo_name": "sernamar/random-numbers", "max_forks_repo_head_hexsha": "9117a59b246ced3d82803cfd7b82b05f8a456d97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3333333333, "max_line_length": 84, "alphanum_fraction": 0.6675324675, "include": true, "reason": "from numpy", "num_tokens": 156}
|
import sys
sys.path.insert(1, '..')
import pickle
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import datetime as dt
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import GRU
from tensorflow.keras.layers import Dropout
from tensorflow.keras import optimizers
from database.database import db
from database.tables.price import StockPrice
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
engine = create_engine('sqlite:///../database/database.db', echo = True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Session = sessionmaker(bind = engine)
def company_name2dict():
with open('../database/tables/ticker_name.txt', 'r') as f:
_dict = {}
lines = f.readlines()
for l in lines:
c = l.split('\t')
_dict[c[1].replace('\n','')] = c[0]
return _dict
def to_model_input(time_step, dataset, target_col_idx):
X = []
y = []
for i in range(time_step, len(dataset)):
X.append(dataset[i-time_step:i, :])
y.append(dataset[i, target_col_idx])
return np.array(X), np.array(y)
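# Worked example (illustrative, not from the original source): with time_step=2 on a
# 4-row dataset, X[0] = dataset[0:2, :] and X[1] = dataset[1:3, :], with targets
# y[0] = dataset[2, target_col_idx] and y[1] = dataset[3, target_col_idx]; each
# window of `time_step` rows predicts the value immediately after it.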
def train_model(X_train, y_train, epochs, batch_size):
model = Sequential()
model.add(GRU(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 5)))
model.add(GRU(units = 100, return_sequences = True))
model.add(GRU(units = 100, return_sequences = True))
model.add(GRU(units = 50))
model.add(Dropout(0.2))
model.add(Dense(units = 1))
# Compiling
optimizer = optimizers.Adam()
model.compile(optimizer = optimizer , loss = 'mean_squared_error')
train_history = model.fit(X_train, y_train, epochs=epochs, \
batch_size=batch_size, verbose=1)
return model
def get_prediction(model, X_test, test_df, min_max_scaler):
predicted_price_scaler = model.predict(X_test)
real_stock_price = test_df.values[:, 3]
real_stock_price_scaler = min_max_scaler.transform(test_df.values)
# Convert the predicted price_scaler back
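    # (writing the scaled predictions into the adj_close column of a real, fully-scaled
    # frame lets inverse_transform map every column back to the original units at once)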
real_stock_price_scaler[:, 3] = predicted_price_scaler.flatten()
predicted_price = min_max_scaler.inverse_transform(real_stock_price_scaler)[:, 3]
return predicted_price, real_stock_price
def get_predicted_Q():
# with open('./sp500tickers.pkl', 'rb') as f:
# tickers = pickle.load(f)
sp500_wiki_url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
resp = requests.get(sp500_wiki_url)
soup = BeautifulSoup(resp.text, 'html.parser')
table = soup.find('table', class_='wikitable sortable')
trs = table.find_all('tr')
symbols = []
company_names = []
industries = []
for idx, tr in enumerate(trs):
if idx != 0:
tds = tr.find_all('td')
symbols.append(tds[0].text.replace('\n',''))
company_names.append(tds[1].text.replace('\n',''))
industries.append(tds[3].text.replace('\n',''))
stock_data_df = pd.DataFrame({
'symbol': symbols,
'company': company_names,
'industry': industries
})
tickers = []
company = []
industry = []
stock_dict = company_name2dict()
sym = list(stock_dict.keys())
com = list(stock_dict.values())
for idx, row in stock_data_df.iterrows():
if row[0] in sym:
tickers.append(row[0])
company.append(stock_dict[row[0]])
industry.append(row[2])
Q_predict_dict = defaultdict(list)
real_price_dict = defaultdict(list)
for tick in tqdm(tickers):
predicted_price, real_stock_price, test_df = predict_Q(tick)
Q_predict_dict[tick].append(predicted_price)
real_price_dict[tick].append(real_stock_price)
with open('predict_dict', 'wb') as f:
pickle.dump(Q_predict_dict, f)
with open('real_price_dict', 'wb') as f:
pickle.dump(real_price_dict, f)
return ''
def get_stock_price_offline(tick):
session = Session()
stock_default_list = defaultdict(list)
stock_default_list[tick] = []
# Use the Flask-SQLAlchemy to query our data from database
stock_data = session.query(StockPrice).filter(StockPrice.comp==tick).all()
date_ = []
high = []
low = []
open_ = []
adj_close = []
vol = []
# Store/Split the data into train & test dataframe
for row in stock_data:
date = dt.datetime.strptime(str(row.date), '%Y-%m-%d')
date_.append(date)
high.append(row.high)
low.append(row.low)
open_.append(row.open_)
adj_close.append(row.adj_close)
vol.append(row.vol)
df = pd.DataFrame({
'date': date_,
'high': high,
'low': low,
'open': open_,
'adj_close': adj_close,
'vol': vol
})
df.set_index('date', inplace=True)
# split dataframe into train & test part
train_df, test_df = df['2012-01-01': '2016-12-31'], df['2017-01-01': '2020-06-30']
# We need to standardize the input before putting them into the model
min_max_scaler = MinMaxScaler(feature_range=(0, 1))
train_scaled = min_max_scaler.fit_transform(train_df.values)
time_step = 180
target_col_idx = 3
# Get the trainset part
X_train, y_train = to_model_input(time_step, train_scaled, target_col_idx)
# Get the testset part
dataset_total = pd.concat([train_df, test_df], axis=0)
testing_inputs = dataset_total[len(dataset_total)-len(test_df)-time_step:]
testing_scaled = min_max_scaler.transform(testing_inputs)
X_test, y_test = to_model_input(time_step, testing_scaled, target_col_idx)
stock_default_list[tick].append(X_train)
stock_default_list[tick].append(y_train)
stock_default_list[tick].append(X_test)
stock_default_list[tick].append(y_test)
stock_default_list[tick].append(test_df)
session.close()
return test_df, stock_default_list, min_max_scaler
def predict_Q(tick):
test_df, stock_default_list, min_max_scaler = get_stock_price_offline(tick)
X_train = stock_default_list[tick][0]
y_train = stock_default_list[tick][1]
batch_size = 16
epochs = 64
model = train_model(X_train, y_train, epochs, batch_size)
X_test = stock_default_list[tick][2]
test_df = stock_default_list[tick][4]
predicted_price, real_stock_price = get_prediction(model, X_test, test_df, min_max_scaler)
return predicted_price, real_stock_price, test_df
if __name__ == "__main__":
get_predicted_Q()
|
{"hexsha": "44eb4904e210a7a3531ee94621a96fc75ddcc273", "size": 6780, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/offline_predict_q.py", "max_stars_repo_name": "penguinwang96825/Intelligent-Asset-Allocation", "max_stars_repo_head_hexsha": "62aa4e70dae50c60c4dae7acfe0388028be242a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-08T02:18:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-08T06:51:09.000Z", "max_issues_repo_path": "model/offline_predict_q.py", "max_issues_repo_name": "penguinwang96825/Intelligent-Asset-Allocation", "max_issues_repo_head_hexsha": "62aa4e70dae50c60c4dae7acfe0388028be242a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-13T19:04:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:10:21.000Z", "max_forks_repo_path": "model/offline_predict_q.py", "max_forks_repo_name": "penguinwang96825/Intelligent-Asset-Allocation", "max_forks_repo_head_hexsha": "62aa4e70dae50c60c4dae7acfe0388028be242a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-08T04:49:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T01:23:17.000Z", "avg_line_length": 30.4035874439, "max_line_length": 94, "alphanum_fraction": 0.6719764012, "include": true, "reason": "import numpy", "num_tokens": 1664}
|
import scipy.fft
import matplotlib.pyplot as plt
import numpy as np
x1 = [0, 4, 2, 0]
dft=scipy.fft.fft(x1)
plt.figure(figsize=(8,9))
plt.subplot(2, 1, 1)
plt.stem(dft.real, use_line_collection = True)
plt.xlabel('k')
plt.ylabel('Re{X[k]}')
plt.title('Real part of DFT')
plt.subplot(2, 1, 2)
plt.stem(dft.imag, use_line_collection = True)
plt.xlabel('k')
plt.ylabel('Im{X[k]}')
plt.title('Imaginary Part of DFT')
plt.show()
print('DFT X[k] =',dft)
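# Sanity check added for illustration: computing X[k] = sum_n x[n]*exp(-2j*pi*k*n/4)
# by hand for x1 = [0, 4, 2, 0] gives [6, -2-4j, -2, -2+4j].
print(np.allclose(dft, [6, -2 - 4j, -2, -2 + 4j]))  # expected: True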
|
{"hexsha": "fc10120e6149f6e7f0da26e9706dbcb0eee7c372", "size": 486, "ext": "py", "lang": "Python", "max_stars_repo_path": "Py_lab/Lab 4/scipy_one.py", "max_stars_repo_name": "veterinarian-5300/Genious-Python-Code-Generator", "max_stars_repo_head_hexsha": "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-27T06:24:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-27T06:24:21.000Z", "max_issues_repo_path": "Py_lab/Lab 4/scipy_one.py", "max_issues_repo_name": "veterinarian-5300/Genious-Python-Code-Generator", "max_issues_repo_head_hexsha": "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Py_lab/Lab 4/scipy_one.py", "max_forks_repo_name": "veterinarian-5300/Genious-Python-Code-Generator", "max_forks_repo_head_hexsha": "d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.7586206897, "max_line_length": 47, "alphanum_fraction": 0.6419753086, "include": true, "reason": "import numpy,import scipy", "num_tokens": 152}
|
import autograd.numpy as np
import numpy.random as npr
from trajopt import core
npr.seed(1337)
if __name__ == '__main__':
Q = np.eye(2)
q = np.zeros((2, ))
q0 = 0.0
mu = np.zeros((2, ))
sigma = np.eye(2)
# expectation of quadratic under gaussian
print(core.quad_expectation(mu, sigma, Q, q, q0))
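    # Cross-check added for illustration, assuming core.quad_expectation computes
    # E[x'Qx + q'x + q0] for x ~ N(mu, sigma); the closed form is
    # trace(Q @ sigma) + mu @ Q @ mu + q @ mu + q0 (here: 2.0).
    print(np.trace(Q @ sigma) + mu @ Q @ mu + q @ mu + q0)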
|
{"hexsha": "61eeb54c3ff8e1d4bf4c04a4cc190166f9b9822a", "size": 329, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/arma_test.py", "max_stars_repo_name": "JoeMWatson/trajopt", "max_stars_repo_head_hexsha": "8b98718721e0c373cd7dc01a35f42447c1134713", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-17T08:42:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-17T08:42:17.000Z", "max_issues_repo_path": "tests/arma_test.py", "max_issues_repo_name": "JoeMWatson/trajopt", "max_issues_repo_head_hexsha": "8b98718721e0c373cd7dc01a35f42447c1134713", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/arma_test.py", "max_forks_repo_name": "JoeMWatson/trajopt", "max_forks_repo_head_hexsha": "8b98718721e0c373cd7dc01a35f42447c1134713", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2777777778, "max_line_length": 53, "alphanum_fraction": 0.6291793313, "include": true, "reason": "import numpy", "num_tokens": 105}
|
import os, sys, time
import argparse
import learn_mtfixbmodel
import mtfixb_model
import parseopts
import torch
import torch.optim as optim
import numpy as np
def parse_args(args=None):
parser = argparse.ArgumentParser(description='Optimise Z on new data')
parser.add_argument('--style_ix', dest='style_ix',
help='Style index for optimisation (1,..,8).', type=int, required=True)
parser.add_argument('--latent_k', dest='k',
help='Dimension of parameter manifold (choose 3, 5, 7).', type=int, required=True)
parser.add_argument('--train_set_size', dest='train_set_size', default=-1,
help='Size of training set that model was learned on (choose 4, 8, 16, 32, 64).', type=int)
parser.add_argument('--test_set_size', dest='test_set_size',
help='Size of batch to perform optimisation over.', type=int, default=32)
    parser.add_argument('--B_forward', dest='B_forward',
                        help='Number of batches forward from the chosen test index. Since test batches are generally '
                             'not contiguous, the default `1` is recommended, although this then amounts to '
                             'density estimation rather than long-term prediction.', type=int, default=1)
    parser.add_argument('--use_cpu', dest='use_cpu', help='Run on CPU instead of CUDA.', action='store_true')
    parser.add_argument('--data_dir', dest='data_dir', help='Data directory', type=str, default="../../mocap-mtds/data")
    parser.add_argument('--training_iters', dest='iternums', help='Number of iterations the model was trained for.', type=int)
    parser.add_argument('--model_type', dest='model_type', help='`biasonly`, `no_mt_bias` or `full_mtds`.')
parser.add_argument('--learning_rate', dest='learning_rate', help='Learning rate for Z optimisation', type=float,
default=8e-3)
    parser.add_argument('--model_path', dest='model_path', help='["Advanced"] The script usually constructs the path ' +
                        'for the model from your specification. Otherwise supply a custom path here', type=str, default='')
parser.add_argument('--devmode', dest='devmode', help='Used for development on local machine: changes modelpath.',
action='store_true')
if args is None:
args = parser.parse_args()
else:
args = parser.parse_args(args)
return args
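# Example invocation (hypothetical values, shown for illustration):
#   python optim_z.py --style_ix 1 --latent_k 3 --train_set_size 4 \
#       --model_type biasonly --training_iters 20000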
def optimise(args):
# ---------------------------------------------------------------------------------------------------------------------
# === Inputs =====================================
style_ix = args.style_ix # 1, 2, 3, 4, 5, 6, 7, 8
z_dim = args.k # 3, 5, 7
train_set_size = args.train_set_size # 4, 8, 16, 32, 64
test_set_size = args.test_set_size # 32 => cannot do 64 since some styles have less held-out data than this.
B_forward = args.B_forward # 1 => we can't really do LT prediction since test cases are not contiguous in general.
device = "cpu" if args.use_cpu else "cuda"
model_iternums = args.iternums # 10000, 20000
model_type = args.model_type # "biasonly", "no_mt_bias"
lr = args.learning_rate # 1e-2, 8e-3, 5e-3, 3e-3, 1e-3 => most reliable has been 8e-3 with poss. annealing.
data_dir = args.data_dir
# ---------------------------------------------------------------------------------------------------------------------
# Input checks
assert model_type in ["biasonly", "no_mt_bias", "full_mtds"]
assert device in ["cpu", "cuda"], "device must be 'cpu' or 'cuda'."
assert train_set_size in [-1, 0, 4, 8, 16, 32, 64]
assert B_forward == 1, "cannot do LT prediction as not contiguous."
assert z_dim in [3, 5, 7, 8]
# Input transformations
iscpu = device == "cpu"
biasonly = model_type == "biasonly"
is_mtl = train_set_size >= 0
model_iternums = 20000 if is_mtl else model_iternums
# Construct model path
if len(args.model_path) == 0:
if args.train_set_size > 0:
datafiles = "edin_Us_30fps_N{0:d}/edin_Ys_30fps_N{0:d}".format(train_set_size) if train_set_size > 0 else \
"edin_Us_30fps_final/edin_Ys_30fps_final"
else:
datafiles = "edin_Us_30fps_variableN_test_complement/edin_Ys_30fps_variableN_test_complement"
model_path = "experiments/style_{:d}".format(9 if train_set_size > 0 else style_ix) + \
"/out_64/iterations_{:d}".format(model_iternums) + \
"/decoder_size_1024/zdim_{:d}".format(z_dim) + \
"/ar_coef_0/psi_lowrank_30/optim_Adam/lr_{:.0e}/std/".format(2e-5 if not biasonly else 5e-5) + \
datafiles + "/not_residual_vel/model_{:d}".format(model_iternums)
if args.devmode:
if model_type == "biasonly":
model_path = "../../mocap-mtds/experiments/mtl/biasonly_k{:d}_N{:d}_20000".format(z_dim, train_set_size)
else:
# print(os.getcwd())
model_path = "../../mocap-mtds/experiments/mtl/fa/k{:d}_N{:d}_fullmtds_20000".format(z_dim, train_set_size)
else:
model_path = args.model_path
print("model: {:s}".format(model_path))
# Load model
load_args = ["--style_ix", str(style_ix), "--load", model_path,
"--latent_k", str(z_dim), "--input_size", str(35)]
iscpu and load_args.append("--use_cpu")
load_args = parseopts.parse_args(load_args)
load_args = parseopts.initial_arg_transform(load_args)
model = learn_mtfixbmodel.create_model(load_args, 850)
iscpu and model.cpu()
# Set AD off for most parameters
model.layer1_rnn.requires_grad = False
model.layer1_linear.requires_grad = False
model.mt_net.Z_logit_s.data = model.mt_net.Z_logit_s.data * 1e-7
model.mt_net.Z_logit_s.requires_grad = False
model.layer1_rnn.train()
if biasonly:
model.mt_net.rnn.requires_grad = False
model.mt_net.emission.requires_grad = False
model.mt_net.rnn.train()
else:
model.mt_net.psi_decoder.requires_grad = False
# Get test data
print("Reading test data (test index {0:d}).".format(style_ix))
if is_mtl:
input_fname_ = "edin_Us_30fps_variableN_test_seeds_{:d}.npz"
output_fname_ = "edin_Ys_30fps_variableN_test_seeds_{:d}.npz"
test_set_Y = [np.load(os.path.join(data_dir, output_fname_.format(i))) for i in range(1, 8+1)]
test_set_Y = [npz[str(j)] for npz in test_set_Y for j in range(1, 4+1)]
test_set_U = [np.load(os.path.join(data_dir, input_fname_.format(i))) for i in range(1, 8 + 1)]
test_set_U = [npz[str(j)] for npz in test_set_U for j in range(1, 4 + 1)]
# Create inputs/outputs for optimisation
ysz = test_set_Y[0].shape[1]
usz = test_set_U[0].shape[1]
bsz = 4 * 8 # each style has 4 seed sequences.
Yb, Ub = torch.zeros(bsz, 64, ysz).float(), torch.zeros(bsz, 64, usz).float()
for i in range(bsz):
Ub[i, :, :] = torch.from_numpy(test_set_U[i])
Yb[i, :, :] = torch.from_numpy(test_set_Y[i])
output_fname, input_fname = output_fname_.format(0), input_fname_.format(0)
test_set_size = bsz
else:
output_fname, input_fname = "edin_Ys_30fps_final.npz", "edin_Us_30fps_final.npz"
style_lkp = np.load(os.path.join(data_dir, "styles_lkp.npz"))
test_set_Y = np.load(os.path.join(data_dir, output_fname))
test_set_U = np.load(os.path.join(data_dir, input_fname))
test_set_Y = [test_set_Y[str(i)] for i in style_lkp[str(style_ix)]]
test_set_U = [test_set_U[str(i)] for i in style_lkp[str(style_ix)]]
all_data = list(mtfixb_model.DataIterator(test_set_Y, test_set_U, 64, min_size=64, overlap2=False))
test_set_Y = [all_data[i][0] for i in range(len(all_data))]
test_set_U = [all_data[i][1] for i in range(len(all_data))]
# Determine which test examples we will use.
test_ixs = np.linspace(0, len(test_set_Y) - 1 - B_forward, test_set_size).round().astype('int')
# Create inputs/outputs for optimisation
ysz = test_set_Y[0].shape[1]
usz = test_set_U[0].shape[1]
Yb, Ub = torch.zeros(test_set_size, 64, ysz).float(), torch.zeros(test_set_size, 64, usz).float()
for i in range(test_set_size):
Ub[i, :, :] = torch.from_numpy(test_set_U[test_ixs[i]])
Yb[i, :, :] = torch.from_numpy(test_set_Y[test_ixs[i]])
print("Using files {:s}; {:s}".format(input_fname, output_fname))
print("done reading data.")
if not iscpu:
Ub = Ub.cuda()
Yb = Yb.cuda()
# Generate initial Z and set-up for optimisation.
n_per_style = 120 if train_set_size == 0 else train_set_size
zixs = list(range(0, n_per_style*8, n_per_style // 4))
Z = model.mt_net.Z_mu[zixs, :].detach().to(device)
Z.requires_grad = True
sd = torch.ones_like(Z).float().to(device) * 1e-7
pars = [{'params': [Z], 'lr': lr}]
# Set-up optimiser
if biasonly:
iters = [1000, 1000, 1000]
else:
iters = [400, 200, 100]
optimiser = optim.Adam(pars, betas=(0.9, 0.999), weight_decay=0)
# Assign Z to best test ixs
cross_errors = np.zeros((test_set_size, test_set_size))
for i in range(test_set_size):
_Z = Z[i, :].repeat(test_set_size, 1) # duplicate i'th particle for all sequences.
preds, _state = model(Ub, _Z, sd)
err = (preds - Yb)
sqerr = err.pow(2)
cross_errors[:, i] = sqerr.mean(dim=2).mean(dim=1).cpu().detach().numpy()
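    # cross_errors[s, i] holds the MSE on sequence s when decoding with particle i's
    # latent code; each sequence then adopts its best-performing particle.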
choose_z = np.argmin(cross_errors, axis=1)
Z.data = Z[choose_z, :].detach()
# Perform optimisation
for j in range(len(iters)):
start_time = time.time()
for i in range(iters[j]):
optimiser.zero_grad()
preds, _state = model(Ub, Z, sd)
err = (preds - Yb)
sqerr = err.pow(2)
step_loss = ysz * 64 * sqerr.mean() / 2
# Actual backpropagation
step_loss.backward()
optimiser.step()
            if i % 5 == 0:
                print("step {:d}: {:02.3f}".format(i, step_loss.cpu().data.numpy()))
                sys.stdout.flush()
print("Inner loop {:d}/{:d} took {:03.1f} seconds".format(j+1, len(iters), time.time() - start_time))
# Avoid local minima
if j != len(iters) - 1:
cross_errors = np.zeros((test_set_size, test_set_size))
for i in range(test_set_size):
_Z = Z[i, :].repeat(test_set_size, 1) # duplicate i'th particle for all sequences.
preds, _state = model(Ub, _Z, sd)
err = (preds - Yb)
sqerr = err.pow(2)
cross_errors[:, i] = sqerr.mean(dim=2).mean(dim=1).cpu().detach().numpy()
choose_z = np.argmin(cross_errors, axis=1)
Z.data = Z[choose_z, :].detach()
if is_mtl:
        return Z.cpu().detach().numpy(), np.ones(1) * np.nan
else:
return Z.cpu().detach().numpy(), test_ixs
if __name__ == "__main__":
args = parse_args()
Z, test_ixs = optimise(args)
Ntype = "N{:d}".format(args.train_set_size) if args.train_set_size > 0 else "TL"
savenm = "{:s}_{:s}_k{:d}_i{:d}".format(args.model_type, Ntype, args.k, args.style_ix)
dir = os.path.join(args.data_dir, "optim_Z")
os.makedirs(dir, exist_ok=True)
np.savez(os.path.join(dir, savenm), Z)
|
{"hexsha": "f2fc35440a5f735f238cbfec1e95b6f216b3caca", "size": 11521, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/optim_z.py", "max_stars_repo_name": "ornithos/pytorch-mtds-mocap", "max_stars_repo_head_hexsha": "3ec10387d3d897e9a20d789bd4a3782a047519f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-09T17:53:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T11:25:35.000Z", "max_issues_repo_path": "src/optim_z.py", "max_issues_repo_name": "ornithos/pytorch-mtds-mocap", "max_issues_repo_head_hexsha": "3ec10387d3d897e9a20d789bd4a3782a047519f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/optim_z.py", "max_forks_repo_name": "ornithos/pytorch-mtds-mocap", "max_forks_repo_head_hexsha": "3ec10387d3d897e9a20d789bd4a3782a047519f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8049792531, "max_line_length": 132, "alphanum_fraction": 0.603333044, "include": true, "reason": "import numpy", "num_tokens": 3170}
|
# created by Dmitrey
#PythonAll = all
from numpy import asarray, empty, inf, any, array, \
asfarray, isscalar, ndarray, int16, int32, int64, float64, tile, vstack, searchsorted, \
logical_or, where, asanyarray, arange, log2, logical_and, ceil, string_, atleast_1d
import numpy as np
from FDmisc import FuncDesignerException, isPyPy
from ooFun import oofun
from logic import AND, OR, NOT, EQUIVALENT
from BooleanOOFun import BooleanOOFun
#from FuncDesigner import IMPLICATION
from ooarray import ooarray
from baseClasses import Stochastic
from boundsurf import boundsurf, surf
f_none = lambda *args, **kw: None
One = array(1.0)
Zero = array(0.0) ## TODO: maybe use a 1-d array to improve indexing
class oovar(oofun):
is_oovar = True
domain = None
lb = -inf
ub = inf
#shape = nan
#fixed = False
#initialized = False
    _unnamedVarNumber = 1  # static variable for the oovar class
__hash__ = oofun.__hash__
def __init__(self, name=None, *args, **kwargs):
        if len(args) > 0: raise FuncDesignerException('incorrect number of arguments for the oovar constructor')
if name is None:
self.name = 'unnamed_oovar_with_oofun_id_%d' % oofun._id
# oovar._unnamedVarNumber += 1
# self.name = 'unnamed_' + str(oovar._unnamedVarNumber)
else:
kwargs['name'] = name
oofun.__init__(self, f_none, *args, **kwargs)
expression = lambda self, *args, **kw: self.name
def _interval_(self, domain, dtype = float64):
if self in domain.resolveSchedule:
tmp = domain.get(self, None)
if tmp is None:
Tmp = getattr(domain, '_dictOfStochVars', {})
tmp = Tmp.get(self, None)
return None if tmp is None else (tile(yield_stochastic(tmp, domain, self), (2, 1)), True)
if isinstance(tmp, ndarray) or isscalar(tmp): # thus variable value is fixed for this calculation
tmp = asarray(tmp, dtype)
return tile(tmp, (2, 1)), True
infinum, supremum = tmp
#prev
#return asarray(vstack((infinum, supremum)), dtype), True
#new, works faster in CPython
r = empty((2, asarray(infinum).size), dtype)
r[0] = infinum
r[1] = supremum
return r, True
else:
S = surf({self: One}, Zero)
return boundsurf(S, S, True, domain), True
def _getFuncCalcEngine(self, x, **kwargs):
if hasattr(x, 'xf'):
#return x.xf[self]
if x.probType == 'MOP':
                s = 'evaluation of a MOP result on arguments is not implemented yet; use r.solutions'
raise FuncDesignerException(s)
return self._getFuncCalcEngine(x.xf, **kwargs) # essential for SP
r = x.get(self, None)
if r is not None:
if isinstance(r, Stochastic):
r = yield_stochastic(r, x, self)
return r
r = x.get(self.name, None)
if r is not None:
return r
Tmp = getattr(x, '_dictOfStochVars', {})
r = Tmp.get(self, None)
if r is not None:
r = yield_stochastic(r, x, self)
return r
# check for fixed oovars
dictOfFixedFuncs = getattr(x, 'dictOfFixedFuncs', {})
r = dictOfFixedFuncs.get(self, None)
if r is not None:
return r
        s = '''for oovar %s the point involved contains
        neither the name nor the oovar instance.
        Maybe you are trying to get a function value or derivative
        in a point where the value for this oovar is missing,
        or you are running an optimization problem
        without setting an initial value for this variable in the start point
        ''' % self.name
raise FuncDesignerException(s)
def nlh(self, Lx, Ux, p, dataType, other=None):
T0, res, DefiniteRange = get_P(self, Lx, Ux, p, dataType, other, goal_is_nlh = True)
if type(T0) == bool:
assert len(res) == 0
return T0, {}, DefiniteRange
else:
return T0, {self: res}, DefiniteRange
def lh(self, Lx, Ux, p, dataType, other=None):
#print('lh')
T0, res, DefiniteRange = get_P(self, Lx, Ux, p, dataType, other, goal_is_nlh = False)
if type(T0) == bool:
assert len(res) == 0
return T0, {}, DefiniteRange
else:
return T0, {self: res}, DefiniteRange
__and__ = AND
__or__ = OR
#implication = IMPLICATION
__invert__ = NOT
__ne__ = lambda self, arg: NOT(self==arg)
def __eq__(self, other):
if type(other) == str and other =='__builtins__': return False
        if (self.domain is bool or self.domain == 'bool') and isinstance(other, (oovar, BooleanOOFun)):
return EQUIVALENT(self, other)
else:
return oofun.__eq__(self, other)
def formAuxDomain(self, sort = True):
if 'aux_domain' in self.__dict__: return
d = self.domain
# if d.dtype.type not in [string_, unicode, str]:
# raise FuncDesignerException('to compare string with oovar latter should have domain of string type')
if type(d[0]) in (str, string_):
d = dict((elem, i) for i, elem in enumerate(d))
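            # pad the encoded domain size to the next power of 2, e.g. a 5-element domain gives D = 8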
D = int(2 ** ceil(log2(len(d))))
self.reverse_aux_domain = dict((i, elem) for i, elem in enumerate(self.domain))
elif sort:
d = asanyarray(d)
if any(d[1:] > d[:-1]):
# if type(d) == tuple:
# d = list(d)
d.sort()
#self.ub = d.size - 1
D = int(2 ** ceil(log2(len(atleast_1d(d)))))
        else:
            d = asanyarray(d)
            # atleast_1d handles a domain of a single element, in case one is ever generated and passed here
            D = int(2 ** ceil(log2(len(atleast_1d(d)))))
self.domain, self.aux_domain = arange(D), d
# self.domainSortOrder = \
# 1 if PythonAll(d[i] <= d[i+1] for i in range(D-1)) else \
# -1 if PythonAll(d[i] >= d[i+1] for i in range(D-1)) else\
# 0
# if isinstance(x, dict):
# tmp = x.get(self, None)
# if tmp is not None:
# return tmp #if type(tmp)==ndarray else asfarray(tmp)
# elif self.name in x:
# return asfarray(x[self.name])
# else:
# s = 'for oovar ' + self.name + \
# " the point involved doesn't contain niether name nor the oovar instance. Maybe you try to get function value or derivative in a point where value for an oovar is missing"
# raise FuncDesignerException(s)
# elif hasattr(x, 'xf'):
# # TODO: possibility of squeezing
# return x.xf[self]
# else:
# raise FuncDesignerException('Incorrect data type (%s) while obtaining oovar %s value' %(type(x), self.name))
# def _initialize(self, p):
#
# """ Handling size and shape """
# sizes = set()
# shapes = set()
# for fn in ['v0', 'lb', 'ub']:
# if hasattr(self, fn):
# setattr(self, fn, asarray(getattr(self, fn)))
# shapes.add(getattr(self, fn).shape)
# sizes.add(getattr(self, fn).size)
# if self.shape is not nan:
# shapes.add(self.shape)
# sizes.add(prod(self.shape))
# if self.size is not nan: sizes.add(self.size)
# #if len(shapes) > 1: p.err('for oovar fields (if present) lb, ub, v0 should have same shape')
# #elif len(shapes) == 1: self.shape = shapes.pop()
# if len(shapes) >= 1: self.shape = prod(shapes.pop())
#
# if len(sizes) > 1: p.err('for oovar fields (if present) lb, ub, v0 should have same size')
# elif len(sizes)==1 : self.size = sizes.pop()
#
# if self.size is nan: self.size = asarray(self.shape).prod()
# if self.shape is nan:
# assert isfinite(self.size)
# self.shape = (self.size, )
#
#
# """ Handling init value """
## if not hasattr(self, 'lb'):
## self.lb = empty(self.shape)
## self.lb.fill(-inf)
## if not hasattr(self, 'ub'):
## self.ub = empty(self.shape)
## self.ub.fill(inf)
## if any(self.lb > self.ub):
## p.err('lower bound exceeds upper bound, solving impossible')
# if not hasattr(self, 'v0'):
# #p.warn('got oovar w/o init value')
# v0 = zeros(self.shape)
#
# ind = isfinite(self.lb) & isfinite(self.ub)
# v0[ind] = 0.5*(self.lb[ind] + self.ub[ind])
#
# ind = isfinite(self.lb) & ~isfinite(self.ub)
# v0[ind] = self.lb[ind]
#
# ind = ~isfinite(self.lb) & isfinite(self.ub)
# v0[ind] = self.ub[ind]
#
# self.v0 = v0
#
# self.initialized = True
def oovars(*args, **kw):
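    # build an ooarray of oovars from an int count, a list/tuple of names,
    # or a single whitespace-separated string of names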
if isPyPy:
        raise FuncDesignerException('''
        using oovars() is not possible with PyPy yet.
        You could use oovar(size=n) instead; alternatively,
        you can create a list or tuple of oovars in a loop, e.g.
        a = [oovar('a'+str(i)) for i in range(100)]
        but then you should ensure your code has no operations like k*a or a+val,
        as they may work in a completely different way (e.g. k*a will produce a Python list of k instances of a)
        ''')
lb = kw.pop('lb', None)
ub = kw.pop('ub', None)
if len(args) == 1:
if type(args[0]) in (int, int16, int32, int64):
r = ooarray([oovar(**kw) for i in range(args[0])])
elif type(args[0]) in [list, tuple]:
r = ooarray([oovar(name=args[0][i], **kw) for i in range(len(args[0]))])
elif type(args[0]) == str:
r = ooarray([oovar(name=s, **kw) for s in args[0].split()])
else:
raise FuncDesignerException('incorrect args number for oovars constructor')
else:
r = ooarray([oovar(name=args[i], **kw) for i in range(len(args))])
if lb is not None:
if np.isscalar(lb) or (isinstance(lb, np.ndarray) and lb.size == 1):
for v in r.view(np.ndarray):
v.lb = lb
else:
assert type(lb) in (list, tuple, ndarray)
for i, v in enumerate(r):
v.lb = lb[i]
if ub is not None:
if np.isscalar(ub) or (isinstance(ub, np.ndarray) and ub.size == 1):
for v in r.view(np.ndarray):
v.ub = ub
else:
assert type(ub) in (list, tuple, ndarray)
for i, v in enumerate(r):
v.ub = ub[i]
r._is_array_of_oovars = True
return r
def get_P(v, Lx, Ux, p, dataType, other=None, goal_is_nlh = True):
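    # T0 holds -log2 of the probability that the constraint holds over each box
    # (x == 1 for boolean domains, x == other for discrete ones); T2 stacks the
    # same quantity for the two halves of each box (cf. the -log2 annotations below)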
DefiniteRange = True
d = v.domain
    if d is None:
        raise FuncDesignerException('probably you are invoking a boolean operation on a continuous oovar')
    if d is int or d == 'int':
        raise FuncDesignerException('probably you are invoking a boolean operation on a non-boolean oovar')
inds = p._oovarsIndDict.get(v, None)
m = Lx.shape[0]
if inds is None:
# this oovar is fixed
res = {}
        if v.domain is bool or v.domain == 'bool':
if goal_is_nlh:
T0 = True if p._x0[v] == 1 else False # 0 or 1
else:
T0 = False if p._x0[v] == 1 else True # 0 or 1
else:
assert other is not None, 'bug in FD kernel: called nlh with incorrect domain type'
if goal_is_nlh:
T0 = False if p._x0[v] != other else True
else:
T0 = False if p._x0[v] == other else True
return T0, res, DefiniteRange
#raise FuncDesignerException('probably you are trying to get nlh of fixed oovar, this is unimplemented in FD yet')
ind1, ind2 = inds
assert ind2-ind1 == 1, 'unimplemented for oovars of size > 1 yet'
lx, ux = Lx[:, ind1], Ux[:, ind1]
    if d is bool or d == 'bool':
T0 = empty(m)
if goal_is_nlh:
T0.fill(inf)
T0[ux != lx] = 1.0 # lx = 0, ux = 1 => -log2(0.5) = 1
T0[lx == 1.0] = 0.0 # lx = 1 => ux = 1 => -log2(1) = 0
T2 = vstack((where(lx == 1, 0, inf), where(ux == 1, 0, inf))).T
else:
T0.fill(0)
T0[ux != lx] = 1.0
T0[lx == 1.0] = inf
T2 = vstack((where(lx == 1, inf, 0), where(ux == 1, inf, 0))).T
else:
assert other is not None, 'bug in FD kernel: called nlh with incorrect domain type'
mx = 0.5 * (lx + ux)
prev = 0
if prev:
ind = logical_and(mx==other, lx != ux)
if any(ind):
            p.pWarn('seems like a categorical-variable bug in the FuncDesigner kernel, inform the developers')
# mx[ind] += 1e-15 + 1e-15*abs(mx[ind])
I = searchsorted(d, lx, 'right') - 1
J = searchsorted(d, mx, 'right') - 1
#assert np.all(searchsorted(d, mx, 'right') == searchsorted(d, mx, 'left'))
K = searchsorted(d, ux, 'right') - 1
D0, D1, D2 = d[I], d[J], d[K]
d1, d2 = D0, D1
# if goal_is_nlh:
tmp1 = asfarray(J-I+1+where(d2==other, 1, 0))
tmp1[logical_or(other<d1, other>d2)] = inf
# else:
# tmp1 = asfarray(J-I+where(d2==other, 0, 1))
# tmp1[logical_or(other<d1, other>d2)] = 0
d1, d2 = D1, D2
# if goal_is_nlh:
tmp2 = asfarray(K-J+1+where(d2==other, 1, 0))
tmp2[logical_or(other<d1, other>d2)] = inf
# else:
# tmp2 = asfarray(K-J+where(d2==other, 0, 1))
# tmp2[logical_or(other<d1, other>d2)] = 0
if goal_is_nlh:
T2 = log2(vstack((tmp1, tmp2)).T)
else:
T2 = log2(vstack((tmp1, tmp2)).T)
d1, d2 = D0, D2
tmp = asfarray(K-I+where(d2==other, 1, 0))
tmp[logical_or(other<d1, other>d2)] = inf
T0 = log2(tmp)
else:
assert np.all(d == array(d, int)) and len(d) == d[-1]-d[0]+1, 'bug in FD kernel'
#assert np.all(1e-6 < np.abs(np.array(mx, int)-mx))
assert goal_is_nlh, 'unimplemented yet'
tmp = ux - lx
tmp[other < lx] = inf
tmp[other > ux] = inf
tmp[logical_and(tmp==0, other == lx)] = 0.0
T0 = log2(tmp+1)
floor_mx = np.floor(mx)
tmp1 = floor_mx - lx
tmp1[other < lx] = inf
tmp1[other > floor_mx] = inf
tmp1[logical_and(tmp1==0, other == lx)] = 0.0
ceil_mx = np.ceil(mx)
tmp2 = ux - ceil_mx
tmp2[other > ux] = inf
tmp2[other < ceil_mx] = inf
tmp2[logical_and(tmp2==0, other == ux)] = 0.0
if goal_is_nlh:
T2 = log2(vstack((tmp1, tmp2)).T + 1.0)
else:
assert 0, 'unimplemented yet'
#T2 = log2(vstack((tmp1, tmp2)).T)
res = T2
return T0, res, DefiniteRange
def yield_stochastic(r, point, v):
sz = getattr(point, 'maxDistributionSize', 0)
if sz == 0:
        s = '''
        if one of the function arguments is a stochastic distribution
        that has not been resolved into a quantified value
        (e.g. uniform(-10,10) instead of uniform(-10,10, 100), where 100 is the number of points to emulate),
        then you should evaluate the function
        on an oopoint with the parameter maxDistributionSize assigned'''
raise FuncDesignerException(s)
if not r.quantified:
r = r._yield_quantified(sz)
r = r.copy()
r.stochDep = {v:1}
r.maxDistributionSize = sz
if r.size > sz:
r.reduce(sz)
tmp = getattr(point, '_p', None)
if tmp is not None:
r._p = tmp
return r
|
{"hexsha": "ca4193180bfc9518d393815530f27c1a7d22f9bf", "size": 16356, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/python2.7/site-packages/FuncDesigner/ooVar.py", "max_stars_repo_name": "wangyum/anaconda", "max_stars_repo_head_hexsha": "6e5a0dbead3327661d73a61e85414cf92aa52be6", "max_stars_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/python2.7/site-packages/FuncDesigner/ooVar.py", "max_issues_repo_name": "wangyum/anaconda", "max_issues_repo_head_hexsha": "6e5a0dbead3327661d73a61e85414cf92aa52be6", "max_issues_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/python2.7/site-packages/FuncDesigner/ooVar.py", "max_forks_repo_name": "wangyum/anaconda", "max_forks_repo_head_hexsha": "6e5a0dbead3327661d73a61e85414cf92aa52be6", "max_forks_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7582938389, "max_line_length": 188, "alphanum_fraction": 0.5244558572, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4535}
|
import argparse
from urllib2 import urlopen
from httplib import BadStatusLine
from time import sleep
import bom_data_parser as bdp
from phildb.database import PhilDB
from phildb.exceptions import DuplicateError
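# Fetch Bureau of Meteorology water storage data for every Australian
# state/territory and load the daily series into a PhilDB instance.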
SLAKE_END_POINT = 'http://water.bom.gov.au/waterstorage/resources/data/'
SLAKE_XMLCHART_END_POINT = 'http://water.bom.gov.au/waterstorage/resources/xmlchart/'
def main(phildb_name):
db = PhilDB(phildb_name)
try:
db.add_measurand('STORAGE', 'STORAGE', 'Water Storage')
except DuplicateError:
pass
try:
db.add_source('BOM', 'BOM')
except DuplicateError:
pass
states = bdp.read_water_storage_states(urlopen('{0}urn:bom.gov.au:awris:common:codelist:region.country:australia'.format(SLAKE_END_POINT)))
for state in states:
print("Processing state/territory: {0}".format(state))
storages = bdp.read_water_storage_urns(urlopen('{0}{1}'.format(SLAKE_END_POINT, state)))
for storage in storages:
print("Processing storage {0}".format(storage))
try:
db.add_timeseries(storage)
except DuplicateError:
pass
try:
db.add_timeseries_instance(storage, 'D', 'Bureau of Meteorology Water Storages', source = 'BOM', measurand = 'STORAGE')
except DuplicateError:
pass
url = "{0}{1}".format(SLAKE_XMLCHART_END_POINT, storage)
try:
storage_data = bdp.read_water_storage_series(urlopen(url))
except BadStatusLine:
            print("Encountered a bad status line, sleeping for 1 second before trying again...")
sleep(1)
storage_data = bdp.read_water_storage_series(urlopen(url))
db.write(storage, 'D', storage_data, source = 'BOM', measurand = 'STORAGE')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Load water storage data.')
parser.add_argument('--phildb-name', type=str,
default = 'hm_tsdb',
help='PhilDB to load the data into.')
args = parser.parse_args()
main(args.phildb_name)
|
{"hexsha": "2d9c5a66b7850081772c0dca622f61f827b04ba8", "size": 2363, "ext": "py", "lang": "Python", "max_stars_repo_path": "load_storages.py", "max_stars_repo_name": "amacd31/hydromet-toolkit", "max_stars_repo_head_hexsha": "d39edc6d3e02adeb3cd89ca13fdb9660be3247b4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-01-06T03:35:29.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-06T03:35:29.000Z", "max_issues_repo_path": "load_storages.py", "max_issues_repo_name": "amacd31/hydromet-toolkit", "max_issues_repo_head_hexsha": "d39edc6d3e02adeb3cd89ca13fdb9660be3247b4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "load_storages.py", "max_forks_repo_name": "amacd31/hydromet-toolkit", "max_forks_repo_head_hexsha": "d39edc6d3e02adeb3cd89ca13fdb9660be3247b4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5066666667, "max_line_length": 143, "alphanum_fraction": 0.6567922133, "include": true, "reason": "import numpy", "num_tokens": 544}
|
/*******************************************************************************
*
* MIT License
*
* Copyright (c) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef GUARD_MIOPEN_FIND_DB_HPP_
#define GUARD_MIOPEN_FIND_DB_HPP_
#include <miopen/db.hpp>
#include <miopen/db_path.hpp>
#include <miopen/db_record.hpp>
#include <miopen/env.hpp>
#include <miopen/perf_field.hpp>
#include <miopen/ramdb.hpp>
#include <miopen/readonlyramdb.hpp>
#include <boost/optional.hpp>
#include <functional>
#include <vector>
MIOPEN_DECLARE_ENV_VAR(MIOPEN_DEBUG_DISABLE_FIND_DB)
namespace miopen {
struct Handle;
struct NetworkConfig;
template <class TDb>
class FindDbRecord_t;
#if MIOPEN_DEBUG_FIND_DB_CACHING
using SystemFindDb = ReadonlyRamDb;
using UserFindDb = RamDb;
#else
using SystemFindDb = PlainTextDb;
using UserFindDb = PlainTextDb;
#endif
using FindDb = MultiFileDb<SystemFindDb, UserFindDb, false>;
using FindDbRecord = FindDbRecord_t<FindDb>;
using UserFindDbRecord = FindDbRecord_t<UserFindDb>;
// For unit tests.
extern bool testing_find_db_enabled; // NOLINT (cppcoreguidelines-avoid-non-const-global-variables)
extern boost::optional<std::string>&
testing_find_db_path_override(); /// \todo Remove when #1723 is resolved.
bool CheckInvokerSupport(const std::string& algo);
template <class TDb>
class FindDbRecord_t
{
private:
template <class TTestDb>
using is_find_t = std::enable_if_t<std::is_same<TTestDb, UserFindDb>::value, int>;
template <class TTestDb>
using is_immediate_t = std::enable_if_t<std::is_same<TTestDb, FindDb>::value, int>;
public:
FindDbRecord_t(const FindDbRecord_t&) = delete;
FindDbRecord_t& operator=(const FindDbRecord_t&) = delete;
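    // Two construction modes: the immediate-mode constructor (TDb = FindDb)
    // reads the multi-file DB (installed system part plus user part), while
    // the find-mode constructor (TDb = UserFindDb) only opens the user DB.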
template <class TProblemDescription, class TTestDb = TDb>
FindDbRecord_t(Handle& handle, const TProblemDescription& problem, is_immediate_t<TTestDb> = 0)
: path(testing_find_db_path_override() ? *testing_find_db_path_override()
: GetUserPath(handle)),
installed_path(testing_find_db_path_override() ? *testing_find_db_path_override()
: GetInstalledPath(handle)),
db(boost::make_optional<DbTimer<TDb>>(testing_find_db_enabled &&
!IsEnabled(MIOPEN_DEBUG_DISABLE_FIND_DB{}),
DbTimer<TDb>{installed_path, path}))
{
if(!db.is_initialized())
return;
content = db->FindRecord(problem);
in_sync = content.is_initialized();
}
template <class TProblemDescription, class TTestDb = TDb>
FindDbRecord_t(Handle& handle, const TProblemDescription& problem, is_find_t<TTestDb> = 0)
: path(testing_find_db_path_override() ? *testing_find_db_path_override()
: GetUserPath(handle)),
#if MIOPEN_DISABLE_USERDB
db(boost::optional<DbTimer<TDb>>{})
#else
db(boost::make_optional<DbTimer<TDb>>(testing_find_db_enabled &&
!IsEnabled(MIOPEN_DEBUG_DISABLE_FIND_DB{}),
DbTimer<TDb>{path, false}))
#endif
{
if(!db.is_initialized())
return;
content = db->FindRecord(problem);
in_sync = content.is_initialized();
}
~FindDbRecord_t()
{
if(!db.is_initialized() || !content.is_initialized() || in_sync)
return;
if(!db->StoreRecord(content.get()))
MIOPEN_LOG_E("Failed to store record to find-db at <" << path << ">");
}
auto begin() const { return content->As<FindDbData>().begin(); }
auto begin() { return content->As<FindDbData>().begin(); }
auto end() const { return content->As<FindDbData>().end(); }
auto end() { return content->As<FindDbData>().end(); }
bool empty() const { return !content.is_initialized(); }
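    // Returns the cached perf results when a record exists and validates;
    // otherwise calls [regenerator] to rebuild the record, which is stored
    // back to the find-db when the local record object is destroyed.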
template <class TProblemDescription>
static std::vector<PerfField> TryLoad(Handle& handle,
const TProblemDescription& problem,
const std::function<void(DbRecord&)>& regenerator)
{
auto ret = std::vector<PerfField>{};
FindDbRecord_t<TDb> record{handle, problem};
const auto network_config = problem.BuildConfKey();
if(record.in_sync && !record.Validate(handle, network_config))
{
record.CopyTo(ret);
return ret;
}
MIOPEN_LOG_I("Find-db regenerating.");
ret.clear();
record.in_sync = false;
record.content.emplace(problem);
regenerator(*record.content);
record.CopyTo(ret);
return ret;
}
private:
std::string path;
std::string installed_path;
boost::optional<DbTimer<TDb>> db;
boost::optional<DbRecord> content{boost::none};
bool in_sync = false;
static bool HasKernel(Handle& handle, const FindDbKCacheKey& key);
static std::string GetInstalledPath(Handle& handle);
static std::string GetInstalledPathEmbed(Handle& handle);
static std::string GetInstalledPathFile(Handle& handle);
static std::string GetUserPath(Handle& handle);
// Returns true if rebuild is required
bool Validate(Handle& handle, const NetworkConfig& config) const;
void CopyTo(std::vector<PerfField>& to) const;
void LogFindDbItem(const std::pair<std::string, FindDbData>& pair,
bool log_as_error = false) const;
};
extern template class FindDbRecord_t<FindDb>;
extern template class FindDbRecord_t<UserFindDb>;
} // namespace miopen
#endif
|
{"hexsha": "b938461d4ee74f45aab6ba355de52c8a16bae59a", "size": 6882, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/include/miopen/find_db.hpp", "max_stars_repo_name": "j4yan/MIOpen", "max_stars_repo_head_hexsha": "dc38f79bee97e047d866d9c1e25289cba86fab56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 745.0, "max_stars_repo_stars_event_min_datetime": "2017-07-01T22:03:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T23:46:27.000Z", "max_issues_repo_path": "src/include/miopen/find_db.hpp", "max_issues_repo_name": "j4yan/MIOpen", "max_issues_repo_head_hexsha": "dc38f79bee97e047d866d9c1e25289cba86fab56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1348.0, "max_issues_repo_issues_event_min_datetime": "2017-07-02T12:37:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:45:51.000Z", "max_forks_repo_path": "src/include/miopen/find_db.hpp", "max_forks_repo_name": "j4yan/MIOpen", "max_forks_repo_head_hexsha": "dc38f79bee97e047d866d9c1e25289cba86fab56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 158.0, "max_forks_repo_forks_event_min_datetime": "2017-07-01T19:37:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:57:04.000Z", "avg_line_length": 36.2210526316, "max_line_length": 99, "alphanum_fraction": 0.6469049695, "num_tokens": 1501}
|
#include "driver-test.h"
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(BankcardTestSuite)
BOOST_AUTO_TEST_CASE(bankcard_test)
{
Driver driver;
DriverTest dt;
boost::filesystem::path directory("TestFiles");
std::ifstream metadataFile((directory / "bankcardExample" / "zone_files" / "metadata.json").string());
json metadata;
metadataFile >> metadata;
std::ifstream i((directory / "bankcardExample" / "jobs.json").string());
json j;
i >> j;
long total_rrs_parsed = driver.SetContext(metadata, (directory / "bankcardExample" / "zone_files").string(), false);
BOOST_TEST(22 == total_rrs_parsed);
auto types_to_count = dt.GetTypeToCountMap(driver);
BOOST_TEST(2 == types_to_count["DNAME"]);
BOOST_TEST(1 == types_to_count["CNAME"]);
BOOST_TEST(6 == types_to_count["A"]);
BOOST_TEST(6 == types_to_count["NS"]);
BOOST_TEST(5 == types_to_count["SOA"]);
BOOST_TEST(2 == types_to_count["AAAA"]);
for (auto &user_job : j) {
driver.SetJob(user_job);
driver.GenerateECsAndCheckProperties();
}
// We check for two properties: 1. Response Consistency
// 2. whether support.mybankcard.com will always get the ip address 204.58.233.244
// Both of them should be violated
BOOST_TEST(2 == dt.GetNumberofViolations(driver));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "fd27c6a404430ab6ff84f515b0e82a71ad616f2f", "size": 1395, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/bankcardTest.cpp", "max_stars_repo_name": "dns-groot/groot", "max_stars_repo_head_hexsha": "995b1bb64bfe4a1407dcf0c5a6910dfe1d60e427", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 58.0, "max_stars_repo_stars_event_min_datetime": "2020-04-29T01:05:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T15:17:34.000Z", "max_issues_repo_path": "test/bankcardTest.cpp", "max_issues_repo_name": "ShuyangLiu/groot", "max_issues_repo_head_hexsha": "1c12500ab6c76a1ddef328031479d5a1f8d6d571", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2020-05-11T16:09:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T11:07:36.000Z", "max_forks_repo_path": "test/bankcardTest.cpp", "max_forks_repo_name": "ShuyangLiu/groot", "max_forks_repo_head_hexsha": "1c12500ab6c76a1ddef328031479d5a1f8d6d571", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:07:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T12:40:31.000Z", "avg_line_length": 34.0243902439, "max_line_length": 120, "alphanum_fraction": 0.6745519713, "num_tokens": 348}
|
import os
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
def load_models(wpod_net_path, mobile_net_path):
try:
#open wpod-net model json file and make model using it
path=wpod_net_path
with open(path, 'r') as json_file:
wpod_model_json = json_file.read()
wpod_model = tf.keras.models.model_from_json(wpod_model_json, custom_objects={})
#load weights into the model
wpod_model.load_weights(os.path.join(os.getcwd(),os.path.join("number_plate_detection_and_recognition","wpod-net.h5")))
#open mobile-net model json file and make model using it
path=mobile_net_path
with open(path, 'r') as json_file:
mobile_model_json = json_file.read()
mobile_model = tf.keras.models.model_from_json(mobile_model_json, custom_objects={})
#load weights into the model
mobile_model.load_weights(os.path.join(os.getcwd(),os.path.join("number_plate_detection_and_recognition","mobile_net.h5")))
        print("Models Loaded Successfully")
#load labels of character recognition model
labels = LabelEncoder()
labels.classes_ = np.load(os.path.join(os.getcwd(),os.path.join("number_plate_detection_and_recognition","license_character_classes.npy")))
        print("Labels Loaded Successfully")
        #return both models and the fitted label encoder to the caller
return wpod_model, mobile_model, labels
except Exception as e:
print(e)
|
{"hexsha": "f407ee047644a13c68908214a11c93852c88aeed", "size": 1639, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/number_plate_detection_and_recognition/load_models.py", "max_stars_repo_name": "CS305-software-Engineering/vehicle-attendance-system", "max_stars_repo_head_hexsha": "b33a583f923d92be669ee89c75a53e0ccacb5d16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-21T03:39:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T03:39:13.000Z", "max_issues_repo_path": "server/number_plate_detection_and_recognition/load_models.py", "max_issues_repo_name": "CS305-software-Engineering/vehicle-attendance-system", "max_issues_repo_head_hexsha": "b33a583f923d92be669ee89c75a53e0ccacb5d16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-05T15:46:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-23T17:39:31.000Z", "max_forks_repo_path": "server/number_plate_detection_and_recognition/load_models.py", "max_forks_repo_name": "CS305-software-Engineering/vehicle-attendance-system", "max_forks_repo_head_hexsha": "b33a583f923d92be669ee89c75a53e0ccacb5d16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1315789474, "max_line_length": 147, "alphanum_fraction": 0.7022574741, "include": true, "reason": "import numpy", "num_tokens": 354}
|
(* ** Imports and settings *)
From mathcomp Require Import all_ssreflect all_algebra.
From mathcomp Require Import word_ssrZ.
Require Import strings word utils type var expr.
Require Import compiler_util byteset.
Require Import ZArith.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Local Open Scope vmap.
Local Open Scope seq_scope.
Module Import E.
Definition pass : string := "stack allocation".
Definition stk_error_gen (internal:bool) (x:var_i) msg := {|
pel_msg := msg;
pel_fn := None;
pel_fi := None;
pel_ii := None;
pel_vi := Some x.(v_info);
pel_pass := Some pass;
pel_internal := internal
|}.
Definition stk_error := stk_error_gen false.
Definition stk_ierror := stk_error_gen true.
Definition stk_ierror_basic x msg :=
stk_ierror x (pp_box [:: pp_s msg; pp_nobox [:: pp_s "("; pp_var x; pp_s ")"]]).
Definition stk_error_no_var_gen (internal:bool) msg := {|
pel_msg := pp_s msg;
pel_fn := None;
pel_fi := None;
pel_ii := None;
pel_vi := None;
pel_pass := Some pass;
pel_internal := internal
|}.
Definition stk_error_no_var := stk_error_no_var_gen false.
Definition stk_ierror_no_var := stk_error_no_var_gen true.
End E.
(* TODO: could [wsize_size] return a [positive] rather than a [Z]?
If so, [size_of] could return a positive too.
*)
Definition size_of (t:stype) :=
match t with
| sword sz => wsize_size sz
| sarr n => Zpos n
| sbool | sint => 1%Z
end.
Definition slot := var.
Notation size_slot s := (size_of s.(vtype)).
Record region :=
{ r_slot : slot; (* the name of the region *)
(* the size of the region is encoded in the type of [r_slot] *)
r_align : wsize; (* the alignment of the region *)
r_writable : bool; (* the region is writable or not *)
}.
Definition region_beq (r1 r2:region) :=
[&& r1.(r_slot) == r2.(r_slot),
r1.(r_align) == r2.(r_align) &
r1.(r_writable) == r2.(r_writable)].
Definition region_same (r1 r2:region) :=
(r1.(r_slot) == r2.(r_slot)).
Lemma region_axiom : Equality.axiom region_beq.
Proof.
rewrite /region_beq => -[xs1 xa1 xw1] [xs2 xa2 xw2].
by apply:(iffP and3P) => /= [[/eqP -> /eqP -> /eqP ->] | [-> -> ->]].
Qed.
Definition region_eqMixin := Equality.Mixin region_axiom.
Canonical region_eqType := Eval hnf in EqType region region_eqMixin.
Module CmpR.
Definition t := [eqType of region].
Definition cmp (r1 r2: t) :=
Lex (bool_cmp r1.(r_writable) r2.(r_writable))
(Lex (wsize_cmp r1.(r_align) r2.(r_align))
(var_cmp r1.(r_slot) r2.(r_slot))).
#[global]
Instance cmpO : Cmp cmp.
Proof.
constructor => [x y | y x z c | [???] [???]]; rewrite /cmp !Lex_lex.
+ by repeat (apply lex_sym; first by apply cmp_sym); apply cmp_sym.
+ by repeat (apply lex_trans=> /=; first by apply cmp_ctrans); apply cmp_ctrans.
move=> /lex_eq [] /= h1 /lex_eq [] /= h2 h3.
by rewrite (cmp_eq h1) (cmp_eq h2) (cmp_eq h3).
Qed.
End CmpR.
Module Mr := Mmake CmpR.
(* ------------------------------------------------------------------ *)
Record zone := {
z_ofs : Z;
z_len : Z;
}.
Scheme Equality for zone.
Lemma zone_eq_axiom : Equality.axiom zone_beq.
Proof.
move=> x y;apply:(iffP idP).
+ by apply: internal_zone_dec_bl.
by apply: internal_zone_dec_lb.
Qed.
Definition zone_eqMixin := Equality.Mixin zone_eq_axiom.
Canonical zone_eqType := EqType zone zone_eqMixin.
Definition disjoint_zones z1 z2 :=
(((z1.(z_ofs) + z1.(z_len))%Z <= z2.(z_ofs)) ||
((z2.(z_ofs) + z2.(z_len))%Z <= z1.(z_ofs)))%CMP.
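(* e.g. zones [0; 8) and [8; 16) are disjoint, while [0; 8) and [4; 12) are not *)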
(* ------------------------------------------------------------------ *)
(* A zone inside a region. *)
Record sub_region := {
sr_region : region;
sr_zone : zone;
}.
Definition sub_region_beq sr1 sr2 :=
(sr1.(sr_region) == sr2.(sr_region)) && (sr1.(sr_zone) == sr2.(sr_zone)).
Lemma sub_region_eq_axiom : Equality.axiom sub_region_beq.
Proof.
rewrite /sub_region_beq => -[mp1 sub1] [mp2 sub2].
by apply:(iffP andP) => /= [[/eqP -> /eqP ->] | [-> ->]].
Qed.
Definition sub_region_eqMixin := Equality.Mixin sub_region_eq_axiom.
Canonical sub_region_eqType := EqType sub_region sub_region_eqMixin.
(* ------------------------------------------------------------------ *)
(* idea: could we use a gvar instead of var & v_scope? *)
Variant ptr_kind_init :=
| PIdirect of var & zone & v_scope
| PIregptr of var
| PIstkptr of var & zone & var.
Variant ptr_kind :=
| Pdirect of var & Z & wsize & zone & v_scope
| Pregptr of var
| Pstkptr of var & Z & wsize & zone & var.
Record param_info := {
pp_ptr : var;
pp_writable : bool;
pp_align : wsize;
}.
Record pos_map := {
vrip : var;
vrsp : var;
vxlen : var;
globals : Mvar.t (Z * wsize);
locals : Mvar.t ptr_kind;
vnew : Sv.t;
}.
(* TODO: Z.land or is_align ?
Could be just is_align (sub_region_addr sr) ws ? *)
Definition check_align x (sr:sub_region) ws :=
Let _ := assert (ws <= sr.(sr_region).(r_align))%CMP
(stk_ierror_basic x "unaligned offset") in
assert (Z.land sr.(sr_zone).(z_ofs) (wsize_size ws - 1) == 0)%Z
(stk_ierror_basic x "unaligned sub offset").
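(* e.g. for ws = U64 (wsize_size U64 = 8) the sub-zone offset must have its
   three low bits clear, since the mask wsize_size ws - 1 equals 7 *)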
Definition writable (x:var_i) (r:region) :=
assert r.(r_writable)
(stk_error x (pp_box [:: pp_s "cannot write to the constant pointer"; pp_var x; pp_s "targetting"; pp_var r.(r_slot) ])).
Module Region.
Definition bytes_map := Mvar.t ByteSet.t.
Record region_map := {
var_region : Mvar.t sub_region; (* The region where the value is initialy stored *)
region_var :> Mr.t bytes_map; (* The set of source variables whose value is in the region *)
(* region -> var -> ByteSet.t *)
}.
Definition empty_bytes_map := Mvar.empty ByteSet.t.
Definition empty := {|
var_region := Mvar.empty _;
region_var := Mr.empty bytes_map;
|}.
Definition get_sub_region (rmap:region_map) (x:var_i) :=
match Mvar.get rmap.(var_region) x with
| Some sr => ok sr
| None => Error (stk_error x (pp_box [:: pp_s "no region associated to variable"; pp_var x]))
end.
Definition get_bytes_map (r:region) rv : bytes_map :=
odflt empty_bytes_map (Mr.get rv r).
Definition get_bytes (x:var) (bytes_map:bytes_map) :=
odflt ByteSet.empty (Mvar.get bytes_map x).
Definition interval_of_zone z :=
{| imin := z.(z_ofs); imax := z.(z_ofs) + z.(z_len) |}.
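(* e.g. interval_of_zone {| z_ofs := 4; z_len := 8 |} = {| imin := 4; imax := 12 |} *)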
Definition get_var_bytes rv r x :=
let bm := get_bytes_map r rv in
let bytes := get_bytes x bm in
bytes.
(* Returns the sub-zone of [z] starting at offset [ofs] and of length [len].
The offset [z] can be None, meaning its exact value is not known. In this
case, the full zone [z] is returned. This is a safe approximation.
*)
Definition sub_zone_at_ofs z ofs len :=
match ofs with
| None => z
| Some ofs => {| z_ofs := z.(z_ofs) + ofs; z_len := len |}
end.
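(* e.g. sub_zone_at_ofs {| z_ofs := 8; z_len := 32 |} (Some 4) 8
   = {| z_ofs := 12; z_len := 8 |}, while a [None] offset returns the zone unchanged *)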
Definition sub_region_at_ofs sr ofs len :=
{| sr_region := sr.(sr_region);
sr_zone := sub_zone_at_ofs sr.(sr_zone) ofs len
|}.
Definition check_valid (rmap:region_map) (x:var_i) ofs len :=
(* we get the bytes associated to variable [x] *)
Let sr := get_sub_region rmap x in
let bytes := get_var_bytes rmap sr.(sr_region) x in
let sr' := sub_region_at_ofs sr ofs len in
let isub_ofs := interval_of_zone sr'.(sr_zone) in
(* we check if [isub_ofs] is a subset of one of the intervals of [bytes] *)
Let _ := assert (ByteSet.mem bytes isub_ofs)
(stk_error x (pp_box [:: pp_s "the region associated to variable"; pp_var x; pp_s "is partial"])) in
ok (sr, sr').
Definition clear_bytes i bytes := ByteSet.remove bytes i.
(* TODO: check optim
let bytes := ByteSet.remove bytes i in
if ByteSet.is_empty bytes then None else Some bytes.
*)
Definition clear_bytes_map i (bm:bytes_map) :=
Mvar.map (clear_bytes i) bm.
(* TODO: if optim above, optim below
let bm := Mvar.filter_map (clear_bytes i) bm in
if Mvar.is_empty bm then None else Some bm.
*)
(* TODO: take [bytes] as an argument ? *)
Definition set_pure_bytes rv (x:var) sr ofs len :=
let z := sr.(sr_zone) in
let z1 := sub_zone_at_ofs z ofs len in
let i := interval_of_zone z1 in
let bm := get_bytes_map sr.(sr_region) rv in
let bytes := if ofs is Some _ then ByteSet.add i (get_bytes x bm)
else get_bytes x bm
in
(* clear all bytes corresponding to z1 *)
let bm := clear_bytes_map i bm in
(* set the bytes *)
let bm := Mvar.set bm x bytes in
Mr.set rv sr.(sr_region) bm.
Definition set_bytes rv (x:var_i) sr (ofs : option Z) (len : Z) :=
Let _ := writable x sr.(sr_region) in
ok (set_pure_bytes rv x sr ofs len).
(* TODO: as many functions are similar, maybe we could have one big function
taking flags as arguments that tell whether we have to check align/check valid... *)
Definition set_sub_region rmap (x:var_i) sr (ofs : option Z) (len : Z) :=
Let rv := set_bytes rmap x sr ofs len in
ok {| var_region := Mvar.set rmap.(var_region) x sr;
region_var := rv |}.
Definition sub_region_stkptr s ws z :=
let r := {| r_slot := s; r_align := ws; r_writable := true |} in
{| sr_region := r; sr_zone := z |}.
Section WITH_POINTER_DATA.
Context {pd: PointerData}.
Definition set_stack_ptr (rmap:region_map) s ws z (x':var) :=
let sr := sub_region_stkptr s ws z in
let rv := set_pure_bytes rmap x' sr (Some 0)%Z (wsize_size Uptr) in
{| var_region := rmap.(var_region);
region_var := rv |}.
(* TODO: fusion with check_valid ? *)
Definition check_stack_ptr rmap s ws z x' :=
let sr := sub_region_stkptr s ws z in
let z := sub_zone_at_ofs z (Some 0)%Z (wsize_size Uptr) in
let i := interval_of_zone z in
let bytes := get_var_bytes rmap sr.(sr_region) x' in
ByteSet.mem bytes i.
End WITH_POINTER_DATA.
(* Precondition size_of x = ws && length sr.sr_zone = wsize_size ws *)
Definition set_word rmap (x:var_i) sr ws :=
Let _ := check_align x sr ws in
set_sub_region rmap x sr (Some 0)%Z (size_slot x).
(* If we write to array [x] at offset [ofs], we invalidate the corresponding
memory zone for the other variables, and mark it as valid for [x].
The offset [ofs] can be None, meaning its exact value is not known. In this
case, the full zone [z] associated to array [x] is invalidated for the
other variables, and remains the zone associated to [x]. It is a safe
approximation.
*)
(* [set_word], [set_stack_ptr] and [set_arr_word] could be factorized? -> think more about it *)
Definition set_arr_word (rmap:region_map) (x:var_i) ofs ws :=
Let sr := get_sub_region rmap x in
Let _ := check_align x sr ws in
set_sub_region rmap x sr ofs (wsize_size ws).
Definition set_arr_call rmap x sr := set_sub_region rmap x sr (Some 0)%Z (size_slot x).
Definition set_move_bytes rv x sr :=
let bm := get_bytes_map sr.(sr_region) rv in
let bytes := get_bytes x bm in
let bm := Mvar.set bm x (ByteSet.add (interval_of_zone sr.(sr_zone)) bytes) in
Mr.set rv sr.(sr_region) bm.
Definition set_move_sub (rmap:region_map) x sr :=
let rv := set_move_bytes rmap x sr in
{| var_region := rmap.(var_region);
region_var := rv |}.
Definition set_arr_sub (rmap:region_map) (x:var_i) ofs len sr_from :=
Let sr := get_sub_region rmap x in
let sr' := sub_region_at_ofs sr (Some ofs) len in
Let _ := assert (sr' == sr_from)
(stk_ierror x
(pp_box [::
pp_s "the assignment to sub-array"; pp_var x;
pp_s "cannot be turned into a nop: source and destination regions are not equal"]))
in
ok (set_move_sub rmap x sr').
(* identical to [set_sub_region], except clearing
TODO: fusion with set_arr_sub ? not sure its worth
*)
Definition set_move (rmap:region_map) (x:var) sr :=
let rv := set_move_bytes rmap x sr in
{| var_region := Mvar.set rmap.(var_region) x sr;
region_var := rv |}.
Definition set_arr_init rmap x sr := set_move rmap x sr.
Definition incl_bytes_map (_r: region) (bm1 bm2: bytes_map) :=
Mvar.incl (fun x => ByteSet.subset) bm1 bm2.
Definition incl (rmap1 rmap2:region_map) :=
Mvar.incl (fun x r1 r2 => r1 == r2) rmap1.(var_region) rmap2.(var_region) &&
Mr.incl incl_bytes_map rmap1.(region_var) rmap2.(region_var).
Definition merge_bytes (x:var) (bytes1 bytes2: option ByteSet.t) :=
match bytes1, bytes2 with
| Some bytes1, Some bytes2 =>
let bytes := ByteSet.inter bytes1 bytes2 in
if ByteSet.is_empty bytes then None
else Some bytes
| _, _ => None
end.
Definition merge_bytes_map (_r:region) (bm1 bm2: option bytes_map) :=
match bm1, bm2 with
| Some bm1, Some bm2 =>
let bm := Mvar.map2 merge_bytes bm1 bm2 in
if Mvar.is_empty bm then None
else Some bm
| _, _ => None
end.
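(* [merge] computes the join of two region maps (e.g. the maps obtained from
   two branches of the control flow): a variable keeps its sub-region only if
   both maps agree on it, and the sets of valid bytes are intersected. *)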
Definition merge (rmap1 rmap2:region_map) :=
{| var_region :=
Mvar.map2 (fun _ osr1 osr2 =>
match osr1, osr2 with
| Some sr1, Some sr2 => if sr1 == sr2 then osr1 else None
| _, _ => None
end) rmap1.(var_region) rmap2.(var_region);
region_var := Mr.map2 merge_bytes_map rmap1.(region_var) rmap2.(region_var) |}.
End Region.
Import Region.
Section ASM_OP.
Context {pd: PointerData}.
Context `{asmop:asmOp}.
Definition mul := Papp2 (Omul (Op_w Uptr)).
Definition add := Papp2 (Oadd (Op_w Uptr)).
Definition mk_ofs aa ws e1 ofs :=
let sz := mk_scale aa ws in
if is_const e1 is Some i then
cast_const (i * sz + ofs)%Z
else
add (mul (cast_const sz) (cast_ptr e1)) (cast_const ofs).
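(* e.g. assuming mk_scale AAscale U32 = wsize_size U32 = 4, the call
   mk_ofs AAscale U32 (Pconst 2) 8 folds to cast_const 16 (= 2 * 4 + 8) *)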
Definition mk_ofsi aa ws e1 :=
if is_const e1 is Some i then Some (i * (mk_scale aa ws))%Z
else None.
Section CHECK.
(* The code in this file is called twice.
- First, it is called from the stack alloc OCaml oracle. Indeed, the oracle
returns initial results, and performs stack and reg allocation using
these results. Based on the program that it obtains,
it fixes some of the results and returns them.
- Second, it is called as a normal compilation pass on the results returned
by the oracle.
When the code is called from the OCaml oracle, all the checks
that are performed so that the pass can be proved correct are actually not
needed. We introduce this boolen [check] to deactivate some of the tests
when the code is called from the oracle.
TODO: deactivate more tests (or even do not use rmap) when [check] is [false]
*)
Variable (check : bool).
Definition assert_check E b (e:E) :=
if check then assert b e
else ok tt.
Variant vptr_kind :=
| VKglob of Z * wsize
| VKptr of ptr_kind.
Definition var_kind := option vptr_kind.
Record stack_alloc_params :=
{
(* Return an instruction that computes an address from an base address and
an offset. *)
sap_mov_ofs :
lval (* The variable to save the address to. *)
-> assgn_tag (* The tag present in the source. *)
-> vptr_kind (* The kind of address to compute. *)
-> pexpr (* Variable with base address. *)
-> Z (* Offset. *)
-> option instr_r;
}.
Context
(saparams : stack_alloc_params).
Section Section.
Variables (pmap:pos_map).
Section ALLOC_E.
Variables (rmap: region_map).
Definition get_global (x:var_i) :=
match Mvar.get pmap.(globals) x with
| None => Error (stk_ierror_basic x "unallocated global variable")
| Some z => ok z
end.
Definition get_local (x:var) := Mvar.get pmap.(locals) x.
Definition check_diff (x:var_i) :=
if Sv.mem x pmap.(vnew) then
Error (stk_ierror_basic x "the code writes to one of the new variables")
else ok tt.
Definition check_var (x:var_i) :=
match get_local x with
| None => ok tt
| Some _ =>
Error (stk_error x (pp_box [::
pp_var x; pp_s "is a stack variable, but a reg variable is expected"]))
end.
Definition with_var xi x :=
{| v_var := x; v_info := xi.(v_info) |}.
Definition base_ptr sc :=
match sc with
| Slocal => pmap.(vrsp)
| Sglobal => pmap.(vrip)
end.
Definition addr_from_pk (x:var_i) (pk:ptr_kind) :=
match pk with
| Pdirect _ ofs _ z sc => ok (with_var x (base_ptr sc), ofs + z.(z_ofs))
| Pregptr p => ok (with_var x p, 0)
| Pstkptr _ _ _ _ _ =>
Error (stk_error x (pp_box [::
pp_var x; pp_s "is a stack pointer, it should not appear in an expression"]))
end%Z.
Definition addr_from_vpk x (vpk:vptr_kind) :=
match vpk with
| VKglob zws => ok (with_var x pmap.(vrip), zws.1)
| VKptr pk => addr_from_pk x pk
end.
Definition mk_addr_ptr x aa ws (pk:ptr_kind) (e1:pexpr) :=
Let xofs := addr_from_pk x pk in
ok (xofs.1, mk_ofs aa ws e1 xofs.2).
Definition mk_addr x aa ws (vpk:vptr_kind) (e1:pexpr) :=
Let xofs := addr_from_vpk x vpk in
ok (xofs.1, mk_ofs aa ws e1 xofs.2).
Definition get_var_kind x :=
let xv := x.(gv) in
if is_glob x then
Let z := get_global xv in
ok (Some (VKglob z))
else
ok (omap VKptr (get_local xv)).
Definition sub_region_full x r :=
let z := {| z_ofs := 0; z_len := size_slot x |} in
{| sr_region := r; sr_zone := z |}.
Definition sub_region_glob x ws :=
let r := {| r_slot := x; r_align := ws; r_writable := false |} in
sub_region_full x r.
Definition check_vpk rmap (x:var_i) vpk ofs len :=
match vpk with
| VKglob (_, ws) =>
let sr := sub_region_glob x ws in
ok (sr, sub_region_at_ofs sr ofs len)
| VKptr _pk =>
check_valid rmap x ofs len
end.
(* We could write [check_vpk] as follows.
Definition check_vpk' rmap (x : gvar) ofs len :=
let (sr, bytes) := check_gvalid rmap x in
let sr' := sub_region_at_ofs sr.(sr_zone) ofs len in
let isub_ofs := interval_of_zone sr'.(sr_zone) in
(* we check if [isub_ofs] is a subset of one of the intervals of [bytes] *)
(* useless test when [x] is glob, but factorizes call to [sub_region_at_ofs] *)
Let _ := assert (ByteSet.mem bytes isub_ofs)
(Cerr_stk_alloc "check_valid: the region is partial") in
ok sr'.
*)
Definition check_vpk_word rmap x vpk ofs ws :=
Let srs := check_vpk rmap x vpk ofs (wsize_size ws) in
check_align x srs.1 ws.
Fixpoint alloc_e (e:pexpr) :=
match e with
| Pconst _ | Pbool _ | Parr_init _ => ok e
| Pvar x =>
let xv := x.(gv) in
Let vk := get_var_kind x in
match vk with
| None => Let _ := check_diff xv in ok e
| Some vpk =>
if is_word_type (vtype xv) is Some ws then
Let _ := check_vpk_word rmap xv vpk (Some 0%Z) ws in
Let pofs := mk_addr xv AAdirect ws vpk (Pconst 0) in
ok (Pload ws pofs.1 pofs.2)
else Error (stk_ierror_basic xv "not a word variable in expression")
end
| Pget aa ws x e1 =>
let xv := x.(gv) in
Let e1 := alloc_e e1 in
Let vk := get_var_kind x in
match vk with
| None => Let _ := check_diff xv in ok (Pget aa ws x e1)
| Some vpk =>
let ofs := mk_ofsi aa ws e1 in
Let _ := check_vpk_word rmap xv vpk ofs ws in
Let pofs := mk_addr xv aa ws vpk e1 in
ok (Pload ws pofs.1 pofs.2)
end
| Psub aa ws len x e1 =>
Error (stk_ierror_basic x.(gv) "Psub")
| Pload ws x e1 =>
Let _ := check_var x in
Let _ := check_diff x in
Let e1 := alloc_e e1 in
ok (Pload ws x e1)
| Papp1 o e1 =>
Let e1 := alloc_e e1 in
ok (Papp1 o e1)
| Papp2 o e1 e2 =>
Let e1 := alloc_e e1 in
Let e2 := alloc_e e2 in
ok (Papp2 o e1 e2)
| PappN o es =>
Let es := mapM alloc_e es in
ok (PappN o es)
| Pif t e e1 e2 =>
Let e := alloc_e e in
Let e1 := alloc_e e1 in
Let e2 := alloc_e e2 in
ok (Pif t e e1 e2)
end.
Definition alloc_es := mapM alloc_e.
End ALLOC_E.
Definition sub_region_direct x align sc z :=
let r := {| r_slot := x; r_align := align; r_writable := sc != Sglob |} in
{| sr_region := r; sr_zone := z |}.
Definition sub_region_stack x align z :=
sub_region_direct x align Slocal z.
Definition sub_region_pk x pk :=
match pk with
| Pdirect x ofs align sub Slocal => ok (sub_region_stack x align sub)
| _ => Error (stk_ierror x (pp_box [:: pp_var x; pp_s "is not in the stack"]))
end.
Definition alloc_lval (rmap: region_map) (r:lval) (ty:stype) :=
match r with
| Lnone _ _ => ok (rmap, r)
| Lvar x =>
(* TODO: could we remove this [check_diff] and use an invariant in the proof instead? *)
match get_local x with
| None => Let _ := check_diff x in ok (rmap, r)
| Some pk =>
if is_word_type (vtype x) is Some ws then
if subtype (sword ws) ty then
Let pofs := mk_addr_ptr x AAdirect ws pk (Pconst 0) in
Let sr := sub_region_pk x pk in
let r := Lmem ws pofs.1 pofs.2 in
Let rmap := Region.set_word rmap x sr ws in
ok (rmap, r)
else Error (stk_ierror_basic x "invalid type for assignment")
else Error (stk_ierror_basic x "not a word variable in assignment")
end
| Laset aa ws x e1 =>
(* TODO: could we remove this [check_diff] and use an invariant in the proof instead? *)
Let e1 := alloc_e rmap e1 in
match get_local x with
| None => Let _ := check_diff x in ok (rmap, Laset aa ws x e1)
| Some pk =>
let ofs := mk_ofsi aa ws e1 in
Let rmap := set_arr_word rmap x ofs ws in
Let pofs := mk_addr_ptr x aa ws pk e1 in
let r := Lmem ws pofs.1 pofs.2 in
ok (rmap, r)
end
| Lasub aa ws len x e1 =>
Error (stk_ierror_basic x "Lasub")
| Lmem ws x e1 =>
Let _ := check_var x in
Let _ := check_diff x in
Let e1 := alloc_e rmap e1 in
ok (rmap, Lmem ws x e1)
end.
Definition nop := Copn [::] AT_none Onop [::].
(* [is_spilling] is used for stack pointers. *)
Definition is_nop is_spilling rmap (x:var) (sry:sub_region) : bool :=
if is_spilling is Some (s, ws, z, f) then
if Mvar.get rmap.(var_region) x is Some srx then
(srx == sry) && check_stack_ptr rmap s ws z f
else false
else false.
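(* The move can be elided when [x] is already bound to exactly [sry] and the
   spilled stack pointer is still known to be valid, so recomputing the
   address would be redundant. *)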
(* TODO: better error message *)
Definition get_addr is_spilling rmap x dx tag sry vpk y ofs :=
let ir := if is_nop is_spilling rmap x sry
then Some nop
else sap_mov_ofs saparams dx tag vpk y ofs in
let rmap := Region.set_move rmap x sry in
(rmap, ir).
Definition get_ofs_sub aa ws x e1 :=
match mk_ofsi aa ws e1 with
  | None => Error (stk_ierror_basic x "cannot take/set a subarray on an unknown starting position")
| Some ofs => ok ofs
end.
Definition get_Lvar_sub lv :=
match lv with
| Lvar x => ok (x, None)
| Lasub aa ws len x e1 =>
Let ofs := get_ofs_sub aa ws x e1 in
ok (x, Some (ofs, arr_size ws len))
| _ => Error (stk_ierror_no_var "get_Lvar_sub: variable/subarray expected")
end.
Definition get_Pvar_sub e :=
match e with
| Pvar x => ok (x, None)
| Psub aa ws len x e1 =>
Let ofs := get_ofs_sub aa ws x.(gv) e1 in
ok (x, Some (ofs, arr_size ws len))
| _ => Error (stk_ierror_no_var "get_Pvar_sub: variable/subarray expected")
end.
Definition is_stack_ptr vpk :=
match vpk with
| VKptr (Pstkptr s ofs ws z f) => Some (s, ofs, ws, z, f)
| _ => None
end.
(* Not so elegant: function [addr_from_vpk] can fail, but it
   actually fails only in the [Pstkptr] case, which is handled separately.
Thus function [mk_addr_pexpr] never fails, but this is not checked statically.
*)
Definition mk_addr_pexpr rmap x vpk :=
if is_stack_ptr vpk is Some (s, ofs, ws, z, f) then
Let _ := assert (check_stack_ptr rmap s ws z f)
(stk_error x (pp_box [:: pp_s "the stack pointer"; pp_var x; pp_s "is no longer valid"])) in
ok (Pload Uptr (with_var x pmap.(vrsp)) (cast_const (ofs + z.(z_ofs))), 0%Z)
else
Let xofs := addr_from_vpk x vpk in
ok (Plvar xofs.1, xofs.2).
(* TODO: the check [is_lvar] was removed, was it really on purpose? *)
(* TODO : currently, we check that the source array is valid and set the target
array as valid too. We could, instead, give the same validity to the target
array as the source one.
[check_vpk] should be replaced with some function returning the valid bytes
of y...
*)
(* Precondition is_sarr ty *)
Definition alloc_array_move rmap r tag e :=
Let xsub := get_Lvar_sub r in
Let ysub := get_Pvar_sub e in
let '(x,subx) := xsub in
let '(y,suby) := ysub in
Let sryl :=
let vy := y.(gv) in
Let vk := get_var_kind y in
let (ofs, len) :=
match suby with
| None => (0%Z, size_slot vy)
| Some p => p
end
in
match vk with
| None => Error (stk_ierror_basic vy "register array remains")
| Some vpk =>
Let srs := check_vpk rmap vy vpk (Some ofs) len in
let sry := srs.2 in
Let eofs := mk_addr_pexpr rmap vy vpk in
ok (sry, vpk, eofs.1, (eofs.2 + ofs)%Z)
end
in
let '(sry, vpk, ey, ofs) := sryl in
match subx with
| None =>
match get_local (v_var x) with
| None => Error (stk_ierror_basic x "register array remains")
| Some pk =>
match pk with
| Pdirect s _ ws zx sc =>
let sr := sub_region_direct s ws sc zx in
Let _ :=
assert (sr == sry)
(stk_ierror x
(pp_box [::
pp_s "the assignment to array"; pp_var x;
pp_s "cannot be turned into a nop: source and destination regions are not equal"]))
in
let rmap := Region.set_move rmap x sry in
ok (rmap, nop)
| Pregptr p =>
let (rmap, oir) :=
get_addr None rmap x (Lvar (with_var x p)) tag sry vpk ey ofs in
match oir with
| None =>
let err_pp := pp_box [:: pp_s "cannot compute address"; pp_var x] in
Error (stk_error x err_pp)
| Some ir =>
ok (rmap, ir)
end
| Pstkptr slot ofsx ws z x' =>
let is_spilling := Some (slot, ws, z, x') in
let dx_ofs := cast_const (ofsx + z.(z_ofs)) in
let dx := Lmem Uptr (with_var x pmap.(vrsp)) dx_ofs in
let (rmap, oir) := get_addr is_spilling rmap x dx tag sry vpk ey ofs in
match oir with
| None =>
let err_pp := pp_box [:: pp_s "cannot compute address"; pp_var x] in
Error (stk_error x err_pp)
| Some ir =>
ok (Region.set_stack_ptr rmap slot ws z x', ir)
end
end
end
| Some (ofs, len) =>
match get_local (v_var x) with
| None => Error (stk_ierror_basic x "register array remains")
| Some _ =>
Let rmap := Region.set_arr_sub rmap x ofs len sry in
ok (rmap, nop)
end
end.
(* This function is also defined in array_init.v *)
(* TODO: clean *)
Definition is_array_init e :=
match e with
| Parr_init _ => true
| _ => false
end.
(* We do not update the [var_region] part *)
(* there seems to be an invariant: all Pdirect are in the rmap *)
(* long-term TODO: we can avoid putting PDirect in the rmap (look in pmap instead) *)
Definition alloc_array_move_init rmap r tag e :=
if is_array_init e then
Let xsub := get_Lvar_sub r in
let '(x,subx) := xsub in
let (ofs, len) :=
match subx with
| None => (0%Z, size_slot (v_var x))
| Some p => p
end in
Let sr :=
match get_local (v_var x) with
| None => Error (stk_ierror_basic x "register array remains")
| Some pk =>
match pk with
| Pdirect x' _ ws z sc =>
if sc is Slocal then
ok (sub_region_stack x' ws z)
else
Error (stk_error x (pp_box [:: pp_s "cannot initialize glob array"; pp_var x]))
| _ =>
get_sub_region rmap x
end
end in
let sr := sub_region_at_ofs sr (Some ofs) len in
let rmap := Region.set_move_sub rmap x sr in
ok (rmap, nop)
else alloc_array_move rmap r tag e.
Definition bad_lval_number := stk_ierror_no_var "invalid number of lval".
Definition alloc_lvals rmap rs tys :=
fmapM2 bad_lval_number alloc_lval rmap rs tys.
Section LOOP.
Variable ii:instr_info.
Variable check_c2 : region_map -> cexec ((region_map * region_map) * (pexpr * (seq cmd * seq cmd)) ).
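(* Iterate [check_c2] at most [n] times, joining the input map with the result
   via [merge], until a post-fixpoint is reached ([incl m m'.1.2]). *)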
Fixpoint loop2 (n:nat) (m:region_map) :=
match n with
| O => Error (pp_at_ii ii (stk_ierror_no_var "loop2"))
| S n =>
Let m' := check_c2 m in
if incl m m'.1.2 then ok (m'.1.1, m'.2)
else loop2 n (merge m m'.1.2)
end.
End LOOP.
Record stk_alloc_oracle_t :=
{ sao_align : wsize
; sao_size: Z
; sao_ioff: Z
; sao_extra_size: Z
; sao_max_size : Z
; sao_max_call_depth : Z
; sao_params : seq (option param_info) (* Allocation of pointer params *)
; sao_return : seq (option nat) (* Where to find the param input region *)
; sao_slots : seq (var * wsize * Z)
; sao_alloc: seq (var * ptr_kind_init) (* Allocation of local variables without params, and stk ptr *)
; sao_to_save: seq (var * Z)
; sao_rsp: saved_stack
; sao_return_address: return_address_location
}.
Section PROG.
Context (local_alloc: funname -> stk_alloc_oracle_t).
Definition get_Pvar e :=
match e with
| Pvar x => ok x
| _ => Error (stk_ierror_no_var "get_Pvar: variable expected")
end.
(* The name is chosen to be similar to [set_pure_bytes] and [set_move_bytes],
but there are probably better ideas.
TODO: factorize [set_clear_bytes] and [set_pure_bytes] ?
*)
Definition set_clear_bytes rv sr ofs len :=
let z := sr.(sr_zone) in
let z1 := sub_zone_at_ofs z ofs len in
let i := interval_of_zone z1 in
let bm := get_bytes_map sr.(sr_region) rv in
(* clear all bytes corresponding to z1 *)
let bm := clear_bytes_map i bm in
Mr.set rv sr.(sr_region) bm.
Definition set_clear_pure rmap sr ofs len :=
{| var_region := rmap.(var_region);
region_var := set_clear_bytes rmap sr ofs len |}.
Definition set_clear rmap x sr ofs len :=
Let _ := writable x sr.(sr_region) in
ok (set_clear_pure rmap sr ofs len).
(* We clear the arguments. This is not necessary in the classic case, because
we also clear them when assigning the results in alloc_call_res
(this works if each writable reg ptr is returned (which is currently
checked by the pretyper) and if each result variable has the same size
as the corresponding input variable).
   But this complicates the proof and requires a few more
checks in stack_alloc to be valid. Thus, for the sake of simplicity, it was
decided to make the clearing of the arguments twice : here and in
alloc_call_res.
We use two rmaps:
- the initial rmap [rmap0] is used to check the validity of the sub-regions;
- the current rmap [rmap] is [rmap0] with all the previous writable sub-regions cleared.
Actually, we could use [rmap] to check the validity, and that would partially
enforce that the arguments correspond to disjoint regions (in particular,
writable sub-regions are pairwise disjoint), so with this version we could
simplify check_all_disj. If we first check the validity and clear the writable regions,
and then check the validity of the non-writable ones, we can even remove [check_all_disj].
But the error message (disjoint regions) is much clearer when we have [check_all_disj],
so I leave it as it is now.
*)
Definition alloc_call_arg_aux rmap0 rmap (sao_param: option param_info) (e:pexpr) :=
Let x := get_Pvar e in
Let _ := assert (~~is_glob x)
(stk_ierror_basic x.(gv) "global variable in argument of a call") in
let xv := gv x in
match sao_param, get_local xv with
| None, None =>
Let _ := check_diff xv in
ok (rmap, (None, Pvar x))
| None, Some _ => Error (stk_ierror_basic xv "argument not a reg")
| Some pi, Some (Pregptr p) =>
Let srs := Region.check_valid rmap0 xv (Some 0%Z) (size_slot xv) in
let sr := srs.1 in
Let rmap := if pi.(pp_writable) then set_clear rmap xv sr (Some 0%Z) (size_slot xv) else ok rmap in
Let _ := check_align xv sr pi.(pp_align) in
ok (rmap, (Some (pi.(pp_writable),sr), Pvar (mk_lvar (with_var xv p))))
| Some _, _ => Error (stk_ierror_basic xv "the argument should be a reg ptr")
end.
Definition alloc_call_args_aux rmap sao_params es :=
fmapM2 (stk_ierror_no_var "bad params info") (alloc_call_arg_aux rmap) rmap sao_params es.
Definition disj_sub_regions sr1 sr2 :=
~~(region_same sr1.(sr_region) sr2.(sr_region)) ||
disjoint_zones sr1.(sr_zone) sr2.(sr_zone).
Fixpoint check_all_disj (notwritables writables:seq sub_region) (srs:seq (option (bool * sub_region) * pexpr)) :=
match srs with
| [::] => true
| (None, _) :: srs => check_all_disj notwritables writables srs
| (Some (writable, sr), _) :: srs =>
if all (disj_sub_regions sr) writables then
if writable then
if all (disj_sub_regions sr) notwritables then
check_all_disj notwritables (sr::writables) srs
else false
else check_all_disj (sr::notwritables) writables srs
else false
end.
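(* Minimal Python model of check_all_disj above (disjoint is an assumed
   predicate on sub-regions): a writable region must be disjoint from every
   region seen so far, while a read-only one only needs to avoid the writables.

     def check_all_disj_model(srs, disjoint):
         notwritables, writables = [], []
         for item in srs:
             if item is None:
                 continue
             writable, sr = item
             if not all(disjoint(sr, w) for w in writables):
                 return False
             if writable:
                 if not all(disjoint(sr, n) for n in notwritables):
                     return False
                 writables.append(sr)
             else:
                 notwritables.append(sr)
         return True
*)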
Definition alloc_call_args rmap (sao_params: seq (option param_info)) (es:seq pexpr) :=
Let es := alloc_call_args_aux rmap sao_params es in
Let _ := assert (check_all_disj [::] [::] es.2)
(stk_error_no_var "some writable reg ptr are not disjoints") in
ok es.
Definition check_lval_reg_call (r:lval) :=
match r with
| Lnone _ _ => ok tt
| Lvar x =>
match get_local x with
| None => Let _ := check_diff x in ok tt
| Some _ => Error (stk_ierror_basic x "call result should be stored in reg")
end
| Laset aa ws x e1 => Error (stk_ierror_basic x "array assignment in lval of a call")
| Lasub aa ws len x e1 => Error (stk_ierror_basic x "sub-array assignment in lval of a call")
| Lmem ws x e1 => Error (stk_ierror_basic x "call result should be stored in reg")
end.
Definition check_is_Lvar r (x:var) :=
match r with
| Lvar x' => x == x'
| _ => false
end.
Definition get_regptr (x:var_i) :=
match get_local x with
| Some (Pregptr p) => ok (with_var x p)
| _ => Error (stk_ierror x (pp_box [:: pp_s "variable"; pp_var x; pp_s "should be a reg ptr"]))
end.
Definition alloc_lval_call (srs:seq (option (bool * sub_region) * pexpr)) rmap (r: lval) (i:option nat) :=
match i with
| None =>
Let _ := check_lval_reg_call r in
ok (rmap, r)
| Some i =>
match nth (None, Pconst 0) srs i with
| (Some (_,sr), _) =>
match r with
| Lnone i _ => ok (rmap, Lnone i (sword Uptr))
| Lvar x =>
Let p := get_regptr x in
Let rmap := Region.set_arr_call rmap x sr in
(* TODO: Lvar p or Lvar (with_var x p) like in alloc_call_arg? *)
ok (rmap, Lvar p)
| Laset aa ws x e1 => Error (stk_ierror_basic x "array assignment in lval of a call")
| Lasub aa ws len x e1 => Error (stk_ierror_basic x "sub-array assignment in lval of a call")
| Lmem ws x e1 => Error (stk_ierror_basic x "call result should be stored in reg ptr")
end
| (None, _) => Error (stk_ierror_no_var "alloc_lval_call")
end
end.
Definition alloc_call_res rmap srs ret_pos rs :=
fmapM2 bad_lval_number (alloc_lval_call srs) rmap rs ret_pos.
Definition is_RAnone ral :=
if ral is RAnone then true else false.
Definition alloc_call (sao_caller:stk_alloc_oracle_t) rmap ini rs fn es :=
let sao_callee := local_alloc fn in
Let es := alloc_call_args rmap sao_callee.(sao_params) es in
let '(rmap, es) := es in
Let rs := alloc_call_res rmap es sao_callee.(sao_return) rs in (*
Let _ := assert_check (~~ is_RAnone sao_callee.(sao_return_address))
(Cerr_stk_alloc "cannot call export function")
in *)
Let _ :=
let local_size :=
if is_RAnone sao_caller.(sao_return_address) then
(sao_caller.(sao_size) + sao_caller.(sao_extra_size) + wsize_size sao_caller.(sao_align) - 1)%Z
else
(round_ws sao_caller.(sao_align) (sao_caller.(sao_size) + sao_caller.(sao_extra_size)))%Z
in
assert_check (local_size + sao_callee.(sao_max_size) <=? sao_caller.(sao_max_size))%Z
(stk_ierror_no_var "error in max size computation")
in
Let _ := assert_check (sao_callee.(sao_align) <= sao_caller.(sao_align))%CMP
(stk_ierror_no_var "non aligned function call")
in
let es := map snd es in
ok (rs.1, Ccall ini rs.2 fn es).
(* Before stack_alloc:
Csyscall [::x] (getrandom len) [::t]
t : arr n & len <= n.
return arr len.
After:
xlen: Uptr
xlen := len;
Csyscall [::xp] (getrandom len) [::p, xlen]
*)
Definition alloc_syscall ii rmap rs o es :=
add_iinfo ii
match o with
| RandomBytes len =>
(* per the semantics, we have [len <= wbase Uptr], but we need [<] *)
Let _ := assert (len <? wbase Uptr)%Z
(stk_error_no_var "randombytes: the requested size is too large")
in
match rs, es with
| [::Lvar x], [::Pvar xe] =>
let xe := xe.(gv) in
let xlen := with_var xe (vxlen pmap) in
Let p := get_regptr xe in
Let xp := get_regptr x in
Let sr := get_sub_region rmap xe in
Let rmap := set_sub_region rmap x sr (Some 0%Z) (Zpos len) in
ok (rmap,
[:: MkI ii (Cassgn (Lvar xlen) AT_none (sword Uptr) (cast_const (Zpos len)));
MkI ii (Csyscall [::Lvar xp] o [:: Plvar p; Plvar xlen])])
| _, _ =>
Error (stk_ierror_no_var "randombytes: invalid args or result")
end
end.
Fixpoint alloc_i sao (rmap:region_map) (i: instr) : cexec (region_map * cmd) :=
let (ii, ir) := i in
match ir with
| Cassgn r t ty e =>
if is_sarr ty then
Let ri := add_iinfo ii (alloc_array_move_init rmap r t e) in
ok (ri.1, [:: MkI ii ri.2])
else
Let e := add_iinfo ii (alloc_e rmap e) in
Let r := add_iinfo ii (alloc_lval rmap r ty) in
ok (r.1, [:: MkI ii (Cassgn r.2 t ty e)])
| Copn rs t o e =>
Let e := add_iinfo ii (alloc_es rmap e) in
Let rs := add_iinfo ii (alloc_lvals rmap rs (sopn_tout o)) in
ok (rs.1, [:: MkI ii (Copn rs.2 t o e)])
| Csyscall rs o es =>
alloc_syscall ii rmap rs o es
| Cif e c1 c2 =>
Let e := add_iinfo ii (alloc_e rmap e) in
Let c1 := fmapM (alloc_i sao) rmap c1 in
Let c2 := fmapM (alloc_i sao) rmap c2 in
let rmap:= merge c1.1 c2.1 in
ok (rmap, [:: MkI ii (Cif e (flatten c1.2) (flatten c2.2))])
| Cwhile a c1 e c2 =>
let check_c rmap :=
Let c1 := fmapM (alloc_i sao) rmap c1 in
let rmap1 := c1.1 in
Let e := add_iinfo ii (alloc_e rmap1 e) in
Let c2 := fmapM (alloc_i sao) rmap1 c2 in
ok ((rmap1, c2.1), (e, (c1.2, c2.2))) in
Let r := loop2 ii check_c Loop.nb rmap in
ok (r.1, [:: MkI ii (Cwhile a (flatten r.2.2.1) r.2.1 (flatten r.2.2.2))])
| Ccall ini rs fn es =>
Let ri := add_iinfo ii (alloc_call sao rmap ini rs fn es) in
ok (ri.1, [::MkI ii ri.2])
| Cfor _ _ _ => Error (pp_at_ii ii (stk_ierror_no_var "don't deal with for loop"))
end.
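(* Non-authoritative Python sketch of the bounded fixpoint iteration behind
   [loop2] in the Cwhile case: re-analyse the loop at most Loop.nb times,
   merging region maps until a stable one is found. The precise stability test
   is the one defined with [loop2] elsewhere in the development.

     def loop2_model(check_c, nb, rmap, merge, stable):
         for _ in range(nb):
             rmap1, data = check_c(rmap)
             if stable(rmap, rmap1):
                 return rmap1, data
             rmap = merge(rmap, rmap1)
         raise RuntimeError("loop2: no fixpoint within the bound")
*)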
End PROG.
End Section.
Definition init_stack_layout (mglob : Mvar.t (Z * wsize)) sao :=
let add (xsr: var * wsize * Z)
(slp: Mvar.t (Z * wsize) * Z) :=
let '(stack, p) := slp in
let '(x,ws,ofs) := xsr in
if Mvar.get stack x is Some _ then Error (stk_ierror_no_var "duplicate stack region")
else if Mvar.get mglob x is Some _ then Error (stk_ierror_no_var "a region is both glob and stack")
else
if (p <= ofs)%CMP then
let len := size_slot x in
if (ws <= sao.(sao_align))%CMP then
if (Z.land ofs (wsize_size ws - 1) == 0)%Z then
let stack := Mvar.set stack x (ofs, ws) in
ok (stack, (ofs + len)%Z)
else Error (stk_ierror_no_var "bad stack region alignment")
else Error (stk_ierror_no_var "bad stack alignment")
else Error (stk_ierror_no_var "stack region overlap") in
Let _ := assert (0 <=? sao.(sao_ioff))%Z (stk_ierror_no_var "negative initial stack offset") in
Let sp := foldM add (Mvar.empty _, sao.(sao_ioff)) sao.(sao_slots) in
let '(stack, size) := sp in
if (size <= sao.(sao_size))%CMP then ok stack
else Error (stk_ierror_no_var "stack size").
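(* The test [Z.land ofs (wsize_size ws - 1) == 0] is the usual power-of-two
   alignment trick: ofs is ws-aligned iff its low bits are zero. In Python:

     def is_aligned(ofs, size):           # size must be a power of two
         return ofs & (size - 1) == 0

     assert is_aligned(32, 8) and not is_aligned(33, 8)
*)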
Definition add_alloc globals stack (xpk:var * ptr_kind_init) (lrx: Mvar.t ptr_kind * region_map * Sv.t) :=
let '(locals, rmap, sv) := lrx in
let '(x, pk) := xpk in
if Sv.mem x sv then Error (stk_ierror_no_var "invalid reg pointer")
else if Mvar.get locals x is Some _ then
Error (stk_ierror_no_var "the oracle returned two results for the same var")
else
Let svrmap :=
match pk with
| PIdirect x' z sc =>
let vars := if sc is Slocal then stack else globals in
match Mvar.get vars x' with
| None => Error (stk_ierror_no_var "unknown region")
| Some (ofs', ws') =>
if [&& (size_slot x <= z.(z_len))%CMP, (0%Z <= z.(z_ofs))%CMP &
((z.(z_ofs) + z.(z_len))%Z <= size_slot x')%CMP] then
let rmap :=
if sc is Slocal then
let sr := sub_region_stack x' ws' z in
Region.set_arr_init rmap x sr
else
rmap
in
ok (sv, Pdirect x' ofs' ws' z sc, rmap)
else Error (stk_ierror_no_var "invalid slot")
end
| PIstkptr x' z xp =>
if ~~ is_sarr x.(vtype) then
Error (stk_ierror_no_var "a stk ptr variable must be an array")
else
match Mvar.get stack x' with
| None => Error (stk_ierror_no_var "unknown stack region")
| Some (ofs', ws') =>
if Sv.mem xp sv then Error (stk_ierror_no_var "invalid stk ptr (not unique)")
else if xp == x then Error (stk_ierror_no_var "a pseudo-var is equal to a program var")
else if Mvar.get locals xp is Some _ then Error (stk_ierror_no_var "a pseudo-var is equal to a program var")
else
if [&& (Uptr <= ws')%CMP,
(0%Z <= z.(z_ofs))%CMP,
(Z.land z.(z_ofs) (wsize_size Uptr - 1) == 0)%Z,
(wsize_size Uptr <= z.(z_len))%CMP &
((z.(z_ofs) + z.(z_len))%Z <= size_slot x')%CMP] then
ok (Sv.add xp sv, Pstkptr x' ofs' ws' z xp, rmap)
else Error (stk_ierror_no_var "invalid ptr kind")
end
| PIregptr p =>
if ~~ is_sarr x.(vtype) then
Error (stk_ierror_no_var "a reg ptr variable must be an array")
else
if Sv.mem p sv then Error (stk_ierror_no_var "invalid reg pointer already exists")
else if Mvar.get locals p is Some _ then Error (stk_ierror_no_var "a pointer is equal to a program var")
else if vtype p != sword Uptr then Error (stk_ierror_no_var "invalid pointer type")
else ok (Sv.add p sv, Pregptr p, rmap)
end in
let '(sv,pk, rmap) := svrmap in
let locals := Mvar.set locals x pk in
ok (locals, rmap, sv).
Definition init_local_map vrip vrsp vxlen globals stack sao :=
Let _ := assert (vxlen != vrip) (stk_ierror_no_var "two fresh variables are equal") in
Let _ := assert (vxlen != vrsp) (stk_ierror_no_var "two fresh variables are equal") in
let sv := Sv.add vxlen (Sv.add vrip (Sv.add vrsp Sv.empty)) in
Let aux := foldM (add_alloc globals stack) (Mvar.empty _, Region.empty, sv) sao.(sao_alloc) in
let '(locals, rmap, sv) := aux in
ok (locals, rmap, sv).
(** For each function, the oracle returns:
- the size of the stack block;
- an allocation for local variables;
- an allocation for the variables to save;
- where to save the stack pointer (of the caller); (* TODO: merge with above? *)
- how to pass the return address (non-export functions only)
It can call back the partial stack-alloc transformation that, given an oracle (size of the stack block and allocation of stack variables),
will transform the body of the current function.
The oracle is implemented as follows:
1/ stack allocation
2/ Reg allocation
3/ if we have a register left to save the stack pointer, we use one of those registers
else
4/ we restart stack allocation and keep one position in the stack to save the stack pointer
5/ Reg allocation
*)
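(* Non-authoritative Python sketch of the oracle strategy above; stack_alloc
   and reg_alloc are placeholders for the actual passes, and free_register is
   an assumed field:

     def oracle_model(fd, stack_alloc, reg_alloc):
         layout = stack_alloc(fd)                          # 1/
         regs = reg_alloc(fd, layout)                      # 2/
         if regs.free_register is not None:                # 3/
             return layout, regs
         layout = stack_alloc(fd, reserve_rsp_slot=True)   # 4/
         return layout, reg_alloc(fd, layout)              # 5/
*)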
Definition check_result pmap rmap paramsi params oi (x:var_i) :=
match oi with
| Some i =>
match nth None paramsi i with
| Some sr =>
Let _ := assert (x.(vtype) == (nth x params i).(vtype))
(stk_ierror_no_var "reg ptr in result not corresponding to a parameter") in
Let srs := check_valid rmap x (Some 0%Z) (size_slot x) in
let sr' := srs.1 in
Let _ := assert (sr == sr') (stk_ierror_no_var "invalid reg ptr in result") in
Let p := get_regptr pmap x in
ok p
| None => Error (stk_ierror_no_var "invalid function info")
end
| None =>
Let _ := check_var pmap x in
Let _ := check_diff pmap x in
ok x
end.
(* TODO: clean the 3 [all2] functions *)
Definition check_all_writable_regions_returned paramsi (ret_pos:seq (option nat)) :=
all2 (fun i osr =>
match osr with
| Some sr => if sr.(sr_region).(r_writable) then Some i \in ret_pos else true
| None => true
end) (iota 0 (size paramsi)) paramsi.
Definition check_results pmap rmap paramsi params ret_pos res :=
Let _ := assert (check_all_writable_regions_returned paramsi ret_pos)
(stk_ierror_no_var "a writable region is not returned")
in
mapM2 (stk_ierror_no_var "invalid function info")
(check_result pmap rmap paramsi params) ret_pos res.
(* TODO: is "duplicate region" the best error msg? *)
Definition init_param (mglob stack : Mvar.t (Z * wsize)) accu pi (x:var_i) :=
let: (disj, lmap, rmap) := accu in
Let _ := assert (~~ Sv.mem x disj) (stk_ierror_no_var "a parameter already exists") in
if Mvar.get lmap x is Some _ then Error (stk_ierror_no_var "a stack variable also occurs as a parameter")
else
match pi with
| None => ok (accu, (None, x))
| Some pi =>
Let _ := assert (vtype pi.(pp_ptr) == sword Uptr) (stk_ierror_no_var "bad ptr type") in
Let _ := assert (~~Sv.mem pi.(pp_ptr) disj) (stk_ierror_no_var "duplicate region") in
Let _ := assert (is_sarr x.(vtype)) (stk_ierror_no_var "bad reg ptr type") in
if Mvar.get lmap pi.(pp_ptr) is Some _ then Error (stk_ierror_no_var "a pointer is equal to a local var")
else if Mvar.get mglob x is Some _ then Error (stk_ierror_no_var "a region is both glob and param")
else if Mvar.get stack x is Some _ then Error (stk_ierror_no_var "a region is both stack and param")
else
let r :=
{| r_slot := x;
r_align := pi.(pp_align); r_writable := pi.(pp_writable) |} in
let sr := sub_region_full x r in
ok (Sv.add pi.(pp_ptr) disj,
Mvar.set lmap x (Pregptr pi.(pp_ptr)),
set_move rmap x sr,
(Some sr, with_var x pi.(pp_ptr)))
end.
Definition init_params mglob stack disj lmap rmap sao_params params :=
fmapM2 (stk_ierror_no_var "invalid function info")
(init_param mglob stack) (disj, lmap, rmap) sao_params params.
Definition alloc_fd_aux p_extra mglob (fresh_reg : string -> stype -> string) (local_alloc: funname -> stk_alloc_oracle_t) sao fd : cexec _ufundef :=
let vrip := {| vtype := sword Uptr; vname := p_extra.(sp_rip) |} in
let vrsp := {| vtype := sword Uptr; vname := p_extra.(sp_rsp) |} in
let vxlen := {| vtype := sword Uptr; vname := fresh_reg "__len__"%string (sword Uptr) |} in
let ra := sao.(sao_return_address) in
Let stack := init_stack_layout mglob sao in
Let mstk := init_local_map vrip vrsp vxlen mglob stack sao in
let '(locals, rmap, disj) := mstk in
(* adding params to the map *)
Let rparams :=
init_params mglob stack disj locals rmap sao.(sao_params) fd.(f_params) in
let: (sv, lmap, rmap, alloc_params) := rparams in
let paramsi := map fst alloc_params in
let params : seq var_i := map snd alloc_params in
let pmap := {|
vrip := vrip;
vrsp := vrsp;
vxlen := vxlen;
globals := mglob;
locals := lmap;
vnew := sv;
|} in
Let _ := assert (0 <=? sao.(sao_extra_size))%Z
(stk_ierror_no_var "negative extra size")
in
Let _ :=
let local_size :=
if is_RAnone sao.(sao_return_address) then
(sao.(sao_size) + sao.(sao_extra_size) + wsize_size sao.(sao_align) - 1)%Z
else
(round_ws sao.(sao_align) (sao.(sao_size) + sao.(sao_extra_size)))%Z
in
assert_check (local_size <=? sao.(sao_max_size))%Z
(stk_ierror_no_var "sao_max_size too small")
in
Let rbody := fmapM (alloc_i pmap local_alloc sao) rmap fd.(f_body) in
let: (rmap, body) := rbody in
Let res :=
check_results pmap rmap paramsi fd.(f_params) sao.(sao_return) fd.(f_res) in
ok {|
f_info := f_info fd;
f_tyin := map2 (fun o ty => if o is Some _ then sword Uptr else ty) sao.(sao_params) fd.(f_tyin);
f_params := params;
f_body := flatten body;
f_tyout := map2 (fun o ty => if o is Some _ then sword Uptr else ty) sao.(sao_return) fd.(f_tyout);
f_res := res;
f_extra := f_extra fd |}.
Definition alloc_fd p_extra mglob (fresh_reg : string -> stype -> string) (local_alloc: funname -> stk_alloc_oracle_t) fn fd :=
let: sao := local_alloc fn in
Let fd := alloc_fd_aux p_extra mglob fresh_reg local_alloc sao fd in
let f_extra := {|
sf_align := sao.(sao_align);
sf_stk_sz := sao.(sao_size);
sf_stk_ioff := sao.(sao_ioff);
sf_stk_extra_sz := sao.(sao_extra_size);
sf_stk_max := sao.(sao_max_size);
sf_max_call_depth := sao.(sao_max_call_depth);
sf_to_save := sao.(sao_to_save);
sf_save_stack := sao.(sao_rsp);
sf_return_address := sao.(sao_return_address);
|} in
ok (swith_extra fd f_extra).
Definition check_glob (m: Mvar.t (Z*wsize)) (data:seq u8) (gd:glob_decl) :=
let x := gd.1 in
match Mvar.get m x with
| None => false
| Some (z, _) =>
let n := Z.to_nat z in
let data := drop n data in
match gd.2 with
| @Gword ws w =>
let s := Z.to_nat (wsize_size ws) in
(s <= size data) &&
(LE.decode ws (take s data) == w)
| @Garr p t =>
let s := Z.to_nat p in
(s <= size data) &&
all (fun i =>
match read t (Z.of_nat i) U8 with
| Ok w => nth 0%R data i == w
| _ => false
end) (iota 0 s)
end
end.
Definition check_globs (gd:glob_decls) (m:Mvar.t (Z*wsize)) (data:seq u8) :=
all (check_glob m data) gd.
Definition init_map (sz:Z) (l:list (var * wsize * Z)) : cexec (Mvar.t (Z*wsize)) :=
let add (vp:var * wsize * Z) (globals:Mvar.t (Z*wsize) * Z) :=
let '(v, ws, p) := vp in
if (globals.2 <=? p)%Z then
if Z.land p (wsize_size ws - 1) == 0%Z then
let s := size_slot v in
ok (Mvar.set globals.1 v (p,ws), p + s)%Z
else Error (stk_ierror_no_var "bad global alignment")
else Error (stk_ierror_no_var "global overlap") in
Let globals := foldM add (Mvar.empty (Z*wsize), 0%Z) l in
if (globals.2 <=? sz)%Z then ok globals.1
else Error (stk_ierror_no_var "global size").
Definition alloc_prog (fresh_reg:string -> stype -> Ident.ident)
rip rsp global_data global_alloc local_alloc (P:_uprog) : cexec _sprog :=
Let mglob := init_map (Z.of_nat (size global_data)) global_alloc in
let p_extra := {|
sp_rip := rip;
sp_rsp := rsp;
sp_globs := global_data;
|} in
if rip == rsp then Error (stk_ierror_no_var "rip and rsp clash")
else if check_globs P.(p_globs) mglob global_data then
Let p_funs := map_cfprog_name (alloc_fd p_extra mglob fresh_reg local_alloc) P.(p_funcs) in
ok {| p_funcs := p_funs;
p_globs := [::];
p_extra := p_extra;
|}
else
Error (stk_ierror_no_var "invalid data").
End CHECK.
End ASM_OP.
|
{"author": "jasmin-lang", "repo": "jasmin", "sha": "3c783b662000c371ba924a953d444fd80b860d9f", "save_path": "github-repos/coq/jasmin-lang-jasmin", "path": "github-repos/coq/jasmin-lang-jasmin/jasmin-3c783b662000c371ba924a953d444fd80b860d9f/proofs/compiler/stack_alloc.v"}
|
import numpy as np
import torch
from baselines.common.vec_env import VecEnvWrapper
from gym import spaces, ActionWrapper
from envs.ImageObsVecEnvWrapper import get_image_obs_wrapper
from envs.ResidualVecEnvWrapper import get_residual_layers
from pose_estimator.utils import unnormalise_y
class PoseEstimatorVecEnvWrapper(VecEnvWrapper):
"""
Uses a pose estimator to estimate the state from the image. Wrapping this environment
around a ResidualVecEnvWrapper makes it possible to use a full state policy on an environment
with images as observations
"""
def __init__(self, venv, device, pose_estimator, state_to_estimate, low, high,
abs_to_rel=False):
super().__init__(venv)
self.image_obs_wrapper = get_image_obs_wrapper(venv)
assert self.image_obs_wrapper is not None
self.estimator = pose_estimator.to(device)
self.estimator.eval()
self.policy_layers = get_residual_layers(venv)
self.state_obs_space = self.policy_layers[0].observation_space
self.state_to_estimate = state_to_estimate
self.state_to_use = [i for i in range(self.state_obs_space.shape[0])
if i not in state_to_estimate]
self.low = low
self.high = high
self.curr_image = None
self.abs_to_rel = abs_to_rel
self.target_z = np.array(self.get_images(mode="target_height"))
self.junk = None
self.abs_estimations = None
def step_async(self, actions):
with torch.no_grad():
net_output = self.estimator.predict(self.curr_image).cpu().numpy()
estimation = net_output if self.low is None else unnormalise_y(net_output,
self.low, self.high)
if self.abs_estimations is None:
self.abs_estimations = np.array([estimation])
else:
self.abs_estimations = np.append(self.abs_estimations, [estimation], axis=0)
obs = np.zeros((self.num_envs, *self.state_obs_space.shape))
estimation = np.median(self.abs_estimations, axis=0)
obs[:, self.state_to_use] = self.image_obs_wrapper.curr_state_obs[:, self.state_to_use]
if self.abs_to_rel:
full_pos_estimation = np.append(estimation[:, :2], self.target_z, axis=1)
# rack_to_trg = self.base_env.get_position(self.base_env.target_handle) - \
# self.base_env.get_position(self.base_env.rack_handle)
# full_pos_estimation += rack_to_trg
actual_plate_pos = np.array(self.get_images(mode='plate'))
relative_estimation = full_pos_estimation - actual_plate_pos
estimation = np.append(relative_estimation, estimation[:, 2:], axis=1)
# FOR ADJUSTING FROM RACK ESTIMATION TO TARGET OBSERVATIONS
# rack_to_trg = self.base_env.get_position(self.base_env.target_handle) - \
# self.base_env.get_position(self.base_env.rack_handle)
# estimation[:, :-1] += rack_to_trg[:-1]
obs[:, self.state_to_estimate] = estimation
for policy in self.policy_layers:
policy.curr_obs = obs
self.venv.step_async(actions)
def step_wait(self):
self.curr_image, rew, done, info = self.venv.step_wait()
if np.all(done):
self.abs_estimations = None
return self.curr_image, rew, done, info
def reset(self):
self.curr_image = self.venv.reset()
return self.curr_image
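# Hypothetical wiring sketch (not from the original project): how this wrapper
# is meant to be stacked on an image-observation vec env. `net`, `low`, `high`
# and the choice of state indices [0, 1, 2] are illustrative assumptions.
def wrap_with_pose_estimator(venv, device, net, low, high):
    return PoseEstimatorVecEnvWrapper(
        venv, device, pose_estimator=net,
        state_to_estimate=[0, 1, 2], low=low, high=high)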
class ClipActions(ActionWrapper):
def __init__(self, env):
super(ClipActions, self).__init__(env)
def action(self, action):
return np.clip(action, self.action_space.low, self.action_space.high)
# TODO: Scale properly to support boundaries where low != -high
class BoundPositionVelocity(ActionWrapper):
def __init__(self, env):
super(BoundPositionVelocity, self).__init__(env)
def action(self, action):
pos = action[:3]
if not ((pos >= self.action_space.low[0]) & (pos <= self.action_space.high[0])).all():
pos /= np.max(np.abs(pos))
pos *= self.action_space.high[0]
return action
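# A sketch toward the TODO above (illustrative, not part of the original file):
# bound the position inside a box with arbitrary, possibly asymmetric limits by
# shrinking toward the box centre instead of assuming low == -high.
class BoundPositionGeneral(ActionWrapper):
    def action(self, action):
        low = self.action_space.low[:3]
        high = self.action_space.high[:3]
        centre, half = (low + high) / 2.0, (high - low) / 2.0
        offset = action[:3] - centre
        scale = np.max(np.abs(offset) / half)  # assumes half > 0 elementwise
        if scale > 1.0:  # outside the box: rescale onto its boundary
            action[:3] = centre + offset / scale
        return action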
class ScaleActions(ActionWrapper):
def __init__(self, env, factor):
self.factor = factor
super(ScaleActions, self).__init__(env)
def action(self, action):
action *= self.factor
return action
class E2EVecEnvWrapper(VecEnvWrapper):
"""
Used to train an end-to-end policy. Not used in this project, as training takes infeasibly long.
"""
def __init__(self, venv):
res = venv.get_images(mode='activate')[0]
image_obs_space = spaces.Box(0, 255, [3, *res], dtype=np.uint8)
state_obs_space = venv.observation_space
observation_space = spaces.Tuple((image_obs_space, state_obs_space))
observation_space.shape = (image_obs_space.shape, state_obs_space.shape)
super().__init__(venv, observation_space)
self.curr_state_obs = None
self.last_4_image_obs = None
def reset(self):
self.curr_state_obs = self.venv.reset()
image_obs = np.transpose(self.venv.get_images(), (0, 3, 1, 2))
return image_obs, self.curr_state_obs
# Swap out state for image
def step_wait(self):
obs, rew, done, info = self.venv.step_wait()
self.curr_state_obs = obs
image_obs = np.transpose(self.venv.get_images(), (0, 3, 1, 2))
return (image_obs, self.curr_state_obs), rew, done, info
class InitialController(ActionWrapper):
"""
This environment wrapper moves the subject directly toward the target position at
every step. It can be used to initialise learning without the need for reward shaping.
"""
def __init__(self, env):
super(InitialController, self).__init__(env)
self.base_env = env.unwrapped
def action(self, action):
vec = self.base_env.target_pos - self.base_env.subject_pos
if not ((vec >= self.action_space.low[0]) & (vec <= self.action_space.high[0])).all():
vec /= np.max(np.abs(vec))
vec *= self.action_space.high[0]
full_vec = vec + action[:3]
rot = action[3:]
full_action = np.append(full_vec, rot)
return full_action
|
{"hexsha": "66b21eabe50e9934a4811aa461c0814373767b70", "size": 6412, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/wrappers.py", "max_stars_repo_name": "harry-uglow/Curriculum-Reinforcement-Learning", "max_stars_repo_head_hexsha": "cb050556e1fdc7b7de8d63ad932fc712a35ac144", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-02-02T22:22:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T07:50:45.000Z", "max_issues_repo_path": "envs/wrappers.py", "max_issues_repo_name": "harry-uglow/Deep-RL-Sim2Real", "max_issues_repo_head_hexsha": "cb050556e1fdc7b7de8d63ad932fc712a35ac144", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-01-28T20:45:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T07:58:27.000Z", "max_forks_repo_path": "envs/wrappers.py", "max_forks_repo_name": "harry-uglow/Curriculum-Reinforcement-Learning", "max_forks_repo_head_hexsha": "cb050556e1fdc7b7de8d63ad932fc712a35ac144", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-03-26T15:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T09:48:02.000Z", "avg_line_length": 40.075, "max_line_length": 99, "alphanum_fraction": 0.6464441672, "include": true, "reason": "import numpy", "num_tokens": 1444}
|
'''
Author: S.T. Castle
Created: 2015-03-15
'''
#import math
import numpy as np
from scipy import ndimage
from scipy import stats
import scipy.ndimage.filters
import scipy.linalg
#import skimage.feature
import cv2
from matplotlib import pyplot as plt
def main():
'''
Run the explicit coherence enhancing filter with spatial adaptive
elliptical kernel from F.Li et al. 2012.
'''
# Params.
iters = 80
window_size = 7
sigma = 1 # Standard deviation of initial Gaussian kernel.
rho = 6 # Std dev of Gaussian kernel used to compute structure tensor.
gamma = 0.05
eps = np.spacing(1) # Very small positive number.
filename = 'horse.png'
# Open as grayscale image.
orig_img = cv2.imread(filename, 0)
print 'Opened ' + filename
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
# Convolve image with a Gaussian kernel with standard deviation sigma.
img = scipy.ndimage.filters.gaussian_filter(orig_img, sigma)
for ctr in xrange(iters):
print '----------- iteration ' + str(ctr+1) + ' ---------------'
#img = np.copy(orig_img)
# Convolve image with a Gaussian kernel with standard deviation sigma.
#img = scipy.ndimage.filters.gaussian_filter(img, sigma)
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
#print 'shape of img:',
#print img.shape
# Compute the 2D structure tensor of the image.
# The structure tensor is:
# [j11 j12]
# [j12 j22]
#j11, j12, j22 = skimage.feature.structure_tensor(img, sigma=sigma)
j11, j12, j22 = structure_tensor(img, sigma=sigma)
#print 'j11'
#print j11
#print 'j12'
#print j12
#print 'j22'
#print j22
#print 'shape of j11:',
#print j11.shape
#print 'shape of J:',
#print np.array([[j11,j12],[j12,j22]]).shape
# Compute eigenvalues mu1, mu2 of structure tensor. mu1 >= mu2.
mu1 = (j11 + j22) / 2 + np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
mu2 = (j11 + j22) / 2 - np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
#print 'shape of mu1:',
#print mu1.shape
# Compute corresponding normalized eigenvectors v1, v2.
v1 = np.asarray([ 2*j12,
j22-j11 + np.sqrt((j11-j22)**2 + 4*(j12**2)) ])
# Rearrange axis so that v1 is indexed as (x,y,(eigvector))
v1 = np.rollaxis(v1,0,3)
#print 'mu1'
#print mu1
#print 'mu2'
#print mu2
#print 'v1'
#print v1
#print 'v2'
#print v2
#print 'shape of v1:',
#print v1.shape
#print 'v1[0] =',
#print v1[0]
#print 'v1[0][0] =',
#print v1[0][0]
#print v1
# Compute theta based on the angle of v1 and the positive direction of
# the horizontal axis.
# cos(theta) = x / magnitude.
# If the magnitude is 0, then just try setting theta=0 for now.
print 'Calculating theta...'
theta = np.empty((v1.shape[0], v1.shape[1]))
for i in xrange(v1.shape[0]):
for j in xrange(v1.shape[1]):
v = v1[i][j]
mag = float(magnitude(v))
if mag:
theta[i][j] = np.arccos(v[0]/mag)
else:
theta[i][j] = 0
print 'Done.'
#print 'shape of theta:',
#print theta.shape
# Now that necessary values are calculated, proceed to filtering.
print 'Filtering...'
fimg = np.empty_like(img) # Create a blank array for the filtered image.
rad = window_size/2 # Radius of the filtering window.
sig1 = 10*gamma
# Current pixel is (x1,x2) and neighbor is (y1,y2).
height = img.shape[0]
width = img.shape[1]
for x1 in xrange(height):
for x2 in xrange(width):
eig1 = mu1[x1][x2]
eig2 = mu2[x1][x2]
ang = theta[x1][x2]
sig2 = 10*(gamma+(1-gamma)*np.exp(-1/((eig1-eig2)**2+eps)))
wt_const = 1/(2*np.pi*sig1*sig2) # Constant factor for weighting.
# Add weighted value from neighbor pixel y.
sum = 0
wt_sum = 0 # Sum of the weights for normalization scaling.
for i in xrange(-rad,rad+1):
y1 = x1+i
if (y1 < 0) or (y1 >= height):
continue
for j in xrange(-rad,rad+1):
y2 = x2+j
if (y2 < 0) or (y2 >= width):
continue
# Calculate weight of neighboring position y.
s = (y1-x1)*np.cos(ang) + (y2-x2)*np.sin(ang)
t = -(y1-x1)*np.sin(ang) + (y2-x2)*np.cos(ang)
wt = wt_const * np.exp( -s**2/(2*sig1**2) - t**2/(2*sig2**2) )
sum = sum + wt*img[y1][y2] # Use original image or blurred?
wt_sum = wt_sum + wt
# Set value of this pixel x.
sum = sum * (1.0/wt_sum) # Scale the pixel value.
fimg[x1][x2] = sum
#print x1
print 'Done.'
#orig_img = np.copy(fimg)
img = np.copy(fimg)
# Display original and filtered images.
#plt.subplot(121),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.subplot(122),plt.imshow(fimg, cmap = 'gray')
#plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
#plt.show()
cv2.imwrite('new_image.png',fimg)
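# The per-pixel theta loop above can be vectorised. Note the loop uses
# arccos(v[0]/|v|), which discards the sign of the y-component; the modulo-pi
# form below is the usual convention for line orientations, so the two can
# differ where v[1] < 0.
def theta_vectorised(v1):
    ang = np.arctan2(v1[..., 1], v1[..., 0])  # v1 has shape (H, W, 2)
    return np.mod(ang, np.pi)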
def magnitude(v):
"""Magnitude of a vector."""
return np.sqrt(np.dot(v, v))
# from skimage !!!!
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndimage.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndimage.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
"""Compute structure tensor using sum of squared differences.
The structure tensor A is defined as::
A = [Axx Axy]
[Axy Ayy]
which is approximated by the weighted sum of squared differences in a local
window around each pixel in the image.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Examples
--------
>>> from skimage.feature import structure_tensor
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> Axx
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
#image = _prepare_grayscale_input_2D(image)
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
# structure tensor
Axx = ndimage.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
Axy = ndimage.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)
return Axx, Axy, Ayy
if __name__ == '__main__':
main()
|
{"hexsha": "a78080861ab4e14c3531c836736769622a919864", "size": 8806, "ext": "py", "lang": "Python", "max_stars_repo_path": "coherence-elliptical-kernel/main-iter.py", "max_stars_repo_name": "stcastle/shell-detection", "max_stars_repo_head_hexsha": "cdc49190deae7310db66e56574b6737771821f31", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-01T01:14:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-17T08:49:32.000Z", "max_issues_repo_path": "coherence-elliptical-kernel/main-iter.py", "max_issues_repo_name": "SamTCastle/shell-detection", "max_issues_repo_head_hexsha": "cdc49190deae7310db66e56574b6737771821f31", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coherence-elliptical-kernel/main-iter.py", "max_forks_repo_name": "SamTCastle/shell-detection", "max_forks_repo_head_hexsha": "cdc49190deae7310db66e56574b6737771821f31", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7906137184, "max_line_length": 86, "alphanum_fraction": 0.5424710425, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2414}
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
#===========================================================
# File Name: layers.py
# Author: Xu Zhang, Columbia University
# Creation Date: 09-07-2018
# Last Modified: Fri Sep 7 21:07:05 2018
#
# Usage:
# Description:
#
# Copyright (C) 2018 Xu Zhang
# All rights reserved.
#
# This file is made available under
# the terms of the BSD license (see the COPYING file).
#===========================================================
from __future__ import print_function
import tensorflow as tf
import numpy as np
class retrieval_layer_2:
def __init__(self, bn_node, n_classes):
self.bn_node = bn_node
self.n_classes = n_classes
#weights
self.out = tf.get_variable("retrieval_out", shape=[bn_node, n_classes],
initializer=tf.variance_scaling_initializer())
#bias
self.bout = tf.Variable(tf.zeros([n_classes]))
self.norm_feature = tf.Variable(1.0, tf.float32)
self.norm_weight = tf.Variable(1.0, tf.float32)
self.var_dict = {
'classifier_w': self.out,
'classifier_b': self.bout,
}
def get_output(self, pre_layer, alpha, l2_norm = False, norm_weights = False, learn_norm = False):
if l2_norm:
layer_3 = tf.nn.l2_normalize(pre_layer, dim = 1)
norm_out = tf.nn.l2_normalize(self.out,dim = 0)
out_layer = tf.matmul(layer_3, norm_out)
if learn_norm:
out_layer = self.norm_feature*out_layer + self.bout
else:
out_layer = alpha*out_layer + self.bout
else:
layer_3 = pre_layer
if norm_weights:
norm_out = tf.nn.l2_normalize(self.out, dim = 0)
if learn_norm:
out_layer = self.norm_feature*tf.matmul(pre_layer, norm_out) + self.bout
else:
out_layer = alpha*(tf.matmul(pre_layer, norm_out)) + self.bout
else:
if learn_norm:
out_layer = self.norm_feature*tf.matmul(pre_layer, self.out) + self.bout
else:
out_layer = alpha*tf.matmul(pre_layer, self.out) + self.bout
return out_layer, layer_3
def nca_loss(distance_matrix, one_hot_label):
#pos_dis = tf.reduce_sum(tf.exp(distance_matrix)*one_hot_label, axis = 1)
pos_dis = tf.reduce_sum(distance_matrix*one_hot_label, axis = 1)
#neg_dis = tf.reduce_sum(tf.exp(distance_matrix)*(1.0-one_hot_label), axis = 1)
neg_dis = tf.reduce_max(distance_matrix*(1.0-one_hot_label), axis = 1)
#loss = -1.0*tf.reduce_mean(tf.log(pos_dis/neg_dis))
loss = tf.reduce_mean(neg_dis-pos_dis)
return loss
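# Despite its name, the active branch of nca_loss above is a margin between the
# hardest negative score and the positive score. A tiny NumPy mirror for
# eyeballing values (illustrative only, unused by the training code):
def _nca_loss_numpy_check():
    d = np.array([[0.9, 0.2], [0.1, 0.8]])
    y = np.array([[1.0, 0.0], [0.0, 1.0]])
    pos = (d * y).sum(axis=1)            # score of the true class
    neg = (d * (1.0 - y)).max(axis=1)    # hardest wrong class
    return (neg - pos).mean()            # -0.7 for this toy input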
|
{"hexsha": "a4bd4ff95b6323aac18e6acfab16e91f7134ffe1", "size": 2756, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/layers.py", "max_stars_repo_name": "ColumbiaDVMM/Heated_Up_Softmax_Embedding", "max_stars_repo_head_hexsha": "cb62d28e5faaf7fdb134b31c461125e3fef50d06", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2018-09-13T01:26:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T17:15:50.000Z", "max_issues_repo_path": "tensorflow/layers.py", "max_issues_repo_name": "mangye16/Heated_Up_Softmax_Embedding", "max_issues_repo_head_hexsha": "cb62d28e5faaf7fdb134b31c461125e3fef50d06", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/layers.py", "max_forks_repo_name": "mangye16/Heated_Up_Softmax_Embedding", "max_forks_repo_head_hexsha": "cb62d28e5faaf7fdb134b31c461125e3fef50d06", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-09-13T02:36:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-12T10:55:29.000Z", "avg_line_length": 36.7466666667, "max_line_length": 102, "alphanum_fraction": 0.5874455733, "include": true, "reason": "import numpy", "num_tokens": 676}
|
// Copyright 2018 Your Name <your_email>
#ifndef INCLUDE_HEADER_HPP_
#define INCLUDE_HEADER_HPP_
#include <iostream>
#include <boost/filesystem.hpp>
#include <vector>
#include <string>
#define DIRECTORY 3  // boost::filesystem file_type directory_file
#define COM_FILE 2   // boost::filesystem file_type regular_file
using boost::filesystem::path;
using std::cout;
using std::endl;
using std::vector;
using std::stoi;
using boost::filesystem::directory_iterator;
class analizator {
public:
void print_first()
{
for (unsigned it = 0; it < broker_map.size(); ++it)
{
cout << broker_map[it][0] << " balance_" << broker_map[it][1] <<
"_" << broker_map[it][2];
cout << endl;
}
}
bool checkFile(const path file_n)
{
std::string fileName = file_n.c_str();
if (fileName.find(".old") != fileName.npos){
return false;
}
if (file_n.extension() != ".txt"){
return false;
}
if (fileName.find(balance) != 0) {
return false;
}
if (fileName.find(spacer) == 7) {
for (int i = 8; i < 16; i++) {
if (!checkNumber(fileName.at(i))) {
return false;
}
}
if (fileName.find(spacer, fileName.find(spacer) + 1) == 16) {
for (int i = 17; i < 25; i++) {
if (!checkNumber(fileName.at(i))) {
return false;
}
}
return true;
}
}
return false;
}
void dataBase(path _path) {
std::string __path = _path.filename().c_str();
std::string __parent_name = _path.parent_path().filename().c_str();
vector<std::string> tmp;
tmp.push_back(__parent_name);
tmp.push_back(__path.substr(8, 8));
tmp.push_back(__path.substr(17, 8));
broker_map.push_back(tmp);
}
void print_second()
{
for (unsigned i = 0; i < clear_map.size(); ++i)
cout << "broker:" << clear_map[i][0] <<
" account:" << clear_map[i][1] <<
" files:" << clear_map[i][2] <<
" lastdate:" << clear_map[i][3]
<< endl;
}
void create_clear()
{
for (unsigned it = 0; it < broker_map.size(); it++)
{
unsigned j = 0;
int flag = 0;
for (; j < clear_map.size(); ++j)
{
if ((broker_map[it][0] == clear_map[j][0]) &&
(broker_map[it][1] == clear_map[j][1])) {
flag = 1;
int tmp = stoi(clear_map[j][2]);
tmp++;
clear_map[j][2] = std::to_string(tmp);
if (stoi(broker_map[it][2]) > stoi(clear_map[j][3])) {
clear_map[j][3] = broker_map[it][2];
}
}
}
if (flag == 0) {
vector<std::string> tmp;
tmp.push_back(broker_map[it][0]);
tmp.push_back(broker_map[it][1]);
tmp.push_back("1");
tmp.push_back(broker_map[it][2]);
clear_map.push_back(tmp);
}
}
}
void start(path currentDir)
{
getInfo(currentDir);
print_first();
create_clear();
print_second();
}
void getInfo(path currentDir)
{
for (directory_iterator p(currentDir), end; p != end; p++) {
if (p->status().type() == DIRECTORY) {
getInfo(p->path());
}
if (p->status().type() == COM_FILE) {
if (checkFile(p->path().filename()) == true)
{
dataBase(p->path());
}
}
}
}
int number_of(std::string broker, int64_t ttf)
{
for (unsigned i = 0; i < clear_map.size(); ++i){
if ((clear_map[i][0] == broker) && (stoi(clear_map[i][1]) == ttf))
{
return std::stoi(clear_map[i][2]);
}
}
return 0;
}
bool checkNumber(char str) {
if ((str >= '0') && (str <= '9'))
{
return true;
} else {
return false;
}
}
const char* balance = "balance";
const char* spacer = "_";
path parent_dir;
vector<vector<std::string>> clear_map;
vector<vector<std::string>> broker_map;
};
#endif // INCLUDE_HEADER_HPP_
|
{"hexsha": "db03c42c2478800918ba605028a0f49b0ce1063b", "size": 4576, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/header.hpp", "max_stars_repo_name": "Darioshka/lab_04", "max_stars_repo_head_hexsha": "91cdb16431394359c2afa42c71b321acbd3a80ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/header.hpp", "max_issues_repo_name": "Darioshka/lab_04", "max_issues_repo_head_hexsha": "91cdb16431394359c2afa42c71b321acbd3a80ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/header.hpp", "max_forks_repo_name": "Darioshka/lab_04", "max_forks_repo_head_hexsha": "91cdb16431394359c2afa42c71b321acbd3a80ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0769230769, "max_line_length": 78, "alphanum_fraction": 0.4462412587, "num_tokens": 1054}
|
import numpy as np
import matplotlib.pyplot as plt
rau = np.logspace(-1, 3, 1000)
# Isella et al. 2009
Rt = [55., 21., 86., 28., 21., 110., 60., 25., 43., 66., 20.]
St = [10., 608., 1.5, 13., 80., 4., 31., 58., 12., 4.7, 50.]
gam = [-0.3, -0.5, 0.8, 0.0, -0.3, 0.7, -0.8, -0.1, 0.8, 0.5, 0.1]
bmaj = np.array([1.05, 0.43, 0.82, 0.80, 0.46, 0.87, 0.83, 0.89, 0.82, 1.42, 1.45])
bmin = np.array([0.72, 0.27, 0.60, 0.58, 0.34, 0.65, 0.70, 0.60, 0.69, 0.85, 0.91])
fwhm = 140.*np.sqrt(bmaj*bmin)
sig = np.zeros((len(Rt), len(rau)))
plt.axis([20, 500, 1e-4, 10])
for i in range(len(Rt)):
sig[i,:] = St[i] * (Rt[i]/rau)**gam[i] * \
np.exp( -1. * ((rau/Rt[i])**(2.-gam[i])-1.) / (2.*(2.-gam[i])))
incond = (rau >= fwhm[i])
plt.loglog(rau[incond], 0.01*sig[i,incond])
plt.loglog(rau, 1e-1*(rau/100.)**(-3.), '--')
plt.show()
# very crudely, see things like 1/r**(3/2) to 1/r**10
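# For reference, the loop above implements the similarity-solution profile
#   Sigma(r) = Sigma_t * (R_t / r)**gamma
#              * exp(-((r / R_t)**(2 - gamma) - 1) / (2 * (2 - gamma)))
# i.e. \Sigma(r) = \Sigma_t (R_t/r)^{\gamma}
#      \exp[-((r/R_t)^{2-\gamma} - 1)/(2(2-\gamma))],
# plotted only outside each source's beam FWHM.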
|
{"hexsha": "619d39c8d4e4ebc0cf8b29fad3b36f1880d5dbbf", "size": 912, "ext": "py", "lang": "Python", "max_stars_repo_path": "sigmas_profiles.py", "max_stars_repo_name": "seanandrews/ARAA", "max_stars_repo_head_hexsha": "6c95f88f5619642b6914c611ba6c902b5412ab29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sigmas_profiles.py", "max_issues_repo_name": "seanandrews/ARAA", "max_issues_repo_head_hexsha": "6c95f88f5619642b6914c611ba6c902b5412ab29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sigmas_profiles.py", "max_forks_repo_name": "seanandrews/ARAA", "max_forks_repo_head_hexsha": "6c95f88f5619642b6914c611ba6c902b5412ab29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4193548387, "max_line_length": 83, "alphanum_fraction": 0.5054824561, "include": true, "reason": "import numpy", "num_tokens": 479}
|
#!/usr/bin/python
#
# Determine signal strength upper limit for Dark Matter Z' mediator simplified model
# based on the ATLAS Run 2 dilepton resonance search
#
import sys, argparse, os, ROOT
import numpy as np
def getMuLimit(x = np.empty(0), mediator_type = "V"):
argParser = argparse.ArgumentParser( description = "DM Simp dilepton resonance interpretation" )
argParser.add_argument("--mZPrime", help = "Z\' mediator mass [GeV]", type = float)
argParser.add_argument("--mDM", help = "Dark Matter mass [GeV]", type = float)
argParser.add_argument("--g_q", help = "Z\' - quark coupling strength", type = float)
argParser.add_argument("--g_DM", help = "Z\' - dark matter coupling strength", type = float)
argParser.add_argument("--g_l", help = "Z\' - charged lepton coupling strength", type = float)
argParser.add_argument("--mediator_type", help = "Mediator Type - vector (V) or axial vector (A) ", type = str, choices = ["V", "A"] )
if len(sys.argv) > 1:
args = argParser.parse_args()
mZPrime_all = np.array([args.mZPrime])
mDM_all = np.array([args.mDM])
g_q_all = np.array([args.g_q])
g_DM_all = np.array([args.g_DM])
g_l_all = np.array([args.g_l])
mediator_type = args.mediator_type
elif x.size > 0:
if mediator_type != "V" and mediator_type != "A":
print("Error. Mediator type is {} but must be either 'A' (axial vector) or 'V' (vector).".format(mediator_type))
return -1.0
mZPrime_all = x[:,0]
mDM_all = x[:,1]
# tmp hack
g_q_all = [] # x[:,2]
g_DM_all = [] # x[:,3]
g_l_all = [] # x[:,4]
# tmp hack end
else:
print("Error. Input parameters missing.")
return -1.0
#
# ToDo:
#
# * Allow to vary g_DM
#
# * Make sure precision is sufficient for all parameters
#
#
#
#
# Turn the following part into a parallel work done on the batch system.
#
# * create a working directory where all results go,
# adjust scripts used below accordingly
# * loop over all rows of x
# * keep track of the created directories
# * submit the jobs (each job running the 3 shell scripts)
# * Maybe include the GetTheoryXSecAndWidth.C and run_obsLimit.sh steps in the batch jobs as well
# * have another loop checking all N seconds whether all jobs are done,
# then collect the limits and return them
#
#
mu_limit_all = np.array([])
for iParameterPoint in range(len(mZPrime_all)):
mZPrime = mZPrime_all[iParameterPoint]
mDM = mDM_all[iParameterPoint]
# tmp hack
g_q = 0.1 # g_q_all[iParameterPoint]
g_DM = 1.0 # g_DM_all[iParameterPoint]
g_l = 0.01 # g_l_all[iParameterPoint]
# tmp hack end
mZPrime_rounded = round(mZPrime * 1e-3, 2) * 1e3
mDM_rounded = round(mDM * 1e-3, 2) * 1e3
options = "{} {} {} {} {} {}".format(mZPrime_rounded * 1e-3, mDM_rounded * 1e-3, g_q, g_DM, g_l, mediator_type)
os.system("./run_EVNT_DM_singlePoint.sh {}".format(options))
os.system("./run_DAOD_DM_singlePoint.sh {}".format(options))
os.system("./run_MCVAL_DM_singlePoint.sh {}".format(options))
mZp__dir = mZPrime_rounded * 1e-3
mDM__dir = mDM_rounded * 1e-3
g_q__dir = g_q
g_l__dir = g_l
directory = "run_DMs{}_ee_mR{}_mDM{}_gQ{}_gL{}".format(mediator_type, mZp__dir, mDM__dir, g_q__dir, g_l__dir)
directory = directory.replace(".", "p")
os.system("root -l -q -b \"GetTheoryXSecAndWidth.C(\\\"{}/MCVAL/events.root\\\", \\\"{}/theoryResults.root\\\")\"".format(directory, directory))
theoryResults = ROOT.TFile("{}/theoryResults.root".format(directory), "READ")
fidXS_ee = theoryResults.Get("fidXS").GetVal() * 1e3 # pb -> fb
fidXS_ll = 2.0 * fidXS_ee
width = theoryResults.Get("width").GetVal()
width_perCent = width / mZPrime * 1e2
obsLimitOutputFileBaseName="output/obsLimit_{}_CL95".format(directory.replace("run_", "").replace("_ee", ""))
os.system("./run_obsLimit.sh {} {} {}".format(mZPrime, width_perCent, obsLimitOutputFileBaseName))
obsLimitFile = open("DileptonReinterpretationProj/{}.txt".format(obsLimitOutputFileBaseName), "r")
limit_obs_str = obsLimitFile.readline()
limit_obs = float("".join(a for a in limit_obs_str if a.isalnum() or a == "."))
mu_limit = limit_obs / fidXS_ll
mu_limit_all = np.append( mu_limit_all, np.array([mu_limit]) )
print(mu_limit_all)
return mu_limit_all
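# Hedged sketch of the batch-parallel driver outlined in the ToDo comment
# above; ProcessPoolExecutor stands in for the real batch system, and
# run_point is a hypothetical per-point worker that would run the three shell
# scripts and return one limit.
def run_points_in_parallel(points, run_point, max_workers=4):
    from concurrent.futures import ProcessPoolExecutor
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        return np.array(list(pool.map(run_point, points)))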
#--------------------------------------------------
if __name__ == "__main__":
getMuLimit()
|
{"hexsha": "aabeef609b57305da42cb5c84a181a2f45771369", "size": 4993, "ext": "py", "lang": "Python", "max_stars_repo_path": "excursion/testcases/madgraph5atlasval/getMuLimit.py", "max_stars_repo_name": "irinaespejo/excursion", "max_stars_repo_head_hexsha": "c5a5c6d882b8dd1008fbabf1a3b81eaba382bef6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-09T13:19:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-20T14:30:20.000Z", "max_issues_repo_path": "excursion/testcases/madgraph5atlasval/getMuLimit.py", "max_issues_repo_name": "irinaespejo/excursion", "max_issues_repo_head_hexsha": "c5a5c6d882b8dd1008fbabf1a3b81eaba382bef6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "excursion/testcases/madgraph5atlasval/getMuLimit.py", "max_forks_repo_name": "irinaespejo/excursion", "max_forks_repo_head_hexsha": "c5a5c6d882b8dd1008fbabf1a3b81eaba382bef6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-11-02T10:35:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T18:35:05.000Z", "avg_line_length": 38.1145038168, "max_line_length": 152, "alphanum_fraction": 0.5870218306, "include": true, "reason": "import numpy", "num_tokens": 1368}
|
import numpy as np
def lanc(numwt, haf):
"""
Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB
(1/4 power, 1/2 amplitude) point at haf
Parameters
----------
numwt : int
number of points
haf : float
frequency (in 'cpi' of -6dB point, 'cpi' is cycles per interval.
For hourly data cpi is cph,
Examples
--------
>>> from oceans.filters import lanc
>>> import matplotlib.pyplot as plt
>>> t = np.arange(500) # Time in hours.
>>> h = 2.5 * np.sin(2 * np.pi * t / 12.42)
>>> h += 1.5 * np.sin(2 * np.pi * t / 12.0)
>>> h += 0.3 * np.random.randn(len(t))
>>> wt = lanc(96+1+96, 1./40)
>>> low = np.convolve(wt, h, mode='same')
>>> high = h - low
>>> fig, (ax0, ax1) = plt.subplots(nrows=2)
>>> _ = ax0.plot(high, label='high')
>>> _ = ax1.plot(low, label='low')
>>> _ = ax0.legend(numpoints=1)
>>> _ = ax1.legend(numpoints=1)
"""
numwt += 1
# Filter weights.
ii = np.arange(numwt)
wt = 0.5 * (1.0 + np.cos(np.pi * ii * 1.0 / numwt))
ii = np.arange(1, numwt)
xx = np.pi * 2 * haf * ii
wt[1 : numwt + 1] = wt[1 : numwt + 1] * np.sin(xx) / xx
summ = wt[1 : numwt + 1].sum()
xx = wt.sum() + summ
wt /= xx
return np.r_[wt[::-1], wt[1 : numwt + 1]]
def smoo1(datain, window_len=11, window="hanning"):
"""
Smooth the data using a window with requested size.
Parameters
----------
datain : array_like
input series
window_len : int
size of the smoothing window; should be an odd integer
window : str
window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
flat window will produce a moving average smoothing.
Returns
-------
data_out : array_like
smoothed signal
See Also
--------
scipy.signal.lfilter
Notes
-----
original from: https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal (with
the window size) in both ends so that transient parts are minimized in the
beginning and end part of the output signal.
Examples
--------
>>> from oceans.filters import smoo1
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> time = np.linspace( -4, 4, 100 )
>>> series = np.sin(time)
>>> noise_series = series + np.random.randn( len(time) ) * 0.1
>>> data_out = smoo1(series)
>>> ws = 31
>>> ax = plt.subplot(211)
>>> _ = ax.plot(np.ones(ws))
>>> windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
>>> for w in windows[1:]:
... _ = eval('plt.plot(np.' + w + '(ws) )')
>>> _ = ax.axis([0, 30, 0, 1.1])
>>> leg = ax.legend(windows)
>>> _ = plt.title('The smoothing windows')
>>> ax = plt.subplot(212)
>>> l1, = ax.plot(series)
>>> l2, = ax.plot(noise_series)
>>> for w in windows:
... _ = plt.plot(smoo1(noise_series, 10, w))
>>> l = ['original signal', 'signal with noise']
>>> l.extend(windows)
>>> leg = ax.legend(l)
>>> _ = plt.title('Smoothing a noisy signal')
TODO: window parameter can be the window itself (i.e. an array)
instead of a string.
"""
datain = np.asarray(datain)
if datain.ndim != 1:
raise ValueError("Smooth only accepts 1 dimension arrays.")
if datain.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return datain
if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
msg = "Window must be is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" # noqa
raise ValueError(msg)
s = np.r_[
2 * datain[0] - datain[window_len:1:-1],
datain,
2 * datain[-1] - datain[-1:-window_len:-1],
]
if window == "flat": # Moving average.
w = np.ones(window_len, "d")
else:
w = eval("np." + window + "(window_len)")
data_out = np.convolve(w / w.sum(), s, mode="same")
return data_out[window_len - 1 : -window_len + 1]
def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14):
"""
Calculates the smoothed array 'As' from the original array 'A' using the
specified window of type 'kind' and shape ('hei', 'wid').
Usage:
As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14)
Parameters
----------
A : 2D array
Array to be smoothed.
hei : integer
Window height. Must be odd and greater than or equal to 3.
wid : integer
Window width. Must be odd and greater than or equal to 3.
kind : string, optional
Refer to Numpy for details about each window type.
badflag : float, optional
The bad data flag. Elements of the input array 'A' holding this value are ignored.
beta : float, optional
Shape parameter for the kaiser window.
Returns
-------
As : 2D array
The smoothed array.
André Palóczy Filho (paloczy@gmail.com)
April 2012
"""
# Checking window type and dimensions
kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"]
if kind not in kinds:
raise ValueError("Invalid window type requested: %s" % kind)
if (np.mod(hei, 2) == 0) or (np.mod(wid, 2) == 0):
raise ValueError("Window dimensions must be odd")
if (hei <= 1) or (wid <= 1):
raise ValueError("Window shape must be (3,3) or greater")
# Creating the 2D window.
if kind == "kaiser": # If the window kind is kaiser (beta is required).
wstr = "np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))"
# If the window kind is hann, hamming, blackman or bartlett
# (beta is not required).
else:
if kind == "hann":
# Converting the correct window name (Hann) to the numpy function
# name (numpy.hanning).
kind = "hanning"
# Computing outer product to make a 2D window out of the original 1d
# windows.
# TODO: Get rid of this evil eval.
wstr = "np.outer(np." + kind + "(hei), np." + kind + "(wid))"
wdw = eval(wstr)
A = np.asanyarray(A)
Fnan = np.isnan(A)
imax, jmax = A.shape
As = np.NaN * np.ones((imax, jmax))
for i in range(imax):
for j in range(jmax):
# Default window parameters.
wupp = 0
wlow = hei
wlef = 0
wrig = wid
lh = hei // 2  # integer half-window so the slice indices below stay ints
lw = wid // 2
# Default array ranges (functions of the i, j indices).
upp = i - lh
low = i + lh + 1
lef = j - lw
rig = j + lw + 1
# Tiling window and input array at the edges.
# Upper edge.
if upp < 0:
wupp = wupp - upp
upp = 0
# Left edge.
if lef < 0:
wlef = wlef - lef
lef = 0
# Bottom edge.
if low > imax:
ex = low - imax
wlow = wlow - ex
low = imax
# Right edge.
if rig > jmax:
ex = rig - jmax
wrig = wrig - ex
rig = jmax
# Computing smoothed value at point (i, j).
Ac = A[upp:low, lef:rig].copy()  # copy: the masking below must not modify A
wdwc = wdw[wupp:wlow, wlef:wrig].copy()  # copy: keep the shared window intact
fnan = np.isnan(Ac)
Ac[fnan] = 0
wdwc[fnan] = 0 # Eliminating NaNs from mean computation.
fbad = Ac == badflag
wdwc[fbad] = 0 # Eliminating bad data from mean computation.
a = Ac * wdwc
As[i, j] = a.sum() / wdwc.sum()
# Assigning NaN to the positions holding NaNs in the original array.
As[Fnan] = np.NaN
return As
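# Illustrative use of smoo2 (values arbitrary): smooth a noisy 2D field with a
# 5x5 Hann window.
# >>> field = np.random.randn(64, 64)
# >>> smoothed = smoo2(field, 5, 5, kind='hann')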
def weim(x, N, kind="hann", badflag=-9999, beta=14):
"""
Calculates the smoothed array 'xs' from the original array 'x' using the
specified window of type 'kind' and size 'N'. 'N' must be an odd number.
Usage:
xs = weim(x, N, kind='hann', badflag=-9999, beta=14)
Parameters
----------
x : 1D array
Array to be smoothed.
N : integer
Window size. Must be odd.
kind : string, optional
Refer to Numpy for details about each window type.
badflag : float, optional
The bad data flag. Elements of the input array 'A' holding this
value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the
kaiser window, this parameter does nothing.
Returns
-------
xs : 1D array
The smoothed array.
---------------------------------------
André Palóczy Filho (paloczy@gmail.com) June 2012
"""
# Checking window type and dimensions.
kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"]
if kind not in kinds:
raise ValueError("Invalid window type requested: %s" % kind)
if np.mod(N, 2) == 0:
raise ValueError("Window size must be odd")
# Creating the window.
if kind == "kaiser": # If the window kind is kaiser (beta is required).
wstr = "np.kaiser(N, beta)"
# If the window kind is hann, hamming, blackman or bartlett (beta is not
# required).
else:
if kind == "hann":
# Converting the correct window name (Hann) to the numpy function
# name (numpy.hanning).
kind = "hanning"
# Computing outer product to make a 2D window out of the original
# 1D windows.
wstr = "np." + kind + "(N)"
# FIXME: Do not use `eval`.
w = eval(wstr)
x = np.asarray(x).flatten()
Fnan = np.isnan(x).flatten()
ln = (N - 1) // 2  # integer half-window for slicing
lx = x.size
lf = lx - ln
xs = np.NaN * np.ones(lx)
# Eliminating bad data from mean computation.
fbad = x == badflag
x[fbad] = np.nan
for i in range(lx):
if i <= ln:
xx = x[: ln + i + 1]
ww = w[ln - i :]
elif i >= lf:
xx = x[i - ln :]
ww = w[: lf - i - 1]
else:
xx = x[i - ln : i + ln + 1]
ww = w.copy()
# Counting only NON-NaNs, both in the input array and in the window
# points.
f = ~np.isnan(xx)
xx = xx[f]
ww = ww[f]
# Thou shalt not divide by zero.
if f.sum() == 0:
xs[i] = x[i]
else:
xs[i] = np.sum(xx * ww) / np.sum(ww)
# Assigning NaN to the positions holding NaNs in the input array.
xs[Fnan] = np.nan
return xs
def medfilt1(x, L=3):
"""
    Median filter for 1D arrays.
    Applies a discrete one-dimensional median filter with window length `L`
    to the input vector `x`, producing a vector the same size as `x`. The
    boundaries are handled by shrinking `L` at the edges; no data outside
    of `x` is used in producing the median filtered output.
Parameters
----------
x : array_like
Input 1D data
L : integer
Window length
Returns
-------
xout : array_like
Numpy 1d array of median filtered result; same size as x
Examples
--------
>>> from oceans.filters import medfilt1
>>> import matplotlib.pyplot as plt
>>> # 100 pseudo-random integers ranging from 1 to 100, plus three large
>>> # outliers for illustration.
>>> x = np.r_[np.ceil(np.random.rand(25)*100), [1000],
... np.ceil(np.random.rand(25)*100), [2000],
... np.ceil(np.random.rand(25)*100), [3000],
... np.ceil(np.random.rand(25)*100)]
>>> L = 2
>>> xout = medfilt1(x=x, L=L)
>>> ax = plt.subplot(211)
>>> l1, l2 = ax.plot(x), ax.plot(xout)
>>> ax.grid(True)
>>> y1min, y1max = np.min(xout) * 0.5, np.max(xout) * 2.0
>>> leg1 = ax.legend(['x (pseudo-random)','xout'])
>>> t1 = ax.set_title('''Median filter with window length %s.
... Removes outliers, tracks remaining signal)''' % L)
>>> L = 103
>>> xout = medfilt1(x=x, L=L)
>>> ax = plt.subplot(212)
>>> l1, l2, = ax.plot(x), ax.plot(xout)
>>> ax.grid(True)
>>> y2min, y2max = np.min(xout) * 0.5, np.max(xout) * 2.0
>>> leg2 = ax.legend(["Same x (pseudo-random)", "xout"])
>>> t2 = ax.set_title('''Median filter with window length %s.
... Removes outliers and noise''' % L)
>>> ax = plt.subplot(211)
>>> lims1 = ax.set_ylim([min(y1min, y2min), max(y1max, y2max)])
>>> ax = plt.subplot(212)
>>> lims2 = ax.set_ylim([min(y1min, y2min), max(y1max, y2max)])
"""
    xin = np.atleast_1d(np.asanyarray(x))
    N = len(xin)
    L = int(L)
    if L % 2 == 0:
        L += 1  # Force L odd so the median requires no interpolation.
    if N < 2:
        raise ValueError("Input sequence length must be >= 2.")
    if L < 2:
        raise ValueError("Input filter window length must be >= 2.")
    if L > N:
        msg = "Input filter window length must be shorter than series: L = {:d}, len(x) = {:d}".format  # noqa
        raise ValueError(msg(L, N))
    if xin.ndim > 1:
        msg = "Input sequence has to be 1d: ndim = {}".format
        raise ValueError(msg(xin.ndim))
    xout = np.zeros_like(xin) + np.nan
Lwing = (L - 1) // 2
# NOTE: Use np.ndenumerate in case I expand to +1D case
for i, xi in enumerate(xin):
if i < Lwing: # Left boundary.
xout[i] = np.median(xin[0 : i + Lwing + 1]) # (0 to i + Lwing)
elif i >= N - Lwing: # Right boundary.
xout[i] = np.median(xin[i - Lwing : N]) # (i-Lwing to N-1)
else: # Middle (N-2*Lwing input vector and filter window overlap).
xout[i] = np.median(xin[i - Lwing : i + Lwing + 1])
# (i-Lwing to i+Lwing)
return xout
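# For comparison (illustrative sketch, assumes SciPy is available):
# scipy.signal.medfilt computes the same median filter but zero-pads at
# the boundaries instead of shrinking the window, so only the interior
# values are expected to match medfilt1.
def _medfilt1_vs_scipy(x, L=3):
    from scipy.signal import medfilt
    xin = np.asarray(x, dtype=float)
    return medfilt(xin, kernel_size=L), medfilt1(xin, L=L)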
def fft_lowpass(signal, low, high):
"""
    Performs a low-pass filter on the series.
    `low` and `high` specify the filter boundaries: frequencies above
    `low` are removed, frequencies below `high` pass unchanged, and the
    response ramps linearly from 1 to 0 between `high` and `low`.
>>> from oceans.filters import fft_lowpass
>>> import matplotlib.pyplot as plt
>>> t = np.arange(500) # Time in hours.
>>> x = 2.5 * np.sin(2 * np.pi * t / 12.42)
>>> x += 1.5 * np.sin(2 * np.pi * t / 12.0)
>>> x += 0.3 * np.random.randn(len(t))
>>> filtered = fft_lowpass(x, low=1/30, high=1/40)
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(t, x, label='original')
>>> l2, = ax.plot(t, filtered, label='filtered')
>>> legend = ax.legend()
"""
    result = np.fft.rfft(signal)
    # Use rfftfreq so the Nyquist frequency is positive for even-length
    # inputs (fftfreq puts -0.5 there, which would escape the cutoff).
    freq = np.fft.rfftfreq(len(signal))
factor = np.ones_like(freq)
factor[freq > low] = 0.0
    sl = np.logical_and(high < freq, freq < low)
    # Build a linear ramp from 1 to 0 (exclusive at both ends) across the
    # transition band and insert it into the response.
    a = np.arange(sl.sum() + 2, dtype=float)[::-1]
    factor[sl] = (a / a[0])[1:-1]
result = result * factor
return np.fft.irfft(result, len(signal))
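# Illustrative sketch of the transfer function fft_lowpass builds above:
# ones below `high`, zeros above `low`, and a linear ramp in between.
# The helper name and default values here are hypothetical.
def _fft_lowpass_response(n=500, low=1 / 30, high=1 / 40):
    freq = np.fft.rfftfreq(n)
    factor = np.ones_like(freq)
    factor[freq > low] = 0.0
    sl = np.logical_and(high < freq, freq < low)
    ramp = np.arange(sl.sum() + 2, dtype=float)[::-1]
    factor[sl] = (ramp / ramp[0])[1:-1]
    return freq, factor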
def md_trenberth(x):
"""
    Returns the filtered series using the Trenberth filter as described
    in Monthly Weather Review, vol. 112, No. 2, Feb 1984.
    Input data: series x of dimension 1xn (n must be at least 11).
    Output data: y = md_trenberth(x) where y has dimension 1x(n-10).
Examples
--------
>>> from oceans.filters import md_trenberth
>>> import matplotlib.pyplot as plt
>>> t = np.arange(500) # Time in hours.
>>> x = 2.5 * np.sin(2 * np.pi * t / 12.42)
>>> x += 1.5 * np.sin(2 * np.pi * t / 12.0)
>>> x += 0.3 * np.random.randn(len(t))
>>> filtered = md_trenberth(x)
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(t, x, label='original')
    >>> pad = [np.nan]*5
>>> l2, = ax.plot(t, np.r_[pad, filtered, pad], label='filtered')
>>> legend = ax.legend()
"""
x = np.asanyarray(x)
weight = np.array(
[
0.02700,
0.05856,
0.09030,
0.11742,
0.13567,
0.14210,
0.13567,
0.11742,
0.09030,
0.05856,
0.02700,
],
)
sz = len(x)
y = np.zeros(sz - 10)
    for i in range(5, sz - 5):
        # Weighted sum over the 11-point window centred at i.
        y[i - 5] = np.sum(x[i - 5 : i + 6] * weight)
return y
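# Equivalent vectorized form (illustrative sketch, not library API): the
# explicit loop in md_trenberth is a 'valid'-mode convolution with the
# symmetric 11-point Trenberth weights, so np.convolve reproduces it
# without Python loops.
def _md_trenberth_convolve(x):
    weight = np.array(
        [0.02700, 0.05856, 0.09030, 0.11742, 0.13567, 0.14210,
         0.13567, 0.11742, 0.09030, 0.05856, 0.02700],
    )
    return np.convolve(np.asanyarray(x), weight, mode="valid")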
def pl33tn(x, dt=1.0, T=33.0, mode="valid"):
"""
Computes low-passed series from `x` using pl33 filter, with optional
sample interval `dt` (hours) and filter half-amplitude period T (hours)
as input for non-hourly series.
    The PL33 filter is described on p. 21, Rosenfeld (1983), WHOI
    Technical Report 85-35. Filter half-amplitude period = 33 hrs.,
    half-power period = 38 hrs. The filter is applied with `np.convolve`
    using the given `mode`; with the default `mode="valid"` the output is
    shorter than `x` by one filter length minus one sample, so pad when
    plotting against the original time base (see the example below).
    Assumes the length of x is greater than 67.
Examples
--------
>>> from oceans.filters import pl33tn
>>> import matplotlib.pyplot as plt
>>> t = np.arange(500) # Time in hours.
>>> x = 2.5 * np.sin(2 * np.pi * t / 12.42)
>>> x += 1.5 * np.sin(2 * np.pi * t / 12.0)
>>> x += 0.3 * np.random.randn(len(t))
>>> filtered_33 = pl33tn(x, dt=4.0) # 33 hr filter
>>> filtered_33d3 = pl33tn(x, dt=4.0, T=72.0) # 3 day filter
>>> fig, ax = plt.subplots()
>>> l1, = ax.plot(t, x, label='original')
    >>> pad = [np.nan]*8
    >>> l2, = ax.plot(t, np.r_[pad, filtered_33, pad], label='33 hours')
    >>> pad = [np.nan]*17
>>> l3, = ax.plot(t, np.r_[pad, filtered_33d3, pad], label='3 days')
>>> legend = ax.legend()
"""
pl33 = np.array(
[
-0.00027,
-0.00114,
-0.00211,
-0.00317,
-0.00427,
-0.00537,
-0.00641,
-0.00735,
-0.00811,
-0.00864,
-0.00887,
-0.00872,
-0.00816,
-0.00714,
-0.00560,
-0.00355,
-0.00097,
+0.00213,
+0.00574,
+0.00980,
+0.01425,
+0.01902,
+0.02400,
+0.02911,
+0.03423,
+0.03923,
+0.04399,
+0.04842,
+0.05237,
+0.05576,
+0.05850,
+0.06051,
+0.06174,
+0.06215,
+0.06174,
+0.06051,
+0.05850,
+0.05576,
+0.05237,
+0.04842,
+0.04399,
+0.03923,
+0.03423,
+0.02911,
+0.02400,
+0.01902,
+0.01425,
+0.00980,
+0.00574,
+0.00213,
-0.00097,
-0.00355,
-0.00560,
-0.00714,
-0.00816,
-0.00872,
-0.00887,
-0.00864,
-0.00811,
-0.00735,
-0.00641,
-0.00537,
-0.00427,
-0.00317,
-0.00211,
-0.00114,
-0.00027,
],
)
    _dt = np.linspace(-33, 33, 67)
    # Rescale the sample interval so the interpolated filter keeps the
    # requested half-amplitude period T (in hours).
    dt = float(dt) * (33.0 / T)
    filter_time = np.arange(0.0, 33.0, dt, dtype="d")
    filter_time = np.hstack((-filter_time[-1:0:-1], filter_time))
    pl33 = np.interp(filter_time, _dt, pl33)
    pl33 /= pl33.sum()
xf = np.convolve(x, pl33, mode=mode)
return xf
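# Illustrative helper (hypothetical, not library API): with mode="valid",
# pl33tn trims half a filter length from each end; this computes that pad
# size, matching the NaN padding used in the docstring example above
# (8 samples for dt=4, T=33; 17 samples for dt=4, T=72).
def _pl33tn_pad(dt=1.0, T=33.0):
    dt = float(dt) * (33.0 / T)
    return np.arange(0.0, 33.0, dt).size - 1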
|
{"hexsha": "5e5915684ffcce5d999eda9fbdd61b43e78f8c36", "size": 19922, "ext": "py", "lang": "Python", "max_stars_repo_path": "oceans/filters.py", "max_stars_repo_name": "Michelly-GC/python-oceans", "max_stars_repo_head_hexsha": "7b0e12a00cd125683e7b89acea0cdd67f7729a43", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2015-05-06T02:00:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T12:16:16.000Z", "max_issues_repo_path": "oceans/filters.py", "max_issues_repo_name": "Michelly-GC/python-oceans", "max_issues_repo_head_hexsha": "7b0e12a00cd125683e7b89acea0cdd67f7729a43", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2015-02-18T14:04:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:16:46.000Z", "max_forks_repo_path": "oceans/filters.py", "max_forks_repo_name": "Michelly-GC/python-oceans", "max_forks_repo_head_hexsha": "7b0e12a00cd125683e7b89acea0cdd67f7729a43", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-03-01T23:17:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T12:30:48.000Z", "avg_line_length": 29.5578635015, "max_line_length": 110, "alphanum_fraction": 0.5197771308, "include": true, "reason": "import numpy", "num_tokens": 5945}
|
import numpy as np
import os
from sklearn import ensemble, feature_extraction, preprocessing
from otto_utils import consts, utils
MODEL_NAME = 'model_02_random_forest'
MODE = 'holdout' # cv|submission|holdout
# import data
train, labels, test, _, _ = utils.load_data()
# transform counts to TFIDF features
tfidf = feature_extraction.text.TfidfTransformer(smooth_idf=False)
train = tfidf.fit_transform(train).toarray()
test = tfidf.transform(test).toarray()
# encode labels
lbl_enc = preprocessing.LabelEncoder()
labels = lbl_enc.fit_transform(labels)
# train classifier (note: despite the model name, this is an
# ExtraTreesClassifier, i.e. extremely randomized trees, not a vanilla
# random forest)
clf = ensemble.ExtraTreesClassifier(n_jobs=4, n_estimators=2000, max_features=20, min_samples_split=3,
                                    bootstrap=False, verbose=3, random_state=23)
if MODE == 'cv':
    scores, predictions = utils.make_blender_cv(clf, train, labels, calibrate=False)
    print('CV:', scores, 'Mean log loss:', np.mean(scores))
    utils.write_blender_data(consts.BLEND_PATH, MODEL_NAME + '.csv', predictions)
elif MODE == 'submission':
    clf.fit(train, labels)
    predictions = clf.predict_proba(test)
    utils.save_submission(consts.DATA_SAMPLE_SUBMISSION_PATH,
                          os.path.join(consts.ENSEMBLE_PATH, MODEL_NAME + '.csv'),
                          predictions)
elif MODE == 'holdout':
    score = utils.hold_out_evaluation(clf, train, labels, calibrate=False)
    print('Log loss:', score)
else:
    print('Unknown mode')
|
{"hexsha": "52339a2d7c8253906fd34a61bc45232f215bc9a0", "size": 1446, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/external/repositories/156296/kaggle_otto-master/otto/model/model_02_random_forest/random_forest.py", "max_stars_repo_name": "Keesiu/meta-kaggle", "max_stars_repo_head_hexsha": "87de739aba2399fd31072ee81b391f9b7a63f540", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2015-05-21T00:35:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T03:32:49.000Z", "max_issues_repo_path": "otto/model/model_02_random_forest/random_forest.py", "max_issues_repo_name": "KartikPadmanabhan/kaggle_otto", "max_issues_repo_head_hexsha": "2b7861d052529d7a3f78c053088450f15278ac42", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "otto/model/model_02_random_forest/random_forest.py", "max_forks_repo_name": "KartikPadmanabhan/kaggle_otto", "max_forks_repo_head_hexsha": "2b7861d052529d7a3f78c053088450f15278ac42", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2015-05-20T23:24:05.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-02T12:58:34.000Z", "avg_line_length": 33.6279069767, "max_line_length": 102, "alphanum_fraction": 0.7130013831, "include": true, "reason": "import numpy", "num_tokens": 340}
|