| text | meta |
|---|---|
import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
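# Timer callback: copy the face scalars for the current time step onto both
# meshes, then advance (and wrap) the shared time index.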
def draw():
global t
global sst
global ugrid_sst
global sst_cube
global ugrid_sst_cube
global p
sst.cell_arrays["faces"] = ugrid_sst[t].get_array("faces")
sst_cube.cell_arrays["faces"] = ugrid_sst_cube[t].get_array("faces")
    t = (t + 1) % N_steps  # advance and wrap the shared time index
N_steps = 12
ugrid_sst = {t: pv.read(f"ugrid_sst_t{t}.vtk") for t in range(N_steps)}
sst = pv.read("ugrid_sst_t0.vtk")
ugrid_sst_cube = {t: pv.read(f"ugrid_cube_sst_t{t}.vtk") for t in range(N_steps)}
sst_cube = pv.read("ugrid_cube_sst_t0.vtk")
t = 0
cmap = "coolwarm" # colorcet (perceptually accurate) color maps
p = pv.BackgroundPlotter(shape=(1, 2))
p.subplot(0, 0)
p.add_text("C48 cube-sphere time-series", font_size=10, shadow=True)
p.add_mesh(sst, scalars="faces", show_edges=True, cmap=cmap, show_scalar_bar=False)
p.subplot(0, 1)
p.add_text("C48 cube time-series", font_size=10, shadow=True)
p.add_mesh(sst_cube, scalars="faces", show_edges=True, cmap=cmap, show_scalar_bar=True)
p.scalar_bar.SetTitle("SST / K")
p.show_axes_all()
p.link_views()
p.add_callback(draw, interval=200)
p.camera_position = "yz"
|
{"hexsha": "1006b9e7ecb51e8419c95b20475bbce5cde4eb43", "size": 1192, "ext": "py", "lang": "Python", "max_stars_repo_path": "poc-3/data/test/real/real_show_both.py", "max_stars_repo_name": "bjlittle/poc-ngvat", "max_stars_repo_head_hexsha": "91e5771d7692dafd477fd8564eb9a45966476ff5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "poc-3/data/test/real/real_show_both.py", "max_issues_repo_name": "bjlittle/poc-ngvat", "max_issues_repo_head_hexsha": "91e5771d7692dafd477fd8564eb9a45966476ff5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "poc-3/data/test/real/real_show_both.py", "max_forks_repo_name": "bjlittle/poc-ngvat", "max_forks_repo_head_hexsha": "91e5771d7692dafd477fd8564eb9a45966476ff5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4888888889, "max_line_length": 87, "alphanum_fraction": 0.7147651007, "include": true, "reason": "import numpy", "num_tokens": 387}
|
! Compiled with pr83149_b.f90
!
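! The result of get_string has an automatic character length: its length,
! len_trim(string), is evaluated from the module variable on each call.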
module mod
character(8) string
contains
function get_string() result(s)
character(len_trim(string)) s
s = string
end function
end module
|
{"hexsha": "3f15198bfe9708dd9475e0a46a774081939cbbbc", "size": 184, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr83149_a.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr83149_a.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr83149_a.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 15.3333333333, "max_line_length": 33, "alphanum_fraction": 0.722826087, "num_tokens": 51}
|
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import os
import csv
import argparse
import json
import numpy as np
def get_compatible_batch_size(batch_size, neg_sample_size):
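    """Round batch_size up to the nearest multiple of neg_sample_size.

    For example, batch_size=1000 with neg_sample_size=256 is rounded up
    to 1024 so every batch splits evenly into negative-sample chunks.
    """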
if neg_sample_size < batch_size and batch_size % neg_sample_size != 0:
old_batch_size = batch_size
batch_size = int(math.ceil(batch_size / neg_sample_size) * neg_sample_size)
        print('batch size ({}) is incompatible with the negative sample size ({}). Changing the batch size to {}'.format(
old_batch_size, neg_sample_size, batch_size))
return batch_size
def save_model(args, model, emap_file=None, rmap_file=None):
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
print('Save model to {}'.format(args.save_path))
model.save_emb(args.save_path, args.dataset)
# We need to save the model configurations as well.
conf_file = os.path.join(args.save_path, 'config.json')
    config_dict = {}
    config_dict.update(vars(args))
    config_dict.update({'emap_file': emap_file,
                        'rmap_file': rmap_file})
    with open(conf_file, 'w') as outfile:
        json.dump(config_dict, outfile, indent=4)
def load_model_config(config_f):
print(config_f)
with open(config_f, "r") as f:
config = json.loads(f.read())
#config = json.load(f)
print(config)
return config
def load_raw_triplet_data(head_f=None, rel_f=None, tail_f=None, emap_f=None, rmap_f=None):
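    """Load raw (string-ID) triplets and convert them to integer IDs.

    emap_f must be provided when head_f or tail_f is given, and rmap_f
    must be provided when rel_f is given; otherwise the ID lookups below
    raise a NameError.
    """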
if emap_f is not None:
eid_map = {}
id2e_map = {}
with open(emap_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
eid_map[row[1]] = int(row[0])
id2e_map[int(row[0])] = row[1]
if rmap_f is not None:
rid_map = {}
id2r_map = {}
with open(rmap_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
rid_map[row[1]] = int(row[0])
id2r_map[int(row[0])] = row[1]
if head_f is not None:
head = []
with open(head_f, 'r') as f:
id = f.readline()
while len(id) > 0:
head.append(eid_map[id[:-1]])
id = f.readline()
head = np.asarray(head)
else:
head = None
if rel_f is not None:
rel = []
with open(rel_f, 'r') as f:
id = f.readline()
while len(id) > 0:
rel.append(rid_map[id[:-1]])
id = f.readline()
rel = np.asarray(rel)
else:
rel = None
if tail_f is not None:
tail = []
with open(tail_f, 'r') as f:
id = f.readline()
while len(id) > 0:
tail.append(eid_map[id[:-1]])
id = f.readline()
tail = np.asarray(tail)
else:
tail = None
return head, rel, tail, id2e_map, id2r_map
def load_triplet_data(head_f=None, rel_f=None, tail_f=None):
if head_f is not None:
head = []
with open(head_f, 'r') as f:
id = f.readline()
while len(id) > 0:
head.append(int(id))
id = f.readline()
head = np.asarray(head)
else:
head = None
if rel_f is not None:
rel = []
with open(rel_f, 'r') as f:
id = f.readline()
while len(id) > 0:
rel.append(int(id))
id = f.readline()
rel = np.asarray(rel)
else:
rel = None
if tail_f is not None:
tail = []
with open(tail_f, 'r') as f:
id = f.readline()
while len(id) > 0:
tail.append(int(id))
id = f.readline()
tail = np.asarray(tail)
else:
tail = None
return head, rel, tail
def load_raw_emb_mapping(map_f):
assert map_f is not None
id2e_map = {}
with open(map_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
id2e_map[int(row[0])] = row[1]
return id2e_map
def load_raw_emb_data(file, map_f=None, e2id_map=None):
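    """Map raw entity names in *file* to integer IDs.

    Either map_f (a tab-separated id-to-name file) or a prebuilt
    e2id_map must be supplied.
    """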
if map_f is not None:
e2id_map = {}
id2e_map = {}
with open(map_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
e2id_map[row[1]] = int(row[0])
id2e_map[int(row[0])] = row[1]
elif e2id_map is not None:
        id2e_map = []  # dummy return value
else:
assert False, 'There should be an ID mapping file provided'
ids = []
with open(file, 'r') as f:
line = f.readline()
while len(line) > 0:
ids.append(e2id_map[line[:-1]])
line = f.readline()
ids = np.asarray(ids)
return ids, id2e_map, e2id_map
def load_entity_data(file=None):
if file is None:
return None
entity = []
with open(file, 'r') as f:
id = f.readline()
while len(id) > 0:
entity.append(int(id))
id = f.readline()
entity = np.asarray(entity)
return entity
class CommonArgParser(argparse.ArgumentParser):
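    """Command-line arguments shared by the DGL-KE training and evaluation scripts."""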
def __init__(self):
super(CommonArgParser, self).__init__()
self.add_argument('--model_name', default='TransE',
choices=['TransE', 'TransE_l1', 'TransE_l2', 'TransR',
'RESCAL', 'DistMult', 'ComplEx', 'RotatE',
'SimplE'],
help='The models provided by DGL-KE.')
self.add_argument('--data_path', type=str, default='data',
help='The path of the directory where DGL-KE loads knowledge graph data.')
self.add_argument('--dataset', type=str, default='FB15k',
help='The name of the builtin knowledge graph. Currently, the builtin knowledge '\
'graphs include FB15k, FB15k-237, wn18, wn18rr and Freebase. '\
                               'DGL-KE automatically downloads the knowledge graph and keeps it under data_path.')
self.add_argument('--format', type=str, default='built_in',
help='The format of the dataset. For builtin knowledge graphs, '\
                               'the format should be built_in. For users\' own knowledge graphs, '\
'it needs to be raw_udd_{htr} or udd_{htr}.')
self.add_argument('--data_files', type=str, default=None, nargs='+',
help='A list of data file names. This is used if users want to train KGE '\
'on their own datasets. If the format is raw_udd_{htr}, '\
'users need to provide train_file [valid_file] [test_file]. '\
'If the format is udd_{htr}, users need to provide '\
'entity_file relation_file train_file [valid_file] [test_file]. '\
'In both cases, valid_file and test_file are optional.')
self.add_argument('--delimiter', type=str, default='\t',
help='Delimiter used in data files. Note all files should use the same delimiter.')
self.add_argument('--save_path', type=str, default='ckpts',
                          help='The path of the directory where models and logs are saved.')
self.add_argument('--no_save_emb', action='store_true',
help='Disable saving the embeddings under save_path.')
self.add_argument('--max_step', type=int, default=80000,
help='The maximal number of steps to train the model. '\
'A step trains the model with a batch of data.')
self.add_argument('--batch_size', type=int, default=1024,
help='The batch size for training.')
self.add_argument('--batch_size_eval', type=int, default=8,
help='The batch size used for validation and test.')
self.add_argument('--neg_sample_size', type=int, default=256,
help='The number of negative samples we use for each positive sample in the training.')
self.add_argument('--neg_deg_sample', action='store_true',
help='Construct negative samples proportional to vertex degree in the training. '\
'When this option is turned on, the number of negative samples per positive edge '\
'will be doubled. Half of the negative samples are generated uniformly while '\
'the other half are generated proportional to vertex degree.')
self.add_argument('--neg_deg_sample_eval', action='store_true',
help='Construct negative samples proportional to vertex degree in the evaluation.')
self.add_argument('--neg_sample_size_eval', type=int, default=-1,
help='The number of negative samples we use to evaluate a positive sample.')
self.add_argument('--eval_percent', type=float, default=1,
help='Randomly sample some percentage of edges for evaluation.')
self.add_argument('--no_eval_filter', action='store_true',
                          help='Disable filtering positive edges out of randomly constructed negative edges for evaluation.')
self.add_argument('-log', '--log_interval', type=int, default=1000,
help='Print runtime of different components every x steps.')
self.add_argument('--eval_interval', type=int, default=10000,
help='Print evaluation results on the validation dataset every x steps '\
                               'if validation is turned on.')
self.add_argument('--test', action='store_true',
help='Evaluate the model on the test set after the model is trained.')
self.add_argument('--num_proc', type=int, default=1,
help='The number of processes to train the model in parallel. '\
'In multi-GPU training, the number of processes by default is set to match the number of GPUs. '\
'If set explicitly, the number of processes needs to be divisible by the number of GPUs.')
self.add_argument('--num_thread', type=int, default=1,
help='The number of CPU threads to train the model in each process. '\
'This argument is used for multiprocessing training.')
self.add_argument('--force_sync_interval', type=int, default=-1,
help='We force a synchronization between processes every x steps for '\
                               'multiprocessing training. This potentially stabilizes the training process '
'to get a better performance. For multiprocessing training, it is set to 1000 by default.')
self.add_argument('--hidden_dim', type=int, default=400,
                          help='The embedding size of relations and entities.')
self.add_argument('--lr', type=float, default=0.01,
help='The learning rate. DGL-KE uses Adagrad to optimize the model parameters.')
self.add_argument('-g', '--gamma', type=float, default=12.0,
help='The margin value in the score function. It is used by TransX and RotatE.')
self.add_argument('-de', '--double_ent', action='store_true',
                          help='Double entity dim for complex number or canonical polyadic. It is used by RotatE and SimplE.')
self.add_argument('-dr', '--double_rel', action='store_true',
                          help='Double relation dim for complex number or canonical polyadic. It is used by RotatE and SimplE.')
self.add_argument('-adv', '--neg_adversarial_sampling', action='store_true',
help='Indicate whether to use negative adversarial sampling. '\
'It will weight negative samples with higher scores more.')
self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float,
help='The temperature used for negative adversarial sampling.')
self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,
help='The coefficient for regularization.')
self.add_argument('-rn', '--regularization_norm', type=int, default=3,
                          help='The norm used in regularization.')
self.add_argument('-pw', '--pairwise', action='store_true',
help='Indicate whether to use pairwise loss function. '
                               'It compares the scores of a positive triple and a negative triple.')
self.add_argument('--loss_genre', default='Logsigmoid',
choices=['Hinge', 'Logistic', 'Logsigmoid', 'BCE'],
help='The loss function used to train KGEM.')
self.add_argument('-m', '--margin', type=float, default=1.0,
                          help='The margin hyper-parameter for hinge loss.')
|
{"hexsha": "8a86e9ead7ba4249dcb296a211b75858406fdc38", "size": 13948, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/dglke/utils.py", "max_stars_repo_name": "ryantd/dgl-ke", "max_stars_repo_head_hexsha": "5a25166e402fab19f25ef6eb7525a1a0c65b8af3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 875, "max_stars_repo_stars_event_min_datetime": "2020-03-31T14:24:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T22:52:50.000Z", "max_issues_repo_path": "python/dglke/utils.py", "max_issues_repo_name": "ryantd/dgl-ke", "max_issues_repo_head_hexsha": "5a25166e402fab19f25ef6eb7525a1a0c65b8af3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 142, "max_issues_repo_issues_event_min_datetime": "2020-04-08T00:35:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:05:09.000Z", "max_forks_repo_path": "python/dglke/utils.py", "max_forks_repo_name": "ryantd/dgl-ke", "max_forks_repo_head_hexsha": "5a25166e402fab19f25ef6eb7525a1a0c65b8af3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 166, "max_forks_repo_forks_event_min_datetime": "2020-03-31T12:26:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T12:54:29.000Z", "avg_line_length": 46.8053691275, "max_line_length": 131, "alphanum_fraction": 0.5663177516, "include": true, "reason": "import numpy", "num_tokens": 3004}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 13:55:06 2016
@author: Alice
"""
# Note: will need packages 'pandas' and 'pillow'
# Move to a directory on Nick's laptop
from os.path import expanduser
home = expanduser("~")
import os
os.chdir(home+'/research_not_syncd/git_projects/surf/abm/dda-mesa/alice_dda_code/')
import numpy as np
import workingcameras as cam
np.random.seed(3)
#%%
''' First we generate the true data. We use 7200 minutes for no real reason,
and a bleed out rate drawn from a normal distribution with mean 0.5, standard deviation 0.1.
For now start with 600 agents'''
bleedoutrate_true = np.random.normal(0.5, scale=0.1)
truth = cam.runProgramTrue(bleedoutrate_true, 7200, 600)
#%%
'''We'll be working with an ensemble of 30 for speed, but ultimately maybe 100
like in the RSO paper.'''
'''We initialise the ensemble drawing the bleed out rate from the prior normal distribution,
mean 0.5 and SD 0.1'''
initial = []
for i in range(30):
bleedoutrate = np.random.normal(0.5, scale=0.1)
result = cam.runProgram(bleedoutrate, 61, 600)
initial.append(result)
initial = np.array(initial)
#%%
''' To look at the spread in bleedoutrates'''
initial[:, -3]
#%%
''' Now we generate these predictions forward an hour '''
forecasts = []
for i in range(30):
prediction = cam.runForecast(61, 600, initial[i], 0, 0, 0, steps=60)
forecasts.append(prediction)
forecasts = np.array(forecasts)
#%%
''' We now generate the forecast covariance matrix.
The covariance matrix is an N x N matrix, where:
N = M agents' locations and route info + bleedoutrate + c_a counts + c_b counts,
i.e N = 2M + 3'''
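# With M = 600 agents, the state vector has N = 2*600 + 3 = 1203 entries.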
''' We find the mean of the forecast ensemble, and find the error from the mean
for each value '''
means = np.mean(forecasts, axis=0)
adjusted = forecasts - means
covariance = np.cov(adjusted.T)
#%%
''' The covariance[-1][-3] measures how the change in bleed out rate affects
the change in the c_b counts '''
print(covariance[-1][-3])
#%%
''' Begin the data assimilation step '''
#%%
''' First we extract the next observation from the true data '''
observation = [truth[-2][2], truth[-1][2]]
''' Then we create the virtual observations by assuming additive Gaussian noise '''
virtualobs = np.zeros((30,2))
for i in range(30):
for j in range(2):
virtualobs[i][j] = observation[j] + np.random.normal(0, 15)
#%%
''' Code the matrix H, the forward model, which is just a transformation matrix
changing the state vector into the same form as the observation vector '''
H = np.zeros((2, 2*600 + 3))
H[-1][-1] = 1
H[0][-2] = 1
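# H picks out the last two state entries (the c_a and c_b counts), matching
# the two-element observation vector.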
#%%
''' Calculate the Kalman gain matrix '''
'''R contains the variance of the random errors (i.e. 15^2) '''
P = covariance
R = np.array([[225, 0],[0, 225]])
# tbi = 'to be inverted'
tbi = np.dot(np.dot(H,P),H.T) + R
''' We want to solve K tbi = P H.T to find K
rewrite to form tbi.T K.T = H P.T'''
LHS = tbi.T
RHS = np.dot(H,P.T)
Ktranspose = np.linalg.lstsq(LHS, RHS, rcond=None)
K = Ktranspose[0].T
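# This is the standard Kalman gain K = P H^T (H P H^T + R)^-1, computed via a
# least-squares solve of the transposed system rather than an explicit inverse.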
#%%
''' Create the ensemble analysis '''
ens_analysis = []
for i in range(30):
tbm = virtualobs[i] - np.dot(H,forecasts[i])
adjust = forecasts[i] + np.dot(K,tbm)
ens_analysis.append(adjust)
#%%
''' Average the ensemble analysis, find analysis covariance '''
ens_means = np.mean(ens_analysis, axis=0)
ens_error = ens_analysis - ens_means
ens_covariance = np.cov(ens_error.T)
#%%
'''-----------------------------------------------------------------------------'''
''' BEGINNING OF THE SAME THING, LOOPED... '''
'''-----------------------------------------------------------------------------'''
stored_forecast = []
stored_forecast_uncertainty = []
stored_analysis = []
stored_analysis_uncertainty = []
stored_truth = []
stored_parameter = []
forecast_error = []
analysis_error = []
observation_error = []
#%%
for n in range(3,120):
print(n)
forecasts = []
#previously started at time T=241
for i in range(30):
prediction = cam.runForecast((((n-1)*60)+1), 600, ens_analysis[i], 0, 0, 0, steps=60)
forecasts.append(prediction)
forecasts = np.array(forecasts)
means = np.mean(forecasts, axis=0)
adjusted = forecasts - means
covariance = np.cov(adjusted.T)
''' Begin the data assimilation step '''
''' First we extract the next observation from the true data '''
observation = [truth[-2][n], truth[-1][n]]
''' Then we create the virtual observations by assuming additive Gaussian noise '''
virtualobs = np.zeros((30,2))
for i in range(30):
for j in range(2):
virtualobs[i][j] = observation[j] + np.random.normal(0, 15)
meansobs = np.mean(virtualobs, axis=0)
virtualavg = meansobs[-1]
''' Code the matrix H, the forward model, which is just a transformation matrix
changing the state vector into the same form as the observation vector '''
H = np.zeros((2, 2*600 + 3))
H[-1][-1] = 1
H[0][-2] = 1
''' Calculate the Kalman gain matrix '''
P = covariance
R = np.array([[225, 0],[0, 225]])
# tbi = 'to be inverted'
tbi = np.dot(np.dot(H,P),H.T) + R
''' We want to solve K tbi = P H.T = RHS to find K
rewrite to form tbi.T K.T = H P.T'''
LHS = tbi.T
RHS = np.dot(H,P.T)
    Ktranspose = np.linalg.lstsq(LHS, RHS, rcond=None)
K = Ktranspose[0].T
''' Create the ensemble analysis '''
ens_analysis = []
for i in range(30):
tbm = virtualobs[i] - np.dot(H,forecasts[i])
adjust = forecasts[i] + np.dot(K,tbm)
ens_analysis.append(adjust)
''' Average the ensemble analysis, find analysis covariance '''
ens_means = np.mean(ens_analysis, axis=0)
ens_error = ens_analysis - ens_means
ens_covariance = np.cov(ens_error.T)
''' Store the results we ultimately want to plot '''
stored_forecast.append(means[-1])
stored_forecast_uncertainty.append(covariance[-1][-1])
stored_analysis.append(ens_means[-1])
stored_analysis_uncertainty.append(ens_covariance[-1][-1])
stored_truth.append(virtualavg)
stored_parameter.append(ens_means[-3])
forecast_error.append(means[-1] - observation[-1])
analysis_error.append(ens_means[-1] - observation[-1])
observation_error.append(virtualavg - observation[-1])
#%%
smallertruth = truth[-1][3:120]
#%%
import matplotlib.pyplot as plt
xaxis = [i for i in range(117)]
plt.plot(xaxis[0:4], stored_forecast[0:4], xaxis[0:4], stored_analysis[0:4])
#%%
m = 0
n = 117
fig = plt.figure(figsize=(14,20))
ax = plt.subplot(3,1,1)
plt.plot(stored_forecast[m:n], label="forecast")
plt.plot(stored_analysis[m:n], label="analysis")
plt.plot(stored_truth[m:n], label="virtual observations")
plt.plot(smallertruth[m:n], label="truth")
ax.set_xlabel('Hour')
ax.set_ylabel('Count of agents')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
m = 91
n = 96
xaxis = [i for i in range(m,n)]
ax = plt.subplot(3,1,2)
plt.plot(xaxis, stored_forecast[m:n], label="forecast")
plt.plot(xaxis, stored_analysis[m:n], label="analysis")
plt.plot(xaxis, stored_truth[m:n], label="virtual obs")
plt.plot(xaxis, smallertruth[m:n], label="truth")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_xlabel('Hour')
ax.set_ylabel('Count of agents')
ax = plt.subplot(3,1,3)
plt.plot(stored_parameter[0:117], label="'True' bleedout rate")
plt.plot([bleedoutrate_true for i in range(117)], label="Estimated bleedout rate")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_xlabel('Hour')
ax.set_ylabel('Parameter value')
plt.show()
#%%
from numpy import mean, sqrt, square
forecast_RMSE = sqrt(mean(square(forecast_error)))
analysis_RMSE = sqrt(mean(square(analysis_error)))
observation_RMSE = sqrt(mean(square(observation_error)))
#%%
fig.savefig('image' + str(bleedoutrate_true) + ' ' + str(forecast_RMSE) \
+ ' ' + str(analysis_RMSE) + ' ' + str(observation_RMSE) + ' ' + '10' + '.png')
|
{"hexsha": "1a9bbef22ce2c8874d7a46ef1f5f390677d790f9", "size": 8472, "ext": "py", "lang": "Python", "max_stars_repo_path": "abm/dda-mesa/alice_dda_code/kalman.py", "max_stars_repo_name": "nickmalleson/surf", "max_stars_repo_head_hexsha": "d6b8abb75635ac0fbadb445e67fc50ccb8b19945", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-09-15T03:16:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-11T00:50:39.000Z", "max_issues_repo_path": "abm/essa-kalman_filter/kalman.py", "max_issues_repo_name": "nickmalleson/surf", "max_issues_repo_head_hexsha": "d6b8abb75635ac0fbadb445e67fc50ccb8b19945", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abm/essa-kalman_filter/kalman.py", "max_forks_repo_name": "nickmalleson/surf", "max_forks_repo_head_hexsha": "d6b8abb75635ac0fbadb445e67fc50ccb8b19945", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-08-25T13:38:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T10:20:01.000Z", "avg_line_length": 21.18, "max_line_length": 94, "alphanum_fraction": 0.6102455146, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2268}
|
import numpy
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import json
sns.set_style("whitegrid")
results = json.load(open(sys.argv[1]))
bar_width = 0.3
n_groups = len(results["cluster_ids"])
index = numpy.arange(n_groups) + bar_width/2
legend_labels = []
colors = sns.color_palette("hls", len(results["cluster_ids"]))
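# Draw one series of bars per trajectory, shifting each series right by
# bar_width so the groups sit side by side.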
for j, traj in enumerate(results["id_to_path"].keys()):
rects = plt.bar(index,
results["populations"][traj],
bar_width,
color = colors[j]
)
index = index + bar_width
legend_labels.append(results["id_to_path"][traj])
lgd = plt.legend(legend_labels,
                 loc='center right',
                 bbox_to_anchor=(1, 1))
plt.show()
|
{"hexsha": "1298be24fe6f87a6c6b54f32f35e7fc7deaa44d9", "size": 788, "ext": "py", "lang": "Python", "max_stars_repo_path": "nma_algo_char/conf_overlap_hist.py", "max_stars_repo_name": "victor-gil-sepulveda/PhD-ANMPythonHelpers", "max_stars_repo_head_hexsha": "c0e15684cce4aa4da90141b51f043a567a5f8655", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-01T15:19:10.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-01T15:19:10.000Z", "max_issues_repo_path": "nma_algo_char/conf_overlap_hist.py", "max_issues_repo_name": "victor-gil-sepulveda/PhD-ANMPythonHelpers", "max_issues_repo_head_hexsha": "c0e15684cce4aa4da90141b51f043a567a5f8655", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nma_algo_char/conf_overlap_hist.py", "max_forks_repo_name": "victor-gil-sepulveda/PhD-ANMPythonHelpers", "max_forks_repo_head_hexsha": "c0e15684cce4aa4da90141b51f043a567a5f8655", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2666666667, "max_line_length": 62, "alphanum_fraction": 0.6154822335, "include": true, "reason": "import numpy", "num_tokens": 185}
|
import densities
import tensorflow as tf
import numpy as np
from param import Parameterized
class Prior(Parameterized):
def logp(self, x):
"""
        The log density of the prior at x.
All priors (for the moment) are univariate, so if x is a vector or an
array, this is the sum of the log densities.
"""
raise NotImplementedError
def __str__(self):
"""
A short string to describe the prior at print time
"""
raise NotImplementedError
class Gaussian(Prior):
def __init__(self, mu, var):
Prior.__init__(self)
self.mu = np.atleast_1d(np.array(mu, np.float64))
self.var = np.atleast_1d(np.array(var, np.float64))
def logp(self, x):
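        # Sum the elementwise log densities so a vector-valued x gives a scalar.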
return tf.reduce_sum(densities.gaussian(x, self.mu, self.var))
def __str__(self):
return "N("+str(self.mu) + "," + str(self.var) + ")"
class LogNormal(Prior):
def __init__(self, mu, var):
Prior.__init__(self)
self.mu = np.atleast_1d(np.array(mu, np.float64))
self.var = np.atleast_1d(np.array(var, np.float64))
def logp(self, x):
return tf.reduce_sum(densities.lognormal(x, self.mu, self.var))
def __str__(self):
return "logN("+str(self.mu) + "," + str(self.var) + ")"
class Gamma(Prior):
def __init__(self, shape, scale):
Prior.__init__(self)
self.shape = np.atleast_1d(np.array(shape, np.float64))
self.scale = np.atleast_1d(np.array(scale, np.float64))
def logp(self, x):
return tf.reduce_sum(densities.gamma(self.shape, self.scale, x))
def __str__(self):
return "Ga("+str(self.shape) + "," + str(self.scale) + ")"
class Laplace(Prior):
def __init__(self, mu, sigma):
Prior.__init__(self)
self.mu = np.atleast_1d(np.array(mu, np.float64))
self.sigma = np.atleast_1d(np.array(sigma, np.float64))
def logp(self, x):
return tf.reduce_sum(densities.laplace(self.mu, self.sigma, x))
def __str__(self):
return "Lap.("+str(self.mu) + "," + str(self.sigma) + ")"
|
{"hexsha": "b9df3d4f2d622aea23062423ebac171ef0fac383", "size": 2096, "ext": "py", "lang": "Python", "max_stars_repo_path": "GPflow/priors.py", "max_stars_repo_name": "blutooth/dgp", "max_stars_repo_head_hexsha": "bedbbc3595fbe124d7a06c3d6d64f9009304491e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-06T04:42:37.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-06T04:42:37.000Z", "max_issues_repo_path": "GPflow/priors.py", "max_issues_repo_name": "blutooth/dgp", "max_issues_repo_head_hexsha": "bedbbc3595fbe124d7a06c3d6d64f9009304491e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GPflow/priors.py", "max_forks_repo_name": "blutooth/dgp", "max_forks_repo_head_hexsha": "bedbbc3595fbe124d7a06c3d6d64f9009304491e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5789473684, "max_line_length": 77, "alphanum_fraction": 0.6121183206, "include": true, "reason": "import numpy", "num_tokens": 560}
|
[STATEMENT]
lemma pp_dist_sup [simp]:
"--(x \<squnion> y) = --x \<squnion> --y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - - (x \<squnion> y) = - - x \<squnion> - - y
[PROOF STEP]
by simp
|
{"llama_tokens": 96, "file": "Stone_Algebras_P_Algebras", "length": 1}
|
Require Export prosa.analysis.facts.model.service_of_jobs.
Require Export prosa.analysis.facts.preemption.rtc_threshold.job_preemptable.
Require Export prosa.analysis.abstract.definitions.
(** * Run-to-Completion Threshold of a job *)
(** In this module, we provide a sufficient condition under which a job
receives enough service to become non-preemptive. *)
(** Previously we defined the notion of run-to-completion threshold (see file
abstract.run_to_completion_threshold.v). Run-to-completion threshold is the
amount of service after which a job cannot be preempted until its completion.
In this section we prove that if cumulative interference inside a busy interval
is bounded by a certain constant then a job executes long enough to reach its
run-to-completion threshold and become non-preemptive. *)
Section AbstractRTARunToCompletionThreshold.
(** Consider any type of tasks ... *)
Context {Task : TaskType}.
Context `{TaskCost Task}.
(** ... and any type of jobs associated with these tasks. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
Context `{JobCost Job}.
(** In addition, we assume existence of a function
mapping jobs to their preemption points. *)
Context `{JobPreemptable Job}.
(** Consider any kind of uni-service ideal processor state model. *)
Context {PState : Type}.
Context `{ProcessorState Job PState}.
Hypothesis H_ideal_progress_proc_model : ideal_progress_proc_model PState.
Hypothesis H_unit_service_proc_model : unit_service_proc_model PState.
(** Consider any arrival sequence with consistent arrivals ... *)
Variable arr_seq : arrival_sequence Job.
Hypothesis H_arrival_times_are_consistent : consistent_arrival_times arr_seq.
(** ... and any schedule of this arrival sequence. *)
Variable sched : schedule PState.
(** Assume that the job costs are no larger than the task costs. *)
Hypothesis H_jobs_respect_taskset_costs : arrivals_have_valid_job_costs arr_seq.
(** Let [tsk] be any task that is to be analyzed. *)
Variable tsk : Task.
(** Assume we are provided with abstract functions for interference and interfering workload. *)
Variable interference : Job -> instant -> bool.
Variable interfering_workload : Job -> instant -> duration.
(** For simplicity, let's define some local names. *)
Let work_conserving := work_conserving arr_seq sched tsk.
Let cumul_interference := cumul_interference interference.
Let cumul_interfering_workload := cumul_interfering_workload interfering_workload.
Let busy_interval := busy_interval sched interference interfering_workload.
(** We assume that the schedule is work-conserving. *)
Hypothesis H_work_conserving: work_conserving interference interfering_workload.
(** Let [j] be any job of task [tsk] with positive cost. *)
Variable j : Job.
Hypothesis H_j_arrives : arrives_in arr_seq j.
Hypothesis H_job_of_tsk : job_task j = tsk.
Hypothesis H_job_cost_positive : job_cost_positive j.
(** Next, consider any busy interval <<[t1, t2)>> of job [j]. *)
Variable t1 t2 : instant.
Hypothesis H_busy_interval : busy_interval j t1 t2.
(** First, we prove that job [j] completes by the end of the busy interval.
      Note that the busy interval contains the execution of job j; in addition,
      time instant t2 is a quiet time. Thus, by the definition of a quiet time,
      the job must be completed before time t2. *)
Lemma job_completes_within_busy_interval:
completed_by sched j t2.
Proof.
move: (H_busy_interval) => [[/andP [_ LT] [_ _]] [_ QT2]].
unfold pending, has_arrived in QT2.
move: QT2; rewrite /pending negb_and; move => /orP [QT2|QT2].
{ by move: QT2 => /negP QT2; exfalso; apply QT2, ltnW. }
by rewrite Bool.negb_involutive in QT2.
Qed.
(** In this section we show that the cumulative interference is a complement to
the total time where job [j] is scheduled inside the busy interval. *)
Section InterferenceIsComplement.
    (** Consider any sub-interval <<[t, t + delta)>> inside the busy interval <<[t1, t2)>>. *)
Variables (t : instant) (delta : duration).
Hypothesis H_greater_than_or_equal : t1 <= t.
Hypothesis H_less_or_equal: t + delta <= t2.
    (** We prove that the sum of cumulative service and cumulative interference
in the interval <<[t, t + delta)>> is equal to delta. *)
Lemma interference_is_complement_to_schedule:
service_during sched j t (t + delta) + cumul_interference j t (t + delta) = delta.
Proof.
rewrite /service_during /cumul_interference/service_at.
rewrite -big_split //=.
rewrite -{2}(sum_of_ones t delta).
rewrite big_nat [in RHS]big_nat.
apply: eq_bigr=> x /andP[Lo Hi].
move: (H_work_conserving j t1 t2 x) => Workj.
feed_n 5 Workj; try done.
{ by apply/andP; split; eapply leq_trans; eauto 2. }
destruct interference.
- replace (service_in _ _) with 0; auto; symmetry.
apply no_service_not_scheduled; auto.
now apply/negP; intros SCHED; apply Workj in SCHED; apply SCHED.
- replace (service_in _ _) with 1; auto; symmetry.
apply/eqP; rewrite eqn_leq; apply/andP; split.
+ apply H_unit_service_proc_model.
+ now apply H_ideal_progress_proc_model, Workj.
Qed.
End InterferenceIsComplement.
(** In this section, we prove a sufficient condition under which job [j] receives enough service. *)
Section InterferenceBoundedImpliesEnoughService.
(** Let progress_of_job be the desired service of job j. *)
Variable progress_of_job : duration.
Hypothesis H_progress_le_job_cost : progress_of_job <= job_cost j.
(** Assume that for some delta, the sum of desired progress and cumulative
interference is bounded by delta (i.e., the supply). *)
Variable delta : duration.
Hypothesis H_total_workload_is_bounded:
progress_of_job + cumul_interference j t1 (t1 + delta) <= delta.
(** Then, it must be the case that the job has received no less service than progress_of_job. *)
Theorem j_receives_at_least_run_to_completion_threshold:
service sched j (t1 + delta) >= progress_of_job.
Proof.
case NEQ: (t1 + delta <= t2); last first.
{ intros.
have L8 := job_completes_within_busy_interval.
apply leq_trans with (job_cost j); first by done.
rewrite /service.
rewrite -(service_during_cat _ _ _ t2).
apply leq_trans with (service_during sched j 0 t2); [by done | by rewrite leq_addr].
by apply/andP; split; last (apply negbT in NEQ; apply ltnW; rewrite ltnNge).
}
{ move: H_total_workload_is_bounded => BOUND.
apply subh3 in BOUND.
apply leq_trans with (delta - cumul_interference j t1 (t1 + delta)); first by done.
apply leq_trans with (service_during sched j t1 (t1 + delta)).
{ rewrite -{1}[delta](interference_is_complement_to_schedule t1) //.
rewrite -addnBA // subnn addn0 //.
}
{ rewrite /service -[X in _ <= X](service_during_cat _ _ _ t1).
rewrite leq_addl //.
by apply/andP; split; last rewrite leq_addr.
}
}
Qed.
End InterferenceBoundedImpliesEnoughService.
  (** In this section we prove a simple lemma about the completion of
      a job after it reaches its run-to-completion threshold. *)
Section CompletionOfJobAfterRunToCompletionThreshold.
(** Assume that completed jobs do not execute ... *)
Hypothesis H_completed_jobs_dont_execute:
completed_jobs_dont_execute sched.
    (** ... and the preemption model is valid. *)
Hypothesis H_valid_preemption_model:
valid_preemption_model arr_seq sched.
(** Then, job [j] must complete in [job_cost j - job_run_to_completion_threshold j] time
units after it reaches run-to-completion threshold. *)
Lemma job_completes_after_reaching_run_to_completion_threshold:
forall t,
job_run_to_completion_threshold j <= service sched j t ->
completed_by sched j (t + (job_cost j - job_run_to_completion_threshold j)).
Proof.
move => t ES.
set (job_cost j - job_run_to_completion_threshold j) as job_last.
have LSNP := @job_nonpreemptive_after_run_to_completion_threshold
Job H2 H3 _ _ arr_seq sched _ j _ t.
apply negbNE; apply/negP; intros CONTR.
have SCHED: forall t', t <= t' <= t + job_last -> scheduled_at sched j t'.
{ move => t' /andP [GE LT].
rewrite -[t'](@subnKC t) //.
eapply LSNP; eauto 2; first by rewrite leq_addr.
rewrite subnKC //.
apply/negP; intros COMPL.
move: CONTR => /negP Temp; apply: Temp.
apply completion_monotonic with (t0 := t'); try done.
}
have SERV: job_last + 1 <= \sum_(t <= t' < t + (job_last + 1)) service_at sched j t'.
{ rewrite -{1}[job_last + 1]addn0 -{2}(subnn t) addnBA // addnC.
rewrite -{1}[_+_-_]addn0 -[_+_-_]mul1n -iter_addn -big_const_nat.
rewrite big_nat_cond [in X in _ <= X]big_nat_cond.
rewrite leq_sum //.
move => t' /andP [NEQ _].
apply H_ideal_progress_proc_model; apply SCHED.
by rewrite addn1 addnS ltnS in NEQ.
}
eapply service_at_most_cost with (j0 := j) (t0 := t + job_last.+1) in H_completed_jobs_dont_execute; auto.
move: H_completed_jobs_dont_execute; rewrite leqNgt; move => /negP T; apply: T.
rewrite /service -(service_during_cat _ _ _ t); last by (apply/andP; split; last rewrite leq_addr).
apply leq_trans with (job_run_to_completion_threshold j + service_during sched j t (t + job_last.+1));
last by rewrite leq_add2r.
apply leq_trans with (job_run_to_completion_threshold j + job_last.+1); last by rewrite leq_add2l /service_during -addn1.
by rewrite addnS ltnS subnKC //; eapply job_run_to_completion_threshold_le_job_cost; eauto.
Qed.
End CompletionOfJobAfterRunToCompletionThreshold.
End AbstractRTARunToCompletionThreshold.
|
{"author": "pointoflight", "repo": "prosa", "sha": "df7246392f27f32c760022b790f8c7aca11ff215", "save_path": "github-repos/coq/pointoflight-prosa", "path": "github-repos/coq/pointoflight-prosa/prosa-df7246392f27f32c760022b790f8c7aca11ff215/analysis/abstract/run_to_completion.v"}
|
import json
import logging
import os
import random
import sys
import matplotlib.pyplot as plt
import numpy as np
import pygame as pg
from peepo.playground.game_of_life.organism import Ennemies, Food, GoLPeepo
from peepo.pp.genetic_algorithm import GeneticAlgorithm
from peepo.pp.peepo_network import read_from_file, write_to_file
CAPTION = "game of life"
SCREEN_SIZE = (800, 800)
SCREEN_CENTER = (400, 400)
def create_population(graphical, generation, individuals, ennemies, food):
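    """Build one GoLPeepo agent per (fitness, network) pair in the GA population."""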
pop = []
for i, idv in enumerate(individuals):
peepo = GoLPeepo(name='peepo_' + str(generation) + '_' + str(i),
network=idv[1],
graphical=graphical,
pos=(5, 400),
ennemies=ennemies,
food=food)
pop.append(peepo)
return pop
def generate_ennemies(num):
objects = []
for x in range(0, num):
objects.append({
'id': 'obj_' + str(x),
'x': random.randint(20, SCREEN_SIZE[0] - 20),
'y': random.randint(20, SCREEN_SIZE[1] - 20)
})
with open('ennemies.json', 'w') as outfile:
json.dump(objects, outfile)
def read_ennemies(graphical):
ennemies = []
with open('ennemies.json') as json_data:
for f in json.load(json_data):
ennemies.append(Ennemies(f['id'], (f['x'], f['y']), graphical))
return ennemies
def generate_food(num):
objects = []
for x in range(0, num):
objects.append({
'id': 'obj_' + str(x),
'x': random.randint(20, SCREEN_SIZE[0] - 20),
'y': random.randint(20, SCREEN_SIZE[1] - 20)
})
with open('food.json', 'w') as outfile:
json.dump(objects, outfile)
def read_food(graphical):
food = []
with open('food.json') as json_data:
for f in json.load(json_data):
food.append(Food(f['id'], (f['x'], f['y']), graphical))
return food
class World(object):
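    """Simulation world: holds the peepos, enemies, and food, and runs the update/render loop."""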
def __init__(self, graphical, peepos, ennemies, food):
if graphical:
self.screen = pg.display.get_surface()
self.screen_rect = self.screen.get_rect()
self.graphical = graphical
self.clock = pg.time.Clock()
self.fps = 60
self.done = False
self.peepos = peepos
self.ennemies = ennemies
self.food = food
self.traject = []
self.eaten = []
self.collision = []
def event_loop(self):
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
def render(self):
self.screen.fill(pg.Color("white"))
for obj in self.ennemies:
obj.draw(self.screen)
for obj in self.food:
obj.draw(self.screen)
for peepo in self.peepos:
peepo.draw(self.screen)
pg.display.update()
def main_loop(self, max_age, verify=False):
loop = 0
last_food = 0
last_collision = 0
while not self.done:
for peepo in self.peepos:
peepo.update()
self.food = peepo.food
self.traject.append(peepo.rect)
if peepo.stomach > last_food:
last_food = peepo.stomach
self.eaten.append(peepo.rect)
if peepo.bang > last_collision:
last_collision = peepo.bang
self.collision.append(peepo.rect)
if self.graphical:
self.event_loop()
self.render()
self.clock.tick(self.fps)
loop += 1
if loop % 10 == 0:
print('Age ' + str(loop) + ' out of ' + str(max_age))
if loop > max_age:
for peepo in self.peepos:
print('Peepo got ', peepo.stomach, ' food items and ', peepo.bang, ' injuries.')
break
if self.graphical:
self.screen.fill(pg.Color("white"))
for obj in self.ennemies:
obj.draw(self.screen)
for obj in self.food:
obj.draw(self.screen)
if verify:
for traj in self.traject:
pg.draw.circle(self.screen, (225, 220, 225), [int(traj.x), int(traj.y)], 2)
for traj in self.eaten:
pg.draw.circle(self.screen, (0, 255, 0), [int(traj.x), int(traj.y)], 4)
for traj in self.collision:
pg.draw.circle(self.screen, (255, 0, 0), [int(traj.x), int(traj.y)], 4)
pg.display.update()
def verification(graphical):
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
os.environ['SDL_VIDEO_CENTERED'] = '1'
if graphical:
pg.init()
pg.display.set_caption(CAPTION)
pg.display.set_mode(SCREEN_SIZE)
max_age = 400 # 2000
ennemies = read_ennemies(graphical)
food = read_food(graphical)
peepos = [
GoLPeepo('peepo', read_from_file('best_life_game_network'), graphical, (5, 400), ennemies=ennemies, food=food)]
world = World(graphical, peepos, ennemies, food)
world.main_loop(max_age, True)
    done = False
    while not done:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                done = True
    pg.quit()
    sys.exit()
def evolution(graphical, enemy_weight):
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
os.environ['SDL_VIDEO_CENTERED'] = '1'
if graphical:
pg.init()
pg.display.set_caption(CAPTION)
pg.display.set_mode(SCREEN_SIZE)
max_age = 400
num_individuals = 20
num_generations = 20
ga = GeneticAlgorithm('game_of_life',
convergence_period=5,
convergence_sensitivity_percent=5.,
fast=True,
p_mut_top=0.2,
p_mut_cpd=0.2,
Npop=num_individuals,
max_removal=2)
population = ga.get_population()
avg_fitnesses = []
for gen in range(num_generations):
ennemies = read_ennemies(graphical)
food = read_food(graphical)
peepos = create_population(graphical, gen, population, ennemies, food)
print('Generation ' + str(gen) + ' out of ' + str(num_generations), ' with ', len(peepos), ' peepos')
print('-------------------------------------------------------------------------------------------------')
world = World(graphical, peepos, ennemies, food)
world.main_loop(max_age)
for idx, peepo in enumerate(peepos):
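            # Fitness rewards food eaten and penalizes collisions, weighted by enemy_weight.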
            population[idx][0] = (1.0 + peepo.stomach * (1. - enemy_weight)) / (1.0 + peepo.bang * enemy_weight)
avg_fitness, population, converging = ga.evolve(population)
if converging:
break
if avg_fitness < 0:
print(' population collapsed :-(')
break
print('Average fitness: ', avg_fitness)
print('----------------------------------------------------------')
avg_fitnesses.append(avg_fitness)
final_network, best_fitness = ga.get_optimal_network()
print('\n\nFINAL NETWORK has a fitness of ', best_fitness)
print('________________\n\n')
print(final_network.edges)
write_to_file('best_life_game_network', final_network)
t = np.arange(0.0, len(avg_fitnesses), 1)
fig, ax = plt.subplots()
ax.plot(t, avg_fitnesses)
ax.set(xlabel='generation', ylabel='average fitness',
title='Game of life game with genetic algorithm')
ax.grid()
plt.show()
if __name__ == '__main__':
    enemy_weight = 0.5
# generate_ennemies(200)
# generate_food(200)
    # evolution(False, enemy_weight)
verification(True)
|
{"hexsha": "4815ea526f6781adcd2606dde5e7a56743e76748", "size": 7769, "ext": "py", "lang": "Python", "max_stars_repo_path": "peepo/playground/game_of_life/sandbox.py", "max_stars_repo_name": "hayoc/peepo_prototype", "max_stars_repo_head_hexsha": "57b4a42fd6b769cbd17a3449a49a013fc5d75455", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-04T21:12:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-30T00:35:50.000Z", "max_issues_repo_path": "peepo/playground/game_of_life/sandbox.py", "max_issues_repo_name": "hayoc/peepo", "max_issues_repo_head_hexsha": "b15fd18d0f618e1e24eadc97c72fde62039ddafb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "peepo/playground/game_of_life/sandbox.py", "max_forks_repo_name": "hayoc/peepo", "max_forks_repo_head_hexsha": "b15fd18d0f618e1e24eadc97c72fde62039ddafb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2365145228, "max_line_length": 119, "alphanum_fraction": 0.5541253701, "include": true, "reason": "import numpy", "num_tokens": 1879}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 09:52:43 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import os
import re
import logging
import numpy as np
import pandas as pd
from glob import glob1
from PlotUtils import *
class Timit:
_COMMENT_RE = re.compile("^;");
_SPKR_COLS = ['id','sex','dr','use','recdate','birthdate',
'ht','race','edu','comments'];
_DR_RE = re.compile('^dr\d+$');
_DR_PREFIX = 'dr';
def __init__(self,root,verbose=False):
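        """Parse the TIMIT corpus rooted at *root*: the pronunciation
        dictionary, speaker info, sentence prompts, and the per-speaker
        wav/phn/wrd files."""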
assert os.path.exists(root), "Root folder does not exist"
logging.info('Initializing Timit corpus from '+root);
self._root = root;
vocab = os.path.join(root,"doc","TIMITDIC.TXT")
spkrinfo = os.path.join(root,"doc","SPKRINFO.TXT")
prompts = os.path.join(root,"doc","PROMPTS.TXT")
self.init_dictionary(vocab);
self.init_spkrinfo(spkrinfo);
self.init_sentences(prompts);
self.init_files(verbose=verbose);
self.silence_phoneme = 'h#';
def get_silence_phoneme(self):
return self.silence_phoneme
def _is_comment(self,line):
return self._COMMENT_RE.search(line)!=None
def init_dictionary(self,vocab):
logging.info('Start parsing dictionary')
assert os.path.exists(vocab), "Missing vocab dict: "+vocab
f = open(vocab, 'r');
linecnt = 0; rows = [];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
rline=re.sub("/","",line);
rline=re.sub("\d+","",rline);
wlist = rline.split();
if len(wlist)<2:
msg = 'Incomplete dict entry @%d : %s'
logging.warn(msg,linecnt,line); continue
rows.append([wlist[0], ' '.join(wlist[1:])])
f.close();
df = pd.DataFrame(data=rows,columns=["word","phnseq"]);
assert df.shape[0]>0, "Invalid dictionary no valid entry found"
self._vocab = vocab; self._vocabdf = df;
df.set_index('word',inplace=True);
logging.info("Read %d words from dictionary",df.shape[0])
def init_spkrinfo(self,spkrinfo):
logging.info('Start parsing speaker information')
assert os.path.exists(spkrinfo), "Missing speaker info: "+spkrinfo
f = open(spkrinfo,"r"); linecnt=0; rows=[];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
wlist = line.split();
if len(wlist)<9:
msg = 'Incomplete speaker entry @%d : %s'
logging.warn(msg,linecnt,line); continue
row = wlist[0:9]; row.append(' '.join(wlist[9:]));
row[0]=row[0].lower();
rows.append(row);
f.close()
assert len(rows)>0, "No valid speaker entry found"
df = pd.DataFrame(data=rows,columns=self._SPKR_COLS);
df.set_index('id',inplace=True);
self._spkrinfo = spkrinfo; self._spkrdf = df;
logging.info('Read information for %d speakers',df.shape[0]);
def init_sentences(self,prompts):
assert os.path.exists(prompts), "Missing sentence files: "+prompts
f = open(prompts,"r"); linecnt=0; rows=[];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
r = re.compile('\(.+\)');
if not r.search(line):
msg = 'sentence id not found @%d %s';
logging.warn(msg,linecnt,line);
continue;
wlist = line.split();
i = re.sub('[()]',"",wlist[-1]);
c = re.sub('[()\d]',"",wlist[-1]);
row = [i,c,' '.join(wlist[0:-1])];
rows.append(row);
f.close();
assert len(rows)>0, "No valid sentence found"
logging.info('Read %d sentences',len(rows));
df = pd.DataFrame(data=rows,columns=['id','type','sentence']);
df.set_index('id',inplace=True);
self._sentfile = prompts; self._sentdf = df;
def get_dialect_regions(self):
assert hasattr(self,'_spkrdf'), "Speaker info is not initialized"
return ['dr'+x for x in self._spkrdf.dr.unique()];
def has_speaker(self,spkr):
assert hasattr(self,'_spkrdf'), "Speaker info is not initialized"
return spkr in self._spkrdf.index
def get_speaker_use(self,spkr):
if self.has_speaker(spkr):
return self._spkrdf.loc[spkr]['use']
def get_region_id(self,name):
if self._DR_RE.search(name): return name[2:];
def init_files(self,verbose=False):
dirs = glob1(self._root,'dr*');
# May need this for linux but windows is case insensitive
#dirs+=glob1(self._root,'DR*');
rows = []; f = open('timit_corpus_parsing.log',mode='w');
assert len(dirs)>0, "No dialect region directory division found dr*"
logging.info("Start initializing corpus files")
for drd in dirs:
logging.info("Parsing files for dialect dir %s",drd);
drid = int(self.get_region_id(drd));
drp = os.path.join(self._root,drd);
# First character is 'f' - female, 'm'- male
spkrdirs = glob1(drp,'[fmFM]*');
for spkd in spkrdirs:
sex = spkd[0]; spkr = spkd[1:];
spkp = os.path.join(drp,spkd);
# Get waves and check for wrd and phn files
wavfiles = glob1(spkp,'*.wav');
for wav in wavfiles:
senid = wav[0:-4]; wavp = os.path.join(spkp,wav);
phn = senid+'.phn'; wrd = senid+'.wrd';
phnp = os.path.join(spkp,phn);
wrdp = os.path.join(spkp,wrd);
if not (os.path.exists(phnp) and os.path.exists(wrdp)):
logging.warn('Could not find wrd or phn file '+spkp);
continue;
row = [drid,spkr,sex,senid,wavp,phnp,wrdp]
# Check for overlap in wrd and phn both and report
if self.has_overlap(phnp):
msg = "Phone boundaries overlap. "+ \
"Dropping entry %s" % str(row)
if verbose: logging.warn(msg);
f.write(msg+"\n");
elif self.has_overlap(wrdp):
msg = "Word boundaries overlap. "+\
"Dropping entry %s" % str(row)
if verbose: logging.warn(msg);
f.write(msg+"\n");
else:
spkr_use = self.get_speaker_use(spkr);
train = True if spkr_use=='TRN' else False
test = not train; valid = False;
duration = self._get_duration(phnp);
row+=[duration,train,valid,test];
rows.append(row);
assert len(rows)>0, "No valid data found in dataset "+self._root;
cols = ['dr','spkrid','sex','senid','wav','phn','wrd','duration',
'training','validation','testing'];
df = pd.DataFrame(rows,columns=cols);
count = np.count_nonzero(df[['training','testing']].values)
logging.info('Total %d valid samples found in dataset',count)
self._corpusdf = df; f.close();
return
def has_overlap(self,path):
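        """Return True if the (begin, end) sample boundaries in *path*
        are not monotonically non-decreasing, i.e. segments overlap."""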
assert os.path.exists(path), "Cannot open file " + path
        try:
            df = pd.read_csv(path,header=None,delim_whitespace=True);
        except Exception:
            logging.warning("Unable to parse file %s",path);
            return True
a = np.ndarray.flatten(df[[0,1]].values);
return not np.all(np.diff(a)>=0);
def _get_duration(self,path):
assert os.path.exists(path), "Cannot open file " + path
df = pd.read_csv(path,header=None,delim_whitespace=True);
return df[1].values[-1];
def get_grapheme_list(self):
"""
Parse sentence information to collect unique chars
"""
self.check_sentences_init();
cdups = [];
for x in self._sentdf.sentence: cdups+=list(set(x));
cdups = [x.lower() for x in cdups]
return list(set(cdups))
def get_vocab_phoneme_list(self):
assert hasattr(self,'_vocabdf'), "Vocabulary is not initialized"
phns = [];
for x in self._vocabdf.phnseq: phns+=x.split();
return list(set(phns));
def get_phoneme_list(self):
assert hasattr(self,'_corpusdf'), "Corpus is not initialized"
phns = [];
for f in self._corpusdf.phn.values:
df = pd.read_csv(f,header=None,delim_whitespace=True);
phns += list(df[2].values)
return list(set(phns))
def report_statistics(self,prefix='timit',folder=None,
reptext=True,plotfig=True):
self.check_corpus_init();
combos = [['Total','training==True or training==False or training!=training'],
['Dropped','training!=training and testing!=testing and validation!=validation'],
['Training','training==True'],
['Validation','validation==True']]
for name,expr in combos:
df = self._corpusdf.query(expr);
drsum = df.groupby(by='dr').size();
if reptext:
print(name+" corpus statistics")
print("==========================")
print(drsum);
if plotfig and drsum.size>0:
fig = new_figure();
drsum.plot.pie(title='Dialect region wise '+
name.lower()+' count',
label='Count',table=True);
save_figure(prefix+'.'+name.lower(),folder=folder);
def check_corpus_init(self):
assert hasattr(self,'_corpusdf'), "Corpus is not initialized"
def check_sentences_init(self):
assert hasattr(self,'_sentdf'), "Sentence info is not initialized"
def split_validation(self,train_ratio=0.9):
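        """Move a (1 - train_ratio) share of the training samples into the
        validation split, stratified by dialect region and gender."""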
self.check_corpus_init();
drs = np.unique(self._corpusdf.dr.values);
for dr in drs:
for sex in ['m','f']:
expr = 'dr==@dr and sex==@sex and training==True';
df = self._corpusdf.query(expr);
total = df.shape[0]; valcnt = int((1-train_ratio)*total);
msg='Creating %d validation samples for gender=%s and dr=%s'
logging.info(msg,valcnt,sex,dr);
if valcnt>0:
idxs = df.sample(n=valcnt).index;
self._corpusdf.loc[idxs,'validation']=True;
self._corpusdf.loc[idxs,'training']=False;
def strip_punctuations(self,s):
        # Strip a trailing period and other punctuation; retain hyphens and internal dots.
schomp = re.sub('\.$','',s);
return re.sub('[?!:;,"]','',schomp)
def get_words(self,sentences):
words = [];
for x in sentences:
words += self.strip_punctuations(x).split();
return [x.lower() for x in list(set(words))];
def get_all_sentences(self):
self.check_sentences_init();
return self._sentdf.sentence.values;
def get_sentences(self,idxs):
self.check_sentences_init();
return self._sentdf.loc[idxs,'sentence'].values;
def get_train_sentence_ids(self):
self.check_corpus_init();
idxs = self._corpusdf.query('training==True')['senid'];
idxs = list(set(idxs));
return idxs;
def get_train_sentence_count(self):
return len(self.get_train_sentence_ids());
def get_train_sentences(self):
self.check_sentences_init();
idxs = self.get_train_sentence_ids();
return self.get_sentences(idxs);
def get_all_words(self):
self.check_sentences_init();
return self.get_words(self.get_all_sentences());
def get_train_words(self):
self.check_sentences_init();
sents = self.get_train_sentences();
return self.get_words(sents);
def get_sentence_count(self):
self.check_sentences_init();
return self._sentdf.shape[0]
def report_train_coverage(self,report='train_coverage.rpt'):
sents_cnt = self.get_sentence_count();
trsents_cnt = self.get_train_sentence_count();
words = self.get_all_words();
trwords = self.get_train_words();
nw = len(words); ntw = len(trwords);
ptw = round(ntw/nw*100,2);
pts = round(trsents_cnt/sents_cnt*100,2);
logging.info("Analyzing training set coverage");
fh = open(report, "w");
fh.write("Training coverage report\n");
fh.write("========================\n");
fh.write("Total sentences = {}\n".format(sents_cnt));
fh.write("Train sentences = {} ({})\n".format(trsents_cnt,pts));
fh.write("Total words = {}\n".format(nw));
fh.write("Train words = {} ({})\n".format(ntw,ptw));
        trset = set(trwords);
        missing = [x for x in words if x not in trset];
fh.write("List of words missing in training set\n")
fh.write("-"*80+"\n");
fh.write("\n".join(missing))
fh.close();
logging.info("Written training set coverage report to %s",report);
def get_split_ids(self,split):
self.check_corpus_init();
assert split=='training' or split=='testing' or split=='validation',\
"Incorrect split requested - choose {training testing validation}"
return self._corpusdf.query(split+'==True').index.values;
def sort_indexes(self,indexes,by=['duration']):
sdf = self._corpusdf.iloc[indexes].sort_values(by)
return sdf.index.values
def get_corpus_columns(self,idxs,keys):
self.check_corpus_init();
return self._corpusdf.loc[idxs,keys]
def get_corpus_data(self,idxs):
keys = ['wav','phn','wrd'];
df = self.get_corpus_columns(idxs,keys).values;
flist=[];
for wav,pseqf,wseqf in df:
pdf = pd.read_csv(pseqf,header=None,delim_whitespace=True);
wdf = pd.read_csv(wseqf,header=None,delim_whitespace=True)
#seq = np.ndarray.flatten(df.values);
flist.append([wav,pdf,wdf]);
return flist;
|
{"hexsha": "7f1bce323aa8b924b8b9a8c413729765d936a70e", "size": 14907, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/SpeechCorpus.py", "max_stars_repo_name": "madhurkashyap/boundary_detection", "max_stars_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/SpeechCorpus.py", "max_issues_repo_name": "madhurkashyap/boundary_detection", "max_issues_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/SpeechCorpus.py", "max_forks_repo_name": "madhurkashyap/boundary_detection", "max_forks_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9915492958, "max_line_length": 100, "alphanum_fraction": 0.5367947944, "include": true, "reason": "import numpy", "num_tokens": 3432}
|
#!/usr/bin/python
import os,sys
import lcm
import time
from lcm import LCM
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from threading import Thread
import threading
home_dir =os.getenv("HOME")
#print home_dir
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/dist-packages")
from drc.robot_state_t import robot_state_t
########################################################################################
def timestamp_now (): return int (time.time () * 1000000)
def on_ers(channel, data):
lc.publish("TRUE_ROBOT_STATE",data)
####################################################################
lc = lcm.LCM()
print "started"
sub1 = lc.subscribe("EST_ROBOT_STATE", on_ers)
try:
    while True:
        ## Handle LCM if new messages have arrived.
        lc.handle()
except KeyboardInterrupt:
    pass
lc.unsubscribe(sub1)
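# Usage sketch (editor's note; assumes a running LCM network with a
# publisher of EST_ROBOT_STATE messages):
#   python republish_ERS_as_TRS.py
# Every incoming EST_ROBOT_STATE message is re-published verbatim on the
# TRUE_ROBOT_STATE channel until the process is interrupted.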
|
{"hexsha": "58d84c101919b71b98c49df5e38e3ddc2da2d12d", "size": 925, "ext": "py", "lang": "Python", "max_stars_repo_path": "software/motion_estimate/state_sync/scripts/republish_ERS_as_TRS.py", "max_stars_repo_name": "liangfok/oh-distro", "max_stars_repo_head_hexsha": "eeee1d832164adce667e56667dafc64a8d7b8cee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 92, "max_stars_repo_stars_event_min_datetime": "2016-01-14T21:03:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T17:57:46.000Z", "max_issues_repo_path": "software/motion_estimate/state_sync/scripts/republish_ERS_as_TRS.py", "max_issues_repo_name": "liangfok/oh-distro", "max_issues_repo_head_hexsha": "eeee1d832164adce667e56667dafc64a8d7b8cee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 62, "max_issues_repo_issues_event_min_datetime": "2016-01-16T18:08:14.000Z", "max_issues_repo_issues_event_max_datetime": "2016-03-24T15:16:28.000Z", "max_forks_repo_path": "software/motion_estimate/state_sync/scripts/republish_ERS_as_TRS.py", "max_forks_repo_name": "liangfok/oh-distro", "max_forks_repo_head_hexsha": "eeee1d832164adce667e56667dafc64a8d7b8cee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2016-01-14T21:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T03:10:39.000Z", "avg_line_length": 21.511627907, "max_line_length": 88, "alphanum_fraction": 0.6259459459, "include": true, "reason": "import numpy", "num_tokens": 201}
|
! Runs a program in parallel using MPI
! (https://en.wikipedia.org/wiki/Message_Passing_Interface)
module HelloMpi
use mpi
use Constants, only: OUTPUT_LENGTH
implicit none
private
public :: hello_mpi
contains
!
! Print the ID of the process and the total number of them.
!
! Inputs:
! --------
!
! silent : if true, do not print to output (used in unit tests)
!
! Outputs:
! -------
!
! Returns: hello world string from the process
!
function hello_mpi(silent) result(result)
logical, intent(in) :: silent
character(len=OUTPUT_LENGTH) :: result
integer :: ifail
integer :: rank, size
call mpi_init(ifail)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ifail)
call mpi_comm_size(MPI_COMM_WORLD, size, ifail)
! Print the ID of the process and the total number of them
write(result, '(a, x, i0, x, a, x, i0)') "Hello from rank", rank, "of", size
if (.not. silent) write (0, *) trim(result)
call mpi_finalize(ifail)
end function
end module HelloMpi
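! A minimal driver sketch (editor's note; assumes a Constants module that
! provides OUTPUT_LENGTH and an MPI compiler wrapper such as mpif90):
!
!   program demo
!     use HelloMpi, only: hello_mpi
!     implicit none
!     character(len=:), allocatable :: s
!     s = trim(hello_mpi(.false.))
!   end program demo
!
! Build and run, e.g.:
!   mpif90 constants.f90 hello_mpi.f90 demo.f90 -o demo && mpirun -np 4 ./demo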
|
{"hexsha": "85d04a216b9e59d6b41356453a3988bd6eab2833", "size": 970, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/hello_mpi.f90", "max_stars_repo_name": "evgenyneu/fortran_mpi_hello_world", "max_stars_repo_head_hexsha": "1ce72f7b69f9dedeb22b41c312a44daaf776fd00", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/hello_mpi.f90", "max_issues_repo_name": "evgenyneu/fortran_mpi_hello_world", "max_issues_repo_head_hexsha": "1ce72f7b69f9dedeb22b41c312a44daaf776fd00", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hello_mpi.f90", "max_forks_repo_name": "evgenyneu/fortran_mpi_hello_world", "max_forks_repo_head_hexsha": "1ce72f7b69f9dedeb22b41c312a44daaf776fd00", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0869565217, "max_line_length": 78, "alphanum_fraction": 0.7051546392, "num_tokens": 264}
|
import Std.Tactic.Basic
import Std.Tactic.GuardExpr
import Std.Tactic.RCases
set_option linter.missingDocs false
example (x : α × β × γ) : True := by
rcases x with ⟨a, b, c⟩
guard_hyp a : α
guard_hyp b : β
guard_hyp c : γ
trivial
example (x : α × β × γ) : True := by
rcases x with ⟨(a : α) : id α, -, c : id γ⟩
guard_hyp a : α
fail_if_success have : β := by assumption
guard_hyp c : id γ
trivial
example (x : (α × β) × γ) : True := by
fail_if_success rcases x with ⟨_a, b, c⟩
fail_if_success rcases x with ⟨⟨a:β, b⟩, c⟩
rcases x with ⟨⟨a:α, b⟩, c⟩
guard_hyp a : α
guard_hyp b : β
guard_hyp c : γ
trivial
example : @Inhabited.{1} α × Option β ⊕ γ → True := by
rintro (⟨⟨a⟩, _ | b⟩ | c)
· guard_hyp a : α; trivial
· guard_hyp a : α; guard_hyp b : β; trivial
· guard_hyp c : γ; trivial
example : cond false Nat Int → cond true Int Nat → Nat ⊕ Unit → True := by
rintro (x y : Int) (z | u)
· guard_hyp x : Int; guard_hyp y : Int; guard_hyp z : Nat; trivial
· guard_hyp x : Int; guard_hyp y : Int; guard_hyp u : Unit; trivial
example (x y : Nat) (h : x = y) : True := by
rcases x with _|⟨⟩|z
· guard_hyp h : Nat.zero = y; trivial
· guard_hyp h : Nat.succ Nat.zero = y; trivial
· guard_hyp z : Nat
guard_hyp h : Nat.succ (Nat.succ z) = y; trivial
example (h : x = 3) (h₂ : x < 4) : x < 4 := by
rcases h with ⟨⟩
guard_hyp h₂ : 3 < 4; guard_target = 3 < 4; exact h₂
example (h : x = 3) (h₂ : x < 4) : x < 4 := by
rcases h with rfl
guard_hyp h₂ : 3 < 4; guard_target = 3 < 4; exact h₂
example (h : 3 = x) (h₂ : x < 4) : x < 4 := by
rcases h with ⟨⟩
guard_hyp h₂ : 3 < 4; guard_target = 3 < 4; exact h₂
example (h : 3 = x) (h₂ : x < 4) : x < 4 := by
rcases h with rfl
guard_hyp h₂ : 3 < 4; guard_target = 3 < 4; exact h₂
example (s : α ⊕ Empty) : True := by
rcases s with s|⟨⟨⟩⟩
guard_hyp s : α; trivial
example : True := by
obtain ⟨n : Nat, _h : n = n, -⟩ : ∃ n : Nat, n = n ∧ True
· exact ⟨0, rfl, trivial⟩
trivial
example : True := by
obtain (h : True) | ⟨⟨⟩⟩ : True ∨ False
· exact Or.inl trivial
guard_hyp h : True; trivial
example : True := by
obtain h | ⟨⟨⟩⟩ : True ∨ False := Or.inl trivial
guard_hyp h : True; trivial
example : True := by
obtain ⟨h, h2⟩ := And.intro trivial trivial
guard_hyp h : True; guard_hyp h2 : True; trivial
example : True := by
fail_if_success obtain ⟨h, h2⟩
trivial
example (x y : α × β) : True := by
rcases x, y with ⟨⟨a, b⟩, c, d⟩
guard_hyp a : α; guard_hyp b : β
guard_hyp c : α; guard_hyp d : β
trivial
example (x y : α ⊕ β) : True := by
rcases x, y with ⟨a|b, c|d⟩
· guard_hyp a : α; guard_hyp c : α; trivial
· guard_hyp a : α; guard_hyp d : β; trivial
· guard_hyp b : β; guard_hyp c : α; trivial
· guard_hyp b : β; guard_hyp d : β; trivial
example (i j : Nat) : (Σ' x, i ≤ x ∧ x ≤ j) → i ≤ j := by
intro h
rcases h' : h with ⟨x, h₀, h₁⟩
guard_hyp h' : h = ⟨x, h₀, h₁⟩
apply Nat.le_trans h₀ h₁
example (x : Quot fun _ _ : α => True) (h : x = x): x = x := by
rcases x with ⟨z⟩
guard_hyp z : α
guard_hyp h : Quot.mk (fun _ _ => True) z = Quot.mk (fun _ _ => True) z
guard_target = Quot.mk (fun _ _ => True) z = Quot.mk (fun _ _ => True) z
exact h
example (n : Nat) : True := by
obtain _one_lt_n | _n_le_one : 1 < n + 1 ∨ n + 1 ≤ 1 := Nat.lt_or_ge 1 (n + 1)
{trivial}; trivial
example (n : Nat) : True := by
obtain _one_lt_n | (_n_le_one : n + 1 ≤ 1) := Nat.lt_or_ge 1 (n + 1)
{trivial}; trivial
open Lean Elab Tactic in
/-- Asserts that the goal has `n` hypotheses. Used for testing. -/
elab "check_num_hyps " n:num : tactic => liftMetaMAtMain fun _ => do
-- +1 because the _example recursion decl is in the list
guard $ (← getLCtx).foldl (fun i _ => i+1) 0 = n.1.toNat + 1
example (h : ∃ x : Nat, x = x ∧ 1 = 1) : True := by
rcases h with ⟨-, _⟩
check_num_hyps 0
trivial
example (h : ∃ x : Nat, x = x ∧ 1 = 1) : True := by
rcases h with ⟨-, _, h⟩
check_num_hyps 1
guard_hyp h : 1 = 1
trivial
example (h : True ∨ True ∨ True) : True := by
rcases h with - | - | -
iterate 3 · check_num_hyps 0; trivial
example : Bool → False → True
| false => by rintro ⟨⟩
| true => by rintro ⟨⟩
example : (b : Bool) → cond b False False → True := by
rintro ⟨⟩ ⟨⟩
structure Baz {α : Type _} (f : α → α) : Prop where
[inst : Nonempty α]
h : f ∘ f = id
example {α} (f : α → α) (h : Baz f) : True := by rcases h with ⟨_⟩; trivial
example {α} (f : α → α) (h : Baz f) : True := by rcases h with @⟨_, _⟩; trivial
inductive Test : Nat → Prop
| a (n) : Test (2 + n)
| b {n} : n > 5 → Test (n * n)
example {n} (h : Test n) : n = n := by
have : True := by
rcases h with (a | b)
· guard_hyp a : Nat
trivial
· guard_hyp b : ‹Nat› > 5
trivial
· rcases h with (a | @⟨n, b⟩)
· guard_hyp a : Nat
trivial
· guard_hyp b : n > 5
trivial
example (h : a ≤ 2 ∨ 2 < a) : True := by
obtain ha1 | ha2 : a ≤ 2 ∨ 3 ≤ a := h
· guard_hyp ha1 : a ≤ 2; trivial
· guard_hyp ha2 : 3 ≤ a; trivial
example (h : a ≤ 2 ∨ 2 < a) : True := by
obtain ha1 | ha2 : a ≤ 2 ∨ 3 ≤ a := id h
· guard_hyp ha1 : a ≤ 2; trivial
· guard_hyp ha2 : 3 ≤ a; trivial
inductive BaseType : Type where
| one
inductive BaseTypeHom : BaseType → BaseType → Type where
| loop : BaseTypeHom one one
| id (X : BaseType) : BaseTypeHom X X
example : BaseTypeHom one one → Unit := by rintro ⟨_⟩ <;> constructor
|
{"author": "leanprover", "repo": "std4", "sha": "5507f9d8409f93b984ce04eccf4914d534e6fca2", "save_path": "github-repos/lean/leanprover-std4", "path": "github-repos/lean/leanprover-std4/std4-5507f9d8409f93b984ce04eccf4914d534e6fca2/test/rcases.lean"}
|
!
! Calculates the Coulomb matrix
!
!     v_{k,IJ} = < M_{k,I} | v | M_{k,J} >
!
! with the mixed-basis functions M (indices I and J).
!
! Note that
!
!     v_{k,JI} = v_{k,IJ}^* .
!
! In the code: coulomb(IJ,k) = v_{k,IJ} where only the upper triangle (I<=J) is stored.
!
! The Coulomb matrix v(IJ,k) diverges at the Gamma point. Here, we apply the decomposition
!
!     v_{k,IJ} = v_{IJ}^(0) + SUM_{lm} v_{IJ,lm}^(1) * Y_{lm}(k)^* / k^(2-l)
!
! with v_{JI}^(0)* = v_{IJ}^(0)  and  v_{JI,lm}^(1)* = (-1)^m v_{IJ,l,-m}^(1) ,
!
! where a = atom index, R_a = position vector, T_0 = Wigner-Seitz radius (scalar).
!
! In the code: coulomb(IJ,1)  = v_{IJ}^(0)  (again only the upper triangle, I<=J, is stored),
!              coulfac(IJ,lm) = v_{IJ,lm}^(1)
!
! For the PW contribution we have to construct plane waves within the MT spheres with the help
! of spherical Bessel functions. The value lexp (LEXP in gwinp) is the corresponding cutoff.
!
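!
! Indexing note (editor's illustration): several statements below still form
! the packed upper-triangle index
!     idum = ix*(ix-1)/2 + iy        (iy <= ix)
! of an apparently historical one-dimensional storage scheme, e.g.
! (iy,ix) = (2,3) gives idum = 3*2/2 + 2 = 5, while the matrices themselves
! are now held as full t_mat objects addressed as data_c(iy,ix).
!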
MODULE m_coulombmatrix
CONTAINS
SUBROUTINE coulombmatrix(fmpi, fi, mpdata, hybdat, xcpot, work_pack)
use m_work_package
use m_structureconstant
USE m_types
USE m_types_hybdat
USE m_juDFT
USE m_constants
USE m_trafo, ONLY: symmetrize, bramat_trafo
USE m_intgrf, ONLY: intgrf, intgrf_init
use m_util, only: primitivef
USE m_hsefunctional, ONLY: change_coulombmatrix
USE m_wrapper
USE m_io_hybinp
use m_ylm
use m_sphbes, only: sphbes
use m_calc_l_m_from_lm
use m_calc_mpsmat
IMPLICIT NONE
TYPE(t_xcpot_inbuild), INTENT(IN) :: xcpot
TYPE(t_mpi), INTENT(IN) :: fmpi
type(t_fleurinput), intent(in) :: fi
TYPE(t_mpdata), intent(in) :: mpdata
TYPE(t_hybdat), INTENT(INOUT) :: hybdat
type(t_work_package), intent(in) :: work_pack
! - local scalars -
INTEGER :: inviop
INTEGER :: nqnrm, iqnrm, iqnrm1, iqnrm2, iqnrmstart, iqnrmstep
INTEGER :: itype, l, ix, iy, iy0, i, j, lm, l1, l2, m1, m2, ineq, idum, ikpt
INTEGER :: lm1, lm2, itype1, itype2, ineq1, ineq2, n, n1, n2, ng
INTEGER :: ic, ic1, ic2, ic3, ic4
INTEGER :: igpt, igpt1, igpt2, igptp, igptp1, igptp2
INTEGER :: isym, isym1, isym2, igpt0
INTEGER :: ok, iatm1, iatm2
INTEGER :: m, im
INTEGER :: maxfac
LOGICAL :: lsym
REAL :: rdum, rdum1, rdum2
REAL :: svol, qnorm, qnorm1, qnorm2, gnorm
REAL :: fcoulfac
COMPLEX :: cdum, cexp, csum
! - local arrays -
INTEGER :: g(3)
INTEGER, ALLOCATABLE :: pqnrm(:, :)
INTEGER :: rrot(3, 3, fi%sym%nsym), invrrot(3, 3, fi%sym%nsym)
INTEGER, ALLOCATABLE :: iarr(:), POINTER(:, :, :, :)!,pointer(:,:,:)
INTEGER, ALLOCATABLE :: nsym_gpt(:, :), sym_gpt(:, :, :)
INTEGER :: nsym1(fi%kpts%nkpt + 1), sym1(fi%sym%nsym, fi%kpts%nkpt + 1)
INTEGER, ALLOCATABLE :: ngptm1(:)
INTEGER, ALLOCATABLE :: pgptm1(:, :)
REAL :: q(3), q1(3), q2(3)
REAL :: integrand(fi%atoms%jmtd), primf1(fi%atoms%jmtd), primf2(fi%atoms%jmtd)
REAL :: moment(maxval(mpdata%num_radbasfn), 0:maxval(fi%hybinp%lcutm1), fi%atoms%ntype), &
moment2(maxval(mpdata%num_radbasfn), fi%atoms%ntype)
REAL :: sphbes_var(fi%atoms%jmtd, 0:maxval(fi%hybinp%lcutm1))
REAL :: sphbesmoment1(fi%atoms%jmtd, 0:maxval(fi%hybinp%lcutm1))
REAL :: rarr(0:fi%hybinp%lexp + 1), rarr1(0:maxval(fi%hybinp%lcutm1))
REAL, ALLOCATABLE :: gmat(:, :), qnrm(:)
REAL, ALLOCATABLE :: sphbesmoment(:, :, :)
REAL, ALLOCATABLE :: sphbes0(:, :, :)
REAL, ALLOCATABLE :: olap(:, :, :, :), integral(:, :, :, :)
REAL, ALLOCATABLE :: gridf(:, :)
REAL :: facA(0:MAX(2*fi%atoms%lmaxd + maxval(fi%hybinp%lcutm1) + 1, 4*MAX(maxval(fi%hybinp%lcutm1), fi%hybinp%lexp) + 1))
REAL :: facB(0:MAX(2*fi%atoms%lmaxd + maxval(fi%hybinp%lcutm1) + 1, 4*MAX(maxval(fi%hybinp%lcutm1), fi%hybinp%lexp) + 1))
REAL :: facC(-1:MAX(2*fi%atoms%lmaxd + maxval(fi%hybinp%lcutm1) + 1, 4*MAX(maxval(fi%hybinp%lcutm1), fi%hybinp%lexp) + 1))
COMPLEX :: structconst((2*fi%hybinp%lexp + 1)**2, fi%atoms%nat, fi%atoms%nat, fi%kpts%nkpt) ! nw = 1
COMPLEX :: y((fi%hybinp%lexp + 1)**2)
COMPLEX :: dwgn(-maxval(fi%hybinp%lcutm1):maxval(fi%hybinp%lcutm1), -maxval(fi%hybinp%lcutm1):maxval(fi%hybinp%lcutm1), 0:maxval(fi%hybinp%lcutm1), fi%sym%nsym)
COMPLEX, ALLOCATABLE :: carr2(:, :), carr2a(:, :), carr2b(:, :)
COMPLEX, ALLOCATABLE :: structconst1(:, :)
INTEGER :: ishift, ishift1
INTEGER :: iatom, iatom1, entry_len
INTEGER :: indx1, indx2, indx3, indx4
TYPE(t_mat) :: coul_mtmt, mat, coulmat, smat, tmp
type(t_mat), allocatable :: coulomb(:)
CALL timestart("Coulomb matrix setup")
call timestart("prep in coulomb")
if (fmpi%is_root()) write (*, *) "start of coulomb calculation"
call mat%alloc(.True., maxval(mpdata%num_radbasfn), maxval(mpdata%num_radbasfn))
svol = SQRT(fi%cell%vol)
fcoulfac = fpi_const/fi%cell%vol
maxfac = MAX(2*fi%atoms%lmaxd + maxval(fi%hybinp%lcutm1) + 1, 4*MAX(maxval(fi%hybinp%lcutm1), fi%hybinp%lexp) + 1)
facA(0) = 1 !
facB(0) = 1 ! Define:
facC(-1:0) = 1 ! facA(i) = i!
DO i = 1, maxfac ! facB(i) = sqrt(i!)
facA(i) = facA(i - 1)*i ! facC(i) = (2i+1)!!
facB(i) = facB(i - 1)*SQRT(i*1.0) !
facC(i) = facC(i - 1)*(2*i + 1) !
END DO
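! Quick check of the recurrences above (editor's note):
! facA(3) = 3! = 6, facB(4) = sqrt(4!) = sqrt(24), facC(2) = 1*3*5 = 15.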
CALL intgrf_init(fi%atoms%ntype, fi%atoms%jmtd, fi%atoms%jri, fi%atoms%dx, fi%atoms%rmsh, gridf)
! Calculate the structure constant
CALL structureconstant(structconst, fi%cell, fi%hybinp, fi%atoms, fi%kpts, fmpi)
IF (fmpi%irank == 0) WRITE (oUnit, '(//A)') '### subroutine: coulombmatrix ###'
!
! Matrix allocation
!
call timestart("coulomb allocation")
call coul_mtmt%alloc(.False., maxval(hybdat%nbasm), maxval(hybdat%nbasm))
allocate(coulomb(fi%kpts%nkpt))
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
call coulomb(ikpt)%alloc(.False., hybdat%nbasm(ikpt), hybdat%nbasm(ikpt))
enddo
call timestop("coulomb allocation")
IF (fmpi%irank == 0) then
write (oUnit,*) "Size of coulomb matrix: " //&
float2str(sum([(coulomb(work_pack%k_packs(i)%nk)%size_mb(), i=1,work_pack%n_kpacks)])) // " MB"
endif
! Generate Symmetry:
! Reduce list of g-Points so that only one of each symm-equivalent is calculated
! calculate rotations in reciprocal space
DO isym = 1, fi%sym%nsym
IF (isym <= fi%sym%nop) THEN
inviop = fi%sym%invtab(isym)
rrot(:, :, isym) = TRANSPOSE(fi%sym%mrot(:, :, inviop))
DO l = 0, maxval(fi%hybinp%lcutm1)
dwgn(:, :, l, isym) = TRANSPOSE(fi%hybinp%d_wgn2(-maxval(fi%hybinp%lcutm1):maxval(fi%hybinp%lcutm1), &
-maxval(fi%hybinp%lcutm1):maxval(fi%hybinp%lcutm1), l, isym))
END DO
ELSE
inviop = isym - fi%sym%nop
rrot(:, :, isym) = -rrot(:, :, inviop)
dwgn(:, :, :, isym) = dwgn(:, :, :, inviop)
DO l = 0, maxval(fi%hybinp%lcutm1)
DO m1 = -l, l
DO m2 = -l, -1
cdum = dwgn(m1, m2, l, isym)
dwgn(m1, m2, l, isym) = dwgn(m1, -m2, l, isym)*(-1)**m2
dwgn(m1, -m2, l, isym) = cdum*(-1)**m2
END DO
END DO
END DO
END IF
END DO
invrrot(:, :, :fi%sym%nop) = rrot(:, :, fi%sym%invtab)
IF (fi%sym%nsym > fi%sym%nop) THEN
invrrot(:, :, fi%sym%nop + 1:) = rrot(:, :, fi%sym%invtab + fi%sym%nop)
END IF
! Get symmetry operations that leave bk(:,ikpt) invariant -> sym1
nsym1 = 0
DO ikpt = 1, fi%kpts%nkpt
isym1 = 0
DO isym = 1, fi%sym%nsym
! temporary fix until bramat_trafo is correct
! for systems with symmetries including translations
IF (isym > fi%sym%nop) THEN
isym2 = isym - fi%sym%nop
ELSE
isym2 = isym
END IF
IF (ANY(abs(fi%sym%tau(:, isym2)) > 1e-12)) CYCLE
IF (ALL(ABS(MATMUL(rrot(:, :, isym), fi%kpts%bk(:, ikpt)) - fi%kpts%bk(:, ikpt)) < 1e-12)) THEN
isym1 = isym1 + 1
sym1(isym1, ikpt) = isym
END IF
END DO
nsym1(ikpt) = isym1
END DO
! Define reduced lists of G points -> pgptm1(:,ikpt), ikpt=1,..,nkpt
!if(allocated(pgptm1)) deallocate(fi%hybinp%pgptm1)
allocate (pgptm1(maxval(mpdata%n_g), fi%kpts%nkptf), source=0) !in mixedbasis
allocate (iarr(maxval(mpdata%n_g)), source=0)
allocate (POINTER(fi%kpts%nkpt, &
MINVAL(mpdata%g(1, :)) - 1:MAXVAL(mpdata%g(1, :)) + 1, &
MINVAL(mpdata%g(2, :)) - 1:MAXVAL(mpdata%g(2, :)) + 1, &
MINVAL(mpdata%g(3, :)) - 1:MAXVAL(mpdata%g(3, :)) + 1), &
source=0)
allocate (ngptm1, mold=mpdata%n_g)
ngptm1 = 0
DO ikpt = 1, fi%kpts%nkpt
DO igpt = 1, mpdata%n_g(ikpt)
g = mpdata%g(:, mpdata%gptm_ptr(igpt, ikpt))
POINTER(ikpt, g(1), g(2), g(3)) = igpt
END DO
iarr = 0
j = 0
DO igpt = mpdata%n_g(ikpt), 1, -1
IF (iarr(igpt) == 0) THEN
j = j + 1
pgptm1(j, ikpt) = igpt
DO isym1 = 1, nsym1(ikpt)
g = MATMUL(rrot(:, :, sym1(isym1, ikpt)), mpdata%g(:, mpdata%gptm_ptr(igpt, ikpt)))
i = POINTER(ikpt, g(1), g(2), g(3))
IF (i == 0) call judft_error('coulombmatrix: zero pointer (bug?)')
iarr(i) = 1
END DO
END IF
END DO
ngptm1(ikpt) = j
END DO
deallocate (iarr)
! Distribute the work as equally as possible over the processes
call timestop("prep in coulomb")
call timestart("define gmat")
! Define gmat (symmetric)
allocate (gmat((fi%hybinp%lexp + 1)**2, (fi%hybinp%lexp + 1)**2))
gmat = 0
lm1 = 0
DO l1 = 0, fi%hybinp%lexp
DO m1 = -l1, l1
lm1 = lm1 + 1
lm2 = 0
lp1: DO l2 = 0, l1
DO m2 = -l2, l2
lm2 = lm2 + 1
IF (lm2 > lm1) EXIT lp1 ! Don't cross the diagonal!
gmat(lm1, lm2) = facB(l1 + l2 + m2 - m1)*facB(l1 + l2 + m1 - m2)/ &
(facB(l1 + m1)*facB(l1 - m1)*facB(l2 + m2)*facB(l2 - m2))/ &
SQRT(1.0*(2*l1 + 1)*(2*l2 + 1)*(2*(l1 + l2) + 1))*(fpi_const)**1.5
gmat(lm2, lm1) = gmat(lm1, lm2)
END DO
END DO LP1
END DO
END DO
call timestop("define gmat")
! Calculate moments of MT functions
call timestart("calc moments of MT")
DO itype = 1, fi%atoms%ntype
DO l = 0, fi%hybinp%lcutm1(itype)
DO i = 1, mpdata%num_radbasfn(l, itype)
! note that mpdata%radbasfn_mt already contains the factor rgrid
moment(i, l, itype) = intgrf(fi%atoms%rmsh(:, itype)**(l + 1)*mpdata%radbasfn_mt(:, i, l, itype), &
fi%atoms, itype, gridf)
END DO
END DO
DO i = 1, mpdata%num_radbasfn(0, itype)
moment2(i, itype) = intgrf(fi%atoms%rmsh(:, itype)**3*mpdata%radbasfn_mt(:, i, 0, itype), &
fi%atoms, itype, gridf)
END DO
END DO
call timestop("calc moments of MT")
call timestart("getnorm")
! Look for different qnorm = |k+G|, definition of qnrm and pqnrm.
CALL getnorm(fi%kpts, mpdata%g, mpdata%n_g, mpdata%gptm_ptr, qnrm, nqnrm, pqnrm, fi%cell)
allocate (sphbesmoment(0:fi%hybinp%lexp, fi%atoms%ntype, nqnrm), &
olap(maxval(mpdata%num_radbasfn), 0:maxval(fi%hybinp%lcutm1), fi%atoms%ntype, nqnrm), &
integral(maxval(mpdata%num_radbasfn), 0:maxval(fi%hybinp%lcutm1), fi%atoms%ntype, nqnrm))
sphbes_var = 0
sphbesmoment = 0
sphbesmoment1 = 0
olap = 0
integral = 0
! Calculate moments of spherical Bessel functions (for (2) and (3)) (->sphbesmoment)
! Calculate overlap of spherical Bessel functions with basis functions (for (2)) (->olap)
! Calculate overlap of sphbesmoment1(r,l) with basis functions (for (2)) (->integral)
! We use sphbes(r,l) = j_l(qr)
! and sphbesmoment1(r,l) = 1/r**(l-1) * INT(0..r) r'**(l+2) * j_l(qr') dr'
! + r**(l+2) * INT(r..S) r'**(1-l) * j_l(qr') dr' .
iqnrmstart = fmpi%irank + 1
iqnrmstep = fmpi%isize
call timestop("getnorm")
call timestart("Bessel calculation")
!DO iqnrm = iqnrmstart, nqnrm, iqnrmstep
do iqnrm = 1, nqnrm
qnorm = qnrm(iqnrm)
DO itype = 1, fi%atoms%ntype
ng = fi%atoms%jri(itype)
rdum = fi%atoms%rmt(itype)
sphbes_var = 0
sphbesmoment1 = 0
IF (abs(qnorm) < 1e-12) THEN
sphbesmoment(0, itype, iqnrm) = rdum**3/3
DO i = 1, ng
sphbes_var(i, 0) = 1
sphbesmoment1(i, 0) = fi%atoms%rmsh(i, itype)**2/3 + (rdum**2 - fi%atoms%rmsh(i, itype)**2)/2
END DO
ELSE
call sphbes(fi%hybinp%lexp + 1, qnorm*rdum, rarr)
DO l = 0, fi%hybinp%lexp
sphbesmoment(l, itype, iqnrm) = rdum**(l + 2)*rarr(l + 1)/qnorm
END DO
DO i = ng, 1, -1
rdum = fi%atoms%rmsh(i, itype)
call sphbes(fi%hybinp%lcutm1(itype) + 1, qnorm*rdum, rarr)
DO l = 0, fi%hybinp%lcutm1(itype)
sphbes_var(i, l) = rarr(l)
IF (l /= 0) THEN; rdum1 = -rdum**(1 - l)*rarr(l - 1)
ELSE; rdum1 = -COS(qnorm*rdum)/qnorm
ENDIF
IF (i == ng) rarr1(l) = rdum1
sphbesmoment1(i, l) = (rdum**(l + 2)*rarr(l + 1)/rdum**(l + 1) &
+ (rarr1(l) - rdum1)*rdum**l)/qnorm
END DO
END DO
END IF
DO l = 0, fi%hybinp%lcutm1(itype)
DO n = 1, mpdata%num_radbasfn(l, itype)
! note that mpdata%radbasfn_mt already contains one factor rgrid
olap(n, l, itype, iqnrm) = &
intgrf(fi%atoms%rmsh(:, itype)*mpdata%radbasfn_mt(:, n, l, itype)*sphbes_var(:, l), &
fi%atoms, itype, gridf)
integral(n, l, itype, iqnrm) = &
intgrf(fi%atoms%rmsh(:, itype)*mpdata%radbasfn_mt(:, n, l, itype)*sphbesmoment1(:, l), &
fi%atoms, itype, gridf)
END DO
END DO
END DO
END DO
call timestop("Bessel calculation")
!
! (1) Case < MT | v | MT >
!
! (1a) r,r' in same MT
call timestart("loop 1")
ix = 0
iy = 0
iy0 = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
! Here the diagonal block matrices do not depend on ineq. In (1b) they do depend on ineq, though.
DO l = 0, fi%hybinp%lcutm1(itype)
mat%matsize1=mpdata%num_radbasfn(l, itype)
mat%matsize2=mpdata%num_radbasfn(l, itype)
DO n2 = 1, mpdata%num_radbasfn(l, itype)
! note that mpdata%radbasfn_mt already contains the factor rgrid
CALL primitivef(primf1, mpdata%radbasfn_mt(:, n2, l, itype) &
*fi%atoms%rmsh(:, itype)**(l + 1), fi%atoms%rmsh, fi%atoms%dx, &
fi%atoms%jri, fi%atoms%jmtd, itype, fi%atoms%ntype)
! -itype is to enforce inward integration
CALL primitivef(primf2, mpdata%radbasfn_mt(:fi%atoms%jri(itype), n2, l, itype) &
/fi%atoms%rmsh(:fi%atoms%jri(itype), itype)**l, fi%atoms%rmsh, fi%atoms%dx, &
fi%atoms%jri, fi%atoms%jmtd, -itype, fi%atoms%ntype)
primf1(:fi%atoms%jri(itype)) = primf1(:fi%atoms%jri(itype))/fi%atoms%rmsh(:fi%atoms%jri(itype), itype)**l
primf2 = primf2*fi%atoms%rmsh(:, itype)**(l + 1)
DO n1 = 1, n2
integrand = mpdata%radbasfn_mt(:, n1, l, itype)*(primf1 + primf2)
mat%data_r(n1, n2) = fpi_const/(2*l + 1) * intgrf(integrand, fi%atoms, itype, gridf)
END DO
END DO
! distribute mat for m=-l,l on coulomb in block-matrix form
DO M = -l, l
DO n2 = 1, mpdata%num_radbasfn(l, itype)
ix = ix + 1
iy = iy0
DO n1 = 1, n2
iy = iy + 1
i = ix*(ix - 1)/2 + iy
j = n2*(n2 - 1)/2 + n1
coul_mtmt%data_c(iy, ix) = mat%data_r(n1, n2)
END DO
END DO
iy0 = ix
END DO
END DO
END DO
END DO
call timestop("loop 1")
call coul_mtmt%u2l()
call coulmat%alloc(.False., hybdat%nbasp, hybdat%nbasp)
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
! only the first rank handles the MT-MT part
call timestart("MT-MT part")
ix = 0
ic2 = 0
DO itype2 = 1, fi%atoms%ntype
DO ineq2 = 1, fi%atoms%neq(itype2)
ic2 = ic2 + 1
lm2 = 0
DO l2 = 0, fi%hybinp%lcutm1(itype2)
DO m2 = -l2, l2
lm2 = lm2 + 1
DO n2 = 1, mpdata%num_radbasfn(l2, itype2)
ix = ix + 1
iy = 0
ic1 = 0
lp2: DO itype1 = 1, itype2
DO ineq1 = 1, fi%atoms%neq(itype1)
ic1 = ic1 + 1
lm1 = 0
DO l1 = 0, fi%hybinp%lcutm1(itype1)
DO m1 = -l1, l1
lm1 = lm1 + 1
DO n1 = 1, mpdata%num_radbasfn(l1, itype1)
iy = iy + 1
IF (iy > ix) EXIT lp2 ! Don't cross the diagonal!
rdum = (-1)**(l2 + m2)*moment(n1, l1, itype1)*moment(n2, l2, itype2)*gmat(lm1, lm2)
l = l1 + l2
lm = l**2 + l + m1 - m2 + 1
idum = ix*(ix - 1)/2 + iy
coulmat%data_c(iy, ix) = coul_mtmt%data_c(iy,ix) &
+ EXP(CMPLX(0.0, 1.0)*tpi_const* &
dot_PRODUCT(fi%kpts%bk(:, ikpt), &
fi%atoms%taual(:, ic2) - fi%atoms%taual(:, ic1))) &
*rdum*structconst(lm, ic1, ic2, ikpt)
END DO
END DO
END DO
END DO
END DO lp2
END DO
END DO
END DO
END DO
END DO
call coulmat%u2l()
IF (fi%sym%invs) THEN
!symmetrize makes the Coulomb matrix real symmetric
CALL symmetrize(coulmat%data_c, hybdat%nbasp, hybdat%nbasp, 3, .FALSE., &
fi%atoms, fi%hybinp%lcutm1, maxval(fi%hybinp%lcutm1), &
mpdata%num_radbasfn, fi%sym)
ENDIF
call coulomb(ikpt)%copy(coulmat, 1,1)
call timestop("MT-MT part")
END DO
IF (maxval(mpdata%n_g) /= 0) THEN ! skip calculation of plane-wave contribution if mixed basis does not contain plane waves
!
! (2) Case < MT | v | PW >
!
! (2a) r in MT, r' everywhere
! (2b) r,r' in same MT
! (2c) r,r' in different MT
call timestart("loop over interst.")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
call loop_over_interst(fi, hybdat, mpdata, structconst, sphbesmoment, moment, moment2, &
qnrm, facc, gmat, integral, olap, pqnrm, pgptm1, ngptm1, ikpt, coulomb(ikpt))
call coulomb(ikpt)%u2l()
END DO
call timestop("loop over interst.")
deallocate (olap, integral)
!
! (3) Case < PW | v | PW >
!
! (3a) r,r' everywhere; r everywhere, r' in MT; r in MT, r' everywhere
! Calculate the hermitian matrix smat(i,j) = sum(a) integral(MT(a)) exp[i(Gj-Gi)r] dr
call calc_mpsmat(fi, mpdata, smat)
! Coulomb matrix, contribution (3a)
call timestart("coulomb matrix 3a")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
DO igpt0 = 1, ngptm1(ikpt)
igpt2 = pgptm1(igpt0, ikpt)
igptp2 = mpdata%gptm_ptr(igpt2, ikpt)
ix = hybdat%nbasp + igpt2
iy = hybdat%nbasp
q2 = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp2), fi%cell%bmat)
rdum2 = SUM(q2**2)
IF (abs(rdum2) > 1e-12) rdum2 = fpi_const/rdum2
DO igpt1 = 1, igpt2
igptp1 = mpdata%gptm_ptr(igpt1, ikpt)
iy = iy + 1
q1 = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp1), fi%cell%bmat)
idum = ix*(ix - 1)/2 + iy
rdum1 = SUM(q1**2)
IF (abs(rdum1) > 1e-12) rdum1 = fpi_const/rdum1
IF (ikpt == 1) THEN
IF (igpt1 /= 1) THEN
coulomb(1)%data_c(iy,ix) = -smat%data_c(igptp1, igptp2)*rdum1/fi%cell%vol
END IF
IF (igpt2 /= 1) THEN
coulomb(1)%data_c(iy,ix) = coulomb(1)%data_c(iy,ix) - smat%data_c(igptp1, igptp2)*rdum2/fi%cell%vol
END IF
ELSE
coulomb(ikpt)%data_c(iy,ix) = -smat%data_c(igptp1, igptp2)*(rdum1 + rdum2)/fi%cell%vol
END IF
END DO
IF (ikpt /= 1 .OR. igpt2 /= 1) THEN !
coulomb(ikpt)%data_c(iy,ix) = coulomb(ikpt)%data_c(iy,ix) + rdum2
END IF !
END DO
call coulomb(ikpt)%u2l()
END DO
call timestop("coulomb matrix 3a")
! (3b) r,r' in different MT
call timestart("coulomb matrix 3b")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
if (fmpi%is_root()) write (*, *) "coulomb pw-loop nk: ("//int2str(ikpt)//"/"//int2str(fi%kpts%nkpt)//")"
! group together quantities which depend only on l,m and igpt -> carr2a
allocate (carr2a((fi%hybinp%lexp + 1)**2, maxval(mpdata%n_g)), carr2b(fi%atoms%nat, maxval(mpdata%n_g)))
carr2a = 0; carr2b = 0
DO igpt = 1, mpdata%n_g(ikpt)
igptp = mpdata%gptm_ptr(igpt, ikpt)
iqnrm = pqnrm(igpt, ikpt)
q = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp), fi%cell%bmat)
call ylm4(fi%hybinp%lexp, q, y)
y = CONJG(y)
lm = 0
DO l = 0, fi%hybinp%lexp
DO M = -l, l
lm = lm + 1
carr2a(lm, igpt) = fpi_const*CMPLX(0.0, 1.0)**(l)*y(lm)
END DO
END DO
DO ic = 1, fi%atoms%nat
carr2b(ic, igpt) = EXP(-CMPLX(0.0, 1.0)*tpi_const* &
dot_PRODUCT(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp), fi%atoms%taual(:, ic)))
END DO
END DO
!finally we can loop over the plane waves (G: igpt1,igpt2)
call timestart("loop over plane waves")
allocate (carr2(fi%atoms%nat, (fi%hybinp%lexp + 1)**2), &
structconst1(fi%atoms%nat, (2*fi%hybinp%lexp + 1)**2))
carr2 = 0; structconst1 = 0
DO igpt0 = 1, ngptm1(ikpt)!1,ngptm1(ikpt)
igpt2 = pgptm1(igpt0, ikpt)
ix = hybdat%nbasp + igpt2
igptp2 = mpdata%gptm_ptr(igpt2, ikpt)
iqnrm2 = pqnrm(igpt2, ikpt)
iatom = 0
carr2 = 0
call timestart("itype loops")
DO itype2 = 1, fi%atoms%ntype
DO ineq2 = 1, fi%atoms%neq(itype2)
iatom = iatom + 1
cexp = CONJG(carr2b(iatom, igpt2))
structconst1(:, :) = transpose(structconst(:, :, iatom, ikpt))
! collapsed double loop over (l1,m1) and (l2,m2),
! i.e. l = 0..fi%hybinp%lexp and m = -l..l for each lm index
!$OMP PARALLEL DO default(none) private(lm1,l1,m1,lm2,l2,m2,cdum,l,lm) &
!$OMP shared(fi, sphbesmoment, itype2, iqnrm2, cexp, carr2a, igpt2, carr2, gmat, structconst1)
DO lm1 = 1, (fi%hybinp%lexp+1)**2
do lm2 = 1, (fi%hybinp%lexp+1)**2
call calc_l_m_from_lm(lm1, l1, m1)
call calc_l_m_from_lm(lm2, l2, m2)
cdum = (-1)**(l2 + m2)*sphbesmoment(l2, itype2, iqnrm2)*cexp*carr2a(lm2, igpt2)
l = l1 + l2
lm = l**2 + l - l1 - m2 + (m1 + l1) + 1
carr2(:, lm1) = carr2(:, lm1) + cdum*gmat(lm1, lm2)*structconst1(:, lm)
END DO
enddo
!$OMP end parallel do
END DO
END DO
call timestop("itype loops")
call timestart("igpt1")
iy = hybdat%nbasp
DO igpt1 = 1, igpt2
iy = iy + 1
igptp1 = mpdata%gptm_ptr(igpt1, ikpt)
iqnrm1 = pqnrm(igpt1, ikpt)
csum = 0
!$OMP PARALLEL DO default(none) &
!$OMP private(ic, itype, lm, l, m, cdum) &
!$OMP shared(fi, carr2b, sphbesmoment, iqnrm1, igpt1, carr2, carr2a) &
!$OMP reduction(+: csum) &
!$OMP collapse(2)
do ic = 1, fi%atoms%nat
do lm = 1, (fi%hybinp%lexp+1)**2
itype = fi%atoms%itype(ic)
call calc_l_m_from_lm(lm, l, m)
cdum = carr2b(ic, igpt1)*sphbesmoment(l, itype, iqnrm1)
csum = csum + cdum*carr2(ic, lm)*CONJG(carr2a(lm, igpt1)) ! for coulomb
END DO
END DO
!$OMP end parallel do
coulomb(ikpt)%data_c(iy,ix) = coulomb(ikpt)%data_c(iy,ix) + csum/fi%cell%vol
END DO
call timestop("igpt1")
END DO
deallocate (carr2, carr2a, carr2b, structconst1)
call coulomb(ikpt)%u2l()
call timestop("loop over plane waves")
END DO !ikpt
call timestop("coulomb matrix 3b")
! check if I own the gamma point
if(work_pack%has_nk(1)) then
! Add corrections from higher orders in (3b) to coulomb(:,1)
! (1) igpt1 > 1 , igpt2 > 1 (finite G vectors)
call timestart("add corrections from higher orders")
rdum = (fpi_const)**(1.5)/fi%cell%vol**2*gmat(1, 1)
!$OMP PARALLEL DO default(none) schedule(dynamic) &
!$OMP private(igpt0, igpt2, ix, iqnrm2, igptp2, q2, qnorm2, igpt1, iy)&
!$OMP private(iqnrm1,igptp1,q1,qnorm1,rdum1,iatm1,itype1,iatm2,itype2,cdum)&
!$OMP shared(ngptm1, pgptm1, hybdat, mpdata, coulomb, sphbesmoment,pqnrm,fi,rdum)
DO igpt0 = 1, ngptm1(1)
igpt2 = pgptm1(igpt0, 1)
IF (igpt2 == 1) CYCLE
ix = hybdat%nbasp + igpt2
iqnrm2 = pqnrm(igpt2, 1)
igptp2 = mpdata%gptm_ptr(igpt2, 1)
q2 = MATMUL(mpdata%g(:, igptp2), fi%cell%bmat)
qnorm2 = norm2(q2)
DO igpt1 = 2, igpt2
iy = hybdat%nbasp + igpt1
iqnrm1 = pqnrm(igpt1, 1)
igptp1 = mpdata%gptm_ptr(igpt1, 1)
q1 = MATMUL(mpdata%g(:, igptp1), fi%cell%bmat)
qnorm1 = norm2(q1)
rdum1 = dot_PRODUCT(q1, q2)/(qnorm1*qnorm2)
do iatm1 = 1,fi%atoms%nat
itype1 = fi%atoms%itype(iatm1)
do iatm2 = 1,fi%atoms%nat
itype2 = fi%atoms%itype(iatm2)
cdum = EXP(CMPLX(0.0, 1.0)*tpi_const* &
(-dot_PRODUCT(mpdata%g(:, igptp1), fi%atoms%taual(:, iatm1)) &
+ dot_PRODUCT(mpdata%g(:, igptp2), fi%atoms%taual(:, iatm2))))
coulomb(1)%data_c(iy, ix) = coulomb(1)%data_c(iy, ix) + rdum*cdum*( &
-sphbesmoment(1, itype1, iqnrm1) &
*sphbesmoment(1, itype2, iqnrm2)*rdum1/3 &
- sphbesmoment(0, itype1, iqnrm1) &
*sphbesmoment(2, itype2, iqnrm2)/6 &
- sphbesmoment(2, itype1, iqnrm1) &
*sphbesmoment(0, itype2, iqnrm2)/6 &
+ sphbesmoment(0, itype1, iqnrm1) &
*sphbesmoment(1, itype2, iqnrm2)/qnorm2/2 &
+ sphbesmoment(1, itype1, iqnrm1) &
*sphbesmoment(0, itype2, iqnrm2)/qnorm1/2)
END DO
END DO
END DO
END DO
!$OMP END PARALLEL DO
call coulomb(1)%u2l()
! (2) igpt1 = 1 , igpt2 > 1 (first G vector vanishes, second finite)
iy = hybdat%nbasp + 1
DO igpt0 = 1, ngptm1(1)
igpt2 = pgptm1(igpt0, 1); IF (igpt2 == 1) CYCLE
ix = hybdat%nbasp + igpt2
iqnrm2 = pqnrm(igpt2, 1)
igptp2 = mpdata%gptm_ptr(igpt2, 1)
qnorm2 = qnrm(iqnrm2)
idum = ix*(ix - 1)/2 + iy
DO itype1 = 1, fi%atoms%ntype
DO ineq1 = 1, fi%atoms%neq(itype1)
ic2 = 0
DO itype2 = 1, fi%atoms%ntype
DO ineq2 = 1, fi%atoms%neq(itype2)
ic2 = ic2 + 1
cdum = EXP(CMPLX(0.0, 1.0)*tpi_const*dot_PRODUCT(mpdata%g(:, igptp2), fi%atoms%taual(:, ic2)))
coulomb(1)%data_c(iy, ix) = coulomb(1)%data_c(iy, ix) &
+ rdum*cdum*fi%atoms%rmt(itype1)**3*( &
+sphbesmoment(0, itype2, iqnrm2)/30*fi%atoms%rmt(itype1)**2 &
- sphbesmoment(2, itype2, iqnrm2)/18 &
+ sphbesmoment(1, itype2, iqnrm2)/6/qnorm2)
END DO
END DO
END DO
END DO
END DO
call coulomb(1)%u2l()
! (3) igpt1 = 1 , igpt2 = 1 (vanishing G vectors)
iy = hybdat%nbasp + 1
ix = hybdat%nbasp + 1
idum = ix*(ix - 1)/2 + iy
DO itype1 = 1, fi%atoms%ntype
DO ineq1 = 1, fi%atoms%neq(itype1)
DO itype2 = 1, fi%atoms%ntype
DO ineq2 = 1, fi%atoms%neq(itype2)
coulomb(1)%data_c(iy, ix) = coulomb(1)%data_c(iy, ix) &
+ rdum*fi%atoms%rmt(itype1)**3*fi%atoms%rmt(itype2)**3* &
(fi%atoms%rmt(itype1)**2 + fi%atoms%rmt(itype2)**2)/90
END DO
END DO
END DO
END DO
call coulomb(1)%u2l()
call timestop("add corrections from higher orders")
endif
! (3c) r,r' in same MT
! Calculate sphbesintegral
call timestart("sphbesintegral")
allocate (sphbes0(-1:fi%hybinp%lexp + 2, fi%atoms%ntype, nqnrm),&
& carr2((fi%hybinp%lexp + 1)**2, maxval(mpdata%n_g)))
sphbes0 = 0; carr2 = 0
DO iqnrm = 1, nqnrm
DO itype = 1, fi%atoms%ntype
rdum = qnrm(iqnrm)*fi%atoms%rmt(itype)
call sphbes(fi%hybinp%lexp + 2, rdum, sphbes0(0, itype, iqnrm))
IF (abs(rdum) > 1e-12) sphbes0(-1, itype, iqnrm) = COS(rdum)/rdum
END DO
END DO
call timestop("sphbesintegral")
call timestart("loop 2")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
call timestart("harmonics setup")
DO igpt = 1, mpdata%n_g(ikpt)
igptp = mpdata%gptm_ptr(igpt, ikpt)
q = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp), fi%cell%bmat)
call ylm4(fi%hybinp%lexp, q, carr2(:, igpt))
END DO
call timestop("harmonics setup")
call perform_double_g_loop(fi, hybdat, mpdata, sphbes0, carr2, ngptm1,pgptm1,&
pqnrm,qnrm, nqnrm, ikpt, coulomb(ikpt))
call coulomb(ikpt)%u2l()
END DO
call timestop("loop 2")
deallocate (carr2)
!
! Symmetry-equivalent G vectors
!
! All elements are needed so send all data to all processes treating the
! respective k-points
allocate (carr2(maxval(hybdat%nbasm), 2), iarr(maxval(mpdata%n_g)))
allocate (nsym_gpt(mpdata%num_gpts(), fi%kpts%nkpt), &
sym_gpt(MAXVAL(nsym1), mpdata%num_gpts(), fi%kpts%nkpt))
nsym_gpt = 0; sym_gpt = 0
call timestart("loop 3")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
carr2 = 0; iarr = 0
iarr(pgptm1(:ngptm1(ikpt), ikpt)) = 1
DO igpt0 = 1, ngptm1(ikpt)
lsym = (1 <= igpt0) .AND. (ngptm1(ikpt) >= igpt0)
igpt2 = pgptm1(igpt0, ikpt)
carr2(:hybdat%nbasm(ikpt),2) = coulomb(ikpt)%data_c(:hybdat%nbasm(ikpt),hybdat%nbasp + igpt2)
IF (lsym) THEN
ic = 1
sym_gpt(ic, igpt0, ikpt) = igpt2
END IF
DO isym1 = 2, nsym1(ikpt)
isym = sym1(isym1, ikpt)
CALL bramat_trafo(carr2(:, 2), igpt2, ikpt, isym, .FALSE., POINTER(ikpt, :, :, :), &
fi%sym, rrot(:, :, isym), invrrot(:, :, isym), mpdata, fi%hybinp, &
fi%kpts, maxval(fi%hybinp%lcutm1), fi%atoms, fi%hybinp%lcutm1, &
mpdata%num_radbasfn, maxval(mpdata%num_radbasfn), dwgn(:, :, :, isym), &
hybdat%nbasp, hybdat%nbasm, carr2(:, 1), igpt1)
IF (iarr(igpt1) == 0) THEN
CALL bramat_trafo(carr2(:, 2), igpt2, ikpt, isym, .TRUE., POINTER(ikpt, :, :, :), &
fi%sym, rrot(:, :, isym), invrrot(:, :, isym), mpdata, fi%hybinp, &
fi%kpts, maxval(fi%hybinp%lcutm1), fi%atoms, fi%hybinp%lcutm1, &
mpdata%num_radbasfn, maxval(mpdata%num_radbasfn), &
dwgn(:, :, :, isym), hybdat%nbasp, hybdat%nbasm, carr2(:, 1), igpt1)
l = (hybdat%nbasp + igpt1 - 1)*(hybdat%nbasp + igpt1)/2
coulomb(ikpt)%data_c(:hybdat%nbasp + igpt1,hybdat%nbasp + igpt1) = carr2(:hybdat%nbasp + igpt1, 1)
coulomb(ikpt)%data_c(hybdat%nbasp + igpt1,:hybdat%nbasp + igpt1) = conjg(carr2(:hybdat%nbasp + igpt1, 1))
iarr(igpt1) = 1
IF (lsym) THEN
ic = ic + 1
sym_gpt(ic, igpt0, ikpt) = igpt1
END IF
END IF
END DO
nsym_gpt(igpt0, ikpt) = ic
END DO ! igpt0
call coulomb(ikpt)%u2l()
END DO ! ikpt
call timestop("loop 3")
call timestart("gap 1:")
deallocate (carr2, iarr, pgptm1)
END IF
deallocate (qnrm, pqnrm)
IF (xcpot%is_name("hse") .OR. xcpot%is_name("vhse")) THEN
!
! The HSE functional is realized subtracting erf/r from
! the normal Coulomb matrix
!
call judft_error("HSE is not implemented")
ELSE
! check for gamma
if(work_pack%has_nk(1)) then
CALL subtract_sphaverage(fi%sym, fi%cell, fi%atoms, mpdata, &
fi%hybinp, hybdat, hybdat%nbasm, gridf, coulomb(1))
endif
END IF
! transform Coulomb matrix to the biorthogonal set
! REFACTORING HINT: THIS IS DONE WITH THE INVERSE OF OLAP
! IT CAN EASILY BE REWRITTEN AS A LINEAR SYSTEM
call timestop("gap 1:")
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
call apply_inverse_olaps(mpdata, fi%atoms, fi%cell, hybdat, fi%sym, fi%kpts, ikpt, coulomb(ikpt))
call coulomb(ikpt)%u2l()
enddo
!call plot_coulombmatrix() -> code was shifted to plot_coulombmatrix.F90
!
! rearrange coulomb matrix
!
if(.not. allocated(hybdat%coul)) allocate(hybdat%coul(fi%kpts%nkpt))
call timestart("loop bla")
DO ikpt = 1, fi%kpts%nkpt
call hybdat%coul(ikpt)%init()
enddo
DO im = 1, work_pack%n_kpacks
ikpt = work_pack%k_packs(im)%nk
! unpack coulomb into coulomb(ikpt)
! only one processor per k-point calculates MT convolution
!
! store m-independent part of Coulomb matrix in MT spheres
! in coulomb_mt1(:mpdata%num_radbasfn(l,itype)-1,:mpdata%num_radbasfn(l,itype)-1,l,itype)
!
call timestart("m-indep. part of coulomb mtx")
indx1 = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
DO l = 0, fi%hybinp%lcutm1(itype)
IF (ineq == 1) THEN
DO n = 1, mpdata%num_radbasfn(l, itype) - 1
if (fi%sym%invs) THEN
hybdat%coul(ikpt)%mt1_r(n, 1:mpdata%num_radbasfn(l, itype) - 1, l, itype) &
= real(coulomb(ikpt)%data_c(indx1 + n, indx1 + 1:indx1 + mpdata%num_radbasfn(l, itype) - 1))
else
hybdat%coul(ikpt)%mt1_c(n, 1:mpdata%num_radbasfn(l, itype) - 1, l, itype) &
= real(coulomb(ikpt)%data_c(indx1 + n, indx1 + 1:indx1 + mpdata%num_radbasfn(l, itype) - 1))
endif
END DO
END IF
indx1 = indx1 + (2*l + 1)*mpdata%num_radbasfn(l, itype)
END DO
END DO
END DO
call timestop("m-indep. part of coulomb mtx")
!
! store m-dependent and atom-dependent part of Coulomb matrix in MT spheres
! in coulomb_mt2(:mpdata%num_radbasfn(l,itype)-1,-l:l,l,iatom)
!
call timestart("m-dep. part of coulomb mtx")
indx1 = 0
iatom = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
iatom = iatom + 1
DO l = 0, fi%hybinp%lcutm1(itype)
DO M = -l, l
if (fi%sym%invs) THEN
hybdat%coul(ikpt)%mt2_r(:mpdata%num_radbasfn(l, itype) - 1, M, l, iatom) &
= real(coulomb(ikpt)%data_c(indx1 + 1:indx1 + mpdata%num_radbasfn(l, itype) - 1, indx1 + mpdata%num_radbasfn(l, itype)))
else
hybdat%coul(ikpt)%mt2_c(:mpdata%num_radbasfn(l, itype) - 1, M, l, iatom) &
= coulomb(ikpt)%data_c(indx1 + 1:indx1 + mpdata%num_radbasfn(l, itype) - 1, indx1 + mpdata%num_radbasfn(l, itype))
endif
indx1 = indx1 + mpdata%num_radbasfn(l, itype)
END DO
END DO
END DO
END DO
call timestop("m-dep. part of coulomb mtx")
!
! due to the subtraction of the divergent part at the Gamma point
! additional contributions occur
!
call timestart("gamma point treatment")
IF (ikpt == 1) THEN
!
! store the contribution of the G=0 plane wave with the MT l=0 functions in
! coulomb_mt2(:mpdata%num_radbasfn(l=0,itype),0,maxval(fi%hybinp%lcutm1)+1,iatom)
!
ic = 0
iatom = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
iatom = iatom + 1
DO n = 1, mpdata%num_radbasfn(0, itype) - 1
if (fi%sym%invs) THEN
hybdat%coul(ikpt)%mt2_r(n, 0, maxval(fi%hybinp%lcutm1) + 1, iatom) = real(coulomb(ikpt)%data_c(ic + n, hybdat%nbasp + 1))
else
hybdat%coul(ikpt)%mt2_c(n, 0, maxval(fi%hybinp%lcutm1) + 1, iatom) = coulomb(ikpt)%data_c(ic + n, hybdat%nbasp + 1)
endif
END DO
ic = ic + SUM([((2*l + 1)*mpdata%num_radbasfn(l, itype), l=0, fi%hybinp%lcutm1(itype))])
END DO
END DO
!
! store the contributions between the MT s-like functions at atom1
! and the constant function at a different atom2
!
iatom = 0
ic = 0
DO itype = 1, fi%atoms%ntype
ishift = SUM([((2*l + 1)*mpdata%num_radbasfn(l, itype), l=0, fi%hybinp%lcutm1(itype))])
DO ineq = 1, fi%atoms%neq(itype)
iatom = iatom + 1
ic1 = ic + mpdata%num_radbasfn(0, itype)
iatom1 = 0
ic2 = 0
DO itype1 = 1, fi%atoms%ntype
ishift1 = SUM([((2*l1 + 1)*mpdata%num_radbasfn(l1, itype1), l1=0, fi%hybinp%lcutm1(itype1))])
DO ineq1 = 1, fi%atoms%neq(itype1)
iatom1 = iatom1 + 1
ic3 = ic2 + 1
ic4 = ic3 + mpdata%num_radbasfn(0, itype1) - 2
IF (fi%sym%invs) THEN
hybdat%coul(ikpt)%mt3_r(:mpdata%num_radbasfn(0, itype1) - 1, iatom, iatom1) = real(coulomb(ikpt)%data_c(ic1, ic3:ic4))
ELSE
hybdat%coul(ikpt)%mt3_c(:mpdata%num_radbasfn(0, itype1) - 1, iatom, iatom1) &
= CONJG(coulomb(ikpt)%data_c(ic1, ic3:ic4))
ENDIF
ic2 = ic2 + ishift1
END DO
END DO
ic = ic + ishift
END DO
END DO
!test
iatom = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
iatom = iatom + 1
if (fi%sym%invs) THEN
IF (MAXVAL(ABS(hybdat%coul(ikpt)%mt2_r(:mpdata%num_radbasfn(0, itype) - 1, 0, 0, iatom) &
- hybdat%coul(ikpt)%mt3_r(:mpdata%num_radbasfn(0, itype) - 1, iatom, iatom))) > 1E-08) &
call judft_error('coulombmatrix: coulomb_mt2 and coulomb_mt3 are inconsistent')
else
IF (MAXVAL(ABS(hybdat%coul(ikpt)%mt2_c(:mpdata%num_radbasfn(0, itype) - 1, 0, 0,iatom) &
- hybdat%coul(ikpt)%mt3_c(:mpdata%num_radbasfn(0, itype) - 1, iatom,iatom))) > 1E-08) &
call judft_error('coulombmatrix: coulomb_mt2 and coulomb_mt3 are inconsistent')
endif
END DO
END DO
END IF
call timestop("gamma point treatment")
!
! add the residual MT contributions, i.e. those functions with a moment,
! to the matrix coulomb_mtir, which is fully occupied
!
call timestart("residual MT contributions")
ic = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
DO l = 0, fi%hybinp%lcutm1(itype)
DO M = -l, l
ic = ic + 1
END DO
END DO
END DO
END DO
indx1 = 0; indx2 = 0; indx3 = 0; indx4 = 0
DO itype = 1, fi%atoms%ntype
DO ineq = 1, fi%atoms%neq(itype)
DO l = 0, fi%hybinp%lcutm1(itype)
DO M = -l, l
indx1 = indx1 + 1
indx3 = indx3 + mpdata%num_radbasfn(l, itype)
indx2 = 0
indx4 = 0
DO itype1 = 1, fi%atoms%ntype
DO ineq1 = 1, fi%atoms%neq(itype1)
DO l1 = 0, fi%hybinp%lcutm1(itype1)
DO m1 = -l1, l1
indx2 = indx2 + 1
indx4 = indx4 + mpdata%num_radbasfn(l1, itype1)
IF (indx4 < indx3) CYCLE
IF (fi%sym%invs) THEN
hybdat%coul(ikpt)%mtir_r(indx1, indx2) = real(coulomb(ikpt)%data_c(indx3, indx4))
hybdat%coul(ikpt)%mtir_r(indx2, indx1) = hybdat%coul(ikpt)%mtir_r(indx1, indx2)
ELSE
hybdat%coul(ikpt)%mtir_c(indx1, indx2) = coulomb(ikpt)%data_c(indx3, indx4)
hybdat%coul(ikpt)%mtir_c(indx2, indx1) = CONJG(hybdat%coul(ikpt)%mtir_c(indx1, indx2))
ENDIF
END DO
END DO
END DO
END DO
DO igpt = 1, mpdata%n_g(ikpt)
indx2 = indx2 + 1
IF (fi%sym%invs) THEN
hybdat%coul(ikpt)%mtir_r(indx1, indx2) = real(coulomb(ikpt)%data_c(indx3, hybdat%nbasp + igpt))
hybdat%coul(ikpt)%mtir_r(indx2, indx1) = hybdat%coul(ikpt)%mtir_r(indx1, indx2)
ELSE
hybdat%coul(ikpt)%mtir_c(indx1, indx2) = coulomb(ikpt)%data_c(indx3, hybdat%nbasp + igpt)
hybdat%coul(ikpt)%mtir_c(indx2, indx1) = CONJG(hybdat%coul(ikpt)%mtir_c(indx1, indx2))
ENDIF
END DO
END DO
END DO
END DO
END DO
call timestop("residual MT contributions")
IF (indx1 /= ic) call judft_error('coulombmatrix: error index counting')
!
! add ir part to the matrix coulomb_mtir
!
if (fi%sym%invs) THEN
hybdat%coul(ikpt)%mtir_r(ic + 1:ic + mpdata%n_g(ikpt), ic + 1:ic + mpdata%n_g(ikpt)) &
= real(coulomb(ikpt)%data_c(hybdat%nbasp + 1:hybdat%nbasm(ikpt), hybdat%nbasp + 1:hybdat%nbasm(ikpt)))
ic2 = indx1 + mpdata%n_g(ikpt)
else
hybdat%coul(ikpt)%mtir_c(ic + 1:ic + mpdata%n_g(ikpt), ic + 1:ic + mpdata%n_g(ikpt)) &
= coulomb(ikpt)%data_c(hybdat%nbasp + 1:hybdat%nbasm(ikpt), hybdat%nbasp + 1:hybdat%nbasm(ikpt))
ic2 = indx1 + mpdata%n_g(ikpt)
end if
call coulomb(ikpt)%free()
END DO ! ikpt
call timestop("loop bla")
CALL timestop("Coulomb matrix setup")
END SUBROUTINE coulombmatrix
! Calculate body of Coulomb matrix at Gamma point: v_IJ = SUM(G) c^*_IG c_JG 4*pi/G**2 .
! For this we must subtract from coulomb(:,1) the spherical average of a term that comes
! from the fact that MT functions have k-dependent Fourier coefficients (see script).
SUBROUTINE subtract_sphaverage(sym, cell, atoms, mpdata, hybinp, hybdat, nbasm1, gridf, coulomb)
USE m_types
USE m_constants
USE m_wrapper
USE m_trafo
USE m_util
use m_intgrf
USE m_olap
IMPLICIT NONE
TYPE(t_sym), INTENT(IN) :: sym
TYPE(t_cell), INTENT(IN) :: cell
TYPE(t_atoms), INTENT(IN) :: atoms
TYPE(t_mpdata), intent(in) :: mpdata
TYPE(t_hybinp), INTENT(IN) :: hybinp
TYPE(t_hybdat), INTENT(IN) :: hybdat
INTEGER, INTENT(IN) :: nbasm1(:)
REAL, INTENT(IN) :: gridf(:, :)
type(t_mat), intent(inout) :: coulomb
! - local scalars -
INTEGER :: l, i, j, n, nn, itype, ieq, M
! - local arrays -
TYPE(t_mat) :: olap
!COMPLEX , ALLOCATABLE :: constfunc(:) !can also be real in inversion case
COMPLEX :: coeff(nbasm1(1)), cderiv(nbasm1(1), -1:1), claplace(nbasm1(1))
call timestart("subtract_sphaverage")
CALL olap%alloc(sym%invs, mpdata%n_g(1), mpdata%n_g(1), 0.)
n = nbasm1(1)
nn = n*(n + 1)/2
CALL olap_pw(olap, mpdata%g(:, mpdata%gptm_ptr(:mpdata%n_g(1), 1)), mpdata%n_g(1), atoms, cell)
! Define coefficients (coeff) and their derivatives (cderiv,claplace)
coeff = 0
cderiv = 0
claplace = 0
j = 0
DO itype = 1, atoms%ntype
DO ieq = 1, atoms%neq(itype)
DO l = 0, hybinp%lcutm1(itype)
DO M = -l, l
DO i = 1, mpdata%num_radbasfn(l, itype)
j = j + 1
IF (l == 0) THEN
coeff(j) = SQRT(fpi_const) &
*intgrf(atoms%rmsh(:, itype)*mpdata%radbasfn_mt(:, i, 0, itype), &
atoms, itype, gridf) &
/SQRT(cell%vol)
claplace(j) = -SQRT(fpi_const) &
*intgrf(atoms%rmsh(:, itype)**3*mpdata%radbasfn_mt(:, i, 0, itype), &
atoms, itype, gridf) &
/SQRT(cell%vol)
ELSE IF (l == 1) THEN
cderiv(j, M) = -SQRT(fpi_const/3)*CMPLX(0.0, 1.0) &
*intgrf(atoms%rmsh(:, itype)**2*mpdata%radbasfn_mt(:, i, 1, itype), &
atoms, itype, gridf) &
/SQRT(cell%vol)
END IF
END DO
END DO
END DO
END DO
END DO
IF (olap%l_real) THEN
coeff(hybdat%nbasp + 1:n) = olap%data_r(1, 1:n - hybdat%nbasp)
else
coeff(hybdat%nbasp + 1:n) = olap%data_c(1, 1:n - hybdat%nbasp)
END IF
IF (sym%invs) THEN
CALL symmetrize(coeff, 1, nbasm1(1), 2, .FALSE., &
atoms, hybinp%lcutm1, maxval(hybinp%lcutm1), &
mpdata%num_radbasfn, sym)
CALL symmetrize(claplace, 1, nbasm1(1), 2, .FALSE., &
atoms, hybinp%lcutm1, maxval(hybinp%lcutm1), &
mpdata%num_radbasfn, sym)
CALL symmetrize(cderiv(:, -1), 1, nbasm1(1), 2, .FALSE., &
atoms, hybinp%lcutm1, maxval(hybinp%lcutm1), &
mpdata%num_radbasfn, sym)
CALL symmetrize(cderiv(:, 0), 1, nbasm1(1), 2, .FALSE., &
atoms, hybinp%lcutm1, maxval(hybinp%lcutm1), &
mpdata%num_radbasfn, sym)
CALL symmetrize(cderiv(:, 1), 1, nbasm1(1), 2, .FALSE., &
atoms, hybinp%lcutm1, maxval(hybinp%lcutm1), &
mpdata%num_radbasfn, sym)
ENDIF
! Subtract the head contributions from the coulomb matrix to obtain the body
l = 0
DO j = 1, n
DO i = 1, j
l = l + 1
coulomb%data_c(i,j) = coulomb%data_c(i,j) - fpi_const/3 &
*(dot_PRODUCT(cderiv(i, :), cderiv(j, :)) &
+ (CONJG(coeff(i))*claplace(j) &
+ CONJG(claplace(i))*coeff(j))/2)
END DO
END DO
call coulomb%u2l()
call timestop("subtract_sphaverage")
END SUBROUTINE subtract_sphaverage
! ---------
! Returns a list of (k+G) vector lengths in qnrm(1:nqnrm) and the corresponding pointer pqnrm(1:ngpt(ikpt),ikpt)
SUBROUTINE getnorm(kpts, gpt, ngpt, pgpt, qnrm, nqnrm, pqnrm, cell)
USE m_types
USE m_juDFT
IMPLICIT NONE
TYPE(t_cell), INTENT(IN) :: cell
TYPE(t_kpts), INTENT(IN) :: kpts
INTEGER, INTENT(IN) :: ngpt(:), gpt(:, :), pgpt(:, :)!(dim,kpts%nkpt)
REAL, ALLOCATABLE :: qnrm(:), help(:)
INTEGER, INTENT(INOUT) :: nqnrm
INTEGER, ALLOCATABLE :: pqnrm(:, :)
INTEGER :: i, j, ikpt, igpt, igptp
REAL :: q(3), qnorm
allocate (qnrm(MAXVAL(ngpt)*kpts%nkpt), pqnrm(MAXVAL(ngpt), kpts%nkpt))
i = 0
DO ikpt = 1, kpts%nkpt
igptloop: DO igpt = 1, ngpt(ikpt)
igptp = pgpt(igpt, ikpt)
IF (igptp == 0) call judft_error('getnorm: zero pointer (bug?)')
q = MATMUL(kpts%bk(:, ikpt) + gpt(:, igptp), cell%bmat)
qnorm = norm2(q)
DO j = 1, i
IF (ABS(qnrm(j) - qnorm) < 1e-12) THEN
pqnrm(igpt, ikpt) = j
CYCLE igptloop
END IF
END DO
i = i + 1
qnrm(i) = qnorm
pqnrm(igpt, ikpt) = i
END DO igptloop
END DO
nqnrm = i
allocate (help(nqnrm))
help(1:nqnrm) = qnrm(1:nqnrm)
deallocate (qnrm)
allocate (qnrm(1:nqnrm))
qnrm = help
END SUBROUTINE getnorm
subroutine apply_inverse_olaps(mpdata, atoms, cell, hybdat, sym, kpts, ikpt, coulomb)
USE m_olap, ONLY: olap_pw
USE m_types
use m_judft
implicit none
type(t_mpdata), intent(in) :: mpdata
type(t_atoms), intent(in) :: atoms
type(t_cell), intent(in) :: cell
type(t_hybdat), intent(in) :: hybdat
type(t_sym), intent(in) :: sym
type(t_kpts), intent(in) :: kpts
type(t_mat), intent(inout) :: coulomb
integer, intent(in) :: ikpt
type(t_mat) :: olap, coulhlp, coul_submtx
integer :: nbasm
call timestart("solve olap linear eq. sys")
nbasm = hybdat%nbasp + mpdata%n_g(ikpt)
CALL olap%alloc(sym%invs, mpdata%n_g(ikpt), mpdata%n_g(ikpt), 0.0)
!calculate IR overlap-matrix
CALL olap_pw(olap, mpdata%g(:, mpdata%gptm_ptr(:mpdata%n_g(ikpt), ikpt)), mpdata%n_g(ikpt), atoms, cell)
! compute x = O^-1 * coulomb%data_c(hybdat%nbasp + 1:, :),
! rewritten as the linear system O * x = C
call coul_submtx%alloc(sym%invs, mpdata%n_g(ikpt), nbasm)
if (coul_submtx%l_real) then
coul_submtx%data_r = real(coulomb%data_c(hybdat%nbasp + 1:, :))
else
coul_submtx%data_c = coulomb%data_c(hybdat%nbasp + 1:, :)
endif
call olap%linear_problem(coul_submtx)
if (coul_submtx%l_real) then
coulomb%data_c(hybdat%nbasp + 1:, :) = coul_submtx%data_r
coul_submtx%data_r = real(transpose(coulomb%data_c(:, hybdat%nbasp + 1:)))
else
coulomb%data_c(hybdat%nbasp + 1:, :) = coul_submtx%data_c
coul_submtx%data_c = conjg(transpose(coulomb%data_c(:, hybdat%nbasp + 1:)))
endif
! compute X = coulomb%data_c(:, hybdat%nbasp + 1:) * O^-1,
! rewritten as O^T * X^T = C^T
! reload O, since the solver destroys it.
CALL olap_pw(olap, mpdata%g(:, mpdata%gptm_ptr(:mpdata%n_g(ikpt), ikpt)), mpdata%n_g(ikpt), atoms, cell)
! Notice O^H = O: the overlap matrix is Hermitian (symmetric in the real case)
call olap%linear_problem(coul_submtx)
if (coul_submtx%l_real) then
coulomb%data_c(:, hybdat%nbasp + 1:) = transpose(coul_submtx%data_r)
else
coulomb%data_c(:, hybdat%nbasp + 1:) = conjg(transpose(coul_submtx%data_c))
endif
call coul_submtx%free()
call olap%free()
call timestop("solve olap linear eq. sys")
end subroutine apply_inverse_olaps
subroutine loop_over_interst(fi, hybdat, mpdata, structconst, sphbesmoment, moment, moment2, &
qnrm, facc, gmat, integral, olap, pqnrm, pgptm1, ngptm1, ikpt, coulmat)
use m_types
use m_juDFT
use m_ylm, only: ylm4
use m_constants, only: fpi_const, tpi_const
USE m_trafo, ONLY: symmetrize
use m_calc_l_m_from_lm
implicit none
type(t_fleurinput), intent(in) :: fi
type(t_hybdat), intent(in) :: hybdat
type(t_mpdata), intent(in) :: mpdata
REAL, intent(in) :: sphbesmoment(0:, :, :), qnrm(:), facC(-1:), gmat(:, :), moment(:, 0:, :), moment2(:, :)
real, intent(in) :: integral(:, 0:, :, :), olap(:, 0:, :, :)
integer, intent(in) :: ikpt, ngptm1(:), pqnrm(:, :), pgptm1(:, :)
complex, intent(in) :: structconst(:, :, :, :)
type(t_mat), intent(inout) :: coulmat
integer :: igpt0, igpt, igptp, iqnrm, niter
integer :: ix, iy, ic, itype, lm, l, m, itype1, ic1, l1, m1, lm1
integer :: l2, m2, lm2, n, i, idum, iatm, j_type, j_l, iy_start, j_m, j_lm
real :: q(3), qnorm, svol, tmp_vec(3)
COMPLEX :: y((fi%hybinp%lexp + 1)**2), y1((fi%hybinp%lexp + 1)**2), y2((fi%hybinp%lexp + 1)**2)
complex :: csum, csumf(9), cdum, cexp
integer, allocatable :: lm_arr(:), ic_arr(:)
coulmat%data_c(:hybdat%nbasp,hybdat%nbasp+1:) = 0
svol = SQRT(fi%cell%vol)
! start to loop over interstitial plane waves
DO igpt0 = 1, ngptm1(ikpt) !1,ngptm1(ikpt)
igpt = pgptm1(igpt0, ikpt)
igptp = mpdata%gptm_ptr(igpt, ikpt)
ix = hybdat%nbasp + igpt
q = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp), fi%cell%bmat)
qnorm = norm2(q)
iqnrm = pqnrm(igpt, ikpt)
IF (ABS(qnrm(iqnrm) - qnorm) > 1e-12) then
call judft_error('coulombmatrix: qnorm does not equal corresponding element in qnrm (bug?)') ! We shouldn't stop here!
endif
tmp_vec = MATMUL(fi%kpts%bk(:, fi%kpts%nkpt), fi%cell%bmat)
call ylm4(2, tmp_vec, y1)
tmp_vec = MATMUL(mpdata%g(:, igptp), fi%cell%bmat)
call ylm4(2, tmp_vec, y2)
call ylm4(fi%hybinp%lexp, q, y)
y1 = CONJG(y1); y2 = CONJG(y2); y = CONJG(y)
! this collapses the nested loops do ic=1,atoms%nat / do lm=1,... into one index
call collapse_ic_and_lm_loop(fi%atoms, fi%hybinp%lcutm1, niter, ic_arr, lm_arr)
!$OMP PARALLEL DO default(none) &
!$OMP private(ic, lm, itype, l, m, csum, csumf, ic1, itype1, cexp, lm1, l2, cdum, m2, lm2, iy) &
!$OMP private(j_m, j_type, iy_start, l1, m1) &
!$OMP shared(ic_arr, lm_arr, fi, mpdata, olap, qnorm, moment, integral, hybdat, coulmat, svol) &
!$OMP shared(moment2, ix, igpt, facc, structconst, y, y1, y2, gmat, iqnrm, sphbesmoment, ikpt) &
!$OMP shared(igptp, niter) &
!$OMP schedule(dynamic)
do i = 1,niter
ic = ic_arr(i)
lm = lm_arr(i)
itype = fi%atoms%itype(ic)
call calc_l_m_from_lm(lm, l, m)
! calculate sum over lm and centers for (2c) -> csum, csumf
csum = 0
csumf = 0
do ic1 = 1, fi%atoms%nat
itype1 = fi%atoms%itype(ic1)
cexp = fpi_const*EXP(CMPLX(0.0, 1.0)*tpi_const &
*(dot_PRODUCT(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp), fi%atoms%taual(:, ic1)) &
- dot_PRODUCT(fi%kpts%bk(:, ikpt), fi%atoms%taual(:, ic))))
do lm1 = 1, (fi%hybinp%lexp+1)**2
call calc_l_m_from_lm(lm1, l1, m1)
l2 = l + l1 ! for structconst
cdum = sphbesmoment(l1, itype1, iqnrm)*CMPLX(0.0, 1.0)**(l1)*cexp
m2 = M - m1 ! for structconst
lm2 = l2**2 + l2 + m2 + 1 !
csum = csum - (-1)**(m1 + l1)*gmat(lm1, lm)*y(lm1)*cdum*structconst(lm2, ic, ic1, ikpt)
END DO
! add contribution of (2c) to csum and csumf coming from linear and quadratic orders of Y_lm*(G) / G * j_(l+1)(GS)
IF (ikpt == 1 .AND. l <= 2) THEN
cexp = EXP(CMPLX(0.0, 1.0)*tpi_const*dot_PRODUCT(mpdata%g(:, igptp), fi%atoms%taual(:, ic1))) &
*gmat(lm, 1)*fpi_const/fi%cell%vol
csumf(lm) = csumf(lm) - cexp*SQRT(fpi_const)* &
CMPLX(0.0, 1.0)**l*sphbesmoment(0, itype1, iqnrm)/facC(l - 1)
IF (l == 0) THEN
IF (igpt /= 1) THEN
csum = csum - cexp*(sphbesmoment(0, itype1, iqnrm)*fi%atoms%rmt(itype1)**2 - &
sphbesmoment(2, itype1, iqnrm)*2.0/3)/10
ELSE
csum = csum - cexp*fi%atoms%rmt(itype1)**5/30
END IF
ELSE IF (l == 1) THEN
csum = csum + cexp*CMPLX(0.0, 1.0)*SQRT(fpi_const) &
*sphbesmoment(1, itype1, iqnrm)*y(lm)/3
END IF
END IF
END DO
! add contribution of (2a) to csumf
IF (ikpt == 1 .AND. igpt == 1 .AND. l <= 2) THEN
csumf(lm) = csumf(lm) + (fpi_const)**2*CMPLX(0.0, 1.0)**l/facC(l)
END IF
! finally define coulomb
cdum = (fpi_const)**2*CMPLX(0.0, 1.0)**(l)*y(lm) &
*EXP(CMPLX(0.0, 1.0)*tpi_const &
*dot_PRODUCT(mpdata%g(:, igptp), fi%atoms%taual(:, ic)))
! calculate iy_start on the fly, since the OpenMP loop iterations cannot accumulate it sequentially
iy_start = 0
do iatm = 1, ic-1
j_type = fi%atoms%itype(iatm)
do j_l = 0,fi%hybinp%lcutm1(j_type)
iy_start = iy_start + mpdata%num_radbasfn(j_l, j_type) * (2*j_l+1)
end do
end do
do j_lm = 1,lm-1
call calc_l_m_from_lm(j_lm, j_l, j_m)
iy_start = iy_start + mpdata%num_radbasfn(j_l, itype)
enddo
DO n = 1, mpdata%num_radbasfn(l, itype)
iy = iy_start + n
IF (ikpt == 1 .AND. igpt == 1) THEN
IF (l == 0) coulmat%data_c(iy, ix) = &
-cdum*moment2(n, itype)/6/svol ! (2a)
coulmat%data_c(iy, ix) = coulmat%data_c(iy, ix) &
+ (-cdum/(2*l + 1)*integral(n, l, itype, iqnrm) & ! (2b)&
+ csum*moment(n, l, itype))/svol ! (2c)
ELSE
coulmat%data_c(iy, ix) = &
(cdum*olap(n, l, itype, iqnrm)/qnorm**2 & ! (2a)&
- cdum/(2*l + 1)*integral(n, l, itype, iqnrm) & ! (2b)&
+ csum*moment(n, l, itype))/svol ! (2c)
END IF
END DO
END DO ! collapsed atom & lm loop (ic)
!$OMP END PARALLEL DO
END DO
IF (fi%sym%invs) THEN
CALL symmetrize(coulmat%data_c(:hybdat%nbasp,hybdat%nbasp+1:), hybdat%nbasp, mpdata%n_g(ikpt), 1, .FALSE., &
fi%atoms, fi%hybinp%lcutm1, maxval(fi%hybinp%lcutm1), mpdata%num_radbasfn, fi%sym)
ENDIF
endsubroutine loop_over_interst
subroutine perform_double_g_loop(fi, hybdat, mpdata, sphbes0, carr2, ngptm1,pgptm1,pqnrm,qnrm, nqnrm, ikpt, coulomb)
use m_juDFT
use m_types
use m_constants, only: tpi_const,fpi_const
use m_sphbessel_integral
!$ use omp_lib
implicit none
type(t_fleurinput), intent(in) :: fi
TYPE(t_mpdata), intent(in) :: mpdata
TYPE(t_hybdat), INTENT(IN) :: hybdat
integer, intent(in) :: ikpt, ngptm1(:), pqnrm(:,:),pgptm1(:, :), nqnrm
real, intent(in) :: qnrm(:), sphbes0(:, :, :)
complex, intent(in) :: carr2(:, :)
!complex, intent(inout) :: coulomb(:) ! only at ikpt
type(t_mat), intent(inout) :: coulomb
integer :: igpt0, igpt1, igpt2, ix, iy, igptp1, igptp2, iqnrm1, iqnrm2
integer :: ic, itype, lm, m, idum, l, i
real :: q1(3), q2(3)
complex :: y1((fi%hybinp%lexp + 1)**2), y2((fi%hybinp%lexp + 1)**2)
COMPLEX :: cexp1(fi%atoms%ntype)
complex :: cdum, cdum1
logical :: ldum
!$ integer, parameter :: lock_size = 100
!$ integer(kind=omp_lock_kind) :: lock(0:lock_size-1)
call timestart("double g-loop")
! create lock for race-condition in coulomb
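    ! a fixed pool of lock_size locks is striped over the packed matrix
    ! index idum via modulo(idum, lock_size); threads only serialize when
    ! two updates happen to map to the same lock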
!$ do i =0,lock_size-1
!$ call omp_init_lock(lock(i))
!$ enddo
!$OMP PARALLEL DO default(none) &
!$OMP private(igpt0, igpt1, igpt2, ix, igptp2, iqnrm2, q2, y2, iy,igptp1, iqnrm1, q1) &
!$OMP private(y1, ic, itype, cexp1, lm, cdum, l, cdum1, m, idum, ldum) &
!$OMP shared(coulomb, ngptm1, ikpt, pgptm1, hybdat, mpdata, pqnrm, fi) &
!$OMP shared(lock, nqnrm, sphbes0, qnrm, carr2) &
!$OMP schedule(dynamic)
DO igpt0 = 1, ngptm1(ikpt)!1,ngptm1(ikpt)
igpt2 = pgptm1(igpt0, ikpt)
ix = hybdat%nbasp + igpt2
igptp2 = mpdata%gptm_ptr(igpt2, ikpt)
iqnrm2 = pqnrm(igpt2, ikpt)
q2 = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp2), fi%cell%bmat)
y2 = CONJG(carr2(:, igpt2))
DO igpt1 = 1, igpt2
iy = hybdat%nbasp + igpt1
igptp1 = mpdata%gptm_ptr(igpt1, ikpt)
iqnrm1 = pqnrm(igpt1, ikpt)
q1 = MATMUL(fi%kpts%bk(:, ikpt) + mpdata%g(:, igptp1), fi%cell%bmat)
y1 = carr2(:, igpt1)
cexp1 = 0
do ic = 1,fi%atoms%nat
itype = fi%atoms%itype(ic)
cexp1(itype) = cexp1(itype) + &
EXP(CMPLX(0.0, 1.0)*tpi_const*dot_PRODUCT( &
(mpdata%g(:, igptp2) - mpdata%g(:, igptp1)), fi%atoms%taual(:, ic)))
ENDDO
lm = 0
cdum = 0
DO l = 0, fi%hybinp%lexp
cdum1 = 0
DO itype = 1, fi%atoms%ntype
cdum1 = cdum1 + cexp1(itype)*sphbessel_integral( &
fi%atoms, itype, qnrm, nqnrm, &
iqnrm1, iqnrm2, l, fi%hybinp, &
sphbes0, .False., ldum) &
/(2*l + 1)
END DO
DO M = -l, l
lm = lm + 1
cdum = cdum + cdum1*y1(lm)*y2(lm)
ENDDO
ENDDO
idum = ix*(ix - 1)/2 + iy
!$ call omp_set_lock(lock(modulo(idum,lock_size)))
coulomb%data_c(iy,ix) = coulomb%data_c(iy,ix) + (fpi_const)**3*cdum/fi%cell%vol
!$ call omp_unset_lock(lock(modulo(idum,lock_size)))
END DO
END DO
!$OMP END PARALLEL DO
!$ do i =0,lock_size-1
!$ call omp_destroy_lock(lock(i))
!$ enddo
call timestop("double g-loop")
end subroutine perform_double_g_loop
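! Flattens the nested loop over atoms (ic) and mixed-basis angular-momentum
! channels (lm) into index arrays of length niter, so the combined loop can
! be distributed by a single OpenMP parallel do.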
subroutine collapse_ic_and_lm_loop(atoms, lcutm1, niter, ic_arr, lm_arr)
use m_types
implicit none
type(t_atoms), intent(in) :: atoms
integer, intent(in) :: lcutm1(:)
integer, intent(out) :: niter
integer, intent(inout), allocatable :: ic_arr(:), lm_arr(:)
integer :: ic, lm, itype
if(allocated(ic_arr)) deallocate(ic_arr)
if(allocated(lm_arr)) deallocate(lm_arr)
niter = 0
do ic = 1, atoms%nat
itype = atoms%itype(ic)
do lm = 1,(lcutm1(itype)+1)**2
niter = niter + 1
enddo
enddo
allocate( lm_arr(niter), ic_arr(niter))
niter = 0
do ic = 1, atoms%nat
itype = atoms%itype(ic)
do lm = 1,(lcutm1(itype)+1)**2
niter = niter + 1
lm_arr(niter) = lm
ic_arr(niter) = ic
enddo
enddo
end subroutine collapse_ic_and_lm_loop
END MODULE m_coulombmatrix
|
{"hexsha": "db3c7bdcf3f3588c42ab2d76d958bfd7cf214f48", "size": 71417, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "hybrid/coulombmatrix.f90", "max_stars_repo_name": "MRedies/FLEUR", "max_stars_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hybrid/coulombmatrix.f90", "max_issues_repo_name": "MRedies/FLEUR", "max_issues_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hybrid/coulombmatrix.f90", "max_forks_repo_name": "MRedies/FLEUR", "max_forks_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3308504035, "max_line_length": 170, "alphanum_fraction": 0.4758390859, "num_tokens": 23347}
|
import tweepy
import numpy as np
from pymongo import MongoClient
import re
import json
import math
import folium
auth = tweepy.OAuthHandler('q4utaFepGhE5OjujyoruBOoQg', 'D5K3P5URNUTxKnoVnggiUFsNapuNLOSx5cB7Zh6Y4HhpBhhtNy')
auth.set_access_token('438291047-AWXl0LpNxZzjhdFA3FH7AJHtmLRK52QDJiKzq5Wz', 'o3kZKFF2s9ctgVpfDVRRpMbg6BMsGUIFWlJm9wSysKyyY')
api = tweepy.API(auth)
try:
client = MongoClient("mongodb://TeamProject:JoemonJoseForever@twitterdb-shard-00-00-qc9br.mongodb.net:27017,twitterdb-shard-00-01-qc9br.mongodb.net:27017,twitterdb-shard-00-02-qc9br.mongodb.net:27017/test?ssl=true&replicaSet=TwitterDB-shard-0&authSource=admin")
except ValueError as err:
    print(err)
db = client.twitterdb
class MyStreamListener(tweepy.StreamListener):
def on_data(self, raw_data):
json_data = json.loads(raw_data)
post_id = db.twitter_data.insert_one(json_data).inserted_id
print(post_id)
def on_error(self, status_code):
if status_code == 420:
            # returning False in on_error disconnects the stream
return False
myStream = tweepy.Stream(auth = api.auth, listener=MyStreamListener())
#myStream.filter(locations=[-4.50,55.79,-3.97,55.93], async=True)
#localized_tweets = list(tweepy.Cursor(api.search,
# q="university",
## geocode="55.85,-4.25,10km",
# #since="2017-10-13",
# #until="2017-10-21",
# lang="en").items())
#print(len(localized_tweets))
print(db.twitter_data.count())
filter = "is"
regx = re.compile(".*"+filter+".*", re.IGNORECASE)
posts = db.twitter_data.find({"text": regx})
for post in posts:
print(post["place"]["bounding_box"]["type"])
print(posts.count())
def startMap(sender):
m = folium.Map(location=[(55.79+55.93)/2, (-4.50-3.97)/2])
filter = "is"
regx = re.compile(".*"+filter+".*", re.IGNORECASE)
posts = db.twitter_data.find({"text": regx})
for post in posts:
x0 = post["place"]["bounding_box"]["coordinates"][0][0][0]
x1 = post["place"]["bounding_box"]["coordinates"][0][1][0]
x2 = post["place"]["bounding_box"]["coordinates"][0][2][0]
y0 = post["place"]["bounding_box"]["coordinates"][0][0][1]
y1 = post["place"]["bounding_box"]["coordinates"][0][1][1]
y2 = post["place"]["bounding_box"]["coordinates"][0][2][1]
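        # approximate the place's bounding box by a circle: L1 and L2 are the
        # side lengths of the box, R the length of its diagonal used as radius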
sq = np.square([x1-x0, y1-y0, x2-x1, y2-y1])
sqrts = np.sqrt([sq[0]+sq[1], sq[2]+sq[3]])
L1 = sqrts[0]
L2 = sqrts[1]
R = math.sqrt(L1**2 + L2**2)
centre=[(y2+y0)/2, (x2+x0)/2]
text = ''.join(e for e in post["text"] if e.isalnum() or e==' ')[:40]
text = '<i>' + text + '</i>'
if (R>1.0):
folium.CircleMarker(centre, radius=R, popup=text,
color='#3186cc', fill_color='#3186cc').add_to(m)
else:
folium.Marker(centre, popup=text,
icon=folium.Icon(color='green',icon='info-sign')).add_to(m)
display(m)
|
{"hexsha": "1db699149d1859ec779394429f47138f02ec2c34", "size": 3034, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "alek-beloff/teamproject", "max_stars_repo_head_hexsha": "950dc59a18387ed1f4dcaeb3d4c9b6b8174b7840", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-16T00:02:07.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-16T00:02:07.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "alek-beloff/teamproject", "max_issues_repo_head_hexsha": "950dc59a18387ed1f4dcaeb3d4c9b6b8174b7840", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "alek-beloff/teamproject", "max_forks_repo_head_hexsha": "950dc59a18387ed1f4dcaeb3d4c9b6b8174b7840", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-23T16:27:43.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-23T16:27:43.000Z", "avg_line_length": 38.8974358974, "max_line_length": 265, "alphanum_fraction": 0.6212920237, "include": true, "reason": "import numpy", "num_tokens": 936}
|
import os
from itertools import chain
import numpy as np
from c3nav.mapdata.render.engines import register_engine
from c3nav.mapdata.render.engines.base3d import Base3DEngine
@register_engine
class WavefrontEngine(Base3DEngine):
filetype = 'obj'
def _normal_normal(self, normal):
return normal / (np.absolute(normal).max())
def render(self, filename=None):
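        # gather all facets, then deduplicate vertices and per-facet normals;
        # the OBJ output references both through 1-based index lookup tables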
facets = np.vstack(chain(*(chain(*v.values()) for v in self.vertices.values())))
vertices = tuple(set(tuple(vertex) for vertex in facets.reshape((-1, 3))))
vertices_lookup = {vertex: i for i, vertex in enumerate(vertices, start=1)}
normals = np.cross(facets[:, 1] - facets[:, 0], facets[:, 2] - facets[:, 1]).reshape((-1, 3))
normals = normals / np.amax(np.absolute(normals), axis=1).reshape((-1, 1))
normals = tuple(set(tuple(normal) for normal in normals))
normals_lookup = {normal: i for i, normal in enumerate(normals, start=1)}
materials = b''
materials_filename = filename + '.mtl'
for name, color in self.colors.items():
materials += ((b'newmtl %s\n' % name.encode()) +
(b'Ka %.2f %.2f %.2f\n' % color[:3]) +
(b'Kd %.2f %.2f %.2f\n' % color[:3]) +
b'Ks 0.00 0.00 0.00\n' +
(b'd %.2f\n' % color[3]) +
b'illum 2\n')
result = b'mtllib %s\n' % os.path.split(materials_filename)[-1].encode()
result += b'o c3navExport\n'
result += b''.join((b'v %.3f %.3f %.3f\n' % vertex) for vertex in vertices)
result += b''.join((b'vn %.6f %.6f %.6f\n' % normal) for normal in normals)
for group, subgroups in self.groups.items():
result += b'\n# ' + group.encode() + b'\n'
for subgroup in subgroups:
result += b'\n# ' + subgroup.encode() + b'\n'
for i, vertices in enumerate(self.vertices[subgroup].values()):
if not vertices:
continue
for j, facets in enumerate(vertices):
if not facets.size:
continue
normals = np.cross(facets[:, 1] - facets[:, 0], facets[:, 2] - facets[:, 1]).reshape((-1, 3))
normals = normals / np.amax(np.absolute(normals), axis=1).reshape((-1, 1))
normals = tuple(normals_lookup[tuple(normal)] for normal in normals)
result += ((b'g %s_%d_%d\n' % (subgroup.encode(), i, j)) +
(b'usemtl %s\n' % subgroup.encode()) +
b's off\n' +
b''.join((b'f %d//%d %d//%d %d//%d\n' % (vertices_lookup[tuple(a)], normals[k],
vertices_lookup[tuple(b)], normals[k],
vertices_lookup[tuple(c)], normals[k],)
for k, (a, b, c) in enumerate(facets)))
)
return result, (materials_filename, materials)
|
{"hexsha": "2b6dd9c28b2165429727f20b87b0828f52963859", "size": 3251, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/c3nav/mapdata/render/engines/wavefront.py", "max_stars_repo_name": "johnjohndoe/c3nav", "max_stars_repo_head_hexsha": "a17f863a3512e305595c16b0300796b6bae81241", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 132, "max_stars_repo_stars_event_min_datetime": "2016-11-12T01:45:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T15:17:10.000Z", "max_issues_repo_path": "src/c3nav/mapdata/render/engines/wavefront.py", "max_issues_repo_name": "johnjohndoe/c3nav", "max_issues_repo_head_hexsha": "a17f863a3512e305595c16b0300796b6bae81241", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2016-09-29T09:46:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:26:18.000Z", "max_forks_repo_path": "src/c3nav/mapdata/render/engines/wavefront.py", "max_forks_repo_name": "johnjohndoe/c3nav", "max_forks_repo_head_hexsha": "a17f863a3512e305595c16b0300796b6bae81241", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2016-09-29T08:34:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T15:17:15.000Z", "avg_line_length": 50.796875, "max_line_length": 117, "alphanum_fraction": 0.486311904, "include": true, "reason": "import numpy", "num_tokens": 767}
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
class Gridmap(object):
# Visualization tools
# occupancy: A binary array encoding an occupancy grid of
# the environment (1: occupied, 0: free)
# xres, yres: Map resolution
def __init__(self, occupancy, xres=1, yres=1):
# Flip the occupancy grid so that indexing starts in the lower-left
        self.occupancy = occupancy[::-1, :].astype(bool)  # np.bool was removed in recent NumPy
self.xres = xres
self.yres = yres
self.m = self.occupancy.shape[0]
self.n = self.occupancy.shape[1]
# Returns True if (x, y) is in collision
# ij: Indicates whether (x,y) are array indices
def inCollision(self, x, y, ij=False):
if ij == True:
j = x
i = y
else:
# j = int(np.ceil(x/self.xres))
# i = int(np.ceil(y/self.yres))
j = np.int32(np.floor(x / self.xres))
i = np.int32(np.floor(y / self.yres))
# i = (self.m-1) - int(np.floor(y/self.yres)) # Since i=0 is upper-left
i = np.asarray(i)
j = np.asarray(j)
inBounds = (i < self.m) * (i >= 0) * (j < self.n) * (j >= 0)
        collision = np.ones(i.shape, dtype=bool)
collision[inBounds] = self.occupancy[i[inBounds], j[inBounds]]
return collision
# Returns the height and width of the occupancy
# grid in terms of the number of cells
#
# Returns:
# m: Height in number of cells
# n: Width in number of cells
def getShape(self):
return self.m, self.n
# Converts an (i,j) integer pair to an (x,y) pair
# i: Row index (zero at bottom)
# j: Column index
#
# Returns
# x: x position
# y: y position
def ij2xy(self, i, j):
        x = float(j * self.xres)
        y = float(i * self.yres)
return (x, y)
    # Converts an (x,y) pair to an (i,j) integer pair
    # x: x position
    # y: y position
    #
    # Returns:
    #   i: Row index (zero at bottom)
    #   j: Column index
def xy2ij(self, x, y):
i = int(np.floor(y / self.yres))
        j = int(np.floor(x / self.xres))  # was self.yres, a bug for non-square cells
return (i, j)
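
# Minimal usage sketch (an illustrative addition, not part of the original
# module): build a small map with one occupied cell and query it.
if __name__ == "__main__":
    occ = np.zeros((4, 4))
    occ[1, 2] = 1  # mark one cell occupied
    gm = Gridmap(occ, xres=0.5, yres=0.5)
    print(gm.getShape())             # -> (4, 4)
    print(gm.inCollision(1.2, 0.7))  # query a world coordinate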
|
{"hexsha": "bcf29767c293df4b70b6c03650b44c5afc6c241c", "size": 2276, "ext": "py", "lang": "Python", "max_stars_repo_path": "robotics/ekf_slam_and_pf_localization/code/pf/Gridmap.py", "max_stars_repo_name": "mtn/ml", "max_stars_repo_head_hexsha": "2cd2c447c15baa41e9626fa453c2fdc872e73cd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "robotics/ekf_slam_and_pf_localization/code/pf/Gridmap.py", "max_issues_repo_name": "mtn/ml", "max_issues_repo_head_hexsha": "2cd2c447c15baa41e9626fa453c2fdc872e73cd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "robotics/ekf_slam_and_pf_localization/code/pf/Gridmap.py", "max_forks_repo_name": "mtn/ml", "max_forks_repo_head_hexsha": "2cd2c447c15baa41e9626fa453c2fdc872e73cd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7567567568, "max_line_length": 83, "alphanum_fraction": 0.5399824253, "include": true, "reason": "import numpy", "num_tokens": 656}
|
function constraint(mechanism::Mechanism{T,Nn,Ne,Nb}, body::Body{T}) where {T,Nn,Ne,Nb}
state = body.state
timestep = mechanism.timestep
mass = body.mass
inertia = body.inertia
gravity = mechanism.gravity
x1, q1 = previous_configuration(state)
x2, q2 = current_configuration(state)
x3, q3 = next_configuration(state, timestep)
# dynamics
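    # discrete equations of motion: D1x/D2x balance linear momentum over the
    # previous and next half step (including gravity), while D1q/D2q are the
    # corresponding rotational terms built from the quaternion maps (L, R, V)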
D1x = - 1.0 / timestep * mass * (x2 - x1) - 0.5 * timestep * mass * gravity
D2x = 1.0 / timestep * mass * (x3 - x2) - 0.5 * timestep * mass * gravity
D1q = -2.0 / timestep * LVᵀmat(q2)' * Lmat(q1) * Vᵀmat() * inertia * Vmat() * Lmat(q1)' * vector(q2)
D2q = -2.0 / timestep * LVᵀmat(q2)' * Tmat() * Rmat(q3)' * Vᵀmat() * inertia * Vmat() * Lmat(q2)' * vector(q3)
dynT = D2x + D1x
dynR = D2q + D1q
state.d = [dynT; dynR]
# inputs
state.d -= [state.JF2; state.Jτ2]
# impulses
for id in connections(mechanism.system, body.id)
Ne < id <= Ne + Nb && continue # body
impulses!(mechanism, body, get_node(mechanism, id))
end
return state.d
end
function constraint_jacobian_configuration(mechanism::Mechanism{T,Nn,Ne,Nb}, body::Body{T}; reg::T=Dojo.REG) where {T,Nn,Ne,Nb}
state = body.state
timestep = mechanism.timestep
mass = body.mass
inertia = body.inertia
x2, q2 = current_configuration(state)
x3, q3 = next_configuration(state, timestep)
I3 = SMatrix{3,3,T,9}(Diagonal(sones(T,3)))
Z33 = szeros(T, 3, 3)
Z34 = szeros(T, 3, 4)
# dynamics
dynT = I3 * mass / timestep
dynR = -2.0 / timestep * LVᵀmat(q2)' * Tmat() * (∂Rᵀmat∂q(Vᵀmat() * inertia * Vmat() * Lmat(q2)' * vector(q3)) + Rmat(q3)' * Vᵀmat() * inertia * Vmat() * Lmat(q2)')
state.D = [[dynT; Z33] [Z34; dynR]] * integrator_jacobian_velocity(body, timestep)
state.D += [[reg * I3; Z33] [Z33; reg * I3]]
# inputs
nothing
# impulses
for id in connections(mechanism.system, body.id)
Ne < id <= Ne + Nb && continue # body
impulses_jacobian_velocity!(mechanism, body, get_node(mechanism, id))
end
return state.D
end
function integrator_jacobian_velocity(body::Body{T}, timestep) where T
state = body.state
x2, v25, q2, ω25 = current_configuration_velocity(state)
integrator_jacobian_velocity(x2, v25, q2, ω25, timestep)
end
function integrator_jacobian_configuration(body::Body{T},
timestep; attjac::Bool=true) where T
state = body.state
x2, v25, q2, ω25 = current_configuration_velocity(state)
integrator_jacobian_configuration(x2, v25, q2, ω25, timestep; attjac=attjac)
end
# linear system
function set_matrix_vector_entries!(mechanism, matrix_entry::Entry, vector_entry::Entry, body::Body)
matrix_entry.value = constraint_jacobian_configuration(mechanism, body)
vector_entry.value = -constraint(mechanism, body)
end
|
{"hexsha": "1b70956f9ba226412125dc37d415a41982f096fc", "size": 2864, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/integrators/constraint.jl", "max_stars_repo_name": "dojo-sim/Dojo.jl", "max_stars_repo_head_hexsha": "33ccdde8d7f74c4ea3c3bffdebcc6ed65959a5be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2022-03-02T01:28:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:14:51.000Z", "max_issues_repo_path": "src/integrators/constraint.jl", "max_issues_repo_name": "dojo-sim/Dojo.jl", "max_issues_repo_head_hexsha": "33ccdde8d7f74c4ea3c3bffdebcc6ed65959a5be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2022-03-02T06:58:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T17:17:26.000Z", "max_forks_repo_path": "src/integrators/constraint.jl", "max_forks_repo_name": "dojo-sim/Dojo.jl", "max_forks_repo_head_hexsha": "33ccdde8d7f74c4ea3c3bffdebcc6ed65959a5be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-03-07T01:47:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T19:13:07.000Z", "avg_line_length": 33.3023255814, "max_line_length": 168, "alphanum_fraction": 0.6490921788, "num_tokens": 942}
|
import datetime
import gensim.downloader as api
import io
import json
import math
import nltk
import numpy as np
import os
import pandas as pd
import pdfminer
import pickle
import PyPDF2
import re
import sys
import pdfrw
import textract
import tldextract
import traceback
from collections import Counter
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize, wordpunct_tokenize
from gensim.summarization.summarizer import summarize
from os.path import basename
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfrw import PdfReader
from PIL import Image
from pymongo import MongoClient
from sklearn.base import BaseEstimator
from wand.image import Image
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import Features, KeywordsOptions, EntitiesOptions
def words(text): return re.findall(r'\w+', text.lower())
class LinguisticVectorizer(BaseEstimator):
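    # scikit-learn style transformer that maps a document to a fixed vector of
    # stylometric features (lengths, POS ratios, lexical richness, symbol counts)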
def get_feature_names(self):
return np.array(
['text_length',
'number_of_paragraphs',
'average_sent_length',
'average_word_length',
'number_of_nouns',
'number_of_adjectives',
'number_of_verbs',
'type_token_relation',
'hapaxes_index',
'action_index',
'number_of_question_marks',
'number_of_exclamations',
'number_of_percentages',
'number_of_currency_symbols',
'number_of_paragraph_symbols',
'content_fraction',
'number_of_cappsed_words',
'number_of_first_person_pronouns']
)
def fit(self, documents, y=None):
return self
def __filter(self, string):
return [w for w in word_tokenize(string) if w.isalpha()]
def _get_text_length(self, string):
tokens = self.__filter(string)
return len(tokens)
def _get_number_of_paragraphs(self, string):
return round(string.count('\n') / 2)
def _get_average_sent_length(self, string):
tokens = self.__filter(string)
        if len(sent_tokenize(string)) == 0:
return len(tokens)
return len(tokens) / len(sent_tokenize(string))
def _get_average_word_length(self, string):
tokens = self.__filter(string)
word_length_list = []
for word in tokens:
word_length_list.append(len(word))
return np.average(word_length_list)
def _get_number_of_pos(self, string):
nouns = 0
verbs = 0
adj = 0
for a in pos_tag(self.__filter(string)):
if a[1] in ['NN', 'NNS', 'NNP', 'NNPS']: nouns += 1
if a[1] in ['JJ', 'JJR', 'JJS']: adj += 1
if a[1] in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']: verbs += 1
length = self._get_text_length(string)
return (nouns/length, verbs/length, adj/length, verbs / (adj + verbs))
# n, v, a, naq
def _get_ttr(self, string):
tokens = self.__filter(string)
        if len(tokens) == 0:
return 0
return len(set(tokens)) / len(tokens)
def _get_hl(self, string):
words = self.__filter(string)
fdist = nltk.FreqDist(words)
hapaxes = fdist.hapaxes()
        if len(words) == 0:
return len(hapaxes)
return len(hapaxes) / len(words)
    def _get_number_of_currency_symbols(self, string):
        currencies = ["£","€","$","¥","¢","₩"]
        total = 0
        for currency in currencies:
            # _get_number_of_symbol already normalizes by text length,
            # so the per-currency fractions are summed without dividing again
            total += self._get_number_of_symbol(string, currency)
        return total
def _get_number_of_symbol(self, string, symbol):
return string.count(symbol) / self._get_text_length(string)
def _get_content_fraction(self, string):
tokens = self.__filter(string)
content = [w for w in tokens if w.lower() not in stopwords.words('english')]
        if len(tokens) == 0:
return 0
return len(content) / len(tokens)
def _get_number_of_cappsed_words(self, string):
tokens = self.__filter(string)
return np.sum([t.isupper() for t in tokens if len(t) > 2]) / self._get_text_length(string)
def _get_number_of_first_person_pronouns(self, string):
tokens = word_tokenize(string)
pronouns = ["i","me","my", "mine", "myself","we", "our", "ours", "ourself"]
sum = 0
mode = 0
for word in tokens:
if word == "``":
mode = mode + 1
elif word == "''":
mode = mode - 1
            if mode <= 0 and word.lower() in pronouns:  # exact match, not substring
sum += 1
return sum / len(tokens)
def transform(self, documents):
text_length = [self._get_text_length(d) for d in documents]
number_of_paragraphs = [self._get_number_of_paragraphs(d) for d in documents]
average_length_of_sent = [self._get_average_sent_length(d) for d in documents]
average_word_length = [self._get_average_word_length(d) for d in documents]
number_of_nouns, number_of_verbs, number_of_adjectives, action_index = self._get_number_of_pos(documents[0])
type_token_relation = [self._get_ttr(d) for d in documents]
hapaxes_index = [self._get_hl(d) for d in documents]
number_of_question_marks = [self._get_number_of_symbol(d, "?") for d in documents]
number_of_exclamations = [self._get_number_of_symbol(d, "!") for d in documents]
number_of_percentages = [self._get_number_of_symbol(d, "%") for d in documents]
number_of_currency_symbols = [self._get_number_of_currency_symbols(d) for d in documents]
number_of_paragraph_symbols = [self._get_number_of_symbol(d, "§") for d in documents]
content_fraction = [self._get_content_fraction(d) for d in documents]
number_of_cappsed_words = [self._get_number_of_cappsed_words(d) for d in documents]
number_of_first_person_pronouns = [self._get_number_of_first_person_pronouns(d) for d in documents]
result = np.array(
[text_length,
number_of_paragraphs,
average_length_of_sent,
average_word_length,
[number_of_nouns],
[number_of_adjectives],
[number_of_verbs],
type_token_relation,
hapaxes_index,
[action_index],
number_of_question_marks,
number_of_exclamations,
number_of_percentages,
number_of_currency_symbols,
number_of_paragraph_symbols,
content_fraction,
number_of_cappsed_words,
number_of_first_person_pronouns]
).T
return result
def filter_entities(entities_in, entity_filter = ["Person", "Location", "Organization", "Company"]):
entities_out = []
for val in entities_in:
if val[0] in entity_filter:
entities_out.append(val)
return entities_out
def map_entities(entities):
out = []
for val in entities:
out.append((val['type'], val['text'], val['relevance']))
return out
def process_keywords(keywords):
out = []
for val in keywords:
out.append((val['text'], val['relevance']))
return out
def process_entities(entities):
    mapped_entities = map_entities(entities)  # map {'type', 'text', 'relevance'} dicts to (type, text, relevance) tuples
    filtered_entities = filter_entities(mapped_entities)
    return filtered_entities
def get_title_from_meta(input_pdf):
title = PdfReader(input_pdf).Info.Title
if title != None: title = title.strip("()").strip()
if title == "": title = None
return title
def createPDFDoc(fpath):
fp = open(fpath, 'rb')
parser = PDFParser(fp)
document = PDFDocument(parser, password='')
if not document.is_extractable:
raise "Not extractable"
else:
return document
def createDeviceInterpreter():
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
return device, interpreter
def parse_obj(objs):
string_fontsize = []
for obj in objs:
if isinstance(obj, pdfminer.layout.LTTextBox):
for o in obj._objs:
if isinstance(o,pdfminer.layout.LTTextLine):
text=o.get_text()
if text.strip():
old_size = 0
string = ""
for c in o._objs:
try:
string += c._text
old_size = c.size
except:
pass
if isinstance(c, pdfminer.layout.LTChar):
pass
string_fontsize.append({"string":string, "size":old_size})
elif isinstance(obj, pdfminer.layout.LTFigure):
parse_obj(obj._objs)
else:
pass
i = 0
while i < len(string_fontsize):
if i != 0 and math.floor(string_fontsize[i-1]["size"]) == math.floor(string_fontsize[i]["size"]):
string_fontsize[i-1]["string"] += string_fontsize[i]["string"]
del string_fontsize[i]
i -= 1
i+=1
return string_fontsize
def get_title_without_meta(input_pdf):
document=createPDFDoc(input_pdf)
device,interpreter=createDeviceInterpreter()
pages=PDFPage.create_pages(document)
interpreter.process_page(next(pages))
layout = device.get_result()
string_fontsize = parse_obj(layout._objs)
title_index = max(range(len(string_fontsize)), key=lambda index: string_fontsize[index]['size'])
title = string_fontsize[title_index]["string"].replace("\n", " ")
return title
def create_pdf_images(input_pdf, output_path):
    # NOTE: filter by size; minimum size? 100x100?
pdf = open(input_pdf, "rb").read()
startmark = b"\xff\xd8"
startfix = 0
endmark = b"\xff\xd9"
endfix = 2
i = 0
njpg = 0
export_paths = []
while True:
istream = pdf.find(b"stream", i)
if istream < 0:
break
istart = pdf.find(startmark, istream, istream+20)
if istart < 0:
i = istream+20
continue
iend = pdf.find(b"endstream", istart)
if iend < 0:
raise Exception("Didn't find end of stream!")
iend = pdf.find(endmark, iend-20)
if iend < 0:
raise Exception("Didn't find end of JPG!")
istart += startfix
iend += endfix
print("JPG %d from %d to %d" % (njpg, istart, iend))
jpg = pdf[istart:iend]
file_path = os.path.join(output_path, "jpg%d.jpg" % njpg)
os.makedirs(output_path, exist_ok = True)
with open(file_path, "wb") as jpgfile:
jpgfile.write(jpg)
#img = Image.open(file_path)
#size = img.size
#if size[0] < 100 or size[1] < 100:
# os.remove(file_path)
#else:
export_paths.append(file_path)
njpg += 1
i = iend
return export_paths
def create_thumbnail(src_filename, output_path, pagenum = 0, resolution = 72,):
src_pdf = PyPDF2.PdfFileReader(open(src_filename, "rb"))
dst_pdf = PyPDF2.PdfFileWriter()
dst_pdf.addPage(src_pdf.getPage(pagenum))
pdf_bytes = io.BytesIO()
dst_pdf.write(pdf_bytes)
pdf_bytes.seek(0)
img = Image(file = pdf_bytes, resolution = resolution)
img.convert("png")
print(output_path)
export_path = output_path + "_thumb.png"
img.save(filename = export_path)
return export_path
def getVectorsOf(model, text):
vectors = []
for token in wordpunct_tokenize(text):
try:
vectors.append(model[token])
except:
pass
return vectors
def vectorize_document(text, model):
return np.array(getVectorsOf(model, text)).mean(axis=0)
def mongo_connect():
client = MongoClient('localhost', 27017)
db = client.stanford_data
collection = db.document_collection
documents = collection.documents
return documents
def mongo_save(document, schema):
doc = {"document_title_md": document['title_md'],
"document_title_pdf": document['title_pdf'],
"document_title_summ": summarize(document['text'], word_count=6, split=False),
"document_text": document["text"],
"document_summary": document['summary'],
"thumbnail_path": document['thumbnail_path'],
"extracted_image_paths": document['pdf_images_paths'],
"document_url": document['url'],
"document_parent_url": document['parent_url'],
"document_type": document['document_type'],
"filename": document["filename"],
"keywords": document['keywords'], # word, score
"entities": document['entities'], # entity, value, score, image
"word2vec": document['vector_representation'].tolist(),
"lingvector" : document['lingvector'].tolist(),
"date": document['time']}
schema.insert_one(doc)
print("SUCCESS :)))")
def main(entity_limit = 50, keyword_limit = 20):
schema = mongo_connect()
with open('../notebooks/document_type_classifier.pkl', 'rb') as f:
document_type_classifier = pickle.load(f)
with open('../notebooks/url_features.pkl', 'rb') as f:
url_features = pickle.load(f)
model = api.load("glove-wiki-gigaword-300") # download the model and return as object ready for use
natural_language_understanding = NaturalLanguageUnderstandingV1(
username='cccb5076-87bd-4992-b99e-29a0f258460b',
password='Prop61GOuNtl',
version='2018-03-16')
start = 2000
end = 3000
for index, pdffile in enumerate(sorted(os.listdir('nextiterationhackathon2018/pdf'))[start:end]):
try:
print("Durchlauf " + str(start) + "/" + str(start+index) + "/" + str(end))
if(pdffile.endswith(".json")):
print("JSON found :(")
continue
pdfpath = os.path.join('nextiterationhackathon2018/pdf', pdffile)
document = {}
print("Titel start")
# document title
try:
document['title_pdf'] = get_title_without_meta(pdfpath)
document['title_md'] = get_title_from_meta(pdfpath)
except pdfrw.errors.PdfParseError as e:
print("PdfParseError")
continue
document['time'] = os.path.getmtime(pdfpath)
# process raw text
try:
document['text'] = textract.process(pdfpath).decode("utf-8")
except UnicodeDecodeError:
print("UnicodeDecodeError")
continue
print("NLU start")
#process entities and keywords
response = natural_language_understanding.analyze(
text=document['text'],
features=Features(
entities=EntitiesOptions(
sentiment=False,
limit=entity_limit),
keywords=KeywordsOptions(
sentiment=False,
emotion=False,
limit=keyword_limit)))
keywords = response['keywords']
document['keywords'] = process_keywords(keywords)
entities = response['entities']
document['entities'] = process_entities(entities)
# add metadata
json_path = os.path.join('nextiterationhackathon2018/pdf', basename(pdffile) + '.json')
with open(json_path, 'r+') as jsondata:
metadata = json.load(jsondata)
document['url'] = metadata['url']
document['parent_url'] = metadata['parent_url']
document['filename'] = document['url'].split('/')[-1]
# document classification
print("Classification start")
generated_url_features = pd.DataFrame(columns=url_features)
generated_url_features.loc[0] = np.zeros(len(url_features))
url_feature = "tld-url" + "_" + '.'.join(tldextract.extract(document['url'])[:2])
parent_url_feature = "tld-parent-url" + "_" + '.'.join(tldextract.extract(document['parent_url'])[:2])
if url_feature in generated_url_features.columns:
generated_url_features.loc[0][url_feature] = 1
if parent_url_feature in generated_url_features.columns:
generated_url_features.loc[0][parent_url_feature] = 1
ling = LinguisticVectorizer()
x_ling = ling.fit([document['text']]).transform([document['text']])
document['lingvector'] = x_ling[0]
ling_features = pd.DataFrame(x_ling, columns=ling.get_feature_names())
w2v_features = pd.DataFrame([np.array(getVectorsOf(model, document["text"])).mean(axis=0)]).add_prefix("w2v_")
features = pd.concat([generated_url_features, ling_features, w2v_features], axis=1)
prediction = document_type_classifier.predict(features)
document["document_type"] = prediction[0]
print("Images start")
outpath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "out", str(index))
os.makedirs(outpath, exist_ok=True)
# document images
document['pdf_images_paths'] = create_pdf_images(pdfpath, os.path.join(outpath, "docimages"))
document['thumbnail_path'] = create_thumbnail(pdfpath, os.path.join(outpath, "thumbnail"))
print("Vector start")
document['vector_representation'] = vectorize_document(document['text'], model)
print("Summary start")
document['summary'] = summarize(document['text'].replace('\n', ' '), word_count=100, split=False)
print("Save start")
mongo_save(document, schema)
except Exception as e:
print("EXCEPTION" + str(e))
traceback.print_tb(e.__traceback__)
continue
if __name__ == "__main__":
main()
|
{"hexsha": "153b4fbe5c9d1181700108f1f1ab1d3111eeb3e4", "size": 18592, "ext": "py", "lang": "Python", "max_stars_repo_path": "backend/process.py", "max_stars_repo_name": "manu183/NextIterationHackathon2018", "max_stars_repo_head_hexsha": "7ee730d7fae94376fa25d42287619d3a5577dd8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-04-23T19:42:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-12T09:00:14.000Z", "max_issues_repo_path": "backend/process.py", "max_issues_repo_name": "manu183/Autonomous-Semantic-Search-Engine", "max_issues_repo_head_hexsha": "7ee730d7fae94376fa25d42287619d3a5577dd8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-04-21T19:41:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-21T19:42:06.000Z", "max_forks_repo_path": "backend/process.py", "max_forks_repo_name": "manuel-lang/Autonomous-Semantic-Search-Engine", "max_forks_repo_head_hexsha": "7ee730d7fae94376fa25d42287619d3a5577dd8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-02T22:15:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-10T10:11:07.000Z", "avg_line_length": 36.4549019608, "max_line_length": 122, "alphanum_fraction": 0.6170395869, "include": true, "reason": "import numpy", "num_tokens": 4242}
|
[STATEMENT]
lemma tendsto_cong_limit: "(f \<longlongrightarrow> l) F \<Longrightarrow> k = l \<Longrightarrow> (f \<longlongrightarrow> k) F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(f \<longlongrightarrow> l) F; k = l\<rbrakk> \<Longrightarrow> (f \<longlongrightarrow> k) F
[PROOF STEP]
by simp
|
{"llama_tokens": 110, "file": null, "length": 1}
|
\subsection{Three-dimensional RTE solver \index{MYSTIC} (mystic)}
\label{sec:mystic}
The Monte Carlo method is the most straightforward way to calculate
(polarized) radiative transfer. In forward tracing mode
individual photons are traced on their random paths through the atmosphere.
Starting from top of the atmosphere (for solar radiation), or being thermally
emitted by the atmosphere or surface, the photons are followed until they
hit the surface or leave again at top of the atmosphere (TOA). For
solar radiation, the start position is a random location in the
TOA plane, with the direction determined by the solar zenith and
azimuth angles. Originally, the ``Monte Carlo for the physically correct tracing of
photons in cloudy atmospheres'' MYSTIC \citep{mayer2009} has been developed as
a forward tracing method for the calculation of irradiances and radiances in
\ifthreedmystic{3D} plane-parallel atmospheres. Later the model has been extended to fully
spherical geometry and a backward tracing mode \citep{emde2007}. The backward
photon tracing option speeds up the calculation of radiances and allows very
fast calculations in the thermal spectral range.
\ifthreedmystic{MYSTIC handles three-dimensional water and ice clouds in a
one-dimensional background atmosphere of molecular scatterers and
absorbers and aerosol particles. MYSTIC also allows topography as well as
inhomogeneous surface albedo and BRDF to be considered.}
MYSTIC is now a full vector code: It can handle polarization and
polarization-dependent scattering by randomly oriented particles,
i.e.\ cloud droplets and ice particles, aerosol particles, and molecules
\citep{emde2010}. To keep the computational time reasonable for
accurate calculations of e.g. polarized radiances in cloudy
atmospheres, several ``tricks'' are required to speed up the
calculations. The first is the so called ``local estimate method''
\citep{marshak2005}. Using this method a photon contributes to the
final result of the calculation each time it is scattered. However, in
the presence of particles with strong forward scattering in the
simulated scene, such as clouds and large aerosols, the local estimate
method will produce so-called ``spikes'': rare photons whose
very large contribution to the result leads to slow convergence. The spike
problem can be resolved by using the ``Variance Reduction Optimal
Options Method'' \citep[VROOM,][]{buras2011a}, a collection of several
variance reduction methods which change the photon paths such that the
spikes disappear, but without altering the result (i.e.~the variance
reduction is ``unbiased'').
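To illustrate the idea of the local estimate, the following schematic
Python fragment shows how a single scattering event contributes to a
directional radiance detector (a didactic sketch, not MYSTIC code;
geometry factors and normalization conventions are simplified):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
import numpy as np

def local_estimate(weight, phase_to_detector, tau_to_detector):
    # contribution of one scattering event: photon weight times the
    # (normalized) phase function value in the detector direction times
    # the transmittance along the line of sight to the detector
    return weight * phase_to_detector / (4.0 * np.pi) \
                  * np.exp(-tau_to_detector)
\end{Verbatim}
A strongly peaked phase function makes \code{phase\_to\_detector}
occasionally huge, which is exactly the origin of the spikes that VROOM
removes by modifying the sampled photon paths.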
A detailed introduction to the Monte Carlo technique and in particular
to MYSTIC is given in \citet{mayer2009}. For specific questions
concerning the Monte Carlo technique the reader is referred to the literature
\citep{marchuk1980,collins1972,marshak2005,cahalan2005}.
MYSTIC is switched on by the option \code{rte\_solver mystic}. If no
other options are specified MYSTIC computes unpolarized quantities for
a \ifthreedmystic{3D} plane-parallel
atmosphere\ifthreedmystic{~(domain is divided into block-shaped
boxes)}. If \codeidx{mc\_polarisation} is specified, polarized
quantities are computed. The option \codeidx{mc\_spherical 1D} enables
calculations in a 1D spherical model atmosphere. All MYSTIC-specific
options start with \code{mc\_} and
are described in detail in section~\ref{sec:uvspec_options}.
\ifthreedmystic{
MYSTIC may be provided with various 2D and 3D input files, in addition
to the standard 1D input files of {\sl uvspec}. The input files may
include the following data:
\begin{itemize}
\item 3D water clouds
\item 3D ice clouds
\item 2D surface albedo
\item 2D elevation
\item 2D BRDF
%\item Lasers and detectors for lidar
\end{itemize}
If none of these are specified, MYSTIC does basically a 1D calculation
and should compare well with other solvers, in particular \code{disort2}.
MYSTIC operates in a user-defined model-domain. Since periodic boundary
conditions are applied, the user has to make sure that the quantity to be
calculated is not affected by the boundaries. For maximum flexibility,
the different grids (sample, cloud, elevation, albedo or BRDF) are
completely independent. The only constraint is that the domain size
is equal for all grids used. E.g., clouds may be defined in 5x5x2~km$^3$
cubes while the surface albedo is defined on a 1x1~km$^2$ grid,
the elevation on a 0.1x0.2~km$^2$ grid, and the output might
be sampled on a 3x4~km$^2$ grid. The only restrictions are
the available memory, the processor speed, and the requirements on
the precision of the result (see section~\ref{sec:mc_memory}).
The sampling information for MYSTIC is defined in the input file (e.g.
with \codeidx{mc\_sample\_grid}, \codeidx{zout}, \codeidx{umu}, ...).
After initialization MYSTIC reports how it interpreted the input parameters,
which gives the user a chance to see if it really does what it is
supposed to be doing.
\subsubsection{Coordinates}
MYSTIC in Cartesian mode uses an ENU (east-north-up) coordinate system to define the atmospheric state:
\begin{itemize}
\item{\emph{x}: east}
\item{\emph{y}: north}
\item{\emph{z}: up}
\end{itemize}
\begin{figure}[h!]
\centering
\tdplotsetmaincoords{70}{20}
\begin{tikzpicture}[scale=5,tdplot_main_coords]
\coordinate (O) at (0,0,0);
\coordinate (Ex) at (1,0,0);
\coordinate (Ey) at (0,1,0);
\coordinate (Ez) at (0,0,1);
\draw[thick, ->] (-0.2,0,0) -- (Ex) node[anchor=west]{$x~\textrm{(east)}$};
\draw[thick, ->] (0,-0.2,0) -- (Ey) node[anchor=west]{$y~\textrm{(north)}$};
\draw[thick, ->] (0,0,0) -- (Ez) node[anchor=south]{$z~\textrm{(up)}$};
\tdplotsetcoord{P}{1}{45.57}{60};
\draw[thick, ->, color=red] (O) -- (P) node[anchor=south west]{sensor};
\draw[dashed, color=red] (O) -- (Pxy);
\draw[dashed, color=red] (P) -- (Pxy) node[midway,anchor=west]{$\textrm{umu} = 0.7$};
\tdplotdrawarc[color=red]{(O)}{0.3}{90}{60}{xshift=0.3cm,anchor=north west}{$\textrm{phi} = 30\degree$};
\end{tikzpicture}
\caption{Sensor coordinate specification (\codeidx{umu} and \codeidx{phi}). The red arrow is pointing into the sensor, thus a sensor with positive umu values is looking downwards.}
\label{fig_sensor_coordinates}
\end{figure}
Generally, the sensor orientation is specified using the \codeidx{umu} and \codeidx{phi} input options as shown in figure~\ref{fig_sensor_coordinates}.
The location of the sensor is given in the same $x$, $y$, $z$ coordinate system by use of the option \codeidx{mc\_sensorposition}.
The position of the sun is specified either using \codeidx{latitude}, \codeidx{longitude} and \codeidx{time} or using \codeidx{sza} and \codeidx{phi0}.
In the latter case, the angles are (maybe depending on the mindset of the reader) defined slightly differently, as shown in figure~\ref{fig_sun_coordinates}.
\begin{figure}[h!]
\centering
\tdplotsetmaincoords{70}{20}
\begin{tikzpicture}[scale=5,tdplot_main_coords]
\coordinate (O) at (0,0,0);
\coordinate (Ex) at (1,0,0);
\coordinate (Ey) at (0,1,0);
\coordinate (Ez) at (0,0,1);
\draw[thick, ->] (-0.2,0,0) -- (Ex) node[anchor=west]{$x~\textrm{(east)}$};
\draw[thick, ->] (0,-0.2,0) -- (Ey) node[anchor=west]{$y~\textrm{(north)}$};
\draw[thick, ->] (0,0,0) -- (Ez) node[anchor=south]{$z~\textrm{(up)}$};
\tdplotsetcoord{P}{1}{30}{210};
\draw[thick, ->, color=orange] (O) -- (P) node[anchor=south east]{sun};
\draw[dashed, color=orange] (O) -- (Pxy);
\draw[dashed, color=orange] (P) -- (Pxy);
\tdplotsetthetaplanecoords{210};
\tdplotdrawarc[tdplot_rotated_coords, color=orange]{(O)}{0.5}{0}{30}{xshift=0.7cm,anchor=west}{$\textrm{sza} = 30\degree$}
\tdplotdrawarc[color=orange]{(O)}{0.15}{270}{210}{yshift=-0.2cm,anchor=north}{$\textrm{phi0} = 60\degree$};
\end{tikzpicture}
\caption{Sun coordinate specification (sza and phi0). The orange arrow is pointing into the sun.}
\label{fig_sun_coordinates}
\end{figure}
\begin{figure}[h!]
\centering
\tdplotsetmaincoords{70}{20}
\pgfmathsetmacro{\thetavec}{20}
\pgfmathsetmacro{\phivec}{320}
\begin{tikzpicture}[scale=5,tdplot_main_coords]
\coordinate (O) at (0,0,0);
\coordinate (Ex) at (1,0,0);
\coordinate (Ey) at (0,1,0);
\coordinate (Ez) at (0,0,1);
\draw[thick, ->] (-0.2,0,0) -- (Ex) node[anchor=west]{$x~\textrm{(east)}$};
\draw[thick, ->] (0,-0.7,0) -- (Ey) node[anchor=west]{$y~\textrm{(north)}$};
\draw[thick, ->] (0,0,-0.7) node[anchor=north]{nadir} -- (Ez) node[anchor=south]{$z~\textrm{(up, zenith)}$};
\tdplotsetcoord{P1}{0.6}{60}{160}
\tdplotsetcoord{P2}{0.6}{60}{100}
\tdplotsetcoord{P3}{0.6}{100}{100}
\tdplotsetcoord{P4}{0.6}{100}{160}
\tdplotsetcoord{P3a}{0.6}{90}{100}
\tdplotsetcoord{P3b}{0.7}{90}{100}
\draw[dashed,color=orange] (O) -- (P1) node[anchor=east]{$(0,N_y-1)$};
\draw[dashed,color=orange] (O) -- (P2) node[xshift=0.5cm,anchor=west]{$(N_x-1,N_y-1)$};
\draw[dashed,color=orange] (O) -- (P3) node[xshift=0.5cm,anchor=west]{$(N_x-1,0)$};
\draw[dashed,color=orange] (O) -- (P4) node[xshift=-1.2cm,anchor=north east]{$(0,0)$};
\draw[dashed,color=black] (P3a) -- (P3b);
\tdplotdrawarc[color=black,dotted]{(0,0,0)}{0.6}{160}{-90}{}{}
\tdplotdrawarc[color=orange]{(P1z)}{0.519}{100}{160}{}{}
\tdplotdrawarc[dashed,color=blue]{(P1z)}{0.519}{95}{100}{}{}
\tdplotdrawarc[color=orange]{(P3z)}{0.59088}{100}{160}{}{}
\tdplotdrawarc[color=black,dashed]{(0,0,0)}{0.6}{270}{160}{xshift=0.1cm,anchor=west}{$\phi_1 = 110\degree$}
\tdplotdrawarc[color=black,dashed]{(0,0,0)}{0.7}{270}{100}{anchor=east}{$\phi_2 = 170\degree$}
\tdplotsetthetaplanecoords{100}
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,0)}{0.6}{60}{100}{}{}
%\tdplotdrawarc[tdplot_rotated_coords,dotted,color=black]{(0,0,0)}{0.6}{0}{360}{}{}
%\tdplotdrawarc[tdplot_rotated_coords,dashed,color=black]{(0,0,0)}{0.6}{100}{90}{}{}
\tdplotsetthetaplanecoords{155}
\tdplotdrawarc[tdplot_rotated_coords,dashed,color=blue]{(0,0,0)}{0.6}{100}{180}{anchor=east}{$\theta_1 = 80\degree$}
\tdplotsetthetaplanecoords{95}
\tdplotdrawarc[tdplot_rotated_coords,dashed,color=blue]{(0,0,0)}{0.6}{60}{180}{anchor=west}{$\theta_2 = 120\degree$}
\tdplotsetthetaplanecoords{160}
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,0)}{0.6}{60}{100}{}{}
%\tdplotdrawarc[tdplot_rotated_coords,dotted,color=black]{(0,0,0)}{0.6}{0}{360}{}{}
%\tdplotdrawarc[tdplot_rotated_coords,dashed,color=black]{(0,0,0)}{0.6}{80}{90}{}{}
\end{tikzpicture}
\caption{Panorama coordinates for the option \codeidx{mc\_panorama\_alignment} \code{zenith} (default). The camera is located at the coordinate system origin. The image is recorded on a spherical shell inside the orange box, which is behind the origin. The boundaries of this box are given by the parameters $\phi_1$, $\phi_2$, $\theta_1$, $\theta_2$ to \codeidx{mc\_panorama\_view}. $\phi_1$ and $\phi_2$ are measured in the $x$-$y$ plane, beginning in the south. $\theta_1$ and $\theta_2$ measure the ``latitude'' where nadir is 0. The coordinates given in orange show the $(x, y)$ coordinates of the resulting image. The $(x, y)$ coordinates thus correspond to $(\theta, \phi)$ coordinates.}
\label{fig_panorama_coords_zenith}
\end{figure}
\begin{figure}[h!]
\centering
\tdplotsetmaincoords{70}{20}
\pgfmathsetmacro{\thetavec}{36.87}
\pgfmathsetmacro{\phivec}{-50}
\begin{tikzpicture}[scale=5,tdplot_main_coords]
\coordinate (O) at (0,0,0);
\coordinate (Ex) at (1,0,0);
\coordinate (Ey) at (0,1,0);
\coordinate (Ez) at (0,0,1);
\draw[thick, ->] (-0.1,0,0) -- (Ex) node[anchor=west]{$x~\textrm{(east)}$};
\draw[thick, ->] (0,-0.6,0) -- (Ey) node[anchor=west]{$y~\textrm{(north)}$};
\draw[thick, ->] (0,0,0) -- (Ez) node[anchor=south]{$z~\textrm{(up)}$};
\tdplotsetcoord{P}{0.6}{\thetavec}{\phivec}
\tdplotsetcoord{Q}{0.6}{53.13}{130}
\draw[dashed, color=blue] (P) -- (Pxy) node[near start,anchor=west]{$\textrm{umu} = -0.8$};
\draw[dashed, color=blue] (O) -- (Pxy);
\tdplotdrawarc[color=red]{(O)}{0.48}{270}{130}{xshift=0.3cm,anchor=south east}{$\textrm{phi} = 140\degree$};
\draw[dashed, color=red] (Q) -- (Qxy);
\tdplotsetrotatedcoords{\phivec}{\thetavec}{0}
\draw[tdplot_rotated_coords, thick, ->, color=magenta] (0,-0.1,0) -- (0,0.6,0) node[anchor=south east]{$x'$};
\draw[tdplot_rotated_coords, thick, ->, color=red] (0.4,0,0) -- (-0.6,0,0) node[anchor=north west]{$y'$};
\draw[tdplot_rotated_coords, thick, -, color=red] (0.6,0,0) -- (0.53,0,0);
\draw[tdplot_rotated_coords, thick, ->, color=blue] (0,0,-0.6) -- (0,0,0.6) node[anchor=south east]{$z'$};
\tdplotdrawarc[tdplot_rotated_coords,color=black,dotted]{(0,0,0)}{0.6}{0}{345}{}{};
\tdplotdrawarc[tdplot_rotated_coords,color=black,dashed]{(0,0,0)}{0.6}{0}{-15}{anchor=north west}{$\phi_1=15\degree$};
\tdplotdrawarc[tdplot_rotated_coords,color=black,dashed]{(0,0,0)}{0.8}{0}{-45}{anchor=north west}{$\phi_2=45\degree$};
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,0.1042)}{0.59088}{-15}{-45}{}{}
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,-0.1042)}{0.59088}{-15}{-45}{}{}
\tdplotsetrotatedthetaplanecoords{0}
\tdplotdrawarc[tdplot_rotated_coords,color=black,dotted]{(0,0,0)}{0.6}{0}{360}{}{}
\tdplotsetrotatedthetaplanecoords{-15} %note that this changes the rotated frame already
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,0)}{0.6}{80}{100}{}{}
\tdplotdrawarc[tdplot_rotated_coords,dashed,color=blue]{(0,0,0)}{0.6}{180}{100}{yshift=1cm,anchor=west}{$\theta_1=80\degree$}
\tdplotsetrotatedthetaplanecoords{-30}
\tdplotdrawarc[tdplot_rotated_coords,color=orange]{(0,0,0)}{0.6}{80}{100}{}{}
\tdplotsetrotatedthetaplanecoords{-5}
\tdplotdrawarc[tdplot_rotated_coords,dashed,color=blue]{(0,0,0)}{0.6}{180}{80}{yshift=0.5cm,anchor=east}{$\theta_2=100\degree$}
\end{tikzpicture}
\caption{Panorama coordinates for \codeidx{mc\_panorama\_alignment} \code{mu}. In \emph{mu} aligned mode, the panorama is taken as in \emph{zenith}-mode, however the whole coordinate system is rotated using the parameters \codeidx{umu} and \codeidx{phi} yielding the new coordinates $x'$, $y'$, $z'$. If $\textrm{umu} = -1$ and $\textrm{phi} = 180\degree$, the coordinates are exactly the same as in \emph{zenith} mode. Be aware that if \codeidx{umu} is $1$ or $-1$, the value of \codeidx{phi} is not accepted and the system behaves as if $\textrm{phi} = 180\degree$. In this case, the equivalent rotation can be achieved by $\phi_1$ and $\phi_2$.}
\label{fig_panorama_coords_mu}
\end{figure}
If panoramic images are calculated, the sensor specification is extended by a definition of the camera's field of view via the option \codeidx{mc\_panorama\_view}.
The behavior of this specification can be changed by the option \codeidx{mc\_panorama\_alignment}.
In the default mode (\codeidx{mc\_panorama\_alignment} \code{zenith}), \code{umu} and \code{phi} are ineffective.
The view specification behaves as described in figure~\ref{fig_panorama_coords_zenith}.
If \codeidx{mc\_panorama\_alignment} \code{mu} is activated, the user can basically rotate the camera.
The angles are defined as in figure~\ref{fig_panorama_coords_mu}.
\subsubsection{Three-dimensional clouds}
The options \codeidx{wc\_file 3D}
and \codeidx{ic\_file 3D} may be used to define 3D water and ice
cloud properties respectively. The model atmosphere may contain both 3D
(defined in a \code{wc\_file/ic\_file 3D}) and 1D clouds (defined in a
\code{wc\_file/ic\_file 1D}), but for obvious reasons not in the same layer. The format
of the files is explained in section~\ref{sec:uvspec_options}.
The conversion from microphysical to optical properties
is done identically for 1D and 3D clouds and is defined
by the options \codeidx{wc\_properties} and \codeidx{ic\_properties}. All
possible settings for \code{wc\_properties} and \code{ic\_properties}
have been implemented in MYSTIC (e.g., hu, mie, yang, HEY, baum ...).
Clouds can be defined either as ASCII or as netCDF files.
\paragraph{ASCII}
The following example shows the easiest possible 3D
cloud file which defines a checkerboard grid of clouds:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
2 2 5 3
1.0 1.0 0 1 2 3 4 5
1 1 2 0.1 10
2 2 2 0.1 10
\end{Verbatim}
For the layer between 1 and 2 km a 3D cloud is defined with liquid water
content 0.1 $g/m^3$ and effective radius 10 $\mu$m for horizontal
boxes (1,1) and (2,2). Boxes (1,2) and (2,1) are cloudless. The result is
cubic clouds, each with a volume of 1x1x1~km$^3$. The microphysical properties are
converted to optical properties according to \code{wc\_properties}.
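Such a file is easily written by a script; the following Python fragment
(a sketch; the file name is arbitrary) reproduces the checkerboard
example above:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
with open("wc3d.dat", "w") as f:
    f.write("2 2 5 3\n")               # Nx Ny Nz flag (3: lwc and reff)
    f.write("1.0 1.0 0 1 2 3 4 5\n")   # dx dy and the Nz+1 z levels in km
    for (ix, iy) in [(1, 1), (2, 2)]:  # only two columns contain cloud
        f.write("%d %d 2 0.1 10\n" % (ix, iy))  # ix iy iz lwc reff
\end{Verbatim}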
\paragraph{netCDF}
If the provided cloud file is a netCDF instead of an ASCII file, this will be
detected automatically. The netCDF file must have the following structure:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
netcdf cloud {
dimensions:
ny = 2 ;
nx = 2 ;
nz = 1 ;
nz_lev = 2 ;
variables:
double lwc(ny, nx, nz) ;
double z(nz_lev) ;
double reff(ny, nx, nz) ;
// global attributes:
:cldproperties = 3 ;
:dx = 1. ;
:dy = 1. ;
}
\end{Verbatim}
The order of the dimensions is important, and the size of \code{nz\_lev} must be
exactly \code{nz + 1}. The \code{cldproperties} attribute is equal to the flag
in the ASCII format. The variable names change for different \code{cldproperties}
according to the following table:
\begin{itemize}
\item{1: \quad \code{ext g1 ssa}}
\item{2: \quad \code{ext reff}}
\item{3: \quad \code{lwc reff}}
\end{itemize}
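A file with this structure can, for example, be created with the Python
netCDF4 package (a sketch using the variable names for
\code{cldproperties = 3}; file name and values are arbitrary):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
import numpy as np
from netCDF4 import Dataset

with Dataset("cloud.nc", "w") as nc:
    for name, size in [("ny", 2), ("nx", 2), ("nz", 1), ("nz_lev", 2)]:
        nc.createDimension(name, size)
    lwc  = nc.createVariable("lwc",  "f8", ("ny", "nx", "nz"))
    reff = nc.createVariable("reff", "f8", ("ny", "nx", "nz"))
    z    = nc.createVariable("z",    "f8", ("nz_lev",))
    lwc[:]  = 0.1           # liquid water content in g/m^3
    reff[:] = 10.0          # effective radius in micron
    z[:]    = [1.0, 2.0]    # layer boundaries in km
    nc.cldproperties = 3    # lwc / reff pair, as in the ASCII format
    nc.dx = 1.0             # horizontal grid spacing in km
    nc.dy = 1.0
\end{Verbatim}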
\subsubsection{Two-dimensional surface albedo}
A 2D surface albedo may be defined using the option
\codeidx{mc\_albedo\_file}. The format of the file is explained in
section~\ref{sec:uvspec_options}.
\subsubsection{Topography}
A 2D elevation may be defined using the option
\codeidx{mc\_elevation\_file}. The ASCII format is explained in
section~\ref{sec:uvspec_options}. For the netcdf format please refer
to \file{README.MC}.
Caution:
\begin{itemize}
\item To avoid confusion, do not specify an \code{altitude} different
from 0 in the uvspec input file.
\item There may be problems if the 2D surface hits 0
(more testing required). Use 0.001 as lowest altitude.
\item The elevation file MUST BE PERIODIC
\end{itemize}
Between the grid points, the surface is interpolated
bilinearly:
\begin{equation}
z(x,y) = a + bx + cy + dxy
\end{equation}
where $z$ is the altitude and the coefficients $a$, $b$, $c$, and $d$
are determined from
the altitudes specified at the four corners of each pixel.
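For a pixel normalized to unit size with corner altitudes $z_{00}$,
$z_{10}$, $z_{01}$, and $z_{11}$ (indices denoting the $x$ and $y$
corners), the coefficients follow directly from evaluating the
interpolation at the corners:
\begin{equation}
a = z_{00}, \quad b = z_{10} - z_{00}, \quad c = z_{01} - z_{00}, \quad
d = z_{11} - z_{10} - z_{01} + z_{00} .
\end{equation}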
Slant surfaces are treated as they should be; e.g., photons
may be reflected downward at snow-covered mountain sides.
\subsubsection{Bidirectional reflectance distribution function}
MYSTIC allows different homogenous or 2D-inhomogeneous BRDFs. The
implementation of the different BRDFs is rather heterogeneous,
as were the requirements when each of them was first implemented.
Please note that BRDFs currently do not work together with topography. The
simple reason is that in structured terrain, reflection
polar angles larger than 90 degrees are possible, for which the
existing parameterizations do not provide data. The following
parameterizations are included:
\begin{itemize}
\item Water BRDF by \citet{cox54a,cox54b}; currently only homogeneous; switched
on with \codeidx{brdf\_cam u10}, ... like in the 1D case
\item AMBRALS (Algorithm for Modeling [MODIS] Bidirectional Reflectance
  Anisotropies of the Land Surface; \citet{wanner97}), see also
  \url{http://i3rc.gsfc.nasa.gov/phase3/brasil/phase3_step1_brasil.htm}.
  AMBRALS is a three-parameter BRDF fit for vegetated and non-vegetated surfaces.
  A \codeidx{mc\_ambrals\_file} may be specified which contains the
  3 parameters for each surface pixel. The format is like
  that of the 2D surface albedo file, except that each data line
  contains (iso, vol, geo) instead of only one albedo value.
  Obviously, wavelength-dependent AMBRALS BRDFs are not
  possible at present.
\item RPV \citep{rahman93a}, a three parameter
fit for vegetated and non-vegetated surfaces. RPV is currently the
most flexible surface description in MYSTIC as it is currently
the only way to define a wavelength-dependent 2D BRDF.
Two different ways exist to define an RPV BRDF:
\begin{itemize}
  \item 1D, using the options \codeidx{brdf\_rpv k} ... like in the 1D case
  \item 2D, using \codeidx{mc\_rpv\_file} and \codeidx{mc\_rpv\_type}.
        The former file contains an RPV
        type for each surface pixel; a type is simply a label like
        ``Grass'' or ``1'' or whatever. For each of these types,
        \code{mc\_rpv\_type} must have an entry which connects the type to
        a file which contains wavelength-dependent RPVs. This sounds
        complicated but is a very efficient way to define a
        wavelength-dependent 2D RPV. The label ``CaM'' is reserved for Cox and Munk
        and invokes the Cox and Munk ocean BRDF for the respective pixel. Thus
        it is possible to combine land and ocean BRDFs.
\end{itemize}
\item For calculations including polarization the option
\codeidx{bpdf\_tsang\_u10} is available which calculates polarized
bidirectional reflection from water surfaces.
\end{itemize}
}
\subsubsection{MYSTIC output}
{\sl uvspec} usually prints its output (horizontally averaged
irradiance and actinic flux) to stdout. MYSTIC provides
several additional output
files. We have to distinguish two classes of output:
monochromatic and spectral output, where the latter can be recognized
by the extension ``.spc''. Monochromatic output files
\begin{itemize}
\item mc.flx - irradiance, actinic flux at levels
\item mc.rad - radiance
\item mc.abs - absorption/emission
\item mc.act - actinic flux, averaged over layers
\end{itemize}
are generated only when MYSTIC is called exactly once, that is, for a
monochromatic calculation without subbands introduced by
\code{mol\_abs\_param}. They contain ``plain'' MYSTIC output, without
consideration of extraterrestrial irradiance, Sun-Earth distance, spectral
integration, etc. As such they are mainly interesting for MYSTIC developers
or for users interested in artificial cases and photon statistics since
they are as close as possible to the photon statistics of MYSTIC: e.g.
the ``irradiance'' in these files is basically the number of photons arriving
at the detector divided by the number of photons traced. In addition to the
average, a standard deviation of the result can be calculated online, which is
stored in ``.std'' files.
For most real-world applications the user will prefer the ``.spc'' files
\begin{itemize}
\item mc.flx.spc - spectral irradiance, actinic flux at levels
\item mc.rad.spc - spectral radiance at levels
\item ...
\end{itemize}
In contrast to the monochromatic files, which are transmittances (E/E0, L/E0,
...), the data in ``.spc'' files are ``fully calibrated'' output, as for all other
solvers. ``Fully calibrated'' means multiplied with the extraterrestrial
irradiance, corrected for the Sun-Earth distance, integrated/summed
over wavelength, etc. Please be aware that such a calculation might require
a lot of memory because output is stored as a function of x, y, z, and
wavelength (and possibly polarization, if you switched on \code{mc\_polarisation}).
E.g.\ a comparatively harmless \code{mol\_abs\_param kato2} calculation with a
sample grid of $100\times100$ pixels at 10 altitudes would imply about
$100\times100\times10\times148 = 14{,}800{,}000$ ($N_x \cdot N_y \cdot N_z \cdot N_\lambda$) grid points. Depending
on the output chosen (irradiance, radiance, ...) up to six floating
point numbers need to be stored, which amounts to 360~MBytes. Depending
on the post-processing in {\sl uvspec}, this memory may actually be used
twice, which then would be 720~MBytes.
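The estimate above can be reproduced with a back-of-the-envelope calculation
(a minimal Python sketch; 148 is the number of bands assumed in the example):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
nx, ny, nz, nlambda = 100, 100, 10, 148  # sample grid and spectral bands
ngrid = nx * ny * nz * nlambda           # 14,800,000 grid points
nbytes = ngrid * 6 * 4                   # up to 6 floats of 4 bytes each
print(nbytes / 1e6, "MB")                # about 355 MB, i.e. roughly 360 MBytes
\end{Verbatim}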
\begin{description}
\item[mc.flx / mcNN.flx]
The output file \code{mc.flx} contains the irradiance
at the surface defined by \code{mc\_elevation\_file}. Note that
this output is {\bf not} for $z=0$, but for the actual 2D surface:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
500 500 0 0.325889 0 0 0.441766 0
500 1500 0 0.191699 0 0 0.267122 0
500 2500 0 0.210872 0 0 0.420268 0
\end{Verbatim}
The columns are:
\begin{enumerate}
\item x [m] (pixel center)
\item y [m] (pixel center)
\item direct transmittance
\item diffuse downward transmittance
\item diffuse upward transmittance
\item direct actinic flux transmittance
\item diffuse downward actinic flux transmittance
\item diffuse upward actinic flux transmittance
\end{enumerate}
The transmittance is defined as the irradiance divided by the extraterrestrial irradiance.
It is not corrected for the Sun-Earth distance.
Note that even for an empty atmosphere, the transmittance
would not be 1 but $\cos(\mathrm{SZA})$, due to the slant incidence of the radiation.
The output files \code{mcNN.flx} contain the irradiances
at different model levels - one for each \code{zout}. \code{NN} is the
number of the output level counted from the bottom (ATTENTION: Levels are
counted from 0 here!). The file format is the same as in \code{mc.flx}.
(If interested in surface quantities, please use the irradiance data at the
surface from \code{mc.flx}, not from \code{mc00.flx}; the data from
\code{mc00.flx} or whatever layer coincides with the
surface may be wrong for technical reasons).
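Files with this layout can be read directly with standard tools, for
instance with Python (a minimal sketch, not part of {\sl uvspec}):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
import numpy as np

# columns: x, y, Edir, Edn, Eup, Fdir, Fdn, Fup (all transmittances)
data = np.loadtxt("mc.flx")
x, y = data[:, 0], data[:, 1]
edir, edn, eup = data[:, 2], data[:, 3], data[:, 4]
total_down = edir + edn  # total downward transmittance per pixel
\end{Verbatim}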
\item[mc.rad / mcNN.rad]
The output file \code{mc.rad} contains the radiance
at the surface defined by \code{mc\_elevation\_file}. Note that
this output is {\bf not} for $z=0$, but for the actual 2D surface:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
500 500 45 270 0.0239094 0 0.0623305 0.063324
500 1500 45 270 0.0239094 0 0.0602891 0.063156
\end{Verbatim}
The columns are:
\begin{enumerate}
\item x [m] (pixel center)
\item y [m] (pixel center)
\item viewing zenith angle [deg]
\item viewing azimuth angle [deg]
\item aperture solid angle [sterad]
\item direct radiance component
\item diffuse radiance component
\item "escape" radiance
\end{enumerate}
For almost all applications you may safely ignore the ``direct'' and
``diffuse'' radiance components and use only the escape radiance. If the
latter is 0 then you probably forgot to switch on \code{mc\_escape}.
The ``escape'' radiance is the radiance ``measured'' by an ideal
instrument with 0$^\circ$ opening angle. It is only calculated
when \codeidx{mc\_escape} is selected, and it usually converges
much faster than the ``cone sampled'' radiance in column 7.
It is recommended to always use \code{mc\_escape} for radiance
calculations. For the ``direct'' and ``diffuse'' radiance, photons
falling into the aperture are counted. This may be an option only for
instruments with a very large aperture, because otherwise the result
is noisy.
The output files \code{mcNN.rad} contain the radiances
at different model levels - one for each \code{zout}. \code{NN} is the
number of the output level counted from the bottom (ATTENTION: Levels are
counted from 0 here!). The file format is the same as in \code{mc.rad}.
(If interested in surface quantities, please use the radiance data
at the surface from \code{mc.rad}, not from \code{mc00.rad}; the data from
\code{mc00.rad} or whatever layer coincides with the
surface may be wrong for technical reasons).
\item[mcNN.abs]
The file \code{mcNN.abs} includes the absorption per unit area in the given layer.
NN is the number of the output layer on the atmospheric
grid (counted from the bottom, starting from 1). This file is generated if \code{mc\_forward\_output absorption}
or \code{mc\_forward\_output emission} is specified.
The columns are:
\begin{enumerate}
\item x [m] (pixel center)
\item y [m] (pixel center)
\item absorption/emission/heating rate (W/m$^2$)
\end{enumerate}
If multiplied by the extraterrestrial irradiance, the column
absorption in W/m$^2$ is obtained. In a 1D atmosphere, with a solar source,
absorption = e\_net(top) - e\_net(bottom) (this is not true for a thermal
source because then emission needs to be considered; see below). If
\code{mc\_forward\_output emission} is specified, the file contains the thermal emission of
the layer per unit area, that is, the Planck function times the optical
thickness of the layer times 4$\pi$ (angular integral of the Planck
radiance). If \code{mc\_forward\_output heating} is specified, the heating rate per unit area is
provided instead of absorption (in the same units as absorption).
For a solar source, the heating rate is identical to the absorption.
In the thermal range, however, each emitted photon is counted as cooling
and hence the heating rate may be negative. In a 1D atmosphere, with a
solar or thermal source, absorption = e\_net(top) - e\_net(bottom).
For computational efficiency reasons \code{mcNN.abs} is not provided on the
sample grid but on the atmospheric grid. For the same reason, results are
only calculated for 3D layers. In order to obtain 3D absorption for a 1D
cloudless layer, you need to specify an optically very thin 3D cloud, e.g.
LWC/IWC = 10$^{-20}$ g/m$^3$ (yes, this is a dirty trick but a necessary one).
\item[mcNN.act]
\code{mcNN.act} contains the 4$\pi$ actinic flux in the given layer,
calculated from the absorbed energy (per unit area) divided by the
absorption optical thickness of the layer. In contrast to the
actinic flux in \code{mcNN.flx}, this is a layer quantity whose
accuracy is generally much better than that of the level quantities,
which are calculated from radiance/$\cos\theta$. As for the absorption
above, \code{mcNN.act} is not provided on the sample grid but on the
atmospheric grid. NN is the number of the output layer (counted from the
bottom, starting from 1). This file is generated if \codeidx{mc\_forward\_output actinic} is specified.
The columns are:
\begin{enumerate}
\item x [m]
\item y [m]
\item actinic flux (W/m$^2$)
\end{enumerate}
\end{description}
The spectral files are as follows:
\begin{description}
\item[mc.flx.spc]
\ifthreedmystic{This is the most versatile and useful irradiance /
actinic flux output of MYSTIC}:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
400.0 0 0 0 1.0e+00 0.0e+00 1.5067e-01 1.0e+00 0.0e+00 3.5044e-01
401.0 0 0 0 1.0e+00 0.0e+00 1.5044e-01 1.0e+00 0.0e+00 3.5863e-01
402.0 0 0 0 1.0e+00 0.0e+00 1.5022e-01 1.0e+00 0.0e+00 3.4755e-01
\end{Verbatim}
The columns are:
\begin{enumerate}
\item wavelength [nm]
\item ix (0 ... Nx-1)
\item iy (0 ... Ny-1)
\item iz (0 ... Nz-1)
\item direct irradiance
\item diffuse downward irradiance
\item diffuse upward irradiance
\item direct actinic flux
\item diffuse downward actinic flux
\item diffuse upward actinic flux
\end{enumerate}
These numbers are created the same way as the standard {\sl uvspec} output. That
is, they are multiplied with the extraterrestrial irradiance, corrected for
Sun-Earth-distance, integrated over wavelength, converted to reflectivity or
brightness temperature, etc.
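For post-processing, the flat column layout can be regridded, e.g.\ with
Python (a minimal sketch; it assumes that rows are ordered by wavelength,
then ix, iy, iz, which should be verified for the actual run):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
import numpy as np

# columns: lambda, ix, iy, iz, Edir, Edn, Eup, Fdir, Fdn, Fup
d = np.loadtxt("mc.flx.spc")
nlam = np.unique(d[:, 0]).size
nx = int(d[:, 1].max()) + 1
ny = int(d[:, 2].max()) + 1
nz = int(d[:, 3].max()) + 1
cube = d[:, 4:].reshape(nlam, nx, ny, nz, 6)
\end{Verbatim}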
\item[mc.rad.spc]
\ifthreedmystic{This is the most versatile and useful radiance output of
MYSTIC}:
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
400.0 0 0 0 0.0398276
401.0 0 0 0 0.0396459
402.0 0 0 0 0.0398005
\end{Verbatim}
The columns are:
\begin{enumerate}
\item wavelength [nm]
\item ix (0 ... Nx-1)
\item iy (0 ... Ny-1)
\item iz (0 ... Nz-1)
\item radiance
\end{enumerate}
These numbers are created the same way as the standard {\sl uvspec}
output. That is, they are multiplied with the extraterrestrial irradiance,
corrected for Sun-Earth-distance, integrated over wavelength, converted to
reflectivity or brightness temperature, etc.
If the {\bf polarized} MYSTIC is used (\code{mc\_polarisation}), the four
components of the Stokes vector (I,Q,U,V) are output for each wavelength and
grid point, in four separate lines.
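Assuming the four Stokes lines are consecutive as described above, they can
be regrouped easily (a minimal Python sketch):
\begin{Verbatim}[fontsize=\footnotesize, frame=single, samepage=true]
import numpy as np

d = np.loadtxt("mc.rad.spc")     # columns: lambda, ix, iy, iz, radiance
stokes = d[:, 4].reshape(-1, 4)  # four consecutive rows per grid point
I, Q, U, V = stokes.T
\end{Verbatim}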
\ifthreedmystic{
\item[mc.abs.spc]
\code{mc.abs.spc} contains the absorption per unit area for all layers. This
file is generated if \code{mc\_forward\_output absorption} or \code{mc\_forward\_output emission} is specified.
The columns are:
\begin{enumerate}
\item wavelength [nm]
\item ix (0 ... Nx-1)
\item iy (0 ... Ny-1)
\item iz (0 ... Nz-1)
\item absorption/emission/heating rate
\end{enumerate}
These numbers are created the same way as the standard {\sl uvspec} output. That
is, they are multiplied with the extraterrestrial irradiance, corrected for
Sun-Earth-distance, integrated over wavelength, converted to reflectivity or
brightness temperature, etc. The unit is determined by an extra option to
\code{mc\_forward\_output absorption} or \code{mc\_forward\_output emission}.
For computational efficiency reasons \code{mc.abs.spc} is not provided on the
sample grid but on the atmospheric grid. For the same reason, results are
only calculated for 3D layers. In order to obtain 3D absorption for a 1D
cloudless layer, you need to specify an optically very thin 3D cloud, e.g.
LWC/IWC = 10$^{-20}$ g/m$^3$ (yes, this is a dirty trick but a necessary one).
\item[mc.act.spc]
\code{mc.act.spc} contains the 4$\pi$ actinic flux in each layer,
calculated from the absorbed energy (per unit area) divided by the
absorption optical thickness of the layer. In contrast to the
actinic flux in \code{mc.flx.spc}, this is a layer quantity whose
accuracy is generally much better than that of the level quantities,
which are calculated from radiance/$\cos\theta$. As for the absorption
above, \code{mc.act.spc} is not provided on the sample grid but on the
atmospheric grid; the layer is identified by the \code{iz} column.
This file is generated if \codeidx{mc\_forward\_output actinic} is specified.
The columns are:
\begin{enumerate}
\item wavelength [nm]
\item ix (0 ... Nx-1)
\item iy (0 ... Ny-1)
\item iz (0 ... Nz-1)
\item actinic flux
\end{enumerate}
}
\end{description}
\ifthreedmystic{
\subsubsection{Memory requirements and computational speed}
\label{sec:mc_memory}
\begin{enumerate}
\item 3D clouds are defined on a 3D grid and the amount of
memory required by MYSTIC is usually determined by
the 3D cloud grid. Several variables need to be stored
for each grid cell, including the optical properties of
the cell, the grid cell absorption, etc. Umpteen (20-100)
bytes are typically required for each grid cell.
Computational time is to a large degree determined by
the total optical thickness of the cloud because higher
extinction means more scattering events and a longer
photon path. To a lesser degree, the computational time
is influenced by the number of cells but this may become
important if the radiance is calculated using
local estimates.
\item 2D albedo requires some memory but usually less than
clouds because it is defined on a 2D grid compared to the
      3D cloud grid. Only one double (8 bytes)
is stored per pixel. A high resolution has no influence
on computational time.
\item 2D elevation requires somewhat more memory than a 2D albedo
because 5 doubles are stored per pixel. Higher resolution
will lead to higher computational times.
\item The sample grid by itself has only little influence on
computational time because for a given number of photons,
the computational time does not depend on the number of
      grid cells. The precision of the result, however, depends
      strongly on the number of grid cells $N_x \cdot N_y$, as it
      decreases with $\sqrt{N_x \cdot N_y}$. The number of altitude
levels has only little influence on the computational
time but of course a large influence on the memory
requirements. The calculation of radiances has a large
impact on computational time if local estimates are used.
% CE: In current version it is only possible to compute 1 direction at a time
%In the worst case, the computational time may
%scale directly with the number of directions for which the
%radiance is to be calculated.
\end{enumerate}
}
|
{"hexsha": "869fb1d585a88259570017388b076efbfb8ea77d", "size": 36543, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ubuntu20/projects/libRadtran-2.0.4/doc/mystic.tex", "max_stars_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_stars_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ubuntu20/projects/libRadtran-2.0.4/doc/mystic.tex", "max_issues_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_issues_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ubuntu20/projects/libRadtran-2.0.4/doc/mystic.tex", "max_forks_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_forks_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.724, "max_line_length": 694, "alphanum_fraction": 0.7323974496, "num_tokens": 11209}
|
import sys
from sklearn.model_selection import LeaveOneGroupOut
import numpy as np
import anndata as ad
import torch
from torch import nn
import torch.nn.functional as F
import pandas as pd
import shutil
import pickle
from catalyst import dl, utils
import catalyst
import os
## VIASH START
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]=""
dataset_path = "output/datasets/match_modality/openproblems_bmmc_cite_phase2_rna/openproblems_bmmc_cite_phase2_rna.censor_dataset.output_"
pretrain_path = "output/pretrain/match_modality/clue/openproblems_bmmc_cite_phase2_rna.clue_train.output_pretrain/"
par = {
'input_train_mod1': f'{dataset_path}train_mod1.h5ad',
'input_train_mod2': f'{dataset_path}train_mod2.h5ad',
'input_train_sol': f'{dataset_path}train_sol.h5ad',
'input_test_mod1': f'{dataset_path}test_mod1.h5ad',
'input_test_mod2': f'{dataset_path}test_mod2.h5ad',
'input_test_sol': f'{dataset_path}test_sol.h5ad',
'output_pretrain': pretrain_path
}
meta = {
'resources_dir': '.',
'functionality_name': '169594'
}
## VIASH END
sys.path.append(meta['resources_dir'])
from data import get_dataloaders, ModalityMatchingDataset
from models import Modality_CLIP, Encoder, symmetric_npair_loss
from catalyst_tools import scRNARunner, CustomMetric
from preprocessing import lsiTransformer
os.makedirs(par['output_pretrain'], exist_ok=True)
print("Start train")
input_train_mod1 = ad.read_h5ad(par['input_train_mod1'])
input_train_mod2 = ad.read_h5ad(par['input_train_mod2'])
sol_train = ad.read_h5ad(par['input_train_sol'])
mod1 = input_train_mod1.var['feature_types'][0]
mod2 = input_train_mod2.var['feature_types'][0]
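# Align the modality-2 cells with the modality-1 cells using the training
# pairing solution (argmax over each row of the solution matrix).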
input_train_mod2 = input_train_mod2[sol_train.to_df().values.argmax(1)]
if mod1 == 'ADT' or mod2 == 'ADT':
import config_ADT2GEX as config
logo = LeaveOneGroupOut()
groups = sol_train.obs.batch
logo.get_n_splits(input_train_mod2, groups=groups)
for fold_number, (train_indexes, test_indexes) in enumerate(logo.split(input_train_mod2, groups=groups)):
trial_dump_folder = os.path.join(par['output_pretrain'], str(fold_number))
os.makedirs(trial_dump_folder, exist_ok=True)
lsi_transformer_gex = lsiTransformer(n_components=config.N_LSI_COMPONENTS_GEX, drop_first=True)
if(mod1 == 'ADT'):
gex_train = lsi_transformer_gex.fit_transform(input_train_mod2[train_indexes])
gex_test = lsi_transformer_gex.transform(input_train_mod2[test_indexes])
adt_train = input_train_mod1[train_indexes].to_df()
adt_test = input_train_mod1[test_indexes].to_df()
dataset_train = ModalityMatchingDataset(adt_train, gex_train)
dataset_test = ModalityMatchingDataset(adt_test, gex_test)
dataloader_train = torch.utils.data.DataLoader(dataset_train, config.BATCH_SIZE, shuffle = True)
dataloader_test = torch.utils.data.DataLoader(dataset_test, 2048, shuffle = False)
model = Modality_CLIP(
Encoder=Encoder,
layers_dims = (
config.LAYERS_DIM_FIRST,
config.LAYERS_DIM_GEX
),
dropout_rates = (
config.DROPOUT_RATES_FIRST,
config.DROPOUT_RATES_GEX
),
dim_mod1 = 134 if mod1 == 'ADT' else config.N_LSI_COMPONENTS_FIRST,
dim_mod2 = config.N_LSI_COMPONENTS_GEX,
output_dim = config.EMBEDDING_DIM,
T = config.LOG_T,
swap_rate_1 = 0.,
swap_rate_2 = 0.)
optimizer = torch.optim.Adam(model.parameters(), config.LR, weight_decay=config.weight_decay)
loaders = {
"train": dataloader_train,
"valid": dataloader_test,
}
runner = scRNARunner()
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
num_epochs=config.N_EPOCHS,
callbacks=[
dl.OptimizerCallback(metric_key='loss'),
dl.CheckpointCallback(
logdir = trial_dump_folder,
loader_key='valid',
metric_key='avg_acc',
minimize=False,
use_runner_logdir=False,
save_n_best=1
),
dl.EarlyStoppingCallback(
patience=150,
loader_key='valid',
metric_key='avg_acc',
minimize=False,
min_delta=1e-5),
dl.LoaderMetricCallback(
metric=CustomMetric(),
input_key=['embeddings_first', 'embeddings_second', 'temperature'],
target_key=['embeddings_second']
),
],
verbose=True
)
with open(trial_dump_folder + '/lsi_transformer.pickle', 'wb') as f:
pickle.dump(lsi_transformer_gex, f)
else:
import config_ATAC2GEX as config
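    # Hold out batch s1d1 for validation; all remaining batches are used for training.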
test_indexes = sol_train.obs.batch == 's1d1'
train_indexes = sol_train.obs.batch != 's1d1'
lsi_transformer_atac = lsiTransformer(n_components=config.N_LSI_COMPONENTS_FIRST, drop_first=True)
lsi_transformer_gex = lsiTransformer(n_components=config.N_LSI_COMPONENTS_GEX, drop_first=True)
if(mod1 == 'ATAC'):
atac_train = lsi_transformer_atac.fit_transform(input_train_mod1[train_indexes])
atac_test = lsi_transformer_atac.transform(input_train_mod1[test_indexes])
gex_train = lsi_transformer_gex.fit_transform(input_train_mod2[train_indexes])
gex_test = lsi_transformer_gex.transform(input_train_mod2[test_indexes])
dataset_train = ModalityMatchingDataset(atac_train, gex_train)
dataset_test = ModalityMatchingDataset(atac_test, gex_test)
dataloader_train = torch.utils.data.DataLoader(dataset_train, config.BATCH_SIZE, shuffle = True)
dataloader_test = torch.utils.data.DataLoader(dataset_test, 2048, shuffle = False)
model = Modality_CLIP(
Encoder=Encoder,
layers_dims = (
config.LAYERS_DIM_FIRST,
config.LAYERS_DIM_GEX
),
dropout_rates = (
config.DROPOUT_RATES_FIRST,
config.DROPOUT_RATES_GEX
),
dim_mod1 = 134 if mod1 == 'ADT' else config.N_LSI_COMPONENTS_FIRST,
dim_mod2 = config.N_LSI_COMPONENTS_GEX,
output_dim = config.EMBEDDING_DIM,
T = config.LOG_T,
swap_rate_1 = 0.,
swap_rate_2 = 0.)
optimizer = torch.optim.Adam(model.parameters(), config.LR, weight_decay=config.weight_decay)
loaders = {
"train": dataloader_train,
"valid": dataloader_test,
}
runner = scRNARunner()
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
num_epochs=config.N_EPOCHS,
callbacks=[
dl.OptimizerCallback(metric_key='loss'),
dl.CheckpointCallback(
logdir = par['output_pretrain'],
loader_key='valid',
metric_key='avg_acc',
minimize=False,
use_runner_logdir=False,
save_n_best=1
),
dl.EarlyStoppingCallback(
patience=150,
loader_key='valid',
metric_key='avg_acc',
minimize=False,
min_delta=1e-5),
dl.LoaderMetricCallback(
metric=CustomMetric(),
input_key=['embeddings_first', 'embeddings_second', 'temperature'],
target_key=['embeddings_second']
),
],
verbose=True
)
with open(par['output_pretrain'] + '/lsi_GEX_transformer.pickle', 'wb') as f:
pickle.dump(lsi_transformer_gex, f)
with open(par['output_pretrain'] + '/lsi_ATAC_transformer.pickle', 'wb') as f:
pickle.dump(lsi_transformer_atac, f)
|
{"hexsha": "aab9a491d4bdee004aa26fa6f9c0c40a9db33ef9", "size": 8404, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/match_modality/methods/novel/train/script.py", "max_stars_repo_name": "itscassie/neurips2021_multimodal_topmethods", "max_stars_repo_head_hexsha": "9f8519cdf39bea1e70413283c7c004dff4d30482", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2021-12-07T00:19:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T15:08:25.000Z", "max_issues_repo_path": "src/match_modality/methods/novel/train/script.py", "max_issues_repo_name": "itscassie/neurips2021_multimodal_topmethods", "max_issues_repo_head_hexsha": "9f8519cdf39bea1e70413283c7c004dff4d30482", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-12-06T15:11:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T05:50:40.000Z", "max_forks_repo_path": "src/match_modality/methods/novel/train/script.py", "max_forks_repo_name": "itscassie/neurips2021_multimodal_topmethods", "max_forks_repo_head_hexsha": "9f8519cdf39bea1e70413283c7c004dff4d30482", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-12-06T14:42:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T15:12:45.000Z", "avg_line_length": 37.6860986547, "max_line_length": 138, "alphanum_fraction": 0.6200618753, "include": true, "reason": "import numpy", "num_tokens": 1909}
|
"""
Implementation of variogram-matching procedure.
"""
# Author: Joshua Burt <joshua.burt@yale.edu>
# License: BSD 3 clause
import numpy as np
import numpy.lib.format
from pathlib import Path
from sklearn.linear_model import LinearRegression
# ----------------------
# ------ Checks --------
# ----------------------
def is_string_like(obj):
""" Check whether `obj` behaves like a string. """
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def check_map(x):
"""
Check that brain map is array_like and one dimensional.
Parameters
----------
x : 1D ndarray
Brain map
Returns
-------
None
Raises
------
TypeError : `x` is not a ndarray object
ValueError : `x` is not one-dimensional
"""
if not isinstance(x, np.ndarray):
e = "Brain map must be array-like\n"
e += "got type {}".format(type(x))
raise TypeError(e)
if x.ndim != 1:
e = "Brain map must be one-dimensional\n"
e += "got shape {}".format(x.shape)
raise ValueError(e)
def check_pv(pv):
"""
Check input argument `pv`.
Parameters
----------
pv : int
Percentile of the pairwise distance distribution at which to truncate
during variogram fitting.
Returns
-------
int
Raises
------
ValueError : `pv` lies outside range (0, 100]
"""
try:
pv = int(pv)
    except (TypeError, ValueError):
raise ValueError("parameter 'pv' must be an integer in (0,100]")
if pv <= 0 or pv > 100:
raise ValueError("parameter 'pv' must be in (0,100]")
return pv
def check_deltas(deltas):
"""
Check input argument `deltas`.
Parameters
----------
deltas : 1D ndarray or List[float]
Proportions of neighbors to include for smoothing, in (0, 1]
Returns
-------
None
Raises
------
TypeError : `deltas` is not a List or ndarray object
ValueError : One or more elements of `deltas` lies outside (0,1]
"""
if not isinstance(deltas, list) and not isinstance(deltas, np.ndarray):
raise TypeError("Parameter `deltas` must be a list or ndarray")
for d in deltas:
if d <= 0 or d > 1:
raise ValueError("Each element of `deltas` must lie in (0,1]")
def count_lines(filename):
"""
Count number of lines in a file.
Parameters
----------
filename : filename
Returns
-------
int
number of lines in file
"""
with open(filename, 'rb') as f:
lines = 0
buf_size = 1024 * 1024
read_f = f.raw.read
buf = read_f(buf_size)
while buf:
lines += buf.count(b'\n')
buf = read_f(buf_size)
return lines
# ----------------------
# ------ Data I/O ------
# ----------------------
def dataio(x):
"""
Data I/O for core classes.
To facilitate flexible user inputs, this function loads data from:
- txt files
- npy files (memory-mapped arrays)
- array_like data
Parameters
----------
x : filename or ndarray or np.memmap
Returns
-------
ndarray or np.memmap
Raises
------
FileExistsError : file does not exist
RuntimeError : file is empty
ValueError : file type cannot be determined by file extension
TypeError : input is not a filename or array_like object
"""
if is_string_like(x):
if not Path(x).exists():
raise FileExistsError("file does not exist: {}".format(x))
elif Path(x).stat().st_size == 0:
raise RuntimeError("file is empty: {}".format(x))
elif Path(x).suffix == ".npy": # memmap
return np.load(x, mmap_mode='r')
elif Path(x).suffix == ".txt": # text file
return np.loadtxt(x).squeeze()
else:
raise ValueError(
"expected npy or txt file, got {}".format(Path(x).suffix))
else:
if not isinstance(x, np.ndarray):
raise TypeError(
"expected filename or array_like obj, got {}".format(type(x)))
return x
def txt2memmap(dist_file, output_dir, maskfile=None, delimiter=' '):
"""
Export distance matrix to memory-mapped array.
Parameters
----------
dist_file : filename
Path to `delimiter`-separated distance matrix file
output_dir : filename
Path to directory in which output files will be written
maskfile : filename or ndarray or None, default None
Path to a neuroimaging/txt file containing a mask, or a mask
represented as a numpy array. Mask scalars are cast to boolean, and
all elements not equal to zero will be masked.
delimiter : str
Delimiting character in `dist_file`
Returns
-------
dict
Keys are 'D' and 'index'; values are absolute paths to the
corresponding binary files on disk.
Notes
-----
Each row of the distance matrix is sorted before writing to file. Thus, a
second mem-mapped array is necessary, the i-th row of which contains
argsort(d[i]).
If `maskfile` is not None, a binary mask.txt file will also be written to
the output directory.
Raises
------
IOError : `output_dir` doesn't exist
ValueError : Mask image and distance matrix have inconsistent sizes
"""
op = Path(output_dir)
nlines = count_lines(dist_file)
if not op.exists():
raise IOError("Output directory does not exist: {}".format(output_dir))
# Load mask if one was provided
if maskfile is not None:
mask = dataio(maskfile).astype(bool)
if mask.size != nlines:
e = "Incompatible input sizes\n"
e += "{} rows in {}\n".format(nlines, dist_file)
e += "{} elements in {}".format(mask.size, maskfile)
raise ValueError(e)
mask_fileout = str(op.joinpath("mask.txt"))
np.savetxt( # Write to text file
fname=mask_fileout, X=mask.astype(int), fmt="%i", delimiter=',')
nv = int((~mask).sum()) # number of non-masked elements
idx = np.arange(nlines)[~mask] # indices of non-masked elements
else:
nv = nlines
idx = np.arange(nlines)
# Build memory-mapped arrays
with open(dist_file, 'r') as fp:
npydfile = str(op.joinpath("distmat.npy"))
npyifile = str(op.joinpath("index.npy"))
fpd = numpy.lib.format.open_memmap(
npydfile, mode='w+', dtype=np.float32, shape=(nv, nv))
fpi = numpy.lib.format.open_memmap(
npyifile, mode='w+', dtype=np.int32, shape=(nv, nv))
ifp = 0 # Build memory-mapped arrays one row of distances at a time
for il, l in enumerate(fp): # Loop over lines of file
if il not in idx: # Keep only CIFTI vertices
continue
else:
line = l.rstrip()
if line:
data = np.array(line.split(delimiter), dtype=np.float32)
if data.size != nlines:
raise RuntimeError(
"Distance matrix is not square: {}".format(
dist_file))
d = data[idx]
sort_idx = np.argsort(d)
fpd[ifp, :] = d[sort_idx] # sorted row of distances
fpi[ifp, :] = sort_idx # sort indexes
ifp += 1
del fpd # Flush memory changes to disk
del fpi
return {'distmat': npydfile, 'index': npyifile} # Return filenames
# ----------------------
# - Smoothing kernels --
# ----------------------
def gaussian(d):
"""
Gaussian kernel which truncates at one standard deviation.
Parameters
----------
d : ndarray, shape (N,) or (M,N)
one- or two-dimensional array of distances
Returns
-------
ndarray, shape (N,) or (M,N)
Gaussian kernel weights
Raises
------
TypeError : `d` is not array_like
"""
try: # 2-dim
return np.exp(-1.25 * np.square(d / d.max(axis=-1)[:, np.newaxis]))
except IndexError: # 1-dim
return np.exp(-1.25 * np.square(d/d.max()))
except AttributeError:
raise TypeError("expected array_like, got {}".format(type(d)))
def exp(d):
"""
Exponentially decaying kernel which truncates at e^{-1}.
Parameters
----------
d : ndarray, shape (N,) or (M,N)
one- or two-dimensional array of distances
Returns
-------
ndarray, shape (N,) or (M,N)
Exponential kernel weights
Notes
-----
Characteristic length scale is set to d.max(axis=-1), i.e. the maximum
distance within each row.
Raises
------
TypeError : `d` is not array_like
"""
try: # 2-dim
return np.exp(-d / d.max(axis=-1)[:, np.newaxis])
except IndexError: # 1-dim
return np.exp(-d/d.max())
except AttributeError:
raise TypeError("expected array_like, got {}".format(type(d)))
def invdist(d):
"""
Inverse distance kernel.
Parameters
----------
d : ndarray, shape (N,) or (M,N)
One- or two-dimensional array of distances
Returns
-------
ndarray, shape (N,) or (M,N)
Inverse distance, i.e. d^{-1}
Raises
------
ZeroDivisionError : `d` includes zero value
TypeError : `d` is not array_like
"""
try:
return 1. / d
except ZeroDivisionError as e:
raise ZeroDivisionError(e)
except AttributeError:
raise TypeError("expected array_like, got {}".format(type(d)))
def uniform(d):
"""
Uniform (i.e., distance independent) kernel.
Parameters
----------
d : ndarray, shape (N,) or (M,N)
One- or two-dimensional array of distances
Returns
-------
ndarray, shape (N,) or (M,N)
Uniform kernel weights
Notes
-----
Each element is normalized to 1/N such that columns sum to unity.
Raises
------
TypeError : `d` is not array_like
"""
try: # 2-dim
return np.ones(d.shape) / d.shape[-1]
except IndexError: # 1-dim
return np.ones(d.size) / d.size
except AttributeError:
raise TypeError("expected array_like, got {}".format(type(d)))
def check_kernel(kernel):
"""
Check that a valid kernel was specified and return callable.
Parameters
----------
kernel : 'exp' or 'gaussian' or 'invdist' or 'uniform'
Kernel selection
Returns
-------
Callable
Raises
------
NotImplementedError : kernel is not implemented
"""
kernels = {'exp': exp,
'gaussian': gaussian,
'invdist': invdist,
'uniform': uniform}
if kernel not in kernels.keys():
e = "'{}' is not a valid kernel\n".format(kernel)
e += "Valid kernels: {}".format(", ".join([k for k in kernels.keys()]))
raise NotImplementedError(e)
return kernels[kernel]
# ----------------------
# ---- Core classes ----
# ----------------------
class Base:
""" Base implementation of map generator.
Parameters
----------
x : filename or 1D ndarray
Target brain map
D : filename or ndarray, shape (N,N)
Pairwise distance matrix
deltas : 1D ndarray or List[float], default [0.1,0.2,...,0.9]
Proportion of neighbors to include for smoothing, in (0, 1]
kernel : str, default 'exp'
Kernel with which to smooth permuted maps:
'gaussian' : Gaussian function.
'exp' : Exponential decay function.
'invdist' : Inverse distance.
'uniform' : Uniform weights (distance independent).
pv : int, default 25
Percentile of the pairwise distance distribution at which to
truncate during variogram fitting
nh : int, default 25
Number of uniformly spaced distances at which to compute variogram
resample : bool, default False
Resample surrogate maps' values from target brain map
b : float or None, default None
Gaussian kernel bandwidth for variogram smoothing. If None, set to
three times the spacing between variogram x-coordinates.
Notes
-----
Passing resample=True preserves the distribution of values in the target
map, with the possibility of worsening the simulated surrogate maps'
variograms fits.
"""
def __init__(self, x, D, deltas=np.linspace(0.1, 0.9, 9),
kernel='exp', pv=25, nh=25, resample=False, b=None):
self.x = x
self.D = D
n = self._x.size
self.resample = resample
self.nh = nh
self.deltas = deltas
self.pv = pv
self.nmap = n
self.kernel = kernel # Smoothing kernel selection
self._ikn = np.arange(n)[:, None]
self._triu = np.triu_indices(self._nmap, k=1) # upper triangular inds
self._u = self._D[self._triu] # variogram X-coordinate
self._v = self.compute_variogram(self._x) # variogram Y-coord
# Get indices of pairs with u < pv'th percentile
self._uidx = np.where(self._u < np.percentile(self._u, self._pv))[0]
self._uisort = np.argsort(self._u[self._uidx])
# Find sorted indices of first `kmax` elements of each row of dist. mat.
self._disort = np.argsort(self._D, axis=-1)
self._jkn = dict.fromkeys(deltas)
self._dkn = dict.fromkeys(deltas)
for delta in deltas:
k = int(delta*n)
# find index of k nearest neighbors for each area
self._jkn[delta] = self._disort[:, 1:k+1] # prevent self-coupling
# find distance to k nearest neighbors for each area
self._dkn[delta] = self._D[(self._ikn, self._jkn[delta])]
# Smoothed variogram and variogram _b
utrunc = self._u[self._uidx]
self._h = np.linspace(utrunc.min(), utrunc.max(), self._nh)
self.b = b
self._smvar = self.smooth_variogram(self._v)
# Linear regression model
self._lm = LinearRegression(fit_intercept=True)
def __call__(self, n=1):
"""
Randomly generate new surrogate map(s).
Parameters
----------
n : int, default 1
Number of surrogate maps to randomly generate
Returns
-------
ndarray, shape (n,N)
Randomly generated map(s) with matched spatial autocorrelation
Notes
-----
Chooses a level of smoothing that produces a smoothed variogram which
        best approximates the true smoothed variogram. Selecting resample=True
preserves the original map's value distribution at the expense of
worsening the surrogate maps' variogram fit.
"""
print("Generating {} maps...".format(n))
surrs = np.empty((n, self._nmap))
for i in range(n): # generate random maps
xperm = self.permute_map() # Randomly permute values
res = dict.fromkeys(self._deltas)
for delta in self.deltas: # foreach neighborhood size
# Smooth the permuted map using delta proportion of
# neighbors to reintroduce spatial autocorrelation
sm_xperm = self.smooth_map(x=xperm, delta=delta)
# Calculate empirical variogram of the smoothed permuted map
vperm = self.compute_variogram(sm_xperm)
# Calculate smoothed variogram of the smoothed permuted map
smvar_perm = self.smooth_variogram(vperm)
# Fit linear regression btwn smoothed variograms
res[delta] = self.regress(smvar_perm, self._smvar)
alphas, betas, residuals = np.array(
[res[d] for d in self._deltas]).T
# Select best-fit model and regression parameters
iopt = np.argmin(residuals)
dopt = self._deltas[iopt]
aopt = alphas[iopt]
bopt = betas[iopt]
# Transform and smooth permuted map using best-fit parameters
sm_xperm_best = self.smooth_map(x=xperm, delta=dopt)
surr = (np.sqrt(np.abs(bopt)) * sm_xperm_best +
np.sqrt(np.abs(aopt)) * np.random.randn(self._nmap))
surrs[i] = surr
if self._resample: # resample values from empirical map
sorted_map = np.sort(self._x)
for i, surr in enumerate(surrs):
ii = np.argsort(surr)
np.put(surr, ii, sorted_map)
return surrs.squeeze()
def compute_variogram(self, x):
"""
Compute variogram values (i.e., one-half squared pairwise differences).
Parameters
----------
x : 1D ndarray
Brain map scalar array
Returns
-------
v : ndarray, shape (N(N-1)/2,)
Variogram y-coordinates, i.e. 0.5 * (x_i - x_j) ^ 2
"""
diff_ij = np.subtract.outer(x, x)
v = 0.5 * np.square(diff_ij)[self._triu]
return v
def permute_map(self):
"""
Return randomly permuted brain map.
Returns
-------
1D ndarray
Random permutation of target brain map
"""
perm_idx = np.random.permutation(np.arange(self._x.size))
mask_perm = self._x.mask[perm_idx]
x_perm = self._x.data[perm_idx]
return np.ma.masked_array(data=x_perm, mask=mask_perm)
def smooth_map(self, x, delta):
"""
Smooth `x` using `delta` proportion of nearest neighbors.
Parameters
----------
x : 1D ndarray
Brain map scalars
delta : float
Proportion of neighbors to include for smoothing, in (0, 1)
Returns
-------
1D ndarray
Smoothed brain map
"""
# Values of k nearest neighbors for each brain area
xkn = x[self._jkn[delta]]
weights = self._kernel(self._dkn[delta]) # Distance-weight kernel
# Kernel-weighted sum
return (weights * xkn).sum(axis=1) / weights.sum(axis=1)
def smooth_variogram(self, v, return_h=False):
"""
Smooth a variogram.
Parameters
----------
v : 1D ndarray
Variogram values, i.e. 0.5 * (x_i - x_j) ^ 2
return_h : bool, default False
Return distances at which the smoothed variogram was computed
Returns
-------
1D ndarray, shape (nh,)
Smoothed variogram values
1D ndarray, shape (nh,)
Distances at which smoothed variogram was computed (returned only if
`return_h` is True)
Raises
------
ValueError : `v` has unexpected size.
"""
u = self._u[self._uidx]
v = v[self._uidx]
if len(u) != len(v):
raise ValueError(
"argument v: expected size {}, got {}".format(len(u), len(v)))
# Subtract each h from each pairwise distance u
# Each row corresponds to a unique h
du = np.abs(u - self._h[:, None])
w = np.exp(-np.square(2.68 * du / self._b) / 2)
denom = w.sum(axis=1)
wv = w * v[None, :]
num = wv.sum(axis=1)
output = num / denom
if not return_h:
return output
return output, self._h
def regress(self, x, y):
"""
Linearly regress `x` onto `y`.
Parameters
----------
x : 1D ndarray
Independent variable
y : 1D ndarray
Dependent variable
Returns
-------
alpha : float
Intercept term (offset parameter)
beta : float
Regression coefficient (scale parameter)
res : float
Sum of squared residuals
"""
self._lm.fit(X=np.expand_dims(x, -1), y=y)
        beta = self._lm.coef_.item()  # extract scalar (consistent with Sampled.regress)
alpha = self._lm.intercept_
y_pred = self._lm.predict(X=np.expand_dims(x, -1))
res = np.sum(np.square(y-y_pred))
return alpha, beta, res
@property
def x(self):
""" 1D ndarray : brain map scalar array """
return self._x
@x.setter
def x(self, x):
x_ = dataio(x)
check_map(x=x_)
brain_map = np.ma.masked_array(data=x_, mask=np.isnan(x_))
self._x = brain_map
@property
def D(self):
""" ndarray, shape (N,N) : Pairwise distance matrix """
return self._D
@D.setter
def D(self, x):
d_ = dataio(x)
if not np.allclose(d_, d_.T):
raise ValueError("Distance matrix must be symmetric")
n = self._x.size
if d_.shape != (n, n):
e = "Distance matrix must have dimensions consistent with brain map"
e += "\nDistance matrix shape: {}".format(d_.shape)
e += "\nBrain map size: {}".format(n)
raise ValueError(e)
self._D = d_
@property
def nmap(self):
""" int : length of brain map """
return self._nmap
@nmap.setter
def nmap(self, x):
self._nmap = int(x)
@property
def pv(self):
""" int : percentile of pairwise distances at which to truncate """
return self._pv
@pv.setter
def pv(self, x):
pv = check_pv(x)
self._pv = pv
@property
def deltas(self):
""" 1D ndarray or List[float] : proportions of nearest neighbors """
return self._deltas
@deltas.setter
def deltas(self, x):
check_deltas(deltas=x)
self._deltas = x
@property
def nh(self):
""" int : number of variogram distance intervals """
return self._nh
@nh.setter
def nh(self, x):
self._nh = x
@property
def h(self):
""" 1D ndarray : distances at which smoothed variogram is computed """
return self._h
@property
def kernel(self):
""" Callable : smoothing kernel function """
return self._kernel
@kernel.setter
def kernel(self, x):
kernel_callable = check_kernel(x)
self._kernel = kernel_callable
@property
def resample(self):
""" bool : whether to resample surrogate maps from target map """
return self._resample
@resample.setter
def resample(self, x):
if not isinstance(x, bool):
e = "parameter `resample`: expected bool, got {}".format(type(x))
raise TypeError(e)
self._resample = x
@property
def b(self):
""" numeric : Gaussian kernel bandwidth """
return self._b
@b.setter
def b(self, x):
if x is not None:
try:
self._b = float(x)
except (ValueError, TypeError):
e = "bandwidth b: expected numeric, got {}".format(type(x))
raise ValueError(e)
else: # set bandwidth equal to 3x bin spacing
self._b = 3.*np.mean(self._h[1:] - self._h[:-1])
class Sampled:
"""
Sampling implementation of map generator.
Parameters
----------
x : 1D ndarray
Target brain map
D : ndarray or memmap, shape (N,N)
Pairwise distance matrix between elements of `x`. Each row of `D` should
be sorted. Indices used to sort each row are passed to the `index`
argument. See :func:`brainsmash.mapgen.memmap.txt2memmap` or the online
documentation for more details (brainsmash.readthedocs.io)
index : filename or ndarray or memmap, shape(N,N)
See above
ns : int, default 500
Take a subsample of `ns` rows from `D` when fitting variograms
deltas : ndarray or List[float], default [0.3, 0.5, 0.7, 0.9]
Proportions of neighbors to include for smoothing, in (0, 1]
kernel : str, default 'exp'
Kernel with which to smooth permuted maps
- 'gaussian' : gaussian function
- 'exp' : exponential decay function
- 'invdist' : inverse distance
- 'uniform' : uniform weights (distance independent)
pv : int, default 70
Percentile of the pairwise distance distribution (in `D`) at
which to truncate during variogram fitting
nh : int, default 25
Number of uniformly spaced distances at which to compute variogram
knn : int, default 1000
Number of nearest regions to keep in the neighborhood of each region
b : float or None, default None
Gaussian kernel bandwidth for variogram smoothing. if None,
three times the distance interval spacing is used.
resample : bool, default False
Resample surrogate map values from the target brain map
verbose : bool, default False
Print surrogate count each time new surrogate map created
Notes
-----
Passing resample=True will preserve the distribution of values in the
target map, at the expense of worsening simulated surrogate maps'
variograms fits. This worsening will increase as the empirical map
more strongly deviates from normality.
Raises
------
ValueError : `x` and `D` have inconsistent sizes
"""
def __init__(self, x, D, index, ns=500, pv=70, nh=25, knn=1000, b=None,
deltas=np.arange(0.3, 1., 0.2), kernel='exp', resample=False,
verbose=False):
self._verbose = verbose
self.x = x
n = self._x.size
self.nmap = int(n)
self.knn = knn
self.D = D
self.index = index
self.resample = resample
self.nh = int(nh)
self.deltas = deltas
self.ns = int(ns)
self.b = b
self.pv = pv
self._ikn = np.arange(self._nmap)[:, None]
# Store k nearest neighbors from distance and index matrices
self.kernel = kernel # Smoothing kernel selection
self._dmax = np.percentile(self._D, self._pv)
self.h = np.linspace(self._D.min(), self._dmax, self._nh)
if not self._b:
self.b = 3 * (self.h[1] - self.h[0])
# Linear regression model
self._lm = LinearRegression(fit_intercept=True)
def __call__(self, n=1):
"""
Randomly generate new surrogate map(s).
Parameters
----------
n : int, default 1
Number of surrogate maps to randomly generate
Returns
-------
ndarray, shape (n,N)
Randomly generated map(s) with matched spatial autocorrelation
Notes
-----
Chooses a level of smoothing that produces a smoothed variogram which
        best approximates the true smoothed variogram. Selecting resample=True
preserves the map value distribution at the expense of worsening the
surrogate maps' variogram fits.
"""
if self._verbose:
print("Generating {} maps...".format(n))
surrs = np.empty((n, self._nmap))
for i in range(n): # generate random maps
if self._verbose:
print(i+1)
# Randomly permute map
x_perm = self.permute_map()
# Randomly select subset of regions to use for variograms
idx = self.sample()
# Compute empirical variogram
v = self.compute_variogram(self._x, idx)
            # Pairwise distances (variogram x-coordinates); only nearest
            # neighbors are needed since the variogram fit is local
u = self._D[idx, :]
uidx = np.where(u < self._dmax)
# Smooth empirical variogram
smvar, u0 = self.smooth_variogram(u[uidx], v[uidx], return_h=True)
res = dict.fromkeys(self._deltas)
for d in self._deltas: # foreach neighborhood size
k = int(d * self._knn)
# Smooth the permuted map using k nearest neighbors to
# reintroduce spatial autocorrelation
sm_xperm = self.smooth_map(x=x_perm, k=k)
# Calculate variogram values for the smoothed permuted map
vperm = self.compute_variogram(sm_xperm, idx)
# Calculate smoothed variogram of the smoothed permuted map
smvar_perm = self.smooth_variogram(u[uidx], vperm[uidx])
# Fit linear regression btwn smoothed variograms
res[d] = self.regress(smvar_perm, smvar)
alphas, betas, residuals = np.array(
[res[d] for d in self._deltas]).T
# Select best-fit model and regression parameters
iopt = np.argmin(residuals)
dopt = self._deltas[iopt]
self._dopt = dopt
kopt = int(dopt * self._knn)
aopt = alphas[iopt]
bopt = betas[iopt]
# Transform and smooth permuted map using best-fit parameters
sm_xperm_best = self.smooth_map(x=x_perm, k=kopt)
surr = (np.sqrt(np.abs(bopt)) * sm_xperm_best +
np.sqrt(np.abs(aopt)) * np.random.randn(self._nmap))
surrs[i] = surr
if self._resample: # resample values from empirical map
sorted_map = np.sort(self._x)
for i, surr in enumerate(surrs):
ii = np.argsort(surr)
np.put(surr, ii, sorted_map)
if self._ismasked:
return np.ma.masked_array(
data=surrs, mask=np.isnan(surrs)).squeeze()
return surrs.squeeze()
def compute_variogram(self, x, idx):
"""
Compute variogram of `x` using pairs of regions indexed by `idx`.
Parameters
----------
        x : 1D ndarray
Brain map
idx : ndarray[int], shape (ns,)
Indices of randomly sampled brain regions
Returns
-------
v : ndarray, shape (ns,ns)
Variogram y-coordinates, i.e. 0.5 * (x_i - x_j) ^ 2, for i,j in idx
"""
diff_ij = x[idx][:, None] - x[self._index[idx, :]]
return 0.5 * np.square(diff_ij)
def permute_map(self):
"""
Return a random permutation of the target brain map.
Returns
-------
1D ndarray
Random permutation of target brain map
"""
perm_idx = np.random.permutation(self._nmap)
if self._ismasked:
mask_perm = self._x.mask[perm_idx]
x_perm = self._x.data[perm_idx]
return np.ma.masked_array(data=x_perm, mask=mask_perm)
return self._x[perm_idx]
def smooth_map(self, x, k):
"""
Smooth `x` using `k` nearest neighboring regions.
Parameters
----------
x : 1D ndarray
Brain map
k : float
Number of nearest neighbors to include for smoothing
Returns
-------
x_smooth : 1D ndarray
Smoothed brain map
Notes
-----
Assumes `D` provided at runtime has been sorted.
"""
jkn = self._index[:, :k] # indices of k nearest neighbors
xkn = x[jkn] # values of k nearest neighbors
dkn = self._D[:, :k] # distances to k nearest neighbors
weights = self._kernel(dkn) # distance-weighted kernel
# Kernel-weighted sum
return (weights * xkn).sum(axis=1) / weights.sum(axis=1)
def smooth_variogram(self, u, v, return_h=False):
"""
Smooth a variogram.
Parameters
----------
u : 1D ndarray
Pairwise distances, ie variogram x-coordinates
v : 1D ndarray
Squared differences, ie variogram y-coordinates
return_h : bool, default False
Return distances at which smoothed variogram is computed
Returns
-------
ndarray, shape (nh,)
Smoothed variogram samples
ndarray, shape (nh,)
Distances at which smoothed variogram was computed (returned if
`return_h` is True)
Raises
------
ValueError : `u` and `v` are not identically sized
"""
if len(u) != len(v):
raise ValueError("u and v must have same number of elements")
# Subtract each element of h from each pairwise distance `u`.
# Each row corresponds to a unique h.
du = np.abs(u - self._h[:, None])
w = np.exp(-np.square(2.68 * du / self._b) / 2)
denom = w.sum(axis=1)
wv = w * v[None, :]
num = wv.sum(axis=1)
output = num / denom
if not return_h:
return output
return output, self._h
def regress(self, x, y):
"""
Linearly regress `x` onto `y`.
Parameters
----------
x : 1D ndarray
Independent variable
y : 1D ndarray
Dependent variable
Returns
-------
alpha : float
Intercept term (offset parameter)
beta : float
Regression coefficient (scale parameter)
res : float
Sum of squared residuals
"""
self._lm.fit(X=np.expand_dims(x, -1), y=y)
beta = self._lm.coef_.item()
alpha = self._lm.intercept_
ypred = self._lm.predict(np.expand_dims(x, -1))
res = np.sum(np.square(y-ypred))
return alpha, beta, res
def sample(self):
"""
Randomly sample (without replacement) brain areas for variogram
computation.
Returns
-------
ndarray, shape (ns,)
Indices of randomly sampled areas
"""
return np.random.choice(
a=self._nmap, size=self._ns, replace=False).astype(np.int32)
@property
def x(self):
"""1D ndarray : brain map scalars """
if self._ismasked:
return np.ma.copy(self._x)
return np.copy(self._x)
@x.setter
def x(self, x):
self._ismasked = False
x_ = dataio(x)
check_map(x=x_)
mask = np.isnan(x_)
if mask.any():
self._ismasked = True
brain_map = np.ma.masked_array(data=x_, mask=mask)
else:
brain_map = x_
self._x = brain_map
@property
def D(self):
"""ndarray or memmap, shape (N,N) : Pairwise distance matrix """
return np.copy(self._D)
@D.setter
def D(self, x):
x_ = dataio(x)
n = self._x.size
if x_.shape[0] != n:
raise ValueError(
"D size along axis=0 must equal brain map size")
self._D = x_[:, 1:self._knn + 1] # prevent self-coupling
@property
def index(self):
"""ndarray or memmap : indexes used to sort each row of dist. matrix """
return np.copy(self._index)
@index.setter
def index(self, x):
x_ = dataio(x)
n = self._x.size
if x_.shape[0] != n:
raise ValueError(
"index size along axis=0 must equal brain map size")
self._index = x_[:, 1:self._knn+1].astype(np.int32)
@property
def nmap(self):
""" int : length of brain map """
return self._nmap
@nmap.setter
def nmap(self, x):
self._nmap = int(x)
@property
def pv(self):
""" int : percentile of pairwise distances at which to truncate """
return self._pv
@pv.setter
def pv(self, x):
pv = check_pv(x)
self._pv = pv
@property
def deltas(self):
""" 1D ndarray or List[float] : proportions of nearest neighbors """
return self._deltas
@deltas.setter
def deltas(self, x):
check_deltas(deltas=x)
self._deltas = x
@property
def nh(self):
""" int : number of variogram distance intervals """
return self._nh
@nh.setter
def nh(self, x):
self._nh = x
@property
def kernel(self):
""" Callable : smoothing kernel function
Notes
-----
When setting kernel, use name of kernel as defined in ``config.py``.
"""
return self._kernel
@kernel.setter
def kernel(self, x):
kernel_callable = check_kernel(x)
self._kernel = kernel_callable
@property
def resample(self):
""" bool : whether to resample surrogate map values from target maps """
return self._resample
@resample.setter
def resample(self, x):
if not isinstance(x, bool):
raise TypeError("expected bool, got {}".format(type(x)))
self._resample = x
@property
def knn(self):
""" int : number of nearest neighbors included in distance matrix """
return self._knn
@knn.setter
def knn(self, x):
if x > self._nmap:
raise ValueError('knn must be less than len(X)')
self._knn = int(x)
@property
def ns(self):
""" int : number of randomly sampled regions used to construct map """
return self._ns
@ns.setter
def ns(self, x):
self._ns = int(x)
@property
def b(self):
""" numeric : Gaussian kernel bandwidth """
return self._b
@b.setter
def b(self, x):
self._b = x
@property
def h(self):
""" 1D ndarray : distances at which variogram is evaluated """
return self._h
@h.setter
def h(self, x):
self._h = x
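# Example usage (a minimal sketch; file names are hypothetical):
#
#     from brainspace.null_models.variogram import Base
#     gen = Base(x="brain_map.txt", D="distmat.txt")
#     surrogates = gen(n=10)  # ten surrogate maps with matched autocorrelation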
|
{"hexsha": "6d9e592f76df84f68aab5286b096307cd6206161", "size": 37372, "ext": "py", "lang": "Python", "max_stars_repo_path": "brainspace/null_models/variogram.py", "max_stars_repo_name": "jbburt/BrainSpace", "max_stars_repo_head_hexsha": "20c372a9d618e05cb27dacb205ec7748805169e2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "brainspace/null_models/variogram.py", "max_issues_repo_name": "jbburt/BrainSpace", "max_issues_repo_head_hexsha": "20c372a9d618e05cb27dacb205ec7748805169e2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "brainspace/null_models/variogram.py", "max_forks_repo_name": "jbburt/BrainSpace", "max_forks_repo_head_hexsha": "20c372a9d618e05cb27dacb205ec7748805169e2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1286048324, "max_line_length": 80, "alphanum_fraction": 0.5614096115, "include": true, "reason": "import numpy", "num_tokens": 9039}
|
kernel = "../src/pca/logreg.jl"
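# Regularized logistic-regression loss: mean log-loss over the labels b[i],
# where v[1] is the intercept and v[2:end] are the weights, plus an L2 penalty.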
function logreg_loss(v, X, b, λ)
rv = 0.0
for i in 1:length(b)
rv += log(1 + exp(-b[i]*(v[1]+dot(X[:, i], view(v, 2:length(v))))))
end
rv / length(b) + λ/2 * norm(v)^2
end
# input data set with known optimal solution
X = [1.444786643000158 0.49236792885913283 -0.53258473265429 0.05476455630673194 -1.3473893605265843; 0.48932299731783646 2.0708445447107926 1.2414596020757043 0.9131934117095984 -0.15692043560721075; 0.7774625331093794 0.7234405608945721 -0.037446104354257874 -1.1104987697394342 1.354975413199728]
b = [-1, 1, -1, -1, 1]
v_opt = [-0.3423591553493419, -0.41317049965033387, 0.007294166575956451, 0.6763846515861628]
m, n = size(X)
λ = 1 / n
opt = logreg_loss(v_opt, X, b, λ)
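# Sanity check (added illustration, not part of the original fixture): with
# v = zeros(4) every data term is log(1 + exp(0)) = log(2) and the l2
# penalty vanishes, so the loss is exactly log(2).
@assert logreg_loss(zeros(4), X, b, λ) ≈ log(2)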
# write test problem to disk
inputfile = tempname()
inputdataset = "X"
outputdataset = "V"
labeldataset = "b"
h5open(inputfile, "w") do file
file[inputdataset] = X
file[labeldataset] = b
end
# GD
nworkers = 2
niterations = 200
stepsize = 0.1
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--outputdataset $outputdataset
--niterations $niterations
--saveiterates
--lambda $λ
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
# same as previous, but with nslow > 0
nworkers = 2
nslow = 1
niterations = 100
stepsize = 0.1
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--outputdataset $outputdataset
--niterations $niterations
--saveiterates
--lambda $λ
--nslow $nslow
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
# same as previous, but with slowprob. > 0
nworkers = 2
slowprob = 0.5
niterations = 100
stepsize = 0.1
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--outputdataset $outputdataset
--niterations $niterations
--saveiterates
--lambda $λ
--slowprob $slowprob
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
# DSAG
vralgo = "tree"
nworkers = 2
nwait = 1
niterations = 100
stepsize = 0.1
nsubpartitions = 2
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--nwait $nwait
--variancereduced
--vralgo $vralgo
--nsubpartitions $nsubpartitions
--outputdataset $outputdataset
--niterations $niterations
--saveiterates
--lambda $λ
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
# DSAG w. nwaitschedule < 1.0
nworkers = 2
nwait = 2
niterations = 100
stepsize = 0.1
nsubpartitions = 2
nwaitschedule = 0.9
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--nwait $nwait
--variancereduced
--vralgo $vralgo
--nsubpartitions $nsubpartitions
--outputdataset $outputdataset
--niterations $niterations
--saveiterates
--lambda $λ
--nwaitschedule $nwaitschedule
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
# DSAG w. sparse input data
inputfile = tempname()
h5open(inputfile, "cw") do file
H5SparseMatrixCSC(file, inputdataset, sparse(X))
file[labeldataset] = b
flush(file)
end
nworkers = 2
nwait = 1
niterations = 100
stepsize = 0.1
nsubpartitions = 2
outputfile = tempname()
mpiexec(cmd -> run(```
$cmd -n $(nworkers+1) julia --project $kernel $inputfile $outputfile
--inputdataset $inputdataset
--nwait $nwait
--variancereduced
--vralgo $vralgo
--nsubpartitions $nsubpartitions
--outputdataset $outputdataset
--niterations $niterations
--lambda $λ
```))
vs = load_logreg_iterates(outputfile, outputdataset)
v = vs[end]
f = logreg_loss(v, X, b, λ)
@test f < opt * (1+1e-2)
|
{"hexsha": "3bd7716aa765f3d23471700695253f5509bc90ed", "size": 4402, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/logreg_test.jl", "max_stars_repo_name": "severinson/CodedComputing.jl", "max_stars_repo_head_hexsha": "c60597579faa176f57c86acd309cf904c395268f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/logreg_test.jl", "max_issues_repo_name": "severinson/CodedComputing.jl", "max_issues_repo_head_hexsha": "c60597579faa176f57c86acd309cf904c395268f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/logreg_test.jl", "max_forks_repo_name": "severinson/CodedComputing.jl", "max_forks_repo_head_hexsha": "c60597579faa176f57c86acd309cf904c395268f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5180722892, "max_line_length": 299, "alphanum_fraction": 0.6624261699, "num_tokens": 1543}
|
## ------------------------------------------------------------------
let
@info("Testing PropertyChangedEvent")
global __glob = rand()
e = PropertyChangedEvent(Main)
update!(e, :__glob)
events_count = 0
trigger_at = [3, 4, 5]
for it in 1:10
if has_event!(e, :__glob)
println(:__glob, " changed!!!")
events_count += 1
end
if it in trigger_at
            global __glob = rand()
end
sleep(0.1)
end
@test events_count == length(trigger_at)
end
|
{"hexsha": "9decf3d3d004df61d42c25a88026c6a3190fe8ae", "size": 547, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/PropertyChangedEvent_test.jl", "max_stars_repo_name": "josePereiro/EasyEvents.jl", "max_stars_repo_head_hexsha": "a398e269d707806f4a9a9080ad39fb2087bf452f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/PropertyChangedEvent_test.jl", "max_issues_repo_name": "josePereiro/EasyEvents.jl", "max_issues_repo_head_hexsha": "a398e269d707806f4a9a9080ad39fb2087bf452f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/PropertyChangedEvent_test.jl", "max_forks_repo_name": "josePereiro/EasyEvents.jl", "max_forks_repo_head_hexsha": "a398e269d707806f4a9a9080ad39fb2087bf452f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7826086957, "max_line_length": 69, "alphanum_fraction": 0.4606946984, "num_tokens": 134}
|
from abc import ABC, abstractmethod
import cv2
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.keras.backend import set_session
import tensorflow.keras.losses
import re
import time
from scipy.optimize import curve_fit
class ModelInterface(ABC):
@abstractmethod
def get_prediction(self, images, info):
pass
def encoder(x, angle):
return np.sin(((2*np.pi*(x-1))/(9))-((angle*np.pi)/(2*1)))
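# The network emits steering as a 10-sample sinusoid evaluated at x = 1..10;
# `encoder` is the generating function with the steering angle as its phase.
# At inference time the scalar angle is recovered by fitting `encoder` back
# to the 10 outputs (see the curve_fit calls in get_prediction below), e.g.:
#
#     xs = np.arange(1, 11, 1)
#     ys = encoder(xs, 0.25)                    # synthetic 10-point target
#     angle = curve_fit(encoder, xs, ys)[0][0]  # recovers ~0.25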
class LSTMKeras(ModelInterface):
def __init__(self, path, seq_length, sampling_interval, capture_rate=3):
self._model = None
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
# Initialize network input history
self._img_center_history = []
self._img_left_history = []
self._img_right_history = []
self._info_history = []
self._hlc_history = []
self._environment_history = []
# Network parameters
self._seq_length = seq_length
self._sampling_interval = sampling_interval + capture_rate - 1
self.hlc_one_hot = { 1: [1,0,0,0,0,0], 2:[0,1,0,0,0,0], 3:[0,0,1,0,0,0], 4:[0,0,0,1,0,0], 5:[0,0,0,0,1,0], 6:[0,0,0,0,0,1]}
self.environment_one_hot = { 0: [1,0], 1:[0,1]}
self.loaded_at = time.time()
self.brake_hist = []
# Load model
self._load_model(path)
self._frame = 0
self._last_pred = None
def _init_history(self):
self._img_center_history = []
self._img_left_history = []
self._img_right_history = []
self._info_history = []
self._hlc_history = []
self._environment_history = []
def _load_model(self, path):
self._model = tf.keras.models.load_model(path, compile=False)
def restart(self):
self._img_center_history = []
self._img_left_history = []
self._img_right_history = []
self._info_history = []
self._hlc_history = []
self._environment_history = []
self.loaded_at = time.time()
self._frame = 0
self._last_pred = None
print("Restart")
def get_prediction(self, images, info):
self._frame += 1
if self._model is None:
return False
req = (self._seq_length - 1) * (self._sampling_interval + 1) + 1
img_center = cv2.cvtColor(images["forward_center_rgb"], cv2.COLOR_BGR2LAB)
"""img_left = cv2.cvtColor(images["left_center_rgb"], cv2.COLOR_BGR2LAB)
img_right = cv2.cvtColor(images["right_center_rgb"], cv2.COLOR_BGR2LAB)"""
info_input = [
max(float(info["speed"] * 3.6 / 100),0.2 ),
float(info["speed_limit"] * 3.6 / 100),
info["traffic_light"]
]
hlc_input = self.hlc_one_hot[(info["hlc"].value)]
environment_input = self.environment_one_hot[(info["environment"].value)]
self._img_center_history.append(np.array(img_center))
"""self._img_left_history.append(np.array(img_left))
self._img_right_history.append(np.array(img_right))"""
self._info_history.append(np.array(info_input))
self._hlc_history.append(np.array(hlc_input))
self._environment_history.append(np.array(environment_input))
sinus = True
if len(self._img_center_history) > req:
self._img_center_history.pop(0)
"""self._img_left_history.pop(0)
self._img_right_history.pop(0)"""
self._info_history.pop(0)
self._hlc_history.pop(0)
self._environment_history.pop(0)
if len(self._img_center_history) == req:
imgs_center = np.array([self._img_center_history[0::self._sampling_interval + 1]])
"""imgs_left = np.array([self._img_left_history[0::self._sampling_interval + 1]])
imgs_right = np.array([self._img_right_history[0::self._sampling_interval + 1]])"""
infos = np.array([self._info_history[0::self._sampling_interval + 1]])
hlcs = np.array([self._hlc_history[0::self._sampling_interval + 1]])
environments = np.array([self._environment_history[0::self._sampling_interval + 1]])
prediction = self._model.predict({
"forward_image_input": imgs_center,
"info_input": infos,
"hlc_input": hlcs,
"environment_input": environments
})
"""if info["hlc"].value == 4:
prediction = prediction[0]
elif info["hlc"].value == 5:
prediction = prediction[1]
elif info["hlc"].value == 6:
prediction = prediction[2]"""
"""steer, acc = prediction[0][0], prediction[1][0]
if sinus:
steer_curve_parameters = curve_fit(encoder, np.arange(1, 11, 1), steer)[0]
steer_angle = steer_curve_parameters[0]
brake = 1 if acc < -0.1 else 0
throttle = 0.5 if acc > 0 else 0
print(acc)
return (steer_angle, throttle, brake)"""
# print(brake)
steer, throttle, brake = prediction[0][0], prediction[1][0], prediction[2][0]
self.brake_hist.append(brake)
if len(self.brake_hist)>7:
self.brake_hist.pop(0)
if sinus:
steer_curve_parameters = curve_fit(encoder, np.arange(1, 11, 1), steer)[0]
steer_angle = steer_curve_parameters[0]
avg_brake = np.max(self.brake_hist)
step_brake = 1 if avg_brake > 0.5 else 0
"""if self._frame % 30 != 0 and self._last_pred:
return self._last_pred"""
self._last_pred = (steer_angle, throttle, step_brake) if sinus else (steer, throttle, step_brake)
return self._last_pred
return (0, 0.5, 0)
|
{"hexsha": "d646a2249dc9046690dcd255d9f672f2afd80aba", "size": 6105, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonAPI/custom/drive_models.py", "max_stars_repo_name": "MaxJohnsen/carla", "max_stars_repo_head_hexsha": "03b2e127b3f666d69087a8b89f4a4f7d60fc576e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonAPI/custom/drive_models.py", "max_issues_repo_name": "MaxJohnsen/carla", "max_issues_repo_head_hexsha": "03b2e127b3f666d69087a8b89f4a4f7d60fc576e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonAPI/custom/drive_models.py", "max_forks_repo_name": "MaxJohnsen/carla", "max_forks_repo_head_hexsha": "03b2e127b3f666d69087a8b89f4a4f7d60fc576e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8857142857, "max_line_length": 131, "alphanum_fraction": 0.5870597871, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1514}
|
#ifndef __GIFS_UTIL_HPP
#define __GIFS_UTIL_HPP
#include <armadillo>
namespace util{
arma::vec center_of_mass(const arma::mat R, const arma::vec m);
arma::vec sum_cross(const arma::mat A, const arma::mat B);
// arma::vec net(arma::vec (*op)(const arma::vec &a, const arma::vec &b), const arma::mat A, const arma::mat B);
double hypot(std::complex<double> a, std::complex<double> b);
arma::uword sample_discrete(const arma::vec &p);
arma::uvec range(arma::uword a, arma::uword b); //[a, b)
arma::uvec range(arma::uword n); // [0, n)
template <typename T>
inline bool approx_equal(T a, T b, T tol=arma::datum::eps){
return std::abs(a-b) < tol;
};
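  // Illustrative usage (not part of the original header):
  //   util::approx_equal(1.0, 1.0 + 1e-17)    -> true with default tol (arma::datum::eps)
  //   util::approx_equal(0.1, 0.1001, 1e-3)   -> true with a looser tolerance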
}
#endif
|
{"hexsha": "ae77370bfd509754eb5a919136196a42b16d529f", "size": 684, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "gifs_src/util.hpp", "max_stars_repo_name": "farajilab/gifs_release", "max_stars_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-03-11T19:48:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-11T19:48:20.000Z", "max_issues_repo_path": "gifs_src/util.hpp", "max_issues_repo_name": "farajilab/gifs_release", "max_issues_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gifs_src/util.hpp", "max_forks_repo_name": "farajilab/gifs_release", "max_forks_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-02-08T00:11:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T00:11:00.000Z", "avg_line_length": 29.7391304348, "max_line_length": 115, "alphanum_fraction": 0.6637426901, "num_tokens": 218}
|
*----------------------------------------------------------------------*
subroutine get_mrcc_response_input(orb_info,env_type)
*----------------------------------------------------------------------*
* process response functions input for ic-MRCC and write everything
* in a file.
* Adapted from get_response_input and this now works only for one
* perturbation, though it is written for any number of perturbations.
*
* Pradipta, 2016
*----------------------------------------------------------------------*
implicit none
include 'def_target.h'
include 'def_orbinf.h'
include 'ifc_input.h'
include 'def_pert_info.h'
include 'def_filinf.h'
type(orbinf), intent(in) ::
& orb_info
character(len=*), intent(in) ::
& env_type
type(filinf) ::
& ffpropinf
type(pert_op_info) ::
& pop(3*maxpop)
type(pert_component_info) ::
& cmp(maxcmp)
integer ::
& ncnt, icnt, idx, jdx, ipop, pos, sign
integer ::
& ncmp, npop, luprop, idum, maxord, new_pos
integer, allocatable ::
& ord(:)
integer, allocatable ::
& prop_comp(:,:), conj_comp(:,:), conj_prop(:,:)
real ::
& order
logical ::
& skip, trplt
character(len=1) ::
& pert_ord
character(len=6) ::
& int_name
character(len_command_par) ::
& pert, pertop
character(20),parameter::
& name_propinf="prop_info.gecco"
integer, external ::
& pert_sym_dalton, pert_sym_molpro
ncnt = is_keyword_set('method.MRCC.response')
if (ncnt.eq.0) then
call quit(1,'get_mrcc_response_input',
& 'response keyword is not set')
else if(ncnt.gt.1) then
! call quit(1,'get_mrcc_response_input',
! & 'response keyword can be set only once, for now')
end if
allocate(ord(ncnt))
do icnt = 1,ncnt
call get_argument_value('method.MRCC.response','order',
& keycount=icnt,ival=ord(icnt))
end do
call get_argument_value('calculate.properties','triplet',
& lval=trplt)
maxord = maxval(ord(:))
if (maxord.gt.1) call quit(1,'get_mrcc_response_input',
& 'maxord>1 is a WIP feature. Contact the main author.')
allocate(prop_comp(ncnt,maxord))
allocate(conj_comp(ncnt,maxord))
allocate(conj_prop(ncnt,maxord))
prop_comp=0
conj_comp=0
conj_prop=0
ncmp = ncnt*maxord
cmp(:)%redun = 0
cmp(:)%pop_idx = 0
cmp(:)%freq = 0d0
cmp(:)%order=0
      if (maxord.gt.maximum_order) then
        write(pert_ord,'(i1)') maximum_order
        call quit(1,'get_mrcc_response_input',
     &       'ord must not exceed '//pert_ord)
      end if
pert(1:len_command_par) = ' '
pertop(1:len_command_par) = ' '
npop = 0
do icnt = 1,ncnt
pos = (icnt-1)*maxord + 1
call get_argument_value('method.MRCC.response','comp',
& keycount=icnt,str=pert(pos:len_command_par))
call get_argument_value('method.MRCC.response','pert',
& keycount=icnt,str=pertop(pos:len_command_par))
! Getting the frequencies of all the perturbations and storing them
! starting from the second place in cmp%freq
call get_argument_value('method.MRCC.response','freq',
& keycount=icnt,xarr=cmp(pos+1:ncmp)%freq)
cmp(pos+ord(icnt):ncmp)%freq = 0d0
! Then putting the frequency at the first position of cmp
! This is the negative of the sum of all the frequencies
if (ord(icnt).gt.0)
& cmp(pos)%freq = -sum(cmp(pos+1:pos+ord(icnt)-1)%freq)
! duplicate values for pert if necessary and not specified
do idx = pos+1,pos+ord(icnt)-1
if (pert(idx:idx).eq.' ')
& pert(idx:idx) = pert(idx-1:idx-1)
if (pertop(idx:idx).eq.' ')
& pertop(idx:idx) = pertop(idx-1:idx-1)
end do
do idx = pos,pos+ord(icnt)-1
! check if perturbation operator input is ok
if (pert(idx:idx).ne.'X' .and.
& pert(idx:idx).ne.'Y' .and.
& pert(idx:idx).ne.'Z')
& call quit(1,'get_mrcc_response_input',
& 'comp must contain X,Y,Z')
skip = .false.
do ipop = 1,len(pert_ops)
if (pertop(idx:idx).eq.pert_ops(ipop:ipop)) then
skip = .true.
int_name = dalton_int_names(6*ipop-5:6*ipop)
sign = pert_op_sign(ipop)
exit
end if
end do
if (.not.skip) call quit(1,'get_mrcc_response_input',
& 'perturbation operator "'//pertop(idx:idx)//
& '" is currently not allowed.')
skip = .false.
! get the order corresponding to the component
order=((real(ord(icnt))-1.0d0)/2.0d0)
cmp(idx)%order=ceiling(order)
if(cmp(idx)%order.gt.0) then
do ipop = 1,npop
if (pop(ipop)%comp.eq.pert(idx:idx).and.
& pop(ipop)%name.eq.pertop(idx:idx)) then
skip = .true.
cmp(idx)%pop_idx = ipop
exit
end if
end do
end if
prop_comp(icnt,idx-(icnt-1)*maxord) = idx
if (.not.skip) then
npop = npop + 1
pop(npop)%comp = pert(idx:idx)
pop(npop)%name = pertop(idx:idx)
pop(npop)%int_name = pert(idx:idx)//int_name//' '
pop(npop)%sign = sign
select case(env_type(1:6))
case ('dalton','DALTON')
if(trplt) then
pop(npop)%isym = 1 ! If the perturbation is triplet,
!then we can ignore the isym and set it to 1
else
pop(npop)%isym = pert_sym_dalton(pop(npop)%int_name,
& orb_info)
end if
case ('molpro','MOLPRO')
if(trplt) then
pop(npop)%isym = 1 ! If the perturbation is triplet,
!then we can ignore the isym and set it to 1
else
pop(npop)%isym = pert_sym_molpro(pop(npop)%int_name,
& orb_info)
end if
case default
call quit(1,'get_mrcc_response_input',
& 'Calculations of properties not possible with "'
& //trim(env_type)//'" ')
end select
cmp(idx)%pop_idx = npop
end if
! determine redundancies
do jdx = 1,idx-1
if (pert(idx:idx).eq.pert(jdx:jdx) .and.
& pertop(idx:idx).eq.pertop(jdx:jdx) .and.
& cmp(idx)%order.eq.cmp(jdx)%order.and.
& abs(cmp(idx)%freq - cmp(jdx)%freq).lt.1d-12)
& cmp(idx)%redun = jdx
end do
if (cmp(idx)%redun.eq.0) cmp(idx)%redun = idx
end do
end do
! Here we get and store the information about each call
! for the response calculation.
do icnt = 1, ncnt
pos = (icnt-1)*maxord + 1
if (ord(icnt).eq.1)then
! prop_comp(icnt,1)=pos
conj_comp(icnt,1)=pos
conj_prop(icnt,1)=cmp(pos)%pop_idx
elseif(ord(icnt).eq.2)then
if (cmp(pos)%redun.eq.pos) then
new_pos=cmp(pos+1)%redun
conj_comp(icnt,1)=new_pos
conj_prop(icnt,1)=cmp(new_pos)%pop_idx
else
new_pos=cmp(pos)%redun
prop_comp(icnt,1)=new_pos
conj_comp(icnt,1)=new_pos
conj_prop(icnt,1)=cmp(new_pos)%pop_idx
end if
if (cmp(pos+1)%redun.eq.pos+1) then
new_pos=cmp(pos)%redun
conj_comp(icnt,2)=new_pos
conj_prop(icnt,2)=cmp(new_pos)%pop_idx
else
new_pos=cmp(pos+1)%redun
prop_comp(icnt,2)=new_pos
conj_comp(icnt,2)=new_pos
conj_prop(icnt,2)=cmp(new_pos)%pop_idx
end if
end if
end do
call file_init(ffpropinf,trim(name_propinf),ftyp_sq_frm,idum)
call file_open(ffpropinf)
luprop = ffpropinf%unit
write(luprop,*) 'npop', npop
do ipop=1,npop
write(luprop,*) 'pop(npop)%comp: ', pop(ipop)%comp
write(luprop,*) 'pop(npop)%name: ', pop(ipop)%name
write(luprop,*) 'pop(npop)%int_name: ', pop(ipop)%int_name
write(luprop,*) 'pop(npop)%sign: ', pop(ipop)%sign
write(luprop,*) 'pop(npop)%isym: ', pop(ipop)%isym
enddo
write(luprop,*) ncnt, maxord
do idx=1,ncnt*maxord
write(luprop,*) 'cmp(idx)%pop_idx', cmp(idx)%pop_idx
write(luprop,*) 'cmp(idx)%freq', cmp(idx)%freq
write(luprop,*) 'cmp(idx)%redun', cmp(idx)%redun
write(luprop,*) 'cmp(idx)%order', cmp(idx)%order
end do
write(luprop,*) 'order: ', ord(1:ncnt)
do icnt=1,ncnt
write(luprop,*) 'prop_comp:', prop_comp(icnt,:)
write(luprop,*) 'conj_comp:', conj_comp(icnt,:)
write(luprop,*) 'conj_prop:', conj_prop(icnt,:)
end do
deallocate(ord)
return
end
|
{"hexsha": "b07d3dad8985c28df47eb9e0213dbe4b67a0c5b4", "size": 9388, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "input/get_mrcc_response_input.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "input/get_mrcc_response_input.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "input/get_mrcc_response_input.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2932330827, "max_line_length": 82, "alphanum_fraction": 0.5172560716, "num_tokens": 2608}
|
#!/usr/bin/env python3
""" Functions for use with fitting.
.. code-author: Raymond Ehlers <raymond.ehlers@cern.ch>, Yale University
"""
import abc
import logging
import operator
from typing import Any, Callable, Optional, Sequence, Union
import numpy as np
import numpy.typing as npt
from pachyderm import generic_class
from pachyderm.fit import base as fit_base
logger = logging.getLogger(__name__)
class CombinePDF(generic_class.EqualityMixin, abc.ABC):
"""Combine functions (PDFs) together.
Args:
functions: Functions to be added.
prefixes: Prefix for arguments of each function. Default: None. If specified, there must
be one prefix for each function.
skip_prefixes: Prefixes to skip when assigning prefixes. As noted in probfit, this can be
useful to mix prefixed and non-prefixed arguments. Default: None.
Attributes:
functions: List of functions that are combined in the PDF.
func_code: Function arguments derived from the fit functions. They need to be separately
specified to allow iminuit to determine the proper arguments.
argument_positions: Map of merged arguments to the arguments for each individual function.
"""
    # Don't specify the function arguments to work around a mypy bug.
# For an unclear reason, it won't properly detect the number of arguments.
_operation: Callable[..., float]
_call_function: Callable[..., float]
def __init__(
self,
*functions: Callable[..., float],
prefixes: Optional[Sequence[str]] = None,
skip_prefixes: Optional[Sequence[str]] = None,
) -> None:
# Store the functions
self.functions = list(functions)
# Determine the arguments for the functions.
merged_args, argument_positions = fit_base.merge_func_codes(
self.functions, prefixes=prefixes, skip_prefixes=skip_prefixes
)
logger.debug(f"merged_args: {merged_args}")
self.func_code = fit_base.FuncCode(merged_args)
self.argument_positions = argument_positions
def __call__(self, x: npt.NDArray[Any], *merged_args: float) -> float:
"""Call the added PDF.
Args:
x: Value(s) where the functions should be evaluated.
merged_args: Merged arguments for the functions. Must contain all of the arguments
                needed to call the functions.
Returns:
Value(s) of the functions when evaluated with the given input values.
"""
# We add in the x values into the function arguments here so we don't have to play tricks later
        # to get the function argument indices correct.
return fit_base.call_list_of_callables_with_operation(
self._operation, self.functions, self.argument_positions, *[x, *merged_args] # type: ignore
)
class AddPDF(CombinePDF):
"""Add functions (PDFs) together.
Args:
functions: Functions to be added.
prefixes: Prefix for arguments of each function. Default: None. If specified, there must
be one prefix for each function.
skip_prefixes: Prefixes to skip when assigning prefixes. As noted in probfit, this can be
useful to mix prefixed and non-prefixed arguments. Default: None.
Attributes:
functions: List of functions that are combined in the PDF.
func_code: Function arguments derived from the fit functions. They need to be separately
specified to allow iminuit to determine the proper arguments.
argument_positions: Map of merged arguments to the arguments for each individual function.
"""
_operation = operator.add
class SubtractPDF(CombinePDF):
"""Subtract functions (PDFs) together.
Args:
functions: Functions to be added.
prefixes: Prefix for arguments of each function. Default: None. If specified, there must
be one prefix for each function.
skip_prefixes: Prefixes to skip when assigning prefixes. As noted in probfit, this can be
useful to mix prefixed and non-prefixed arguments. Default: None.
Attributes:
functions: List of functions that are combined in the PDF.
func_code: Function arguments derived from the fit functions. They need to be separately
specified to allow iminuit to determine the proper arguments.
argument_positions: Map of merged arguments to the arguments for each individual function.
"""
_operation = operator.sub
class MultiplyPDF(CombinePDF):
"""Multiply functions (PDFs) together.
Args:
functions: Functions to be added.
prefixes: Prefix for arguments of each function. Default: None. If specified, there must
be one prefix for each function.
skip_prefixes: Prefixes to skip when assigning prefixes. As noted in probfit, this can be
useful to mix prefixed and non-prefixed arguments. Default: None.
Attributes:
functions: List of functions that are combined in the PDF.
func_code: Function arguments derived from the fit functions. They need to be separately
specified to allow iminuit to determine the proper arguments.
argument_positions: Map of merged arguments to the arguments for each individual function.
"""
_operation = operator.mul
class DividePDF(CombinePDF):
"""Divide functions (PDFs) together.
Args:
functions: Functions to be added.
prefixes: Prefix for arguments of each function. Default: None. If specified, there must
be one prefix for each function.
skip_prefixes: Prefixes to skip when assigning prefixes. As noted in probfit, this can be
useful to mix prefixed and non-prefixed arguments. Default: None.
Attributes:
functions: List of functions that are combined in the PDF.
func_code: Function arguments derived from the fit functions. They need to be separately
specified to allow iminuit to determine the proper arguments.
argument_positions: Map of merged arguments to the arguments for each individual function.
"""
_operation = operator.truediv
def gaussian(
x: Union[npt.NDArray[np.float64], float], mean: float, sigma: float
) -> Union[npt.NDArray[np.float64], float]:
r"""Normalized gaussian.
.. math::
        f = 1 / \sqrt{2 * \pi * \sigma^{2}} * \exp{-\frac{(x - \mu)^{2}}{2 * \sigma^{2}}}
Args:
x: Value(s) where the gaussian should be evaluated.
mean: Mean of the gaussian distribution.
sigma: Width of the gaussian distribution.
Returns:
Calculated gaussian value(s).
"""
return 1.0 / np.sqrt(2 * np.pi * np.square(sigma)) * np.exp(-1.0 / 2.0 * np.square((x - mean) / sigma)) # type: ignore
def extended_gaussian(
x: Union[npt.NDArray[np.float64], float], mean: float, sigma: float, amplitude: float
) -> Union[npt.NDArray[np.float64], float]:
r"""Extended gaussian.
.. math::
        f = A / \sqrt{2 * \pi * \sigma^{2}} * \exp{-\frac{(x - \mu)^{2}}{2 * \sigma^{2}}}
Args:
x: Value(s) where the gaussian should be evaluated.
mean: Mean of the gaussian distribution.
sigma: Width of the gaussian distribution.
amplitude: Amplitude of the gaussian.
Returns:
Calculated gaussian value(s).
"""
return amplitude / np.sqrt(2 * np.pi * np.square(sigma)) * np.exp(-1.0 / 2.0 * np.square((x - mean) / sigma)) # type: ignore
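# Illustrative usage (a sketch, not part of the original module): combine two
# extended gaussians into a double-peaked model. With the prefixes below the
# merged argument list becomes ("peak1_mean", "peak1_sigma", "peak1_amplitude",
# "peak2_mean", ...), which iminuit can read via `func_code`:
#
#     double_peak = AddPDF(
#         extended_gaussian, extended_gaussian,
#         prefixes=["peak1_", "peak2_"],
#     )
#     y = double_peak(np.linspace(-1, 1, 5), 0.0, 0.1, 1.0, 0.5, 0.2, 2.0)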
|
{"hexsha": "1246c10bfc38de6c5b4134cb060faef272265f03", "size": 7550, "ext": "py", "lang": "Python", "max_stars_repo_path": "pachyderm/fit/function.py", "max_stars_repo_name": "raymondEhlers/pachyderm", "max_stars_repo_head_hexsha": "c9a554d8c4e904315171a5aafa4569259e280fa4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-29T20:00:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-29T20:00:03.000Z", "max_issues_repo_path": "pachyderm/fit/function.py", "max_issues_repo_name": "raymondEhlers/pachyderm", "max_issues_repo_head_hexsha": "c9a554d8c4e904315171a5aafa4569259e280fa4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-12-09T21:20:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T14:13:19.000Z", "max_forks_repo_path": "pachyderm/fit/function.py", "max_forks_repo_name": "raymondEhlers/pachyderm", "max_forks_repo_head_hexsha": "c9a554d8c4e904315171a5aafa4569259e280fa4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-03-27T18:09:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-23T19:21:33.000Z", "avg_line_length": 38.5204081633, "max_line_length": 129, "alphanum_fraction": 0.6749668874, "include": true, "reason": "import numpy", "num_tokens": 1676}
|
import numpy as np
import mylib_f77
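# `mylib_f77` is assumed to be an f2py-built extension exposing the Fortran
# daxpy routines, e.g. compiled with:  f2py -c mylib.f -m mylib_f77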
if __name__ == "__main__":
x = [2.0, 3.0, 4.0]
y = [1.0, 1.0, 1.0]
alpha = 1.0
n = len(x)
    # Functions can take iterables, but you get an ndarray back. A non-ndarray
    # argument is copied by the wrapper, so it is not modified in place.
ret = mylib_f77.daxpy(n, alpha, x, y)
assert y == [1.0, 1.0, 1.0]
assert isinstance(ret, np.ndarray)
ref = np.array([3.0, 4.0, 5.0])
np.testing.assert_equal(ret, ref)
# switch from intent(in,out) to intent(inout)
y = np.array([1.0, 1.0, 1.0])
ret = mylib_f77.daxpy_inplace(n, alpha, x, y)
np.testing.assert_equal(y, ref)
    assert ret is None
|
{"hexsha": "fae99343acd83d51612bc80d8a0c790723773c73", "size": 658, "ext": "py", "lang": "Python", "max_stars_repo_path": "linking/f77/using_f2py.py", "max_stars_repo_name": "berquist/eg", "max_stars_repo_head_hexsha": "4c368b12eaaffcf0af8032f10348cf8bc1c3957a", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linking/f77/using_f2py.py", "max_issues_repo_name": "berquist/eg", "max_issues_repo_head_hexsha": "4c368b12eaaffcf0af8032f10348cf8bc1c3957a", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linking/f77/using_f2py.py", "max_forks_repo_name": "berquist/eg", "max_forks_repo_head_hexsha": "4c368b12eaaffcf0af8032f10348cf8bc1c3957a", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.32, "max_line_length": 73, "alphanum_fraction": 0.6018237082, "include": true, "reason": "import numpy", "num_tokens": 243}
|
# Extended Kalman filter with Parameter Identification for Nomoto model
An Extended Kalman filter with a Nomoto model as the predictor will be developed.
The filter should also estimate the parameters in the Nomoto model. This is done by setting up a system where the parameters are defined as some of the states in the model.
The filter is run on simulated data as well as real model test data.
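To estimate $T_1$ alongside the motion states, the parameter is appended to the state vector, giving the augmented system the filter works with (a sketch of the formulation implemented below; only the heading $\psi$ is measured):
$$
x = \begin{bmatrix} \psi \\ r \\ T_1 \end{bmatrix}, \qquad
\dot{x} = \begin{bmatrix} r \\ (K\delta - r)/T_1 \\ 0 \end{bmatrix} + E w
$$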
```python
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
import sympy as sp
import src.visualization.book_format as book_format
book_format.set_style()
from src.substitute_dynamic_symbols import lambdify
from sympy import Matrix
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame,
Particle, Point)
from IPython.display import display, Math, Latex
from src.substitute_dynamic_symbols import run, lambdify
from sympy.physics.vector.printing import vpprint, vlatex
from src.data import mdl
from src.kalman_filter import extended_kalman_filter
```
## Nomoto model for ship manoeuvring dynamics
The Nomoto model can be written as:
```python
r,r1d,r2d = sp.symbols('r \dot{r} \ddot{r}')
psi,psi1d = sp.symbols('psi \dot{\psi}')
h,u = sp.symbols('h u')
x, x1d = sp.symbols('x \dot{x}')
A,B,C,D,E, Phi = sp.symbols('A B C D E Phi')
w = sp.symbols('w')
K, delta, T_1, T_2 = sp.symbols('K delta T_1 T_2')
eq_nomoto = sp.Eq(K*delta,
r + T_1*r1d + T_2*r2d)
Math(vlatex(eq_nomoto))
```
where $r$ is yaw rate with its time derivatives and $\delta$ is the rudder angle. $K$, $T_{1}$
and $T_{2}$ are the coefficients describing the hydrodynamics of the ship.
For slow manoeuvres this equation can be further simplified by removing the $\ddot{r}$ term into a first order Nomoto model:
```python
eq_nomoto_simple = eq_nomoto.subs(r2d,0)
Math(vlatex(eq_nomoto_simple))
```
### Simulation model
```python
f_hat = sp.Function('\hat{f}')(x,u,w)
eq_system = sp.Eq(x1d, f_hat)
eq_system
```
Where the state vector $x$:
```python
eq_x = sp.Eq(x, sp.UnevaluatedExpr(Matrix([psi,r])))
eq_x
```
and input vector $u$, which here is the rudder angle $\delta$; $w$ is zero mean Gaussian process noise.
For the Nomoto model the time derivatives for the states can be expressed as:
```python
eq_psi1d = sp.Eq(psi1d,r)
eq_psi1d
```
```python
eq_r1d = sp.Eq(r1d,sp.solve(eq_nomoto_simple,r1d)[0])
eq_r1d
```
```python
def lambda_f_constructor(K, T_1):
def lambda_f(x, u):
delta = u
f = np.array([[x[1], (K*delta-x[1])/T_1]]).T
return f
return lambda_f
```
## Simulation
Simulation with this model, where the rudder angle shifts between port and starboard.
```python
T_1_ = 1.8962353076056344
K_ = 0.17950970687951323
h_ = 0.01
lambda_f = lambda_f_constructor(K=K_, T_1=T_1_)
```
```python
def simulate(E, ws, t, us):
simdata = []
x_=np.deg2rad(np.array([[0,0]]).T)
for u_,w_ in zip(us,ws):
x_=x_ + h_*lambda_f(x=x_.flatten(), u=u_)
simdata.append(x_.flatten())
simdata = np.array(simdata)
df = pd.DataFrame(simdata, columns=["psi","r"], index=t)
df['delta'] = us
return df
```
```python
N_ = 8000
t_ = np.arange(0,N_*h_,h_)
us = np.deg2rad(np.concatenate((-10*np.ones(int(N_/4)),
10*np.ones(int(N_/4)),
-10*np.ones(int(N_/4)),
10*np.ones(int(N_/4)))))
np.random.seed(42)
E = np.array([[0, 1]]).T
process_noise = np.deg2rad(0.01)
ws = process_noise*np.random.normal(size=N_)
df = simulate(E=E, ws=ws, t=t_, us=us)
measurement_noise = np.deg2rad(0.5)
df['epsilon'] = measurement_noise*np.random.normal(size=N_)
df['psi_measure'] = df['psi'] + df['epsilon']
df['psi_deg'] = np.rad2deg(df['psi'])
df['psi_measure_deg'] = np.rad2deg(df['psi_measure'])
df['delta_deg'] = np.rad2deg(df['delta'])
```
```python
fig,ax=plt.subplots()
df.plot(y='psi_deg', ax=ax)
df.plot(y='psi_measure_deg', ax=ax, zorder=-1)
df.plot(y='delta_deg', ax=ax, zorder=-1)
df.plot(y='r')
ax.set_title('Simulation with measurement and process noise')
ax.set_xlabel('Time [s]');
```
## Kalman filter
Implementation of the Kalman filter. The code is inspired by this Matlab implementation: [ExEKF.m](https://github.com/cybergalactic/MSS/blob/master/mssExamples/ExEKF.m).
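With an explicit Euler discretization of step $h$, the transition Jacobian of the augmented system becomes (a sketch; it matches the `lambda_jacobian` constructed below):
$$
\Phi = I + h \frac{\partial f}{\partial x} =
\begin{bmatrix}
1 & h & 0 \\
0 & 1 - h/T_1 & -h \, (K\delta - r)/T_1^{2} \\
0 & 0 & 1
\end{bmatrix}
$$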
```python
jac = sp.eye(3,3) + Matrix([r,eq_r1d.rhs,0]).jacobian([psi,r,T_1])*h
jac
```
```python
def lambda_f_constructor2(K):
def lambda_f(x, u):
delta = u
T_1 = x[2] # Note! T_1 is the third state now!
r = x[1]
f = np.array([[r, (K*delta-r)/T_1, 0]]).T
return f
return lambda_f
```
```python
def lambda_jacobian_constructor(h, K):
def lambda_jacobian(x, u):
T_1 = x[2] # Note! T_1 is the third state now!
delta = u
r = x[1]
jac = np.array(
[
[1, h, 0],
[0, 1 - h / T_1, -h * (K * delta - r) / T_1 ** 2],
[0, 0, 1],
]
)
return jac
return lambda_jacobian
```
```python
lambda_jacobian = lambda_jacobian_constructor(h=h_, K=K_)
lambda_f = lambda_f_constructor2(K=K_)
```
```python
lambda_jacobian(x=[0,0,0.1], u=0)
```
```python
lambda_f(x=[0,0,0.1], u=0)
```
```python
x0=np.deg2rad(np.array([[0,0,3]]).T)
P_prd = np.diag([np.deg2rad(1), np.deg2rad(0.1), 0.1])
Qd = np.diag([np.deg2rad(0), 2])
Rd = np.deg2rad(1)
ys = df['psi_measure'].values
E_ = np.array(
[
[0,0],
[1,0],
[0,1]
],
)
C_ = np.array([[1, 0, 0]])
Cd_ = C_
Ed_ = h_ * E_
time_steps = extended_kalman_filter(x0=x0, P_prd=P_prd, lambda_f=lambda_f,
lambda_jacobian=lambda_jacobian,h=h_, us=us, ys=ys, E=E_, Qd=Qd, Rd=Rd, Cd=Cd_)
x_hats = np.array([time_step["x_hat"] for time_step in time_steps]).T
time = np.array([time_step["time"] for time_step in time_steps]).T
Ks = np.array([time_step["K"] for time_step in time_steps]).T
stds = np.sqrt(np.array([[time_step["P_hat"][0,0],
time_step["P_hat"][1,1],
time_step["P_hat"][2,2]] for time_step in time_steps]).T)
```
```python
n=len(P_prd)
fig,axes=plt.subplots(nrows=n)
df['T_1'] = T_1_
keys = ['psi','r','T_1']
for i,key in enumerate(keys):
ax=axes[i]
df.plot(y=key, ax=ax, label="True")
if key=='psi':
df.plot(y='psi_measure', ax=ax, label="Measured", zorder=-1)
ax.plot(time, x_hats[i, :], "-", label="kalman")
std_top = x_hats[i, :] + stds[i, :]
std_btm = x_hats[i, :] - stds[i, :]
ax.plot(time, std_top, linestyle=':', color='k', lw=1, alpha=0.4)
ax.plot(time, std_btm, linestyle=':', color='k', lw=1, alpha=0.4)
ax.fill_between(time, std_top, std_btm,
facecolor='yellow', alpha=0.2, interpolate=True, label='+/- std')
ax.set_ylabel(key)
ax.legend()
```
```python
fig,ax=plt.subplots()
for i,key in enumerate(keys):
ax.plot(time,Ks[i,:],label=key)
ax.set_title('Kalman gains')
ax.legend();
ax.set_ylim(-1,1);
```
```python
fig,axes=plt.subplots(nrows=2)
ax=axes[0]
for i,key in enumerate(keys):
ax.plot(time,stds[i,:]**2,label=key)
ax.set_title('Variances')
ax.legend();
ax=axes[1]
df.plot(y='delta',ax=ax)
```
# Real data
Using the developed Kalman filter on some real model test data
## Load test
```python
id=22773
df, units, meta_data = mdl.load(dir_path = '../data/raw', id=id)
df.index = df.index.total_seconds()
df.index-=df.index[0]
```
```python
from src.visualization.plot import track_plot
fig,ax=plt.subplots()
fig.set_size_inches(10,10)
track_plot(df=df, lpp=meta_data.lpp, x_dataset='x0', y_dataset='y0', psi_dataset='psi', beam=meta_data.beam, ax=ax);
```
```python
ys = df['psi'].values
h_m=h_ = df.index[1]-df.index[0]
us = df['delta'].values
x0=np.deg2rad(np.array([[0,0,T_1_]]).T)
P_prd = np.diag([np.deg2rad(1), np.deg2rad(0.1), 100])
Qd = np.diag([np.deg2rad(5), 10])
Rd = np.deg2rad(0.1)
E_ = np.array(
[
[0,0],
[1,0],
[0,1]
],
)
C_ = np.array([[1, 0, 0]])
Cd_ = C_
Ed_ = h_ * E_
time_steps = extended_kalman_filter(x0=x0, P_prd=P_prd, lambda_f=lambda_f,
lambda_jacobian=lambda_jacobian,h=h_, us=us, ys=ys, E=E_, Qd=Qd, Rd=Rd, Cd=Cd_)
x_hats = np.array([time_step["x_hat"] for time_step in time_steps]).T
time = np.array([time_step["time"] for time_step in time_steps]).T
Ks = np.array([time_step["K"] for time_step in time_steps]).T
stds = np.sqrt(np.array([[time_step["P_hat"][0,0],
time_step["P_hat"][1,1],
time_step["P_hat"][2,2]] for time_step in time_steps]).T)
```
```python
n=len(P_prd)
fig,axes=plt.subplots(nrows=n)
ax = axes[0]
df.plot(y='psi', label='Measured', ax=ax)
df['T_1'] = T_1_
keys = ['psi','r','T_1']
for i,key in enumerate(keys):
ax=axes[i]
ax.plot(time, x_hats[i, :], "-", label="kalman")
std_top = x_hats[i, :] + stds[i, :]
std_btm = x_hats[i, :] - stds[i, :]
ax.plot(time, std_top, linestyle=':', color='k', lw=1, alpha=0.4)
ax.plot(time, std_btm, linestyle=':', color='k', lw=1, alpha=0.4)
ax.fill_between(time, std_top, std_btm,
facecolor='yellow', alpha=0.2, interpolate=True, label='+/- std')
ax.set_ylabel(key)
ax.legend()
```
```python
fig,ax=plt.subplots()
for i,key in enumerate(keys):
ax.plot(time,Ks[i,:],label=key)
ax.set_title('Kalman gains')
ax.legend();
ax.set_ylim(-1,1);
```
```python
```
|
{"hexsha": "27bc9d14f685488f4bc9f4202e3ec9681b949fa8", "size": 19241, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/15.42_EKF_PIT_2_nomoto.ipynb", "max_stars_repo_name": "martinlarsalbert/wPCC", "max_stars_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/15.42_EKF_PIT_2_nomoto.ipynb", "max_issues_repo_name": "martinlarsalbert/wPCC", "max_issues_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/15.42_EKF_PIT_2_nomoto.ipynb", "max_forks_repo_name": "martinlarsalbert/wPCC", "max_forks_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2138964578, "max_line_length": 181, "alphanum_fraction": 0.5084974793, "converted": true, "num_tokens": 3047}
|
import argparse
import matplotlib
matplotlib.use("TkAgg")
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_std(ax, x, y_mean, y_std, color="darkorange", alpha=0.2, label=None):
ax.fill_between(
x, y_mean - y_std, y_mean + y_std, color=color, alpha=alpha, label=label
)
def plot(logdir, vis_headers_y_mean, vis_header_y_std, optional_legends, delta=None):
"""Plot mean and std of given statistics."""
progress_path = os.path.join(logdir, "progress.csv")
    with open(progress_path, "rt") as f:
        raw_data = list(csv.reader(f, delimiter=","))
headers = raw_data[0]
data = np.array(raw_data[1:]).astype("float")
vis_headers_x = [
"Epoch",
]
vis_header_idx_x = [headers.index(x) for x in vis_headers_x]
data_x = data[:, vis_header_idx_x[0]]
vis_header_idx_y_mean = [headers.index(x) for x in vis_headers_y_mean]
vis_header_idx_y_std = [headers.index(x) for x in vis_header_y_std]
data_y_mean = []
data_y_std = []
for idx in vis_header_idx_y_mean:
data_y_mean.append(data[:, idx])
for idx in vis_header_idx_y_std:
data_y_std.append(data[:, idx])
for i in range(len(data_y_mean)):
fig, ax = plt.subplots()
plot_mean_std(
ax,
data_x,
data_y_mean[i],
data_y_std[i],
color="gray",
label=vis_header_y_std[i],
)
ax.plot(data_x, data_y_mean[i], label=vis_headers_y_mean[i])
# Plot a line for upper risk bound if it exists.
if "Risk" in vis_headers_y_mean[i] and delta is not None:
ax.hlines(
y=delta,
xmin=0,
xmax=np.max(data_x),
linewidth=2,
color="r",
linestyle="--",
)
ax.legend()
ax.set_xlabel(vis_headers_x[0])
ax.grid(True)
fig_path = os.path.join(logdir, optional_legends[i] + ".png")
fig.savefig(fig_path, dpi=320, bbox_inches="tight")
def plot_loss(logdir, stats):
"""Plot progression of loss."""
progress_path = os.path.join(logdir, "progress.csv")
    with open(progress_path, "rt") as f:
        raw_data = list(csv.reader(f, delimiter=","))
headers = raw_data[0]
data = np.array(raw_data[1:]).astype("float")
vis_headers_x = ["Epoch"]
vis_header_idx_x = [headers.index(x) for x in vis_headers_x]
data_x = data[:, vis_header_idx_x[0]]
vis_header_idx_y = [headers.index(y) for y in stats]
data_y_mean = []
for idx in vis_header_idx_y:
data_y_mean.append(data[:, idx])
for i in range(len(data_y_mean)):
fig, ax = plt.subplots()
ax.plot(data_x, data_y_mean[i], color="gray", label=stats[i])
ax.legend()
ax.set_xlabel(vis_headers_x[0])
ax.grid(True)
fig_path = os.path.join(logdir, stats[i].split("/")[-1] + ".png")
fig.savefig(fig_path, dpi=320)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-i", "--input-dir", help="Log directory.")
parser.add_argument(
"-l",
"--loss",
action="store_true",
help="Whether to plot loss stats in trainer.",
)
parser.add_argument(
"--delta", type=float, help="Whether to visualize risk upper bound."
)
args = parser.parse_args()
logdir = args.input_dir
# See possible Headers in utils/stats_examples.txt
if args.loss:
plot_stats = ["trainer/RF1 Loss", "trainer/RF2 Loss"]
plot_loss(logdir, plot_stats)
else:
plot_stats = ["Distance", "Risks", "path length"]
for stat in plot_stats:
vis_headers_y_mean = [
"evaluation/{} Mean".format(stat),
"exploration/{} Mean".format(stat),
]
vis_header_y_std = [
"evaluation/{} Std".format(stat),
"exploration/{} Std".format(stat),
]
optional_legends = [
"Evaluation {}".format(stat),
"Exploration {}".format(stat),
]
plot(
logdir,
vis_headers_y_mean,
vis_header_y_std,
optional_legends,
args.delta,
)
print("DONE")
|
{"hexsha": "732ab700e7a0baf1593eee0b03eec27c59292836", "size": 4387, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/plot_learning_stats.py", "max_stars_repo_name": "cyrushx/risk_sac", "max_stars_repo_head_hexsha": "74f02a286166301221010a770360668d1dd8e63f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-08-06T04:29:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T08:43:21.000Z", "max_issues_repo_path": "utils/plot_learning_stats.py", "max_issues_repo_name": "cyrushx/risk_sac", "max_issues_repo_head_hexsha": "74f02a286166301221010a770360668d1dd8e63f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/plot_learning_stats.py", "max_forks_repo_name": "cyrushx/risk_sac", "max_forks_repo_head_hexsha": "74f02a286166301221010a770360668d1dd8e63f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-19T17:29:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T08:06:50.000Z", "avg_line_length": 29.4429530201, "max_line_length": 85, "alphanum_fraction": 0.5808069296, "include": true, "reason": "import numpy", "num_tokens": 1044}
|
## Demo of for Loops
## Prof. James Hunter
## from: https://rstudio.cloud/project/1181172
## May 28, 2020
## Based on Ch. 21 of Grolemund & Wickham, R for Data Science (O'Reilly)
library(tidyverse)
set.seed(42)
df <- tibble(
a = rnorm(10),
b = rnorm(10),
c = rnorm(10),
d = rnorm(10)
)
glimpse(df)
set.seed(42)
output <- numeric(length = ncol(df))
for(i in seq_along(output)) {
  output[i] <- median(df[[i]]) # double [[ ]] because median sees df as a list
}
output
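## The same result without an explicit loop, for comparison (base R):
vapply(df, median, numeric(1))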
|
{"hexsha": "cd6d9c373ff60ccd20953c746a0350d52abcd33c", "size": 495, "ext": "r", "lang": "R", "max_stars_repo_path": "demo_for_loop.r", "max_stars_repo_name": "jameshunterbr/Sustentare_MAD_2020", "max_stars_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo_for_loop.r", "max_issues_repo_name": "jameshunterbr/Sustentare_MAD_2020", "max_issues_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo_for_loop.r", "max_forks_repo_name": "jameshunterbr/Sustentare_MAD_2020", "max_forks_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.3333333333, "max_line_length": 77, "alphanum_fraction": 0.604040404, "num_tokens": 161}
|
function g2(byvec, valvec)
cv = copy(valvec)
cb = copy(byvec)
FastGroupBy.grouptwo!(cb, cv)
cv, cb
end
a = rand([randstring(8) for i=1:100_000], 10_000_000)
x = rand(10_000_000)
gc_enable(false)
@time g2(a,x);
gc_enable(true)
function g3(byvec, valvec)
bv = collect(zip(Ptr{UInt}.(pointer.(byvec)), valvec))
sort!(bv, by=x->unsafe_load(x[1]), alg = RadixSort)
end
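# Note: `unsafe_load` reads the first word native-endian (little-endian on
# x86), so the order g3 produces is not lexicographic; `strsort5` further
# below byte-swaps the loaded word with `ntoh` to sort in byte order.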
gc_enable(false)
@time g3(a,x);
gc_enable(true)
import SortingLab.load_bits
# Fixed signature: T must be bound by an argument, and the body needs the
# string itself, not just a pointer. Modeled on SortingLab.load_bits;
# load_bits_with_padding is assumed available from SortingLab's internals.
function ptrload(::Type{T}, s, skipbytes = 0)::T where T <: Unsigned
    n = sizeof(s)
    if n < skipbytes
        res = zero(T)
    elseif n - skipbytes >= sizeof(T)
        res = ntoh(unsafe_load(Ptr{T}(pointer(s)) + skipbytes))
    else
        ptrs = pointer(s) + skipbytes
        remaining_bytes_to_load = n - skipbytes
        res = load_bits_with_padding(T, ptrs, remaining_bytes_to_load)
    end
    return res
end
# SortingLab.load_bits(UInt, a[1], 2)
function strsort3!(svec)
    ms = maximum(sizeof, svec)
    ms = max(0, ms - sizeof(UInt))
    # pair each string with its pointer and sort by the first word's bytes
    vs = collect(zip(Ptr{UInt}.(pointer.(svec)), svec))
    sort!(vs, by = x->ntoh(unsafe_load(x[1])), alg=RadixSort)
    # @time while ms > 0
    #     sort!(svec, by=x->SortingLab.load_bits(UInt, x, ms) , alg = RadixSort)
    #     ms -= max(0, sizeof(UInt))
    # end
    [vs1[2] for vs1 in vs]
end
strsort3(svec) = strsort3!(copy(svec))
gc_enable(false)
@time bb = strsort3(a)
issorted(bb)
gc_enable(true)
using SortingLab, SortingAlgorithms
srand(1);
svec = rand([randstring(8) for i=1:1_000_000], 100_000_000)
function pointersort(svec)
# sp =
@inbounds sv = collect(zip(1:length(svec), unsafe_load.(Ptr{UInt}.(pointer.(svec)))))
# sv = Vector{Tuple{Ptr{UInt8}, UInt}}(length(svec))
# for (i,s) in enumerate(svec)
# sp = pointer(s)
# sv[i] = tuple(sp, ntoh(unsafe_load(Ptr{UInt}(sp))))
# end
@inbounds sort!(sv, by=x->x[2], alg=RadixSort)
@inbounds pos = [sv1[1] for sv1 in sv]
@inbounds svec[pos]
end
gc_enable(false)
@time pointersort(svec);
@code_warntype pointersort(svec)
gc_enable(true)
gc_enable(false)
@time fsort(svec);
gc_enable(true)
function strsort5(svec)
ps = Ptr{UInt}.(pointer.(svec))
vs = collect(zip(ps, ntoh.(unsafe_load.(ps))))
sort!(vs, by=x->x[2], alg=RadixSort)
unsafe_string.(Ptr{UInt8}.(getindex.(vs,1)))
end
@time sort(svec)
using BenchmarkTools
@benchmark pointersort($svec) samples=5 seconds=120
gc_enable(false)
gc_enable(true)
gc_enable(false)
@time fsort(svec);
gc_enable(true)
@time fsortperm(svec)
using RCall
R"""
memory.limit(2^31-1)
"""
@rput svec;
rres = R"""
replicate(5, system.time(sort(svec, method="radix"))[3])
"""
mean(rres)
@time a = strsort5(svec);
@code_warntype strsort5(svec);
fsort(svec[1:1])
@time aa = fsort(svec);
all(a == aa)
x = "abc"
pointer(x)
pointer(x) - 8
pointer_from_objref(x) |> unsafe_pointer_to_objref
pointer_from_objref(svec[1]) |> unsafe_pointer_to_objref
pointer(svec)
# SortingLab.load_bits(UInt, a[1], 2)
function strsort!(svec)
ms = maximum(sizeof, svec)
ms = max(0, ms - sizeof(UInt))
@time sort!(svec, by=x->SortingLab.load_bits(UInt, x, ms) , alg = RadixSort)
@time while ms > 0
sort!(svec, by=x->SortingLab.load_bits(UInt, x, ms) , alg = RadixSort)
ms -= max(0, sizeof(UInt))
end
svec
end
strsort(svec) = strsort!(copy(svec))
@time bb = strsort(a)
@code_warntype strsort(a)
issorted(bb)
using SortingLab
@time bb = fsort(a)
issorted(bb)
# SortingLab.load_bits(UInt, a[1], 2)
function strsort2!(svec)
ms = maximum(sizeof, svec)
ms = max(0, ms - sizeof(UInt))
vs = collect(zip(SortingLab.load_bits.(UInt, svec, ms), svec))
sort!(vs,by= x->x[1], alg=RadixSort)
# @time while ms > 0
# sort!(svec, by=x->SortingLab.load_bits(UInt, x, ms) , alg = RadixSort)
# ms -= max(0, sizeof(UInt))
# end
[vs1[2] for vs1 in vs]
end
strsort2(svec) = strsort2!(copy(svec))
gc_enable(false)
@time bb = strsort2(a)
issorted(bb)
gc_enable(true)
|
{"hexsha": "8e0be3d1156f9894be2143c94c1a2388a938a1de", "size": 3953, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/experiments/string_sort_exp.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SortingLab.jl-562c1548-17b8-5b69-83cf-d8aebec229f5", "max_stars_repo_head_hexsha": "0af37a9af94de625d6acb59016e203ede7b83b18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2018-03-06T06:41:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T19:51:41.000Z", "max_issues_repo_path": "src/experiments/string_sort_exp.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SortingLab.jl-562c1548-17b8-5b69-83cf-d8aebec229f5", "max_issues_repo_head_hexsha": "0af37a9af94de625d6acb59016e203ede7b83b18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-01-25T06:05:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-20T03:20:36.000Z", "max_forks_repo_path": "src/experiments/string_sort_exp.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SortingLab.jl-562c1548-17b8-5b69-83cf-d8aebec229f5", "max_forks_repo_head_hexsha": "0af37a9af94de625d6acb59016e203ede7b83b18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-02-26T18:37:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T19:54:27.000Z", "avg_line_length": 21.252688172, "max_line_length": 89, "alphanum_fraction": 0.6463445484, "num_tokens": 1327}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seekr2.modules.common_base as base
import plotting
title = "Muller Potential System"
model_file = "/home/lvotapka/toy_seekr_systems/muller_potential_multi/model.xml"
model = base.load_model(model_file)
boundaries = np.array([[-1.75, 1.0], [-0.5, 2.0]])
toy_plot = plotting.Toy_plot(model, title, boundaries, stride=10)
milestone_cv_functions = ["x=value", "value"]
plotting.draw_linear_milestones(toy_plot, milestone_cv_functions)
ani = toy_plot.animate_trajs(animating_anchor_indices=[19, 20, 30, 31, 29, 18, 40, 39, 38, 28, 27, 37, 26, 17, 48, 49, 50, 47, 58, 46, 69, 57, 68, 80, 81, 70, 45, 56, 36, 35, 34, 25, 24, 23, 22, 33, 44, 82, 93, 92, 55, 66, 67, 77, 78, 79, 88, 89, 90, 91, 100, 99, 101, 102, 103, 104, 16, 15, 14, 13, 12, 11, 2, 3, 4, 5, 6, 7, 8, 1, 0, 51, 21, 32, 105, 94])
plt.show()
#movie_filename = "muller_potential_milestones.gif"
#writergif = animation.ImageMagickWriter(fps=30)
#ani.save(movie_filename, writer=writergif)
#movie_filename = "muller_potential_milestones.mp4"
#writervideo = animation.FFMpegWriter(fps=60)
#ani.save(movie_filename, writer=writervideo)
|
{"hexsha": "a128b6ba431528b388d5bbcfa08c944b4474c7c3", "size": 1195, "ext": "py", "lang": "Python", "max_stars_repo_path": "systems/toy_systems/muller_potential_plot_multidimensional.py", "max_stars_repo_name": "seekrcentral/seekr2_systems", "max_stars_repo_head_hexsha": "938e36ea61a3f106c0fb8d81b9bd02f664327ac7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "systems/toy_systems/muller_potential_plot_multidimensional.py", "max_issues_repo_name": "seekrcentral/seekr2_systems", "max_issues_repo_head_hexsha": "938e36ea61a3f106c0fb8d81b9bd02f664327ac7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "systems/toy_systems/muller_potential_plot_multidimensional.py", "max_forks_repo_name": "seekrcentral/seekr2_systems", "max_forks_repo_head_hexsha": "938e36ea61a3f106c0fb8d81b9bd02f664327ac7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8, "max_line_length": 356, "alphanum_fraction": 0.7246861925, "include": true, "reason": "import numpy", "num_tokens": 474}
|
@with_kw struct MobileNetV3 <: ModuleSpec
stages::AbstractVector{MBConv}
"Size of last convolution"
k_out::Int
end
function (mbnv3::MobileNetV3)()
head = ConvBlock(ksize = 3, k_in = 3, k_out = 16; σ = relu, stride = 2)()
return Chain(
head,
[stage() for stage in mbnv3.stages]...,
Conv((1, 1), mbnv3.stages[end].k_out => mbnv3.k_out),
BatchNorm(mbnv3.k_out, relu)
)
end
"""
$(TYPEDEF)
Classification head for MobileNetV3 as described in
MobileNetV3-small from [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
section 5.1 and figure 5.
$(TYPEDFIELDS)
"""
@with_kw struct MobileNetV3Head <: ModuleSpec
"Number of classes to predict"
n_classes
"Number of input kernels"
k_in
"Number of intermediate kernels"
k_mid
end
function (head::MobileNetV3Head)()
return Chain(
GlobalMeanPool(),
Conv((1, 1), head.k_in => head.k_mid, relu),
Conv((1, 1), head.k_mid => head.n_classes),
flatten
)
end
"""
mobilenetv3_small(usedepthwise = false)
MobileNetV3-small from [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
"""
function mobilenetv3_small(usedepthwise = false)
cb = usedepthwise ? DepthwiseSeparable : ConvBlock
mbvn3 = MobileNetV3([
MBConv(ksize = 3, k_in = 16, k_exp = 16, k_out = 16,
σ = relu, has_se = true, stride = 2, convblock = cb),
MBConv(ksize = 3, k_in = 16, k_exp = 72, k_out = 24,
σ = relu, has_se = false, stride = 2, convblock = cb),
MBConv(ksize = 3, k_in = 24, k_exp = 88, k_out = 24,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 24, k_exp = 96, k_out = 40,
σ = relu, has_se = true, stride = 2, convblock = cb),
MBConv(ksize = 5, k_in = 40, k_exp = 240, k_out = 40,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 40, k_exp = 240, k_out = 40,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 40, k_exp = 120, k_out = 48,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 48, k_exp = 144, k_out = 48,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 48, k_exp = 288, k_out = 96,
σ = relu, has_se = true, stride = 2, convblock = cb),
MBConv(ksize = 5, k_in = 96, k_exp = 576, k_out = 96,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 96, k_exp = 576, k_out = 96,
σ = relu, has_se = true, stride = 1, convblock = cb),
], 576)
return mbvn3()
end
"""
mobilenetv3_large(usedepthwise = false)
MobileNetV3-large from [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
"""
function mobilenetv3_large(usedepthwise = false)
cb = usedepthwise ? DepthwiseSeparable : ConvBlock
mbvn3 = MobileNetV3([
MBConv(ksize = 3, k_in = 16, k_exp = 16, k_out = 16,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 16, k_exp = 64, k_out = 24,
σ = relu, has_se = false, stride = 2, convblock = cb),
MBConv(ksize = 3, k_in = 24, k_exp = 72, k_out = 24,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 24, k_exp = 72, k_out = 40,
σ = relu, has_se = true, stride = 2, convblock = cb),
MBConv(ksize = 5, k_in = 40, k_exp = 120, k_out = 40,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 40, k_exp = 120, k_out = 40,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 40, k_exp = 240, k_out = 80,
σ = relu, has_se = false, stride = 2, convblock = cb),
MBConv(ksize = 3, k_in = 80, k_exp = 200, k_out = 80,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 80, k_exp = 184, k_out = 80,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 80, k_exp = 184, k_out = 80,
σ = relu, has_se = false, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 80, k_exp = 480, k_out = 112,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 3, k_in = 112, k_exp = 672, k_out = 112,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 112, k_exp = 672, k_out = 160,
σ = relu, has_se = true, stride = 2, convblock = cb),
MBConv(ksize = 5, k_in = 160, k_exp = 960, k_out = 160,
σ = relu, has_se = true, stride = 1, convblock = cb),
MBConv(ksize = 5, k_in = 160, k_exp = 960, k_out = 160,
σ = relu, has_se = true, stride = 1, convblock = cb),
], 960)
return mbvn3()
end
function mobilenetv3_head_small(n_classes)
return MobileNetV3Head(
n_classes = n_classes,
k_in = 576,
k_mid = 1024
)()
end
function mobilenetv3_head_large(n_classes)
return MobileNetV3Head(
n_classes = n_classes,
        # the large body ends in 960 kernels (see `mobilenetv3_large`),
        # expanded to 1280 in the head per the paper
        k_in = 960,
        k_mid = 1280
)()
end
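# Sketch of assumed usage (not exercised in this file): a classifier is
# presumably assembled by chaining a body with its matching head, e.g.
#   model = Chain(mobilenetv3_small(), mobilenetv3_head_small(1000))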
|
{"hexsha": "c00c814c7c5d8fe0e31846447512dfb48d06954e", "size": 5298, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mobilenetv3.jl", "max_stars_repo_name": "lorenzoh/FluxModels.jl", "max_stars_repo_head_hexsha": "9d53bf10c0fe8d5c333e5ec5866e8699f9c5f3e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-09T23:36:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T20:58:54.000Z", "max_issues_repo_path": "src/mobilenetv3.jl", "max_issues_repo_name": "lorenzoh/FluxModels.jl", "max_issues_repo_head_hexsha": "9d53bf10c0fe8d5c333e5ec5866e8699f9c5f3e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mobilenetv3.jl", "max_forks_repo_name": "lorenzoh/FluxModels.jl", "max_forks_repo_head_hexsha": "9d53bf10c0fe8d5c333e5ec5866e8699f9c5f3e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-09T23:36:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T23:36:41.000Z", "avg_line_length": 34.8552631579, "max_line_length": 84, "alphanum_fraction": 0.5688939222, "num_tokens": 1827}
|
import numpy as np
from typing import Tuple
import autoarray as aa
from autogalaxy.profiles.light_profiles import light_profiles as lp
class LightProfileLinear(lp.LightProfile, aa.LinearObj):
    def mapping_matrix_from(self, grid: aa.type.Grid2DLike) -> np.ndarray:
        # the slim (1D) unit-intensity image, which serves as this profile's
        # column in the linear inversion's mapping matrix
        return self.image_2d_from(grid=grid).slim
class EllSersic(lp.AbstractEllSersic, LightProfileLinear):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
):
"""
The elliptical Sersic light profile.
See `autogalaxy.profiles.light_profiles.light_profiles.LightProfile` for a description of light profile objects.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=1.0,
effective_radius=effective_radius,
sersic_index=sersic_index,
)
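# Illustrative sketch (`grid` is assumed here, e.g. an `aa.Grid2D`): because
# `intensity` is fixed at 1.0, `mapping_matrix_from` returns the unit-intensity
# image whose overall amplitude the linear inversion later solves for, e.g.
#
#   sersic = EllSersic(centre=(0.0, 0.0), effective_radius=1.0, sersic_index=2.0)
#   column = sersic.mapping_matrix_from(grid=grid)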
|
{"hexsha": "8b8d4ed94f218b92365e5c0b9c8bf326872a0598", "size": 1614, "ext": "py", "lang": "Python", "max_stars_repo_path": "autogalaxy/profiles/light_profiles/light_profiles_linear.py", "max_stars_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_stars_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autogalaxy/profiles/light_profiles/light_profiles_linear.py", "max_issues_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_issues_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autogalaxy/profiles/light_profiles/light_profiles_linear.py", "max_forks_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_forks_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0869565217, "max_line_length": 121, "alphanum_fraction": 0.6338289963, "include": true, "reason": "import numpy", "num_tokens": 361}
|
import sys
from rdkit import Chem
import glob
import pandas as pd
import numpy as np
if __name__ == '__main__':
COLUMNS = ['SMILES', 'DOCKING', 'ITER'] # SAC
d_path = sys.argv[1]
protein = d_path.split('_')[0]
ef = []
tot = []
unique = []
hit = []
top_k_each = []
df_append = None
if 'morld_' in d_path:
COLUMNS = ['SMILES', 'DOCKING', 'SA', 'QED'] # morld
elif 'rei_' in d_path:
COLUMNS = ['ITER', 'SMILES', 'DOCKING', 'd', 'r'] # REINVENT
df = pd.read_csv(d_path, names = COLUMNS)
if ('_rei' not in d_path) and ('_morld' not in d_path):
        df = df.loc[df['ITER']>4000].loc[df['ITER']<20000] # keep a fixed ITER window; MORLD/REINVENT logs are exempted by the check above
df = df.head(3000)
n_total_smi = len(df)
print('Total molecules : ', n_total_smi)
df = df.drop_duplicates(subset=['SMILES'])
df['MOL'] = df['SMILES'].apply(Chem.MolFromSmiles)
df = df.dropna(subset=['MOL'])
n_unique_smi = len(df)
print('Unique molecules : ', n_unique_smi)
    if 'fa7_' in d_path:
        df_hit = df.loc[df['DOCKING']>8.5] # fa7
    elif 'parp1_' in d_path:
        df_hit = df.loc[df['DOCKING']>10.] # parp1
    elif '5ht1b_' in d_path:
        df_hit = df.loc[df['DOCKING']>8.7845] # 5ht1b
    else:
        raise ValueError(f'no hit threshold known for {d_path}')
n_hit = len(df_hit)
print('Hit molecules : ', n_hit)
print('Hit ratio : ', float(n_hit/n_total_smi))
idx_tmp = int(len(df)*.05)
    if len(df)<20:
        # with fewer than 20 molecules, 5% rounds down to zero, so report the single best score
        top_5_score = df.sort_values(by='DOCKING', ascending=False).loc[:,'DOCKING'].iloc[0]
        print('Top 5% score : ', top_5_score)
else:
top_5_score = df.sort_values(by='DOCKING', ascending=False).loc[:,'DOCKING'].iloc[:idx_tmp].mean()
print('Top 5% score : ', top_5_score)
|
{"hexsha": "e3d872361b219de6948df9b88167e794518b2417", "size": 1707, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl_final_results/libs/enrichment.py", "max_stars_repo_name": "AITRICS/FREED", "max_stars_repo_head_hexsha": "ff75e903808edc57f01ef6d0d198b44a56dcdf4b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2021-11-03T09:27:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:49:00.000Z", "max_issues_repo_path": "rl_final_results/libs/enrichment.py", "max_issues_repo_name": "doxxitxxyoung/FREED", "max_issues_repo_head_hexsha": "f7fed79dd98c0ea2ea66ae9b03de63fc57083995", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-11T08:02:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T05:49:54.000Z", "max_forks_repo_path": "rl_final_results/libs/enrichment.py", "max_forks_repo_name": "doxxitxxyoung/FREED", "max_forks_repo_head_hexsha": "f7fed79dd98c0ea2ea66ae9b03de63fc57083995", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-12-09T07:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T11:15:47.000Z", "avg_line_length": 27.9836065574, "max_line_length": 106, "alphanum_fraction": 0.5864089045, "include": true, "reason": "import numpy", "num_tokens": 569}
|
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2019, Clearpath Robotics
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <fuse_models/unicycle_2d_state_cost_function.h>
#include <fuse_models/unicycle_2d_state_cost_functor.h>
#include <benchmark/benchmark.h>
#include <ceres/autodiff_cost_function.h>
#include <Eigen/Dense>
#include <vector>
class Unicycle2DStateCostFunction : public benchmark::Fixture
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Unicycle2DStateCostFunction()
: jacobians(num_parameter_blocks)
, J(num_parameter_blocks)
{
for (size_t i = 0; i < num_parameter_blocks; ++i)
{
J[i].resize(num_residuals, block_sizes[i]);
jacobians[i] = J[i].data();
}
}
// Analytic cost function
static constexpr double dt{ 0.1 };
static const fuse_core::Matrix8d sqrt_information;
static const fuse_models::Unicycle2DStateCostFunction cost_function;
// Parameters
static const double* parameters[];
// Residuals
fuse_core::Vector8d residuals;
static const std::vector<int32_t>& block_sizes;
static const size_t num_parameter_blocks;
static const size_t num_residuals;
// Jacobians
std::vector<double*> jacobians;
private:
// Cost function process noise and covariance
static const double process_noise_diagonal[];
static const fuse_core::Matrix8d covariance;
// Parameter blocks
static const double position1[];
static const double yaw1[];
static const double vel_linear1[];
static const double vel_yaw1[];
static const double acc_linear1[];
static const double position2[];
static const double yaw2[];
static const double vel_linear2[];
static const double vel_yaw2[];
static const double acc_linear2[];
// Jacobian matrices
std::vector<fuse_core::MatrixXd> J;
};
// Cost function process noise and covariance
const double Unicycle2DStateCostFunction::process_noise_diagonal[] = { 1e-3, 1e-3, 1e-2, 1e-6, 1e-6, 1e-4, 1e-9, 1e-9 };
const fuse_core::Matrix8d Unicycle2DStateCostFunction::covariance =
fuse_core::Vector8d(process_noise_diagonal).asDiagonal();
// Parameter blocks
const double Unicycle2DStateCostFunction::position1[] = { 0.0, 0.0 };
const double Unicycle2DStateCostFunction::yaw1[] = {0.0};
const double Unicycle2DStateCostFunction::vel_linear1[] = {1.0, 0.0};
const double Unicycle2DStateCostFunction::vel_yaw1[] = {1.570796327};
const double Unicycle2DStateCostFunction::acc_linear1[] = {1.0, 0.0};
const double Unicycle2DStateCostFunction::position2[] = {0.105, 0.0};
const double Unicycle2DStateCostFunction::yaw2[] = {0.1570796327};
const double Unicycle2DStateCostFunction::vel_linear2[] = {1.1, 0.0};
const double Unicycle2DStateCostFunction::vel_yaw2[] = {1.570796327};
const double Unicycle2DStateCostFunction::acc_linear2[] = {1.0, 0.0};
// Analytic cost function
const fuse_core::Matrix8d Unicycle2DStateCostFunction::sqrt_information(covariance.inverse().llt().matrixU());
const fuse_models::Unicycle2DStateCostFunction Unicycle2DStateCostFunction::cost_function{ dt, sqrt_information };
// Parameters
const double* Unicycle2DStateCostFunction::parameters[] = { // NOLINT(whitespace/braces)
position1, yaw1, vel_linear1, vel_yaw1, acc_linear1, position2, yaw2, vel_linear2, vel_yaw2, acc_linear2
};
const std::vector<int32_t>& Unicycle2DStateCostFunction::block_sizes = cost_function.parameter_block_sizes();
const size_t Unicycle2DStateCostFunction::num_parameter_blocks = block_sizes.size();
const size_t Unicycle2DStateCostFunction::num_residuals = cost_function.num_residuals();
BENCHMARK_F(Unicycle2DStateCostFunction, AnalyticUnicycle2DCostFunction)(benchmark::State& state)
{
for (auto _ : state)
{
cost_function.Evaluate(parameters, residuals.data(), jacobians.data());
}
}
BENCHMARK_F(Unicycle2DStateCostFunction, AutoDiffUnicycle2DStateCostFunction)(benchmark::State& state)
{
// Create cost function using automatic differentiation on the cost functor
ceres::AutoDiffCostFunction<fuse_models::Unicycle2DStateCostFunctor, 8, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2>
cost_function_autodiff(new fuse_models::Unicycle2DStateCostFunctor(dt, sqrt_information));
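  // Template arguments: 8 residuals, then the ten parameter block sizes --
  // position (2), yaw (1), linear velocity (2), yaw velocity (1) and linear
  // acceleration (2) for each of the two states.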
for (auto _ : state)
{
cost_function_autodiff.Evaluate(parameters, residuals.data(), jacobians.data());
}
}
BENCHMARK_MAIN();
|
{"hexsha": "42aee42d4933732568d1e376fd28168dd2b4aec6", "size": 5827, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fuse_models/benchmark/benchmark_unicycle_2d_state_cost_function.cpp", "max_stars_repo_name": "mcx/fuse", "max_stars_repo_head_hexsha": "3825e489ceaba394fb07c87e0e52dce9485da19b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 383.0, "max_stars_repo_stars_event_min_datetime": "2018-07-02T07:20:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:51:06.000Z", "max_issues_repo_path": "fuse_models/benchmark/benchmark_unicycle_2d_state_cost_function.cpp", "max_issues_repo_name": "mcx/fuse", "max_issues_repo_head_hexsha": "3825e489ceaba394fb07c87e0e52dce9485da19b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 117.0, "max_issues_repo_issues_event_min_datetime": "2018-07-16T10:32:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T20:15:16.000Z", "max_forks_repo_path": "fuse_models/benchmark/benchmark_unicycle_2d_state_cost_function.cpp", "max_forks_repo_name": "mcx/fuse", "max_forks_repo_head_hexsha": "3825e489ceaba394fb07c87e0e52dce9485da19b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 74.0, "max_forks_repo_forks_event_min_datetime": "2018-10-01T10:10:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T04:48:22.000Z", "avg_line_length": 36.8797468354, "max_line_length": 120, "alphanum_fraction": 0.7609404496, "num_tokens": 1505}
|
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/program_options_2/parse_command_line.hpp>
#include <boost/program_options_2/storage.hpp>
#include <gtest/gtest.h>
// TODO: Document that the option default type (std::string_view) causes
// dangling when loading from files.
namespace po2 = boost::program_options_2;
#define ARGUMENTS(T, choice0, choice1, choice2) \
po2::argument<T>("-a,--abacus", "The abacus."), \
po2::argument<T>("-b,--bobcat", "The bobcat."), \
po2::argument<std::vector<T>>("-c,--cataphract", "The cataphract", 2), \
po2::argument<T>( \
"-d,--dolemite", "*The* Dolemite.", 1, choice0, choice1, choice2), \
po2::argument<std::vector<T>>( \
"-z,--zero-plus", "None is fine; so is more.", po2::zero_or_more), \
po2::argument<std::set<T>>( \
"-o,--one-plus", "One is fine; so is more.", po2::one_or_more)
#define MIXED(T, choice0, choice1, choice2, default_) \
po2::argument<T>("-a,--abacus", "The abacus."), \
po2::with_default( \
po2::argument<T>("-b,--bobcat", "The bobcat."), default_), \
po2::positional<std::vector<T>>("cataphract", "The cataphract", 2), \
po2::argument<T>( \
"-d,--dolemite", "*The* Dolemite.", 1, choice0, choice1, choice2), \
po2::remainder<std::vector<std::string>>( \
"args", "other args at the end")
TEST(storage, save_load_response_file)
{
// Just arguments
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"-o",
"2",
"-z",
"2",
"-c",
"77",
"88",
"--dolemite",
"5"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["zero-plus"]),
std::vector<int>{2});
EXPECT_EQ(
boost::any_cast<std::set<int>>(m["one-plus"]), std::set<int>{2});
po2::save_response_file("saved_map", m, ARGUMENTS(int, 4, 5, 6));
}
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"-o",
"2",
"-z",
"2",
"-c",
"77",
"88",
"--dolemite",
"5"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_THROW(
po2::save_response_file(
"dummy_file",
m,
po2::argument<double>("-a,--abacus", "The abacus.")),
po2::save_error);
try {
po2::load_response_file(
"dummy_file",
m,
po2::argument<double>("-a,--abacus", "The abacus."));
} catch (po2::save_error & e) {
EXPECT_EQ(e.error(), po2::save_result::bad_any_cast);
}
}
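    // The "@<filename>" token asks the parser to expand the named response
    // file in place of the argument.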
{
std::ostringstream os;
std::vector<std::string_view> args{"prog", "@saved_map"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
}
{
po2::string_any_map m;
po2::load_response_file("saved_map", m, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["zero-plus"]),
std::vector<int>{2});
EXPECT_EQ(
boost::any_cast<std::set<int>>(m["one-plus"]), std::set<int>{2});
}
// Mixed arguments and positionals
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"77",
"88",
"--dolemite",
"5",
"\\2\""};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, MIXED(int, 4, 5, 6, 42));
EXPECT_EQ(m.size(), 5u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<std::string>>(m["args"]),
std::vector<std::string>({"\\2\""}));
po2::save_response_file("saved_mixed_map", m, MIXED(int, 4, 5, 6, 42));
{
std::ifstream ifs("saved_mixed_map");
std::string const contents_with_comments =
"#comments are ignored\n" + po2::detail::file_slurp(ifs) +
"# more comments";
ifs.close();
std::ofstream ofs("saved_mixed_map");
ofs << contents_with_comments;
}
}
{
std::ostringstream os;
std::vector<std::string_view> args{"prog", "@saved_mixed_map"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", std::cout, MIXED(int, 4, 5, 6, 42));
EXPECT_EQ(m.size(), 5u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
}
{
po2::string_any_map m;
po2::load_response_file("saved_mixed_map", m, MIXED(int, 4, 5, 6, 42));
EXPECT_EQ(m.size(), 5u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<std::string>>(m["args"]),
std::vector<std::string>({"\\2\""}));
}
// Error cases.
{
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"nonexistent_file", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"nonexistent_file", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(
e.error(), po2::load_result::could_not_open_file_for_reading);
}
}
{
std::ofstream ofs("bad_map_for_loading");
ofs << "--cataphract 5";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::wrong_number_of_args);
}
}
{
std::ofstream ofs("bad_map_for_loading");
ofs << "--unknown";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::unknown_arg);
}
}
{
std::ofstream ofs("bad_map_for_loading");
ofs << "unknown";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::unknown_arg);
}
}
{
std::ofstream ofs("bad_map_for_loading");
ofs << "--abacus fish";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::cannot_parse_arg);
}
}
{
std::ofstream ofs("bad_map_for_loading");
ofs << "--dolemite 555";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::no_such_choice);
}
}
{
auto validator = [](auto const &) {
return po2::validation_result{false, "bad"};
};
std::ofstream ofs("bad_map_for_loading");
ofs << "--abacus 55";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_map_for_loading",
m,
po2::with_validator(
po2::argument<int>("-a,--abacus", "The abacus."),
validator)),
po2::load_error);
try {
po2::load_response_file(
"bad_map_for_loading",
m,
po2::with_validator(
po2::argument<int>("-a,--abacus", "The abacus."),
validator));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::validation_error);
}
}
}
TEST(storage, save_load_json_file)
{
    // Just arguments
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"-o",
"2",
"-z",
"2",
"-c",
"77",
"88",
"--dolemite",
"5"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["zero-plus"]),
std::vector<int>{2});
EXPECT_EQ(
boost::any_cast<std::set<int>>(m["one-plus"]), std::set<int>{2});
po2::save_json_file("saved_json_map", m, ARGUMENTS(int, 4, 5, 6));
{
std::ifstream ifs("saved_json_map");
std::string const contents_with_comments =
"#comments are ignored\n" + po2::detail::file_slurp(ifs) +
"# more comments";
ifs.close();
std::ofstream ofs("saved_json_map");
ofs << contents_with_comments;
}
}
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"-o",
"2",
"-z",
"2",
"-c",
"77",
"88",
"--dolemite",
"5"};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, ARGUMENTS(int, 4, 5, 6));
EXPECT_THROW(
po2::save_json_file(
"dummy_file",
m,
po2::argument<double>("-a,--abacus", "The abacus.")),
po2::save_error);
try {
po2::save_json_file(
"dummy_file",
m,
po2::argument<double>("-a,--abacus", "The abacus."));
} catch (po2::save_error & e) {
EXPECT_EQ(e.error(), po2::save_result::bad_any_cast);
}
}
{
po2::string_any_map m;
EXPECT_THROW(
po2::load_json_file("dummy_file", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_json_file("dummy_file", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::malformed_json);
EXPECT_EQ(e.str(), R"(dummy_file:2:0: error: Expected '}' here (end of input):
^
Note: The file is expected to use a subset of JSON that contains only strings,
arrays, and objects. JSON types null, boolean, and number are not supported,
and character escapes besides '\\' and '\"' are not supported.
)");
}
}
{
po2::string_any_map m;
po2::load_json_file("saved_json_map", m, ARGUMENTS(int, 4, 5, 6));
EXPECT_EQ(m.size(), 6u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["zero-plus"]),
std::vector<int>{2});
EXPECT_EQ(
boost::any_cast<std::set<int>>(m["one-plus"]), std::set<int>{2});
}
// Mixed arguments and positionals
{
std::ostringstream os;
std::vector<std::string_view> args{
"prog",
"-a",
"55",
"--bobcat",
"66",
"77",
"88",
"--dolemite",
"5",
"\\2\""};
po2::string_any_map m;
po2::parse_command_line(
args, m, "A program.", os, MIXED(int, 4, 5, 6, 42));
EXPECT_EQ(m.size(), 5u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<std::string>>(m["args"]),
std::vector<std::string>({"\\2\""}));
po2::save_response_file(
"saved_mixed_json_map", m, MIXED(int, 4, 5, 6, 42));
{
std::ifstream ifs("saved_mixed_json_map");
std::string const contents_with_comments =
"#comments are ignored\n" + po2::detail::file_slurp(ifs) +
"# more comments";
ifs.close();
std::ofstream ofs("saved_mixed_json_map");
ofs << contents_with_comments;
}
}
{
po2::string_any_map m;
po2::load_response_file(
"saved_mixed_json_map", m, MIXED(int, 4, 5, 6, 42));
EXPECT_EQ(m.size(), 5u);
EXPECT_EQ(boost::any_cast<int>(m["abacus"]), 55);
EXPECT_EQ(boost::any_cast<int>(m["bobcat"]), 66);
EXPECT_EQ(
boost::any_cast<std::vector<int>>(m["cataphract"]),
std::vector<int>({77, 88}));
EXPECT_EQ(boost::any_cast<int>(m["dolemite"]), 5);
EXPECT_EQ(
boost::any_cast<std::vector<std::string>>(m["args"]),
std::vector<std::string>({"\\2\""}));
}
// Error cases
{
std::ofstream ofs("bad_json_map_for_loading");
ofs << "{ \"--unknown\": \"unknown\" }";
ofs.close();
po2::string_any_map m;
EXPECT_THROW(
po2::load_response_file(
"bad_json_map_for_loading", m, ARGUMENTS(int, 4, 5, 6)),
po2::load_error);
try {
po2::load_response_file(
"bad_json_map_for_loading", m, ARGUMENTS(int, 4, 5, 6));
} catch (po2::load_error & e) {
EXPECT_EQ(e.error(), po2::load_result::unknown_arg);
}
}
}
#undef ARGUMENTS
#undef MIXED
|
{"hexsha": "ca868e967d67227c47275b59234661d6f9578bdc", "size": 18095, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/storage.cpp", "max_stars_repo_name": "tzlaine/program_options_2", "max_stars_repo_head_hexsha": "41f2d9fef785d0f775cccff9b318526ad19bc089", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/storage.cpp", "max_issues_repo_name": "tzlaine/program_options_2", "max_issues_repo_head_hexsha": "41f2d9fef785d0f775cccff9b318526ad19bc089", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2020-09-21T06:52:56.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-26T17:55:11.000Z", "max_forks_repo_path": "test/storage.cpp", "max_forks_repo_name": "tzlaine/program_options_2", "max_forks_repo_head_hexsha": "41f2d9fef785d0f775cccff9b318526ad19bc089", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3703041145, "max_line_length": 90, "alphanum_fraction": 0.4811826471, "num_tokens": 4704}
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python [conda env:mtg]
# language: python
# name: conda-env-mtg-py
# ---
# # Build graph for each path from a cards text to entities
# The idea here is to
#
# 1. Load the previously ETLelled outgoing and incoming graphs
# 2. Build simple paths from card to its entity nodes
# 3. Build a paths df keyed by card_id, entity and orders with some common attributes of these paths:
# paragraph type/order, pop type/order, part type/order,
# entity pos (actualy head's pos), entity head (actually head's head)
#    entity pos (actually head's pos), entity head (actually head's head)
#
# NEXT
# 5. Don't know yet
#
# **DESIRED RESULT**:
# result = dataframe/postgres table: (Indexes: card_id, orders, entity)
#
# | card_id | paragraph_order | pop_order | part_order | entity | paragraph_type | pop_type | part_type | entity_pos | entity_head | main_verb_of_path |
# | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
# | a2fh34 | 0 | 1 | 1 | TYPE: Instant | activated | effect | intensifier | pobj | for | destroy |
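#
# As a rough illustration, a hypothetical row (values are made up, not real
# card data) in the shape the pipeline below produces:
#
#     example_row = {
#         "card_id": "a2fh34", "paragraph_order": 0, "pop_order": 1,
#         "part_order": 1, "entity_node_entity": "TYPE: Instant",
#         "pop_type": "effect", "entity_pos": "pobj", "entity_head": "for",
#     }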
from mtgnlp import config
import networkx as nx
import datetime
from networkx.readwrite import json_graph
import hashlib
from collections import OrderedDict
import collections.abc
import sqlalchemy
from sqlalchemy import create_engine
from tqdm import tqdm
import json
import pandas as pd
import numpy
import re
from collections import defaultdict
from IPython.display import clear_output
import logging
logPathFileName = config.LOGS_DIR.joinpath("build_text_to_entity_graphs.log")
# create logger'
logger = logging.getLogger("e_build_text_to_entity_graphs")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{logPathFileName}", mode="w")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# This is for jupyter notebook
# from tqdm.notebook import tqdm_notebook
# tqdm_notebook.pandas()
# This is for terminal
tqdm.pandas(desc="Progress")
# # Params
engine = create_engine(config.DB_STR)
logger.info("Logging to get line")
engine.connect()
# # Helping functions
# + code_folding=[0]
# Split dataframelist
def splitDataFrameList(df, target_column, separator=None):
"""
https://gist.github.com/jlln/338b4b0b55bd6984f883
df = dataframe to split,
target_column = the column containing the values to split
    separator = unused here (the target column already holds iterables); kept for signature compatibility with the original gist
returns: a dataframe with each entry for the target column separated, with each element moved into a new row.
The values in the other columns are duplicated across the newly divided rows.
"""
def splitListToRows(row, row_accumulator, target_column, separator):
split_row = row[target_column] # .split(separator)
        if isinstance(split_row, collections.abc.Iterable):
for s in split_row:
new_row = row.to_dict()
new_row[target_column] = s
row_accumulator.append(new_row)
else:
new_row = row.to_dict()
new_row[target_column] = numpy.nan
row_accumulator.append(new_row)
new_rows = []
df.apply(splitListToRows, axis=1, args=(new_rows, target_column, separator))
new_df = pd.DataFrame(new_rows)
return new_df
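# Illustrative usage sketch (hypothetical data, not part of the flow):
# splitDataFrameList(pd.DataFrame({"k": [1], "v": [["a", "b"]]}), "v")
# yields two rows, one with v == "a" and one with v == "b", both with k == 1;
# a non-iterable cell instead yields a single row with v == NaN.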
# + code_folding=[0]
# Create hashable dict
class HashableDict(OrderedDict):
def __hash__(self):
return hash(tuple(sorted(self.items())))
def hexdigext(self):
return hashlib.sha256(
"".join([str(k) + str(v) for k, v in self.items()]).encode()
).hexdigest()
# + code_folding=[0]
# Make defaultdict which depends on its key
# Source: https://www.reddit.com/r/Python/comments/27crqg/making_defaultdict_create_defaults_that_are_a/
class key_dependent_dict(defaultdict):
def __init__(self, f_of_x):
super().__init__(None) # base class doesn't get a factory
self.f_of_x = f_of_x # save f(x)
def __missing__(self, key): # called when a default needed
ret = self.f_of_x(key) # calculate default value
self[key] = ret # and install it in the dict
return ret
def entity_key_hash(key):
return HashableDict({"entity": key}).hexdigext()
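# e.g. hashes = key_dependent_dict(entity_key_hash); hashes["MANA"] computes
# the entity hash on first access and caches it for later lookups.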
# + code_folding=[0]
# function to draw a graph to png
shapes = [
"box",
"polygon",
"ellipse",
"oval",
"circle",
"egg",
"triangle",
"exagon",
"star",
]
colors = ["blue", "black", "red", "#db8625", "green", "gray", "cyan", "#ed125b"]
styles = ["filled", "rounded", "rounded, filled", "dashed", "dotted, bold"]
entities_colors = {
"PLAYER": "#FF6E6E",
"ZONE": "#F5D300",
"ACTION": "#1ADA00",
"MANA": "#00DA84",
"SUBTYPE": "#0DE5E5",
"TYPE": "#0513F0",
"SUPERTYPE": "#8D0BCA",
"ABILITY": "#cc3300",
"COLOR": "#666633",
"STEP": "#E0E0F8",
}
def draw_graph(G, filename="test.png"):
pdot = nx.drawing.nx_pydot.to_pydot(G)
for i, node in enumerate(pdot.get_nodes()):
attrs = node.get_attributes()
node.set_label(str(attrs.get("label", "none")))
# node.set_fontcolor(colors[random.randrange(len(colors))])
entity_node_ent_type = attrs.get("entity_node_ent_type", numpy.nan)
if not pd.isnull(entity_node_ent_type):
color = entities_colors[entity_node_ent_type.strip('"')]
node.set_fillcolor(color)
node.set_color(color)
node.set_shape("hexagon")
# node.set_colorscheme()
node.set_style("filled")
node_type = attrs.get("type", None)
if node_type == '"card"':
color = "#999966"
node.set_fillcolor(color)
# node.set_color(color)
node.set_shape("star")
# node.set_colorscheme()
node.set_style("filled")
#
# pass
for i, edge in enumerate(pdot.get_edges()):
att = edge.get_attributes()
att = att.get("label", "NO-LABEL")
edge.set_label(att)
# edge.set_fontcolor(colors[random.randrange(len(colors))])
# edge.set_style(styles[random.randrange(len(styles))])
# edge.set_color(colors[random.randrange(len(colors))])
png_path = filename
pdot.write_png(png_path)
from IPython.display import Image
return Image(png_path)
# -
# # Build graph with Networkx
# ### Get paths from cards_text to entities (simple paths from text -> entities)
# + deletable=false editable=false run_control={"frozen": true}
with engine.connect() as con:
try:
con.execute(f"""DROP TABLE public."{config.CARDS_JSON_TNAME}" """)
except Exception as e:
pass
con.execute(
f"""CREATE TABLE public."{config.CARDS_JSON_TNAME}" AS
(SELECT * FROM public."{config.CARDS_JSON_TNAME}_temp")"""
)
# +
table_name = config.CARDS_JSON_TNAME
to_table_name = config.CARDS_TEXT_TO_ENTITY_SIMPLE_PATHS_TNAME
chunk_size = 200
all_ids = pd.read_sql_query(
"SELECT card_id from {0}".format(table_name),
engine,
)
chunks = [
all_ids.iloc[all_ids.index[i : i + chunk_size]]
for i in range(0, all_ids.shape[0], chunk_size)
]
# + code_folding=[]
def get_df_for_subgraphs_of_paths_from_card_to_entities(row):
"""INPUT: a row with outgoing graph of card
RETURNs: a dataframe with each row corresponding to a path from text to entity"""
if not row["outgoing"]:
        # return an empty frame so the pd.concat over the resulting series still works
        return pd.DataFrame()
G = json_graph.node_link_graph(json.loads(row["outgoing"]))
card_nodes = [x for x, y in G.nodes(data=True) if y["type"] == "card"]
entity_nodes = [x for x, y in G.nodes(data=True) if y["type"] == "entity"]
assert len(card_nodes) == 1
new_rows = []
for entity_node in entity_nodes:
paths_list = nx.all_simple_paths(G, card_nodes[0], entity_node)
for path in paths_list:
graph_row = {}
path_g = G.subgraph(path)
graph_row["card_id"] = row["card_id"]
graph_row["path_graph_json"] = json.dumps(json_graph.node_link_data(path_g))
graph_row["part"] = G.nodes[path[1]]["part"]
has_add = re.findall(r"add ", str(graph_row["part"]), flags=re.IGNORECASE)
graph_row["has_add"] = True if has_add else False
# Text type and orders
# graph_row['paragraph_type'] = G.node[path[1]]['paragraph_type']
graph_row["paragraph_order"] = G.nodes[path[1]]["paragraph_order"]
graph_row["pop_type"] = G.nodes[path[1]]["pop_type"]
graph_row["pop_order"] = G.nodes[path[1]]["pop_order"]
graph_row["part_type"] = G.nodes[path[1]]["part_type"]
graph_row["part_order"] = G.nodes[path[1]]["part_order"]
graph_row["path_text_key"] = (
graph_row["card_id"]
+ "-"
+ str(int(graph_row["paragraph_order"]))
+ "-"
+ str(int(graph_row["pop_order"]))
+ "-"
+ str(int(graph_row["part_order"]))
)
# Entities info
graph_row["entity_node_entity"] = G.nodes[path[-1]]["entity_node_entity"]
graph_row["entity_node_ent_type"] = G.nodes[path[-1]][
"entity_node_ent_type"
]
graph_row["entity_node_desc"] = G.nodes[path[-1]]["entity_node_desc"]
graph_row["entity_node_lemma"] = G.nodes[path[-1]]["entity_node_lemma"]
graph_row["path_pk"] = (
graph_row["path_text_key"] + "-" + graph_row["entity_node_entity"]
)
graph_row["entity_pos"] = G.nodes[path[-2]]["token_node_pos"]
graph_row["entity_tag"] = G.nodes[path[-2]]["token_node_tag"]
graph_row["entity_head_dep"] = G.nodes[path[-2]]["token_head_dep"]
# Entities head info
if G.nodes[path[-3]]["type"] == "token":
graph_row["entity_head"] = G.nodes[path[-3]]["token_node_text"]
graph_row["entity_head_tag"] = G.nodes[path[-3]]["token_node_tag"]
graph_row["entity_head_head_dep"] = G.nodes[path[-2]]["token_head_dep"]
graph_row["entity_head_pos"] = G.nodes[path[-2]]["token_node_pos"]
# Append row
new_rows.append(graph_row)
return pd.DataFrame(new_rows)
# + code_folding=[0] deletable=false editable=false run_control={"frozen": true}
# # Testing dataframe composition
# row = df.iloc[1]
# G = json_graph.node_link_graph(json.loads(row['outgoing']))
# card_nodes = [x for x,y in G.nodes(data=True) if y['type']=='card']
# entity_nodes = [x for x,y in G.nodes(data=True) if y['type']=='entity']
# assert len(card_nodes) == 1
#
# new_rows = []
# for entity_node in entity_nodes:
# paths_list = nx.all_simple_paths(G, card_nodes[0], entity_node)
# for path in paths_list:
# graph_row = {}
# path_g = G.subgraph(path)
#
# graph_row['card_id'] = row['card_id']
# graph_row['path_graph_json'] = json.dumps(json_graph.node_link_data(path_g))
# graph_row['part'] = G.node[path[1]]['part']
#
# # Text type and orders
# # graph_row['paragraph_type'] = G.node[path[1]]['paragraph_type']
# graph_row['paragraph_order'] = G.node[path[1]]['paragraph_order']
# graph_row['pop_type'] = G.node[path[1]]['pop_type']
# graph_row['pop_order'] = G.node[path[1]]['pop_order']
# graph_row['part_type'] = G.node[path[1]]['part_type']
# graph_row['part_order'] = G.node[path[1]]['part_order']
#
# # Entities info
# graph_row['entity_node_entity'] = G.node[path[-1]]['entity_node_entity']
# graph_row['entity_node_ent_type'] = G.node[path[-1]]['entity_node_ent_type']
# graph_row['entity_node_desc'] = G.node[path[-1]]['entity_node_desc']
#
# graph_row['entity_pos'] = G.node[path[-2]]['token_node_pos']
# graph_row['entity_tag'] = G.node[path[-2]]['token_node_tag']
# graph_row['entity_head_dep'] = G.node[path[-2]]['token_head_dep']
#
# # Entities head info
# if G.node[path[-3]]['type'] == 'token':
# graph_row['entity_head'] = G.node[path[-3]]['token_node_text']
# graph_row['entity_head_tag'] = G.node[path[-3]]['token_node_tag']
# graph_row['entity_head_head_dep'] = G.node[path[-2]]['token_head_dep']
# graph_row['entity_head_pos'] = G.node[path[-2]]['token_node_pos']
#
#
#
# # Append row
# new_rows.append(graph_row)
# n = pd.DataFrame(new_rows)
# -
logger.info("Logging to get line")
df = pd.read_sql_query(
"SELECT * from {0} WHERE card_id IN ({1})".format(
table_name, ",".join(["'" + x + "'" for x in chunks[0]["card_id"]])
),
engine,
index_col="card_id",
)
# + code_folding=[]
# Iter chunks and save simple paths
start = datetime.datetime.now()
logger.info("Logging to get line")
for i, chunk in enumerate(chunks):
logger.info("df = pd.read_sql_query(")
df = pd.read_sql_query(
"SELECT * from {0} WHERE card_id IN ({1})".format(
table_name, ",".join(["'" + x + "'" for x in chunk["card_id"]])
),
engine,
)
logger.info("paths_series = df.progress_apply(")
paths_series = df.progress_apply(
get_df_for_subgraphs_of_paths_from_card_to_entities, axis="columns"
)
# Drop these ids to append them again
# DROP_QUERY = ('DELETE FROM {0} WHERE card_id IN ({1})'.
# format(table_name, ','.join(["'"+x+"'" for x in df.index]))
# )
# print(engine.execute(DROP_QUERY))
# Create columns if not exists
# NEW_QUERY = '''
# ALTER TABLE {0} ADD COLUMN {1} json;
# '''.format(table_name, 'list_of_subgraphs_of_paths_from_card_to_entities')
# try:
# print(engine.execute(NEW_QUERY))
# except sqlalchemy.exc.ProgrammingError:
# # Just ignore, it alredy exists
# pass
logger.info("Concatenating")
df = (
pd.concat(paths_series.values, sort=False)
.reset_index(drop=True)
.set_index(
[
"card_id",
"paragraph_order",
"pop_order",
"part_order",
"entity_node_entity",
]
)
)
method = "append" if i else "replace"
df.to_sql(
to_table_name,
engine,
if_exists=method,
dtype={"path_graph_json": sqlalchemy.types.JSON},
)
logger.info(
"Chunk {0}/{1} ELAPSED: {2}".format(
i, len(chunks), datetime.datetime.now() - start
)
)
logger.info("Export finished")
if not i % 15:
clear_output()
# + code_folding=[0] deletable=false editable=false hideCode=false run_control={"frozen": true}
# # Testing
# card_id = some_ids.iloc[0]
# G = json_graph.node_link_graph(json.loads(df.loc[card_id, 'outgoing']))
# card_nodes = [x for x,y in G.nodes(data=True) if y['type']=='card']
# entity_nodes = [x for x,y in G.nodes(data=True) if y['type']=='entity']
# assert len(card_nodes) == 1
#
# paths = []
# for entity_node in entity_nodes:
# paths_list = nx.all_simple_paths(G, card_nodes[0], entity_node)
# a = [json_graph.node_link_data(G.subgraph(path)) for path in paths_list]
# paths.extend(a)
#
# json.dumps(paths)
# + deletable=false editable=false run_control={"frozen": true}
# test = pd.read_sql_query('SELECT * from {0}'.
# format(to_table_name),
# engine,
# index_col=['card_id', 'paragraph_order', 'pop_order', 'part_order', 'entity_node_entity'])
# test
# -
logger.info(f"FINISHED: {__file__}")
|
{"hexsha": "99784dadc165ed2ccb76f49a1f54031e5f2f910b", "size": 16220, "ext": "py", "lang": "Python", "max_stars_repo_path": "mtgnlp/flows/build_text_to_entity_graphs.py", "max_stars_repo_name": "pedrovgp/mtg-nltk", "max_stars_repo_head_hexsha": "aeda27914236b5b82fa9c0b7d9a9482031f0fe69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mtgnlp/flows/build_text_to_entity_graphs.py", "max_issues_repo_name": "pedrovgp/mtg-nltk", "max_issues_repo_head_hexsha": "aeda27914236b5b82fa9c0b7d9a9482031f0fe69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-17T13:07:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-17T13:07:39.000Z", "max_forks_repo_path": "mtgnlp/flows/build_text_to_entity_graphs.py", "max_forks_repo_name": "pedrovgp/mtg-nltk", "max_forks_repo_head_hexsha": "aeda27914236b5b82fa9c0b7d9a9482031f0fe69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4432989691, "max_line_length": 152, "alphanum_fraction": 0.618865598, "include": true, "reason": "import numpy,import networkx,from networkx", "num_tokens": 4081}
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import numpy as np
import random
download_dir = "/tmp/"
import os
import urllib.request
def check_exist_or_download(url):
''' download data into tmp '''
name = url.rsplit('/', 1)[-1]
filename = os.path.join(download_dir, name)
if not os.path.isfile(filename):
print("Downloading %s" % url)
urllib.request.urlretrieve(url, filename)
return filename
def unzip_data(download_dir, data_zip):
data_dir = download_dir + "insuranceQA-master/V2/"
if not os.path.exists(data_dir):
print("extracting %s to %s" % (download_dir, data_dir))
from zipfile import ZipFile
with ZipFile(data_zip, 'r') as zipObj:
zipObj.extractall(download_dir)
return data_dir
def get_label2answer(data_dir):
import gzip
label2answer = dict()
with gzip.open(data_dir +
"/InsuranceQA.label2answer.token.encoded.gz") as fin:
for line in fin:
pair = line.decode().strip().split("\t")
idxs = pair[1].split(" ")
idxs = [int(idx.replace("idx_", "")) for idx in idxs]
label2answer[int(pair[0])] = idxs
return label2answer
pad_idx = 0
pad_string = "<pad>"
pad_embed = np.zeros((300,))
insuranceqa_train_filename = "/InsuranceQA.question.anslabel.token.100.pool.solr.train.encoded.gz"
insuranceqa_test_filename = "/InsuranceQA.question.anslabel.token.100.pool.solr.test.encoded.gz"
insuranceQA_url = "https://github.com/shuzi/insuranceQA/archive/master.zip"
insuranceQA_cache_fp = download_dir + "insuranceQA_cache.pickle"
google_news_pretrain_embeddings_link = "https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz"
def get_idx2word(data_dir):
idx2word = dict()
with open(data_dir + "vocabulary", encoding="utf-8") as vc_f:
for line in vc_f:
pair = line.strip().split("\t")
idx = int(pair[0].replace("idx_", ""))
idx2word[idx] = pair[1]
# add padding string to idx2word lookup
idx2word[pad_idx] = pad_string
return idx2word
def get_train_raw(data_dir, data_filename):
''' deserialize training data file
args:
data_dir: dir of data file
return:
train_raw: list of QnA pair, length of list == number of samples,
each pair has 3 fields:
0 is question sentence idx encoded, use idx2word to decode,
idx2vec to get embedding.
1 is ans labels, each label corresponds to a ans sentence,
use label2answer to decode.
2 is top K candidate ans, these are negative ans for
training.
'''
train_raw = []
import gzip
with gzip.open(data_dir + data_filename) as fin:
for line in fin:
tpl = line.decode().strip().split("\t")
question = [
int(idx.replace("idx_", "")) for idx in tpl[1].split(" ")
]
ans = [int(label) for label in tpl[2].split(" ")]
candis = [int(label) for label in tpl[3].split(" ")]
train_raw.append((question, ans, candis))
return train_raw
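# For example (hypothetical indices), one decoded record could look like
# ([12, 7, 3], [105], [88, 641]): the encoded question, its ground-truth
# answer label(s), and the negative candidate labels.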
def limit_encode_train(train_raw, label2answer, idx2word, q_seq_limit,
ans_seq_limit, idx2vec):
    ''' prepare training data as embedded word-vector sequences, cropped/padded to the given sequence limits
return:
questions_encoded: np ndarray, shape
(number samples, seq length, vector size)
poss_encoded: same layout, sequence for positive answer
negs_encoded: same layout, sequence for negative answer
'''
questions = [question for question, answers, candis in train_raw]
# choose 1 answer from answer pool
poss = [
label2answer[random.choice(answers)]
for question, answers, candis in train_raw
]
# choose 1 candidate from candidate pool
negs = [
label2answer[random.choice(candis)]
for question, answers, candis in train_raw
]
# filtered word not in idx2vec
questions_filtered = [
[idx for idx in q if idx in idx2vec] for q in questions
]
poss_filtered = [[idx for idx in ans if idx in idx2vec] for ans in poss]
negs_filtered = [[idx for idx in ans if idx in idx2vec] for ans in negs]
# crop to seq limit
questions_crop = [
q[:q_seq_limit] + [0] * max(0, q_seq_limit - len(q))
for q in questions_filtered
]
poss_crop = [
ans[:ans_seq_limit] + [0] * max(0, ans_seq_limit - len(ans))
for ans in poss_filtered
]
negs_crop = [
ans[:ans_seq_limit] + [0] * max(0, ans_seq_limit - len(ans))
for ans in negs_filtered
]
# encoded, word idx to word vector
questions_encoded = [[idx2vec[idx] for idx in q] for q in questions_crop]
poss_encoded = [[idx2vec[idx] for idx in ans] for ans in poss_crop]
negs_encoded = [[idx2vec[idx] for idx in ans] for ans in negs_crop]
# make nd array
questions_encoded = np.array(questions_encoded).astype(np.float32)
poss_encoded = np.array(poss_encoded).astype(np.float32)
negs_encoded = np.array(negs_encoded).astype(np.float32)
return questions_encoded, poss_encoded, negs_encoded
def get_idx2vec_weights(wv, idx2word):
idx2vec = {k: wv[v] for k, v in idx2word.items() if v in wv}
# add padding embedding (all zeros) to idx2vec lookup
idx2vec[pad_idx] = pad_embed
return idx2vec
def prepare_data(use_cache=True):
import pickle
if not os.path.isfile(insuranceQA_cache_fp) or not use_cache:
# no cache is found, preprocess data from scratch
print("prepare data from scratch")
# get pretained word vector
from gensim.models.keyedvectors import KeyedVectors
google_news_pretrain_fp = check_exist_or_download(
google_news_pretrain_embeddings_link)
wv = KeyedVectors.load_word2vec_format(google_news_pretrain_fp,
binary=True)
# prepare insurance QA dataset
data_zip = check_exist_or_download(insuranceQA_url)
data_dir = unzip_data(download_dir, data_zip)
label2answer = get_label2answer(data_dir)
idx2word = get_idx2word(data_dir)
idx2vec = get_idx2vec_weights(wv, idx2word)
train_raw = get_train_raw(data_dir, insuranceqa_train_filename)
test_raw = get_train_raw(data_dir, insuranceqa_test_filename)
with open(insuranceQA_cache_fp, 'wb') as handle:
pickle.dump((train_raw, test_raw, label2answer, idx2word, idx2vec),
handle,
protocol=pickle.HIGHEST_PROTOCOL)
else:
# load from cached pickle
with open(insuranceQA_cache_fp, 'rb') as handle:
(train_raw, test_raw, label2answer, idx2word,
idx2vec) = pickle.load(handle)
return train_raw, test_raw, label2answer, idx2word, idx2vec
def limit_encode_eval(train_raw,
label2answer,
idx2word,
q_seq_limit,
ans_seq_limit,
idx2vec,
top_k_candi_limit=6):
    ''' prepare evaluation data as embedded word-vector sequences, cropped/padded to the given sequence limits
return:
questions_encoded: np ndarray, shape
(number samples, seq length, vector size)
poss_encoded: same layout, sequence for positive answer
negs_encoded: same layout, sequence for negative answer
'''
questions = [question for question, answers, candis in train_raw]
    # combine ground-truth and candidate answer labels, then crop to the top-K pool
candi_pools = [
list(answers + candis)[:top_k_candi_limit]
for question, answers, candis in train_raw
]
assert all([len(pool) == top_k_candi_limit for pool in candi_pools])
ans_count = [len(answers) for question, answers, candis in train_raw]
assert all([c > 0 for c in ans_count])
# encode ans
candi_pools_encoded = [[label2answer[candi_label]
for candi_label in pool]
for pool in candi_pools]
# filtered word not in idx2vec
questions_filtered = [
[idx for idx in q if idx in idx2vec] for q in questions
]
candi_pools_filtered = [[[idx
for idx in candi_encoded
if idx in idx2vec]
for candi_encoded in pool]
for pool in candi_pools_encoded]
# crop to seq limit
questions_crop = [
q[:q_seq_limit] + [0] * max(0, q_seq_limit - len(q))
for q in questions_filtered
]
candi_pools_crop = [[
candi[:ans_seq_limit] + [0] * max(0, ans_seq_limit - len(candi))
for candi in pool
]
for pool in candi_pools_filtered]
# encoded, word idx to word vector
questions_encoded = [[idx2vec[idx] for idx in q] for q in questions_crop]
candi_pools_encoded = [[[idx2vec[idx]
for idx in candi]
for candi in pool]
for pool in candi_pools_crop]
questions_encoded = np.array(questions_encoded).astype(np.float32)
candi_pools_encoded = np.array(candi_pools_encoded).astype(np.float32)
# candi_pools_encoded shape
# (number of sample QnA,
# number of candi in pool,
# number of sequence word idx per candi,
# 300 word embedding for 1 word idx)
# e.g 10 QnA to test
# 5 each question has 5 possible ans
# 8 each ans has 8 words
# 300 each word has vector size 300
return questions_encoded, candi_pools_encoded, ans_count
|
{"hexsha": "4494855dfbb76d5e2c2c34205dab99147a1b64ff", "size": 10579, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/qabot/qabot_data.py", "max_stars_repo_name": "XinChCh/singa", "max_stars_repo_head_hexsha": "93fd9da72694e68bfe3fb29d0183a65263d238a1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2354, "max_stars_repo_stars_event_min_datetime": "2015-05-05T03:01:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-22T15:08:11.000Z", "max_issues_repo_path": "examples/qabot/qabot_data.py", "max_issues_repo_name": "Dadaguaibuhaoyisi/singa", "max_issues_repo_head_hexsha": "93fd9da72694e68bfe3fb29d0183a65263d238a1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 332, "max_issues_repo_issues_event_min_datetime": "2019-10-24T15:06:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T06:22:32.000Z", "max_forks_repo_path": "examples/qabot/qabot_data.py", "max_forks_repo_name": "zlheui/singa", "max_forks_repo_head_hexsha": "ced9e9d44c200d709db5a2354076390788986b77", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 607, "max_forks_repo_forks_event_min_datetime": "2015-05-03T14:09:05.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-21T09:49:21.000Z", "avg_line_length": 37.3816254417, "max_line_length": 121, "alphanum_fraction": 0.63370829, "include": true, "reason": "import numpy", "num_tokens": 2533}
|
C ******************************************************************
C ******************************************************************
subroutine newtd(nind,x,l,u,g,m,rho,equatn,d,adsupn,maxelem,
+memfail,inform)
implicit none
C SCALAR ARGUMENTS
logical memfail
integer inform,nind,m
double precision adsupn,maxelem
C ARRAY ARGUMENTS
logical equatn(m)
double precision d(nind),g(*),l(*),rho(m),u(*),x(*)
C     This subroutine solves the Newtonian system
C
C             ( H + rho A^T A ) x = b
C
C     by solving the equivalent Martinez-Santos system
C
C             H x + A^T y = b
C             A x - y/rho = 0
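C
C     (Eliminating y: the second equation gives y = rho A x, and
C     substituting into the first recovers ( H + rho A^T A ) x = b,
C     so both systems have the same solution x.)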
include "dim.par"
include "machconst.inc"
include "algparam.inc"
include "outtyp.inc"
include "itetyp.inc"
C PARAMETERS
integer dimmax
parameter ( dimmax = nmax + mmax )
C COMMON ARRAYS
double precision mindiag(nmax)
C LOCAL SCALARS
logical adddiff
integer dim,hnnz,i,iter,lssinfo,nneigv,pind
double precision diff,pval
C LOCAL ARRAYS
integer hdiag(dimmax),hlin(hnnzmax),hcol(hnnzmax)
double precision adddiag(dimmax),adddiagprev(dimmax),
+ hval(hnnzmax),sol(dimmax)
C COMMON BLOCKS
common /diadat/ mindiag
save /diadat/
C ------------------------------------------------------------------
C Presentation
C ------------------------------------------------------------------
if ( iprintinn .ge. 5 ) then
C write(* ,1000)
write(10,1000)
end if
C ------------------------------------------------------------------
C Initialization
C ------------------------------------------------------------------
iter = 0
memfail = .false.
call lssini(sclsys,.true.,.false.)
C ------------------------------------------------------------------
C Compute ML matrix
C ------------------------------------------------------------------
call mlsyst(nind,x,g,m,rho,equatn,hlin,hcol,hval,hnnz,hdiag,sol,
+dim,inform)
if ( inform .lt. 0 ) return
maxelem = 0.0d0
do i = 1,hnnz
maxelem = max( maxelem, abs( hval(i) ) )
end do
C ------------------------------------------------------------------
C Analyse sparsity pattern
C ------------------------------------------------------------------
call lssana(dim,hnnz,hlin,hcol,hval,hdiag,lssinfo)
if ( lssinfo .eq. 6 ) then
! INSUFFICIENT SPACE TO STORE THE LINEAR SYSTEM
memfail = .true.
return
end if
C ------------------------------------------------------------------
C Main loop
C ------------------------------------------------------------------
100 continue
iter = iter + 1
C ------------------------------------------------------------------
C Compute regularization
C ------------------------------------------------------------------
if ( iter .eq. 1 ) then
if ( sameface .and. ittype .eq. 3 ) then
do i = 1,nind
mindiag(i) = 0.1d0 * mindiag(i)
end do
else
do i = 1,nind
if ( g(i) .eq. 0.0d0 ) then
mindiag(i) = 0.0d0
else
if ( g(i) .gt. 0.0d0 ) then
diff = x(i) - l(i)
else if ( g(i) .lt. 0.0d0 ) then
diff = u(i) - x(i)
end if
mindiag(i) = abs( g(i) / diff )
end if
end do
end if
do i = 1,nind
adddiag(i) = max( macheps23, mindiag(i) - hval(hdiag(i)) )
end do
else
do i = 1,nind
adddiagprev(i) = adddiag(i)
end do
110 continue
do i = 1,nind
if ( mindiag(i) .eq. 0.0d0 ) then
mindiag(i) = macheps23
else
mindiag(i) = 10.0d0 * mindiag(i)
end if
end do
do i = 1,nind
adddiag(i) = max( macheps23, mindiag(i) - hval(hdiag(i)) )
end do
adddiff = .false.
do i = 1,nind
if ( adddiag(i) .gt. adddiagprev(i) ) then
adddiff = .true.
end if
end do
if ( .not. adddiff ) then
go to 110
end if
end if
do i = nind + 1,dim
adddiag(i) = 0.0d0
end do
adsupn = 0.0d0
do i = 1,dim
adsupn = max( adsupn, adddiag(i) )
end do
if ( iprintinn .ge. 5 ) then
C write(* ,1010) adsupn
write(10,1010) adsupn
end if
C ------------------------------------------------------------------
C Factorize matrix
C ------------------------------------------------------------------
call lssfac(dim,hnnz,hlin,hcol,hval,hdiag,adddiag,pind,pval,
+nneigv,lssinfo)
if ( lssinfo .eq. 0 .or. lssinfo .eq. 1 ) then
if ( nneigv .ne. dim - nind ) then
C ! WRONG INERTIA (SEE NOCEDAL AND WRIGHT)
C Lemma 16.3 [pg. 447]: Assume that the Jacobian of the
C constraints has full rank and that the reduced Hessian
C Z^T H Z is positive definite. Then the Jacobian of the
C KKT system has n positive eigenvalues, m negative
C eigenvalues, and no zero eigenvalues.
C Note that at this point we know that the matrix has no
C zero eigenvalues. nneigv gives the number of negative
C eigenvalues.
if ( iprintinn .ge. 5 ) then
C write(* ,1020) nneigv,dim - nind
write(10,1020) nneigv,dim - nind
end if
go to 100
else
if ( iprintinn .ge. 5 ) then
C write(* ,1030)
write(10,1030)
end if
end if
else if ( lssinfo .eq. 2 ) then
! SINGULAR JACOBIAN
if ( iprintinn .ge. 5 ) then
C write(* ,1040)
write(10,1040)
end if
go to 100
else if ( lssinfo .eq. 6 ) then
! INSUFFICIENT SPACE TO STORE THE LINEAR SYSTEM
memfail = .true.
return
else if ( lssinfo .eq. 7 ) then
! INSUFFICIENT DOUBLE PRECISION WORKING SPACE
memfail = .true.
return
else ! if ( lssinfo .eq. 8 ) then
! INSUFFICIENT INTEGER WORKING SPACE
memfail = .true.
return
end if
C ------------------------------------------------------------------
C Solve
C ------------------------------------------------------------------
call lsssol(dim,sol)
do i = 1,nind
d(i) = sol(i)
end do
if ( iprintinn .ge. 5 .and. nprint .ne. 0 ) then
C write(*, 1050) min0(nind,nprint),(d(i),i=1,min0(nind,nprint))
write(10,1050) min0(nind,nprint),(d(i),i=1,min0(nind,nprint))
end if
C NON-EXECUTABLE STATEMENTS
1000 format(/,5X,'Sparse factorization of the ML system.')
1010 format( 5X,'Maximum value added to the diagonal: ',1P,D24.16)
1020 format( 5X,'ML-matrix with wrong inertia.',
+ /,5X,'Actual number of negative eigenvalues = ',I16,'.',
+ /,5X,'Desired number of negative eigenvalues = ',I16,'.')
1030 format( 5X,'Direct solver finished successfully.')
1040 format( 5X,'ML-matrix numerically singular.')
1050 format(/,5X,'Newton direction (first ',I7,' components): ',
+ /,1(5X,6(1X,1P,D11.4)))
end
C ******************************************************************
C ******************************************************************
subroutine mlsyst(nind,x,nal,m,rho,equatn,ulin,ucol,uval,unnz,
+udiag,b,dim,inform)
implicit none
C SCALAR ARGUMENTS
integer dim,inform,m,nind,unnz
C ARRAY ARGUMENTS
logical equatn(m)
integer ucol(*),udiag(*),ulin(*)
double precision b(*),nal(nind),rho(m),uval(*),x(*)
include "dim.par"
include "graddat.inc"
include "rspace.inc"
C LOCAL SCALARS
integer col,i,j,k,lin,var
C LOCAL ARRAYS
integer wi(nmax)
C This subroutine is called from the reduced space.
C MATRIX
C Compute Hessian of the Lagrangian
do i = 1,nt - nind
x(nind+i) = xcomplement(i)
end do
call expand(nind,x)
call sevalhl(nt,x,m,dpdc,ulin,ucol,uval,unnz,inform)
if ( inform .lt. 0 ) return
call shrink(nind,x)
C Preparation for shrink (wi indicates, for each free variable x_i,
C its rank within the set of free variables. wi(i)=0 if x_i is not
C a free variable)
do i = 1,nt
wi(i) = 0
end do
do i = 1,nind
wi(ind(i)) = i
end do
C Shrink Hessian of the Lagrangian and set diagonal-elements indices
do i = 1,nind
udiag(i) = 0
end do
k = 0
do i = 1,unnz
lin = wi(ulin(i))
col = wi(ucol(i))
if ( lin .ne. 0 .and. col .ne. 0 ) then
k = k + 1
ulin(k) = lin
ucol(k) = col
uval(k) = uval(i)
if ( lin .eq. col ) then
udiag(lin) = k
end if
end if
end do
do i = 1,nind
if ( udiag(i) .eq. 0 ) then
k = k + 1
ulin(k) = i
ucol(k) = i
uval(k) = 0.0d0
udiag(i) = k
end if
end do
C Shrink Jacobian and add diagonal matrix - 1.0 / rho
dim = nind
do j = 1,m
if ( equatn(j) .or. dpdc(j) .gt. 0.0d0 ) then
dim = dim + 1
do i = jcsta(j),jcsta(j) + jclen(j) - 1
var = wi(jcvar(i))
if ( var .ne. 0 ) then
k = k + 1
ulin(k) = dim
ucol(k) = var
uval(k) = jcval(i)
end if
end do
k = k + 1
ulin(k) = dim
ucol(k) = dim
uval(k) = - 1.0d0 / rho(j)
udiag(dim) = k
end if
end do
unnz = k
C RHS
do i = 1,nind
b(i) = - nal(i)
end do
do i = nind + 1,dim
b(i) = 0.0d0
end do
end
|
{"hexsha": "fc8b52604a4814ee848e7ce80673a4e274a70b54", "size": 10574, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyALGENCAN/source/newtd.f", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyALGENCAN/source/newtd.f", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyALGENCAN/source/newtd.f", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3573141487, "max_line_length": 72, "alphanum_fraction": 0.4230187252, "num_tokens": 2910}
|
chapter \<open>Abstract Formulation of Gödel's Second Incompleteness Theorem\<close>
(*<*)
theory Abstract_Second_Goedel imports Abstract_First_Goedel Derivability_Conditions
begin
(*>*)
text \<open>We assume all three derivability conditions, together with the
assumptions behind the Gödel formulas:\<close>
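text \<open>For orientation, an informal gloss of the three Hilbert-Bernays-Löb
derivability conditions assumed below (it deliberately elides the distinction
between @{text prv} and @{text bprv} that the locale maintains):
\[
\begin{array}{ll}
\mathrm{HBL1:} & \vdash \varphi \;\Longrightarrow\; \vdash P\langle\varphi\rangle \\
\mathrm{HBL2:} & \vdash P\langle\varphi \rightarrow \psi\rangle \wedge P\langle\varphi\rangle \longrightarrow P\langle\psi\rangle \\
\mathrm{HBL3:} & \vdash P\langle\varphi\rangle \longrightarrow P\langle P\langle\varphi\rangle\rangle
\end{array}
\]\<close>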
locale Goedel_Second_Assumptions =
HBL1_2_3
var trm fmla Var FvarsT substT Fvars subst
num
eql cnj imp all exi
prv bprv
enc
P
+
Goedel_Form
var trm fmla Var num FvarsT substT Fvars subst
eql cnj imp all exi
fls
prv bprv
enc
S
P
for
var :: "'var set" and trm :: "'trm set" and fmla :: "'fmla set"
and Var FvarsT substT Fvars subst
and num
and eql cnj imp all exi
and prv bprv
and enc ("\<langle>_\<rangle>")
and S
and P
and fls
begin
lemma P_G:
"bprv (imp (PP \<langle>\<phi>G\<rangle>) (PP \<langle>fls\<rangle>))"
proof-
have 0: "prv (imp \<phi>G (neg (PP \<langle>\<phi>G\<rangle>)))"
using prv_\<phi>G_eqv by (intro prv_imp_eqvEL) auto
have 1: "bprv (PP \<langle>imp \<phi>G (neg (PP \<langle>\<phi>G\<rangle>))\<rangle>)"
using HBL1_PP[OF _ _ 0] by simp
have 2: "bprv (imp (PP \<langle>\<phi>G\<rangle>) (PP \<langle>neg (PP \<langle>\<phi>G\<rangle>)\<rangle>))"
using HBL2_imp2[OF _ _ _ _ 1] by simp
have 3: "bprv (imp (PP \<langle>\<phi>G\<rangle>) (PP \<langle>PP \<langle>\<phi>G\<rangle>\<rangle>))"
using HBL3[OF \<phi>G] by simp
have 23: "bprv (imp (PP \<langle>\<phi>G\<rangle>)
(cnj (PP \<langle>PP \<langle>\<phi>G\<rangle>\<rangle>)
(PP \<langle>neg (PP \<langle>\<phi>G\<rangle>)\<rangle>)))"
using B.prv_imp_cnj[OF _ _ _ 3 2] by simp
have 4: "bprv (imp (cnj (PP \<langle>PP \<langle>\<phi>G\<rangle>\<rangle>)
(PP \<langle>neg (PP \<langle>\<phi>G\<rangle>)\<rangle>))
(PP \<langle>fls\<rangle>))"
using HBL2[of "PP \<langle>\<phi>G\<rangle>" fls] unfolding neg_def[symmetric] by simp
show ?thesis using B.prv_prv_imp_trans[OF _ _ _ 23 4] by simp
qed
text \<open>First the "direct", positive formulation:\<close>
lemma goedel_second_pos:
assumes "prv (neg (PP \<langle>fls\<rangle>))"
shows "prv fls"
proof-
note PG = bprv_prv[OF _ _ P_G, simplified]
have "prv (neg (PP \<langle>\<phi>G\<rangle>))"
using PG assms unfolding neg_def by (rule prv_prv_imp_trans[rotated 3]) auto
hence "prv \<phi>G" using prv_\<phi>G_eqv by (rule prv_eqv_prv_rev[rotated 2]) auto
thus ?thesis
\<comment>\<open>The only part of Goedel's first theorem that is needed:\<close>
using goedel_first_theEasyHalf_pos by simp
qed
text \<open>Then the more standard, contrapositive formulation:\<close>
theorem goedel_second:
"consistent \<Longrightarrow> \<not> prv (neg (PP \<langle>fls\<rangle>))"
using goedel_second_pos unfolding consistent_def by auto
text \<open>It is an immediate consequence of Gödel's Second, HBL1 and HBL2 that
(assuming consistency) @{term "prv (neg (PP \<langle>\<phi>\<rangle>))"} holds for no sentence, be it
provable or not. The theory is omniscient about what it can prove
(thanks to HBL1), but completely ignorant about what it cannot prove.\<close>
corollary not_prv_neg_PP:
assumes c: "consistent" and [simp]: "\<phi> \<in> fmla" "Fvars \<phi> = {}"
shows "\<not> prv (neg (PP \<langle>\<phi>\<rangle>))"
proof
assume 0: "prv (neg (PP \<langle>\<phi>\<rangle>))"
have "prv (imp fls \<phi>)" by simp
hence "bprv (PP \<langle>imp fls \<phi>\<rangle>)" by (intro HBL1_PP) auto
hence "bprv (imp (PP \<langle>fls\<rangle>) (PP \<langle>\<phi>\<rangle>))" by (intro HBL2_imp2) auto
hence "bprv (imp (neg (PP \<langle>\<phi>\<rangle>)) (neg (PP \<langle>fls\<rangle>)))" by (intro B.prv_imp_neg_rev) auto
from prv_imp_mp[OF _ _ bprv_prv[OF _ _ this, simplified] 0, simplified]
have "prv (neg (PP \<langle>fls\<rangle>))" .
thus False using goedel_second[OF c] by simp
qed
end \<comment> \<open>context @{locale Goedel_Second_Assumptions}\<close>
(*<*)
end
(*>*)
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Goedel_Incompleteness/Abstract_Second_Goedel.thy"}
|
\subsection{Bundle Protocol Agent Interface}
The bundle protocol agent interface will use the current DTN2 application
interface. We seek feedback on any additional features that are required
for this interface.
In addition, MITRE is developing an XML-based interface that conforms
to the ICCP. The interface being developed is expected to support the
same primitives as are found in the current RPC-based interface in DTN2.
We expect the XML-based BPA interface to be included in a future version
of this document.
|
{"hexsha": "b30722e711e52b9d3b6761000c1421572ce94991", "size": 528, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/plugin-architecture/bpa-interface.tex", "max_stars_repo_name": "delay-tolerant-networking/DTN2", "max_stars_repo_head_hexsha": "1c12a9dea32c5cbae8c46db105012a2031f4161e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-06-27T19:28:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T20:41:17.000Z", "max_issues_repo_path": "doc/plugin-architecture/bpa-interface.tex", "max_issues_repo_name": "delay-tolerant-networking/DTN2", "max_issues_repo_head_hexsha": "1c12a9dea32c5cbae8c46db105012a2031f4161e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/plugin-architecture/bpa-interface.tex", "max_forks_repo_name": "delay-tolerant-networking/DTN2", "max_forks_repo_head_hexsha": "1c12a9dea32c5cbae8c46db105012a2031f4161e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-23T11:07:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-28T20:41:24.000Z", "avg_line_length": 40.6153846154, "max_line_length": 74, "alphanum_fraction": 0.8049242424, "num_tokens": 107}
|
# pylint: disable=abstract-method, no-self-use, useless-super-delegation, too-many-lines, duplicate-code
"""
Modules defining isotropic and kinematic hardening models.
These models provide:
1. A set of internal variables
2. The evolution equations defining each of those variables
3. A map between the internal variables and the actual value of
isotropic/kinematic hardening used in the
:py:class:`pyoptmat.flowrules.FlowRule`
4. The derivative of that map with respect to the internal variables
"""
import numpy as np
import torch
from torch import nn
from pyoptmat import temperature
class HardeningModel(nn.Module):
"""
Superclass for all hardening models. Right now this does nothing, but
could be a basis for future expansion.
"""
def __init__(self):
super().__init__()
class IsotropicHardeningModel(HardeningModel):
"""
Superclass for all isotropic hardening models. Right now this
does nothing but is here in case we need it in the future.
"""
def __init__(self):
super().__init__()
class VoceIsotropicHardeningModel(IsotropicHardeningModel):
"""
Voce isotropic hardening, defined by
.. math::
\\sigma_{iso} = h
\\dot{h} = d (R - h) \\left|\\dot{\\varepsilon}_{in}\\right|
Args:
R (|TP|): saturated increase/decrease in flow stress
d (|TP|): parameter controlling the rate of saturation
"""
def __init__(self, R, d):
super().__init__()
self.R = R
self.d = d
def value(self, h):
"""
Map from the vector of internal variables to the isotropic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the isotropic hardening value
"""
return h[:, 0]
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the derivative of the isotropic hardening value
with respect to the internal variables
"""
return torch.ones((h.shape[0], 1), device=h.device)
@property
def nhist(self):
"""
The number of internal variables: here just 1
"""
return 1
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
return torch.unsqueeze(self.d(T) * (self.R(T) - h[:, 0]) * torch.abs(ep), 1)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
return torch.unsqueeze(
-torch.unsqueeze(self.d(T), -1)
* torch.ones_like(h)
* torch.abs(ep)[:, None],
1,
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
return torch.unsqueeze(
torch.unsqueeze(self.d(T) * (self.R(T) - h[:, 0]) * torch.sign(ep), 1), 1
)
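# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercising the Voce
# model above, assuming temperature.ConstantParameter wraps a scalar tensor so
# that R(T) and d(T) broadcast over the batch.
#
#   R = temperature.ConstantParameter(torch.tensor(100.0))
#   d = temperature.ConstantParameter(torch.tensor(10.0))
#   iso = VoceIsotropicHardeningModel(R, d)
#   nbatch = 4
#   h = torch.zeros(nbatch, iso.nhist)        # internal variable starts at 0
#   s = torch.zeros(nbatch)                   # stress
#   t = torch.zeros(nbatch)                   # time
#   ep = torch.full((nbatch,), 1.0e-3)        # inelastic strain rate
#   T = torch.zeros(nbatch)                   # temperature
#   hdot = iso.history_rate(s, h, t, ep, T)   # -> shape (nbatch, 1)
# ---------------------------------------------------------------------------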
class Theta0VoceIsotropicHardeningModel(IsotropicHardeningModel):
"""
Reparameterized Voce isotropic hardening, defined by
.. math::
\\sigma_{iso} = h
\\dot{h} = \\theta_0 (1-h/\\tau) \\left|\\dot{\\varepsilon}_{in}\\right|
This gives the same response as :py:class:`pyoptmat.hardening.VoceIsotropicHardeningModel`
it just uses a different definition of the parameters
Args:
tau (|TP|): saturated increase/decrease in flow stress
theta (|TP|): initial hardening rate
"""
def __init__(self, tau, theta):
super().__init__()
self.tau = tau
self.theta = theta
def value(self, h):
"""
Map from the vector of internal variables to the isotropic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the isotropic hardening value
"""
return h[:, 0]
def dvalue(self, h):
"""
        Derivative of the map with respect to the internal variables
        Args:
          h (torch.tensor):   the vector of internal variables for this model
        Returns:
          torch.tensor:       the derivative of the isotropic hardening value
                              with respect to the internal variables
"""
return torch.ones((h.shape[0], 1), device=h.device)
@property
def nhist(self):
"""
The number of internal variables: here just 1
"""
return 1
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
return torch.unsqueeze(
self.theta(T) * (1.0 - h[:, 0] / self.tau(T)) * torch.abs(ep), 1
)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
return torch.unsqueeze(
-torch.unsqueeze(self.theta(T) / self.tau(T), -1)
* torch.ones_like(h)
* torch.abs(ep)[:, None],
1,
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
return torch.unsqueeze(
torch.unsqueeze(
self.theta(T) * (1.0 - h[:, 0] / self.tau(T)) * torch.sign(ep), 1
),
1,
)
class Theta0RecoveryVoceIsotropicHardeningModel(IsotropicHardeningModel):
# pylint: disable=line-too-long
"""
Voce isotropic hardening with static recovery, defined by
.. math::
\\sigma_{iso} = h
\\dot{h} = \\theta_0 \\left(1-\\frac{h}{\\tau}\\right) \\left|\\dot{\\varepsilon}_{in}\\right| + r_1 \\left(R_0 - h\\right) \\left| R_0 - h \\right|^{r_2 - 1}
Args:
tau (|TP|): saturated increase/decrease in flow stress
theta (|TP|): initial hardening rate
R0 (|TP|): static recovery threshold
r1 (|TP|): static recovery prefactor
r2 (|TP|): static recovery exponent
"""
def __init__(self, tau, theta, R0, r1, r2):
super().__init__()
self.tau = tau
self.theta = theta
self.R0 = R0
self.r1 = r1
self.r2 = r2
def value(self, h):
"""
Map from the vector of internal variables to the isotropic hardening
value
Args:
h: the vector of internal variables for this model
"""
return h[:, 0]
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h: the vector of internal variables for this model
"""
return torch.ones((h.shape[0], 1), device=h.device)
@property
def nhist(self):
"""
The number of internal variables: here just 1
"""
return 1
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: the temperature
"""
return torch.unsqueeze(
self.theta(T) * (1.0 - h[:, 0] / self.tau(T)) * torch.abs(ep)
+ self.r1(T)
* (self.R0(T) - h[:, 0])
* torch.abs(self.R0(T) - h[:, 0]) ** (self.r2(T) - 1.0),
1,
)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
recovery = (
self.r2(T)
* self.r1(T)
* torch.abs(self.R0(T) - h[:, 0]) ** (self.r2(T) - 1.0)
)[:, None, None]
return (
torch.unsqueeze(
-torch.unsqueeze(self.theta(T) / self.tau(T), -1)
* torch.ones_like(h)
* torch.abs(ep)[:, None],
1,
)
- recovery
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
return torch.unsqueeze(
torch.unsqueeze(
self.theta(T) * (1.0 - h[:, 0] / self.tau(T)) * torch.sign(ep), 1
),
1,
)
class KinematicHardeningModel(HardeningModel):
"""
Common superclass for kinematic hardening models
Right now this does nothing, but it's available for future expansion
"""
def __init__(self):
super().__init__()
class NoKinematicHardeningModel(KinematicHardeningModel):
"""
The simplest kinematic hardening model: a constant value of 0
"""
def __init__(self):
super().__init__()
@property
def nhist(self):
"""
The number of internal variables, here 0
"""
return 0
def value(self, h):
"""
The map between the vector of internal variables and the kinematic
hardening
Args:
h: vector of internal variables
"""
return torch.zeros(h.shape[0], device=h.device)
def dvalue(self, h):
"""
Derivative of the map to the kinematic hardening with respect to the
vector of internal variables
Args:
h: vector of internal variables
"""
return torch.zeros(h.shape[0], 0, device=h.device)
def history_rate(self, s, h, t, ep, T):
"""
The history evolution rate. Here this is an empty vector.
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: the temperature
"""
return torch.empty_like(h)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the stress.
Here this is an empty vector.
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
return torch.empty_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the history
Here this is an empty vector.
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
return torch.empty(h.shape[0], 0, 0, device=h.device)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate.
Here this is an empty vector.
Args:
s: stress
h: history
t: time
ep: the inelastic strain rate
T: temperature
"""
return torch.empty(h.shape[0], 0, 1, device=h.device)
class FAKinematicHardeningModel(KinematicHardeningModel):
# pylint: disable=line-too-long
"""
Frederick and Armstrong hardening, as defined in :cite:`frederick2007mathematical`
The kinematic hardening is equal to the single internal variable.
The variable evolves as:
.. math::
      \\dot{x}=\\frac{2}{3}C\\dot{\\varepsilon}_{in}-gx\\left|\\dot{\\varepsilon}_{in}\\right| - b\\left| x \\right|^{r-1} x
where the static recovery defaults to zero
Args:
C (|TP|): kinematic hardening parameter
g (|TP|): recovery parameter
b (optional): static recovery prefactor
r (optional): static recovery exponent
"""
def __init__(self, C, g, b=None, r=None):
super().__init__()
self.C = C
self.g = g
Cdev = self.C.device
if b is None:
b = temperature.ConstantParameter(torch.zeros(self.C.shape, device=Cdev))
if r is None:
r = temperature.ConstantParameter(torch.ones(self.C.shape, device=Cdev))
self.b = b
self.r = r
def value(self, h):
"""
Map from the vector of internal variables to the kinematic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the kinematic hardening value
"""
return h[:, 0]
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the derivative of the kinematic hardening value
with respect to the internal variables
"""
return torch.ones((h.shape[0], 1), device=h.device)
@property
def nhist(self):
"""
The number of internal variables, here just 1
"""
return 1
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
return torch.unsqueeze(
self.C(T) * ep
- self.g(T) * h[:, 0] * torch.abs(ep)
- self.b(T) * torch.abs(h[:, 0]) ** (self.r(T) - 1.0) * h[:, 0],
1,
)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
return (
torch.unsqueeze(-self.g(T)[..., None] * torch.abs(ep)[:, None], 1)
- (self.b(T) * self.r(T) * torch.abs(h)[:, 0] ** (self.r(T) - 1.0))[
:, None, None
]
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
return torch.unsqueeze(
torch.unsqueeze(self.C(T) - self.g(T) * h[:, 0] * torch.sign(ep), 1),
1,
)
class ChabocheHardeningModel(KinematicHardeningModel):
# pylint: disable=line-too-long
"""
Chaboche kinematic hardening, as defined in :cite:`chaboche1989unified`
This version does *not* include static recovery
The model maintains :math:`n` backstresses and sums them to provide the
total kinematic hardening
.. math::
\\sigma_{kin}=\\sum_{i=1}^{n_{kin}}x_{i}
Each individual backstress evolves per the Frederick-Armstrong model
.. math::
\\dot{x}_{i}=\\frac{2}{3}C_{i}\\dot{\\varepsilon}_{in}-g_{i}x_{i}\\left|\\dot{\\varepsilon}_{in}\\right|
Args:
C (list of |TP|): *vector* of hardening coefficients
g (list of |TP|): *vector* of recovery coefficients
"""
def __init__(self, C, g):
super().__init__()
self.C = C
self.g = g
self.nback = self.C.shape[-1]
def value(self, h):
"""
Map from the vector of internal variables to the kinematic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the kinematic hardening value
"""
return torch.sum(h, 1)
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the derivative of the kinematic hardening value
with respect to the internal variables
"""
return torch.ones((h.shape[0], self.nback), device=h.device)
@property
def nhist(self):
"""
Number of history variables, equal to the number of backstresses
"""
return self.nback
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
return (
self.C(T)[None, ...] * ep[:, None]
- self.g(T)[None, ...] * h * torch.abs(ep)[:, None]
).reshape(h.shape)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
return torch.diag_embed(-self.g(T)[None, ...] * torch.abs(ep)[:, None]).reshape(
h.shape + h.shape[1:]
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
return torch.unsqueeze(
self.C(T)[None, ...] * torch.ones_like(ep)[:, None]
- self.g(T)[None, :] * h * torch.sign(ep)[:, None],
-1,
).reshape(h.shape + (1,))
class ChabocheHardeningModelRecovery(KinematicHardeningModel):
# pylint: disable=line-too-long
"""
Chaboche kinematic hardening, as defined in :cite:`chaboche1989unified`
This version *does* include static recovery
The model maintains :math:`n` backstresses and sums them to provide the
total kinematic hardening
.. math::
\\sigma_{kin}=\\sum_{i=1}^{n_{kin}}x_{i}
Each individual backstress evolves per the Frederick-Armstrong model
.. math::
      \\dot{x}_{i}=\\frac{2}{3}C_{i}\\dot{\\varepsilon}_{in}-g_{i}x_{i}\\left|\\dot{\\varepsilon}_{in}\\right| - b_{i}\\left| x_{i} \\right|^{r_{i}-1} x_{i}
Args:
C (list of |TP|): *vector* of hardening coefficients
g (list of |TP|): *vector* of recovery coefficients
b (list of |TP|): *vector* of static recovery prefactors
r (list of |TP|): *vector* of static recovery exponents
"""
def __init__(self, C, g, b, r):
super().__init__()
self.C = C
self.g = g
self.b = b
self.r = r
self.nback = self.C.shape[-1]
def value(self, h):
"""
Map from the vector of internal variables to the kinematic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the kinematic hardening value
"""
return torch.sum(h, 1)
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the derivative of the kinematic hardening value
with respect to the internal variables
"""
return torch.ones((h.shape[0], self.nback), device=h.device)
@property
def nhist(self):
"""
Number of history variables, equal to the number of backstresses
"""
return self.nback
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
return (
self.C(T)[None, ...] * ep[:, None]
- self.g(T)[None, ...] * h * torch.abs(ep)[:, None]
- self.b(T)[None, ...] * torch.abs(h) ** (self.r(T)[None, ...] - 1.0) * h
).reshape(h.shape)
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
return torch.zeros_like(h)
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
return torch.diag_embed(-self.g(T)[None, ...] * torch.abs(ep)[:, None]).reshape(
h.shape + h.shape[1:]
) + torch.diag_embed(
-self.b(T)[None, ...]
* self.r(T)[None, ...]
* torch.abs(h) ** (self.r(T)[None, ...] - 1.0)
).reshape(
h.shape + h.shape[1:]
)
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
return torch.unsqueeze(
self.C(T)[None, ...] * torch.ones_like(ep)[:, None]
- self.g(T)[None, :] * h * torch.sign(ep)[:, None],
-1,
).reshape(h.shape + (1,))
class SuperimposedKinematicHardening(KinematicHardeningModel):
# pylint: disable=line-too-long
"""
Sum the contributions of several kinematic hardening models
Args:
models (list of models): list of KinematicHardening models
"""
def __init__(self, models):
super().__init__()
self.models = nn.ModuleList(models)
self.nmodels = len(self.models)
self.nhist_per = [m.nhist for m in self.models]
self.offsets = [0] + list(np.cumsum(self.nhist_per))[:-1]
def value(self, h):
"""
Map from the vector of internal variables to the kinematic hardening
value
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the kinematic hardening value
"""
v = torch.zeros(h.shape[0], device=h.device)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
v += model.value(h[:, o : o + n])
return v
def dvalue(self, h):
"""
Derivative of the map with respect to the internal variables
Args:
h (torch.tensor): the vector of internal variables for this model
Returns:
torch.tensor: the derivative of the kinematic hardening value
with respect to the internal variables
"""
dv = torch.zeros((h.shape[0], self.nhist), device=h.device)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
dv[:, o : o + n] = model.dvalue(h[:, o : o + n])
return dv
@property
def nhist(self):
"""
Number of history variables
"""
return sum(self.nhist_per)
def history_rate(self, s, h, t, ep, T):
"""
The rate evolving the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: internal variable rate
"""
hr = torch.zeros_like(h)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
hr[:, o : o + n] = model.history_rate(s, h[:, o : o + n], t, ep, T)
return hr
def dhistory_rate_dstress(self, s, h, t, ep, T):
"""
The derivative of this history rate with respect to the stress
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to stress
"""
dhr = torch.zeros_like(h)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
dhr[:, o : o + n] = model.dhistory_rate_dstress(
s, h[:, o : o + n], t, ep, T
)
return dhr
def dhistory_rate_dhistory(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the internal variables
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to history
"""
dhr = torch.zeros(h.shape[0], self.nhist, self.nhist, device=s.device)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
dhr[:, o : o + n, o : o + n] = model.dhistory_rate_dhistory(
s, h[:, o : o + n], t, ep, T
)
return dhr
def dhistory_rate_derate(self, s, h, t, ep, T):
"""
The derivative of the history rate with respect to the inelastic
strain rate
Args:
s (torch.tensor): stress
h (torch.tensor): history
t (torch.tensor): time
ep (torch.tensor): the inelastic strain rate
T (torch.tensor): the temperature
Returns:
torch.tensor: derivative with respect to the inelastic rate
"""
dhr = torch.zeros(h.shape + (1,), device=s.device)
for o, n, model in zip(self.offsets, self.nhist_per, self.models):
dhr[:, o : o + n] = model.dhistory_rate_derate(s, h[:, o : o + n], t, ep, T)
return dhr
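# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): superimposing two
# Frederick-Armstrong backstresses, assuming temperature.ConstantParameter
# exposes the .shape/.device attributes that FAKinematicHardeningModel's
# __init__ relies on above.
#
#   fa1 = FAKinematicHardeningModel(
#       temperature.ConstantParameter(torch.tensor(1000.0)),
#       temperature.ConstantParameter(torch.tensor(10.0)))
#   fa2 = FAKinematicHardeningModel(
#       temperature.ConstantParameter(torch.tensor(200.0)),
#       temperature.ConstantParameter(torch.tensor(2.0)))
#   kin = SuperimposedKinematicHardening([fa1, fa2])
#   kin.nhist                                 # 2: one backstress per submodel
# ---------------------------------------------------------------------------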
|
{"hexsha": "e3c70a88e58cdfefc7c0853f4c0469d67722e5d0", "size": 32839, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyoptmat/hardening.py", "max_stars_repo_name": "tianjuchen/pyoptmat", "max_stars_repo_head_hexsha": "6f34205f450fd884679f37522ccd0d0b65ecdb71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyoptmat/hardening.py", "max_issues_repo_name": "tianjuchen/pyoptmat", "max_issues_repo_head_hexsha": "6f34205f450fd884679f37522ccd0d0b65ecdb71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyoptmat/hardening.py", "max_forks_repo_name": "tianjuchen/pyoptmat", "max_forks_repo_head_hexsha": "6f34205f450fd884679f37522ccd0d0b65ecdb71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1642984014, "max_line_length": 164, "alphanum_fraction": 0.5438046226, "include": true, "reason": "import numpy", "num_tokens": 7935}
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from opendr.engine.target import BoundingBox3DList, TrackingAnnotation3DList
from scipy.optimize import linear_sum_assignment
from opendr.perception.object_tracking_3d.ab3dmot.algorithm.kalman_tracker_3d import KalmanTracker3D
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.core.box_np_ops import (
center_to_corner_box3d,
)
from numba.cuda.cudadrv.error import CudaSupportError
try:
from opendr.perception.object_detection_3d.voxel_object_detection_3d.\
second_detector.core.non_max_suppression.nms_gpu import (
rotate_iou_gpu_eval as iou3D,
)
except (CudaSupportError, ValueError):
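    # Fallback when the CUDA rotated-IoU kernel cannot be imported: report
    # full overlap for every pair so tracking still runs, at the cost of
    # association quality.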
def iou3D(boxes, qboxes, criterion=-1):
return np.ones((boxes.shape[0], qboxes.shape[0]))
class AB3DMOT():
def __init__(
self, max_staleness=2, min_updates=3, frame=0,
state_dimensions=10, # x, y, z, rotation_y, l, w, h, speed_x, speed_z, angular_speed
measurement_dimensions=7, # x, y, z, rotation_y, l, w, h
state_transition_matrix=None,
measurement_function_matrix=None,
covariance_matrix=None,
process_uncertainty_matrix=None,
iou_threshold=0.01,
):
super().__init__()
self.max_staleness = max_staleness
self.min_updates = min_updates
self.frame = frame
self.tracklets = []
self.last_tracklet_id = 1
self.iou_threshold = iou_threshold
self.state_dimensions = state_dimensions
self.measurement_dimensions = measurement_dimensions
self.state_transition_matrix = state_transition_matrix
self.measurement_function_matrix = measurement_function_matrix
self.covariance_matrix = covariance_matrix
self.process_uncertainty_matrix = process_uncertainty_matrix
def update(self, detections: BoundingBox3DList):
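        # Predict every live tracklet one frame ahead, associate detections to
        # those predictions by 3D IoU, update the matched tracklets, spawn new
        # tracklets for unmatched detections, then retire stale tracklets.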
if len(detections) > 0:
predictions = np.zeros([len(self.tracklets), self.measurement_dimensions])
for i, tracklet in enumerate(self.tracklets):
box = tracklet.predict().reshape(-1)[:self.measurement_dimensions]
predictions[i] = [*box]
detection_corners = center_to_corner_box3d(
np.array([box.location for box in detections.boxes]),
np.array([box.dimensions for box in detections.boxes]),
np.array([box.rotation_y for box in detections.boxes]),
)
if len(predictions) > 0:
prediction_corners = center_to_corner_box3d(
predictions[:, :3],
predictions[:, 4:],
predictions[:, 3],
)
else:
prediction_corners = np.zeros((0, 8, 3))
(
matched_pairs,
unmatched_detections,
unmatched_predictions
) = associate(detection_corners, prediction_corners, self.iou_threshold)
for d, p in matched_pairs:
self.tracklets[p].update(detections[d], self.frame)
for d in unmatched_detections:
self.last_tracklet_id += 1
tracklet = KalmanTracker3D(
detections[d], self.last_tracklet_id, self.frame,
self.state_dimensions, self.measurement_dimensions,
self.state_transition_matrix, self.measurement_function_matrix,
self.covariance_matrix, self.process_uncertainty_matrix
)
self.tracklets.append(tracklet)
old_tracklets = self.tracklets
self.tracklets = []
tracked_boxes = []
for tracklet in old_tracklets:
if tracklet.staleness(self.frame) < self.max_staleness:
self.tracklets.append(tracklet)
if self.frame <= self.min_updates or tracklet.updates >= self.min_updates:
tracked_boxes.append(tracklet.tracking_bounding_box_3d(self.frame))
result = TrackingAnnotation3DList(tracked_boxes)
self.frame += 1
return result
def reset(self):
self.frame = 0
self.tracklets = []
self.last_tracklet_id = 1
def associate(detection_corners, prediction_corners, iou_threshold):
ious = iou3D(detection_corners, prediction_corners)
detection_match_ids, prediction_match_ids = linear_sum_assignment(-ious)
unmatched_detections = []
unmatched_predictions = []
    # indices not selected by the linear-sum assignment are unmatched
    for i in range(len(detection_corners)):
        if i not in detection_match_ids:
            unmatched_detections.append(i)
    for i in range(len(prediction_corners)):
        if i not in prediction_match_ids:
            unmatched_predictions.append(i)
matched_pairs = []
for i in range(len(detection_match_ids)):
detection_id = detection_match_ids[i]
prediction_id = prediction_match_ids[i]
if ious[detection_id, prediction_id] < iou_threshold:
unmatched_detections.append(detection_id)
unmatched_predictions.append(prediction_id)
else:
matched_pairs.append([detection_id, prediction_id])
if len(matched_pairs) <= 0:
matched_pairs = np.zeros((0, 2), dtype=np.int32)
else:
matched_pairs = np.array(matched_pairs, dtype=np.int32)
return matched_pairs, unmatched_detections, unmatched_predictions
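# Hedged usage sketch (not from the original file): feed per-frame detections
# and collect the tracked boxes, assuming `detection_stream` yields one
# BoundingBox3DList per frame.
#
#   tracker = AB3DMOT(max_staleness=2, min_updates=3)
#   for detections in detection_stream:
#       tracks = tracker.update(detections)  # -> TrackingAnnotation3DList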
|
{"hexsha": "e753d7caed0ce915fd8cbac38361666c23178571", "size": 6010, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py", "max_stars_repo_name": "makistsantekidis/opendr", "max_stars_repo_head_hexsha": "07dee3b59d3487b9c5a93d6946317178a02c9890", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-24T01:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T16:21:24.000Z", "max_issues_repo_path": "src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py", "max_issues_repo_name": "makistsantekidis/opendr", "max_issues_repo_head_hexsha": "07dee3b59d3487b9c5a93d6946317178a02c9890", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 79, "max_issues_repo_issues_event_min_datetime": "2021-06-23T10:40:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T07:59:42.000Z", "max_forks_repo_path": "src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py", "max_forks_repo_name": "makistsantekidis/opendr", "max_forks_repo_head_hexsha": "07dee3b59d3487b9c5a93d6946317178a02c9890", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-07-04T07:38:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-12T16:18:47.000Z", "avg_line_length": 36.8711656442, "max_line_length": 109, "alphanum_fraction": 0.6638935108, "include": true, "reason": "import numpy,from scipy,from numba", "num_tokens": 1283}
|
import pandas as pd
import numpy as np
import math
from gurobipy import *
from sklearn import preprocessing
import json
import razzball_scraper
class Team:
def __init__(self):
# players
self.Hitters = {}
self.Pitchers = {}
self.Bench = {}
self.budget = 260
# batter scoring stats
self.R = 0
self.HR = 0
self.RBI = 0
self.SB = 0
self.AVG = math.inf
# batter auxiliary stats
self.BatterHits = 0
self.AB = 0
# pitcher scoring stats
self.K = 0
self.W = 0
self.SV = 0
self.ERA = math.inf
self.WHIP = math.inf
# pitcher auxiliary stats
self.ER = 0
self.BB = 0
self.IP = 0
self.PitcherHits = 0
def add_hitter_to_team(self,hitter):
self.BatterHits = self.BatterHits + hitter.H
self.AB = self.AB + hitter.AB
        self.R = self.R + hitter.R
        self.HR = self.HR + hitter.HR
        self.RBI = self.RBI + hitter.RBI
        self.SB = self.SB + hitter.SB
self.AVG = self.BatterHits / self.AB
if len(self.Hitters) < 14:
self.Hitters[hitter.Name] = hitter
else:
self.Bench[hitter.Name] = hitter
def add_pitcher_to_team(self,pitcher):
self.IP = self.IP + pitcher.IP
self.ER = self.ER + pitcher.ER
self.BB = self.BB + pitcher.BB
self.PitcherHits = self.PitcherHits + pitcher.Hits
self.K = self.K + pitcher.K
self.W = self.W + pitcher.W
self.SV = self.SV + pitcher.SV
self.ERA = (self.ER / self.IP) * 9
self.WHIP = (self.BB + self.PitcherHits) / self.IP
if len(self.Pitchers) < 11:
self.Pitchers[pitcher.Name] = pitcher
else:
self.Bench[pitcher.Name] = pitcher
class Hitter:
Type = "Hitter"
def __init__(self, name, positions, R, HR, RBI, SB, H, AB, estPrice):
self.Name = name
self.Pos = positions
self.R = R
self.HR = HR
self.RBI = RBI
self.SB = SB
self.H = H
self.AB = AB
self.Price = estPrice
self.Team = ""
class Pitcher:
Type = "Pitcher"
def __init__(self, name, positions, K, W, SV, ER, IP, Hits, Walks, estPrice):
self.Name = name
self.Pos = positions
self.K = K
self.W = W
self.SV = SV
self.ER = ER
self.IP = IP
self.Hits = Hits
self.Price = estPrice
self.BB = Walks
self.Team = ""
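# Hedged usage sketch (not from the original file; the stat line is made up
# for illustration):
#
#   slugger = Hitter("Some Hitter", ["OF"], R=104, HR=39, RBI=104, SB=11,
#                    H=149, AB=470, estPrice=42)
#   team = Team()
#   team.add_hitter_to_team(slugger)  # accumulates counting stats, sets AVG
#   team.AVG                          # -> 149 / 470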
class Optimizer:
HitterPositions = ['C', '1B', '2B', 'SS', '3B', 'OF', 'outer', 'inner', 'util']
HitterMetrics = ['R', 'HR', 'RBI', 'SB', 'AVG']
PitcherPositions = ['SP', 'RP']
PitcherMetrics = ['W', 'SV', 'K', 'ERA', 'WHIP']
Teams = {}
opposing_team_names = []
all_players = []
def __init__(self):
self.Teams["My Team"] = Team()
        self.all_players = (
            self.get_hitter_prices(self.get_hitters())
            .nsmallest(150, 'Ovr')
            .append(self.get_pitcher_prices(self.get_starting_pitchers()).nsmallest(120, 'Ovr'),
                    ignore_index=True)
            .append(self.get_pitcher_prices(self.get_closing_pitchers()).nsmallest(22, 'SV rank'),
                    ignore_index=True)
        )
self.all_players.fillna(0, inplace=True)
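        # invert the "lower is better" columns (ERA, WHIP, and AB) so that
        # larger values are better in every column used downstream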
self.all_players['ERA'] = self.all_players['ERA'].apply(lambda x: 0 if x == 0 else 1/x)
self.all_players['WHIP'] = self.all_players['WHIP'].apply(lambda x: 0 if x == 0 else 1/x)
self.all_players['AB'] = self.all_players['AB'].apply(lambda x: 0 if x == 0 else 1/x)
self.all_players['adjAVG'] = self.all_players['AVG']*100
def get_all_players(self):
return self.all_players
def get_team(self, teamName):
return self.Teams[teamName]
def add_team(self, team):
self.Teams[team] = Team()
self.opposing_team_names.append(team)
def remove_team(self, team):
self.Teams.pop(team)
self.opposing_team_names.remove(team)
def get_budget(self, teamName):
tm = self.Teams[teamName]
return tm.budget
def get_team_players(self, tm):
selected = []
for m in tm.Bench.values():
if not (m.empty):
selected.append(m.Name)
for m in tm.Hitters.values():
if not (m.empty):
selected.append(m.Name)
for m in tm.Pitchers.values():
if not (m.empty):
selected.append(m.Name)
return selected
def get_hitters(self, combined=False):
hitters = pd.read_csv('data/razzball-hitters.csv', index_col='#', usecols=['#','Name','Team','ESPN','R','HR', 'RBI', 'SB','AVG','AB','H'])
hitters.rename_axis('Razzball_Rank', inplace=True)
hitters.reset_index(inplace=True)
# sort and rank
for metric in self.HitterMetrics:
hitters.sort_values(by=[metric],inplace=True, ascending=False)
hitters.reset_index(inplace=True, drop=True)
hitters.index.rename('{} rank'.format(metric), inplace=True)
hitters.reset_index(inplace=True)
hitters['Ovr'] = (hitters['AVG rank'] + hitters['SB rank'] + hitters['RBI rank'] + hitters['HR rank'] + hitters['R rank']) / 5
#hitters['Ovr'] = (hitters['Ovr'] + hitters['Razzball_Rank']) / 2
hitters.rename(columns={'ESPN':'POS'}, inplace=True)
if (combined):
hitters = hitters.assign(POS=hitters.POS.str.split('/'))
else:
hitters = hitters.assign(POS=hitters.POS.str.split('/')).explode('POS')
hitters.sort_values(by=['Ovr'],inplace=True,ascending=True)
return hitters
def get_starting_pitchers(self):
pitchers = pd.read_csv('data/razzball-pitchers.csv', index_col='#', usecols=['#','Name','Team','POS','W', 'SV', 'K', 'ERA', 'WHIP','IP','BB','H', 'ER'])
pitchers.rename_axis('Razzball_Rank', inplace=True)
pitchers.reset_index(inplace=True)
pitchers.rename(columns={'H':'Hits'}, inplace=True)
pitchers = pitchers.assign(POS=pitchers.POS.str.split('/')).explode('POS')
sp = pitchers[pitchers['POS'] == 'SP'].reset_index(drop=True)
for metric in self.PitcherMetrics:
if(metric != 'SV'):
sp.sort_values(by=[metric],inplace=True, ascending=(metric=='WHIP' or metric=='ERA'))
sp.reset_index(inplace=True, drop=True)
sp.rename_axis('{} rank'.format(metric), inplace=True)
sp.reset_index(inplace=True)
sp['Ovr'] = (sp['W rank'] + sp['K rank'] + sp['ERA rank'] + sp['WHIP rank']) / 4
sp.sort_values(by=['Ovr'],inplace=True,ascending=True)
return sp
def get_closing_pitchers(self):
pitchers = pd.read_csv('data/razzball-pitchers.csv', index_col='#', usecols=['#','Name','Team','POS','W', 'SV', 'K', 'ERA', 'WHIP','IP','BB','H', 'ER'])
pitchers.rename_axis('Razzball_Rank', inplace=True)
pitchers.reset_index(inplace=True)
pitchers.rename(columns={'H':'Hits'}, inplace=True)
pitchers = pitchers.assign(POS=pitchers.POS.str.split('/')).explode('POS')
rp = pitchers[pitchers['POS'] == 'RP'].reset_index(drop=True)
for metric in self.PitcherMetrics:
if(metric != 'W'):
rp.sort_values(by=[metric],inplace=True, ascending=(metric=='WHIP' or metric=='ERA'))
rp.reset_index(inplace=True, drop=True)
rp.rename_axis('{} rank'.format(metric), inplace=True)
rp.reset_index(inplace=True)
rp['Ovr'] = (rp['SV rank'] + rp['K rank'] + rp['ERA rank'] + rp['WHIP rank']) / 4
rp.sort_values(by=['Ovr'],inplace=True,ascending=True)
return rp
def get_hitter_prices(self, hitters):
prices = pd.read_csv('data/razzball-hitters-prices.csv', index_col='#', usecols=['#', 'Name', 'Team', '5×5 $', '$R', '$HR', '$RBI', '$SB', '$AVG (no OBP)'])
prices.rename(columns={'5×5 $': '$'},inplace=True)
prices['$'] = prices['$'].apply(lambda x: 1 if x <=1 else x)
hitters = hitters.merge(prices, left_on=['Name', 'Team'], right_on=['Name','Team'], how='left')
return hitters
def get_pitcher_prices(self, pitchers):
prices = pd.read_csv('data/razzball-pitchers-prices.csv', index_col='#', usecols=['#','Name','Team','5×5 $','$W (no QS)','$SV (no HLD)','$K','$WHIP','$ERA'])
prices.rename(columns={'5×5 $': '$'},inplace=True)
prices['$'] = prices['$'].apply(lambda x: 1 if x <=1 else x)
pitchers = pitchers.merge(prices, left_on=['Name', 'Team'], right_on=['Name','Team'], how='left')
return pitchers
def get_score(self, tm):
score = {'R': 0,'HR':0,'RBI':0,'SB':0,'AVG':0,'K':0,'W':0,'SV':0,'ERA':0,'WHIP':0,'Total':0}
R = sorted([i * (60/162) for i in [1174,1136,1067,1006,1241,1110,974,997,1159,966,898]])
HR = sorted([i * (60/162) for i in [433,352,353,284,382,321,291,332,355,302,260]])
RBI = sorted([i * (60/162) for i in [1198,1088,1077,1030,1147,1016,955,1000,1075,905,897]])
SB = sorted([i * (60/162) for i in [113,141,97,106,94,110,127,121,123,73,72]])
AVG = sorted([0.2735,0.2592,0.2740,0.2642,0.2768,0.2710,0.2620,0.2601,0.2705,0.2645,0.2641])
K = sorted([i * (60/162) for i in [1643,1531,1788,1598,1330,1387,1480,1725,1132,1391,1336]])
W = sorted([i * (60/162) for i in [97,98,109,112,80,93,85,99,64,84,74]])
SV = sorted([i * (60/162) for i in [59,73,3,55,115,79,105,42,39,59,54]])
ERA = sorted([3.907, 3.898,4.144,3.665,4.444,4.107,3.760,4.217,3.493,4.112,4.616],reverse=True)
WHIP = sorted([1.216,1.244,1.262,1.102,1.339,1.247,1.210,1.291,1.131,1.267,1.284],reverse=True)
hitter_scores = {'R':R,'HR':HR,'RBI':RBI,'SB':SB,'AVG':AVG}
pitcher_scores = {'K':K,'W':W,'SV':SV,'ERA':ERA,'WHIP':WHIP}
for metric in self.HitterMetrics:
for s in range(len(hitter_scores[metric])):
if(tm[metric] < hitter_scores[metric][s]):
score[metric] = s+1
score['Total'] += s+1
break
if (score[metric] == 0):
score[metric] = 12
score['Total'] += 12
for metric in self.PitcherMetrics:
if (metric != 'ERA') and (metric != 'WHIP'):
for s in range(len(pitcher_scores[metric])):
if(tm[metric] < pitcher_scores[metric][s]):
score[metric] = s+1
score['Total'] += s+1
break
else:
for s in range(len(pitcher_scores[metric])):
if(tm[metric] > pitcher_scores[metric][s]):
score[metric] = s+1
score['Total'] += s+1
break
if (score[metric] == 0):
score[metric] = 12
score['Total'] += 12
return score
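    # Hedged mini-example (not from the original file): get_score walks each
    # sorted benchmark list above and awards s+1 roto points at the first
    # benchmark the team fails to beat (12 if it beats them all; ERA and
    # WHIP are compared in the reverse direction).
    #
    #   opt = Optimizer()   # assumes the data/*.csv files are present
    #   tm = {'R': 420, 'HR': 150, 'RBI': 430, 'SB': 45, 'AVG': 0.268,
    #         'K': 560, 'W': 36, 'SV': 25, 'ERA': 3.9, 'WHIP': 1.22}
    #   opt.get_score(tm)   # -> per-category points plus a 'Total'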
def build_team(self):
budget = self.Teams["My Team"].budget
selected = self.get_team_players(self.Teams["My Team"])
m = Model("mip1")
        # centering-only standardization (with_std=False) works really well
        # here, without needing to scale by the per-stat maxes
scaler = preprocessing.StandardScaler(with_std=False)
all_players = self.all_players
for team in self.opposing_team_names:
opp_players = self.get_team_players(self.Teams[team])
for pl in opp_players:
all_players = all_players[all_players['Name'] != pl].reset_index(drop=True)
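# Drop every player already rostered by an opponent from the candidate pool.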
norm_players = all_players[['R', 'HR', 'RBI', 'SB','adjAVG','W','SV','K','ERA','WHIP','AB','H']]
# fit_transform returns a bare array, so the labels must match the selection order above
norm_players = pd.DataFrame(scaler.fit_transform(norm_players),columns=['R', 'HR', 'RBI', 'SB','adjAVG','W','SV','K', 'ERA', 'WHIP','AB','H'])
for h in self.HitterMetrics:
if h == 'AVG':
all_players['normAVG'] = norm_players['adjAVG']
else:
all_players['norm{}'.format(h)] = norm_players[h]
for p in self.PitcherMetrics:
all_players['norm{}'.format(p)] = norm_players[p]
for h in ['AB','H']:
all_players['norm{}'.format(h)] = norm_players[h]
all_names = list(all_players.index)
name_list = list(dict.fromkeys(all_players['Name']))
for s in selected:
all_players.loc[all_players['Name'] == s,'$'] = 0
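# Already-rostered players cost nothing further, so zero their price before optimizing.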
allCosts = dict(zip(all_names,all_players['$']))
player_vars = m.addVars(all_names,vtype=GRB.INTEGER,lb=0,ub=1,name='players')
player_chosen = m.addVars(name_list,vtype=GRB.INTEGER,lb=0,ub=1,name='pl_chosen')
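# 0/1 selection variables: player_vars has one entry per (player, position) row,
# player_chosen one per distinct player name.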
allRuns = dict(zip(all_names,all_players['normR']))
allHRs = dict(zip(all_names,all_players['normHR']))
allRBIs = dict(zip(all_names,all_players['normRBI']))
allSBs = dict(zip(all_names,all_players['normSB']*2.5))
allAVG = dict(zip(all_names,all_players['normAVG']))
allAB = dict(zip(all_names,all_players['normAB']*0.8))
allH = dict(zip(all_names,all_players['normH']*0.8))
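# Hand-tuned category weights: SB up-weighted (x2.5) and bulk AB/H volume
# down-weighted (x0.8), presumably to balance scarce categories.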
allWs = dict(zip(all_names,all_players['normW']))
allKs = dict(zip(all_names,all_players['normK']))
allSVs = dict(zip(all_names,all_players['normSV']))
allERA = dict(zip(all_names,all_players['normERA']))
allWHIP = dict(zip(all_names,all_players['normWHIP']))
allPOS = dict(zip(all_names,all_players['POS']))
obj = LinExpr()
obj += quicksum([allRuns[i]*player_vars[i] for i in all_names])
obj += quicksum([allHRs[i]*player_vars[i] for i in all_names])
obj += quicksum([allRBIs[i]*player_vars[i] for i in all_names])
obj += quicksum([allSBs[i]*player_vars[i] for i in all_names])
obj += quicksum([allAVG[i]*player_vars[i] for i in all_names])
obj += quicksum([allWs[i]*player_vars[i] for i in all_names])
obj += quicksum([allKs[i]*player_vars[i] for i in all_names])
obj += quicksum([allSVs[i]*player_vars[i] for i in all_names])
obj += quicksum([allERA[i]*player_vars[i] for i in all_names])
obj += quicksum([allWHIP[i]*player_vars[i] for i in all_names])
obj += quicksum([allCosts[i]*player_vars[i] for i in all_names])
obj += quicksum([allH[i]*player_vars[i] for i in all_names])
obj += quicksum([allAB[i]*player_vars[i] for i in all_names])
m.setObjective(obj, GRB.MAXIMIZE)
m.addConstr(sum([allCosts[i]*player_vars[i] for i in all_names])<= budget)
m.addConstr(sum([(allPOS[i]=='C')*player_vars[i] for i in all_names]) == 1)
# roster-slot counts per position; update these to match the league's position depth
m.addConstr(sum([(allPOS[i]=='1B')*player_vars[i] for i in all_names]) == 2)
m.addConstr(sum([(allPOS[i]=='2B')*player_vars[i] for i in all_names]) == 2)
m.addConstr(sum([(allPOS[i]=='3B')*player_vars[i] for i in all_names]) == 1)
m.addConstr(sum([(allPOS[i]=='SS')*player_vars[i] for i in all_names]) == 2)
m.addConstr(sum([(allPOS[i]=='OF')*player_vars[i] for i in all_names]) == 6)
m.addConstr(sum([(allPOS[i]=='SP')*player_vars[i] for i in all_names]) == 8)
m.addConstr(sum([(allPOS[i]=='RP')*player_vars[i] for i in all_names]) == 3)
m.addConstr(sum([(allPOS[i]=='DH')*player_vars[i] for i in all_names]) == 1)
m.addConstr(sum([allCosts[i]*player_vars[i]*(allPOS[i]=='RP' or allPOS[i]=='SP') for i in all_names]) <= 120)
m.addConstr(sum([allCosts[i]*player_vars[i]*(allPOS[i]!='RP' and allPOS[i]!='SP') for i in all_names]) <= 140)
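# Cap spending at $120 on pitchers (SP/RP) and $140 on everyone else.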
# ensure no player is selected twice
for f in all_names:
m.addConstr(player_vars[f]>= player_chosen[all_players.iloc[f]['Name']]*0.01)
m.addConstr(player_vars[f]<= player_chosen[all_players.iloc[f]['Name']]*1e8)
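# Indicator-style linking: selecting any (player, position) row forces that name to be
# 'chosen' (and vice versa), so together with the 26-name constraint below a
# multi-position player cannot fill two roster slots.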
m.addConstr(sum(player_chosen.values())==26)
for s in selected:
m.addConstr(player_chosen[s] >= 0.7)
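# player_chosen is integral, so >= 0.7 pins each already-rostered player to 1.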
m.setParam(GRB.Param.PoolSolutions,10)
m.setParam(GRB.Param.PoolSearchMode,2)
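# Search systematically (PoolSearchMode=2) for up to 10 solutions, with solver
# output silenced; the best roto-scoring roster is picked from the pool below.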
m.setParam(GRB.Param.OutputFlag,0)
m.optimize()
all_players['AB'] = all_players['AB'].apply(lambda x: 0 if x == 0 else 1/x)
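# AB appears to have been stored as its reciprocal for the maximization above;
# this restores the true at-bat counts before computing the AVG totals below.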
nSolns = m.SolCount
max_tot = 0
best = 0
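# Rebuild each pooled roster, append a totals row, and keep the solution whose
# totals earn the best roto score.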
for e in range(nSolns):
Team = pd.DataFrame()
m.setParam(GRB.Param.SolutionNumber,e)
for v in m.getVars():
if v.Xn > 0.01 and not 'chosen' in v.varName:
Team = Team.append(all_players.iloc[int(v.varName.split('[')[1].split(']')[0])])
Team = Team.append({'Name':'Total','AVG':sum(Team['H'])/sum(Team['AB']),'R':sum(Team['R']),'RBI':sum(Team['RBI']),'HR':sum(Team['HR']),'SB':sum(Team['SB']),'WHIP':(sum(Team['BB'])+sum(Team['Hits']))/sum(Team['IP']),'ERA':(sum(Team['ER'])/sum(Team['IP']))*9,'W':sum(Team['W']),'SV':sum(Team['SV']),'K':sum(Team['K']),'$':sum(Team['$'])},ignore_index=True)
sc = self.get_score(Team[Team['Name'] == 'Total'].iloc[0])['Total']
if (sc > max_tot):
best = e
max_tot = sc
Team = pd.DataFrame()
m.setParam(GRB.Param.SolutionNumber,best)
for v in m.getVars():
if v.Xn > 0.01 and not 'chosen' in v.varName:
Team = Team.append(all_players.iloc[int(v.varName.split('[')[1].split(']')[0])])
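# ERA and WHIP likewise appear to be stored as reciprocals (so that maximizing
# favors low rates); restore the true values before reporting.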
Team['ERA'] = Team['ERA'].apply(lambda x: 0 if x == 0 else 1/x)
Team['WHIP'] = Team['WHIP'].apply(lambda x: 0 if x == 0 else 1/x)
return Team.append({'Name':'Total','AVG':(sum(Team['H'])/sum(Team['AB'])),'R':sum(Team['R']),'RBI':sum(Team['RBI']),'HR':sum(Team['HR']),'SB':sum(Team['SB']),'WHIP':(sum(Team['BB'])+sum(Team['Hits']))/sum(Team['IP']),'ERA':(sum(Team['ER'])/sum(Team['IP']))*9,'W':sum(Team['W']),'SV':sum(Team['SV']),'K':sum(Team['K']),'$':sum(Team['$'])},ignore_index=True)
def add_player_to_team(self, player, teamName, position, price):
team = self.Teams[teamName]
pl = self.all_players[self.all_players['Name'].str.contains(player,case=False)].reset_index(drop=True).iloc[0]
if position in ('SP', 'RP', 'SP/RP'):
team.add_pitcher_to_team(pl)
else:
team.add_hitter_to_team(pl)
team.budget -= price
#player.Team = teamName
def remove_player_from_team(self, player, teamName, position):
team = self.Teams[teamName]
if(player in team.Hitters):
team.Hitters.pop(player)
elif(player in team.Pitchers):
team.Pitchers.pop(player)
elif(player in team.Bench):
team.Bench.pop(player)
def print_team(self, teamName):
tm = self.Teams[teamName]
selected={}
for m in tm.Bench:
if (m != ""):
player = self.all_players[self.all_players['Name'] == m].iloc[0]
selected[player['Name']] = player['$']
for m in tm.Hitters:
if (m != ""):
player = self.all_players[self.all_players['Name'] == m].iloc[0]
selected[player['Name']] = player['$']
for m in tm.Pitchers:
if (m != ""):
player = self.all_players[self.all_players['Name'] == m].iloc[0]
selected[player['Name']] = player['$']
pl = pd.DataFrame()
for key,val in selected.items():
pl = pl.append(self.all_players[self.all_players['Name'] == key])
pl.loc[pl['Name'] == key,'$'] = val
if not pl.empty:
return pl[['Name','POS','R', 'HR', 'RBI', 'SB', 'AVG','W', 'SV', 'K', 'ERA', 'WHIP','Ovr','$']]
return 'No Players on team'
|
{"hexsha": "3f0611ae80b4a4d70d6de4d914a961a967d12b5c", "size": 20172, "ext": "py", "lang": "Python", "max_stars_repo_path": "draftbot3001/RotisserieOptimizer.py", "max_stars_repo_name": "samozm/rotissarie_baseball", "max_stars_repo_head_hexsha": "9338c2cd68bfa628b27ee1fd8756bc71d07ff3e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "draftbot3001/RotisserieOptimizer.py", "max_issues_repo_name": "samozm/rotissarie_baseball", "max_issues_repo_head_hexsha": "9338c2cd68bfa628b27ee1fd8756bc71d07ff3e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "draftbot3001/RotisserieOptimizer.py", "max_forks_repo_name": "samozm/rotissarie_baseball", "max_forks_repo_head_hexsha": "9338c2cd68bfa628b27ee1fd8756bc71d07ff3e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.474137931, "max_line_length": 379, "alphanum_fraction": 0.5550763434, "include": true, "reason": "import numpy", "num_tokens": 5714}
|
#include "status_loop.h"
#include <boost/log/trivial.hpp>
namespace asio = boost::asio;
void guarded_main()
{
BOOST_LOG_TRIVIAL(info) << "initializing I/O";
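// 'io' is assumed to be a type from status_loop.h; constructing it presumably
// sets up and runs the I/O/status loop.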
io io;
}
int main()
{
try
{
guarded_main();
}
catch (const std::exception& e)
{
BOOST_LOG_TRIVIAL(fatal) << e.what();
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
{"hexsha": "eb1e255edcffea085256b57e7ffb022ad8479ffd", "size": 385, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "janekb04/driver", "max_stars_repo_head_hexsha": "a1271d35726e7490add9bbe265ef292db297ace1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main.cpp", "max_issues_repo_name": "janekb04/driver", "max_issues_repo_head_hexsha": "a1271d35726e7490add9bbe265ef292db297ace1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.cpp", "max_forks_repo_name": "janekb04/driver", "max_forks_repo_head_hexsha": "a1271d35726e7490add9bbe265ef292db297ace1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.8076923077, "max_line_length": 50, "alphanum_fraction": 0.5948051948, "num_tokens": 101}
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Sequence
from functools import reduce
import operator
def infer_shape(x, shape):
shape = list(shape)  # work on a copy so the caller's list (e.g. Reshape.shape) is not mutated between calls
dim_index_need_infer = shape.index(-1) if shape.count(-1) == 1 else None
in_elem_cnt = reduce(operator.mul, x.shape, 1)
out_elem_cnt = reduce(operator.mul, shape, 1)
if dim_index_need_infer is not None:
assert (in_elem_cnt % out_elem_cnt) == 0
shape[dim_index_need_infer] = int(abs(in_elem_cnt / out_elem_cnt))
else:
assert in_elem_cnt == out_elem_cnt
return shape
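# e.g. for x with 16 elements: infer_shape(x, [2, -1]) -> [2, 8], and
# infer_shape(x, [2, 2, 2, -1]) -> [2, 2, 2, 2]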
class Reshape(Module):
def __init__(self, shape: Sequence[int]) -> None:
super().__init__()
assert isinstance(shape, tuple) or isinstance(shape, list)
shape = list(shape)
assert all(dim == -1 or dim > 0 for dim in shape)
assert shape.count(-1) <= 1
self._op = (
flow.builtin_op("reshape")
.Input("in")
.Output("out")
.Attr("shape", shape)
.Build()
)
self.shape = shape
def forward(self, x):
new_shape = infer_shape(x, self.shape)
return self._op(x, shape=new_shape)[0]
@oneflow_export("reshape")
@register_tensor_op("reshape")
@experimental_api
def reshape_op(x, shape: Sequence[int] = None):
"""This operator reshapes a Tensor.
We can set one dimension in `shape` as `-1`, the operator will infer the complete shape.
Args:
x: A Tensor.
shape: Shape of the output tensor.
Returns:
A Tensor has the same type as `x`.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array(
... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
... ).astype(np.float32)
>>> input = flow.Tensor(x)
>>> y = flow.reshape(input, shape=[2, 2, 2, -1]).numpy().shape
>>> print(y)
(2, 2, 2, 2)
"""
return Reshape(shape=shape)(x)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
{"hexsha": "6fa18280e0dc4fc9e6ceb9054fe3dd7196326f99", "size": 2913, "ext": "py", "lang": "Python", "max_stars_repo_path": "oneflow/python/nn/modules/reshape.py", "max_stars_repo_name": "vycezhong/oneflow", "max_stars_repo_head_hexsha": "adeab621afca45142d85ed9cf9c4375af85d33d1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-14T03:19:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T03:19:35.000Z", "max_issues_repo_path": "oneflow/python/nn/modules/reshape.py", "max_issues_repo_name": "vycezhong/oneflow", "max_issues_repo_head_hexsha": "adeab621afca45142d85ed9cf9c4375af85d33d1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-16T08:37:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-16T08:37:50.000Z", "max_forks_repo_path": "oneflow/python/nn/modules/reshape.py", "max_forks_repo_name": "vycezhong/oneflow", "max_forks_repo_head_hexsha": "adeab621afca45142d85ed9cf9c4375af85d33d1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4242424242, "max_line_length": 92, "alphanum_fraction": 0.6474424991, "include": true, "reason": "import numpy", "num_tokens": 735}
|
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2013, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: Suat Gedikli */
#pragma once
#include <moveit/macros/class_forward.h>
#include <Eigen/Core> // for Vector3f
namespace mesh_filter
{
// forward declarations
class GLRenderer;
/**
* \brief Abstract Interface defining a sensor model for mesh filtering
* \author Suat Gedikli <gedikli@willowgarage.com>
*/
class SensorModel
{
public:
MOVEIT_CLASS_FORWARD(Parameters) // Defines ParametersPtr, ConstPtr, WeakPtr... etc
/**
* \brief Abstract Interface defining Sensor Parameters.
* \author Suat Gedikli <gedikli@willowgarage.com>
*/
class Parameters
{
public:
/**
* \brief Constructor taking core parameters that are required for all sensors
* \param width width of the image generated by this kind of sensor
* \param height height of the image generated by this kind of sensor
* \param near_clipping_plane_distance distance of the near clipping plane in meters
* \param far_clipping_plane_distance distance of the far clipping plane in meters
*/
Parameters(unsigned width, unsigned height, float near_clipping_plane_distance, float far_clipping_plane_distance);
/** \brief virtual destructor*/
virtual ~Parameters();
/**
* \brief method that sets required parameters for the renderer.
* Each sensor usually has its own shaders with specific parameters depending on sensor parameters.
* This method is called within MeshFilter before any rendering/filtering is done to set any changed
* sensor parameters in the shader code.
* \param renderer the renderer that needs to be updated
*/
virtual void setRenderParameters(GLRenderer& renderer) const = 0;
/**
* \brief sets the specific Filter Renderer parameters
* \param renderer the renderer that needs to be updated
*/
virtual void setFilterParameters(GLRenderer& renderer) const = 0;
/**
* \brief polymorphic clone method
* \return clones object as base class
*/
virtual Parameters* clone() const = 0;
/**
* \brief returns the sensor-dependent padding coefficients
* \return the sensor-dependent padding coefficients
*/
virtual const Eigen::Vector3f& getPaddingCoefficients() const = 0;
/**
* \brief transforms depth values from rendered model to metric depth values
* \param[in,out] depth pointer to floating point depth buffer
*/
virtual void transformModelDepthToMetricDepth(float* depth) const;
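// Implementation note (an assumption, not part of this interface): renderers
// commonly map normalized depth d in [0, 1] back to metric depth via
// z = (near * far) / (far - d * (far - near)); the exact mapping is left to
// the concrete sensor model and the GLRenderer projection in use.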
/**
* \brief transforms depth values from filtered depth to metric depth values
* \param[in,out] depth pointer to floating point depth buffer
*/
virtual void transformFilteredDepthToMetricDepth(float* depth) const;
/**
* \brief sets the image size
* \param[in] width width of the depth map
* \param[in] height height of the depth map
*/
void setImageSize(unsigned width, unsigned height);
/**
* \brief sets the clipping range
* \param[in] near distance of near clipping plane
* \param[in] far distance of far clipping plane
*/
void setDepthRange(float near, float far);
/**
* \brief returns the width of depth maps
* \return width of the depth map
*/
unsigned getWidth() const;
/**
* \brief returns the height of depth maps
* \return height of the depth map
*/
unsigned getHeight() const;
/**
* \brief returns distance to the near clipping plane
* \return distance to near clipping plane
*/
float getNearClippingPlaneDistance() const;
/**
* \brief returns the distance to the far clipping plane
* \return distance to far clipping plane
*/
float getFarClippingPlaneDistance() const;
protected:
/** \brief width of depth maps generated by the sensor*/
unsigned width_;
/** \brief height of depth maps generated by the sensor*/
unsigned height_;
/** \brief distance of far clipping plane*/
float far_clipping_plane_distance_;
/** \brief distance of near clipping plane*/
float near_clipping_plane_distance_;
};
/**
* \brief virtual destructor
*/
virtual ~SensorModel();
};
} // namespace mesh_filter
|
{"hexsha": "c8e8c070ed915c2bcecfa071f2e76e3981a9108d", "size": 5992, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "moveit_ros/perception/mesh_filter/include/moveit/mesh_filter/sensor_model.hpp", "max_stars_repo_name": "FabianSchuetze/moveit2", "max_stars_repo_head_hexsha": "d1960f3994daff215c4a51de15c96ce618f4d97d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-04T14:34:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-04T14:34:04.000Z", "max_issues_repo_path": "moveit_ros/perception/mesh_filter/include/moveit/mesh_filter/sensor_model.hpp", "max_issues_repo_name": "FabianSchuetze/moveit2", "max_issues_repo_head_hexsha": "d1960f3994daff215c4a51de15c96ce618f4d97d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-10-08T18:22:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-08T18:22:12.000Z", "max_forks_repo_path": "moveit_ros/perception/mesh_filter/include/moveit/mesh_filter/sensor_model.hpp", "max_forks_repo_name": "FabianSchuetze/moveit2", "max_forks_repo_head_hexsha": "d1960f3994daff215c4a51de15c96ce618f4d97d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-07-24T00:54:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T20:29:37.000Z", "avg_line_length": 34.8372093023, "max_line_length": 119, "alphanum_fraction": 0.689753004, "num_tokens": 1274}
|
"""
Tests of the "convenience_Horizons" routines that are used for testing.
Some of these tests really function as demos/documentation to
remind myself/ourselves of how these Horizons functions are
intended to work
"""
# Import standard packages
# --------------------------------------------------------------
import numpy as np
# Import the module to be tested
# --------------------------------------------------------------
import convenience_Horizons as Horizons
def test_read_Horizons_state_from_text():
"""
This is NOT testing a built-in Horizons function
This is just testing a little convenience routine created by MJP
This convenience routine is ONLY used as part of the testing code for Cheby Checker
"""
# input text
lines = """
X =-2.590350154796811E+00 Y =-7.949342693459856E-02 Z = 1.245107691757731E-01
VX=-1.454708370733871E-03 VY=-9.503445860627428E-03 VZ=-3.846514535533382E-03
""".split('\n')[1:-1]
# use the target function to extract the coordinates
result = Horizons.read_Horizons_state_from_text( lines )
# check that the results are as expected
expected_array = np.array([ float('-2.590350154796811E+00'), float('-7.949342693459856E-02'), float('1.245107691757731E-01'),
float('-1.454708370733871E-03'), float('-9.503445860627428E-03'), float('-3.846514535533382E-03') ] )
assert np.allclose(expected_array, result, rtol=1e-08, atol=1e-08)
def test_extract_first_state_from_text():
"""
This is NOT testing a built-in Horizons function
This is just testing a little convenience routine created by MJP
This convenience routine is ONLY used as part of the testing code for Cheby Checker
"""
# input text
lines = """
*******************************************************************************
JPL/HORIZONS 12345 (1993 FT8) 2022-Jan-28 14:39:42
Rec #: 12345 (+COV) Soln.date: 2021-Nov-10_08:38:58 # obs: 1959 (1993-2021)
IAU76/J2000 helio. ecliptic osc. elements (au, days, deg., period=Julian yrs):
EPOCH= 2457108.5 ! 2015-Mar-27.00 (TDB) Residual RMS= .2812
EC= .1603033905689926 QR= 2.056207695854036 TP= 2457050.1973502915
OM= 106.4549280993016 W= 314.1929318541605 IN= 3.350816780296945
A= 2.448750742541829 MA= 14.99600220651154 ADIST= 2.841293789229623
PER= 3.832 N= .25720961 ANGMOM= .02657056
DAN= 2.14602 DDN= 2.68596 L= 60.6968709
B= -2.401858 MOID= 1.06974006 TP= 2015-Jan-27.6973502915
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= 1.506 ROTPER= n.a.
H= 14.52 G= .150 B-V= n.a.
ALBEDO= .407 STYP= n.a.
ASTEROID comments:
1: soln ref.= JPL#32, OCC=0
2: source=ORB
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 14:39:42 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: 12345 (1993 FT8) {source: JPL#32}
Center body name: Sun (10) {source: DE441}
Center-site name: BODY CENTER
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 0.00000000,0.00000000,0.0000000 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 0.00000000,0.00000000,0.0000000 {E-lon(deg),Dxy(km),Dz(km)}
Center radii : 696000.0 x 696000.0 x 696000.0 k{Equator, meridian, pole}
Small perturbers: Yes {source: SB441-N16}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
Reference frame : ICRF
*******************************************************************************
Initial IAU76/J2000 heliocentric ecliptic osculating elements (au, days, deg.):
EPOCH= 2457108.5 ! 2015-Mar-27.00 (TDB) Residual RMS= .2812
EC= .1603033905689926 QR= 2.056207695854036 TP= 2457050.1973502915
OM= 106.4549280993016 W= 314.1929318541605 IN= 3.350816780296945
Equivalent ICRF heliocentric cartesian coordinates (au, au/d):
X= 3.047919278950221E-01 Y= 1.902892265722551E+00 Z= 7.692605770652556E-01
VX=-1.255238959074424E-02 VY= 2.052146789677108E-03 VZ= 1.612315394505861E-03
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= 1.506 ROTPER= n.a.
H= 14.52 G= .150 B-V= n.a.
ALBEDO= .407 STYP= n.a.
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183915 s]
X =-2.590350154796811E+00 Y =-7.949342693459856E-02 Z = 1.245107691757731E-01
VX=-1.454708370733871E-03 VY=-9.503445860627428E-03 VZ=-3.846514535533382E-03
LT= 1.498492268422344E-02 RG= 2.594558933811760E+00 RR= 1.558928955626413E-03
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
International Celestial Reference Frame (ICRF)
The ICRF is an adopted reference frame whose axes are defined relative to
fixed extragalactic radio sources distributed across the sky.
The ICRF was aligned with the prior FK5/J2000 dynamical system at the ~0.02
arcsecond level but is not identical and has no associated standard epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
#print(lines)
# use the target function to extract the coordinates
result = Horizons.extract_first_state_from_text( lines )
# check that the results are as expected
expected_array = np.array([ float('-2.590350154796811E+00'), float('-7.949342693459856E-02'), float('1.245107691757731E-01'),
float('-1.454708370733871E-03'), float('-9.503445860627428E-03'), float('-3.846514535533382E-03') ] )
assert np.allclose(expected_array, result, rtol=1e-08, atol=1e-08)
def test_nice_Horizons_A():
"""
Testing Mike A's convenience wrapper around Horizon query functionality
- Much of this test is being done to provide some reminder
to myself/ourselves as to how to use the Horizons tool
Deliberately *not* using all of the functionalities of pytest here.
Just want to keep it simple and keep it obvious what everything is supposed to be doing.
Here we extract the
HELIOCENTRIC state
for
Asteroid number 12345 (== 1993 FT8)
in an
EQUATORIAL FRAME (refplane='earth')
"""
# Define the variables that will be used in the query
target = '12345' # <<-- Asteroid number 12345 == 1993 FT8
centre = '500@10'
epochs = '2458850.0'
id_type = 'smallbody'
refplane= 'earth'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
JPL/HORIZONS 12345 (1993 FT8) 2022-Jan-28 14:39:42
Rec #: 12345 (+COV) Soln.date: 2021-Nov-10_08:38:58 # obs: 1959 (1993-2021)
IAU76/J2000 helio. ecliptic osc. elements (au, days, deg., period=Julian yrs):
EPOCH= 2457108.5 ! 2015-Mar-27.00 (TDB) Residual RMS= .2812
EC= .1603033905689926 QR= 2.056207695854036 TP= 2457050.1973502915
OM= 106.4549280993016 W= 314.1929318541605 IN= 3.350816780296945
A= 2.448750742541829 MA= 14.99600220651154 ADIST= 2.841293789229623
PER= 3.832 N= .25720961 ANGMOM= .02657056
DAN= 2.14602 DDN= 2.68596 L= 60.6968709
B= -2.401858 MOID= 1.06974006 TP= 2015-Jan-27.6973502915
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= 1.506 ROTPER= n.a.
H= 14.52 G= .150 B-V= n.a.
ALBEDO= .407 STYP= n.a.
ASTEROID comments:
1: soln ref.= JPL#32, OCC=0
2: source=ORB
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 14:39:42 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: 12345 (1993 FT8) {source: JPL#32}
Center body name: Sun (10) {source: DE441}
Center-site name: BODY CENTER
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 0.00000000,0.00000000,0.0000000 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 0.00000000,0.00000000,0.0000000 {E-lon(deg),Dxy(km),Dz(km)}
Center radii : 696000.0 x 696000.0 x 696000.0 k{Equator, meridian, pole}
Small perturbers: Yes {source: SB441-N16}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
Reference frame : ICRF
*******************************************************************************
Initial IAU76/J2000 heliocentric ecliptic osculating elements (au, days, deg.):
EPOCH= 2457108.5 ! 2015-Mar-27.00 (TDB) Residual RMS= .2812
EC= .1603033905689926 QR= 2.056207695854036 TP= 2457050.1973502915
OM= 106.4549280993016 W= 314.1929318541605 IN= 3.350816780296945
Equivalent ICRF heliocentric cartesian coordinates (au, au/d):
X= 3.047919278950221E-01 Y= 1.902892265722551E+00 Z= 7.692605770652556E-01
VX=-1.255238959074424E-02 VY= 2.052146789677108E-03 VZ= 1.612315394505861E-03
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= 1.506 ROTPER= n.a.
H= 14.52 G= .150 B-V= n.a.
ALBEDO= .407 STYP= n.a.
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183915 s]
X =-2.590350154796811E+00 Y =-7.949342693459856E-02 Z = 1.245107691757731E-01
VX=-1.454708370733871E-03 VY=-9.503445860627428E-03 VZ=-3.846514535533382E-03
LT= 1.498492268422344E-02 RG= 2.594558933811760E+00 RR= 1.558928955626413E-03
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
International Celestial Reference Frame (ICRF)
The ICRF is an adopted reference frame whose axes are defined relative to
fixed extragalactic radio sources distributed across the sky.
The ICRF was aligned with the prior FK5/J2000 dynamical system at the ~0.02
arcsecond level but is not identical and has no associated standard epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane )
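# (For reference, nice_Horizons presumably wraps astroquery's JPL-Horizons
# interface, roughly:
#   from astroquery.jplhorizons import Horizons as JPLHorizons
#   q = JPLHorizons(id=target, location=centre, epochs=float(epochs), id_type=id_type)
#   state = q.vectors(refplane=refplane)  # x,y,z,vx,vy,vz columns -> flat array
# See convenience_Horizons for the actual implementation.)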
# Check that the results are as expected
# - Lowered the accuracy from 1e-11 to 1e-8 as the discrepancy grows when JPL re-fits the orbit,
# and I don't want to keep hand-pasting different sets of results
assert np.allclose(expected_array, result, rtol=1e-8, atol=1e-8)
def test_nice_Horizons_B():
"""
Testing Mike A's convenience wrapper around Horizon query functionality
- Much of this test is being done to provide some reminder
to myself/ourselves as to how to use the Horizons tool
Deliberately *not* using all of the functionalities of pytest here.
Just want to keep it simple and keep it obvious what everything is supposed to be doing.
Here we extract the
HELIOCENTRIC state
for
GEOCENTER
in an
EQUATORIAL FRAME (refplane='earth')
"""
# Define the variables that will be used in the query
target = '399' # Earth
centre = '500@10'
epochs = '2458850.0'
id_type = 'majorbody'
refplane= 'earth'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
Revised: April 12, 2021 Earth 399
GEOPHYSICAL PROPERTIES (revised Aug 15, 2018):
Vol. Mean Radius (km) = 6371.01+-0.02 Mass x10^24 (kg)= 5.97219+-0.0006
Equ. radius, km = 6378.137 Mass layers:
Polar axis, km = 6356.752 Atmos = 5.1 x 10^18 kg
Flattening = 1/298.257223563 oceans = 1.4 x 10^21 kg
Density, g/cm^3 = 5.51 crust = 2.6 x 10^22 kg
J2 (IERS 2010) = 0.00108262545 mantle = 4.043 x 10^24 kg
g_p, m/s^2 (polar) = 9.8321863685 outer core = 1.835 x 10^24 kg
g_e, m/s^2 (equatorial) = 9.7803267715 inner core = 9.675 x 10^22 kg
g_o, m/s^2 = 9.82022 Fluid core rad = 3480 km
GM, km^3/s^2 = 398600.435436 Inner core rad = 1215 km
GM 1-sigma, km^3/s^2 = 0.0014 Escape velocity = 11.186 km/s
Rot. Rate (rad/s) = 0.00007292115 Surface area:
Mean sidereal day, hr = 23.9344695944 land = 1.48 x 10^8 km
Mean solar day 2000.0, s = 86400.002 sea = 3.62 x 10^8 km
Mean solar day 1820.0, s = 86400.0 Love no., k2 = 0.299
Moment of inertia = 0.3308 Atm. pressure = 1.0 bar
Mean temperature, K = 270 Volume, km^3 = 1.08321 x 10^12
Mean effect. IR temp, K = 255 Magnetic moment = 0.61 gauss Rp^3
Geometric albedo = 0.367 Vis. mag. V(1,0)= -3.86
Solar Constant (W/m^2) = 1367.6 (mean), 1414 (perihelion), 1322 (aphelion)
HELIOCENTRIC ORBIT CHARACTERISTICS:
Obliquity to orbit, deg = 23.4392911 Sidereal orb period = 1.0000174 y
Orbital speed, km/s = 29.79 Sidereal orb period = 365.25636 d
Mean daily motion, deg/d = 0.9856474 Hill's sphere radius = 234.9
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 16:02:02 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: Earth (399) {source: DE441}
Center body name: Sun (10) {source: DE441}
Center-site name: BODY CENTER
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 0.00000000,0.00000000,0.0000000 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 0.00000000,0.00000000,0.0000000 {E-lon(deg),Dxy(km),Dz(km)}
Center radii : 696000.0 x 696000.0 x 696000.0 k{Equator, meridian, pole}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
Reference frame : ICRF
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183915 s]
X =-1.749585912701602E-01 Y = 8.877645495087018E-01 Z = 3.848482875671789E-01
VX=-1.721190438300784E-02 VY=-2.874039035670773E-03 VZ=-1.245648654352060E-03
LT= 5.678966496273616E-03 RG= 9.832825679666131E-01 RR=-1.981645766688001E-05
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
International Celestial Reference Frame (ICRF)
The ICRF is an adopted reference frame whose axes are defined relative to
fixed extragalactic radio sources distributed across the sky.
The ICRF was aligned with the prior FK5/J2000 dynamical system at the ~0.02
arcsecond level but is not identical and has no associated standard epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane)
print('result=\n' , result)
# Check that the results are as expected
assert np.allclose(expected_array, result, rtol=1e-10, atol=1e-10)
def test_nice_Horizons_C():
"""
Testing Mike A's convenience wrapper around Horizon query functionality
- Much of this test is being done to provide some reminder
to myself/ourselves as to how to use the Horizons tool
Deliberately *not* using all of the functionalities of pytest here.
Just want to keep it simple and keep it obvious what everything is supposed to be doing.
Here we extract the
TOPOCENTRIC (F51) state
for
Asteroid number 54321 (== 2000 JA81)
in an
EQUATORIAL FRAME (refplane='earth')
"""
# Define the variables that will be used in the query
target = '54321' # <<-- Asteroid number 54321 == 2000 JA81
centre = 'F51'
epochs = '2458850.0'
id_type = 'smallbody'
refplane= 'earth'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
JPL/HORIZONS 54321 (2000 JA81) 2022-Jan-28 16:08:57
Rec #: 54321 (+COV) Soln.date: 2021-Oct-08_04:39:24 # obs: 1315 (1979-2021)
IAU76/J2000 helio. ecliptic osc. elements (au, days, deg., period=Julian yrs):
EPOCH= 2456698.5 ! 2014-Feb-10.00 (TDB) Residual RMS= .27282
EC= .2508846058943067 QR= 1.938411174247326 TP= 2456093.1011463138
OM= 91.32740861093403 W= 91.37096816741918 IN= 6.76912753748867
A= 2.58760024089671 MA= 143.3507250314229 ADIST= 3.236789307546094
PER= 4.1625 N= .236787236 ANGMOM= .026786318
DAN= 2.43937 DDN= 2.41026 L= 182.707997
B= 6.7671807 MOID= .95572901 TP= 2012-Jun-14.6011463138
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= n.a. ROTPER= n.a.
H= 14.45 G= .150 B-V= n.a.
ALBEDO= n.a. STYP= n.a.
ASTEROID comments:
1: soln ref.= JPL#33, OCC=0
2: source=ORB
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 16:08:57 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: 54321 (2000 JA81) {source: JPL#33}
Center body name: Earth (399) {source: DE441}
Center-site name: Pan-STARRS 1, Haleakala
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 203.744100,20.7071888,3.0763821 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 203.744100,5971.48324,2242.1878 {E-lon(deg),Dxy(km),Dz(km)}
Center pole/equ : ITRF93 {East-longitude positive}
Center radii : 6378.1 x 6378.1 x 6356.8 km {Equator, meridian, pole}
Small perturbers: Yes {source: SB441-N16}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
EOP file : eop.220127.p220422
EOP coverage : DATA-BASED 1962-JAN-20 TO 2022-JAN-27. PREDICTS-> 2022-APR-21
Reference frame : ICRF
*******************************************************************************
Initial IAU76/J2000 heliocentric ecliptic osculating elements (au, days, deg.):
EPOCH= 2456698.5 ! 2014-Feb-10.00 (TDB) Residual RMS= .27282
EC= .2508846058943067 QR= 1.938411174247326 TP= 2456093.1011463138
OM= 91.32740861093403 W= 91.37096816741918 IN= 6.76912753748867
Equivalent ICRF heliocentric cartesian coordinates (au, au/d):
X= 2.934573285149345E+00 Y=-8.702901499041770E-01 Z=-7.535748078855007E-01
VX= 3.948600090813408E-03 VY= 7.155151609877323E-03 VZ= 2.568700850735469E-03
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= n.a. ROTPER= n.a.
H= 14.45 G= .150 B-V= n.a.
ALBEDO= n.a. STYP= n.a.
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183916 s]
X = 3.140272938432556E-01 Y = 1.401450872643150E+00 Z = 5.824305212783573E-01
VX= 6.595793009138184E-03 VY= 5.366428257622971E-04 VZ= 1.577496239642071E-03
LT= 8.950941094980980E-03 RG= 1.549807407979245E+00 RR= 2.414570690380698E-03
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
International Celestial Reference Frame (ICRF)
The ICRF is an adopted reference frame whose axes are defined relative to
fixed extragalactic radio sources distributed across the sky.
The ICRF was aligned with the prior FK5/J2000 dynamical system at the ~0.02
arcsecond level but is not identical and has no associated standard epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane )
print('result=\n' , result)
# Check that the results are as expected
assert np.allclose(expected_array, result, rtol=1e-11, atol=1e-11)
def test_nice_Horizons_D():
"""
Similar to test_nice_Horizons_C, but ECLIPTIC instead of equatorial
Here we extract the
TOPOCENTRIC (F51) state
for
Asteroid number 54321 (== 2000 JA81)
in an
ECLIPTIC FRAME (refplane='ecliptic')
"""
# Define the variables that will be used in the query
target = '54321' # <<-- Asteroid number 54321 == 2000 JA81
centre = 'F51'
epochs = '2458850.0'
id_type = 'smallbody'
refplane= 'ecliptic'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
JPL/HORIZONS 54321 (2000 JA81) 2022-Jan-28 16:19:21
Rec #: 54321 (+COV) Soln.date: 2021-Oct-08_04:39:24 # obs: 1315 (1979-2021)
IAU76/J2000 helio. ecliptic osc. elements (au, days, deg., period=Julian yrs):
EPOCH= 2456698.5 ! 2014-Feb-10.00 (TDB) Residual RMS= .27282
EC= .2508846058943067 QR= 1.938411174247326 TP= 2456093.1011463138
OM= 91.32740861093403 W= 91.37096816741918 IN= 6.76912753748867
A= 2.58760024089671 MA= 143.3507250314229 ADIST= 3.236789307546094
PER= 4.1625 N= .236787236 ANGMOM= .026786318
DAN= 2.43937 DDN= 2.41026 L= 182.707997
B= 6.7671807 MOID= .95572901 TP= 2012-Jun-14.6011463138
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= n.a. ROTPER= n.a.
H= 14.45 G= .150 B-V= n.a.
ALBEDO= n.a. STYP= n.a.
ASTEROID comments:
1: soln ref.= JPL#33, OCC=0
2: source=ORB
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 16:19:22 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: 54321 (2000 JA81) {source: JPL#33}
Center body name: Earth (399) {source: DE441}
Center-site name: Pan-STARRS 1, Haleakala
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 203.744100,20.7071888,3.0763821 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 203.744100,5971.48324,2242.1878 {E-lon(deg),Dxy(km),Dz(km)}
Center pole/equ : ITRF93 {East-longitude positive}
Center radii : 6378.1 x 6378.1 x 6356.8 km {Equator, meridian, pole}
Small perturbers: Yes {source: SB441-N16}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
EOP file : eop.220127.p220422
EOP coverage : DATA-BASED 1962-JAN-20 TO 2022-JAN-27. PREDICTS-> 2022-APR-21
Reference frame : Ecliptic of J2000.0
*******************************************************************************
Initial IAU76/J2000 heliocentric ecliptic osculating elements (au, days, deg.):
EPOCH= 2456698.5 ! 2014-Feb-10.00 (TDB) Residual RMS= .27282
EC= .2508846058943067 QR= 1.938411174247326 TP= 2456093.1011463138
OM= 91.32740861093403 W= 91.37096816741918 IN= 6.76912753748867
Equivalent ICRF heliocentric cartesian coordinates (au, au/d):
X= 2.934573285149345E+00 Y=-8.702901499041770E-01 Z=-7.535748078855007E-01
VX= 3.948600090813408E-03 VY= 7.155151609877323E-03 VZ= 2.568700850735469E-03
Asteroid physical parameters (km, seconds, rotational period in hours):
GM= n.a. RAD= n.a. ROTPER= n.a.
H= 14.45 G= .150 B-V= n.a.
ALBEDO= n.a. STYP= n.a.
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183916 s]
X = 3.140272938432556E-01 Y = 1.517483592803339E+00 Z =-2.309558662379520E-02
VX= 6.595793009138184E-03 VY= 1.119852134073137E-03 VZ= 1.233860245870196E-03
LT= 8.950941094980980E-03 RG= 1.549807407979244E+00 RR= 2.414570690380698E-03
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
Ecliptic at the standard reference epoch
Reference epoch: J2000.0
X-Y plane: adopted Earth orbital plane at the reference epoch
Note: IAU76 obliquity of 84381.448 arcseconds wrt ICRF X-Y plane
X-axis : ICRF
Z-axis : perpendicular to the X-Y plane in the directional (+ or -) sense
of Earth's north pole at the reference epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane)
print('result=\n' , result)
# Check that the results are as expected
assert np.allclose(expected_array, result, rtol=1e-11, atol=1e-11)
def test_nice_Horizons_E():
"""
Here we use Horizons to get the Heliocentric EQUATORIAL position of the Observatory
(NB we use a hack, setting the target as the Sun, and the center as the observatory)
Here we extract the
TOPOCENTRIC (F51) state
for
The Sun
in an
EQUATORIAL FRAME (refplane='earth')
"""
# Define the variables that will be used in the query
target = '10'
centre = 'F51'
epochs = '2458850.0'
id_type = 'majorbody'
refplane='earth'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
Revised: July 31, 2013 Sun 10
PHYSICAL PROPERTIES (updated 2018-Aug-15):
GM, km^3/s^2 = 132712440041.93938 Mass, 10^24 kg = ~1988500
Vol. mean radius, km = 695700 Volume, 10^12 km^3 = 1412000
Solar radius (IAU) = 696000 km Mean density, g/cm^3 = 1.408
Radius (photosphere) = 696500 km Angular diam at 1 AU = 1919.3"
Photosphere temp., K = 6600 (bottom) Photosphere temp., K = 4400(top)
Photospheric depth = ~500 km Chromospheric depth = ~2500 km
Flatness, f = 0.00005 Adopted sid. rot. per.= 25.38 d
Surface gravity = 274.0 m/s^2 Escape speed, km/s = 617.7
Pole (RA,DEC), deg. = (286.13, 63.87) Obliquity to ecliptic = 7.25 deg.
Solar constant (1 AU) = 1367.6 W/m^2 Luminosity, 10^24 J/s = 382.8
Mass-energy conv rate = 4.260 x 10^9 kg/s Effective temp, K = 5772
Sunspot cycle = 11.4 yr Cycle 24 sunspot min. = 2008 A.D.
Motion relative to nearby stars = apex : R.A.= 271 deg.; DEC.= +30 deg.
speed: 19.4 km/s (0.0112 au/day)
Motion relative to 2.73K BB/CBR = apex : l= 264.7 +- 0.8; b= 48.2 +- 0.5 deg.
speed: 369 +-11 km/s
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 16:31:17 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: Sun (10) {source: DE441}
Center body name: Earth (399) {source: DE441}
Center-site name: Pan-STARRS 1, Haleakala
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 203.744100,20.7071888,3.0763821 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 203.744100,5971.48324,2242.1878 {E-lon(deg),Dxy(km),Dz(km)}
Center pole/equ : ITRF93 {East-longitude positive}
Center radii : 6378.1 x 6378.1 x 6356.8 km {Equator, meridian, pole}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
EOP file : eop.220127.p220422
EOP coverage : DATA-BASED 1962-JAN-20 TO 2022-JAN-27. PREDICTS-> 2022-APR-21
Reference frame : ICRF
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183916 s]
X = 1.749807755042866E-01 Y =-8.877977147373203E-01 Z =-3.848633185157228E-01
VX= 1.742085896638510E-02 VY= 3.013989047237847E-03 VZ= 1.245251026128347E-03
LT= 5.679196211202660E-03 RG= 9.833223418736220E-01 RR=-1.085591308641175E-04
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
International Celestial Reference Frame (ICRF)
The ICRF is an adopted reference frame whose axes are defined relative to
fixed extragalactic radio sources distributed across the sky.
The ICRF was aligned with the prior FK5/J2000 dynamical system at the ~0.02
arcsecond level but is not identical and has no associated standard epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane)
print('result=\n' , result)
# Check that the results are as expected
assert np.allclose(expected_array, result, rtol=1e-11, atol=1e-11)
def test_nice_Horizons_F():
"""
Similar to test_nice_Horizons_E, but ECLIPTIC instead of equatorial
Here we use Horizons to get the Heliocentric ECLIPTIC position of the Observatory
(NB we use a hack, setting the target as the Sun, and the center as the observatory)
Here we extract the
TOPOCENTRIC (F51) state
for
The Sun
in an
ECLIPTIC FRAME (refplane='ecliptic')
"""
# Define the variables that will be used in the query
target = '10'
centre = 'F51'
epochs = '2458850.0'
id_type = 'majorbody'
refplane='ecliptic'
# Hardpaste the expected results from a by-hand query of horizons
hardpasted_results = """
*******************************************************************************
Revised: July 31, 2013 Sun 10
PHYSICAL PROPERTIES (updated 2018-Aug-15):
GM, km^3/s^2 = 132712440041.93938 Mass, 10^24 kg = ~1988500
Vol. mean radius, km = 695700 Volume, 10^12 km^3 = 1412000
Solar radius (IAU) = 696000 km Mean density, g/cm^3 = 1.408
Radius (photosphere) = 696500 km Angular diam at 1 AU = 1919.3"
Photosphere temp., K = 6600 (bottom) Photosphere temp., K = 4400(top)
Photospheric depth = ~500 km Chromospheric depth = ~2500 km
Flatness, f = 0.00005 Adopted sid. rot. per.= 25.38 d
Surface gravity = 274.0 m/s^2 Escape speed, km/s = 617.7
Pole (RA,DEC), deg. = (286.13, 63.87) Obliquity to ecliptic = 7.25 deg.
Solar constant (1 AU) = 1367.6 W/m^2 Luminosity, 10^24 J/s = 382.8
Mass-energy conv rate = 4.260 x 10^9 kg/s Effective temp, K = 5772
Sunspot cycle = 11.4 yr Cycle 24 sunspot min. = 2008 A.D.
Motion relative to nearby stars = apex : R.A.= 271 deg.; DEC.= +30 deg.
speed: 19.4 km/s (0.0112 au/day)
Motion relative to 2.73K BB/CBR = apex : l= 264.7 +- 0.8; b= 48.2 +- 0.5 deg.
speed: 369 +-11 km/s
*******************************************************************************
*******************************************************************************
Ephemeris / WWW_USER Fri Jan 28 16:43:25 2022 Pasadena, USA / Horizons
*******************************************************************************
Target body name: Sun (10) {source: DE441}
Center body name: Earth (399) {source: DE441}
Center-site name: Pan-STARRS 1, Haleakala
*******************************************************************************
Start time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Stop time : A.D. 2020-Jan-01 12:00:00.0000 TDB
Step-size : DISCRETE TIME-LIST
*******************************************************************************
Center geodetic : 203.744100,20.7071888,3.0763821 {E-lon(deg),Lat(deg),Alt(km)}
Center cylindric: 203.744100,5971.48324,2242.1878 {E-lon(deg),Dxy(km),Dz(km)}
Center pole/equ : ITRF93 {East-longitude positive}
Center radii : 6378.1 x 6378.1 x 6356.8 km {Equator, meridian, pole}
Output units : AU-D
Output type : GEOMETRIC cartesian states
Output format : 3 (position, velocity, LT, range, range-rate)
EOP file : eop.220127.p220422
EOP coverage : DATA-BASED 1962-JAN-20 TO 2022-JAN-27. PREDICTS-> 2022-APR-21
Reference frame : Ecliptic of J2000.0
*******************************************************************************
JDTDB
X Y Z
VX VY VZ
LT RG RR
*******************************************************************************
$$SOE
2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.183916 s]
X = 1.749807755042866E-01 Y =-9.676283142792063E-01 Z = 4.045892447000447E-05
VX= 1.742085896638510E-02 VY= 3.260613297708340E-03 VZ=-5.640051197420877E-05
LT= 5.679196211202660E-03 RG= 9.833223418736221E-01 RR=-1.085591308641179E-04
$$EOE
*******************************************************************************
TIME
Barycentric Dynamical Time ("TDB" or T_eph) output was requested. This
continuous relativistic coordinate time is equivalent to the relativistic
proper time of a clock at rest in a reference frame comoving with the
solar system barycenter but outside the system's gravity well. It is the
independent variable in the solar system relativistic equations of motion.
TDB runs at a uniform rate of one SI second per second and is independent
of irregularities in Earth's rotation.
Calendar dates prior to 1582-Oct-15 are in the Julian calendar system.
Later calendar dates are in the Gregorian system.
REFERENCE FRAME AND COORDINATES
Ecliptic at the standard reference epoch
Reference epoch: J2000.0
X-Y plane: adopted Earth orbital plane at the reference epoch
Note: IAU76 obliquity of 84381.448 arcseconds wrt ICRF X-Y plane
X-axis : ICRF
Z-axis : perpendicular to the X-Y plane in the directional (+ or -) sense
of Earth's north pole at the reference epoch.
Symbol meaning [1 au= 149597870.700 km, 1 day= 86400.0 s]:
JDTDB Julian Day Number, Barycentric Dynamical Time
del_T Time-scale conversion difference TDB - UT (s)
X X-component of position vector (au)
Y Y-component of position vector (au)
Z Z-component of position vector (au)
VX X-component of velocity vector (au/day)
VY Y-component of velocity vector (au/day)
VZ Z-component of velocity vector (au/day)
LT One-way down-leg Newtonian light-time (day)
RG Range; distance from coordinate center (au)
RR Range-rate; radial velocity wrt coord. center (au/day)
ABERRATIONS AND CORRECTIONS
Geometric state vectors have NO corrections or aberrations applied.
Computations by ...
Solar System Dynamics Group, Horizons On-Line Ephemeris System
4800 Oak Grove Drive, Jet Propulsion Laboratory
Pasadena, CA 91109 USA
General site: https://ssd.jpl.nasa.gov/
Mailing list: https://ssd.jpl.nasa.gov/email_list.html
System news : https://ssd.jpl.nasa.gov/horizons/news.html
User Guide : https://ssd.jpl.nasa.gov/horizons/manual.html
Connect : browser https://ssd.jpl.nasa.gov/horizons/app.html#/x
API https://ssd-api.jpl.nasa.gov/doc/horizons.html
command-line telnet ssd.jpl.nasa.gov 6775
e-mail/batch https://ssd.jpl.nasa.gov/ftp/ssd/hrzn_batch.txt
scripts https://ssd.jpl.nasa.gov/ftp/ssd/SCRIPTS
Author : Jon.D.Giorgini@jpl.nasa.gov
*******************************************************************************
""".split('\n')[1:-1]
# Extract the hardpasted results into an array
# (using convenience func, "extract_first_state_from_text", tested above)
expected_array = Horizons.extract_first_state_from_text( hardpasted_results )
# Call the nice_Horizons function (i.e. the focus of the test)
result = Horizons.nice_Horizons(target, centre, epochs, id_type, refplane=refplane)
print('result=\n' , result)
# Check that the results are as expected
assert np.allclose(expected_array, result, rtol=1e-11, atol=1e-11)
|
{"hexsha": "e9459c6bf47c528b6a046741149a42cf8ab0a673", "size": 55653, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_convenience_Horizons.py", "max_stars_repo_name": "Smithsonian/cheby_checker", "max_stars_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-05T15:20:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-05T15:20:30.000Z", "max_issues_repo_path": "tests/test_convenience_Horizons.py", "max_issues_repo_name": "Smithsonian/cheby_checker", "max_issues_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_convenience_Horizons.py", "max_forks_repo_name": "Smithsonian/cheby_checker", "max_forks_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-04T15:26:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-04T18:23:13.000Z", "avg_line_length": 47.2837723025, "max_line_length": 133, "alphanum_fraction": 0.5862397355, "include": true, "reason": "import numpy", "num_tokens": 15645}
|
[STATEMENT]
lemma (in Corps) value_inf_zero:"\<lbrakk>valuation K v; x \<in> carrier K; v x = \<infinity>\<rbrakk>
\<Longrightarrow> x = \<zero>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; x \<in> carrier K; v x = \<infinity>\<rbrakk> \<Longrightarrow> x = \<zero>
[PROOF STEP]
by (rule contrapos_pp, simp+,
frule val_nonzero_noninf[of v x], assumption+, simp)
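(* Proof idea: argue by contraposition — if x were nonzero, val_nonzero_noninf
   would give v x \<noteq> \<infinity>, contradicting the assumption. *)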
|
{"llama_tokens": 159, "file": "Valuation_Valuation1", "length": 1}
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from keras.models import Model
import sys
# print("before")
# print(sys.path)
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/utils')
from backend import (
DummyNetFeature,
InceptionV3Feature,
VGG16Feature,
ResNet50Feature,
MobileNetV2Feature,
)
from backend import InceptionResNetV2Feature
from utils import make_batches
from top_models import glob_pool_norm, glob_pool, glob_softmax
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
class BaseModel(object):
def __init__(
self,
input_shape,
backend,
frontend,
embedding_size,
connect_layer=-1,
train_from_layer=0,
distance='l2',
weights='imagenet',
):
"""Base model consists of backend feature extractor (pretrained model) and a front-end model.
Input:
backend: string: one of predefined features extractors. Name matches model name from keras.applications
input_shape: 3D tuple of integers, shape of input tuple
frontend: string, name of a function to define top model from top_models.py
embedding_size: ingeter, size of produced embedding, eg. 256
connect_layer: integer (positive or negative) or a string: either index of a layer or name of a layer
that is used to connect base model with top model
train_from_layer: integer (positive or negative) or a string: either index of a layer or name of a layer
to train the model from.
distance: string, distance function to calculate distance between embeddings. TODO: implement
"""
self.input_shape = input_shape
self.embedding_size = embedding_size
self.weights = weights
self.backend = backend
self.frontend = frontend
self.feature_extractor()
self.connect_layer = self.get_connect_layer(connect_layer)
self.backend_model()
self.features_shape()
self.train_from_layer = self.get_train_from_layer(train_from_layer)
self.top_model()
self.distance = distance
def feature_extractor(self):
""" Base feature extractor """
if self.backend == 'InceptionV3':
self.backend_class = InceptionV3Feature(self.input_shape, self.weights)
elif self.backend == 'VGG16':
self.backend_class = VGG16Feature(self.input_shape, self.weights)
elif self.backend == 'ResNet50':
self.backend_class = ResNet50Feature(self.input_shape, self.weights)
elif self.backend == 'InceptionResNetV2':
self.backend_class = InceptionResNetV2Feature(self.input_shape, self.weights)
elif self.backend == 'DummyNet':
self.backend_class = DummyNetFeature(self.input_shape, self.weights)
elif self.backend == 'MobileNetV2':
self.backend_class = MobileNetV2Feature(self.input_shape, self.weights)
else:
            raise Exception(
                'Architecture is not supported! Use one of: InceptionV3, VGG16, ResNet50, InceptionResNetV2, MobileNetV2, DummyNet.'
)
self.feature_extractor = self.backend_class.feature_extractor
def normalize_input(self, image):
'''Normalise input to a CNN depending on a backend'''
return self.backend_class.normalize(image)
def backend_model(self):
""" Model to obtain features from a specific layer of feature extractor."""
self.backend_model = Model(
inputs=self.feature_extractor.get_input_at(0),
outputs=self.feature_extractor.layers[self.connect_layer].get_output_at(0),
name='features_model',
)
def features_shape(self):
self.features_shape = self.backend_model.get_output_shape_at(0)[1:]
print('Shape of base features: {}'.format(self.features_shape))
def preproc_predict(self, imgs, batch_size=32):
"""Preprocess images and predict with the model (no batch processing for first step)
Input:
imgs: 4D float or int array of images
batch_size: integer, size of the batch
Returns:
predictions: numpy array with predictions (num_images, len_model_output)
"""
print('base_model preproc_predict!')
# import utool as ut
# ut.embed()
batch_idx = make_batches(imgs.shape[0], batch_size)
imgs_preds = np.zeros((imgs.shape[0],) + self.model.get_output_shape_at(0)[1:])
print('Computing predictions with the shape {}'.format(imgs_preds.shape))
for sid, eid in batch_idx:
preproc = self.backend_class.normalize(imgs[sid:eid])
imgs_preds[sid:eid] = self.model.predict_on_batch(preproc)
print('imgs_preds = %s' % imgs_preds)
return imgs_preds
def top_model(self, verbose=1):
"""Model on top of features."""
if self.frontend == 'glob_pool_norm':
self.top_model = glob_pool_norm(
embedding_size=self.embedding_size, backend_model=self.backend_model
)
elif self.frontend == 'glob_pool':
self.top_model = glob_pool(
embedding_size=self.embedding_size, backend_model=self.backend_model
)
elif self.frontend == 'glob_softmax':
self.top_model = glob_softmax(
embedding_size=self.embedding_size, backend_model=self.backend_model
)
else:
raise Exception('{} is not supported'.format(self.frontend))
# Freeze layers as per config
self.set_trainable()
def get_connect_layer(self, connect_layer):
"""If connect_layer is a string (layer name), return layer index.
If connect layer is a negative integer, return positive layer index."""
index = None
if isinstance(connect_layer, str):
for idx, layer in enumerate(self.feature_extractor.layers):
if layer.name == connect_layer:
index = idx
break
elif isinstance(connect_layer, int):
if connect_layer >= 0:
index = connect_layer
else:
index = connect_layer + len(self.feature_extractor.layers)
        else:
            raise ValueError('Check type of connect_layer')
print(
'Connecting layer {} - {}'.format(
index, self.feature_extractor.layers[index].name
)
)
return index
def get_train_from_layer(self, train_from_layer):
"""If train_from_layer is a string (layer name), return layer index.
If train_from_layer layer is a negative integer, return positive layer index."""
index = None
if isinstance(train_from_layer, str):
for idx, layer in enumerate(self.feature_extractor.layers):
if layer.name == train_from_layer:
index = idx
break
        elif isinstance(train_from_layer, int):
if train_from_layer >= 0:
index = train_from_layer
else:
index = train_from_layer + len(self.feature_extractor.layers)
print(
'Train network from layer {} - {}'.format(
index, self.feature_extractor.layers[index].name
)
)
return index
def load_weights(self, weight_path, by_name=False):
self.model.load_weights(weight_path, by_name)
def set_all_layers_trainable(self):
for i in range(len(self.top_model.layers)):
self.top_model.layers[i].trainable = True
def set_trainable(self):
self.set_all_layers_trainable()
for i in range(self.train_from_layer):
self.top_model.layers[i].trainable = False
print(
'Layers are frozen as per config. Non-trainable layers are till layer {} - {}'.format(
self.train_from_layer, self.top_model.layers[self.train_from_layer].name
)
)
def warm_up_train(
self,
train_gen,
valid_gen,
nb_epochs,
batch_size,
learning_rate,
steps_per_epoch,
distance='l2',
saved_weights_name='best_weights.h5',
logs_file='history.csv',
plot_file='plot.png',
debug=False,
):
"""Train only randomly initialised layers of top model"""
# Freeze base model
self.set_all_layers_trainable()
backend_model_len = len(self.backend_model.layers)
        print('Freezing layers before warm-up training')
for i in range(backend_model_len):
self.top_model.layers[i].trainable = False
for layer in self.top_model.layers:
print(layer.name, layer.trainable)
# Compile the model
self.compile_model(learning_rate)
# Warm-up training
csv_logger = CSVLogger(logs_file, append=True)
self.model.fit_generator(
generator=train_gen,
steps_per_epoch=steps_per_epoch,
epochs=nb_epochs,
            verbose=1 if debug else 2,  # align with the verbosity convention used in train()
validation_data=valid_gen,
validation_steps=steps_per_epoch // 5 + 1,
callbacks=[csv_logger],
)
self.top_model.save_weights(saved_weights_name)
# Freeze layers as per config
self.set_trainable()
def train(
self,
train_gen,
valid_gen,
nb_epochs,
batch_size,
learning_rate,
steps_per_epoch,
distance='l2',
saved_weights_name='best_weights.h5',
logs_file='history.csv',
debug=False,
weights=None,
):
# Compile the model
if weights is None:
self.compile_model(learning_rate)
else:
self.compile_model(learning_rate, weights=weights)
# Make a few callbacks
early_stop = EarlyStopping(
monitor='val_loss',
patience=5, # changed from 3
min_delta=0.001,
mode='min',
verbose=1,
)
checkpoint = ModelCheckpoint(
saved_weights_name,
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='min',
period=1,
)
csv_logger = CSVLogger(logs_file, append=True)
############################################
# Start the training process
############################################
self.model.fit_generator(
generator=train_gen,
steps_per_epoch=steps_per_epoch,
epochs=nb_epochs,
verbose=1 if debug else 2,
validation_data=valid_gen,
validation_steps=steps_per_epoch // 5 + 1,
callbacks=[early_stop, checkpoint, csv_logger],
)
def precompute_features(self, imgs, batch_size):
imgs = self.backend_class.preprocess_imgs(imgs)
features = self.backend_model.predict(imgs, batch_size)
return features
|
{"hexsha": "b6e7ad19d655bb10c87cecbdcaa0cf95aa0a89a5", "size": 11242, "ext": "py", "lang": "Python", "max_stars_repo_path": "wbia_pie/model/base_model.py", "max_stars_repo_name": "karenc/wbia-plugin-pie", "max_stars_repo_head_hexsha": "0e0130501108855c437684914c41130e49037b91", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wbia_pie/model/base_model.py", "max_issues_repo_name": "karenc/wbia-plugin-pie", "max_issues_repo_head_hexsha": "0e0130501108855c437684914c41130e49037b91", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-13T18:28:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-30T05:25:26.000Z", "max_forks_repo_path": "wbia_pie/model/base_model.py", "max_forks_repo_name": "karenc/wbia-plugin-pie", "max_forks_repo_head_hexsha": "0e0130501108855c437684914c41130e49037b91", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-28T07:29:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T07:29:12.000Z", "avg_line_length": 36.3818770227, "max_line_length": 112, "alphanum_fraction": 0.6147482654, "include": true, "reason": "import numpy", "num_tokens": 2271}
|
// kv_record_store.cpp
/**
* Copyright (C) 2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
#include <algorithm>
#include <climits>
#include <boost/scoped_ptr.hpp>
#include <boost/static_assert.hpp>
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/kv/dictionary/kv_dictionary_update.h"
#include "mongo/db/storage/kv/dictionary/kv_record_store.h"
#include "mongo/db/storage/kv/dictionary/kv_size_storer.h"
#include "mongo/db/storage/kv/dictionary/visible_id_tracker.h"
#include "mongo/db/storage/kv/slice.h"
#include "mongo/platform/endian.h"
#include "mongo/util/log.h"
namespace mongo {
namespace {
const long long kScanOnCollectionCreateThreshold = 10000;
}
KVRecordStore::KVRecordStore( KVDictionary *db,
OperationContext* opCtx,
StringData ns,
StringData ident,
const CollectionOptions& options,
KVSizeStorer *sizeStorer )
: RecordStore(ns),
_db(db),
_ident(ident.toString()),
_sizeStorer(sizeStorer)
{
invariant(_db != NULL);
// Get the next id, which is one greater than the greatest stored.
boost::scoped_ptr<RecordIterator> iter(getIterator(opCtx, RecordId(), CollectionScanParams::BACKWARD));
if (!iter->isEOF()) {
const RecordId lastId = iter->curr();
invariant(lastId.isNormal());
_nextIdNum.store(lastId.repr() + 1);
} else {
// Need to start at 1 so we are within bounds of RecordId::isNormal()
_nextIdNum.store(1);
}
if (_sizeStorer) {
long long numRecords;
long long dataSize;
_sizeStorer->load(_ident, &numRecords, &dataSize);
if (numRecords < kScanOnCollectionCreateThreshold) {
LOG(1) << "Doing scan of collection " << ns << " to refresh numRecords and dataSize";
_numRecords.store(0);
_dataSize.store(0);
for (boost::scoped_ptr<RecordIterator> iter(getIterator(opCtx)); !iter->isEOF(); ) {
RecordId loc = iter->getNext();
RecordData data = iter->dataFor(loc);
_numRecords.fetchAndAdd(1);
_dataSize.fetchAndAdd(data.size());
}
if (numRecords != _numRecords.load()) {
warning() << "Stored value for " << ns << " numRecords was " << numRecords
<< " but actual value is " << _numRecords.load();
}
if (dataSize != _dataSize.load()) {
warning() << "Stored value for " << ns << " dataSize was " << dataSize
<< " but actual value is " << _dataSize.load();
}
} else {
_numRecords.store(numRecords);
_dataSize.store(dataSize);
}
_sizeStorer->onCreate(this, _ident, _numRecords.load(), _dataSize.load());
}
}
KVRecordStore::~KVRecordStore() {
if (_sizeStorer) {
_sizeStorer->onDestroy(_ident, _numRecords.load(), _dataSize.load());
}
}
#define invariantKVOK(s, expr) massert(28627, expr, s.isOK())
long long KVRecordStore::dataSize( OperationContext* txn ) const {
if (_sizeStorer) {
return _dataSize.load();
} else {
return _db->getStats().dataSize;
}
}
long long KVRecordStore::numRecords( OperationContext* txn ) const {
if (_sizeStorer) {
return _numRecords.load();
} else {
return _db->getStats().numKeys;
}
}
int64_t KVRecordStore::storageSize( OperationContext* txn,
BSONObjBuilder* extraInfo,
int infoLevel ) const {
return _db->getStats().storageSize;
}
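    // Recovery-unit change that reverses the in-memory record/size deltas if
    // the enclosing transaction rolls back (see _updateStats below).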
class RollbackSizeChange : public RecoveryUnit::Change {
KVRecordStore *_rs;
long long _nrDelta;
long long _dsDelta;
public:
RollbackSizeChange(KVRecordStore *rs, long long nrDelta, long long dsDelta)
: _rs(rs),
_nrDelta(nrDelta),
_dsDelta(dsDelta)
{}
void commit() {}
void rollback() {
_rs->undoUpdateStats(_nrDelta, _dsDelta);
}
};
void KVRecordStore::undoUpdateStats(long long nrDelta, long long dsDelta) {
invariant(_sizeStorer);
_numRecords.subtractAndFetch(nrDelta);
_dataSize.subtractAndFetch(dsDelta);
}
void KVRecordStore::_updateStats(OperationContext *txn, long long nrDelta, long long dsDelta) {
if (_sizeStorer) {
_numRecords.addAndFetch(nrDelta);
_dataSize.addAndFetch(dsDelta);
txn->recoveryUnit()->registerChange(new RollbackSizeChange(this, nrDelta, dsDelta));
}
}
void KVRecordStore::updateStatsAfterRepair(OperationContext* txn, long long numRecords, long long dataSize) {
if (_sizeStorer) {
_numRecords.store(numRecords);
_dataSize.store(dataSize);
_sizeStorer->store(this, _ident, numRecords, dataSize);
_sizeStorer->storeIntoDict(txn);
}
}
RecordData KVRecordStore::_getDataFor(const KVDictionary *db, OperationContext* txn, const RecordId& id, bool skipPessimisticLocking) {
Slice value;
Status status = db->get(txn, Slice::of(KeyString(id)), value, skipPessimisticLocking);
if (!status.isOK()) {
if (status.code() == ErrorCodes::NoSuchKey) {
return RecordData(nullptr, 0);
} else {
log() << "storage engine get() failed, operation will fail: " << status.toString();
uasserted(28549, status.toString());
}
}
// Return an owned RecordData that uses the SharedBuffer from `value'
return RecordData(std::move(value.ownedBuf()), value.size());
}
RecordData KVRecordStore::dataFor( OperationContext* txn, const RecordId& loc) const {
RecordData rd;
bool found = findRecord(txn, loc, &rd);
massert(28630, "Didn't find RecordId in record store", found);
return rd;
}
bool KVRecordStore::findRecord( OperationContext* txn,
const RecordId& loc, RecordData* out, bool skipPessimisticLocking ) const {
RecordData rd = _getDataFor(_db.get(), txn, loc, skipPessimisticLocking);
if (rd.data() == NULL) {
return false;
}
*out = rd;
return true;
}
void KVRecordStore::deleteRecord(OperationContext* txn, const RecordId& id) {
const KeyString key(id);
Slice val;
Status s = _db->get(txn, Slice::of(key), val, false);
invariantKVOK(s, str::stream() << "KVRecordStore: couldn't find record " << id << " for delete: " << s.toString());
_updateStats(txn, -1, -val.size());
s = _db->remove(txn, Slice::of(key));
invariant(s.isOK());
}
Status KVRecordStore::_insertRecord(OperationContext *txn,
const RecordId &id,
const Slice &value) {
const KeyString key(id);
DEV {
// Should never overwrite an existing record.
Slice v;
const Status status = _db->get(txn, Slice::of(key), v, true);
invariant(status.code() == ErrorCodes::NoSuchKey);
}
Status s = _db->insert(txn, Slice::of(key), value, true);
if (!s.isOK()) {
return s;
}
_updateStats(txn, 1, value.size());
return s;
}
StatusWith<RecordId> KVRecordStore::insertRecord(OperationContext* txn,
const char* data,
int len,
bool enforceQuota) {
const RecordId id = _nextId();
const Slice value(data, len);
const Status status = _insertRecord(txn, id, value);
if (!status.isOK()) {
return StatusWith<RecordId>(status);
}
return StatusWith<RecordId>(id);
}
StatusWith<RecordId> KVRecordStore::insertRecord(OperationContext* txn,
const DocWriter* doc,
bool enforceQuota) {
Slice value(doc->documentSize());
doc->writeDocument(value.mutableData());
return insertRecord(txn, value.data(), value.size(), enforceQuota);
}
StatusWith<RecordId> KVRecordStore::updateRecord(OperationContext* txn,
const RecordId& id,
const char* data,
int len,
bool enforceQuota,
UpdateNotifier* notifier) {
const KeyString key(id);
const Slice value(data, len);
int64_t numRecordsDelta = 0;
int64_t dataSizeDelta = value.size();
Slice val;
Status status = _db->get(txn, Slice::of(key), val, false);
if (status.code() == ErrorCodes::NoSuchKey) {
numRecordsDelta += 1;
} else if (status.isOK()) {
dataSizeDelta -= val.size();
} else {
return StatusWith<RecordId>(status);
}
// An update with a complete new image (data, len) is implemented as an overwrite insert.
status = _db->insert(txn, Slice::of(key), value, false);
if (!status.isOK()) {
return StatusWith<RecordId>(status);
}
_updateStats(txn, numRecordsDelta, dataSizeDelta);
return StatusWith<RecordId>(id);
}
Status KVRecordStore::updateWithDamages( OperationContext* txn,
const RecordId& id,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
const KeyString key(id);
const Slice oldValue(oldRec.data(), oldRec.size());
const KVUpdateWithDamagesMessage message(damageSource, damages);
// updateWithDamages can't change the number or size of records, so we don't need to update
// stats.
const Status s = _db->update(txn, Slice::of(key), oldValue, message);
if (!s.isOK()) {
return s;
}
// We also need to reach in and screw with the old doc's data so that the update system gets
// the new image, because the update system is assuming mmapv1's behavior. Sigh.
for (mutablebson::DamageVector::const_iterator it = damages.begin(); it != damages.end(); it++) {
const mutablebson::DamageEvent &event = *it;
invariant(event.targetOffset + event.size < static_cast<uint32_t>(oldRec.size()));
std::copy(damageSource + event.sourceOffset, damageSource + event.sourceOffset + event.size,
/* eek */
const_cast<char *>(oldRec.data()) + event.targetOffset);
}
return s;
}
RecordIterator* KVRecordStore::getIterator(OperationContext* txn,
const RecordId& start,
const CollectionScanParams::Direction& dir) const {
return new KVRecordIterator(*this, _db.get(), txn, start, dir);
}
std::vector<RecordIterator *> KVRecordStore::getManyIterators( OperationContext* txn ) const {
std::vector<RecordIterator *> iterators;
iterators.push_back(getIterator(txn));
return iterators;
}
Status KVRecordStore::truncate( OperationContext* txn ) {
// This is not a very performant implementation of truncate.
//
// At the time of this writing, it is only used by 'emptycapped', a test-only command.
for (boost::scoped_ptr<RecordIterator> iter(getIterator(txn));
!iter->isEOF(); ) {
RecordId id = iter->getNext();
deleteRecord(txn, id);
}
return Status::OK();
}
Status KVRecordStore::compact( OperationContext* txn,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats ) {
return _db->compact( txn );
}
Status KVRecordStore::validate( OperationContext* txn,
bool full,
bool scanData,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output ) {
bool invalidObject = false;
long long numRecords = 0;
long long dataSizeTotal = 0;
for (boost::scoped_ptr<RecordIterator> iter( getIterator( txn ) );
!iter->isEOF(); ) {
numRecords++;
if (scanData) {
RecordData data = dataFor( txn, iter->curr() );
size_t dataSize;
if (full) {
const Status status = adaptor->validate( data, &dataSize );
if (!status.isOK()) {
results->valid = false;
if ( invalidObject ) {
results->errors.push_back("invalid object detected (see logs)");
}
invalidObject = true;
log() << "Invalid object detected in " << _ns << ": " << status.reason();
}
dataSizeTotal += static_cast<long long>(dataSize);
}
}
iter->getNext();
}
if (_sizeStorer && full && scanData && results->valid) {
if (numRecords != _numRecords.load() || dataSizeTotal != _dataSize.load()) {
warning() << ns() << ": Existing record and data size counters ("
<< _numRecords.load() << " records " << _dataSize.load() << " bytes) "
<< "are inconsistent with full validation results ("
<< numRecords << " records " << dataSizeTotal << " bytes). "
<< "Updating counters with new values.";
}
_numRecords.store(numRecords);
_dataSize.store(dataSizeTotal);
long long oldNumRecords;
long long oldDataSize;
_sizeStorer->load(_ident, &oldNumRecords, &oldDataSize);
if (numRecords != oldNumRecords || dataSizeTotal != oldDataSize) {
warning() << ns() << ": Existing data in size storer ("
<< oldNumRecords << " records " << oldDataSize << " bytes) "
<< "is inconsistent with full validation results ("
<< numRecords << " records " << dataSizeTotal << " bytes). "
<< "Updating size storer with new values.";
}
_sizeStorer->store(this, _ident, numRecords, dataSizeTotal);
}
output->appendNumber("nrecords", numRecords);
return Status::OK();
}
void KVRecordStore::appendCustomStats( OperationContext* txn,
BSONObjBuilder* result,
double scale ) const {
_db->appendCustomStats(txn, result, scale);
}
RecordId KVRecordStore::_nextId() {
return RecordId(_nextIdNum.fetchAndAdd(1));
}
// ---------------------------------------------------------------------- //
void KVRecordStore::KVRecordIterator::_setCursor(const RecordId id) {
        // We should have no cursor at this point, either because we were just
        // constructed or because we're recovering from saved state (and so
        // the old cursor needed to be dropped).
invariant(!_cursor);
_cursor.reset();
_savedLoc = RecordId();
_savedVal = Slice();
// A new iterator with no start position will be either min() or max()
invariant(id.isNormal() || id == RecordId::min() || id == RecordId::max());
_cursor.reset(_db->getCursor(_txn, Slice::of(KeyString(id)), _dir));
}
KVRecordStore::KVRecordIterator::KVRecordIterator(const KVRecordStore &rs, KVDictionary *db, OperationContext *txn,
const RecordId &start,
const CollectionScanParams::Direction &dir)
: _rs(rs),
_db(db),
_dir(dir),
_savedLoc(),
_savedVal(),
_lowestInvisible(),
_idTracker(NULL),
_txn(txn),
_cursor()
{
if (start.isNull()) {
// A null RecordId means the beginning for a forward cursor,
// and the end for a reverse cursor.
_setCursor(_dir == CollectionScanParams::FORWARD ? RecordId::min() : RecordId::max());
} else {
_setCursor(start);
}
}
bool KVRecordStore::KVRecordIterator::isEOF() {
return !_cursor || !_cursor->ok();
}
RecordId KVRecordStore::KVRecordIterator::curr() {
if (isEOF()) {
return RecordId();
}
const Slice &key = _cursor->currKey();
BufReader br(key.data(), key.size());
return KeyString::decodeRecordId(&br);
}
void KVRecordStore::KVRecordIterator::_saveLocAndVal() {
if (!isEOF()) {
_savedLoc = curr();
_savedVal = _cursor->currVal().owned();
dassert(_savedLoc.isNormal());
} else {
_savedLoc = RecordId();
_savedVal = Slice();
}
}
RecordId KVRecordStore::KVRecordIterator::getNext() {
if (isEOF()) {
return RecordId();
}
// We need valid copies of _savedLoc / _savedVal since we are
// about to advance the underlying cursor.
_saveLocAndVal();
_cursor->advance(_txn);
if (!isEOF()) {
if (_idTracker) {
RecordId currentId = curr();
if (!_lowestInvisible.isNull()) {
                    // Oplog visibility: stop the scan once we reach or pass the
                    // lowest invisible id, or when we're at the id immediately
                    // before it and the tracker says it is not yet readable.
if (currentId >= _lowestInvisible) {
_cursor.reset();
} else if (RecordId(currentId.repr() + 1) == _lowestInvisible && !_idTracker->canReadId(currentId)) {
_cursor.reset();
}
} else if (!_idTracker->canReadId(currentId)) {
_cursor.reset();
}
}
}
return _savedLoc;
}
void KVRecordStore::KVRecordIterator::invalidate(const RecordId& loc) {
// this only gets called to invalidate potentially buffered
// `loc' results between saveState() and restoreState(). since
// we dropped our cursor and have no buffered rows, we do nothing.
}
void KVRecordStore::KVRecordIterator::saveState() {
// we need to drop the current cursor because it was created with
// an operation context that the caller intends to close after
// this function finishes (and before restoreState() is called,
// which will give us a new operation context)
_saveLocAndVal();
_cursor.reset();
_txn = NULL;
}
bool KVRecordStore::KVRecordIterator::restoreState(OperationContext* txn) {
invariant(!_txn && !_cursor);
_txn = txn;
if (!_savedLoc.isNull()) {
RecordId saved = _savedLoc;
_setCursor(_savedLoc);
if (curr() != saved && _rs.isCapped()) {
// Doc was deleted either by cappedDeleteAsNeeded() or cappedTruncateAfter()
_cursor.reset();
return false;
}
} else {
// We had saved state when the cursor was at EOF, so the savedLoc
// was null - therefore we must restoreState to EOF as well.
//
// Assert that this is indeed the case.
invariant(isEOF());
}
// `true' means the collection still exists, which is always the case
// because this cursor would have been deleted by higher layers if
// the collection were to indeed be dropped.
return true;
}
RecordData KVRecordStore::KVRecordIterator::dataFor(const RecordId& loc) const {
invariant(_txn);
// Kind-of tricky:
//
// We save the last loc and val that we were pointing to before a call
// to getNext(). We know that our caller intends to call dataFor() on
// each loc read this way, so if the given loc is equal to the last
// loc, then we can return the last value read, which we own and now
// pass to the caller with a shared pointer.
if (!_savedLoc.isNull() && _savedLoc == loc) {
Slice val = _savedVal;
invariant(val.mutableData());
return RecordData(std::move(val.ownedBuf()), val.size());
} else {
// .. otherwise something strange happened and the caller actually
// wants some other data entirely. we should probably never execute
// this code that often because it is slow to descend the dictionary
// for every value we want to read..
return _getDataFor(_db, _txn, loc);
}
}
} // namespace mongo
|
{"hexsha": "30c1459ee6ba65d627b2935dc84885c5df753faf", "size": 23768, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mongo/db/storage/kv/dictionary/kv_record_store.cpp", "max_stars_repo_name": "leifwalsh/mongo", "max_stars_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mongo/db/storage/kv/dictionary/kv_record_store.cpp", "max_issues_repo_name": "leifwalsh/mongo", "max_issues_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mongo/db/storage/kv/dictionary/kv_record_store.cpp", "max_forks_repo_name": "leifwalsh/mongo", "max_forks_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4817275748, "max_line_length": 139, "alphanum_fraction": 0.5483002356, "num_tokens": 4985}
|
"""
AskHandlers related to order relations: positive, negative, etc.
"""
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
class AskNegativeHandler(CommonHandler):
"""
This is called by ask() when key='negative'
Test that an expression is less (strict) than zero.
Examples:
>>> from sympy import ask, Q, pi
>>> ask(Q.negative(pi+1)) # this calls AskNegativeHandler.Add
False
>>> ask(Q.negative(pi**2)) # this calls AskNegativeHandler.Pow
False
"""
@staticmethod
def _number(expr, assumptions):
if not expr.as_real_imag()[1]:
return expr.evalf() < 0
else:
return False
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
return AskNegativeHandler._number(expr, assumptions)
@staticmethod
def Add(expr, assumptions):
"""
Positive + Positive -> Positive,
Negative + Negative -> Negative
"""
if expr.is_number:
return AskNegativeHandler._number(expr, assumptions)
for arg in expr.args:
if not ask(Q.negative(arg), assumptions):
break
else:
            # if all arguments are negative
return True
@staticmethod
def Mul(expr, assumptions):
if expr.is_number:
return AskNegativeHandler._number(expr, assumptions)
result = None
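        # Sign-parity walk: each provably negative factor flips `result`,
        # a provably positive factor leaves it unchanged, and an unknown
        # factor makes the overall sign undecidable (return None).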
for arg in expr.args:
if result is None:
result = False
if ask(Q.negative(arg), assumptions):
result = not result
elif ask(Q.positive(arg), assumptions):
pass
else:
return
return result
@staticmethod
def Pow(expr, assumptions):
"""
Real ** Even -> NonNegative
Real ** Odd -> same_as_base
NonNegative ** Positive -> NonNegative
"""
if expr.is_number:
return AskNegativeHandler._number(expr, assumptions)
if ask(Q.real(expr.base), assumptions):
if ask(Q.positive(expr.base), assumptions):
return False
if ask(Q.even(expr.exp), assumptions):
return False
if ask(Q.odd(expr.exp), assumptions):
return ask(Q.negative(expr.base), assumptions)
ImaginaryUnit, Abs = [staticmethod(CommonHandler.AlwaysFalse)]*2
@staticmethod
def exp(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return False
class AskNonZeroHandler(CommonHandler):
"""
    Handler for key 'nonzero'
Test that an expression is not identically zero
"""
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
# if there are no symbols just evalf
return expr.evalf() != 0
@staticmethod
def Add(expr, assumptions):
if all(ask(Q.positive(x), assumptions) for x in expr.args) \
or all(ask(Q.negative(x), assumptions) for x in expr.args):
return True
@staticmethod
def Mul(expr, assumptions):
for arg in expr.args:
result = ask(Q.nonzero(arg), assumptions)
if result:
continue
return result
return True
@staticmethod
def Pow(expr, assumptions):
return ask(Q.nonzero(expr.base), assumptions)
NaN = staticmethod(CommonHandler.AlwaysTrue)
@staticmethod
def Abs(expr, assumptions):
return ask(Q.nonzero(expr.args[0]), assumptions)
class AskPositiveHandler(CommonHandler):
"""
Handler for key 'positive'
Test that an expression is greater (strict) than zero
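
    Examples (analogous to AskNegativeHandler above):

    >>> from sympy import ask, Q, pi
    >>> ask(Q.positive(pi + 1))
    True
    >>> ask(Q.positive(1 - pi))
    False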
"""
@staticmethod
def _number(expr, assumptions):
if not expr.as_real_imag()[1]:
return expr.evalf() > 0
else:
return False
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
return AskPositiveHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
if expr.is_number:
return AskPositiveHandler._number(expr, assumptions)
result = True
for arg in expr.args:
if ask(Q.positive(arg), assumptions):
continue
elif ask(Q.negative(arg), assumptions):
                result = not result
else:
return
return result
@staticmethod
def Add(expr, assumptions):
if expr.is_number:
return AskPositiveHandler._number(expr, assumptions)
for arg in expr.args:
if ask(Q.positive(arg), assumptions) is not True:
break
else:
            # if all arguments are positive
return True
@staticmethod
def Pow(expr, assumptions):
if expr.is_number:
return expr.evalf() > 0
if ask(Q.positive(expr.base), assumptions):
return True
if ask(Q.negative(expr.base), assumptions):
if ask(Q.even(expr.exp), assumptions):
return True
if ask(Q.even(expr.exp), assumptions):
return False
@staticmethod
def exp(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return True
ImaginaryUnit = staticmethod(CommonHandler.AlwaysFalse)
@staticmethod
def Abs(expr, assumptions):
return ask(Q.nonzero(expr), assumptions)
@staticmethod
def Trace(expr, assumptions):
if ask(Q.positive_definite(expr.arg), assumptions):
return True
@staticmethod
def Determinant(expr, assumptions):
if ask(Q.positive_definite(expr.arg), assumptions):
return True
@staticmethod
def MatrixElement(expr, assumptions):
if (expr.i == expr.j
and ask(Q.positive_definite(expr.parent), assumptions)):
return True
|
{"hexsha": "076ac079a18a2c7d4e2b17afb17a99cc417dd6aa", "size": 5970, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy/assumptions/handlers/order.py", "max_stars_repo_name": "eriknw/sympy", "max_stars_repo_head_hexsha": "b7544e2bb74c011f6098a7e886fd77f41776c2c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-12-28T08:19:43.000Z", "max_stars_repo_stars_event_max_datetime": "2015-12-28T08:19:43.000Z", "max_issues_repo_path": "sympy/assumptions/handlers/order.py", "max_issues_repo_name": "pbeltran/sympy-1", "max_issues_repo_head_hexsha": "94f92b36731c2bebe6de1037c063c2a258a8a399", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sympy/assumptions/handlers/order.py", "max_forks_repo_name": "pbeltran/sympy-1", "max_forks_repo_head_hexsha": "94f92b36731c2bebe6de1037c063c2a258a8a399", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2938388626, "max_line_length": 75, "alphanum_fraction": 0.5859296482, "include": true, "reason": "from sympy", "num_tokens": 1244}
|
//
// Licensed to Green Energy Corp (www.greenenergycorp.com) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Green Energy Corp licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
#include <boost/test/unit_test.hpp>
#include <APLTestTools/TestHelpers.h>
#include <opendnp3/APL/CommandQueue.h>
#include <limits>
using namespace apl;
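// Test helpers: OptimalTypeTest checks the smallest encoding that can hold a
// given value; AutoTypeTest checks the encoding chosen automatically when the
// value is assigned via SetValue().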
template<class T>
void OptimalTypeTest(T val, SetpointEncodingType correct)
{
Setpoint sp(val);
BOOST_REQUIRE_EQUAL(correct, sp.GetOptimalEncodingType());
}
template<class T>
void AutoTypeTest(T val, SetpointEncodingType correct)
{
Setpoint sp; sp.SetValue(val);
BOOST_REQUIRE_EQUAL(sp.GetValue(), val);
BOOST_REQUIRE_EQUAL(correct, sp.GetEncodingType());
}
BOOST_AUTO_TEST_SUITE(CommandTypesSuite)
BOOST_AUTO_TEST_CASE(SetpointSet)
{
AutoTypeTest(0.01, SPET_AUTO_DOUBLE);
}
BOOST_AUTO_TEST_CASE(SetpointSetInt)
{
AutoTypeTest(5, SPET_AUTO_INT);
}
BOOST_AUTO_TEST_CASE(OptimalFloat)
{
OptimalTypeTest(100.0, SPET_FLOAT);
}
BOOST_AUTO_TEST_CASE(OptimalDouble)
{
OptimalTypeTest(std::numeric_limits<float>::max() * 100.0, SPET_DOUBLE);
}
BOOST_AUTO_TEST_CASE(OptimalInt16)
{
OptimalTypeTest(55, SPET_INT16);
}
BOOST_AUTO_TEST_CASE(OptimalInt32)
{
OptimalTypeTest(80000, SPET_INT32);
}
BOOST_AUTO_TEST_CASE(ByteToCommand)
{
BOOST_REQUIRE_EQUAL(CS_SUCCESS, ByteToCommandStatus(0));
BOOST_REQUIRE_EQUAL(CS_TIMEOUT, ByteToCommandStatus(1));
BOOST_REQUIRE_EQUAL(CS_NO_SELECT, ByteToCommandStatus(2));
BOOST_REQUIRE_EQUAL(CS_FORMAT_ERROR, ByteToCommandStatus(3));
BOOST_REQUIRE_EQUAL(CS_NOT_SUPPORTED, ByteToCommandStatus(4));
BOOST_REQUIRE_EQUAL(CS_ALREADY_ACTIVE, ByteToCommandStatus(5));
BOOST_REQUIRE_EQUAL(CS_HARDWARE_ERROR, ByteToCommandStatus(6));
BOOST_REQUIRE_EQUAL(CS_LOCAL, ByteToCommandStatus(7));
BOOST_REQUIRE_EQUAL(CS_TOO_MANY_OPS, ByteToCommandStatus(8));
BOOST_REQUIRE_EQUAL(CS_NOT_AUTHORIZED, ByteToCommandStatus(9));
}
BOOST_AUTO_TEST_CASE(CommandToString)
{
BOOST_REQUIRE_EQUAL("CS_SUCCESS", ToString(CS_SUCCESS));
BOOST_REQUIRE_EQUAL("CS_TIMEOUT", ToString(CS_TIMEOUT));
BOOST_REQUIRE_EQUAL("CS_NO_SELECT", ToString(CS_NO_SELECT));
BOOST_REQUIRE_EQUAL("CS_FORMAT_ERROR", ToString(CS_FORMAT_ERROR));
BOOST_REQUIRE_EQUAL("CS_NOT_SUPPORTED", ToString(CS_NOT_SUPPORTED));
BOOST_REQUIRE_EQUAL("CS_ALREADY_ACTIVE", ToString(CS_ALREADY_ACTIVE));
BOOST_REQUIRE_EQUAL("CS_HARDWARE_ERROR", ToString(CS_HARDWARE_ERROR));
BOOST_REQUIRE_EQUAL("CS_LOCAL", ToString(CS_LOCAL));
BOOST_REQUIRE_EQUAL("CS_TOO_MANY_OPS", ToString(CS_TOO_MANY_OPS));
BOOST_REQUIRE_EQUAL("CS_NOT_AUTHORIZED", ToString(CS_NOT_AUTHORIZED));
BOOST_REQUIRE_EQUAL("Unknown", ToString(CS_UNDEFINED));
}
BOOST_AUTO_TEST_CASE(ByteToControl)
{
BOOST_REQUIRE_EQUAL(CC_NULL, ByteToControlCode(0));
BOOST_REQUIRE_EQUAL(CC_PULSE, ByteToControlCode(0x01));
BOOST_REQUIRE_EQUAL(CC_LATCH_ON, ByteToControlCode(0x03));
BOOST_REQUIRE_EQUAL(CC_LATCH_OFF, ByteToControlCode(0x04));
BOOST_REQUIRE_EQUAL(CC_PULSE_CLOSE, ByteToControlCode(0x41));
BOOST_REQUIRE_EQUAL(CC_PULSE_TRIP, ByteToControlCode(0x81));
BOOST_REQUIRE_EQUAL(CC_UNDEFINED, ByteToControlCode(0xFF));
}
BOOST_AUTO_TEST_CASE(ControlToString)
{
BOOST_REQUIRE_EQUAL("CC_NULL", ToString(CC_NULL));
BOOST_REQUIRE_EQUAL("CC_PULSE", ToString(CC_PULSE));
BOOST_REQUIRE_EQUAL("CC_LATCH_ON", ToString(CC_LATCH_ON));
BOOST_REQUIRE_EQUAL("CC_LATCH_OFF", ToString(CC_LATCH_OFF));
BOOST_REQUIRE_EQUAL("CC_PULSE_CLOSE", ToString(CC_PULSE_CLOSE));
BOOST_REQUIRE_EQUAL("CC_PULSE_TRIP", ToString(CC_PULSE_TRIP));
BOOST_REQUIRE_EQUAL("Unknown", ToString(CC_UNDEFINED));
}
BOOST_AUTO_TEST_CASE(CommandTypeToString)
{
BOOST_REQUIRE_EQUAL("BinaryOutput", ToString(CT_BINARY_OUTPUT));
BOOST_REQUIRE_EQUAL("Setpoint", ToString(CT_SETPOINT));
BOOST_REQUIRE_EQUAL("Unknown", ToString(CT_NONE));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "a8ebcdbb0e372d49a343eb2cb1aeaa2eb9dcd11c", "size": 4454, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "TestAPL/TestCommandTypes.cpp", "max_stars_repo_name": "rahul-sachdev/DNPSimulator", "max_stars_repo_head_hexsha": "d78f6a3f2fe3b706fe73c163173f06c55af35b1e", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-10-28T01:27:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-14T08:06:33.000Z", "max_issues_repo_path": "TestAPL/TestCommandTypes.cpp", "max_issues_repo_name": "rahul-sachdev/DNPSimulator", "max_issues_repo_head_hexsha": "d78f6a3f2fe3b706fe73c163173f06c55af35b1e", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-15T06:32:48.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-15T06:52:33.000Z", "max_forks_repo_path": "APLTest/TestCommandTypes.cpp", "max_forks_repo_name": "sentient-energy/emsw-opendnp3-mirror", "max_forks_repo_head_hexsha": "a182a01c15f342fbd498e16b02865fd6d5d5be98", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2016-11-16T15:14:51.000Z", "max_forks_repo_forks_event_max_datetime": "2016-11-16T15:14:51.000Z", "avg_line_length": 34.0, "max_line_length": 73, "alphanum_fraction": 0.8100583745, "num_tokens": 1070}
|
"""This module provides tools to transform nfpcaps into csv and generate the dataset to make the predictions"""
from . import CommonTools
import os
import operator
import numpy as np
def nfpcapsToCSV(locationToSaveTw, nfpcapLocation):
print("::::::::::::::::::::::::::::")
print(":::: TRANSFORMING ::::")
print("::::::::::::::::::::::::::::")
numberOfFiles = len(CommonTools.getFilesOfSpecificLocation(nfpcapLocation))
# os.chdir(locationToSaveTw)
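    # One CSV per capture file: nfcapd.<y> is dumped to <y>.csv via the nfdump CLI.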
for y in range(0, numberOfFiles):
cmd = "nfdump -r " + nfpcapLocation + "nfcapd." + str(y) + " -o csv > " + locationToSaveTw + str(y) + ".csv"
os.system(cmd)
def datasetGenerator(locationToSaveDataset, twLocation, datasetName):
# VARS
flag = 0
counter = 0
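    # flag marks that the CSV header has been written once; counter counts down
    # the lines between nfdump's "Summary" marker and its totals row.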
header = "Netflows,First_Protocol,Second_Protocol,Third_Protocol,p1_d,p2_d,p3_d,duration,max_d,min_d,packets,Avg_bps,Avg_pps,Avg_bpp,Bytes,number_sp,number_dp,first_sp,second_sp,third_sp,first_dp,second_dp,third_dp,p1_ip,p2_ip,p3_ip,p1_ib,p2_ib,p3_ib\n"
fout = open(locationToSaveDataset + datasetName, "a")
numberOfFiles = len(CommonTools.getFilesOfSpecificLocation(twLocation))
for y in range(0, numberOfFiles):
netflows = 0
lduration = []
protocols = {}
packets = 0
avg_bps = 0
avg_pps = 0
avg_bpp = 0
bytes = 0
sourcePorts = {}
destinationPorts = {}
lipkt = []
libyt = []
f = open(twLocation + str(y) + ".csv")
for line in f:
if counter == 0:
if "ts" in line:
if flag == 0:
fout.write(header)
flag = 1
elif "Summary" in line:
counter = 2
else:
temp = line.split(",")
lduration.append(float(temp[2]))
lipkt.append(float(temp[11]))
libyt.append(float(temp[12]))
if temp[5] in sourcePorts:
sourcePorts[temp[5]] = sourcePorts[temp[5]] + 1
else:
sourcePorts[temp[5]] = 1
if temp[6] in destinationPorts:
destinationPorts[temp[6]] = destinationPorts[temp[6]] + 1
else:
destinationPorts[temp[6]] = 1
if temp[7] in protocols:
protocols[temp[7]] = protocols[temp[7]] + 1
else:
protocols[temp[7]] = 1
elif counter == 1:
temp = line.split(",")
netflows = temp[0]
                total_bytes = temp[1]
packets = temp[2]
avg_bps = temp[3]
avg_pps = temp[4]
avg_bpp = temp[5].replace("\n", "")
sourcePorts = sorted(sourcePorts.items(), key=operator.itemgetter(1), reverse=True)
destinationPorts = sorted(destinationPorts.items(), key=operator.itemgetter(1), reverse=True)
protocols = sorted(protocols.items(), key=operator.itemgetter(1), reverse=True)
counter = counter - 1
else:
counter = counter - 1
f.close()
duration = np.array(lduration)
        ipkt = np.array(lipkt)  # input packets (CSV column 11)
        ibyt = np.array(libyt)  # input bytes (CSV column 12)
sum_d = str(np.sum(duration, axis=0))
d_max = str(np.amax(duration))
d_min = str(np.amin(duration))
p1_d = str(np.percentile(duration, 25))
p2_d = str(np.percentile(duration, 50))
p3_d = str(np.percentile(duration, 75))
p1_ip = str(np.percentile(ipkt, 25))
p2_ip = str(np.percentile(ipkt, 50))
p3_ip = str(np.percentile(ipkt, 75))
p1_ib = str(np.percentile(ibyt, 25))
p2_ib = str(np.percentile(ibyt, 50))
p3_ib = str(np.percentile(ibyt, 75))
number_sp = str(len(sourcePorts))
number_dp = str(len(destinationPorts))
first_protocol = protocols[0][0]
second_protocol = ""
third_protocol = ""
lg = len(protocols)
if lg > 1:
second_protocol = protocols[1][0]
if lg > 2:
third_protocol = protocols[2][0]
first_sp = sourcePorts[0][0]
lg = len(sourcePorts)
second_sp = ""
third_sp = ""
if lg > 1:
second_sp = sourcePorts[1][0]
if lg > 2:
third_sp = sourcePorts[2][0]
first_dp = destinationPorts[0][0]
lg = len(destinationPorts)
second_dp = ""
third_dp = ""
if lg > 1:
second_dp = destinationPorts[1][0]
if lg > 2:
third_dp = destinationPorts[2][0]
        fields = [netflows, first_protocol, second_protocol, third_protocol,
                  p1_d, p2_d, p3_d, sum_d, d_max, d_min, packets, avg_bps,
                  avg_pps, avg_bpp, total_bytes, number_sp, number_dp,
                  first_sp, second_sp, third_sp, first_dp, second_dp,
                  third_dp, p1_ip, p2_ip, p3_ip, p1_ib, p2_ib, p3_ib]
        liner = ",".join(fields) + "\n"
fout.write(liner)
fout.close()
def deleteTW(location):
cmd = "rm " + location + "*"
os.system(cmd)
def deleteDataset(location):
cmd = "rm " + location
os.system(cmd)
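# Minimal usage sketch of the pipeline above. The directory paths are
# illustrative assumptions, not taken from the original project. Because this
# module uses a relative import, run it as a module (python -m ...) or import
# it from the package rather than executing the file directly.
if __name__ == "__main__":
    NFPCAP_DIR = "/data/nfpcaps/"    # hypothetical nfcapd capture directory
    TW_DIR = "/data/timewindows/"    # hypothetical time-window CSV directory
    DATASET_DIR = "/data/datasets/"  # hypothetical dataset output directory
    nfpcapsToCSV(TW_DIR, NFPCAP_DIR)
    datasetGenerator(DATASET_DIR, TW_DIR, "dataset.csv")
    deleteTW(TW_DIR)  # clean up the intermediate time-window CSVs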
|
{"hexsha": "18198c219ae82bdb62b9506bfa32afc6ac2fb348", "size": 5458, "ext": "py", "lang": "Python", "max_stars_repo_path": "Botnets/App/App Web/backend/Modules/DataMining/TimeWindowTools.py", "max_stars_repo_name": "i2tResearch/Ciberseguridad_web", "max_stars_repo_head_hexsha": "ac3dd934a60628532e3538369cb145d9a8f33e4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-10-01T22:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T17:48:45.000Z", "max_issues_repo_path": "Botnets/App/App Web/backend/Modules/DataMining/TimeWindowTools.py", "max_issues_repo_name": "i2tResearch/Ciberseguridad_web", "max_issues_repo_head_hexsha": "ac3dd934a60628532e3538369cb145d9a8f33e4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Botnets/App/App Web/backend/Modules/DataMining/TimeWindowTools.py", "max_forks_repo_name": "i2tResearch/Ciberseguridad_web", "max_forks_repo_head_hexsha": "ac3dd934a60628532e3538369cb145d9a8f33e4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6413793103, "max_line_length": 484, "alphanum_fraction": 0.5181385123, "include": true, "reason": "import numpy", "num_tokens": 1402}
|
[STATEMENT]
lemma "{P} R {> Q} = (\<forall>s t. s \<in> P \<longrightarrow> (s, t) \<in> R \<longrightarrow> t \<in> Q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {P} R {> Q} = (\<forall>s t. s \<in> P \<longrightarrow> (s, t) \<in> R \<longrightarrow> t \<in> Q)
[PROOF STEP]
by (auto simp add: PO_hoare_defs)
|
{"llama_tokens": 135, "file": "Consensus_Refined_Refinement", "length": 1}
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import joblib
dataset = pd.read_csv('Processed Dataset.csv')
X = dataset[['Gender', 'Age', 'Height', 'Weight', 'Duration', 'Heart_Rate', 'Body_Temp']]
y = dataset['Calories']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(y_pred)
print('R^2 on test set:', r2_score(y_test, y_pred))
filename = 'calories_model.sav'
joblib.dump(model, filename)
# loaded_model = joblib.load(filename)
# result = loaded_model.score(X_test, y_test)
# print(result)
|
{"hexsha": "775e16fa67782fb97ecad46c5c5a7189d7cfeae0", "size": 719, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/train.py", "max_stars_repo_name": "saputhebeast/my-calaries", "max_stars_repo_head_hexsha": "2469a8eecc0ad802121bfb6bafd86746a7a2be78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/train.py", "max_issues_repo_name": "saputhebeast/my-calaries", "max_issues_repo_head_hexsha": "2469a8eecc0ad802121bfb6bafd86746a7a2be78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/train.py", "max_forks_repo_name": "saputhebeast/my-calaries", "max_forks_repo_head_hexsha": "2469a8eecc0ad802121bfb6bafd86746a7a2be78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7931034483, "max_line_length": 93, "alphanum_fraction": 0.7607788595, "include": true, "reason": "import numpy", "num_tokens": 182}
|
import sklearn
import pandas
import numpy
import nltk
import dill
import eli5
print('All packages were imported successfully')
|
{"hexsha": "d523424bac48e6cd24a5c7928e3b1a52ddd39b42", "size": 127, "ext": "py", "lang": "Python", "max_stars_repo_path": "docker/sklearn/verify.py", "max_stars_repo_name": "bkschmoll-uptycs/dockerfiles", "max_stars_repo_head_hexsha": "ba79c07df7f6e41484d4b6b4d2c0d857e2205a0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docker/sklearn/verify.py", "max_issues_repo_name": "bkschmoll-uptycs/dockerfiles", "max_issues_repo_head_hexsha": "ba79c07df7f6e41484d4b6b4d2c0d857e2205a0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/sklearn/verify.py", "max_forks_repo_name": "bkschmoll-uptycs/dockerfiles", "max_forks_repo_head_hexsha": "ba79c07df7f6e41484d4b6b4d2c0d857e2205a0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.875, "max_line_length": 48, "alphanum_fraction": 0.8346456693, "include": true, "reason": "import numpy", "num_tokens": 29}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Methods for ideal equations of state.
Currently only supports liquid and vapor phases
"""
from pyomo.environ import Expression, log
from idaes.core import Apparent
from idaes.core.util.exceptions import (
ConfigurationError, PropertyNotSupportedError)
from idaes.generic_models.properties.core.generic.utility import (
get_method, get_component_object as cobj)
from .eos_base import EoSBase
# TODO: Add support for ideal solids
class Ideal(EoSBase):
# Add attribute indicating support for electrolyte systems
electrolyte_support = True
@staticmethod
def common(b, pobj):
# No common components required for ideal property calculations
pass
@staticmethod
def calculate_scaling_factors(b, pobj):
pass
@staticmethod
def build_parameters(b):
# No EoS specific parameters required
pass
@staticmethod
def act_phase_comp(b, p, j):
return b.mole_frac_phase_comp[p, j]
@staticmethod
def act_phase_comp_true(b, p, j):
return b.mole_frac_phase_comp_true[p, j]
@staticmethod
def act_phase_comp_appr(b, p, j):
return b.mole_frac_phase_comp_apparent[p, j]
@staticmethod
def act_coeff_phase_comp(b, p, j):
return 1
@staticmethod
def act_coeff_phase_comp_true(b, p, j):
return 1
@staticmethod
def act_coeff_phase_comp_appr(b, p, j):
return 1
@staticmethod
def compress_fact_phase(b, p):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return 1
else:
return 0
@staticmethod
def cp_mol_phase(b, p):
return sum(b.get_mole_frac()[p, j]*b.cp_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
@staticmethod
def cp_mol_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return get_method(b, "cp_mol_ig_comp", j)(
b, cobj(b, j), b.temperature)
elif pobj.is_liquid_phase():
return get_method(b, "cp_mol_liq_comp", j)(
b, cobj(b, j), b.temperature)
elif pobj.is_solid_phase():
return get_method(b, "cp_mol_sol_comp", j)(
b, cobj(b, j), b.temperature)
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def cv_mol_phase(b, p):
return sum(b.get_mole_frac()[p, j]*b.cv_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
@staticmethod
def cv_mol_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return EoSBase.cv_mol_ig_comp_pure(b, j)
elif pobj.is_liquid_phase() or pobj.is_solid_phase():
return EoSBase.cv_mol_ls_comp_pure(b, p, j)
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def dens_mass_phase(b, p):
return b.dens_mol_phase[p]*b.mw_phase[p]
@staticmethod
def dens_mol_phase(b, p):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return b.pressure/(Ideal.gas_constant(b)*b.temperature)
else:
return 1/b.vol_mol_phase[p]
@staticmethod
def energy_internal_mol_phase(b, p):
return sum(b.get_mole_frac()[p, j] *
b.energy_internal_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
@staticmethod
def energy_internal_mol_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return EoSBase.energy_internal_mol_ig_comp_pure(b, j)
elif pobj.is_liquid_phase() or pobj.is_solid_phase():
return EoSBase.energy_internal_mol_ls_comp_pure(b, p, j)
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def enth_mol_phase(b, p):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return sum(b.get_mole_frac()[p, j]*b.enth_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
elif pobj.is_liquid_phase():
return (sum(b.get_mole_frac()[p, j] *
get_method(b, "enth_mol_liq_comp", j)(
b, cobj(b, j), b.temperature)
for j in b.components_in_phase(p)) +
(b.pressure-b.params.pressure_ref)/b.dens_mol_phase[p])
elif pobj.is_solid_phase():
return (sum(b.get_mole_frac()[p, j] *
get_method(b, "enth_mol_sol_comp", j)(
b, cobj(b, j), b.temperature)
for j in b.components_in_phase(p)) +
(b.pressure-b.params.pressure_ref)/b.dens_mol_phase[p])
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def enth_mol_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return get_method(b, "enth_mol_ig_comp", j)(
b, cobj(b, j), b.temperature)
elif pobj.is_liquid_phase():
return (get_method(b, "enth_mol_liq_comp", j)(
b, cobj(b, j), b.temperature) +
(b.pressure-b.params.pressure_ref)/b.dens_mol_phase[p])
elif pobj.is_solid_phase():
return (get_method(b, "enth_mol_sol_comp", j)(
b, cobj(b, j), b.temperature) +
(b.pressure-b.params.pressure_ref)/b.dens_mol_phase[p])
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def entr_mol_phase(b, p):
return sum(b.get_mole_frac()[p, j]*b.entr_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
@staticmethod
def entr_mol_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return (get_method(b, "entr_mol_ig_comp", j)(
b, cobj(b, j), b.temperature) -
Ideal.gas_constant(b)*log(
b.get_mole_frac()[p, j]*b.pressure /
b.params.pressure_ref))
elif pobj.is_liquid_phase():
            # Assume no pressure/volume dependency of entropy for ideal liquids
return (get_method(b, "entr_mol_liq_comp", j)(
b, cobj(b, j), b.temperature))
elif pobj.is_solid_phase():
            # Assume no pressure/volume dependency of entropy for ideal solids
return (get_method(b, "entr_mol_sol_comp", j)(
b, cobj(b, j), b.temperature))
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def fug_phase_comp(b, p, j):
return _fug_phase_comp(b, p, j, b.temperature)
@staticmethod
def fug_phase_comp_eq(b, p, j, pp):
return _fug_phase_comp(b, p, j, b._teq[pp])
@staticmethod
def log_fug_phase_comp_eq(b, p, j, pp):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return log(b.get_mole_frac()[p, j]) + log(b.pressure)
elif pobj.is_liquid_phase():
if (cobj(b, j).config.henry_component is not None and
p in cobj(b, j).config.henry_component):
# Use Henry's Law
return log(b.get_mole_frac()[p, j]) + log(b.henry[p, j])
elif cobj(b, j).config.has_vapor_pressure:
# Use Raoult's Law
return (log(b.get_mole_frac()[p, j]) +
log(get_method(b, "pressure_sat_comp", j)(
b, cobj(b, j), b.temperature)))
else:
return Expression.Skip
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def fug_coeff_phase_comp(b, p, j):
pobj = b.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
return 1
@staticmethod
def fug_coeff_phase_comp_eq(b, p, j, pp):
pobj = b.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
return 1
@staticmethod
def log_fug_phase_comp_Tbub(b, p, j, pp):
pobj = b.params.get_phase(p)
cobj = b.params.get_component(j)
if pobj.is_vapor_phase():
return log(b._mole_frac_tbub[pp[0], pp[1], j]) + log(b.pressure)
elif pobj.is_liquid_phase():
if (cobj.config.henry_component is not None and
p in cobj.config.henry_component):
return (log(b.mole_frac_comp[j]) +
log(get_method(b, "henry_component", j, p)(
b, p, j, b.temperature_bubble[pp])))
else:
return (log(b.mole_frac_comp[j]) +
log(get_method(b, "pressure_sat_comp", j)(
b, cobj, b.temperature_bubble[pp])))
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def log_fug_phase_comp_Tdew(b, p, j, pp):
pobj = b.params.get_phase(p)
cobj = b.params.get_component(j)
if pobj.is_vapor_phase():
return log(b.mole_frac_comp[j]) + log(b.pressure)
elif pobj.is_liquid_phase():
if (cobj.config.henry_component is not None and
p in cobj.config.henry_component):
return (log(b._mole_frac_tdew[pp[0], pp[1], j]) +
log(get_method(b, "henry_component", j, p)(
b, p, j, b.temperature_dew[pp])))
else:
return (log(b._mole_frac_tdew[pp[0], pp[1], j]) +
log(get_method(b, "pressure_sat_comp", j)(
b, cobj, b.temperature_dew[pp])))
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def log_fug_phase_comp_Pbub(b, p, j, pp):
pobj = b.params.get_phase(p)
cobj = b.params.get_component(j)
if pobj.is_vapor_phase():
return (log(b._mole_frac_pbub[pp[0], pp[1], j]) +
log(b.pressure_bubble[pp]))
elif pobj.is_liquid_phase():
if (cobj.config.henry_component is not None and
p in cobj.config.henry_component):
return (log(b.mole_frac_comp[j]) +
log(b.henry[p, j]))
else:
return (log(b.mole_frac_comp[j]) +
log(b.pressure_sat_comp[j]))
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def log_fug_phase_comp_Pdew(b, p, j, pp):
pobj = b.params.get_phase(p)
cobj = b.params.get_component(j)
if pobj.is_vapor_phase():
return log(b.mole_frac_comp[j]) + log(b.pressure_dew[pp])
elif pobj.is_liquid_phase():
if (cobj.config.henry_component is not None and
p in cobj.config.henry_component):
return (log(b._mole_frac_pdew[pp[0], pp[1], j]) +
log(b.henry[p, j]))
else:
return (log(b._mole_frac_pdew[pp[0], pp[1], j]) +
log(b.pressure_sat_comp[j]))
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
@staticmethod
def gibbs_mol_phase(b, p):
return sum(b.get_mole_frac()[p, j]*b.gibbs_mol_phase_comp[p, j]
for j in b.components_in_phase(p))
@staticmethod
def gibbs_mol_phase_comp(b, p, j):
return (b.enth_mol_phase_comp[p, j] -
b.entr_mol_phase_comp[p, j] *
b.temperature)
@staticmethod
def pressure_osm_phase(b, p):
try:
solvent_set = b.params.solvent_set
except AttributeError:
raise ConfigurationError(
f"{b.name} called for pressure_osm, but no solvents were "
f"defined. Osmotic pressure requires at least one component "
f"to be declared as a solvent.")
C = 0
for j in b.component_list:
if (p, j) in b.phase_component_set and j not in solvent_set:
c_obj = b.params.get_component(j)
if isinstance(c_obj, Apparent):
i = sum(c_obj.config.dissociation_species.values())
else:
i = 1
C += i*b.conc_mol_phase_comp[p, j]
return Ideal.gas_constant(b)*b.temperature*C
@staticmethod
def vol_mol_phase(b, p):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return Ideal.gas_constant(b)*b.temperature/b.pressure
elif pobj.is_liquid_phase():
ptype = "liq"
elif pobj.is_solid_phase():
ptype = "sol"
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
v_expr = 0
for j in b.components_in_phase(p):
# First try to get a method for vol_mol
v_comp = Ideal.get_vol_mol_pure(b, ptype, j, b.temperature)
v_expr += b.get_mole_frac()[p, j]*v_comp
return v_expr
def _invalid_phase_msg(name, phase):
return ("{} received unrecognised phase name {}. Ideal property "
"libray only supports Vap and Liq phases."
.format(name, phase))
def _fug_phase_comp(b, p, j, T):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase():
return b.get_mole_frac()[p, j] * b.pressure
elif pobj.is_liquid_phase():
if (cobj(b, j).config.henry_component is not None and
p in cobj(b, j).config.henry_component):
# Use Henry's Law
return b.get_mole_frac()[p, j] * b.henry[p, j]
elif cobj(b, j).config.has_vapor_pressure:
# Use Raoult's Law
return (b.get_mole_frac()[p, j] *
get_method(b, "pressure_sat_comp", j)(
b, cobj(b, j), T))
else:
return Expression.Skip
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
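# Note on the liquid-phase fugacity rules implemented in _fug_phase_comp and
# log_fug_phase_comp_eq above (a summary for readers, not part of the IDAES
# API):
#   * Raoult's law, for ordinary components:       f_j = x_j * Psat_j(T)
#   * Henry's law, for declared Henry components:  f_j = x_j * H_j(T)
# Components with neither vapor pressure nor Henry data contribute no
# fugacity expression (Expression.Skip).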
|
{"hexsha": "6a2fe25c372584d48fbd2f54319ad4391b9afca0", "size": 15216, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/generic_models/properties/core/eos/ideal.py", "max_stars_repo_name": "adowling2/idaes-pse", "max_stars_repo_head_hexsha": "38ddfdc3e016a515e556aa3376e2252f7342352f", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": 112, "max_stars_repo_stars_event_min_datetime": "2019-02-11T23:16:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T20:59:57.000Z", "max_issues_repo_path": "idaes/generic_models/properties/core/eos/ideal.py", "max_issues_repo_name": "adowling2/idaes-pse", "max_issues_repo_head_hexsha": "38ddfdc3e016a515e556aa3376e2252f7342352f", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 621, "max_issues_repo_issues_event_min_datetime": "2019-03-01T14:44:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:49:25.000Z", "max_forks_repo_path": "idaes/generic_models/properties/core/eos/ideal.py", "max_forks_repo_name": "adowling2/idaes-pse", "max_forks_repo_head_hexsha": "38ddfdc3e016a515e556aa3376e2252f7342352f", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 154, "max_forks_repo_forks_event_min_datetime": "2019-02-01T23:46:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T15:07:10.000Z", "avg_line_length": 38.2311557789, "max_line_length": 81, "alphanum_fraction": 0.5801130389, "include": true, "reason": "from pyomo", "num_tokens": 3765}
|
import numpy as np
import time
from PINNs.create_example_parameters import create_example_parameters
from PINNs.create_data import create_data
from PINNs.PinnModel import PinnModel
def run_system_identification():
# load or create a file with all simulation parameters such that a simulation is repeatable
# to illustrate the working principle, examples for 1 and 4 buses are implemented
simulation_parameters = create_example_parameters(n_buses=4)
# at this point the training data are provided
# here we simulate a dataset based on the previously defined simulation parameters
x_training, y_training = create_data(simulation_parameters=simulation_parameters)
# creating the model including building it and setting the options for the optimiser, the loss function and the
# loss weights --> see PinnModel.py
model = PinnModel(simulation_parameters=simulation_parameters)
np.set_printoptions(precision=3)
print('Starting training')
total_start_time = time.time()
for n_epochs, batch_size in zip(simulation_parameters['training']['epoch_schedule'],
simulation_parameters['training']['batching_schedule']):
epoch_start_time = time.time()
model.fit(x_training,
y_training,
epochs=n_epochs,
batch_size=batch_size,
verbose=0,
shuffle=True)
epoch_end_time = time.time()
print(f'Trained for {n_epochs} epochs with batch size {batch_size} '
f'in {epoch_end_time - epoch_start_time:.2f} seconds.')
model.PinnLayer.print_relative_error()
total_end_time = time.time()
print(f'Total training time: {total_end_time - total_start_time:.1f} seconds')
if __name__ == "__main__":
run_system_identification()
|
{"hexsha": "5a89539c76c3ff9b099b683f8cb95b501f4b759d", "size": 1898, "ext": "py", "lang": "Python", "max_stars_repo_path": "PINNs/run_system_identification.py", "max_stars_repo_name": "jmontalvo94/PINN_system_identification", "max_stars_repo_head_hexsha": "c3742a7c88ea1039982e37ee630800f2e4b7c246", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-15T19:41:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T21:03:47.000Z", "max_issues_repo_path": "PINNs/run_system_identification.py", "max_issues_repo_name": "jmontalvo94/PINN_system_identification", "max_issues_repo_head_hexsha": "c3742a7c88ea1039982e37ee630800f2e4b7c246", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-30T21:15:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:22:58.000Z", "max_forks_repo_path": "PINNs/run_system_identification.py", "max_forks_repo_name": "jmontalvo94/PINN_system_identification", "max_forks_repo_head_hexsha": "c3742a7c88ea1039982e37ee630800f2e4b7c246", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-04-23T05:47:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T05:37:02.000Z", "avg_line_length": 37.96, "max_line_length": 116, "alphanum_fraction": 0.6875658588, "include": true, "reason": "import numpy", "num_tokens": 378}
|
import numpy as np
def hpdi(samples, prob):
"""Compute highest density interval from a sample of representative values,
estimated as the shortest credible interval.
Args:
samples (np.array): samples from a distribution
        prob (float): credible mass (e.g. 0.95)
Returns:
tuple: highest density interval
"""
sorted_points = sorted(samples)
ci_idx_inc = np.ceil(prob * len(sorted_points)).astype('int')
n_cis = len(sorted_points) - ci_idx_inc
ci_width = [0] * n_cis
for i in range(0, n_cis):
ci_width[i] = sorted_points[i + ci_idx_inc] - sorted_points[i]
hdi_min = sorted_points[ci_width.index(min(ci_width))]
hdi_max = sorted_points[ci_width.index(min(ci_width)) + ci_idx_inc]
return hdi_min, hdi_max
def information_entropy(p):
"""Information entropy.
Args:
p (np.array): array of relative probability of each event
Returns:
float: the information entropy
"""
non_zero = p > 0
p = p[non_zero]
return -np.sum(p * np.log(p))
def kl_divergence(p, q):
"""Kullback-Leibler divergence.
Args:
p (np.array): target probability
q (np.array): model probability
Returns:
        float: the total difference in log probability between the
            target (p) and model (q)
"""
return np.sum(p * (np.log(p) - np.log(q)))
def log_likelihood(q):
"""Log-likelihood.
Args:
q (np.array): the likelihood of each observation.
Returns:
float: the log-likelihood of the model q.
"""
return np.sum(np.log(q))
def deviance(q):
"""Deviance. A relative measure of model fit.
Args:
q (np.array): first alternative model
Returns:
float: approximate of the relative value of E(log(q_i))
"""
return -2 * log_likelihood(q)
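# Minimal usage sketch (illustrative values):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    samples = rng.normal(loc=0.0, scale=1.0, size=10_000)
    hdi_min, hdi_max = hpdi(samples, prob=0.95)
    print(f"95% HPDI: [{hdi_min:.3f}, {hdi_max:.3f}]")  # roughly [-1.96, 1.96]
    p = np.array([0.3, 0.7])    # target distribution
    q = np.array([0.25, 0.75])  # model distribution
    print("entropy of p:", information_entropy(p))
    print("KL(p || q):", kl_divergence(p, q))
    print("deviance of q:", deviance(q))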
|
{"hexsha": "f50410375abd2991c6592d96201d0f06e4ec7686", "size": 1858, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rethinking/utils.py", "max_stars_repo_name": "IamGianluca/rethinking", "max_stars_repo_head_hexsha": "2384c685ea0916c4c1f73651bd68cfb70dbf0615", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rethinking/utils.py", "max_issues_repo_name": "IamGianluca/rethinking", "max_issues_repo_head_hexsha": "2384c685ea0916c4c1f73651bd68cfb70dbf0615", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rethinking/utils.py", "max_forks_repo_name": "IamGianluca/rethinking", "max_forks_repo_head_hexsha": "2384c685ea0916c4c1f73651bd68cfb70dbf0615", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4473684211, "max_line_length": 79, "alphanum_fraction": 0.6286329386, "include": true, "reason": "import numpy", "num_tokens": 460}
|
theory SPS
imports spf.SPFcomp
begin
subsection \<open>SPS \label{sec:sps}\<close>
text\<open>The behaviour of under-specified or nondeterministic components
often cannot be modeled by a single \gls{spf}, but rather by a set of
\Gls{spf}. Similar to the \gls{spf} type, we define the \gls{sps} type
as a type synonym.\<close>
type_synonym ('I,'O) SPS = "('I,'O) SPF set"
(* TODO: move *)
definition spfIO::"('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) \<Rightarrow> ('I1\<^sup>\<Omega> \<times> 'O1\<^sup>\<Omega>) set" where
"spfIO spf = {(sb, spf\<cdot>sb) | sb. True}"
definition spsIO::"('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) set \<Rightarrow> ('I1\<^sup>\<Omega> \<times> 'O1\<^sup>\<Omega>) set" where
"spsIO sps = {(sb, spf\<cdot>sb) | sb spf. spf\<in>sps}"
definition spsComplete ::"('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) set \<Rightarrow> ('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) set" where
"spsComplete sps = {spf . spfIO spf \<subseteq> spsIO sps}"
subsection \<open>I/O-Behaviour\<close>
lemma spsio_empty[simp]: "spsIO {} = {}"
unfolding spsIO_def
by blast
subsection \<open>Completion\<close>
lemma spscomplete_belowI:
assumes "\<And>spf sb. spf\<in>S1 \<Longrightarrow> \<exists>spf2 \<in> S2. spf\<cdot>sb = spf2\<cdot>sb"
shows "S1 \<subseteq> spsComplete S2"
unfolding spsComplete_def spsIO_def spfIO_def
apply auto
using assms by auto
lemma spscomplete_below: "sps \<subseteq> spsComplete sps"
using spscomplete_belowI by auto
lemma spscomplete_set: "spsComplete sps = {spf. \<forall>sb. \<exists>spf2\<in>sps. spf\<cdot>sb = spf2\<cdot>sb}"
unfolding spsComplete_def spsIO_def spfIO_def
apply auto
by auto
lemma spscomplete_complete [simp]: "spsComplete (spsComplete sps) = spsComplete sps"
unfolding spscomplete_set apply auto
by metis
lemma spscomplete_mono: assumes "sps1 \<subseteq> sps2"
shows "spsComplete sps1 \<subseteq> spsComplete sps2"
apply(rule spscomplete_belowI)
unfolding spscomplete_set
apply (auto)
by (meson assms in_mono)
lemma spscomplete_io: "spsIO (spsComplete sps) = spsIO sps"
unfolding spscomplete_set spsIO_def
apply auto
by auto
lemma spscomplete_empty[simp]: "spsComplete {} = {}"
unfolding spscomplete_set by auto
lemma spscomplete_one[simp]: "spsComplete {f} = {f}"
unfolding spscomplete_set apply auto
by (simp add: cfun_eqI)
lemma spscomplete_univ[simp]: "spsComplete UNIV = UNIV"
by (simp add: spscomplete_below top.extremum_uniqueI)
subsection\<open>General Composition of SPSs\<close>
text\<open>With our general composition operator for \Gls{spf} we can also
define the general composition operator for \Gls{sps}. It composes
every possible combination of \Gls{spf} from both input \Gls{sps}.\<close>
definition spsComp::
"('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) set \<Rightarrow> ('I2\<^sup>\<Omega> \<rightarrow> 'O2\<^sup>\<Omega>) set \<Rightarrow> ( 'E\<^sup>\<Omega> \<rightarrow> 'F\<^sup>\<Omega>) set" where
"spsComp F G = {f \<otimes>\<^sub>\<star> g | f g. f\<in>F \<and> g\<in>G }"
abbreviation spsComp_abbr (infixr "\<Otimes>\<^sub>\<star>" 70) where
"sps1 \<Otimes>\<^sub>\<star> sps2 \<equiv> spsComp sps1 sps2"
text\<open>Hence, we restrict the signature of our general composition
operator to always obtain a \gls{sps} with the desired input and output
domains.\<close>
abbreviation genComp_nomabbr::
"('I1\<^sup>\<Omega> \<rightarrow> 'O1\<^sup>\<Omega>) set\<Rightarrow> ('I2\<^sup>\<Omega> \<rightarrow> 'O2\<^sup>\<Omega>) set
\<Rightarrow> ((('I1 \<union> 'I2) - ('O1 \<union> 'O2))\<^sup>\<Omega> \<rightarrow> ('O1 \<union> 'O2)\<^sup>\<Omega>) set"
(infixr "\<Otimes>" 70) where "sps1 \<Otimes> sps2 \<equiv> spsComp sps1 sps2"
theorem spscomp_praedicate:
fixes P::"'I1\<^sup>\<Omega> \<Rightarrow> 'O1\<^sup>\<Omega> \<Rightarrow> bool"
and H::"'I2\<^sup>\<Omega> \<Rightarrow> 'O2\<^sup>\<Omega> \<Rightarrow> bool"
assumes "chDom TYPE ('O1) \<inter> chDom TYPE ('O2) = {}"
shows "{p . \<forall>sb. P sb (p\<cdot>sb)} \<Otimes> {h . \<forall>sb. H sb (h\<cdot>sb)} \<subseteq>
{g. \<forall>sb.
let all = sb \<uplus> g\<cdot>sb in
P (all\<star>) (all\<star>) \<and> H (all\<star>) (all\<star>)
}"
apply (auto simp add: spsComp_def Let_def)
apply (simp add: spfcomp_extract_l)
apply (simp add: assms spfcomp_extract_r)
done
(* TODO: similar lemma with spsIO *)
lemma spscomp_praedicate2:
fixes P::"'I1\<^sup>\<Omega> \<Rightarrow> 'O1\<^sup>\<Omega> \<Rightarrow> bool"
and H::"'I2\<^sup>\<Omega> \<Rightarrow> 'O2\<^sup>\<Omega> \<Rightarrow> bool"
assumes "chDom TYPE ('O1) \<inter> chDom TYPE ('O2) = {}"
shows "
{g. \<forall>sb.
let all = sb \<uplus> g\<cdot>sb in
P (all\<star>) (all\<star>) \<and> H (all\<star>) (all\<star>)
} \<subseteq> spsComp {p . \<forall>sb. P sb (p\<cdot>sb)} {h . \<forall>sb. H sb (h\<cdot>sb)}" (is "?LHS \<subseteq> ?RHS")
oops
(*
proof
fix g
assume "g\<in>?LHS"
hence "\<And>sb. P ((sb\<uplus>g\<cdot>sb)\<star>) ((sb\<uplus>g\<cdot>sb)\<star>)"
by (metis (mono_tags, lifting) mem_Collect_eq)
have "\<exists>p h. p\<otimes>h = g" oops*)
(* from this obtain p h where "p\<otimes>h = g" by auto
have "\<And>sb. P (sb) (p\<cdot>sb)" oops *)
(* show "g\<in>?RHS" oops *)
(* Counterexample ... as far as I can see:
P = H = "is weakly causal"
is not preserved under the feedback composition *)
lemma "(spsComplete sps1) \<Otimes>\<^sub>\<star> (spsComplete sps2) = spsComplete (sps1 \<Otimes>\<^sub>\<star> sps2)"
oops
end
|
{"author": "yyisgladiator", "repo": "demo", "sha": "2a57300dfa7268721c78c233ee6b0a5454acce1f", "save_path": "github-repos/isabelle/yyisgladiator-demo", "path": "github-repos/isabelle/yyisgladiator-demo/demo-2a57300dfa7268721c78c233ee6b0a5454acce1f/src/sps/SPS.thy"}
|
import pandas as pd
import numpy as np
# Example usage: x = tex_table(df, textable_file="testlatextable.txt", bold='min')
def tex_table(df, textable_file, bold = None, nan_char = " x ", max_digits = 4):
""" This function is only intended for fully numerical tables (dataset x frameworks comparison).
Datasets should be row indices of df rather than a column.
Args:
df = DataFrame
textable_file = path to output file
bold = 'min' or = 'max' (if df only contains numbers), or = None for no bolding.
nan_char replaces NaN in LaTex table
max_digits = Maximum number of digits to show in each cell.
"""
if bold is not None:
if bold == 'min':
best_row_vals = df.min(axis=1)
# best_cols = df.idxmin(axis=1)
elif bold == 'max':
best_row_vals = df.max(axis=1)
# best_cols = df.idxmax(axis=0)
else:
raise ValueError("unknown bold option")
best_cols = []
for i in df.index:
row_best_cols = list(df.columns[np.abs(df.loc[i] - best_row_vals[i]) < 1e-5])
best_cols.append(row_best_cols)
if len(row_best_cols) <= 0:
raise ValueError("no row value matches best row value")
# SHIFT_FACTOR = 100
# df = df * SHIFT_FACTOR
# df = df.round(num_decimals)
# df = df / SHIFT_FACTOR
max_int = int(df.max(numeric_only=True).max())
max_digits = max(max_digits, len(str(max_int))) # make sure we don't truncate values before decimal
df = df.astype('str')
df = df.replace("nan", nan_char)
df = df.applymap(lambda x: x[:max_digits])
print(df.columns)
if bold is not None:
ind = 0
for i in df.index: # bold best value:
if len(best_cols[ind]) > 0:
for col_name in best_cols[ind]:
df.at[i,col_name] = "\\textbf{" + df.at[i,col_name] + "}"
ind += 1
df.reset_index(inplace=True) # set dataset indices as first column
df.rename(columns={'dataset':'Dataset'}, inplace=True)
cols = list(df.columns)
df.columns = ['\\textbf{'+col+'}' for col in cols]
textab = df.to_latex(escape=True, index=False, column_format = 'l'+'c'*(len(df.columns)-1))
textab = textab.replace("\\textbackslash textbf", "\\textbf")
textab = textab.replace("\\{", "{")
textab = textab.replace("\\}", "}")
with open(textable_file,'w') as tf:
tf.write(textab)
print("saved tex table to: %s" % textable_file)
return textab
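# Minimal end-to-end sketch (illustrative data). The index is named 'dataset'
# because tex_table renames that column to 'Dataset' after reset_index().
if __name__ == "__main__":
    demo = pd.DataFrame(
        {"fw_a": [0.12, 0.34], "fw_b": [0.10, np.nan]},
        index=pd.Index(["d1", "d2"], name="dataset"),
    )
    tex_table(demo, textable_file="demo_table.tex", bold="min")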
|
{"hexsha": "f787ed21aba9ce3fe0f0fcc84587cc4247ab35ac", "size": 2575, "ext": "py", "lang": "Python", "max_stars_repo_path": "autogluon_utils/benchmarking/evaluation/tex_table.py", "max_stars_repo_name": "jwmueller/autogluon-benchmarking", "max_stars_repo_head_hexsha": "28f35188a65c5fb37d4950fa9657ea84c9163049", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-03-18T21:07:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T20:21:21.000Z", "max_issues_repo_path": "autogluon_utils/benchmarking/evaluation/tex_table.py", "max_issues_repo_name": "jwmueller/autogluon-benchmarking", "max_issues_repo_head_hexsha": "28f35188a65c5fb37d4950fa9657ea84c9163049", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-04-10T07:41:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T23:49:42.000Z", "max_forks_repo_path": "autogluon_utils/benchmarking/evaluation/tex_table.py", "max_forks_repo_name": "jwmueller/autogluon-benchmarking", "max_forks_repo_head_hexsha": "28f35188a65c5fb37d4950fa9657ea84c9163049", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-03-13T21:21:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T23:39:20.000Z", "avg_line_length": 39.6153846154, "max_line_length": 103, "alphanum_fraction": 0.5933980583, "include": true, "reason": "import numpy", "num_tokens": 662}
|
!
! The Laboratory of Algorithms
!
! The MIT License
!
! Copyright 2011-2015 Andrey Pudov.
!
! Permission is hereby granted, free of charge, to any person obtaining a copy
! of this software and associated documentation files (the 'Software'), to deal
! in the Software without restriction, including without limitation the rights
! to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is
! furnished to do so, subject to the following conditions:
!
! The above copyright notice and this permission notice shall be included in
! all copies or substantial portions of the Software.
!
! THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
!
module MFCircle
use MFShape
implicit none
private
type, extends(TFShape), public :: TFCircle
contains
procedure :: getArea
end type
interface TFCircle
module procedure init
end interface
contains
function init(radius) result(circle)
real, intent(in) :: radius
type(TFCircle) :: circle
circle%width = radius
end function
    function getArea(this) result(area)
        class(TFCircle), intent(in) :: this
        real :: area
        ! width stores the radius, so the circle area is pi * r^2
        area = this%width * this%width * 3.14159265
    end function
end module
|
{"hexsha": "91085d476e8bac4b9ec65337ffcbe041a8f0ba87", "size": 1751, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Features/Inheritance/Circle.f", "max_stars_repo_name": "andreypudov/Algorithms", "max_stars_repo_head_hexsha": "9693efa0abed30a38fcb9c3f7eebbd5b92b734f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2016-02-15T14:45:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-01T22:42:32.000Z", "max_issues_repo_path": "Features/Inheritance/Circle.f", "max_issues_repo_name": "jlokimlin/Algorithms", "max_issues_repo_head_hexsha": "d981990394d940fd999334b0b65b118567670d1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Features/Inheritance/Circle.f", "max_forks_repo_name": "jlokimlin/Algorithms", "max_forks_repo_head_hexsha": "d981990394d940fd999334b0b65b118567670d1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-02-24T15:47:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-01T10:45:03.000Z", "avg_line_length": 30.7192982456, "max_line_length": 79, "alphanum_fraction": 0.7213021131, "num_tokens": 404}
|
r=0.28
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7dg6x/media/images/d7dg6x-023/svc:tesseract/full/full/0.28/default.jpg Accept:application/hocr+xml
|
{"hexsha": "e0d5f275d6b7860ec398b68de789a67f9a514f22", "size": 195, "ext": "r", "lang": "R", "max_stars_repo_path": "ark_87287/d7dg6x/d7dg6x-023/rotated.r", "max_stars_repo_name": "ucd-library/wine-price-extraction", "max_stars_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-16T19:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T18:04:46.000Z", "max_issues_repo_path": "ark_87287/d7dg6x/d7dg6x-023/rotated.r", "max_issues_repo_name": "ucd-library/wine-price-extraction", "max_issues_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-09-18T17:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-02T19:19:33.000Z", "max_forks_repo_path": "tesseract/rotate/d7dg6x-023.r", "max_forks_repo_name": "ucd-library/wine-price-extraction", "max_forks_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.0, "max_line_length": 187, "alphanum_fraction": 0.8256410256, "num_tokens": 62}
|
import numpy as np
import matplotlib.pyplot as plt
positions = np.loadtxt("test-positions.txt")
plt.plot(positions[:, 0], positions[:, 1])
plt.show()
|
{"hexsha": "3c681b0bc7f836e8f6d34a1bb3d8d21631c377df", "size": 152, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/LanderApp/test.py", "max_stars_repo_name": "LeoMaden/MarsLander", "max_stars_repo_head_hexsha": "d5b2e1f6c4520280c09cd22a0aeaeded89cc906e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LanderApp/test.py", "max_issues_repo_name": "LeoMaden/MarsLander", "max_issues_repo_head_hexsha": "d5b2e1f6c4520280c09cd22a0aeaeded89cc906e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LanderApp/test.py", "max_forks_repo_name": "LeoMaden/MarsLander", "max_forks_repo_head_hexsha": "d5b2e1f6c4520280c09cd22a0aeaeded89cc906e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 44, "alphanum_fraction": 0.7236842105, "include": true, "reason": "import numpy", "num_tokens": 37}
|
\section{Design} \label{designsection}
\begin{figure}[htbp]
\centering
\fbox{\includegraphics[width=\linewidth]{images/datapipeline.png}}
\caption{Data Pipeline.}
\label{fig:datapipeline}
\end{figure}
Figure \ref{fig:datapipeline} shows the overall data pipeline for the
project. The data pipeline has the following stages:
\begin{itemize}
\item Wiki crawler: The Wiki crawler runs in batch mode on a standalone
machine. It can download Wikipedia data as explained in section
\ref{wikicrawlersection}. The crawler creates CrawlDB, which is a
collection of text files. This crawler can be replaced or augmented with
any web crawler that can download or create the text files.
\item News crawler: The News crawler is responsible for downloading the
news articles.
\item CreateWord2VecModel: This component is responsible for creating the
Word2Vec model for the text files in the CrawlDB. It runs on Spark
and stores the model on HDFS. Section \ref{createmodelsection} describes this
component in detail.
\item UseWord2VecModel and FindRelations: These two components use the
pre-created Word2Vec model to find synonyms of a word or to find
relationships. Section \ref{usemodel} describes these components in detail.
\end{itemize}
\subsection{Wiki crawler} \label{wikicrawlersection}
The Wiki Crawler component downloads data from the web. We implemented
a simple crawler in Python which can deep-traverse Wikipedia pages
and download their text. In our crawler implementation, a user can
specify the seed pages from Wikipedia as well as the maximum
number of pages to download. The crawler first
downloads all the pages specified in the seedlist. It then extracts the links
from each Wikipedia page and puts them in a queue which is internally
maintained by the crawler. The crawler then downloads the linked pages.
Since this logic is applied repeatedly, the crawler can
potentially download all the Wikipedia pages which can be reached from the
pages in the seedlist.
We followed the seedlist-based crawler approach so that we can retrieve
domain-specific web pages. A well-chosen seedlist can fetch a large number
of relevant web pages.
Figure \ref{fig:crawleralgo} is the flowchart of the crawler implementation;
a minimal code sketch follows the figure.
\begin{figure}[htbp]
\centering
\fbox{\includegraphics[width=\linewidth]{images/crawleralgo.png}}
\caption{Flowchart of crawler.}
\label{fig:crawleralgo}
\end{figure}
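Below is a minimal, iterative sketch of this queue-based logic in
Python. Here \texttt{fetch\_and\_parse} is a hypothetical helper that
downloads a page and returns its text together with its outgoing links;
it is not part of the actual implementation.
\begin{verbatim}
from collections import deque

def crawl(seed_urls, max_pages):
    queue = deque(seed_urls)
    seen = set(seed_urls)
    pages = []
    while queue and len(pages) < max_pages:
        url = queue.popleft()
        text, links = fetch_and_parse(url)  # hypothetical helper
        pages.append((url, text))
        for link in links:
            if link not in seen:
                seen.add(link)
                queue.append(link)
    return pages
\end{verbatim}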
\subsection{News crawler} \label{newscrawlersection}
The News crawler is implemented in Python. It executes in
batch mode and downloads the latest news articles related to the topics
configured in its seedlist. The news crawler uses Google APIs
\cite{www-google-custom-search} to search for the topics configured in the
seedlist. It then iterates over the results of each search and downloads the
original HTML page contents. The textual portion of the HTML is extracted
using the goose Python library \cite{www-goose}.
\subsection{Word2Vec model creation} \label{createmodelsection}
CreateWord2VecModel is a Spark application implemented in Python. This
application is responsible for creating the Word2Vec model and storing it
for later use. Figure \ref{fig:word2vecmodelflow} shows the steps involved
in the Word2Vec model creation. We used Spark Feature Extraction
\cite{www-sparkml-features} to implement these steps, which are listed
below and sketched in code after the figure:
\begin{itemize}
\item Read crawled documents from HDFS
\item For each crawled document, remove special characters from the text
\item Tokenize text to create list of words
\item Remove the stop words
\item Create Word2Vec model
\item Store the model on HDFS
\end{itemize}
\begin{figure}[htbp]
\centering
\fbox{\includegraphics[width=\linewidth]{images/createword2vec.png}}
\caption{Steps involved in Word2Vec model creation.}
\label{fig:word2vecmodelflow}
\end{figure}
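The following is a minimal PySpark sketch of these steps; the HDFS
paths and the Word2Vec parameters are illustrative assumptions, not
values taken from the actual implementation.
\begin{verbatim}
from pyspark.sql import SparkSession
from pyspark.ml.feature import RegexTokenizer, StopWordsRemover, Word2Vec

spark = SparkSession.builder.appName("CreateWord2VecModel").getOrCreate()
# Read crawled documents from HDFS (one row per line of text)
docs = spark.read.text("hdfs:///crawldb/*.txt")
# Tokenize on non-word characters, which also removes special characters
tokens = RegexTokenizer(inputCol="value", outputCol="words",
                        pattern="\\W+").transform(docs)
# Remove the stop words
filtered = StopWordsRemover(inputCol="words",
                            outputCol="filtered").transform(tokens)
# Create the Word2Vec model and store it on HDFS
w2v = Word2Vec(vectorSize=100, minCount=5,
               inputCol="filtered", outputCol="vector")
model = w2v.fit(filtered)
model.save("hdfs:///models/word2vec")
\end{verbatim}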
\subsection{Using the Word2Vec model to find synonyms and relations}
\label{usemodel}
The pre-created Word2Vec model can be queried to find synonyms of
words or relations between words. In the context of Word2Vec, a synonym
of a word is a word that co-occurs in a similar context
\cite{Goldberg2014word2vecED}.
The \textit{UseWord2VecModel} application finds the synonyms for the words
provided in the \textit{stest.csv} file. The results are stored in the
\textit{sresults.csv} file.
The Word2Vec model is also used to find relationships.
\cite{www-tensorflow} explains how vector operations can be performed on
word vectors to derive relationships. The \textit{FindRelations} Spark
application performs these vector operations. The file relationtest.csv,
which has three words in each row, is the input to the
\textit{FindRelations} application. The application predicts a fourth word
which has the same relation to the third word as the second word has to
the first word. For example, given the row
\textit{Sachin,Anjali,Sourav}:
if \textit{Anjali} is the spouse of \textit{Sachin}, then the
\textit{FindRelations} application is expected to
predict the first name of the person who is the spouse of \textit{Sourav}.
The result of \textit{FindRelations} is saved in relationsresult.csv.
|
{"hexsha": "ac0a0c319c1a677a5c483e75902db1029de4cf0f", "size": 5284, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "project/S17-IR-P005/report/design.tex", "max_stars_repo_name": "cloudmesh/sp17-i524", "max_stars_repo_head_hexsha": "42dd11b914c03c741dad8a8505c3e091dc6ec412", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-01-19T07:25:57.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-02T21:02:52.000Z", "max_issues_repo_path": "project/S17-IR-P005/report/design.tex", "max_issues_repo_name": "cloudmesh/sp17-i524", "max_issues_repo_head_hexsha": "42dd11b914c03c741dad8a8505c3e091dc6ec412", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 98, "max_issues_repo_issues_event_min_datetime": "2017-01-19T04:24:02.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-27T11:30:50.000Z", "max_forks_repo_path": "project/S17-IR-P005/report/design.tex", "max_forks_repo_name": "cloudmesh/sp17-i524", "max_forks_repo_head_hexsha": "42dd11b914c03c741dad8a8505c3e091dc6ec412", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 294, "max_forks_repo_forks_event_min_datetime": "2017-01-09T13:18:39.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-13T01:32:24.000Z", "avg_line_length": 43.6694214876, "max_line_length": 79, "alphanum_fraction": 0.7973126419, "num_tokens": 1260}
|
<a href="https://colab.research.google.com/github/janchorowski/ml_uwr/blob/fall2019/assignment4/Assignment4.ipynb" target="_parent"></a>
**Submission deadline:**
* **Regular problems: last lab session before or on Monday, 9.13.2020**
* **Bonus problems: Last lab during semester**
**Points: 5 + 9 bonus points**
Please note: some of the assignments are tedious or boring if you are already a NumPy ninja. The bonus problems were designed to give you a more satisfying alternative.
## Heads Up!
This assignment comes with starter code, but you are not forced to use it, as long as you carry out all the analysis demanded in the problems.
## A note about plots!
Plots are a way of communication. Just like text, they can be paraphrased. You do not have to exactly reproduce my plots, but you must try to make sure your plots tell a similar story:
- label the axes
- add titles
- choose the plot type properly
- choose a color scale, limits, and ticks
so that you can describe what is happening!
## Bugs?!
Please submit GitHub PRs or email us about any problems with the notebook - we will try to correct them quickly.
```python
# Standard IPython notebook imports
%matplotlib inline
import os
from io import StringIO
import itertools
import httpimport
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook
import scipy.stats as sstats
import scipy.optimize as sopt
import seaborn as sns
import sklearn.datasets
import sklearn.ensemble
import sklearn.svm
import sklearn.tree
import cvxopt
# In this way we can import functions straight from github
with httpimport.github_repo('janchorowski', 'nn_assignments',
module='common', branch='nn18'):
from common.plotting import plot_mat
sns.set_style('whitegrid')
```
# SVM Theory
A linear SVM assigns points $x^{(i)}\in\mathbb{R}^n$ to one of two
classes, $y^{(i)}\in\{-1,1\}$ using the decision rule:
\begin{equation}
y = \text{signum}(w^T x + b).
\end{equation}
SVM training consists of finding weights $w\in\mathbb{R}^n$
and bias $b\in\mathbb{R}$ that maximize the separation margin. This
corresponds to solving the following quadratic optimization problem:
\begin{equation}
\begin{split}
\min_{w,b,\xi} &\frac{1}{2}w^Tw + C\sum_{i=1}^m \xi_i \\
\text{s.t. } & y^{(i)}(w^T x^{(i)} + b) \geq 1- \xi_i\;\; \forall_i \\
& \xi_i \geq 0 \;\; \forall_i.
\end{split}
\end{equation}
# Problem 1 [2p]
Load the iris dataset.
1. [1p] Using the `sklearn.svm.SVC` library train a linear SVM that
separates the Virginica from the Versicolor class using the
petal length and petal width features. Plot the obtained decision boundary and
the support vectors (their locations and weights - coefficients $\alpha$).
2. [.5p] Now train a nonlinear SVM using the Gaussian kernel. Tune the parameters `C` and `gamma` (for the kernel) to reach maximum training accuracy. Plot the decision boundary and support vectors.
3. [.5p] Answer the following questions:
    - When the SVM is forced to be maximally accurate on the train set, roughly how many support vectors do we get?\
    ans: roughly 80% of the training points become support vectors\
    - what is the relationship between the regularization constant `C` and the support vector weights `alpha`?\
    ans: `C` upper-bounds the dual weights (each `alpha_i` satisfies 0 <= alpha_i <= C), so a larger `C` lets individual support vectors carry more weight.
```python
# load iris, extract petal_length and petal_width of versicolors and virginicas
iris = sklearn.datasets.load_iris()
print('Features: ', iris.feature_names)
print('Targets: ', iris.target_names)
petal_length = iris.data[:,iris.feature_names.index('petal length (cm)')]
petal_width = iris.data[:, iris.feature_names.index('petal width (cm)')]
IrisX = np.array(iris.data.T)
IrisX = IrisX[:, iris.target!=0]
IrisX2F = np.vstack([petal_length, petal_width])
IrisX2F = IrisX2F[:, iris.target!=0]
# Set versicolor=0 and virginica=1
IrisY = (iris.target[iris.target!=0]-1).reshape(1,-1).astype(np.float64)
plt.scatter(IrisX2F[0,:], IrisX2F[1,:], c=IrisY.ravel(),
cmap='spring', edgecolors='k')
plt.xlabel('petal_length')
plt.ylabel('petal_width')
```
```python
#
# Fit a linear SVM using libsvm
#
from sklearn.svm import SVC
svm_model = SVC(kernel="linear")
svm_model.fit(IrisX2F.T, IrisY.ravel())
print("libsvm error rate: %f" % ((svm_model.predict(IrisX2F.T)!=IrisY).mean(),))
```
libsvm error rate: 0.050000
```python
#
# Plot the decision boundary
#
petal_lengths, petal_widths = np.meshgrid(np.linspace(IrisX2F[0,:].min(), IrisX2F[0,:].max(), 100),
np.linspace(IrisX2F[1,:].min(), IrisX2F[1,:].max(), 100))
IrisXGrid = np.vstack([petal_lengths.ravel(), petal_widths.ravel()])
predictions_Grid = svm_model.predict(IrisXGrid.T)
plt.contourf(petal_lengths, petal_widths, predictions_Grid.reshape(petal_lengths.shape), cmap='spring')
plt.scatter(IrisX2F[0,:], IrisX2F[1,:], c=IrisY.ravel(),
cmap='spring', edgecolors='k')
plt.xlabel('petal_length')
plt.ylabel('petal_width')
plt.title('Decision boundary found by libsvm')
```
```python
#
# Plot the decision boundary and the support vectors.
#
# You can extract the indices of support vectors and their weights from fields of the
# svm object. Display the location of support vectors and their weights (by changing the
# size in the scatterplot)
#
# TODO
#
support_vector_indices = svm_model.support_
support_vector_coefficients = svm_model.dual_coef_
plt.contourf(petal_lengths, petal_widths, predictions_Grid.reshape(petal_lengths.shape), cmap='spring')
plt.scatter(
IrisX2F[0,support_vector_indices],
IrisX2F[1,support_vector_indices],
c=IrisY.ravel()[support_vector_indices],
s=(np.abs(support_vector_coefficients)*10)**2,
cmap='spring',
edgecolors='k')
plt.xlabel('petal_length')
plt.ylabel('petal_width')
plt.title('Decision boundary found by libsvm')
```
```python
#
# Fit a nonlinear SVM with a Gaussian kernel using libsvm.
# Optimize C and gamma to reach maximum training accuracy.
#
svm_gauss_model = SVC(C=3, gamma=100)
svm_gauss_model.fit(IrisX2F.T, IrisY.ravel())
print("libsvm error rate: %f" % ((svm_gauss_model.predict(IrisX2F.T)!=IrisY).mean(),))
petal_lengths, petal_widths = np.meshgrid(np.linspace(IrisX2F[0,:].min(), IrisX2F[0,:].max(), 100),
np.linspace(IrisX2F[1,:].min(), IrisX2F[1,:].max(), 100))
IrisXGrid = np.vstack([petal_lengths.ravel(), petal_widths.ravel()])
predictions_Grid = svm_gauss_model.predict(IrisXGrid.T)
plt.contourf(petal_lengths, petal_widths, predictions_Grid.reshape(petal_lengths.shape), cmap='spring')
sizes = np.zeros(IrisY.shape[-1])
sizes[svm_gauss_model.support_] = np.abs(svm_gauss_model.dual_coef_) **2 *10
# sizes[svm_gauss_model.support_[np.abs(svm_gauss_model.dual_coef_).argsort()[:, -3:]]]*=10
plt.scatter(IrisX2F[0,:], IrisX2F[1,:], c=IrisY.ravel(), s=sizes,
cmap='spring', edgecolors='k')
print(IrisY.ravel().shape, svm_gauss_model.dual_coef_.shape)
plt.xlabel('petal_length')
plt.ylabel('petal_width')
plt.title('Decision boundary found by libsvm')
```
# Problem 2 [1p]
Reimplement the linear SVM using the `cvxopt.solvers.qp`
Quadratic Programming (QP) solver. You will need to define the matrices
that specify the problem. Compare the obtained solutions. Extract the
support vectors from the LIBSVM solution and plot them.
The `cvxopt.solvers.qp` routine solves the following optimization problem:
\begin{align}
\text{minimize over } x \text{: }& \frac{1}{2} x^T P x + q^T x \\
\text{subject to: } & Gx \leq h \\
& Ax = b
\end{align}
\begin{equation}
\begin{split}
\min_{w,b,\xi} &\frac{1}{2}w^Tw + C\sum_{i=1}^m \xi_i \\
\text{s.t. } & -y^{(i)}(w^T x^{(i)} + b) - \xi_i \leq -1\;\; \forall_i \\
& -\xi_i \leq 0 \;\; \forall_i.
\end{split}
\end{equation}
To solve the SVM problem you need to encode the weights $w$, the bias $b$, and the slack variables $\xi$ as elements of the vector $x$, then properly fill the matrices and vectors $P$, $q$, $G$, $h$. We can ignore the $A$ and $b$ parameters, since there are no linear equality constraints.
```python
IrisX2F.shape
```
(2, 100)
```python
#
# Now solve the SVM using the QP solver
#
n, m = IrisX2F.shape
C=10.0
# x: w | b | xi
P = np.zeros((n+1+m, n+1+m)) #w, bias, xi
q = np.zeros((n+1+m,1))
G = np.zeros((2*m, n+1+m)) # we have two constraints for each data point:
                           # that the margin is at least 1 - xi
                           # and that xi is nonnegative
h = np.zeros((2*m,1))
#
# TODO: fill in P, q, G, h
#
P[:n, :n] = np.eye(n)          # quadratic term: 1/2 * w^T w
q[n+1:] = np.ones((m,1)) * C   # linear term: C * sum_i xi_i
G[:m,:n+1] = -np.ones((m,n+1))
G[:m,:n+1] *= IrisY.T * 2 - 1  # map the {0,1} labels to {-1,+1}
G[:m,:n] *= IrisX2F.T          # margin rows: -y_i*(w^T x_i + b) - xi_i <= -1
G[:m,n+1:] = -np.eye(m)
G[m:,n+1:] = -np.eye(m)        # slack rows: -xi_i <= 0
h[:m,:] = -np.ones((m, 1))     # RHS: margin rows get -1, slack rows stay 0
#
# Now run the solver
#
ret = cvxopt.solvers.qp(cvxopt.matrix(P), cvxopt.matrix(q),
cvxopt.matrix(G), cvxopt.matrix(h), )
ret = np.array(ret['x'])
#
# extract the weights and biases
#
W = ret[:n].reshape(-1,1)
b = ret[n]
#
# Extract the weight and bias from libsvm for comparison
#
Wlibsvm = svm_model.coef_
blibsvm = svm_model.intercept_
print()
print('W', W.T, 'Wlibsvm', Wlibsvm)
print('b', b, 'blibsvm', blibsvm)
petal_lengths, petal_widths = np.meshgrid(np.linspace(IrisX2F[0,:].min(), IrisX2F[0,:].max(), 100),
np.linspace(IrisX2F[1,:].min(), IrisX2F[1,:].max(), 100))
IrisXGrid = np.vstack([petal_lengths.ravel(), petal_widths.ravel()])
# predictions_Grid = svm_model.predict(IrisXGrid.T)
plt.contourf(petal_lengths, petal_widths, (W.T @ IrisXGrid + b >= 0).astype(int).reshape(petal_lengths.shape), cmap='spring')
plt.scatter(IrisX2F[0,:], IrisX2F[1,:], c=IrisY.ravel(),
cmap='spring', edgecolors='k')
plt.xlabel('petal_length')
plt.ylabel('petal_width')
plt.title('Decision boundary found by QP solver')
None
```
# Problem 3 [2p]
Repeat 100 bootstrap experiments to establish the effect of constant $C$ on SVM.
For each experiment do the following:
1. Sample (with replacement) a bootstrap dataset equal in size to the
training dataset. This will be this experiment's training dataset.
2. Prepare the experiment's testing dataset by using samples not
included in the bootstrap dataset.
3. For all $C$ from the set $\{10^{-4}, 10^{-3.5}, 10^{-3}, \ldots, 10^{6}\}$
fit a nonlinear SVM (Gaussian kernel, called `rbf` in
LIBSVM, using the default $\gamma$) and record the training and
testing errors.
Analyze a box plot of errors as a function of $C$. Can you see its
influence on the training and testing error, as well as on the
testing error variability?
**Indicate regions of overfitting and underfitting.**
```python
res = []
for rep in range(100):
bootstrap_sel = np.random.randint(0, IrisY.shape[1], IrisY.shape[1])
test_sel = np.setdiff1d(np.arange(IrisY.shape[1]), np.unique(bootstrap_sel))
bootstrap_IrisX = IrisX[:,bootstrap_sel]
bootstrap_IrisY = IrisY[:,bootstrap_sel]
test_IrisX = IrisX[:,test_sel]
test_IrisY = IrisY[:,test_sel]
#
# TODO: Loop over a list of exponents.
#
for Cexponent in np.arange(-4, 6.5, 0.5):
C = 10.0**Cexponent
svm_model = SVC(C=C, gamma='auto')
svm_model.fit(bootstrap_IrisX.T, bootstrap_IrisY.ravel())
train_acc = (svm_model.predict(bootstrap_IrisX.T)==bootstrap_IrisY).mean()
test_acc = (svm_model.predict(test_IrisX.T)==test_IrisY).mean()
res.append(dict(Cexponent=Cexponent, err=1-test_acc, subset='test'))
res.append(dict(Cexponent=Cexponent, err=1-train_acc, subset='train'))
res = pd.DataFrame(res)
chart = sns.catplot(kind='box', x='Cexponent', y='err', col='subset',
color='blue', data=res)
chart.set_xticklabels(rotation=45)
None
```
# Problem 4 [3p bonus]
Implement a nonlinear SVM by solving the dual problem using the Quadratic Programming solver. Compare results with LIBSVM.
Please see [page 20 of the CS229 lecture notes](http://cs229.stanford.edu/notes/cs229-notes3.pdf) for the problem formulation.
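For reference, the dual of the soft-margin, kernelized SVM from the linked notes is
\begin{align}
\max_{\alpha} \; & \sum_{i=1}^m \alpha_i - \frac{1}{2} \sum_{i,j=1}^m y^{(i)} y^{(j)} \alpha_i \alpha_j K(x^{(i)}, x^{(j)}) \\
\text{s.t. } & 0 \leq \alpha_i \leq C \;\; \forall_i, \\
& \sum_{i=1}^m \alpha_i y^{(i)} = 0.
\end{align}
Negating the objective turns it into the solver's minimization form; the single equality constraint is what the $A$ and $b$ arguments of `cvxopt.solvers.qp` are for.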
# Problem 5 [2p bonus]
Compare two ways to implement a multi-class
SVM: by training ``1-vs-1`` classifier for each class combination,
and by training a ``1-vs-rest`` classifier for each class. See
http://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.pdf for
details.
```python
```
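A minimal sketch of the comparison using scikit-learn's meta-estimators (illustrative hyperparameters; this loads the full 3-class Iris data rather than reusing the 2-feature subset above):
```python
# Sketch: compare 1-vs-1 and 1-vs-rest multiclass SVMs on the full Iris data.
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
for name, clf in [("1-vs-1", OneVsOneClassifier(SVC(C=1.0, gamma='auto'))),
                  ("1-vs-rest", OneVsRestClassifier(SVC(C=1.0, gamma='auto')))]:
    clf.fit(X, y)
    print("%s training error: %f" % (name, (clf.predict(X) != y).mean()))
```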
# Problem 6 [4p bonus]
Implement a Kernelized linear regression. Train it on Iris using a Gaussian kernel. Compare to the non-linear SVM.
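A minimal sketch of one way to do this (plain NumPy kernel ridge regression with a Gaussian kernel; `gamma` and `lam` are illustrative choices, not tuned):
```python
# Sketch: kernelized (ridge) regression with a Gaussian (RBF) kernel,
# fit by solving (K + lam*I) alpha = y for the dual coefficients.
import numpy as np

def rbf_kernel(A, B, gamma=1.0):
    # A: (n, d), B: (m, d) -> (n, m) Gram matrix of exp(-gamma * ||a - b||^2)
    sq = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * sq)

def fit_kernel_ridge(X, y, gamma=1.0, lam=1e-3):
    K = rbf_kernel(X, X, gamma)
    return np.linalg.solve(K + lam * np.eye(len(X)), y)

def predict_kernel_ridge(X_train, alpha, X_new, gamma=1.0):
    # f(x) = sum_i alpha_i k(x_i, x)
    return rbf_kernel(X_new, X_train, gamma) @ alpha
```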
|
{"hexsha": "a5afec979183c72b1b6cd8801caa013a71feddca", "size": 168550, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "assignment4/Assignment4.ipynb", "max_stars_repo_name": "iCarrrot/ML", "max_stars_repo_head_hexsha": "05177012d36ca64a5b2730287b3ae5b086306197", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment4/Assignment4.ipynb", "max_issues_repo_name": "iCarrrot/ML", "max_issues_repo_head_hexsha": "05177012d36ca64a5b2730287b3ae5b086306197", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment4/Assignment4.ipynb", "max_forks_repo_name": "iCarrrot/ML", "max_forks_repo_head_hexsha": "05177012d36ca64a5b2730287b3ae5b086306197", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 205.5487804878, "max_line_length": 28892, "alphanum_fraction": 0.8999525363, "converted": true, "num_tokens": 3649}
|
/-
Given any two propositions, P and Q, we can form
the proposition, P → Q. That is the syntax of an
implication.
If P and Q are propositions, we read P → Q as
P implies Q. A proof of a proposition, P → Q,
is a program that converts any proof of P
into a proof of Q. The type of such a program
is P → Q.
-/
/- Elimination Rule for Implication -/
/-
From two premises, (1) that "if it's raining then
the streets are wet," and (2) "it's raining," we
can surely derive as a conclusion that the streets
are wet. Combining "raining → streets wet" with
"raining" reduces to "streets wet." This is modus
ponens.
Let's abbreviate the proposition "it's raining"
as R, and let's also abbreviate the proposition
"the streets are wet" as W. We will then abbreviate
the proposition "if it's raining then the streets
are wet" as R → W.
Such a proposition is in the form of what we call
an implication. It can be pronounced as "R implies
W", or simply "if R is true then W must be true."
Note that by this latter reading, in a case where
R is not true, the implication says nothing about
whether W must be true or not. We will thus judge
an implication to be true if either R is false (in
which case the implication does not constrain W
to be any value), or if whenever R is true, W is,
too. The one situation under which we will not be
able to judge R → W to be true is if it can be
the case that R is true and yet W is not, as that
would contradict the meaning of an implication.
With these abbreviations in hand, we can write
an informal inference rule to capture the idea
we started with. If we know that a proposition,
R → W, is true, and we know that the proposition,
R, is true, then we can deduce that W therefore
must also be true. We can write this inference
rule informally like this:
R → W, R
-------- (→ elim)
W
This is the arrow (→) elimination rule.
In the rest of this chapter, we will formalize
this notion of inference by first presenting the
elimination rule for implication. We will see
that this rule not only formalizes Aristotle's
modus ponens rule of reasoning (it is one of
his fundamental "syllogisms"), but it also
corresponds to function application!
EXERCISE: When you apply a function that takes
an argument of type R and returns a value of
type W to a value of type R, what do you get?
-/
/-
Now let's specify that R and W are arbitrary
propositions in the type theory of Lean. And
recall that to judge R → W to be true or to
judge either R or W to be true means that we
have proofs of these propositions. We can now
give a precise rule in type theory capturing
Aristotle's modus ponens: what it means to be
a function, and how function application works.
{ R W : Prop }, pfRtoW : R → W, pfR : R
--------------------------------------- (→-elim)
pfW: W
Here it is formalized as a function.
-/
def
arrow_elim
{ R W: Prop } (pfRtopfW : R → W) (pfR : R) : W :=
pfRtopfW pfR
/-
This program expresses the inference rule. The
name of the rule (a program) is arrow_elim. The
function takes (1) two propositions, R and W;
(2) a proof of R → W (itself a program that
converts any proof of R into a proof of W);
(3) a proof of R. It promises that if it is
given any values of these types, it will
return a proof of (a value of type) W. Given
values for its arguments it derives a proof
of W by applying the given function to the
given value. The result will be a proof of (a
value of type) W.
We thus now have another way to pronounce this
inference rule: "if you have a function that can
turn any proof of R into a proof of W, and if you
have a proof of R, then you obtain a proof of W,
and you do it in particular by applying the
function to that value."
-/
/- In yet other words, if you've got a function
and you've got an argument value, then you can
eliminate the function (the "arrow") by applying
the function to that value, yielding a value of
the return type.
-/
/-
A concrete example of a program that serves as
a proof of W → R is found in our program, from
the 03_Conjunction chapter that turns any proof
of P ∧ Q (W) into a proof Q ∧ P (R).
We wrote that code so that for any propositions,
P and Q, for any proof of P ∧ Q, it returns a
proof of Q ∧ P. It can always do this because
from any proof of P ∧ Q, it can obtain separate
proofs of P and Q that it can then reassemble
into a proof of Q ∧ P. That function is a proof
of this type: ∀ P Q: Prop, P ∧ Q → Q ∧ P. That
says, for any propositions, P and Q, a function
of this type turns a proof of P ∧ Q into a proof
of Q ∧ P. It thus proves P ∧ Q → Q ∧ P.
-/
/-
We want to give an example of the use of the
arrow-elim rule. In this example we use a new
(for us) capability of Lean: you can declare
variables to be of given types without proofs,
i.e., as axioms.
Here (read the code) we assume that P and Q
are arbitrary propositions. We do not say
what specific propositions they are. Next
we assume that we have a proof that P → Q,
which will be represented as a program
that takes proofs of Ps and returns proofs
of Qs. Third we assume that we have some
proof of P. And finally we check to see
that the result of applying impl to pfP is
of type Q.
-/
variables P Q : Prop
variable impl : P → Q
variable pfP : P
#check (impl pfP)
#check (arrow_elim impl pfP)
/-
In Lean, a proof of R → W is given as a
program: a "recipe" for making a proof of W
out of a proof of R. With such a program in
hand, if we apply it to a proof of R it will
derive a proof of W.
-/
/-
EXAMPLE: implications involving true and false
-/
/-
Another way to read P → Q is "if P (is true)
then Q (is true)."
We now ask which of the following implications
can be proved?
true → true -- if true (is true) then true (is true)
true → false -- if true (is true) then false (is true)
false → true -- if false (is true) then true (is true)
false → false -- if false (is true) then false (is true)
EXERCISE: What is your intuition?
Hint: Remember the unit on true and false. Think about
the false elimination rule. Recall how many proofs there
are of the proposition, false.
-/
/- true → true -/
/-
Let's see one of the simplest of all possible
examples to make these abstract ideas concrete.
Consider the proposition, true → true. We can
read this as "true implies true". But for our
purposes, a better way to say it is, "if you
assume you are given a proof of true, then you
can construct and return a proof of true."
We can also see this proposition as the type of
any program that turns a proof of true into a
proof of true. That's going to be easy! Here it
is: a simple function definition in Lean. We call
the program timpt (for "true implies true"). It
takes an argument, pf_true, of type true, i.e.,
a proof of true, as an argument, and it builds
and returns a proof of true by just returning
the proof it was given! This function is thus
just the identity function of type true → true.
-/
def timpt ( pf_true: true ) : true := pf_true
theorem timpt_theorem : true → true := timpt
#check timpt
/-
If this program is given a proof, pf, of true,
it can and does return a proof of true. Let's
quickly verify that by looking at the value we
get when we apply the function (implication) to
true.intro, which is the always-available proof
of true.
-/
#reduce (timpt true.intro)
/-
Indeed, this program is in effect a proof
of true → true because if it's given any
proof of true (there's only one), it then
returns a proof of true.
Now we can see explicitly that this program
is a proof of the proposition, true → true,
by formalizing the proposition, true → true,
and giving the program as a proof!
-/
theorem true_imp_true : true → true := timpt
/-
And the type of the program, which we are
now interpreting as a proof of true → true,
is thus true → true. The program is a value
of the proposition, and type, true → true.
-/
#check timpt
/- true → false -/
/-
EXERCISE: Can you prove true → false? If
so, state and prove the theorem. If not,
explain exactly why you think you can't
do it.
-/
-- def timpf (pf_true : true) : false := _
-- theorem timpf_theorem: true → false := _
/- false → true -/
/-
EXERCISE: Prove false → true. The key to
doing this is to remember that applying
false.elim (think of it as a function) to
a proof of false proves anything at all.
-/
def fimpt (f: false) : true := true.intro
theorem fimpt_theorem : false → true := fimpt
/- false → false -/
/-
EXERCISE: Is it true that false → false?
Prove it. Hint: We wrote a program that
turned out to be a proof of true → true.
If you can write such a program, call it
fimpf, then use it to prove the theorem,
false_imp_false: false → false.
-/
def fimpf (f: false) : false := f
theorem fimpf_theorem : false → false := fimpf
def fimpzeqo (f: false) : 0 = 1 := false.elim f
theorem fizeo : false → 0 = 1 := fimpzeqo
/-
We summarize our findings in the following
table for implication.
true → true : proof, true
true → false : <no proof>
false → true : proof, true
false → false : proof, true
What's deeply interesting here is that
we're not just given these judgments as
unexplained pronouncements. We've *proved*
three of these judgments. The fourth we
could not prove, and we made an argument
that it can't be proved, but we haven't
yet formally proved, nor do even have a
way to say yet, that the proposition is
false. The best we can say at this time
is that we don't have a proof.
-/
#check true_imp_true -- (proof of) implication
#check true.intro -- (proof of) premise
#check (true_imp_true true.intro) -- conclusion!
/- *** → INTRODUCTION RULE -/
/-
The → introduction rule says that if
assuming that there is proof of P allows
you to derive a proof of Q, then one can
derive a proof of P → Q, discharging the
assumption.
To represent this rule as an inference
rule, we need a notation to represent
the idea that from an assumption that
there is a proof of P one can derive a
proof of Q. The notation used in most
logic books represents this notion as a
vertical dotted line from a P above to
a Q below. If one has such a derivation
then one can conclude P → Q. The idea
is that the derivation is in essence a
program; the program is the proof; and
it is a proof of the proposition, which
is to say, of the type, P → Q.
P
|
|
Q
-----
P → Q
The proof of a proposition, P → Q, in
Lean, is thus a program that takes an
argument of type P and returns a result
of type Q.
-/
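/-
For instance, the smallest possible use of the
introduction rule: assuming a proof, p, of P, we
can return it as a proof of P, so the function
below proves P → P for any proposition P.
-/
theorem p_imp_p (P : Prop) : P → P := λ p, p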
/- ** Direct Proofs: Proof Strategy -/
/-
Many of the propositions you need to
prove in practice are implications, of
the form P → Q. It's a way of saying,
"under conditions defined by P it must
be the case that Q is also true. A
direct proof is just what we have
seen: a way to derive a proof of Q
from an assumed proof of P.
To prove P → Q, you thus start with an
assumption that there's a proof of P,
and from that you deduce a proof of Q.
-/
/-
EXAMPLE: Give a direct proof of eqsym:
a = b → b = a. Give it as both a lambda
expression and as an assume-show-from
tactic script.
-/
lemma eqsym :
∀ a b : nat, a = b → b = a
:= λ a b h, eq.symm h  -- a direct proof, as a lambda expression
lemma eqsym' :
∀ a b : nat, a = b → b = a
:= begin
  intros a b h,     -- assume arbitrary a b and a proof, h, that a = b
  exact eq.symm h,  -- symmetry of equality then gives b = a
end
/-
http://zimmer.csufresno.edu/~larryc/proofs/proofs.direct.html
-/
|
{"author": "kevinsullivan", "repo": "cs-dm", "sha": "bfd2f5fd2612472e15bd970c7870b5d0dd73bd1c", "save_path": "github-repos/lean/kevinsullivan-cs-dm", "path": "github-repos/lean/kevinsullivan-cs-dm/cs-dm-bfd2f5fd2612472e15bd970c7870b5d0dd73bd1c/04_Implication/00_intro.lean"}
|
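"""
  Evaluate training function number `func` at the point (`x`, `y`). The
  catalog mixes linear functions, sums of sine/cosine modes, and sums of
  two Gaussian bumps; the coefficients are fixed samples of the random
  draws shown in the adjacent comments.
"""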
function trainfunction2d(func,x,y)
if func == 1
a1 = 3 #rand(Uniform(-10,10))
b1 = -4#rand(Uniform(-10,10))
u = a1*x + b1*y
elseif func == 2
a = [0.9, 0.1, -0.4]#rand(Uniform(-1,1),3)
b = [0.2, -0.6, -0.3]#rand(Uniform(-1,1),3)
u = a[2]*sin(1*pi*y)+cos(1*pi*x)
elseif func == 3
a = [0.9464328087730216; 0.17162310348664844; 0.07857856828523646] #rand(Uniform(-1,1),3)
b = [0.9355991072384002, 0.5353806651162376, -0.0613925812939633]
u = a[3]*sin(2*pi*y)+b[3]*cos(2*pi*x)
elseif func == 4
a = [-0.8752648836596824, -0.2937424545901659, 0.5352037919227479]#rand(Uniform(-1,1),3)
b = [-0.9137179533411737, -0.4640307310161864, -0.8663071119500332]#rand(Uniform(-1,1),3)
u1 = a[3]*sin(pi*x)+b[3]*cos(pi*y)
u = u1 + a[1]*sin(2*pi*x)+b[1]*cos(2*pi*y)
elseif func == 5
a = [-0.6867267253726719, 0.21059347965868014, -0.728510882976293]#rand(Uniform(-1,1),3)
b = [0.676235507814718, 0.8294240477938528, -0.3998500839840293]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[2]*cos(pi*y)
u = u1 + a[3]*sin(2*pi*x)+b[3]*cos(2*pi*y)
elseif func == 6
a = [0.7983982959430316, 0.903381400725598, 0.6022370326216002]#rand(Uniform(-1,1),3)
b = [-0.7513545425595294, -0.7714624763400932, -0.8408910576188569]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 7
a = [0.4456995188427575, -0.760694865555275, 0.5341392645364422]#rand(Uniform(-1,1),3)
b = [0.6038471708245794, -0.9293109017054246, -0.03067895573440227]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 8
a1 = 8 #rand(Uniform(-10,10))
b1 = -3 #rand(Uniform(-10,10))
u = a1*x + b1*y
elseif func == 9
a =[0.2653910349699058, 0.4700076088116263, 0.09668422924668407, -0.046234710248600486, 0.20474920422233067, 0.5826924980829493]
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 10
a =[-0.9017561219736625, -0.03447706434100306, 0.3305030118808854, 0.684511518915635, -0.661785410962842, 0.877713860698909]
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 11
a =[-0.4, 1.0, 0.2, 0.6, -0.9, 0.8] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 13
a = [0.8977676302413324, 0.5503759548450637, -0.6451820733536873]#rand(Uniform(-1,1),3)
b = [0.8, -0.6, -0.3]#rand(Uniform(-1,1),3)
u = a[2]*sin(1*pi*x)+cos(1*pi*y)
elseif func == 12
a =[-0.21587992491755914, -0.041287018187041724, -0.8635531658151585, -0.040103752643044555, -0.0632059673237162, -0.09508661768127391]
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 14
a = [-0.9, 0.1, -0.4]#rand(Uniform(-1,1),3)
b = [0.8, -0.6, -0.3]#rand(Uniform(-1,1),3)
u = a[2]*sin(pi*x)+b[2]*cos(pi*y)
elseif func == 15
a1 = -1 #rand(Uniform(-10,10))
b1 = 3 #rand(Uniform(-10,10))
u = a1*x + b1*y
elseif func == 16
a = [0.2, 0.9, -0.6]#rand(Uniform(-1,1),3)
b = [-0.1, 0.7, 0.1]#rand(Uniform(-1,1),3)
u = a[1]*sin(2*pi*x)+b[1]*cos(2*pi*y)
elseif func == 17
a = [-0.6, 0.2, -0.9]#rand(Uniform(-1,1),3)
b = [-1.0, -0.8, 0.3]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 18
a =[-0.4, 1.0, 0.2, 0.6, -0.9, 0.8] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 19 #10
a = [0.2, 0.1, -0.4]#rand(Uniform(-1,1),3)
b = [0.1, -0.6, -0.3]#rand(Uniform(-1,1),3)
        u = a[2]*sin(pi*x)+b[2]*cos(pi*y)
elseif func == 20
a = [-0.6, 0.2, -0.9]#rand(Uniform(-1,1),3)
b = [-1.0, -0.8, 0.3]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 21
a = [0.3, 0.4, -1.0]#rand(Uniform(-1,1),3)
b = [1.0, -0.4, 0.6]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 22
a = [0.5, -0.9, -0.2]#rand(Uniform(-1,1),3)
b = [0.1, 0.2, 0.8]#rand(Uniform(-1,1),3)
u = a[1]*sin(2*pi*x)+b[1]*cos(2*pi*y)
elseif func == 23
a = [-0.2, -0.4, 0.9]#rand(Uniform(-1,1),3)
b = [0.8, 0.6, -0.1]#rand(Uniform(-1,1),3)
        u = a[1]*sin(pi*x)+b[1]*cos(pi*y)
elseif func == 24
a = [-0.2, -0.4, 0.9]#rand(Uniform(-1,1),3)
b = [0.8, 0.6, -0.1]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 25
a = [-0.3, -0.2, 1.0]#rand(Uniform(-1,1),3)
b = [1.0, -0.2, -0.6]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 26
a = [-0.1, -0.4, 1.0]#rand(Uniform(-1,1),3)
b = [-1.0, 0.1, 0.8]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
elseif func == 27
a = [0.8, 0.9, -0.2]#rand(Uniform(-1,1),3)
b = [0.1, 0.2, -0.1]#rand(Uniform(-1,1),3)
u = a[1]*sin(2*pi*x)+b[1]*cos(2*pi*y)
elseif func == 28
a = [0.2, -0.4, 0.9]#rand(Uniform(-1,1),3)
b = [-0.8, 0.6, -0.1]#rand(Uniform(-1,1),3)
        u = a[1]*sin(pi*x)+b[1]*cos(pi*y)
elseif func == 29 #20
a = [0.9, 0.4, -0.7]#rand(Uniform(-1,1),3)
b = [0.9, -0.5, -0.3]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 30
a = [0.7, 0.0, -1.0]#rand(Uniform(-1,1),3)
b = [0.0, -0.2, -0.6]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 31
a = [0.1, 1.0, 1.0]#rand(Uniform(-1,1),3)
b = [-1.0, -0.3, 0.8]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
elseif func == 32
a = [0.1, 0.9, -0.2]#rand(Uniform(-1,1),3)
b = [0.2, 0.2, -0.1]#rand(Uniform(-1,1),3)
u = a[1]*sin(2*pi*x)+b[1]*cos(2*pi*y)
elseif func == 33
a = [-0.3, -0.4, 0.9]#rand(Uniform(-1,1),3)
b = [0.8, 0.6, -0.1]#rand(Uniform(-1,1),3)
        u = a[1]*sin(pi*x)+b[1]*cos(pi*y)
elseif func == 34
a = [0.9, 0.1, -0.8]#rand(Uniform(-1,1),3)
b = [0.9, 0.3, -0.3]#rand(Uniform(-1,1),3)
u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 35
a =[1.0, 0.3, -0.7, -0.2, 0.1, -0.9] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 36
a =[-0.6, 1.0, 0.6, -0.8, -0.1, 0.7] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 37
a =[0.9, -0.1, 0.1, -0.8, 0.4, -0.9] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 38
a =[-1.0, 0.5, 0.2, -0.8, 0.0, -0.9] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 39
a =[-0.4, 1.0, 0.2, 0.6, -0.9, 0.8] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 40
a =[0.9, -1.0, 0.4, -0.2, 0.6, -0.2] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 41
a =[0.4, -0.6, 1.0, 0.5, -0.5, 0.2] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 42
a =[-0.2, 0.6, 0.3, 0.9, -0.7, 0.9] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 43
a =[-1.0, -0.1, 0.1, 1.0, 0.1, 0.9] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 44
a =[0.4, -0.2, 0.8, 0.6, -0.9, 0.8] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 45
a =[1.0, 0.5, 0.9, -1.0, -0.1, 0.3] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 46
a =[-0.4, -0.4, 0.9, 0.6, -0.9, -0.8] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
end
return u
end
function troubledcellfunctionabs2d(x, y, a, m, x0, y0)
u = a*abs((y-y0)-m*(x-x0))
return u
end
function troubledcellfunctionstep2d(x, y, ui, m, x0, y0)
h1 = y0+m*(x-x0)
h2 = y0-(1/m)*(x-x0)
if y <= h1 && y < h2
u=ui[1]
elseif y >= h1 && y > h2
u=ui[2]
elseif y < h1 && y >= h2
u=ui[3]
elseif y > h1 && y <= h2
u=ui[4]
elseif y == h1 && y == h2
u=ui[1]
end
return u
end
function roundstep2d(x, y, ui, r0, x0, y0)
inicenter = SVector(x0, y0)
x_norm = x - inicenter[1]
y_norm = y - inicenter[2]
r = sqrt(x_norm^2 + y_norm^2)
# Calculate primitive variables
u = r > r0 ? ui[1] : ui[2]
return u
end
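"""
  Return `true` if the square cell with lower-left corner `node_coord` and
  side length `length` is not crossed by the discontinuity (or kink) of the
  troubled-cell function selected by `func`: a line through (`x0`, `y0`)
  with slope `m` (func 1), the pair of perpendicular lines of the step
  function (func 2 or 3), or the circle of radius `m` centered at
  (`x0`, `y0`) (func 4).
"""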
function good_cell2d(node_coord, length, func, m, x0, y0)
x1 = node_coord[1]
y1 = node_coord[2]
x2 = node_coord[1] + length
y2 = node_coord[2] + length
if func == 1
y1_abs = y0 + m*(x1-x0)
y2_abs = y0 + m*(x2-x0)
x1_abs = x0 + (1/m)*(y1-y0)
x2_abs = x0 + (1/m)*(y2-y0)
if y1_abs <=y2 && y1_abs >= y1
false
elseif y2_abs <=y2 && y2_abs >= y1
false
elseif x1_abs <= x2 && x1_abs >= x1
false
elseif x2_abs <= x2 && x2_abs >= x1
false
else
true
end
elseif func == 2 || func == 3
y1_step = y0 + m*(x1-x0)
y2_step = y0 + m*(x2-x0)
x1_step = x0 + (1/m)*(y1-y0)
x2_step = x0 + (1/m)*(y2-y0)
y1_step2 = y0 -(1/m)*(x1-x0)
y2_step2 = y0 -(1/m)*(x2-x0)
x1_step2 = x0 - m*(y1-y0)
x2_step2 = x0 - m*(y2-y0)
if (y1_step <=y2 && y1_step >= y1) || (y1_step2 <=y2 && y1_step2 >= y1 )
false
elseif (y2_step <=y2 && y2_step >= y1 ) || (y2_step2 <=y2 && y2_step2 >= y1 )
false
elseif (x1_step <= x2 && x1_step >= x1 ) || (x1_step2 <= x2 && x1_step2 >= x1)
false
elseif (x2_step <= x2 && x2_step >= x1) || (x2_step2 <= x2 && x2_step2 >= x1)
false
else
true
end
elseif func == 4
inicenter = SVector(x0, y0)
x_norm1 = x1 - inicenter[1]
x_norm2 = x2 - inicenter[1]
y_norm1 = y1 - inicenter[2]
y_norm2 = y2 - inicenter[2]
r1 = sqrt(x_norm1^2 + y_norm1^2)
r2 = sqrt(x_norm2^2 + y_norm1^2)
r3 = sqrt(x_norm1^2 + y_norm2^2)
r4 = sqrt(x_norm2^2 + y_norm2^2)
if (r1 <= m && r2 >=m) || (r1>= m && r2 <=m)
false
elseif (r1 <= m && r3 >=m) || (r1>= m && r3 <=m)
false
elseif (r2 <= m && r4 >=m) || (r2>= m && r4 <=m)
false
elseif (r3 <= m && r4 >=m) || (r3>= m && r4 <=m)
false
else
true
end
end
end
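"""
  Evaluate validation function number `func` at the point (`x`, `y`);
  analogous to `trainfunction2d`, with a separate catalog of smooth
  functions and fixed coefficients.
"""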
function validfunction2d(func,x,y)
if func == 1
a = [-0.2, 0.8, -0.7 ]#rand(Uniform(-1,1),3)
b = [-1.0, 0.4, -0.1] #rand(Uniform(-1,1),3)
        u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
        u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
        u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 2
a = [0.4, -0.8, 0.1] #rand(Uniform(-1,1),3)
b = [0.1, -0.6, 0.9] #rand(Uniform(-1,1),3)
        u1 = a[1]*sin(pi*x)+b[1]*cos(pi*y)
        u2 = u1 + a[2]*sin(2*pi*x)+b[2]*cos(2*pi*y)
        u = u2 + a[3]*sin(3*pi*x)+b[3]*cos(3*pi*y)
elseif func == 3
a = [0.4]
b = [0.1]
        u = a[1]*sin(pi*x)+b[1]*cos(pi*y)
elseif func == 4
a = [-0.4]
b = [0.9]
        u = a[1]*sin(pi*x)+b[1]*sin(pi*y)
elseif func == 5
a = [0.4]
b = [-0.7]
        u = a[1]*sin(2*pi*x)+b[1]*sin(2*pi*y)
elseif func == 6
a =[0.3, -0.1]# rand(Uniform(-1,1),6)
u = a[1]*x + a[2]*x^2
elseif func == 7
a =[-0.1, 0.6, 0.8]# rand(Uniform(-1,1),6)
u = a[1]*x + a[2]*x^2 + a[3]*x^3
elseif func == 8
#a =[-0.5, -0.1, -0.8, 0.8]# rand(Uniform(-1,1),6)
#u = a[1]*x + a[2]*x^2 + a[3]*x^3 + a[4]*x^4
a =[1.0, 0.5, 0.9, -1.0, -0.1, 0.3] # rand(Uniform(-1,1),6)
u = exp(a[1]*((x-a[2])^2+(y-a[3])^2)) + exp(a[4]*((x-a[5])^2+(y-a[6])^2))
elseif func == 9
a =[0.8, -0.4, 0.1]# rand(Uniform(-1,1),6)
u = a[1]*x + a[2]*x^2 + a[3]*x^3
end
return u
end
|
{"hexsha": "c12e78f273ad72ebe3feea0bc56d8c983e3767c0", "size": 14012, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions/function2d.jl", "max_stars_repo_name": "HenrZu/SmartShockFinder.jl", "max_stars_repo_head_hexsha": "7a2881915c8ab78b9487b2e8d232e1bf8b3c04cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/functions/function2d.jl", "max_issues_repo_name": "HenrZu/SmartShockFinder.jl", "max_issues_repo_head_hexsha": "7a2881915c8ab78b9487b2e8d232e1bf8b3c04cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-09-17T00:20:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-19T00:21:16.000Z", "max_forks_repo_path": "src/functions/function2d.jl", "max_forks_repo_name": "HenrZu/SmartShockFinder.jl", "max_forks_repo_head_hexsha": "7a2881915c8ab78b9487b2e8d232e1bf8b3c04cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-20T20:42:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-20T20:42:23.000Z", "avg_line_length": 40.4971098266, "max_line_length": 143, "alphanum_fraction": 0.449971453, "num_tokens": 6771}
|
import numpy as np
import torch
from utilities.data_structures.Deque import Deque
from utilities.data_structures.Max_Heap import Max_Heap
class Prioritised_Replay_Buffer(Max_Heap, Deque):
"""维护一个 deque、max_heap 和 array 的数据结构。
deque 会跟踪哪些体验是最旧的,因此告诉我们一旦缓冲区开始变满要删除哪些体验。
max_heap 让我们可以使用最大 td_value 快速检索经验。
array 让我们可以快速随机抽样,其概率等比例于 td 误差。
我们还使用一个简单的变量来跟踪 td 值的总和。
"""
def __init__(self, hyperparameters, seed=0, device=None):
Max_Heap.__init__(self, hyperparameters["buffer_size"], dimension_of_value_attribute=5, default_key_to_use=0)
Deque.__init__(self, hyperparameters["buffer_size"], dimension_of_value_attribute=5)
np.random.seed(seed)
        self.deques_td_errors = self.initialise_td_errors_array()  # initialise the array of td errors
        self.heap_index_to_overwrite_next = 1  # the heap index to overwrite next
        self.number_experiences_in_deque = 0  # the number of experiences in the deque
        self.adapted_overall_sum_of_td_errors = 0  # the adapted overall sum of td_errors
self.alpha = hyperparameters["alpha_prioritised_replay"]
self.beta = hyperparameters["beta_prioritised_replay"]
        self.incremental_td_error = hyperparameters["incremental_td_error"]  # the incremental td_error
self.batch_size = hyperparameters["batch_size"]
self.heap_indexes_to_update_td_error_for = None
self.indexes_in_node_value_tuple = {
"state": 0,
"action": 1,
"reward": 2,
"next_state": 3,
"done": 4
}
# self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if device:
self.device = torch.device(device)
else:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def initialise_td_errors_array(self):
"""初始化一个长度为 self.max_size 的节点的双端队列"""
return np.zeros(self.max_size)
def add_experience(self, raw_td_error, state, action, reward, next_state, done):
"""保存经验"""
td_error = (abs(raw_td_error) + self.incremental_td_error) ** self.alpha
self.update_overall_sum(td_error, self.deque[self.deque_index_to_overwrite_next].key)
self.update_deque_and_deque_td_errors(td_error, state, action, reward, next_state, done)
self.update_heap_and_heap_index_to_overwrite()
self.update_number_experiences_in_deque()
self.update_deque_index_to_overwrite_next()
def update_overall_sum(self, new_td_error, old_td_error):
"""更新缓冲区中存在的 td_error 的总和"""
self.adapted_overall_sum_of_td_errors += new_td_error - old_td_error
def update_deque_and_deque_td_errors(self, td_error, state, action, reward, next_state, done):
"""通过使用提供的经验覆盖最旧的经验来更新双端队列"""
self.deques_td_errors[self.deque_index_to_overwrite_next] = td_error
self.add_element_to_deque(td_error, (state, action, reward, next_state, done))
def add_element_to_deque(self, new_key, new_value):
"""添加元素到双端队列"""
self.update_deque_node_key_and_value(self.deque_index_to_overwrite_next, new_key, new_value)
def update_heap_and_heap_index_to_overwrite(self):
"""根据刚刚合并的新经验,通过重新排列堆来更新堆。 如果我们还没有达到最大容量,那么新经验将直接添加到堆中,
否则堆上的指针更改以反映新经验,因此无需添加。
"""
if not self.reached_max_capacity:
self.update_heap_element(self.heap_index_to_overwrite_next, self.deque[self.deque_index_to_overwrite_next])
self.deque[self.deque_index_to_overwrite_next].heap_index = self.heap_index_to_overwrite_next
self.update_heap_index_to_overwrite_next()
heap_index_change = self.deque[self.deque_index_to_overwrite_next].heap_index
self.reorganise_heap(heap_index_change)
def update_heap_index_to_overwrite_next(self):
"""这将更新堆索引以进行下一次写入。 一旦缓冲区满了,我们就停止调用这个函数,因为堆指向的节点开始直接改变,而不是堆上的指针改变"""
self.heap_index_to_overwrite_next += 1
def swap_heap_elements(self, index1, index2):
"""交换两个堆元素的位置,然后更新存储在两个节点中的 heap_index。 我们必须从 Max_Heap 覆盖这个方法,以便它也更新 heap_index 变量"""
self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]
self.heap[index1].heap_index = index1
self.heap[index2].heap_index = index2
def sample(self, rank_based=True):
"""从经验中随机抽样一批,给具有较高 td 误差的经验提供更高的可能性。 然后它计算每个采样经验的重要性采样权重,您可以在论文中了解这一点:
https://arxiv.org/pdf/1511.05952.pdf
"""
experiences, deque_sample_indexes = self.pick_experiences_based_on_proportional_td_error()
states, actions, rewards, next_states, dones = self.separate_out_data_types(experiences)
self.deque_sample_indexes_to_update_td_error_for = deque_sample_indexes
importance_sampling_weights = self.calculate_importance_sampling_weights(experiences)
return (states, actions, rewards, next_states, dones), importance_sampling_weights
def pick_experiences_based_on_proportional_td_error(self):
"""随机选择一批经验,概率等比例于 td_errors"""
probabilities = self.deques_td_errors / self.give_adapted_sum_of_td_errors()
deque_sample_indexes = np.random.choice(range(len(self.deques_td_errors)), size=self.batch_size, replace=False, p=probabilities)
experiences = self.deque[deque_sample_indexes]
return experiences, deque_sample_indexes
def separate_out_data_types(self, experiences):
"""将经验分成不同的部分,并使它们准备好在 pytorch 模型中使用的张量"""
states = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["state"]] for e in experiences])).float().to(self.device)
actions = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["action"]] for e in experiences])).float().to(self.device)
rewards = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["reward"]] for e in experiences])).float().to(self.device)
next_states = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["next_state"]] for e in experiences])).float().to(
self.device)
dones = torch.from_numpy(np.vstack([int(e.value[self.indexes_in_node_value_tuple["done"]]) for e in experiences])).float().to(self.device)
return states, actions, rewards, next_states, dones
def calculate_importance_sampling_weights(self, experiences):
"""计算样本中每个观测值的重要性抽样权重。 权重与 observation 的 td_error 成正比,
请参阅此处的论文了解更多详情:https://arxiv.org/pdf/1511.05952.pdf
"""
td_errors = [experience.key for experience in experiences]
importance_sampling_weights = [((1.0 / self.number_experiences_in_deque) * (self.give_adapted_sum_of_td_errors() / td_error)) ** self.beta for td_error in td_errors]
sample_max_importance_weight = max(importance_sampling_weights)
importance_sampling_weights = [is_weight / sample_max_importance_weight for is_weight in importance_sampling_weights]
importance_sampling_weights = torch.tensor(importance_sampling_weights).float().to(self.device)
return importance_sampling_weights
def update_td_errors(self, td_errors):
"""更新提供的堆索引的 td_errors。 索引应该是 give_sample 方法最近提供的 observation 结果"""
for raw_td_error, deque_index in zip(td_errors, self.deque_sample_indexes_to_update_td_error_for):
td_error = (abs(raw_td_error) + self.incremental_td_error) ** self.alpha
corresponding_heap_index = self.deque[deque_index].heap_index
self.update_overall_sum(td_error, self.heap[corresponding_heap_index].key)
self.heap[corresponding_heap_index].key = td_error
self.reorganise_heap(corresponding_heap_index)
self.deques_td_errors[deque_index] = td_error
def give_max_td_error(self):
"""返回当前堆中的最大 td_error,即最大堆的顶部元素"""
return self.give_max_key()
def give_adapted_sum_of_td_errors(self):
"""返回堆中当前经验的 td_error 总和"""
return self.adapted_overall_sum_of_td_errors
def __len__(self):
"""重放缓冲区的经验数"""
return self.number_experiences_in_deque
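
# A minimal usage sketch (illustrative values; the dict keys below are exactly
# the hyperparameters read in __init__):
#
#   buffer = Prioritised_Replay_Buffer({"buffer_size": 10000,
#                                       "batch_size": 32,
#                                       "alpha_prioritised_replay": 0.6,
#                                       "beta_prioritised_replay": 0.4,
#                                       "incremental_td_error": 1e-8})
#   buffer.add_experience(td_error, state, action, reward, next_state, done)
#   (states, actions, rewards, next_states, dones), is_weights = buffer.sample()
#   buffer.update_td_errors(new_td_errors)  # after recomputing the td errors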
|
{"hexsha": "72e07bd22e86fee802a02d2c9c8cb0ee8e4bf799", "size": 7963, "ext": "py", "lang": "Python", "max_stars_repo_path": "Unified/utilities/data_structures/Prioritised_Replay_Buffer.py", "max_stars_repo_name": "Crazyalltnt/RL-Alogorithms-Implement", "max_stars_repo_head_hexsha": "27905f1c1890b1aff907564230b4ec0c22e60ba0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Unified/utilities/data_structures/Prioritised_Replay_Buffer.py", "max_issues_repo_name": "Crazyalltnt/RL-Alogorithms-Implement", "max_issues_repo_head_hexsha": "27905f1c1890b1aff907564230b4ec0c22e60ba0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Unified/utilities/data_structures/Prioritised_Replay_Buffer.py", "max_forks_repo_name": "Crazyalltnt/RL-Alogorithms-Implement", "max_forks_repo_head_hexsha": "27905f1c1890b1aff907564230b4ec0c22e60ba0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-15T10:22:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T10:22:03.000Z", "avg_line_length": 51.0448717949, "max_line_length": 173, "alphanum_fraction": 0.7235966344, "include": true, "reason": "import numpy", "num_tokens": 2303}
|
# tests for shock capturing
import EulerEquationMod: AbstractShockSensor, AbstractShockCapturing
function test_shocksensorPP()
@testset "Shock sensor PP" begin
opts = read_input_file("input_vals_jac2d.jl")
opts["order"] = 2
delete!(opts, "calc_jac_explicit")
opts["force_solution_complex"] = true
opts["force_mesh_complex"] = true
mesh, sbp, eqn, opts = solvePDE(opts)
Tsol = eltype(eqn.q); Tres = eltype(eqn.res)
q = eqn.q[:, :, 1]
coords = mesh.coords[:, :, 1]
dxidx = mesh.dxidx[:, :, :, 1]
jac = ones(Complex128, mesh.numNodesPerElement) + 0.001*rand(mesh.numNodesPerElement)
#jac = ones(Complex128, mesh.numNodesPerElement) + 0.01*collect(1:mesh.numNodesPerElement)
res = zeros(eltype(eqn.res), mesh.numDofPerNode, mesh.numNodesPerElement)
Se = zeros(eltype(eqn.res), mesh.dim, mesh.numNodesPerElement)
ee = zeros(eltype(eqn.res), mesh.dim, mesh.numNodesPerElement)
sensor = EulerEquationMod.ShockSensorPP{Tsol, Tres}(mesh, sbp, opts)
sensor.use_filtered = false
sensora = EulerEquationMod.ShockSensorPP{Tsol, Tres}(mesh, sbp, opts)
sensora.use_filtered = true
capture = EulerEquationMod.ProjectionShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
sensor2 = EulerEquationMod.ShockSensorHIso{Tsol, Tres}(mesh, sbp, opts)
sensor3 = EulerEquationMod.ShockSensorBO{Tsol, Tres}(mesh, sbp, opts)
sensor4 = EulerEquationMod.ShockSensorHHO{Tsol, Tres}(mesh, sbp, opts)
sensor5 = EulerEquationMod.ShockSensorVelocity{Tsol, Tres}(mesh, sbp, opts)
sensor6 = EulerEquationMod.ShockSensorHHOConst{Tsol, Tres}(mesh, sbp, opts)
# initial condition is constant, check the sensor reports no shock
EulerEquationMod.getShockSensor(eqn.params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
test_vandermonde(eqn.params, sbp, coords)
@test maximum(abs.((Se))) < 1e-12
@test maximum(ee) == 0
fill!(res, 0)
EulerEquationMod.projectionShockCapturing(eqn.params, sbp, sensor, capture,
q, 1, coords, dxidx, jac, res)
@test maximum(abs.(res)) < 1e-13
# test when a shock is present
q[1, 3] += 5
EulerEquationMod.getShockSensor(eqn.params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
@test maximum(abs.(Se)) > 1e-12
@test maximum(ee) > 0.01*sensor.e0
fill!(res, 0)
w = copy(q)
for i=1:mesh.numNodesPerElement
w_i = sview(w, :, i)
q_i = sview(q, :, i)
EulerEquationMod.convertToIR(eqn.params, q_i, w_i)
end
EulerEquationMod.projectionShockCapturing(eqn.params, sbp, sensor, capture,
q, 1, coords, dxidx, jac, res)
@test sum(res .* w) < 0 # the term is negative definite
# case 3: ee = 1
test_shocksensor_diff(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensora, q, coords, dxidx, jac)
# sensor2
test_shocksensor_diff(eqn.params, sbp, sensor2, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensor3, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensor4, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensor6, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensora, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensor3, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensor4, q, coords, dxidx, jac)
test_ansiofactors_revm(mesh, sbp, eqn, opts, sensor4)
test_shocksensor_revm(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_revm(eqn.params, sbp, sensora, q, coords, dxidx, jac)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensor)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensora)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensor3)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensor4)
# case 2: ee on sin wave
q[1, 3] = 1.005
# for i=1:mesh.numNodesPerElement
# for j=2:mesh.numDofPerNode
# q[j, i] += 0.1*(i + j)
# end
# end
test_shocksensor_diff(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensora, q, coords, dxidx, jac)
test_shocksensor_diff(eqn.params, sbp, sensor5, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensora, q, coords, dxidx, jac)
test_shocksensor_revq(eqn.params, sbp, sensor5, q, coords, dxidx, jac)
test_shocksensor_revm(eqn.params, sbp, sensor, q, coords, dxidx, jac)
test_shocksensor_revm(eqn.params, sbp, sensora, q, coords, dxidx, jac)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensor)
test_shocksensor_revm(mesh, sbp, eqn, opts, sensora)
# for isotropic grids, the HHO anisotropy factors should be ~h/(p+1)
for i=1:mesh.numEl
jac_i = ro_sview(mesh.jac, :, i)
h_avg = EulerEquationMod.computeElementVolume(eqn.params, sbp, jac_i)^(1/mesh.dim)
h_avg /= (sbp.degree + 1)
for d=1:mesh.dim
@test sensor4.h_k_tilde[d, i] > h_avg/3
@test sensor4.h_k_tilde[d, i] < h_avg*3
end
end
end # end testset
return nothing
end
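# The derivative tests below all use complex-step differentiation: for a
# real-analytic f, imag(f(x + im*h))/h approximates f'(x) with no subtractive
# cancellation, so h can be taken tiny (h = 1e-20 throughout). A minimal
# illustration of the technique:
#
#   f(x) = x^3
#   imag(f(3.0 + 1e-20im))/1e-20   # ≈ 27.0 = f'(3.0)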
"""
Tests derivative of the shock sensor at a given state
"""
function test_shocksensor_diff(params, sbp, sensor::AbstractShockSensor, _q,
coords, dxidx, jac)
srand(1234)
numDofPerNode, numNodesPerElement = size(_q)
dim = size(dxidx, 1)
EulerEquationMod.setAlpha(sensor, 2)
q = zeros(Complex128, numDofPerNode, numNodesPerElement)
copy!(q, _q)
Se_jac = zeros(Complex128, dim, numDofPerNode, numNodesPerElement,
numNodesPerElement)
Se_jac2 = copy(Se_jac)
ee_jac = zeros(Complex128, dim, numDofPerNode, numNodesPerElement,
numNodesPerElement)
ee_jac2 = copy(ee_jac)
dof = 1; node = 1
Se = zeros(Complex128, dim, numNodesPerElement)
ee = zeros(Complex128, dim, numNodesPerElement)
h = 1e-20
pert = Complex128(0, h)
for i=1:1 #numNodesPerElement
for j=1:1 #numDofPerNode
q[j, i] += pert
EulerEquationMod.getShockSensor(params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
Se_jac[:, j, :, i] = imag(Se)./h
ee_jac[:, j, :, i] = imag(ee)./h
q[j, i] -= pert
end
end
EulerEquationMod.getShockSensor_diff(params, sbp, sensor, q, 1,
coords, dxidx, jac, Se_jac2, ee_jac2)
@test maximum(abs.(Se_jac[:, dof, :, node] - Se_jac2[:, dof, :, node])) < 1e-11
@test maximum(abs.(ee_jac[:, dof, :, node] - ee_jac2[:, dof, :, node])) < 1e-11
# @test maximum(abs.(Se_jac - Se_jac2)) < 1e-11
# @test maximum(abs.(ee_jac - ee_jac2)) < 1e-11
#=
# test vector mode
q_dot = rand_realpart(size(q))
q .+= pert*q_dot
EulerEquationMod.getShockSensor(params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
Se_dot = imag(Se)./h
ee_dot = imag(ee)./h
q .-= pert*q_dot
# run again to make sure intermediate arrays are zeroed out
EulerEquationMod.getShockSensor_diff(params, sbp, sensor, q, 1,
coords, dxidx, jac, Se_jac2, ee_jac2)
Se_dot2 = zeros(Complex128, dim, numNodesPerElement)
ee_dot2 = zeros(Complex128, dim, numNodesPerElement)
for i=1:numNodesPerElement
for j=1:dim
Se_dot2[j, i] = sum(Se_jac2[j, :, i, :] .* q_dot)
ee_dot2[j, i] = sum(ee_jac2[j, :, i, :] .* q_dot)
end
end
@test maximum(abs.(Se_dot - Se_dot2)) < 1e-11
@test maximum(abs.(ee_dot - ee_dot2)) < 1e-11
EulerEquationMod.setAlpha(sensor, 1)
=#
return nothing
end
function test_shocksensor_revq(params, sbp, sensor::AbstractShockSensor, _q,
coords, dxidx, jac)
numDofPerNode, numNodesPerElement = size(_q)
dim = size(dxidx, 1)
EulerEquationMod.setAlpha(sensor, 1)
Se = zeros(Complex128, dim, numNodesPerElement)
ee = zeros(Complex128, dim, numNodesPerElement)
q_dot = rand_realpart(size(_q))
# q_dot = zeros(Complex128, size(_q)); q_dot[1] = 1
q_bar = zeros(Complex128, size(q_dot))
ee_bar = rand_realpart(size(ee))
# ee_bar = zeros(Complex128, size(ee)); ee_bar[1] = 1
q = zeros(Complex128, numDofPerNode, numNodesPerElement)
copy!(q, _q)
q .+= 0.01*rand(size(q))
# complex step
h = 1e-20
pert = Complex128(0, h)
q .+= pert*q_dot
EulerEquationMod.getShockSensor(params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
q .-= pert*q_dot
ee_dot = imag(ee)./h
val1 = sum(ee_dot .* ee_bar)
# reverse mode
EulerEquationMod.getShockSensor_revq(params, sbp, sensor, q, q_bar, 1, coords,
dxidx, jac, ee, ee_bar)
val2 = sum(q_bar .* q_dot)
println("val1 = ", val1)
println("val2 = ", val2)
@test abs(val1 - val2) < 1e-12
# run twice to check accumulation behavior
q_bar_orig = copy(q_bar)
EulerEquationMod.getShockSensor_revq(params, sbp, sensor, q, q_bar, 1, coords,
dxidx, jac, ee, ee_bar)
@test maximum(abs.(q_bar - 2*q_bar_orig)) < 1e-13
EulerEquationMod.setAlpha(sensor, 1)
return nothing
end
function test_shocksensor_revm(params, sbp, sensor::AbstractShockSensor, _q,
coords, dxidx, jac)
numDofPerNode, numNodesPerElement = size(_q)
dim = size(dxidx, 1)
EulerEquationMod.setAlpha(sensor, 1)
dxidx_dot = rand_realpart(size(dxidx))
dxidx_bar = zeros(Complex128, size(dxidx))
jac_dot = rand_realpart(size(jac))
jac_bar = zeros(Complex128, size(jac))
coords_bar = zeros(Complex128, size(coords))
ee_bar = rand_realpart(dim, numNodesPerElement)
Se = zeros(Complex128, dim, numNodesPerElement)
ee = zeros(Complex128, dim, numNodesPerElement)
q = zeros(Complex128, numDofPerNode, numNodesPerElement)
copy!(q, _q)
q .+= 0.01*rand(size(q))
# complex step
h = 1e-20
pert = Complex128(0, h)
dxidx .+= pert*dxidx_dot
jac .+= pert*jac_dot
EulerEquationMod.getShockSensor(params, sbp, sensor, q, 1, coords,
dxidx, jac, Se, ee)
dxidx .-= pert*dxidx_dot
jac .-= pert*jac_dot
ee_dot = imag(ee)./h
val1 = sum(ee_dot .* ee_bar)
# reverse mode
EulerEquationMod.getShockSensor_revm(params, sbp, sensor, q, 1, coords,
coords_bar, dxidx, dxidx_bar, jac, jac_bar, ee_bar)
val2 = sum(dxidx_bar .* dxidx_dot) + sum(jac_bar .* jac_dot)
println("val1 = ", val1)
println("val2 = ", val2)
@test abs(val1 - val2) < 1e-12
return nothing
end
function test_shocksensor_revm(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts, sensor::AbstractShockSensor) where {Tsol, Tres}
# the capturing scheme doesn't matter here, but eqn.shock_capturing must be set
capture = EulerEquationMod.VolumeShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
eqn.shock_capturing = capture
EulerEquationMod.setAlpha(sensor, 2)
srand(1234)
elnum = 1
q = ro_sview(eqn.q, :, :, elnum)
q = eqn.q[:, :, elnum] + 0.1*rand_realpart((mesh.numDofPerNode, mesh.numNodesPerElement))
coords = sview(mesh.coords, :, :, elnum)
#coords_bar = sview(mesh.coords_bar, :, :, elnum)
dxidx = sview(mesh.dxidx, :, :, :, elnum)
dxidx_bar = sview(mesh.dxidx_bar, :, :, :, elnum)
jac = sview(mesh.jac, :, elnum)
jac_bar = sview(mesh.jac_bar, :, elnum)
params = eqn.params
@unpack mesh numNodesPerElement numDofPerNode dim
zeroBarArrays(mesh)
#coords_dot = rand_realpart((dim, numNodesPerElement))
coords_dot = zeros(dim, numNodesPerElement)
coords_bar = zeros(Complex128, dim, numNodesPerElement)
#dxidx_dot = rand_realpart(size(dxidx))
dxidx_dot = zeros(size(dxidx))
#dxidx_bar = zeros(Complex128, dim, dim, numNodesPerElement)
#jac_bar = zeros(Complex128, numNodesPerElement)
jac_dot = rand_realpart(size(jac))
#jac_dot = zeros(size(jac))
Se = zeros(Complex128, dim, numNodesPerElement)
ee = zeros(Complex128, dim, numNodesPerElement)
ee_bar = rand_realpart((dim, numNodesPerElement))
h = 1e-20
pert = Complex128(0, h)
# complex step
coords .+= pert*coords_dot
dxidx .+= pert*dxidx_dot
jac .+= pert*jac_dot
updateMetricDependents(mesh, sbp, eqn, opts)
EulerEquationMod.getShockSensor(params, sbp, sensor, q, elnum, coords, dxidx, jac,
Se, ee)
coords .-= pert*coords_dot
dxidx .-= pert*dxidx_dot
jac .-= pert*jac_dot
updateMetricDependents(mesh, sbp, eqn, opts)
val1 = sum( imag(ee)./h .* ee_bar)
# reverse mode
EulerEquationMod.initForRevm(sensor)
EulerEquationMod.getShockSensor_revm(params, sbp, sensor, q, elnum, coords,
coords_bar, dxidx, dxidx_bar,
jac, jac_bar, ee_bar)
EulerEquationMod.finishRevm(mesh, sbp, eqn, opts, sensor)
val2 = sum(dxidx_bar .* dxidx_dot) + sum(jac_bar .* jac_dot) + sum(coords_bar .* coords_dot)
println("val1 = ", real(val1))
println("val2 = ", real(val2))
@test abs(val1 - val2) < 1e-12
EulerEquationMod.setAlpha(sensor, 1)
return nothing
end
function test_ansiofactors_revm(mesh, sbp, eqn, opts, sensor)
println("\nEntered test_ansiofactors")
# test calcAnsioFactors
h = 1e-20
pert = Complex128(0, h)
zeroBarArrays(mesh)
# jac_dot = rand_realpart(size(mesh.jac))
jac_dot = zeros(size(mesh.jac))
# dxidx_dot = rand_realpart(size(mesh.dxidx))
dxidx_dot = zeros(size(mesh.dxidx))
  # nrm_face_dot = rand_realpart(size(mesh.nrm_face))
nrm_face_dot = zeros(size(mesh.nrm_face))
#nrm_bndry_dot = rand_realpart(size(mesh.nrm_bndry))
nrm_bndry_dot = zeros(size(mesh.nrm_bndry)); nrm_bndry_dot[2, 1, 1] = 1
h_k_bar = rand_realpart(size(sensor.h_k_tilde))
h_k_tilde_orig = copy(sensor.h_k_tilde)
# complex step
mesh.nrm_face .+= pert*nrm_face_dot
mesh.nrm_bndry .+= pert*nrm_bndry_dot
mesh.dxidx .+= pert*dxidx_dot
mesh.jac .+= pert*jac_dot
EulerEquationMod.calcAnisoFactors(mesh, sbp, opts, sensor.h_k_tilde)
mesh.nrm_face .-= pert*nrm_face_dot
mesh.nrm_bndry .-= pert*nrm_bndry_dot
mesh.dxidx .-= pert*dxidx_dot
mesh.jac .-= pert*jac_dot
h_k_dot = imag(sensor.h_k_tilde)./h
val1 = sum(h_k_dot .* h_k_bar)
EulerEquationMod.calcAnisoFactors(mesh, sbp, opts, sensor.h_k_tilde)
copy!(sensor.h_k_tilde_bar, h_k_bar)
EulerEquationMod.calcAnisoFactors_revm(mesh, sbp, opts, sensor.h_k_tilde, sensor.h_k_tilde_bar)
val2 = sum(mesh.nrm_face_bar .* nrm_face_dot) +
sum(mesh.nrm_bndry_bar .* nrm_bndry_dot) +
sum(mesh.dxidx_bar .* dxidx_dot) +
sum(mesh.jac_bar .* jac_dot)
println("val1 = ", real(val1))
println("val2 = ", real(val2))
@test abs(val1 - val2) < 1e-12
@test maximum(abs.(sensor.h_k_tilde - h_k_tilde_orig)) < 1e-12
return nothing
end
function test_vandermonde(params, sbp, coords)
q = zeros(sbp.numnodes)
q1 = zeros(sbp.numnodes)
q2 = zeros(sbp.numnodes)
q3 = zeros(sbp.numnodes)
q4 = zeros(sbp.numnodes)
# test constants (which should be represented exactly on both p=0 and p=1
# solutions
vand = EulerEquationMod.VandermondeData(sbp, 1)
fill!(q, 1)
EulerEquationMod.getFilteredSolutions(params, vand, q, q1, q2)
@test maximum(abs.(q - q1)) < 1e-12
@test maximum(abs.(q - q2)) < 1e-12
# test linear polynomials are split correctly
for i=1:sbp.numnodes
q[i] = 1 + coords[1, i] + coords[2, i]
q3[i] = 1
q4[i] = coords[1, i] + coords[2, i]
end
EulerEquationMod.getFilteredSolutions(params, vand, q, q1, q2)
@test maximum(abs.(q1 - q)) < 1e-12
@test maximum(abs.( q - (q2 + q4))) < 1e-12
return nothing
end
add_func1!(EulerTests, test_shocksensorPP, [TAG_SHORTTEST])
#------------------------------------------------------------------------------
# test LDG shock capturing
function test_ldg()
opts = Dict{String, Any}(
"physics" => "Euler",
"operator_type" => "SBPOmega",
"dimensions" => 2,
"run_type" => 5,
"jac_method" => 2,
"jac_type" => 2,
"order" => 2,
"IC_name" => "ICIsentropicVortex",
"use_DG" => true,
"volume_integral_type" => 2,
"Volume_flux_name" => "IRFlux",
"face_integral_type" => 2,
"FaceElementIntegral_name" => "ESLFFaceIntegral",
"Flux_name" => "IRFlux",
"numBC" => 3,
"BC1" => [0],
"BC1_name" => "isentropicVortexBC", # outlet
"BC2" => [2],
"BC2_name" => "isentropicVortexBC", # inlet
"BC3" => [1, 3],
"BC3_name" => "isentropicVortexBC", # was noPenetrationBC
"aoa" => 0.0,
"smb_name" => "SRCMESHES/vortex_3x3_.smb",
"dmg_name" => ".null",
"itermax" => 20,
"res_abstol" => 1e-12,
"res_reltol" => 1e-12,
"do_postproc" => true,
"exact_soln_func" => "ICIsentropicVortex",
"force_solution_complex" => true,
"force_mesh_complex" => true,
"solve" => false,
"addVolumeIntegrals" => false,
"addFaceIntegrals" => false,
"addBoundaryIntegrals" => false,
)
opts2 = copy(opts)
opts2["operator_type"] = "SBPDiagonalE"
opts2["face_integral_type"] = 1
opts2["flux_name"] = "IRSLFFlux"
opts2["use_lps"] = true
@testset "DG shock capturing" begin
mesh, sbp, eqn, opts = solvePDE(opts)
mesh2, sbp2, eqn2, opts2 = solvePDE(opts2)
mesh3, sbp3, eqn3, opts3 = solvePDE("input_vals_curve.jl")
testQx(mesh, sbp, eqn, opts)
# test on a curvilinear mesh
testQx(mesh3, sbp3, eqn3, opts3, check_poly=false)
test_shockmesh(mesh, sbp, eqn, opts)
#test_shockmesh2(mesh, sbp, eqn, opts)
# these tests don't work since the changes to shockmesh.interfaces
#=
test_thetaface(mesh, sbp, eqn, opts)
test_qj(mesh, sbp, eqn, opts)
test_qface(mesh, sbp, eqn, opts)
test_q(mesh, sbp, eqn, opts)
ic_func = EulerEquationMod.ICDict[opts["IC_name"]]
ic_func(mesh, sbp, eqn, opts, eqn.q_vec)
test_ldg_ESS(mesh, sbp, eqn, opts)
=#
test_br2_gradw(mesh, sbp, eqn, opts)
test_br2_volume(mesh, sbp, eqn, opts)
test_br2_face(mesh, sbp, eqn, opts)
test_br2_Dgk(mesh, sbp, eqn, opts)
ic_func = EulerEquationMod.ICDict[opts["IC_name"]]
for i=1:10
ic_func(mesh, sbp, eqn, opts, eqn.q_vec)
test_br2_ESS(mesh, sbp, eqn, opts; fullmesh=true)
#ic_func(mesh, sbp, eqn, opts, eqn.q_vec)
#test_br2_ESS(mesh, sbp, eqn, opts; fullmesh=false)
ic_func(mesh2, sbp2, eqn2, opts2, eqn2.q_vec)
test_br2reduced_ESS(mesh2, sbp2, eqn2, opts2; fullmesh=true)
ic_func(mesh2, sbp2, eqn2, opts2, eqn2.q_vec)
test_br2reduced_ESS(mesh2, sbp2, eqn2, opts2; fullmesh=false)
ic_func(mesh2, sbp2, eqn2, opts2, eqn2.q_vec)
test_br2reduced2_ESS(mesh2, sbp2, eqn2, opts2; fullmesh=true)
ic_func(mesh2, sbp2, eqn2, opts2, eqn2.q_vec)
test_br2reduced2_ESS(mesh2, sbp2, eqn2, opts2; fullmesh=false)
end
ic_func = EulerEquationMod.ICDict[opts["IC_name"]]
ic_func(mesh, sbp, eqn, opts, eqn.q_vec)
test_br2_serialpart(mesh, sbp, eqn, opts)
ic_func(mesh2, sbp2, eqn2, opts2, eqn2.q_vec)
test_br2reduced_serialpart(mesh2, sbp2, eqn2, opts2)
end
return nothing
end
add_func1!(EulerTests, test_ldg, [TAG_SHORTTEST])
"""
Set q to be a polynomial of specified degree
"""
function setPoly(mesh, q::Abstract3DArray, degree::Int)
for i=1:mesh.numEl
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i]
y = mesh.coords[2, j, i]
val = x^degree + 2*(y^degree)
if mesh.dim == 3
z = mesh.coords[3, j, i]
val += 3*(z^degree)
end
for k=1:mesh.numDofPerNode
q[k, j, i] = val + k
end
end
end
return nothing
end
function setPolyDeriv(mesh, qx, degree::Int)
# qx should be mesh.numDofPerNode x mesh.dim x mesh.numNodesPerElement x
# mesh.numEl
for i=1:mesh.numEl
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i]
y = mesh.coords[2, j, i]
#val = x^degree + 2*(y^degree)
valx = degree*x^(degree-1)
valy = 2*degree*y^(degree-1)
if mesh.dim == 3
z = mesh.coords[3, j, i]
#val += 3*(z^degree)
valz = 3*degree*z^(degree-1)
end
for k=1:mesh.numDofPerNode
qx[k, 1, j, i] = valx
qx[k, 2, j, i] = valy
if mesh.dim == 3
qx[k, 3, j, i] = valz
end
end
end
end
return nothing
end
"""
Sets capture.w_el to be polynomial. w_el is numDofPerNode x
numNodesPerElement x shockmesh.numEl
"""
function setWPoly(mesh, shockmesh, w_el, degree::Int)
# set w_el to be polynomial
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i_full]
y = mesh.coords[2, j, i_full]
for k=1:mesh.numDofPerNode
w_el[k, j, i] = k*x^degree + (k+1)*y^degree
if mesh.dim == 3
z = mesh.coords[3, j, i_full]
w_el[k, j, i] += (k+2)*z^degree
end
end
end
end
return nothing
end
"""
Get the xyz derivatives of setWPoly. `w_elx` should be numDofPerNode x
numNodesPerElement x dim x shockmesh.numEl
"""
function setWPolyDeriv(mesh, shockmesh, w_elx::AbstractArray{T, 4}, degree::Int) where {T}
# set w_el to be polynomial
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i_full]
y = mesh.coords[2, j, i_full]
for k=1:mesh.numDofPerNode
#capture.w_el[k, j, i] = k*x^degree + (k+1)*y^degree
w_elx[k, j, 1, i] = degree*k*x^(degree-1)
w_elx[k, j, 2, i] = degree*(k+1)*y^(degree-1)
if mesh.dim == 3
z = mesh.coords[3, j, i_full]
#capture.w_el[k, j, i] += (k+2)*z^degree
w_elx[k, j, 3, i] = degree*(k+2)*z^(degree-1)
end
end
end
end
return nothing
end
"""
Computes second derivative. `w_elx` is numDofPerNode x numNodesPerElement
x dim x dim x shockmesh.numEl, each dim x dim block contains d/dx_i dx_j.
"""
function setWPolyDeriv2(mesh, shockmesh, w_elx::AbstractArray{T, 5}, degree::Int) where {T}
# set w_el to be polynomial
fill!(w_elx, 0)
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i_full]
y = mesh.coords[2, j, i_full]
for k=1:mesh.numDofPerNode
#capture.w_el[k, j, i] = k*x^degree + (k+1)*y^degree
w_elx[k, j, 1, 1, i] = degree*(degree-1)*k*x^(degree-2)
w_elx[k, j, 2, 2, i] = degree*(degree-1)*(k+1)*y^(degree-2)
if mesh.dim == 3
z = mesh.coords[3, j, i_full]
#capture.w_el[k, j, i] += (k+2)*z^degree
w_elx[k, j, 3, 3, i] = degree*(degree-1)*(k+2)*z^(degree-2)
end
end
end
end
return nothing
end
"""
q_j is numDofPerNode x numNodesPerElement x dim x shockmesh.numEl
"""
function setQjPoly(mesh, shockmesh, q_j, degree::Int)
# set w_el to be polynomial
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i_full]
y = mesh.coords[2, j, i_full]
for d=1:mesh.dim
for k=1:mesh.numDofPerNode
q_j[k, j, d, i] = (k+d)*x^degree + (k+1 + 2*d)*y^degree
if mesh.dim == 3
z = mesh.coords[3, j, i_full]
q_j[k, j, d, i] += (k+2 + 3*d)*z^degree
end
end
end
end
end
return nothing
end
"""
q_jx is numDofPerNode x numNodesPerElement x dim x dim x shockmesh.numEl
the dim x dim block contains dq_i/dx_j
"""
function setQjPolyDeriv(mesh, shockmesh, q_jx, degree::Int)
# set w_el to be polynomial
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i_full]
y = mesh.coords[2, j, i_full]
for d=1:mesh.dim
for k=1:mesh.numDofPerNode
#q_j[k, j, d, i] = (k+d)*x^degree + (k+1 + 2*d)*y^degree
q_jx[k, j, d, 1, i] = degree*(k+d)*x^(degree-1)
q_jx[k, j, d, 2, i] = degree*(k+1 + 2*d)*y^(degree-1)
if mesh.dim == 3
z = mesh.coords[3, j, i_full]
#q_j[k, j, d, i] += (k+2 + 3*d)*z^degree
q_jx[k, j, d, 3, i] = degree*(k+2 + 3*d)*z^(degree-1)
end
end
end
end
end
return nothing
end
"""
Returns an array `iface_idx`, of size `numFacesPerElement`
x `numEl`, containing for each element the indices in `mesh.interfaces`
of the faces that compose it.
"""
function getInterfaceList(mesh)
iface_idx = zeros(Int, mesh.dim + 1, mesh.numEl) # interface index
for i=1:mesh.numInterfaces
iface_i = mesh.interfaces[i]
for k=1:(mesh.dim + 1)
if iface_idx[k, iface_i.elementL] == 0
iface_idx[k, iface_i.elementL] = i
break
end
end
for k=1:(mesh.dim + 1)
if iface_idx[k, iface_i.elementR] == 0
iface_idx[k, iface_i.elementR] = i
break
end
end
end
return iface_idx
end
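# Usage sketch: a zero entry in the last row of `iface_idx` marks an element
# with at least one face on the domain boundary (it has fewer than dim + 1
# interior interfaces):
#   iface_idx = getInterfaceList(mesh)
#   is_interior = iface_idx[end, i] != 0  # true if element i is fully interior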
"""
Gets a shockmesh consisting of only elements on the interior of the domain
(so all faces of the elements are contained in mesh.interfaces)
**Inputs**
* mesh
* sbp
* eqn
* opts
**Outputs**
* iface_idx: output of getInterfaceList
* shockmesh: the shock mesh object
"""
function getInteriorMesh(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
# construct the shock mesh with all elements in it that are fully interior
iface_idx = getInterfaceList(mesh)
shockmesh = EulerEquationMod.ShockedElements{Tres}(mesh)
for i=1:mesh.numEl
if iface_idx[end, i] != 0
push!(shockmesh, i)
end
end
EulerEquationMod.completeShockElements(mesh, shockmesh)
return iface_idx, shockmesh
end
"""
Like `getInteriorMesh`, but puts all elements in the shockmesh
"""
function getEntireMesh(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
# construct the shock mesh with all elements in it that are fully interior
iface_idx = getInterfaceList(mesh)
shockmesh = EulerEquationMod.ShockedElements{Tres}(mesh)
for i=1:mesh.numEl
push!(shockmesh, i)
end
EulerEquationMod.completeShockElements(mesh, shockmesh)
return iface_idx, shockmesh
end
"""
Computes [Ex, Ey, Ez] * q
**Inputs**
* mesh
* i: element number
* iface_idx: the indices of the faces of element `i` in `mesh.interfaces`
* q_i: mesh.numDofPerNode x mesh.numNodesPerElement
* qface: mesh.numDofPerNode x mesh.numNodesPerFace work array
* work2: mesh.numDofPerNode x mesh.numNodesPerFace x mesh.dim work array
* E_term: mesh.numDofPerNode x mesh.numNodesPerElement x mesh.dim output
array
"""
function applyE(mesh, i::Integer, iface_idx::AbstractVector,
q_i::AbstractMatrix, qface::AbstractMatrix,
work2::Abstract3DArray, E_term::Abstract3DArray)
fill!(E_term, 0)
for f=1:size(iface_idx, 1)
idx_f = iface_idx[f]
iface_f = mesh.interfaces[idx_f]
if iface_f.elementL == i
face = iface_f.faceL
else
@assert iface_f.elementR == i
face = iface_f.faceR
end
boundaryFaceInterpolate!(mesh.sbpface, face, q_i, qface)
for j=1:mesh.numNodesPerFace
for d=1:mesh.dim
# figure out the right normal vector
if iface_f.elementL == i
nj = mesh.nrm_face[d, j, idx_f]
else
nj = -mesh.nrm_face[d, mesh.sbpface.nbrperm[j, iface_f.orient], idx_f]
end
for k=1:mesh.numDofPerNode
work2[k, j, d] = nj*qface[k, j]
end
end # end d
end # end j
for d=1:mesh.dim
work2_d = sview(work2, :, :, d)
E_d = sview(E_term, :, :, d)
boundaryFaceIntegrate!(mesh.sbpface, face, work2_d, E_d)
end
end # end f
return nothing
end
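# In standard SBP notation (a sketch; the symbols below are mathematical
# names, not code identifiers), this assembles the directional boundary
# operators face by face:
#   E_d * q = sum over faces f of  R_f^T B_f N_d R_f q
# where R_f interpolates to face f (boundaryFaceInterpolate!), N_d is the
# d-th component of the scaled face normal, and the R_f^T B_f part is
# applied by boundaryFaceIntegrate!.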
function testQx(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts;
check_poly=true) where {Tsol, Tres}
_check_poly::Bool = check_poly
degree = sbp.degree
# degree = 1
setPoly(mesh, eqn.q, degree)
qderiv = zeros(eltype(eqn.q), mesh.numDofPerNode, mesh.dim, mesh.numNodesPerElement, mesh.numEl)
setPolyDeriv(mesh, qderiv, degree)
iface_idx = getInterfaceList(mesh)
# test that Dx = -M * Qx^T + M*Ex, where Dx is exact for polynomials
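# Background: for diagonal-norm SBP operators, Qx + Qx^T = Ex, hence
# -Qx^T + Ex = Qx and Dx = H^-1 * Qx = H^-1 * (-Qx^T + Ex).  With
# op = Subtract below, qxT_term holds -Qx^T * q, and the factor
# mesh.jac[j]/sbp.w[j] used in the checks is H^-1 in physical coordinates.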
qxT_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
work = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
op = SummationByParts.Subtract()
# test Dx
dx_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
# test Qx
qx_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
# test Dx^T
dxT_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
# test calculating the operator matrices themselves
Dx = zeros(Tres, mesh.numNodesPerElement, mesh.numNodesPerElement, mesh.dim)
Qx = zeros(Tres, mesh.numNodesPerElement, mesh.numNodesPerElement, mesh.dim)
DxT = zeros(Tres, mesh.numNodesPerElement, mesh.numNodesPerElement, mesh.dim)
QxT = zeros(Tres, mesh.numNodesPerElement, mesh.numNodesPerElement, mesh.dim)
dx_term2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
dxT_term2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
qx_term2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
qxT_term2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
qface = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
work2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace, mesh.dim)
E_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
nel = 0 # number of elements tested
for i=1:mesh.numEl
# the Ex calculation is only used for verifying polynomial exactness
# Don't skip the boundary elements (the ones that might be curved) when
# not checking polynomial exactness
if _check_poly && iface_idx[end, i] == 0 # non-interior element
continue
end
nel += 1
q_i = ro_sview(eqn.q, :, :, i)
dxidx_i = ro_sview(mesh.dxidx, :, :, :, i)
jac_i = ro_sview(mesh.jac, :, i)
# do Qx^T
fill!(qxT_term, 0)
# fill!(work, 0)
EulerEquationMod.applyQxTransposed(sbp, q_i, dxidx_i, work, qxT_term, op)
# Do Ex: interpolate to face, apply normal vector (apply nbrperm and fac),
# reverse interpolate
if _check_poly
applyE(mesh, i, sview(iface_idx, :, i), q_i, qface, work2, E_term)
end
# test apply Dx
fill!(dx_term, 0)
EulerEquationMod.applyDx(sbp, q_i, dxidx_i, jac_i, work, dx_term, op)
# test apply Qx
fill!(qx_term, 0)
EulerEquationMod.applyQx(sbp, q_i, dxidx_i, work, qx_term, op)
# test apply Dx^T
fill!(dxT_term, 0)
EulerEquationMod.applyDxTransposed(sbp, q_i, dxidx_i, jac_i, work, dxT_term, op)
# test explicitly computed operator matrices
EulerEquationMod.calcDx(sbp, dxidx_i, jac_i, Dx)
EulerEquationMod.calcQx(sbp, dxidx_i, Qx)
EulerEquationMod.calcDxTransposed(sbp, dxidx_i, jac_i, DxT)
EulerEquationMod.calcQxTransposed(sbp, dxidx_i, QxT)
for d1=1:mesh.dim
for k=1:mesh.numDofPerNode
dx_term2[k, :, d1] = -Dx[:, :, d1]*q_i[k, :]
dxT_term2[k, :, d1] = -Dx[:, :, d1].'*q_i[k, :]
qx_term2[k, :, d1] = -Qx[:, :, d1]*q_i[k, :]
qxT_term2[k, :, d1] = -Qx[:, :, d1].'*q_i[k, :]
end
@test maximum(abs.(DxT[:, :, d1] - Dx[:, :, d1].')) < 1e-13
@test maximum(abs.(QxT[:, :, d1] - Qx[:, :, d1].')) < 1e-13
end
# check against analytical derivative
for j=1:mesh.numNodesPerElement
for d=1:mesh.dim
fac = mesh.jac[j, i]/sbp.w[j]
for k=1:mesh.numDofPerNode
val = fac*(qxT_term[k, j, d] + E_term[k, j, d])
val2 = dx_term[k, j, d]
val3 = fac*qx_term[k, j, d]
val4 = fac*qx_term2[k, j, d]
if _check_poly
@test abs(val - qderiv[k, d, j, i]) < 1e-11
@test abs(val2 + qderiv[k, d, j, i]) < 1e-11
@test abs(val3 + qderiv[k, d, j, i]) < 1e-11
@test abs(dx_term2[k, j, d] + qderiv[k, d, j, i]) < 1e-11
@test abs(val4 + qderiv[k, d, j, i]) < 1e-11
end
# test agreement between applyDx and calcDx + mat-vec
@test abs(dx_term2[k, j, d] - dx_term[k, j, d]) < 1e-11
@test abs(dxT_term2[k, j, d] - dxT_term[k, j, d]) < 1e-11
@test abs(qx_term2[k, j, d] - qx_term[k, j, d]) < 1e-11
@test abs(qxT_term2[k, j, d] - qxT_term[k, j, d]) < 1e-11
end
end
end
if _check_poly
for d=1:mesh.dim
for k=1:mesh.numDofPerNode
# Dx^T is a bit weird because it isn't easily related to a polynomial.
# Instead do: v^T Dx^T u = (v^T Dx^T) u = dv/dx^T u
# = v^T (Dx^T u), where v is a polynomial
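# Concretely: with op = Subtract, dxT_term = -Dx^T * u, so
#   dot(dxT_term, u) = -u^T Dx u = -(Dx u) . u = -(du/dx) . u
# when Dx is exact for the polynomial u, hence val4 + val5 ~ 0 in the test
# below.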
val4 = dot(dxT_term[k, :, d], q_i[k, :])
val5 = dot(qderiv[k, d, :, i], q_i[k, :])
@test abs(val4 + val5) < 5e-11
end
end
end
end # end i
# make sure some elements were done
@test nel > 0
testQx2(mesh, sbp, eqn, opts)
return nothing
end
function testQx2(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
# now that the first method of applyQxTransposed is verified, use it to
# test the second
degree = sbp.degree
wx = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
wxi = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res1_qxT_tmp = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res1_qx_tmp = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res1_dx_tmp = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res1_dxT_tmp = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res1_qxT = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res1_dx = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res1_qx = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res1_dxT = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res2_qxT = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res2_dx = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res2_qx = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
res2_dxT = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:mesh.numEl
# setup polynomial
for j=1:mesh.numNodesPerElement
x = mesh.coords[1, j, i]
y = mesh.coords[2, j, i]
for d=1:mesh.dim
for k=1:mesh.numDofPerNode
# make the polynomials and their derivatives different in each direction
facx = d + k
facy = 2*d + k
facz = 3*d + k
wx[k, j, d] = facx*x^degree + facy*y^degree
if mesh.dim == 3
z = mesh.coords[3, j, i]
wx[k, j, d] += facz*z^degree
end
end
end # end d
end # end j
dxidx_i = ro_sview(mesh.dxidx, :, :, :, i)
jac_i = ro_sview(mesh.jac, :, i)
# call first method
# This computes [Qx, Qy, Qz] * w_d, sum only Q_x * w_d into res1
fill!(res1_qxT, 0); fill!(res1_qx, 0); fill!(res1_dx, 0); fill!(res1_dxT, 0)
for d=1:mesh.dim
fill!(res1_qxT_tmp, 0); fill!(res1_qx_tmp, 0); fill!(res1_dx_tmp, 0)
fill!(res1_dxT_tmp, 0)
wx_d = sview(wx, :, :, d)
EulerEquationMod.applyQxTransposed(sbp, wx_d, dxidx_i, wxi, res1_qxT_tmp)
EulerEquationMod.applyQx(sbp, wx_d, dxidx_i, wxi, res1_qx_tmp)
EulerEquationMod.applyDx(sbp, wx_d, dxidx_i, jac_i, wxi, res1_dx_tmp)
EulerEquationMod.applyDxTransposed(sbp, wx_d, dxidx_i, jac_i, wxi, res1_dxT_tmp)
for j=1:mesh.numNodesPerElement
for k=1:mesh.numDofPerNode
res1_qxT[k, j] += res1_qxT_tmp[k, j, d]
res1_qx[k, j] += res1_qx_tmp[k, j, d]
res1_dx[k, j] += res1_dx_tmp[k, j, d]
res1_dxT[k, j] += res1_dxT_tmp[k, j, d]
end
end
end # end d
# second method
fill!(res2_qxT, 0); fill!(res2_qx, 0); fill!(res2_dx, 0); fill!(res2_dxT, 0)
EulerEquationMod.applyQxTransposed(sbp, wx, dxidx_i, wxi, res2_qxT)
EulerEquationMod.applyQx(sbp, wx, dxidx_i, wxi, res2_qx)
EulerEquationMod.applyDx(sbp, wx, dxidx_i, jac_i, wxi, res2_dx)
EulerEquationMod.applyDxTransposed(sbp, wx, dxidx_i, jac_i, wxi, res2_dxT)
@test maximum(abs.(res2_qxT - res1_qxT)) < 1e-12
@test maximum(abs.(res2_qx - res1_qx)) < 1e-12
@test maximum(abs.(res2_dx - res1_dx)) < 1e-12
@test maximum(abs.(res2_dxT - res1_dxT)) < 1e-11
end # end i
return nothing
end
"""
Tests that the shockmesh was constructed correctly. This works for the case
where the shockmesh boundary elements are only those that lie on the
original mesh boundary
"""
function test_shockmesh(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
# construct the shock mesh with all elements in it that are fully interior
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
# all elements that are fully interior should be listed as shocked, all
# other elements should be on the boundary
elnums_shock = sview(shockmesh.elnums_all, 1:shockmesh.numShock)
elnums_bndry = sview(shockmesh.elnums_all, (shockmesh.numShock+1):(shockmesh.numEl))
for i=1:mesh.numEl
if iface_idx[end, i] != 0
@test i in elnums_shock
else
@test i in elnums_bndry
end
end
# get all elements on boundary
boundary_els = Array{Int}(mesh.numBoundaryFaces)
for i=1:mesh.numBoundaryFaces
boundary_els[i] = mesh.bndryfaces[i].element
end
boundary_els = unique(boundary_els)
sort!(boundary_els)
boundary_els_shock = sort!(shockmesh.elnums_all[(shockmesh.numShock+1):shockmesh.numEl])
@test length(boundary_els) == length(boundary_els_shock)
@test maximum(abs.(boundary_els - boundary_els_shock)) == 0
@test shockmesh.numEl == mesh.numEl
# make sure no elements are double-counted
@test length(unique(shockmesh.elnums_all[1:shockmesh.numEl])) == mesh.numEl
@test shockmesh.numBoundaryFaces == 0 # neighbor elements should not add
# boundaries
# check interfaces
for i=1:shockmesh.numInterfaces
iface_red = shockmesh.ifaces[i]
idx_orig = iface_red.idx_orig
elnum_fullL = shockmesh.elnums_all[iface_red.iface.elementL]
elnum_fullR = shockmesh.elnums_all[iface_red.iface.elementR]
@test elnum_fullL == Int(mesh.interfaces[idx_orig].elementL)
@test elnum_fullR == Int(mesh.interfaces[idx_orig].elementR)
end
# add only boundary elements, to check that Boundaries are correctly added
shockmesh2 = EulerEquationMod.ShockedElements{Tres}(mesh)
for i in boundary_els
push!(shockmesh2, i)
end
EulerEquationMod.completeShockElements(mesh, shockmesh2)
@test shockmesh2.numShock == length(boundary_els)
println("length(shockmesh.bndryfaces) = ", length(shockmesh2.bndryfaces))
println("shockmesh.bndryfaces = ", shockmesh.bndryfaces)
@test shockmesh2.numBoundaryFaces == length(mesh.bndryfaces)
# check original indices
idx_orig = zeros(Int, 0)
for i=1:shockmesh2.numBoundaryFaces
idx_orig_i = shockmesh2.bndryfaces[i].idx_orig
push!(idx_orig, idx_orig_i)
end
sort!(idx_orig)
@test idx_orig == collect(1:mesh.numBoundaryFaces)
end
function test_shockmesh2(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
# construct the shock mesh with all elements in it that are fully interior
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
# all elements that are fully interior should be listed as shocked, all
# other elements should be on the boundary
@test shockmesh.numShock == shockmesh.numEl # no neighbor elements
elnums_shock = sview(shockmesh.elnums_all, 1:shockmesh.numShock)
# get list of all elements on the boundary of the original domain
elnums_boundary = Array{Int}(0)
for i=1:mesh.numBoundaryFaces
push!(elnums_boundary, mesh.bndryfaces[i].element)
end
elnums_boundary = sort!(unique(elnums_boundary))
# get list of all elements on the boundary of the shock domain
for i=1:shockmesh.numBoundaryFaces
idx_orig = shockmesh.bndryfaces[i].idx_orig
iface_i = mesh.interfaces[idx_orig]
@test (iface_i.elementL in elnums_boundary) || (iface_i.elementR in elnums_boundary)
end
# test that all non-boundary elements are in the shockmesh
for i=1:mesh.numEl
if !(i in elnums_boundary)
@test i in elnums_shock
end
end
# check interfaces
for i=1:shockmesh.numInterfaces
iface_red = shockmesh.ifaces[i]
idx_orig = iface_red.idx_orig
elnum_fullL = shockmesh.elnums_all[iface_red.iface.elementL]
elnum_fullR = shockmesh.elnums_all[iface_red.iface.elementR]
@test elnum_fullL == Int(mesh.interfaces[idx_orig].elementL)
@test elnum_fullR == Int(mesh.interfaces[idx_orig].elementR)
end
# check bndry_offsets
@test shockmesh.bndry_offsets[end] == 1
return nothing
end
function test_thetaface(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.LDGShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts,
sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
flux = EulerEquationMod.LDG_ESFlux()
# set w_el to be polynomial
setWPoly(mesh, shockmesh, capture.w_el, degree)
# use the LDG code
fill!(capture.q_j, 0)
EulerEquationMod.computeThetaFaceContribution(mesh, sbp, eqn, opts,
capture, shockmesh, flux)
# compute against Ex. For polynomials, the face contribution
# reduces to the E_i w (sum i) operator
qface = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
work = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace, mesh.dim)
E_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
for i=1:shockmesh.numShock
i_full = shockmesh.elnums_all[i]
w_i = sview(capture.w_el, :, :, i)
idx_i = sview(iface_idx, :, i_full)
applyE(mesh, i_full, idx_i, w_i, qface, work, E_term)
for d=1:mesh.dim
@test maximum(abs.(capture.q_j[:, :, d, i] - E_term[:, :, d])) < 1e-13
end
end
return nothing
end
function test_qj(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
degree = sbp.degree
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.LDGShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
flux = EulerEquationMod.LDG_ESFlux()
diffusion = EulerEquationMod.ShockDiffusion(shockmesh.ee)
# q_j = D_j * w when interpolation is exact
setWPoly(mesh, shockmesh, capture.w_el, degree)
wx_el = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim, shockmesh.numEl)
setWPolyDeriv(mesh, shockmesh, wx_el, degree)
# use LDG code
EulerEquationMod.computeThetaVolumeContribution(mesh, sbp, eqn, opts, capture, shockmesh)
EulerEquationMod.computeThetaFaceContribution(mesh, sbp, eqn, opts, capture, shockmesh, flux)
EulerEquationMod.computeQFromTheta(mesh, sbp, eqn, opts, capture, shockmesh, diffusion)
# compare against analytical value
for i=1:shockmesh.numShock
for d=1:mesh.dim
@test maximum(abs.(capture.q_j[:, :, d, i] - wx_el[:, :, d, i])) < 1e-12
end
end
# all the neighbor elements should have q_j = 0 because epsilon = 0 there
for i=(shockmesh.numShock+1):shockmesh.numEl
for d=1:mesh.dim
@test maximum(abs.(capture.q_j[:, :, d, i])) == 0
end
end
return nothing
end
function test_qface(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.LDGShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
flux = EulerEquationMod.LDG_ESFlux()
setQjPoly(mesh, shockmesh, capture.q_j, degree)
fill!(capture.w_el, 0) # this only shows up in a jump term
# use LDG code
fill!(eqn.res, 0)
EulerEquationMod.computeQFaceTerm(mesh, sbp, eqn, opts, capture, shockmesh, flux)
# for exact interpolation, the LDG term reduces to E_x * q_x + Ey*q_y
qface = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
work = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace, mesh.dim)
E_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
E_term2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:shockmesh.numShock
i_full = shockmesh.elnums_all[i]
idx_i = sview(iface_idx, :, i_full)
# compute using E operator
fill!(E_term2, 0)
for d=1:mesh.dim
q_j = sview(capture.q_j, :, :, d, i)
# it's a bit wasteful to compute E_y*q_x and E_x*q_y, but it's only a test
applyE(mesh, i_full, idx_i, q_j, qface, work, E_term)
for j=1:mesh.numNodesPerElement
for k=1:mesh.numDofPerNode
E_term2[k, j] += E_term[k, j, d]
end
end
end
# compare
@test maximum(abs.(eqn.res[:, :, i_full] - E_term2)) < 1e-12
end
end
function test_q(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
# For exact interpolation, the q terms are -Q^T + E, which equals Q.
# Thus M^-1 * Q = D, which we can compute analytically.
# Because of the sum over directions, we get D_x * q_x + D_y * q_y.
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.LDGShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
flux = EulerEquationMod.LDG_ESFlux()
q_jx = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim, mesh.dim, shockmesh.numEl)
setQjPoly(mesh, shockmesh, capture.q_j, degree)
setQjPolyDeriv(mesh, shockmesh, q_jx, degree)
fill!(capture.w_el, 0) # this only shows up in a jump term
# use LDG code
fill!(eqn.res, 0)
EulerEquationMod.computeQFaceTerm(mesh, sbp, eqn, opts, capture, shockmesh, flux)
EulerEquationMod.computeQVolumeTerm(mesh, sbp, eqn, opts, capture, shockmesh)
# test against analytical derivative
res2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:shockmesh.numShock
i_full = shockmesh.elnums_all[i]
fill!(res2, 0)
for d=1:mesh.dim
for j=1:mesh.numNodesPerElement
for k=1:mesh.numDofPerNode
res2[k, j] += q_jx[k, j, d, d, i]
end
end
end
# apply inverse mass matrix to LDG terms
for j=1:mesh.numNodesPerElement
fac = mesh.jac[j, i_full]/sbp.w[j]
for k=1:mesh.numDofPerNode
eqn.res[k, j, i_full] *= fac
end
end
@test maximum(abs.(eqn.res[:, :, i_full] - res2)) < 1e-11
end
return nothing
end
function test_ldg_ESS(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
# construct the shock mesh with all elements in it that are fully interior
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.LDGShockCapturing{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
flux = EulerEquationMod.LDG_ESFlux()
# add random component to q
# test entropy stability
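# Background (standard entropy analysis, with this code's sign conventions):
# w = dS/du are the entropy variables, so w^T * res approximates the entropy
# rate dS/dt contributed by the shock-capturing term alone; entropy
# stability requires this to be non-positive, and for a random state it
# should be strictly negative.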
q_pert = 0.1*rand(size(eqn.q_vec))
eqn.q_vec .+= q_pert
array1DTo3D(mesh, sbp, eqn, opts, eqn.q_vec, eqn.q)
fill!(eqn.res, 0)
EulerEquationMod.calcShockCapturing(mesh, sbp, eqn, opts, capture, shockmesh)
array3DTo1D(mesh, sbp, eqn, opts, eqn.res, eqn.res_vec)
w_vec = zeros(Tsol, mesh.numDof)
copy!(w_vec, eqn.q_vec)
EulerEquationMod.convertToIR(mesh, sbp, eqn, opts, w_vec)
val = dot(w_vec, eqn.res_vec)
println("val = ", val)
@test val < 0
return nothing
end
#------------------------------------------------------------------------------
# test BR2
function test_br2_gradw(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
# set q to a polynomial; the entropy variables computed from it then have
# known values (and known derivatives), which computeGradW should reproduce
degree = sbp.degree
setPoly(mesh, eqn.q, degree)
q_deriv = zeros(Tres, mesh.numDofPerNode, mesh.dim, mesh.numNodesPerElement, mesh.numEl)
setPolyDeriv(mesh, q_deriv, degree)
w_vals = copy(eqn.q)
for i=1:mesh.numEl
for j=1:mesh.numNodesPerElement
q_j = sview(eqn.q, :, j, i)
w_j = sview(w_vals, :, j, i)
EulerEquationMod.convertToIR_(eqn.params, q_j, w_j)
end
end
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
EulerEquationMod.computeGradW(mesh, sbp, eqn, opts, capture, shockmesh,
capture.entropy_vars, capture.diffusion)
A0inv = zeros(Tsol, mesh.numDofPerNode, mesh.numDofPerNode)
dw_dx = zeros(Tsol, mesh.numDofPerNode)
for i=1:shockmesh.numEl
i_full = shockmesh.elnums_all[i]
@test maximum(abs.(capture.w_el[:, :, i] - w_vals[:, :, i_full])) < 1e-13
end
return nothing
end
function test_br2_volume(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
# set w_el to be polynomial
setWPoly(mesh, shockmesh, capture.w_el, degree)
setWPolyDeriv(mesh, shockmesh, capture.grad_w, degree)
w_deriv2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim,
mesh.dim, shockmesh.numEl)
setWPolyDeriv2(mesh, shockmesh, w_deriv2, degree)
fill!(eqn.res, 0)
EulerEquationMod.computeVolumeTerm(mesh, sbp, eqn, opts, capture, shockmesh)
# test: w^T Dx^T H Dx w = wx^T H wx
vals = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:shockmesh.numShock
i_full = shockmesh.elnums_all[i]
fill!(vals, 0)
for j=1:mesh.numNodesPerElement
for k=1:mesh.numDofPerNode
eqn.res[k, j, i_full] *= mesh.jac[j, i_full]/sbp.w[j]
for d1=1:mesh.dim
vals[k, j] += w_deriv2[k, j, d1, d1, i]
end
end
end
@test maximum(abs.(vals - eqn.res[:, :, i_full])) < 1e-12
end # end i
return nothing
end
function test_br2_face(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
# test alpha sums to 1
alpha_sum = zeros(shockmesh.numEl)
els_interior = Array{Int}(0)
for i=1:shockmesh.numInterfaces
iface_i = shockmesh.ifaces[i].iface
alpha_sum[iface_i.elementL] += capture.alpha[1, i]
alpha_sum[iface_i.elementR] += capture.alpha[2, i]
push!(els_interior, iface_i.elementL)
push!(els_interior, iface_i.elementR)
end
els_interior = unique(els_interior)
for i=1:shockmesh.numShock
el_orig = shockmesh.elnums_all[i]
println("element ", el_orig, " has alpha sum = ", alpha_sum[i])
end
for i=1:shockmesh.numShock
if i in els_interior # isolated elements (no interfaces) have alpha of 0
@test abs(alpha_sum[i] - 1) < 1e-15
end
end
if !shockmesh.isNeumann
println("\nTesting alpha values")
# in Dirichlet case, all alphas should be 1/numFacesPerElement (at least
# for shocked elements)
val = 1/(mesh.dim + 1)
for i=1:shockmesh.numInterfaces
iface_i = shockmesh.ifaces[i].iface
if iface_i.elementL <= shockmesh.numShock
@test abs(capture.alpha[1, i] - val) < 1e-13
end
if iface_i.elementR <= shockmesh.numShock
@test abs(capture.alpha[2, i] - val) < 1e-13
end
end
for i=1:shockmesh.numBoundaryFaces
@test abs(capture.alpha_b[i] - val) < 1e-13
end
end # end if
# set w_el to be polynomial
setWPoly(mesh, shockmesh, capture.w_el, degree)
setWPolyDeriv(mesh, shockmesh, capture.grad_w, degree)
fill!(eqn.res, 0)
EulerEquationMod.computeFaceTerm(mesh, sbp, eqn, opts, capture, shockmesh,
capture.diffusion, capture.penalty)
# because interpolation and differentiation are exact for polynomials, this
# term should be zero
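# (The face penalty acts on the jumps [w] and [grad(w) . n] across each
# interface; since w was set to a single global polynomial of degree
# <= sbp.degree, face interpolation is exact and both jumps vanish.)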
@test maximum(abs.(eqn.res)) < 1e-12
# test applyPenalty
delta_w = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
theta = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
res_wL = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
res_wR = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
res_thetaL = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
res_thetaR = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
tmp_el = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:shockmesh.numInterfaces
iface_red = shockmesh.ifaces[i].iface
idx_orig = shockmesh.ifaces[i].idx_orig
elnumL = shockmesh.elnums_all[iface_red.elementL]
elnumR = shockmesh.elnums_all[iface_red.elementR]
qL = ro_sview(eqn.q, :, :, elnumL) # the values of these are irrelevant
qR = ro_sview(eqn.q, :, :, elnumR)
wL = ro_sview(capture.w_el, :, :, iface_red.elementL)
wR = ro_sview(capture.w_el, :, :, iface_red.elementR)
coordsL = ro_sview(mesh.coords, :, :, elnumL)
coordsR = ro_sview(mesh.coords, :, :, elnumR)
nrm_face = ro_sview(mesh.nrm_face, :, :, idx_orig)
alphas = ro_sview(capture.alpha, :, i)
dxidxL = ro_sview(mesh.dxidx, :, :, :, elnumL)
dxidxR = ro_sview(mesh.dxidx, :, :, :, elnumR)
jacL = ro_sview(mesh.jac, :, elnumL)
jacR = ro_sview(mesh.jac, :, elnumR)
# test T3 by checking consistency with boundaryIntegrate
rand_realpart!(delta_w); fill!(res_wL, 0); fill!(res_wR, 0)
fill!(theta, 0); fill!(res_thetaL, 0); fill!(res_thetaR, 0)
EulerEquationMod.applyPenalty(capture.penalty, sbp, eqn.params, mesh.sbpface,
capture.diffusion, iface_red, delta_w,
theta, qL, qR, wL, wR, coordsL, coordsR,
nrm_face, alphas, dxidxL, dxidxR,
jacL, jacR, res_wL, res_wR,
res_thetaL, res_thetaR)
fill!(tmp_el, 0)
boundaryFaceIntegrate!(mesh.sbpface, iface_red.faceL, delta_w, tmp_el)
for k=1:mesh.numDofPerNode
@test maximum(abs.(2*sum(res_thetaL[k, :]) + sum(tmp_el[k, :]))) < 1e-12
@test maximum(abs.(2*sum(res_thetaR[k, :]) - sum(tmp_el[k, :]))) < 1e-12
end
# test T2 by checking consistency with boundaryIntegrate
fill!(delta_w, 0); fill!(res_wL, 0); fill!(res_wR, 0)
rand_realpart!(theta); fill!(res_thetaL, 0); fill!(res_thetaR, 0)
EulerEquationMod.applyPenalty(capture.penalty, sbp, eqn.params, mesh.sbpface,
capture.diffusion, iface_red, delta_w,
theta, qL, qR, wL, wR, coordsL, coordsR,
nrm_face, alphas, dxidxL, dxidxR,
jacL, jacR, res_wL, res_wR,
res_thetaL, res_thetaR)
fill!(tmp_el, 0)
boundaryFaceIntegrate!(mesh.sbpface, iface_red.faceL, theta, tmp_el)
for k=1:mesh.numDofPerNode
@test maximum(abs.(2*sum(res_wL[k, :]) - sum(tmp_el[k, :]))) < 1e-12
@test maximum(abs.(2*sum(res_wR[k, :]) - sum(tmp_el[k, :]))) < 1e-12
end
# test entropy stability of T1
rand_realpart!(delta_w); fill!(res_wL, 0); fill!(res_wR, 0)
fill!(theta, 0); fill!(res_thetaL, 0); fill!(res_thetaR, 0)
EulerEquationMod.applyPenalty(capture.penalty, sbp, eqn.params, mesh.sbpface,
capture.diffusion, iface_red, delta_w,
theta, qL, qR, wL, wR, coordsL, coordsR,
nrm_face, alphas, dxidxL, dxidxR,
jacL, jacR, res_wL, res_wR,
res_thetaL, res_thetaR)
for k=1:mesh.numDofPerNode
@test dot(delta_w[k, :], res_wL[k, :]) > 0
@test -dot(delta_w[k, :], res_wR[k, :]) > 0
end
end
return nothing
end
function test_br2_Dgk(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts) where {Tsol, Tres}
degree = sbp.degree
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
fill!(eqn.res, 0)
# set w_el to be polynomial
setWPoly(mesh, shockmesh, capture.w_el, degree)
setWPolyDeriv(mesh, shockmesh, capture.grad_w, degree)
w_face = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
for i=1:shockmesh.numInterfaces
println("interface ", i)
iface = shockmesh.ifaces[i].iface
idx_orig = shockmesh.ifaces[i].idx_orig
elnumL = shockmesh.elnums_all[iface.elementL]
elnumR = shockmesh.elnums_all[iface.elementR]
println("elnumL = ", elnumL, ", elnumR = ", elnumR)
qL = ro_sview(eqn.q, :, :, elnumL)
qR = ro_sview(eqn.q, :, :, elnumR)
wL = ro_sview(capture.w_el, :, :, iface.elementL)
wR = ro_sview(capture.w_el, :, :, iface.elementR)
coordsL = ro_sview(mesh.coords, :, :, elnumL)
coordsR = ro_sview(mesh.coords, :, :, elnumR)
nrm_face = ro_sview(mesh.nrm_face, :, :, idx_orig)
dxidxL = ro_sview(mesh.dxidx, :, :, :, elnumL)
dxidxR = ro_sview(mesh.dxidx, :, :, :, elnumR)
jacL = ro_sview(mesh.jac, :, elnumL)
jacR = ro_sview(mesh.jac, :, elnumR)
resL = sview(eqn.res, :, :, elnumL)
resR = sview(eqn.res, :, :, elnumR)
# the solution is polynomial, so it doesn't matter which element we
# interpolate from (except for the nbrperm stuff)
boundaryFaceInterpolate!(mesh.sbpface, iface.faceL, wL, w_face)
for j=1:mesh.numNodesPerFace
for k=1:mesh.numDofPerNode
w_face[k, j] *= mesh.sbpface.wface[j]
end
end
EulerEquationMod.applyDgkTranspose(capture, sbp, eqn.params, mesh.sbpface,
iface, capture.diffusion, w_face, w_face,
qL, qR, wL, wR, coordsL, coordsR, nrm_face, dxidxL, dxidxR,
jacL, jacR, resL, resR)
end
# now use applyE for every element, then apply Dx^T and Dy^T
boundary_els = Array{Int}(0)
for i=1:shockmesh.numBoundaryFaces
push!(boundary_els, shockmesh.bndryfaces[i].bndry.element)
end
work = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
work2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace, mesh.dim)
work3 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
E_term = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement, mesh.dim)
res2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
for i=1:shockmesh.numShock
# skip boundary elements: not all of their faces apply Dgk^T (some are
# boundary faces), so the E-operator comparison below does not hold for them
if i in boundary_els
continue
end
i_full = shockmesh.elnums_all[i]
println("element ", i_full)
w_i = ro_sview(capture.w_el, :, :, i)
applyE(mesh, i_full, sview(iface_idx, :, i_full), w_i, work, work2, E_term)
dxidx_i = ro_sview(mesh.dxidx, :, :, :, i_full)
jac_i = ro_sview(mesh.jac, :, i_full)
fill!(res2, 0)
EulerEquationMod.applyDxTransposed(sbp, E_term, dxidx_i, jac_i, work3, res2)
@test maximum(abs.(res2 - eqn.res[:, :, i_full])) < 1e-12
end
return nothing
end
function test_br2_ESS(mesh, sbp, eqn::EulerData{Tsol, Tres}, _opts; fullmesh=false) where {Tsol, Tres}
opts = copy(_opts)
# the scheme is entropy stable when the Dirichlet BC is 0
for i=1:opts["numBC"]
opts[string("BC", i, "_name")] = "zeroBC"
end
# construct the shock mesh with all elements in it that are fully interior
if fullmesh
iface_idx, shockmesh = getEntireMesh(mesh, sbp, eqn, opts)
else
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
end
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
test_sc_ESS(mesh, sbp, eqn, opts, sensor, capture, shockmesh)
return nothing
end
function test_br2reduced_ESS(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts;
fullmesh=false) where {Tsol, Tres}
# construct the shock mesh with all elements in it that are fully interior
if fullmesh
iface_idx, shockmesh = getEntireMesh(mesh, sbp, eqn, opts)
else
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
end
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicReducedSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
test_sc_ESS(mesh, sbp, eqn, opts, sensor, capture, shockmesh)
test_sc_conservation(mesh, sbp, eqn, opts, sensor, capture, shockmesh)
return nothing
end
#TODO: combine with the above
function test_br2reduced2_ESS(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts;
fullmesh=false) where {Tsol, Tres}
# construct the shock mesh with all elements in it that are fully interior
if fullmesh
iface_idx, shockmesh = getEntireMesh(mesh, sbp, eqn, opts)
else
iface_idx, shockmesh = getInteriorMesh(mesh, sbp, eqn, opts)
end
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicReduced2SC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
EulerEquationMod.allocateArrays(capture, mesh, shockmesh)
test_sc_ESS(mesh, sbp, eqn, opts, sensor, capture, shockmesh)
test_sc_conservation(mesh, sbp, eqn, opts, sensor, capture, shockmesh)
return nothing
end
function test_sc_ESS(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts,
sensor, capture, shockmesh) where {Tsol, Tres}
# add random component to q
# test entropy stability
q_pert = 0.1*rand(size(eqn.q_vec))
eqn.q_vec .+= q_pert
array1DTo3D(mesh, sbp, eqn, opts, eqn.q_vec, eqn.q)
fill!(eqn.res, 0)
EulerEquationMod.calcShockCapturing(mesh, sbp, eqn, opts, capture, shockmesh)
array3DTo1D(mesh, sbp, eqn, opts, eqn.res, eqn.res_vec)
w_vec = zeros(Tsol, mesh.numDof)
copy!(w_vec, eqn.q_vec)
EulerEquationMod.convertToIR(mesh, sbp, eqn, opts, w_vec)
val = dot(w_vec, eqn.res_vec)
println("val = ", val)
@test val < 0
return nothing
end
function test_sc_conservation(mesh, sbp, eqn::EulerData{Tsol, Tres},
_opts, sensor, capture, shockmesh) where {Tsol, Tres}
opts = copy(_opts)
opts["addVolumeIntegrals"] = false
opts["addBoundaryIntegrals"] = false
opts["addFaceIntegrals"] = false
opts["addStabilization"] = false
# add random component to q
# test conservation
q_pert = 0.1*rand(size(eqn.q_vec))
eqn.q_vec .+= q_pert
array1DTo3D(mesh, sbp, eqn, opts, eqn.q_vec, eqn.q)
fill!(eqn.res, 0)
EulerEquationMod.calcShockCapturing(mesh, sbp, eqn, opts, capture, shockmesh)
array3DTo1D(mesh, sbp, eqn, opts, eqn.res, eqn.res_vec)
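# Conservation check: summing res_vec is equivalent to left-multiplying by a
# vector of ones, and for a conservative (telescoping) discretization the
# interior face contributions cancel pairwise; boundary terms are disabled
# above, so the total should be zero to machine precision.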
val = abs(sum(eqn.res_vec))
@test val < 1e-13
return nothing
end
function test_br2_serialpart(mesh, sbp, eqn::EulerData{Tsol, Tres}, _opts) where {Tsol, Tres}
opts = copy(_opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
# solve the PDE to get a solution with non-zero jump between elements
# that can be reproduced in parallel
opts["solve"] = true
opts["addVolumeIntegrals"] = true
opts["addFaceIntegrals"] = true
opts["addBoundaryIntegrals"] = true
solvePDE(mesh, sbp, eqn, opts)
# the scheme is entropy stable when the Dirichlet BC is 0
for i=1:opts["numBC"]
opts[string("BC", i, "_name")] = "zeroBC"
end
EulerEquationMod.getBCFunctors(mesh, sbp, eqn, opts)
test_serialpart(mesh, sbp, eqn, opts, capture, "br2")
return nothing
end
function test_br2reduced_serialpart(mesh, sbp, eqn::EulerData{Tsol, Tres}, _opts) where {Tsol, Tres}
opts = copy(_opts)
sensor = EulerEquationMod.ShockSensorEverywhere{Tsol, Tres}(mesh, sbp, opts)
capture = EulerEquationMod.SBPParabolicReducedSC{Tsol, Tres}(mesh, sbp, eqn, opts, sensor)
# solve the PDE to get a solution with non-zero jump between elements
# that can be reproduced in parallel
opts["solve"] = true
opts["addVolumeIntegrals"] = true
opts["addFaceIntegrals"] = true
opts["addBoundaryIntegrals"] = true
solvePDE(mesh, sbp, eqn, opts)
test_serialpart(mesh, sbp, eqn, opts, capture, "br2reduced")
return nothing
end
function test_serialpart(mesh, sbp, eqn::EulerData{Tsol, Tres}, opts,
capture, prefix::String) where {Tsol, Tres}
w_vec = zeros(Tsol, mesh.numDof)
copy!(w_vec, eqn.q_vec)
EulerEquationMod.convertToIR(mesh, sbp, eqn, opts, w_vec)
fill!(eqn.res, 0)
EulerEquationMod.applyShockCapturing(mesh, sbp, eqn, opts, capture)
println("isnan eqn.q = ", any(isnan.(eqn.q)))
println("isnan eqn.res_vec = ", any(isnan.(eqn.res_vec)))
val = dot(w_vec, eqn.res_vec)
println("val = ", val)
@test val < 0
# save this for parallel tests
f = open("$(prefix)_entropy_serial.dat", "w")
println(f, real(val))
close(f)
saveSolutionToMesh(mesh, eqn.q_vec)
writeVisFiles(mesh, "$(prefix)_serial")
return nothing
end
|
{"hexsha": "c1c5d304a7aa889e0aea02973ee2976e45bbbcc8", "size": 66800, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/euler/test_shock_capturing.jl", "max_stars_repo_name": "OptimalDesignLab/PDESolver.jl", "max_stars_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-10-30T17:12:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T10:29:45.000Z", "max_issues_repo_path": "test/euler/test_shock_capturing.jl", "max_issues_repo_name": "tangwang-USTC/PDESolver.jl", "max_issues_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 163, "max_issues_repo_issues_event_min_datetime": "2015-07-14T19:15:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-08T21:24:41.000Z", "max_forks_repo_path": "test/euler/test_shock_capturing.jl", "max_forks_repo_name": "tangwang-USTC/PDESolver.jl", "max_forks_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-05-20T15:36:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T17:57:33.000Z", "avg_line_length": 32.6490713587, "max_line_length": 123, "alphanum_fraction": 0.6594461078, "num_tokens": 22012}
|
from hazel.chromosphere import Hazel_atmosphere
from hazel.photosphere import SIR_atmosphere
from hazel.parametric import Parametric_atmosphere
from hazel.stray import Straylight_atmosphere
from hazel.configuration import Configuration
from hazel.io import Generic_output_file
from collections import OrderedDict
from hazel.codes import hazel_code, sir_code
from hazel.spectrum import Spectrum
from hazel.transforms import transformed_to_physical, physical_to_transformed, jacobian_transformation
import hazel.util
import numpy as np
import copy
import os
from pathlib import Path
import scipy.stats
import scipy.special
import scipy.signal
import scipy.linalg
import scipy.optimize
import warnings
import logging
import sys
__all__ = ['Model']
class Model(object):
def __init__(self, config=None, working_mode='synthesis', verbose=0, debug=False, rank=0, randomization=None, root=''):
np.random.seed(123)
if (rank != 0):
return
self.photospheres = []
self.chromospheres = []
self.chromospheres_order = []
self.atmospheres = {}
self.order_atmospheres = []
self.straylight = []
self.parametric = []
self.spectrum = []
self.configuration = None
self.n_cycles = 1
self.spectrum = {}
self.topologies = []
self.straylights = []
self.working_mode = working_mode
self.pixel = 0
self.debug = debug
self.use_analytical_RF_if_possible = False
self.nlte_available = False
self.use_nlte = False
self.root = root
self.epsilon = 1e-2
self.svd_tolerance = 1e-8
self.step_limiter_inversion = 1.0
self.backtracking = 'brent'
self.verbose = verbose
self.logger = logging.getLogger("model")
self.logger.setLevel(logging.DEBUG)
self.logger.handlers = []
ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set randomization
if (randomization is None):
self.n_randomization = 1
else:
self.n_randomization = randomization
if (self.verbose >= 1):
self.logger.info('Hazel2 v1.0')
if ('torch' in sys.modules and 'torch_geometric' in sys.modules):
if (self.verbose >= 1):
self.logger.info('PyTorch and PyTorch Geometric found. NLTE for Ca II is available')
self.nlte_available = True
if (config is not None):
if (self.verbose >= 1):
self.logger.info('Using configuration from file : {0}'.format(config))
self.configuration = Configuration(config)
self.use_configuration(self.configuration.config_dict)
# Initialize pyhazel
hazel_code._init()
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d:
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d:
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
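# Pickling sketch: the two methods above make Model picklable by swapping the
# (unpicklable, lock-holding) logger for its name on save and re-attaching it
# on load. A round-trip like the following is then expected to work:
#   import pickle
#   blob = pickle.dumps(model)    # logger stored as the string 'model'
#   model2 = pickle.loads(blob)   # logger re-created via logging.getLogger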
def __str__(self):
tmp = ''
for l, par in self.__dict__.items():
if (l != 'LINES'):
tmp += '{0}: {1}\n'.format(l, par)
return tmp
def use_configuration(self, config_dict):
"""
Use a configuration file
Parameters
----------
config_dict : dict
Dictionary containing all the options from the configuration file previously read
Returns
-------
None
"""
# Deal with the spectral regions
tmp = config_dict['spectral regions']
# Output file
self.output_file = config_dict['working mode']['output file']
# Backtracking mode
if ('backtracking' in config_dict['working mode']):
self.backtracking = config_dict['working mode']['backtracking']
else:
self.backtracking = 'brent'
if (self.verbose >= 1):
self.logger.info('Backtracking mode : {0}'.format(self.backtracking))
# Working mode
# self.working_mode = config_dict['working mode']['action']
# Add spectral regions
for key, value in config_dict['spectral regions'].items():
self.add_spectral(value)
# Set number of cycles if present
if (self.working_mode == 'inversion'):
if ('number of cycles' in config_dict['working mode']):
if (config_dict['working mode']['number of cycles'] != 'None'):
self.n_cycles = int(config_dict['working mode']['number of cycles'])
if (self.verbose >= 1):
self.logger.info('Using {0} cycles'.format(self.n_cycles))
# Use analytical RFs if possible
if ('analytical rf if possible' in config_dict['working mode']):
if (config_dict['working mode']['analytical rf if possible'] != 'None'):
self.use_analytical_RF_if_possible = hazel.util.tobool(config_dict['working mode']['analytical rf if possible'])
else:
self.use_analytical_RF_if_possible = False
else:
self.use_analytical_RF_if_possible = False
if (self.verbose >= 1):
self.logger.info('Using analytical RFs if possible : {0}'.format(self.use_analytical_RF_if_possible))
# Set number of maximum iterations
if ('maximum iterations' in config_dict['working mode']):
if (config_dict['working mode']['maximum iterations'] != 'None'):
self.max_iterations = int(config_dict['working mode']['maximum iterations'])
else:
self.max_iterations = 10
else:
self.max_iterations = 10
if (self.verbose >= 1):
self.logger.info('Using {0} max. iterations'.format(self.max_iterations))
# Randomization
if (self.verbose >= 1):
if (self.n_randomization == 1):
self.logger.info('Not using randomizations')
else:
self.logger.info('Using a maximum of {0} randomizations'.format(self.n_randomization))
# Set relative error for stopping
if ('relative error' in config_dict['working mode']):
if (config_dict['working mode']['relative error'] != 'None'):
self.relative_error = float(config_dict['working mode']['relative error'])
if (self.verbose >= 1):
self.logger.info('Stopping when relative error is below {0}'.format(self.relative_error))
else:
self.relative_error = 1e-4
else:
self.relative_error = 1e-4
# Save all cycles
if ('save all cycles' not in config_dict['working mode']):
self.save_all_cycles = False
else:
self.save_all_cycles = hazel.util.tobool(config_dict['working mode']['save all cycles'])
if (self.verbose >= 1):
self.logger.info('Saving all cycles : {0}'.format(self.save_all_cycles))
# Deal with the atmospheres
tmp = config_dict['atmospheres']
self.atmospheres = {}
if (self.verbose >= 1):
self.logger.info('Adding atmospheres')
for key, value in tmp.items():
if ('photosphere' in key):
if (self.verbose >=1):
self.logger.info(' - New available photosphere : {0}'.format(value['name']))
self.add_photosphere(value)
if ('chromosphere' in key):
if (self.verbose >= 1):
self.logger.info(' - New available chromosphere : {0}'.format(value['name']))
self.add_chromosphere(value)
if ('parametric' in key):
if (self.verbose >= 1):
self.logger.info(' - New available parametric : {0}'.format(value['name']))
self.add_parametric(value)
if ('straylight' in key):
if (self.verbose >= 1):
self.logger.info(' - New available straylight : {0}'.format(value['name']))
self.add_straylight(value)
self.setup()
def setup(self):
"""
Setup the model for synthesis/inversion. This setup includes adding the topologies, removing unused
atmospheres, reading the number of cycles for the inversion and some sanity checks
Parameters
----------
None
Returns
-------
None
"""
# Adding topologies
if (self.verbose >= 1):
self.logger.info("Adding topologies")
for value in self.topologies:
self.add_topology(value)
# Remove unused atmospheres defined in the configuration file and not in the topology
if (self.verbose >= 1):
self.logger.info("Removing unused atmospheres")
self.remove_unused_atmosphere()
# Calculate indices for atmospheres
index_chromosphere = 1
index_photosphere = 1
self.n_photospheres = 0
self.n_chromospheres = 0
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
v.index = index_photosphere
index_photosphere += 1
self.n_photospheres += 1
if (v.type == 'chromosphere'):
v.index = index_chromosphere
index_chromosphere += 1
self.n_chromospheres += 1
# Use analytical RFs if only photospheres are defined
if (self.n_chromospheres == 0 and self.use_analytical_RF_if_possible):
self.use_analytical_RF = True
if (self.verbose >= 1):
self.logger.info('Using analytical RFs : {0}'.format(self.use_analytical_RF))
else:
self.use_analytical_RF = False
# Check that number of pixels is the same for all atmospheric files if in synthesis mode
if (self.working_mode == 'synthesis'):
n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.atmospheres.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with model atmospheres do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to read : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
if (self.working_mode == 'inversion'):
n_pixels = [v.n_pixel for k, v in self.spectrum.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.spectrum.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with spectral regions do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to invert : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
# Check that the number of pixels from all observations (in case of inversion) is the same
# Check also if they are equal to those of the models
# n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
# all_equal = all(x == n_pixels[0] for x in n_pixels)
# Check that the number of cycles is the same for all atmospheres (in case of inversion)
if (self.working_mode == 'inversion'):
cycles = []
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
cycles.append(len(v2))
all_equal = all(x == cycles[0] for x in cycles)
if (not all_equal):
raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
else:
if (self.n_cycles is None):
self.n_cycles = cycles[0]
# if (self.working_mode == 'inversion'):
# cycles = []
# for tmp in ['I', 'Q', 'U', 'V']:
# if ( cycles.append
# for k, v in self.atmospheres.items():
# for k2, v2 in v.cycles.items():
# cycles.append(len(v2))
# all_equal = all(x == cycles[0] for x in cycles)
# if (not all_equal):
# raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
# else:
# if (self.n_cycles is None):
# self.n_cycles = cycles[0]
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
self.LINES = ff.readlines()
ff.close()
self.init_sir()
for k, v in self.spectrum.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
for k, v in self.atmospheres.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
# Count total number of free parameters
if (self.working_mode == 'inversion'):
self.n_free_parameters = 0
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
self.n_free_parameters += max(hazel.util.onlyint(v2[0:self.n_cycles+1]))
if (self.verbose >= 1):
self.logger.info('Total number of free parameters in all cycles : {0}'.format(self.n_free_parameters))
def open_output(self):
self.output_handler = Generic_output_file(self.output_file)
self.output_handler.open(self)
def close_output(self):
self.output_handler.close()
def write_output(self, randomization=0):
if (self.working_mode == 'synthesis'):
self.flatten_parameters_to_reference(cycle=0)
self.output_handler.write(self, pixel=0, randomization=randomization)
def add_spectral(self, spectral):
"""
Programmatically add a spectral region
Parameters
----------
spectral : dict
Dictionary containing the following data
'Name', 'Wavelength', 'Topology', 'Weights Stokes', 'Wavelength file', 'Wavelength weight file',
'Observations file', 'Mask file'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has
# already been done
value = hazel.util.lower_dict_keys(spectral)
if (self.verbose >= 1):
self.logger.info('Adding spectral region {0}'.format(value['name']))
if ('wavelength file' not in value):
value['wavelength file'] = None
elif (value['wavelength file'] == 'None'):
value['wavelength file'] = None
if ('wavelength weight file' not in value):
value['wavelength weight file'] = None
elif (value['wavelength weight file'] == 'None'):
value['wavelength weight file'] = None
if ('observations file' not in value):
value['observations file'] = None
elif (value['observations file'] == 'None'):
value['observations file'] = None
if ('stokes weights' not in value):
value['stokes weights'] = None
elif (value['stokes weights'] == 'None'):
value['stokes weights'] = None
if ('mask file' not in value):
value['mask file'] = None
elif (value['mask file'] == 'None'):
value['mask file'] = None
if ('los' not in value):
value['los'] = None
elif (value['los'] == 'None'):
value['los'] = None
for tmp in ['i', 'q', 'u', 'v']:
if ('weights stokes {0}'.format(tmp) not in value):
value['weights stokes {0}'.format(tmp)] = [None]*10
elif (value['weights stokes {0}'.format(tmp)] == 'None'):
value['weights stokes {0}'.format(tmp)] = [None]*10
if ('boundary condition' not in value):
value['boundary condition'] = None
elif (value['boundary condition'] == 'None'):
value['boundary condition'] = None
if ('instrumental profile' not in value):
value['instrumental profile'] = None
elif (value['instrumental profile'] == 'None'):
value['instrumental profile'] = None
# Wavelength file is not present
if (value['wavelength file'] is None):
# If the wavelength is defined
if ('wavelength' in value):
axis = value['wavelength']
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
wvl_lr = None
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
else:
raise Exception('Wavelength range is not defined. Please, use "Wavelength" or "Wavelength file"')
else:
# If both observed and synthetic wavelength points are given
if ('wavelength' in value):
axis = value['wavelength']
if (len(axis) != 3):
raise Exception("Wavelength range is not given in the format: lower, upper, steps")
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl_lr = np.loadtxt(self.root + value['wavelength file'])
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl = np.loadtxt(self.root + value['wavelength file'])
wvl_lr = None
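# For reference, the two supported configurations then look like (the values
# are illustrative only, not taken from any particular file):
#   Wavelength = 10826, 10833, 150
# or, to read the axis from disk,
#   Wavelength file = 'observations/10830.wavelength'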
if (value['wavelength weight file'] is None):
if (self.verbose >= 1 and self.working_mode == 'inversion'):
self.logger.info(' - Setting all wavelength weights to 1')
weights = np.ones((4,len(wvl)))
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength weights from {0}'.format(value['wavelength weight file']))
weights = np.loadtxt(self.root + value['wavelength weight file'], skiprows=1).T
# Observations file not present
if (value['observations file'] is None):
if (self.working_mode == 'inversion'):
raise Exception("Inversion mode without observations is not allowed.")
obs_file = None
else:
if (self.verbose >= 1):
self.logger.info(' - Using observations from {0}'.format(value['observations file']))
obs_file = value['observations file']
if (value['mask file'] is None):
mask_file = None
if (self.verbose >= 1):
self.logger.info(' - No mask for pixels')
else:
if (self.verbose >= 1):
self.logger.info(' - Using mask from {0}'.format(value['mask file']))
mask_file = value['mask file']
if (value['instrumental profile'] is None):
if (self.verbose >= 1):
self.logger.info(' - No instrumental profile')
else:
if (self.verbose >= 1):
self.logger.info(' - Instrumental profile : {0}'.format(value['instrumental profile']))
# if (value['straylight file'] is None):
# if (self.verbose >= 1):
# self.logger.info(' - Not using straylight')
# stray_file = None
# else:
# if (self.verbose >= 1):
# self.logger.info(' - Using straylight from {0}'.format(value['straylight file']))
# stray_file = value['straylight file']
if (value['los'] is None):
if (self.working_mode == 'synthesis'):
raise Exception("You need to provide the LOS for spectral region {0}".format(value['name']))
los = None
else:
los = np.array(value['los']).astype('float64')
if (self.verbose >= 1):
self.logger.info(' - Using LOS {0}'.format(value['los']))
if (value['boundary condition'] is None):
if (self.verbose >= 1):
self.logger.info(' - Using default boundary conditions [1,0,0,0] in spectral region {0} or read from file. Check carefully!'.format(value['name']))
boundary = np.array([1.0,0.0,0.0,0.0])
self.normalization = 'on-disk'
else:
boundary = np.array(value['boundary condition']).astype('float64')
if (boundary[0] == 0.0):
if (self.verbose >= 1):
self.logger.info(' - Using off-limb normalization (peak intensity)')
if (self.verbose >= 1):
self.logger.info(' - Using boundary condition {0}'.format(value['boundary condition']))
stokes_weights = []
for st in ['i', 'q', 'u', 'v']:
tmp = hazel.util.tofloat(value['weights stokes {0}'.format(st)])
tmp = [i if i is not None else 1.0 for i in tmp]
stokes_weights.append(tmp)
stokes_weights = np.array(stokes_weights)
self.spectrum[value['name']] = Spectrum(wvl=wvl, weights=weights, observed_file=obs_file,
name=value['name'], stokes_weights=stokes_weights, los=los, boundary=boundary, mask_file=mask_file, instrumental_profile=value['instrumental profile'], root=self.root, wvl_lr=wvl_lr)
self.topologies.append(value['topology'])
def add_photosphere(self, atmosphere):
"""
Programmatically add a photosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = SIR_atmosphere(working_mode=self.working_mode, name=atm['name'], verbose=self.verbose)
lines = [int(k) for k in list(atm['spectral lines'])]
# If NLTE is available because PyTorch and PyTorch Geom are available
# check whether the line is needed in NLTE or not
if self.nlte_available:
if ('nlte' not in atm):
self.atmospheres[atm['name']].nlte = False
else:
self.atmospheres[atm['name']].nlte = hazel.util.tobool(atm['nlte'])
if (self.verbose >= 1):
self.logger.info(" * Line in NLTE if available")
else:
self.atmospheres[atm['name']].nlte = False
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
if ('reference frame' in atm):
if ('line-of-sight' in atm['reference frame']):
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if ('vertical' in atm['reference frame']):
raise Exception('Magnetic fields in photospheres are always in the line-of-sight reference frame.')
else:
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(lines))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
self.atmospheres[atm['name']].add_active_line(lines=lines, spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range), verbose=self.verbose)
if (self.atmospheres[atm['name']].graphnet_nlte is not None):
self.set_nlte(True)
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
if ('temperature change to recompute departure coefficients' in atm):
self.atmospheres[atm['name']].t_change_departure = float(atm['temperature change to recompute departure coefficients'])
else:
self.atmospheres[atm['name']].t_change_departure = 0.0
def add_chromosphere(self, atmosphere):
"""
Programmatically add a chromosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Hazel_atmosphere(working_mode=self.working_mode, name=atm['name'])
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(line=atm['line'], spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('reference frame' in atm):
            if (atm['reference frame'] == 'line-of-sight'):
                self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
            elif (atm['reference frame'] == 'vertical'):
                self.atmospheres[atm['name']].reference_frame = 'vertical'
else:
self.atmospheres[atm['name']].reference_frame = 'vertical'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(atm['line']))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('coordinates for magnetic field vector' in atm):
            if (atm['coordinates for magnetic field vector'] == 'cartesian'):
                self.atmospheres[atm['name']].coordinates_B = 'cartesian'
            elif (atm['coordinates for magnetic field vector'] == 'spherical'):
                self.atmospheres[atm['name']].coordinates_B = 'spherical'
else:
self.atmospheres[atm['name']].coordinates_B = 'cartesian'
self.atmospheres[atm['name']].select_coordinate_system()
if (self.verbose >= 1):
self.logger.info(" * Magnetic field coordinates system : {0}".format(self.atmospheres[atm['name']].coordinates_B))
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
self.atmospheres[atm['name']].height = float(atm['height'])
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
def add_parametric(self, atmosphere):
"""
Programmatically add a parametric atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Wavelength', 'Reference atmospheric model', 'Type',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Parametric_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def add_straylight(self, atmosphere):
"""
Programmatically add a straylight atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Straylight_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
        if ('reference atmospheric model' in atm):
            my_file = Path(self.root + atm['reference atmospheric model'])
            if (not my_file.exists()):
                raise FileNotFoundError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
            self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def remove_unused_atmosphere(self):
"""
Remove unused atmospheres
Parameters
----------
None
Returns
-------
None
"""
to_remove = []
for k, v in self.atmospheres.items():
if (not v.active):
to_remove.append(k)
if (self.verbose >= 1):
self.logger.info(' - Atmosphere {0} deleted.'.format(k))
for k in to_remove:
self.atmospheres.pop(k)
def init_sir_external(self):
"""
Initialize SIR for this synthesis
Parameters
----------
None
Returns
-------
None
"""
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
f = open('lte.grid', 'w')
f.write("IMPORTANT: a) All items must be separated by commas. \n")
f.write(" b) The first six characters of the last line \n")
f.write(" in the header (if any) must contain the symbol --- \n")
f.write("\n")
f.write("Line and blends indices : Initial lambda Step Final lambda \n")
f.write("(in this order) (mA) (mA) (mA) \n")
f.write("-----------------------------------------------------------------------\n")
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
low = v.spectrum.wavelength_axis[ind_low]
top = v.spectrum.wavelength_axis[ind_top] # TODO
delta = (v.spectrum.wavelength_axis[1] - v.spectrum.wavelength_axis[0])
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
flines = ff.readlines()
ff.close()
for i in range(len(v.lines)):
for l in flines:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == v.lines[0]):
wvl = float(tmp[2])
f.write("{0} : {1}, {2}, {3}\n".format(str(v.lines)[1:-1], 1e3*(low-wvl), 1e3*delta, 1e3*(top-wvl)))
f.close()
v.n_lambda = sir_code.init_externalfile(v.index, filename)
def init_sir(self):
"""
        Initialize SIR for this synthesis. This version does not make use of any external file, which might
        not be safe when running in MPI mode.
Parameters
----------
None
Returns
-------
None
"""
lines = []
n_lines = 0
elements = {'H':1,'HE':2,'LI':3,'BE':4,'B':5,'C':6,'N':7,'O':8,'F':9,'NE':10,
'NA':11,'MG':12,'AL':13,'SI':14,'P':15,'S':16,'CL':17,'AR':18,'K':19,'CA':20,'SC':21,'TI':22,'V':23,'CR':24,
'MN':25,'FE':26,'CO':27,'NI':28,'CU':29,'ZN':30,'GA':31,'GE':32,'AS':33,'SE':34,'BR':35,'KR':36,
'RB':37,'SR':38,'Y':39,'ZR':40,'NB':41,'MO':42,'TC':43,'RU':44,'RH':45,'PD':46,'AG':47,'CD':48,'IN':49,
'SN':50,'SB':51,'TE':52,'I':53,'XE':54,'CS':55,'BA':56,'LA':57,'CE':58,'PR':59,'ND':60,'PM':61,
'SM':62,'EU':63,'GD':64,'TB':65,'DY':66,'HO':67,'ER':68,'TM':69,'YB':70,'LU':71,'HF':72,'TA':73,'W':74,
'RE':75,'OS':76,'IR':77,'PT':78,'AU':79,'HG':80,'TL':81,'PB':82,'BI':83,'PO':84,'AT':85,'RN':86,
'FR':87,'RA':88,'AC':89,'TH':90,'PA':91,'U':92}
states = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6}
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
n_lines += 1
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
low = v.spectrum.wavelength_axis[ind_low]
top = v.spectrum.wavelength_axis[ind_top] # TODO
delta = (v.spectrum.wavelength_axis[1] - v.spectrum.wavelength_axis[0])
nblend = len(v.lines)
lines = np.zeros(len(v.lines), dtype=np.intc)
atom = np.zeros(len(v.lines), dtype=np.intc)
istage = np.zeros(len(v.lines), dtype=np.intc)
wvl = np.zeros(len(v.lines))
zeff = np.zeros(len(v.lines))
energy = np.zeros(len(v.lines))
loggf = np.zeros(len(v.lines))
mult1 = np.zeros(len(v.lines), dtype=np.intc)
mult2 = np.zeros(len(v.lines), dtype=np.intc)
design1 = np.zeros(len(v.lines), dtype=np.intc)
design2 = np.zeros(len(v.lines), dtype=np.intc)
tam1 = np.zeros(len(v.lines))
tam2 = np.zeros(len(v.lines))
alfa = np.zeros(len(v.lines))
sigma = np.zeros(len(v.lines))
for i in range(len(v.lines)):
lines[i] = v.lines[i]
for l in self.LINES:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == v.lines[i]):
atom[i] = elements[tmp[0].split('=')[1]]
                        istage[i] = int(tmp[1])
wvl[i] = float(tmp[2])
zeff[i] = float(tmp[3])
energy[i] = float(tmp[4])
loggf[i] = float(tmp[5])
mult1[i] = int(tmp[6][:-1])
mult2[i] = int(tmp[8][:-1])
design1[i] = states[tmp[6][-1]]
design2[i] = states[tmp[8][-1]]
tam1[i] = float(tmp[7].split('-')[0])
tam2[i] = float(tmp[9].split('-')[0])
if (len(tmp) == 12):
alfa[i] = float(tmp[-2])
sigma[i] = float(tmp[-1])
else:
alfa[i] = 0.0
sigma[i] = 0.0
lambda0 = 1e3*(low-wvl[0])
lambda1 = 1e3*(top-wvl[0])
n_steps = ind_top - ind_low + 1
v.n_lambda = n_steps
sir_code.init(v.index, nblend, lines, atom, istage, wvl, zeff, energy, loggf,
mult1, mult2, design1, design2, tam1, tam2, alfa, sigma, lambda0, lambda1, n_steps)
def exit_hazel(self):
for k, v in self.atmospheres.items():
if (v.type == 'chromosphere'):
hazel_code.exit(v.index)
def add_topology(self, atmosphere_order):
"""
Add a new topology
Parameters
----------
topology : str
Topology
Returns
-------
None
"""
# Transform the order to a list of lists
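        # Example (hypothetical names): 'ph1 -> ch1+ch2' is parsed into
        # [['ph1'], ['ch1', 'ch2']], i.e. a list of vertically ordered groups,
        # where the atmospheres inside a group are combined with filling factors.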
if (self.verbose >= 1):
self.logger.info(' - {0}'.format(atmosphere_order))
vertical_order = atmosphere_order.split('->')
order = []
for k in vertical_order:
name = k.strip().replace('(','').replace(')','').split('+')
name = [k.strip() for k in name]
tmp = []
for n in name:
tmp.append(n)
self.atmospheres[n].active = True
order.append(tmp)
order_flat = [item for sublist in order for item in sublist]
# Check that straylight components, if any, are not at the last position
for atm in order_flat[:-1]:
if (self.atmospheres[atm].type == 'straylight'):
raise Exception("Straylight components can only be at the last position of a topology.")
self.order_atmospheres.append(order)
# Check that there are no two photospheres linked with ->
# because they do not make any sense
n_photospheres_linked = []
for atmospheres in self.order_atmospheres:
for order in atmospheres:
for k, atm in enumerate(order):
if (self.atmospheres[atm].type == 'photosphere'):
n_photospheres_linked.append(k)
if (len(n_photospheres_linked) != len(set(n_photospheres_linked))):
raise Exception("There are several photospheres linked with ->. This is not allowed.")
def normalize_ff(self):
"""
Normalize all filling factors so that they add to one to avoid later problems.
We use a softmax function to make sure they all add to one and can be unconstrained
ff_i = exp(x_i) / sum(exp(x_i))
Parameters
----------
None
Returns
-------
None
"""
for atmospheres in self.order_atmospheres:
for order in atmospheres:
total_ff = 0.0
for atm in order:
if (self.atmospheres[atm].type != 'straylight'):
if (self.working_mode == 'inversion'):
                            ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
else:
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
total_ff += ff
for atm in order:
if (self.atmospheres[atm].type != 'straylight'):
if (self.working_mode == 'inversion'):
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
self.atmospheres[atm].parameters['ff'] = ff / total_ff
self.atmospheres[atm].parameters['ff'] = physical_to_transformed(self.atmospheres[atm].parameters['ff'], self.atmospheres[atm].ranges['ff'][0], self.atmospheres[atm].ranges['ff'][1])
else:
ff = transformed_to_physical(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
self.atmospheres[atm].parameters['ff'] = ff / total_ff
self.atmospheres[atm].parameters['ff'] = physical_to_transformed(self.atmospheres[atm].parameters['ff'], -0.00001, 1.00001)
def synthesize_spectral_region(self, spectral_region, perturbation=False):
"""
Synthesize all atmospheres for a single spectral region and normalize to the continuum of the quiet Sun at disk center
Parameters
----------
spectral_region : str
Spectral region to synthesize
perturbation : bool
Set to True if you are synthesizing with a perturbation. In this case, the synthesis
is saved in spectrum.stokes_perturbed instead of spectrum.stokes
Returns
-------
None
"""
stokes = None
stokes_out = None
# Loop over all atmospheres
for i, atmospheres in enumerate(self.order_atmospheres):
for n, order in enumerate(atmospheres):
for k, atm in enumerate(order):
if (self.atmospheres[atm].spectrum.name == spectral_region):
# Update the boundary condition only for the first atmosphere if several are sharing ff
if (n > 0 and k == 0):
ind_low, ind_top = self.atmospheres[atm].wvl_range
if (perturbation):
stokes_out = self.atmospheres[atm].spectrum.stokes_perturbed[:, ind_low:ind_top] * hazel.util.i0_allen(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top], 1.0)[None,:]
else:
stokes_out = self.atmospheres[atm].spectrum.stokes[:, ind_low:ind_top] * hazel.util.i0_allen(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top], 1.0)[None,:]
if (self.atmospheres[atm].type == 'straylight'):
stokes, error = self.atmospheres[atm].synthesize(nlte=self.use_nlte)
if (error == 1):
                                raise Exception("Synthesis of straylight atmosphere {0} failed".format(atm))
stokes += (1.0 - self.atmospheres[atm].parameters['ff']) * stokes_out
else:
if (k == 0):
if (self.use_analytical_RF):
stokes, self.rf_analytical, error = self.atmospheres[atm].synthesize(stokes_out, returnRF=True, nlte=self.use_nlte)
else:
stokes, error = self.atmospheres[atm].synthesize(stokes_out, nlte=self.use_nlte)
else:
tmp, error = self.atmospheres[atm].synthesize(stokes_out, nlte=self.use_nlte)
stokes += tmp
ind_low, ind_top = self.atmospheres[atm].wvl_range
mean_wvl = np.mean(self.atmospheres[atm].spectrum.wavelength_axis[ind_low:ind_top])
i0 = hazel.util.i0_allen(mean_wvl, 1.0)
# Divide by i0
if (self.use_analytical_RF):
for k, v in self.rf_analytical.items():
if (k != 'ff'):
v /= i0
                        if (perturbation):
                            self.atmospheres[atm].spectrum.stokes_perturbed[:, ind_low:ind_top] = stokes / i0
                        else:
                            self.atmospheres[atm].spectrum.stokes[:, ind_low:ind_top] = stokes / i0
def set_nlte(self, option):
"""
Set calculation of Ca II 8542 A to NLTE
Parameters
----------
option : bool
Set to True to use NLTE, False to use LTE
"""
self.use_nlte = option
if (self.verbose >= 1):
self.logger.info('Setting NLTE for Ca II 8542 A to {0}'.format(self.use_nlte))
def synthesize(self, perturbation=False):
"""
Synthesize all atmospheres
Parameters
----------
perturbation : bool
Set to True if you are synthesizing with a perturbation. In this case, the synthesis
is saved in spectrum.stokes_perturbed instead of spectrum.stokes
Returns
-------
None
"""
if (self.working_mode == 'inversion'):
self.normalize_ff()
for k, v in self.spectrum.items():
self.synthesize_spectral_region(k, perturbation=perturbation)
if (v.normalization == 'off-limb'):
if (perturbation):
v.stokes_perturbed /= np.max(v.stokes_perturbed[0,:])
else:
v.stokes /= np.max(v.stokes[0,:])
if (v.psf_spectral is not None):
for i in range(4):
if (perturbation):
v.stokes_perturbed[i,:] = scipy.signal.convolve(v.stokes_perturbed[i,:], v.psf_spectral, mode='same', method='auto')
else:
v.stokes[i,:] = scipy.signal.convolve(v.stokes[i,:], v.psf_spectral, mode='same', method='auto')
if (v.interpolate_to_lr):
for i in range(4):
if (perturbation):
v.stokes_perturbed_lr[i,:] = np.interp(v.wavelength_axis_lr, v.wavelength_axis, v.stokes_perturbed[i,:])
else:
v.stokes_lr[i,:] = np.interp(v.wavelength_axis_lr, v.wavelength_axis, v.stokes[i,:])
def find_active_parameters(self, cycle):
"""
Find all active parameters in all active atmospheres in the current cycle
Parameters
----------
cycle : int
Cycle to consider
Returns
-------
None
"""
pars = []
coupled = []
self.nodes = []
left = 0
right = 0
for atmospheres in self.order_atmospheres:
for n, order in enumerate(atmospheres):
for k, atm in enumerate(order):
for l, par in self.atmospheres[atm].cycles.items():
if (par is not None):
if (hazel.util.isint(par[cycle])):
if (par[cycle] > 0):
# [Atmosphere name, n_nodes, nodes, value, range]
self.atmospheres[atm].nodes[l] = np.zeros(par[cycle])
self.atmospheres[atm].n_nodes[l] = par[cycle]
right += par[cycle]
n_lambda = len(self.atmospheres[atm].spectrum.wavelength_axis)
tmp = {'atm': atm, 'n_nodes': par[cycle], 'parameter': l,
'ranges': self.atmospheres[atm].ranges[l], 'delta': self.atmospheres[atm].epsilon[l],
'left': left, 'right': right, 'regularization': self.atmospheres[atm].regularization[l],
'coupled': False}
self.nodes.append(self.atmospheres[atm].nodes[l])
left = copy.copy(right)
pars.append(tmp)
else:
self.atmospheres[atm].nodes[l] = 0.0
self.atmospheres[atm].n_nodes[l] = 0
else:
n_lambda = len(self.atmospheres[atm].spectrum.wavelength_axis)
tmp = {'atm': atm, 'n_nodes': par[cycle], 'parameter': l, 'coupled': True}
coupled.append(tmp)
self.active_meta = pars
self.coupled_meta = coupled
if (not self.nodes):
raise Exception("No parameters to invert in cycle {0}. Please add them or reduce the number of cycles. ".format(cycle))
self.nodes = np.concatenate(self.nodes).ravel()
def synthesize_and_compute_rf(self, compute_rf=False, include_jacobian=False):
"""
Compute response functions for all free parameters according to all active_parameters
Parameters
----------
compute_rf : bool (optional, default False)
            If True, then compute the response functions. If not, just compute the synthesis.
        include_jacobian : bool (optional, default False)
            If True, multiply the response functions by the Jacobian of the transformation between transformed and physical parameters
Returns
-------
None
"""
self.synthesize()
if (not compute_rf):
return
n_active_pars = len(self.active_meta)
loop = 0
loop2 = 0
self.hessian_regularization = np.zeros(self.n_free_parameters_cycle)
self.grad_regularization = np.zeros(self.n_free_parameters_cycle)
# self.use_analytical_RF = False
for par in self.active_meta:
nodes = self.nodes[par['left']:par['right']]
lower = par['ranges'][0]
upper = par['ranges'][1]
if (self.verbose >= 4):
self.logger.info(" * RF to {0} - {1} - nodes={2}".format(par['parameter'], par['atm'], par['n_nodes']))
if (self.use_analytical_RF):
for i in range(par['n_nodes']):
rf = {}
for k, v in self.spectrum.items():
# The minus sign comes from the fact that we compute the RF numerically as
# (stokes-stokes_perturbed)/delta
# rf[k] = -self.atmospheres[par['atm']].rf_analytical[par['parameter']][:,:,i] * jacobian
rf[k] = -self.rf_analytical[par['parameter']][:,:,i]
rf[k] = rf[k][None, :, :]
if (loop == 0):
self.response = rf
else:
for k, v in self.spectrum.items():
self.response[k] = np.vstack([self.response[k], rf[k]])
if (par['regularization'] is not None):
if (par['regularization'][0] == 'l2-value'):
alpha = float(par['regularization'][1])
lower = par['ranges'][0]
upper = par['ranges'][1]
value = physical_to_transformed(float(par['regularization'][2]), lower, upper)
self.grad_regularization[par['left']:par['right']] = 2.0 * alpha * (self.atmospheres[par['atm']].nodes[par['parameter']] - value)
self.hessian_regularization[par['left']:par['right']] = 2.0 * alpha
loop += 1
else:
for i in range(par['n_nodes']):
perturbation = np.zeros(par['n_nodes'])
if (nodes[i] == 0):
perturbation[i] = self.epsilon * par['delta']
else:
perturbation[i] = self.epsilon * nodes[i]
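                    # Numerical RF sketch: node i is perturbed by a relative
                    # amount epsilon (scaled by 'delta' when the node is zero)
                    # and the derivative is the one-sided difference
                    # (stokes - stokes_perturbed) / perturbation[i].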
# Perturb this parameter
self.atmospheres[par['atm']].nodes[par['parameter']] = nodes + perturbation
# Also perturb those parameters that are coupled
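                    # Note: for coupled entries, the 'n_nodes' field stores the
                    # name of the atmosphere the parameter is coupled to (see
                    # find_active_parameters), hence the comparison with par['atm'].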
for par2 in self.coupled_meta:
if (par2['coupled'] is True):
if (par['atm'] == par2['n_nodes'] and par['parameter'] == par2['parameter']):
if (self.verbose >= 4):
self.logger.info(" * Coupling RF to {0} - {1}".format(par2['parameter'], par2['atm']))
self.atmospheres[par2['atm']].nodes[par2['parameter']] = nodes + perturbation
# Synthesize
self.synthesize(perturbation=True)
# And come back to the original value of the nodes
self.atmospheres[par['atm']].nodes[par['parameter']] = nodes
for par2 in self.coupled_meta:
if (par2['coupled'] is True):
if (par['atm'] == par2['n_nodes'] and par['parameter'] == par2['parameter']):
self.atmospheres[par2['atm']].nodes[par2['parameter']] = nodes
if (include_jacobian):
# jacobian =
# self.atmospheres[par['atm']].jacobian[par['parameter']]
jacobian = jacobian_transformation(nodes[i], lower, upper)
else:
jacobian = 1.0
rf = {}
for k, v in self.spectrum.items():
if (v.interpolate_to_lr):
rf[k] = jacobian * np.expand_dims((v.stokes_lr - v.stokes_perturbed_lr) / perturbation[i], 0)
else:
rf[k] = jacobian * np.expand_dims((v.stokes - v.stokes_perturbed) / perturbation[i], 0)
# rf = np.expand_dims((self.spectrum['spec1'].stokes - self.spectrum['spec1'].stokes_perturbed) / perturbation[i], 0)
if (loop == 0):
self.response = rf
else:
# self.response = np.vstack([self.response, rf])
for k, v in self.spectrum.items():
self.response[k] = np.vstack([self.response[k], rf[k]])
if (par['regularization'] is not None):
if (par['regularization'][0] == 'l2-value'):
alpha = float(par['regularization'][1])
lower = par['ranges'][0]
upper = par['ranges'][1]
value = physical_to_transformed(float(par['regularization'][2]), lower, upper)
self.grad_regularization[par['left']:par['right']] = 2.0 * alpha * (self.atmospheres[par['atm']].nodes[par['parameter']] - float(par['regularization'][2]))
self.hessian_regularization[par['left']:par['right']] = 2.0 * alpha
loop += 1
# for i in range(par['n_nodes']):
# rf = {}
# for k, v in self.spectrum.items():
# # The minus sign comes from the fact that we compute the RF numerically as
# # (stokes-stokes_perturbed)/delta
# # rf[k] = -self.atmospheres[par['atm']].rf_analytical[par['parameter']][:,:,i] * jacobian
# rf[k] = -self.rf_analytical[par['parameter']][:,:,i]
# rf[k] = rf[k][None, :, :]
# if (loop2 == 0):
# self.response2 = rf
# else:
# for k, v in self.spectrum.items():
# self.response2[k] = np.vstack([self.response2[k], rf[k]])
# loop2 += 1
# import matplotlib.pyplot as pl
# f, ax = pl.subplots(nrows=3, ncols=2, figsize=(9,9))
# ax = ax.flatten()
# for i in range(3):
# ax[i].plot(self.response['spec1'][i,0,0:60], label='numerical')
# ax[i].plot(self.response2['spec1'][i,0,0:60], label='analytical')
# ax[i].legend()
# pl.show()
# breakpoint()
# # self.response = copy.deepcopy(self.response2)
# self.use_analytical_RF = True
def flatten_parameters_to_reference(self, cycle):
"""
Flatten all current parameters to the reference atmosphere
Parameters
----------
cycle : int
Current cycle
Returns
-------
None
"""
if (self.working_mode == 'inversion'):
for k, v in self.atmospheres.items():
v.set_reference(cycle=cycle)
for k, v in self.spectrum.items():
v.stokes_cycle[cycle] = copy.deepcopy(v.stokes)
if (v.interpolate_to_lr):
v.stokes_lr_cycle[cycle] = copy.deepcopy(v.stokes_lr)
if (self.working_mode == 'inversion'):
v.chi2_cycle[cycle] = copy.deepcopy(v.chi2)
v.bic_cycle[cycle] = copy.deepcopy(self.n_free_parameters * np.log(v.dof) + v.dof * np.log(v.rss))
v.aic_cycle[cycle] = copy.deepcopy(2.0 * self.n_free_parameters + v.dof * np.log(v.rss))
def set_new_model(self, nodes):
"""
Set the nodes of the current model to the values passed on the arguments
Parameters
----------
nodes : float
Array with the new set of nodes
Returns
-------
None
"""
n_active_pars = len(self.active_meta)
# Modify all active parameters
for par in self.active_meta:
left = par['left']
right = par['right']
self.atmospheres[par['atm']].nodes[par['parameter']] = nodes[left:right]
# Modify all coupled parameters accordingly
for par in self.coupled_meta:
for par2 in self.active_meta:
if (par2['atm'] == par['n_nodes'] and par2['parameter'] == par['parameter']):
left = par2['left']
right = par2['right']
self.atmospheres[par['atm']].nodes[par['parameter']] = nodes[left:right]
self.atmospheres[par['atm']].parameters[par['parameter']] = copy.copy(self.atmospheres[par2['atm']].parameters[par2['parameter']])
def modified_svd_inverse(self, H, tol=1e-8):
"""
        Compute the inverse of the Hessian matrix using a modified SVD, by thresholding each subspace separately
Parameters
----------
H : float
Hessian matrix
tol : float
Tolerance for the singular value of each subspace
Returns
-------
        U, w_inv, VT : float arrays
            SVD factors of H, where w_inv contains the inverses of the thresholded singular values
"""
try:
U, w, VT = np.linalg.svd(H, full_matrices=False)
except np.linalg.LinAlgError:
U, w, VT = scipy.linalg.svd(H, full_matrices=False, lapack_driver='gesvd') # This calculation should be more robust but slower
w_new = np.zeros_like(w)
for par in self.active_meta:
left = par['left']
right = par['right']
Ui = np.zeros_like(U)
Ui[:,left:right] = U[:,left:right]
Gamma_i = np.diagonal(np.diag(w) @ Ui.T @ U).copy()
wmax = np.max(np.abs(Gamma_i))
Gamma_i[np.abs(Gamma_i) < tol*wmax] = 0.0
w_new += Gamma_i
w_new_inv = np.zeros_like(w)
ind = np.where(w_new != 0)[0]
w_new_inv[ind] = 1.0 / w_new[ind]
return U, w_new_inv, VT
def compute_chi2(self, only_chi2=False, weights=None):
"""
Compute chi2 for all spectral regions
Parameters
----------
        only_chi2 : bool
            Control whether the gradient and Hessian are returned
        weights : float, optional
            Precomputed weights (note that the weights are currently recomputed internally)
        Returns
        -------
        chi2 : float
            Value of chi2, returned together with its gradient and Hessian unless only_chi2 is True
"""
chi2 = 0.0
rss = 0.0
n = len(self.nodes)
dchi2 = np.zeros(n)
ddchi2 = np.zeros((n,n))
for k, v in self.spectrum.items():
if (v.interpolate_to_lr):
residual = (v.stokes_lr - v.obs)
else:
residual = (v.stokes - v.obs)
# Do not use weights. This is used for the computation of errors
# if (weights is None):
weights = (v.stokes_weights[:,self.cycle][:,None] * v.wavelength_weights) * v.factor_chi2
chi2 += np.sum(weights * residual**2)
rss += np.sum(residual**2)
if (not only_chi2):
response = self.response[k]
dchi2 += -2.0 * np.sum(weights[None,:,:] * response * residual[None,:,:] , axis=(1,2)) #/ v.dof
ddchi2 += 2.0 * np.sum(weights[None,None,:,:] * response[None,:,:,:] * response[:,None,:,:] , axis=(2,3)) #/ v.dof
v.chi2 = chi2
v.rss = rss
if (not only_chi2):
return chi2, dchi2, ddchi2
else:
return chi2
# if (not only_chi2):
# return chi2, dchi2, ddchi2
# else:
# return chi2
def compute_uncertainty(self):
"""
Compute the uncertainty in the parameters at the minimum with the current Hessian
Parameters
----------
None
Returns
-------
None
"""
#----------------------------
# Recalculate chi2 without weights
# chi2 = 0.0
# for k, v in self.spectrum.items():
# residual = (v.stokes - v.obs)
# weights = v.dof / residual**2
# # Calculate Hessian
# self.synthesize_and_compute_rf(compute_rf=True)
# chi2, dchi2, ddchi2 = self.compute_chi2(weights=weights)
# hessian = 0.5 * ddchi2
#----------------------------
# Recalculate chi2 without weights
# Calculate Hessian
self.synthesize_and_compute_rf(compute_rf=True, include_jacobian=True)
chi2, dchi2, ddchi2 = self.compute_chi2()
hessian = 0.5 * ddchi2
U, w_inv, VT = self.modified_svd_inverse(hessian, tol=self.svd_tolerance)
cov = VT.T.dot(np.diag(w_inv)).dot(U.T)
# breakpoint()
for par in self.active_meta:
left = par['left']
right = par['right']
            # 1-sigma confidence scaling from a chi2 distribution with as many
            # degrees of freedom as nodes of this parameter
            rf = scipy.stats.chi2(right-left)
            delta = np.sqrt(rf.isf(1.0 - scipy.special.erf(1.0/np.sqrt(2.0))))
cov_diagonal = np.abs(np.diagonal(cov[left:right,left:right]))
# This gives 1sigma error in the transformed domain
error = np.sqrt(cov_diagonal) * delta
# Multiply by the Jacobian of the transformation to compute the error in the physical quantities
error *= jacobian_transformation(self.nodes[left:right], par['ranges'][0], par['ranges'][1])
self.atmospheres[par['atm']].error[par['parameter']] = error
def _fun_backtracking(self, log_lambda, dchi2, ddchi2):
H = 0.5 * (ddchi2 + np.diag(self.hessian_regularization))
H += np.diag(10.0**(log_lambda) * np.diag(H))
gradF = 0.5 * (dchi2 + self.grad_regularization)
U, w_inv, VT = self.modified_svd_inverse(H, tol=self.svd_tolerance)
# xnew = xold - H^-1 * grad F
delta = -VT.T.dot(np.diag(w_inv)).dot(U.T).dot(gradF)
        # Clip the new solution so that the step is reasonable
new_solution = self.nodes + np.clip(delta, -self.step_limiter_inversion, self.step_limiter_inversion)
self.set_new_model(new_solution)
self.synthesize_and_compute_rf()
chi2 = self.compute_chi2(only_chi2=True)
if (self.verbose >= 4):
self.logger.info(' - Backtracking - lambda: {0:7.5f} - chi2: {1:7.5f}'.format(10.0**log_lambda, chi2))
return chi2
def backtracking_brent(self, dchi2, ddchi2, maxiter=10, bounds=[-3.0,3.0], tol=1e-2):
tmp = scipy.optimize.minimize_scalar(self._fun_backtracking, bounds=bounds, args=(dchi2, ddchi2),
method='bounded', options={'xatol': tol, 'maxiter': maxiter})
return 10.0**tmp['x']
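    # Note that the search runs over log10(lambda), so the default
    # bounds=[-3, 3] explore lambda in [1e-3, 1e3]; tmp['x'] is the best
    # exponent found by the bounded Brent method.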
def backtracking_parabolic(self, dchi2, ddchi2, direction='down', maxiter=5, lambda_init=1e-3, current_chi2=1e10):
"""
Do the backtracking to get an optimal value of lambda in the LM algorithm
Parameters
----------
dchi2 : float
Gradient of the chi2
ddchi2 : float
Second order derivatives with which the Hessian is computed
direction : str, optional
Direction on which do the backtracking ('down'/'up' for decreasing/increasing lambda)
maxiter : int
Maximum number of iterations
lambda_init : float
Initial value of lambda
current_chi2 : float
Current best chi2 to compare with those of the backtracking
Returns
-------
lambda_opt : float
Optimal value of lambda found. Bracketed value if bracketing has been possible or just the best value otherwise
bracketed : bool
True if the best value has been bracketed
best_chi2 : float
Best value of chi2 found
"""
lambdaLM = lambda_init
chi2_arr = []
lambdas = []
sols = []
keepon = True
bracketed = False
loop = 0
best_chi2 = current_chi2
while keepon:
H = 0.5 * (ddchi2 + np.diag(self.hessian_regularization))
H += np.diag(lambdaLM * np.diag(H))
gradF = 0.5 * (dchi2 + self.grad_regularization)
U, w_inv, VT = self.modified_svd_inverse(H, tol=self.svd_tolerance)
# xnew = xold - H^-1 * grad F
delta = -VT.T.dot(np.diag(w_inv)).dot(U.T).dot(gradF)
            # Clip the new solution so that the step is reasonable
new_solution = self.nodes + np.clip(delta, -self.step_limiter_inversion, self.step_limiter_inversion)
sols.append(new_solution)
self.set_new_model(new_solution)
self.synthesize_and_compute_rf()
chi2_arr.append(self.compute_chi2(only_chi2=True))
lambdas.append(lambdaLM)
if (self.verbose >= 4):
if (direction == 'down'):
self.logger.info(' - Backtracking: {0:2d} - lambda: {1:7.5f} - chi2: {2:7.5f}'.format(loop, lambdaLM, chi2_arr[-1]))
else:
self.logger.info(' * Backtracking: {0:2d} - lambda: {1:7.5f} - chi2: {2:7.5f}'.format(loop, lambdaLM, chi2_arr[-1]))
# If we improve the chi2
if (chi2_arr[-1] < best_chi2):
best_chi2 = chi2_arr[-1]
ind_min = np.argmin(chi2_arr)
if (loop > 1):
                # Have we bracketed the minimum?
if (ind_min != 0 and ind_min != len(chi2_arr)-1):
keepon = False
bracketed = True
            # Stop if lambda becomes too small or we exceed the maximum number of iterations
if (lambdaLM < 1e-3 or loop > maxiter):
keepon = False
min_found = False
if (direction == 'down'):
lambdaLM /= np.sqrt(10.0)
else:
lambdaLM *= np.sqrt(10.0)
loop += 1
# Parabolic interpolation of the optimal value of lambda
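        # For a quadratic fit y = c0*u^2 + c1*u + c2 in u = log(lambda), the
        # minimum lies at u* = -c1 / (2*c0), hence lambda_opt = exp(-c1/(2*c0)).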
if (bracketed):
coeff = np.polyfit(np.log(lambdas[ind_min-1:ind_min+2]), chi2_arr[ind_min-1:ind_min+2], 2)
lambda_opt = np.exp(-coeff[1] / (2.0*coeff[0]))
else:
lambda_opt = lambdas[ind_min]
return lambda_opt, bracketed, best_chi2, np.min(chi2_arr)
def randomize(self):
"""
Randomize all free parameters to lie uniformly in the interval [-2,2] in the transformed
domain
"""
self.nodes = np.random.uniform(low=-2.0, high=2.0, size=self.nodes.shape)
def invert(self, randomize=False, randomization_ind=None):
"""
Invert all atmospheres
Parameters
----------
        randomize : bool
            If True, randomize the free parameters before starting the inversion
        randomization_ind : int, optional
            Index of the current randomization, used only for logging
Returns
-------
None
"""
first = True
# Reset reference model to the one loaded from the file
for k, v in self.atmospheres.items():
v.reset_reference()
# Compute normalization factor for the chi^2
for k, v in self.spectrum.items():
v.factor_chi2 = 1.0 / (v.noise**2 * v.dof)
lambdaLM = 10.0
lambda_opt = 10.0
bestchi2 = 1e10
for self.cycle in range(self.n_cycles):
if (self.verbose >= 2):
self.logger.info('-------------')
if (randomization_ind):
self.logger.info(' Cycle {0} - Randomization {1} '.format(self.cycle, randomization_ind))
else:
self.logger.info(' Cycle {0} '.format(self.cycle))
for k, v in self.spectrum.items():
self.logger.info(' Weights for region {0} : SI={1} - SQ={2} - SU={3} - SV={4}'.format(k, v.stokes_weights[0,self.cycle], v.stokes_weights[1,self.cycle],
v.stokes_weights[2,self.cycle], v.stokes_weights[3,self.cycle]))
self.logger.info('-------------')
# Find all active parameters for this cycle and print them in the output
self.find_active_parameters(self.cycle)
tmp = [pars['atm'] for pars in self.active_meta]
tmp = list(set(tmp))
self.n_free_parameters_cycle = 0
for k, v in self.atmospheres.items():
if (k in tmp):
if (self.verbose >= 3):
self.logger.info('Free parameters for {0}'.format(k))
for pars in self.active_meta:
if (pars['atm'] == k):
if (self.verbose >= 3):
if (pars['coupled'] is False):
if (pars['n_nodes'] == 1):
if (pars['regularization'] is not None):
self.logger.info(' - {0} with {1} node - Regularization -> type:{2}, weight:{3}, value:{4}'.format(pars['parameter'],
pars['n_nodes'], pars['regularization'][0], pars['regularization'][1], pars['regularization'][2]))
else:
self.logger.info(' - {0} with {1} node - Not regularized'.format(pars['parameter'], pars['n_nodes']))
else:
if (pars['regularization'] is not None):
self.logger.info(' - {0} with {1} nodes - Regularization -> type:{2}, weight:{3}, value:{4}'.format(pars['parameter'],
pars['n_nodes'], pars['regularization'][0], pars['regularization'][1], pars['regularization'][2]))
else:
self.logger.info(' - {0} with {1} nodes - Not regularized'.format(pars['parameter'], pars['n_nodes']))
else:
self.logger.info(' - {0} coupled to {1} variable'.format(pars['parameter'], pars['n_nodes']))
if (pars['coupled'] is False):
self.n_free_parameters_cycle += pars['n_nodes']
# Randomize parameters if necessary
if (randomize):
self.randomize()
keepon = True
iteration = 0
# Main Levenberg-Marquardt algorithm
self.synthesize_and_compute_rf(compute_rf=True)
chi2, dchi2, ddchi2 = self.compute_chi2()
while keepon:
# Simple parabolic backtracking
if (self.backtracking == 'parabolic'):
                    lambda_opt, bracketed, best_chi2, backtracking_bestchi2_down = self.backtracking_parabolic(dchi2, ddchi2, direction='down', maxiter=5, lambda_init=lambdaLM, current_chi2=chi2)
backtracking_bestchi2 = copy.copy(backtracking_bestchi2_down)
# If solution is not bracketed, then try on the other sense and use the best of the two
if (not bracketed):
                        lambda_opt_up, bracketed, best_chi2_up, backtracking_bestchi2_up = self.backtracking_parabolic(dchi2, ddchi2, direction='up', maxiter=2, lambda_init=lambdaLM)
if (best_chi2_up < best_chi2):
lambda_opt = lambda_opt_up
backtracking_bestchi2 = np.min([backtracking_bestchi2, backtracking_bestchi2_up])
# Bounded Brent backtracking
if (self.backtracking == 'brent'):
lambda_opt = self.backtracking_brent(dchi2, ddchi2, maxiter=10, bounds=[-4.0,1.0], tol=1e-2)
# if (self.verbose >= 3):
# self.logger.info(' * Optimal lambda: {0}'.format(lambda_opt))
# If after backtracking the chi2 is larger than the current one, then increase lambda and go to the iteration
# print(chi2, backtracking_bestchi2)
# if (chi2 < backtracking_bestchi2 and iteration > 1):
# lambdaLM *= 100.0
# # print('breaking')
# continue
# Give the final step
H = 0.5 * (ddchi2 + np.diag(self.hessian_regularization))
H += np.diag(lambda_opt * np.diag(H))
gradF = 0.5 * (dchi2 + self.grad_regularization)
U, w_inv, VT = self.modified_svd_inverse(H, tol=self.svd_tolerance)
# xnew = xold - H^-1 * grad F
delta = -VT.T.dot(np.diag(w_inv)).dot(U.T).dot(gradF)
                # New solution: clip the step so that it remains reasonable
                new_solution = self.nodes + np.clip(delta, -self.step_limiter_inversion, self.step_limiter_inversion)
                self.set_new_model(new_solution)
                self.nodes = new_solution
self.synthesize_and_compute_rf(compute_rf=True)
chi2, dchi2, ddchi2 = self.compute_chi2()
rel = 2.0 * (chi2 - bestchi2) / (chi2 + bestchi2)
if (self.verbose > 2):
for k, v in self.atmospheres.items():
self.logger.info('')
self.logger.info('-----------')
self.logger.info('{0}'.format(k))
self.logger.info('-----------')
if (v.type == 'chromosphere'):
v.print_parameters(first=first)
if (v.type == 'photosphere'):
v.print_parameters(first=first)
if (v.type == 'parametric'):
v.print_parameters(first=first)
first = False
if (self.verbose >= 2):
self.logger.info('==============================================================================')
self.logger.info('It: {0} - chi2: {1:10.6f} - lambda_opt: {2:10.6f} - rel: {3:10.6f}'.format(iteration, chi2, lambda_opt, np.abs(rel)))
self.logger.info('==============================================================================')
# Increase the optimal by 100 to find again the optimal value
lambdaLM = 100.0 * lambda_opt
bestchi2 = copy.copy(chi2)
if (np.abs(rel) < self.relative_error or iteration > self.max_iterations):
keepon = False
iteration += 1
self.set_new_model(self.nodes)
# Calculate final chi2
# self.synthesize_and_compute_rf()
# chi2 = self.compute_chi2(only_chi2=True)
self.compute_uncertainty()
# if (self.verbose >= 2):
# self.atmospheres['ch1'].print_parameters(first=first, error=True)
self.flatten_parameters_to_reference(self.cycle)
def _func_grad(self, x):
"""
        Auxiliary function to use with optimization methods that use gradients
"""
self.nodes = x
self.set_new_model(self.nodes)
self.synthesize_and_compute_rf(compute_rf=True)
self.chi2, dchi2, _ = self.compute_chi2()
return self.chi2, dchi2
def _func_nograd(self, x):
"""
        Auxiliary function to use with optimization methods that do not use gradients
"""
self.nodes = x
self.set_new_model(self.nodes)
self.synthesize_and_compute_rf(compute_rf=False)
self.chi2 = self.compute_chi2(only_chi2=True)
return self.chi2
def _callback_general(self, x):
if (self.verbose >= 2):
self.logger.info('chi2: {0}'.format(self.chi2))
def invert_external(self, algorithm, use_jacobian=False, **kwargs):
"""
Invert all atmospheres
Parameters
----------
        algorithm : callable
            External optimization function with a scipy.optimize-like interface
        use_jacobian : bool
            If True, pass the function that also returns the gradient to the optimizer
Returns
-------
None
"""
for k, v in self.spectrum.items():
v.factor_chi2 = 1.0 / (v.noise**2 * v.dof)
for self.cycle in range(self.n_cycles):
if (self.verbose >= 2):
self.logger.info('-------------')
self.logger.info(' Cycle {0} '.format(self.cycle))
for k, v in self.spectrum.items():
self.logger.info(' Weights for region {0} : SI={1} - SQ={2} - SU={3} - SV={4}'.format(k, v.stokes_weights[0,self.cycle], v.stokes_weights[1,self.cycle],
v.stokes_weights[2,self.cycle], v.stokes_weights[3,self.cycle]))
self.logger.info('-------------')
self.find_active_parameters(self.cycle)
tmp = [pars['atm'] for pars in self.active_meta]
tmp = list(set(tmp))
self.n_free_parameters_cycle = 0
for k, v in self.atmospheres.items():
if (k in tmp):
if (self.verbose >= 3):
self.logger.info('Free parameters for {0}'.format(k))
for pars in self.active_meta:
if (pars['atm'] == k):
if (self.verbose >= 3):
if (pars['coupled'] is False):
if (pars['n_nodes'] == 1):
self.logger.info(' - {0} with {1} node'.format(pars['parameter'], pars['n_nodes']))
else:
self.logger.info(' - {0} with {1} nodes'.format(pars['parameter'], pars['n_nodes']))
else:
self.logger.info(' - {0} coupled to {1} variable'.format(pars['parameter'], pars['n_nodes']))
if (pars['coupled'] is False):
self.n_free_parameters_cycle += pars['n_nodes']
n_pars = len(self.nodes)
if (use_jacobian):
tmp = algorithm(self._func_grad, self.nodes, jac=True, callback=self._callback_general, **kwargs)
else:
tmp = algorithm(self._func_nograd, self.nodes, callback=self._callback_general, **kwargs)
self._func_grad(tmp['x'])
self._callback_general(tmp['x'])
self.set_new_model(tmp['x'])
self.flatten_parameters_to_reference(self.cycle)
def read_observation(self):
for k, v in self.spectrum.items():
v.read_observation(pixel=self.pixel)
# v.read_straylight(pixel=self.pixel)
|
{"hexsha": "aa0df0366b432f0026a8ed2e19ca32e7201235b5", "size": 93249, "ext": "py", "lang": "Python", "max_stars_repo_path": "hazel/model.py", "max_stars_repo_name": "aasensio/hazel2", "max_stars_repo_head_hexsha": "d9b551915f5d2bb399e03b054dffe4ca42fedeb5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-08-31T11:13:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T02:30:56.000Z", "max_issues_repo_path": "hazel/model.py", "max_issues_repo_name": "aasensio/hazel2", "max_issues_repo_head_hexsha": "d9b551915f5d2bb399e03b054dffe4ca42fedeb5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2018-04-03T15:09:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T10:10:45.000Z", "max_forks_repo_path": "hazel/model.py", "max_forks_repo_name": "aasensio/hazel2", "max_forks_repo_head_hexsha": "d9b551915f5d2bb399e03b054dffe4ca42fedeb5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-01T13:47:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-23T20:49:08.000Z", "avg_line_length": 42.366651522, "max_line_length": 212, "alphanum_fraction": 0.5006595245, "include": true, "reason": "import numpy,import scipy", "num_tokens": 20475}
|
######################################################################
# max_trajectory.jl
# tries to maximize phi(x) collected
# TODO: this is a wreck. clean it so it looks like clerc_trajectory.
######################################################################
function max_trajectory(em::ErgodicManager, tm::TrajectoryManager, mu::VF, Sigma::MF)
return max_trajectory(em, tm, [mu], [Sigma])
end
function max_trajectory(em::ErgodicManager, tm::TrajectoryManager, mus::VVF, Sigmas::VMF, weights::VF=ones(length(mus)); max_iters=100)
# initialize trajectory
xd, ud = initialize(tm.initializer, em, tm)
	# TODO: this really needs a proper termination condition
for i = 1:max_iters
ad, bd = compute_gradients(tm, xd, ud, mus, Sigmas, weights)
A, B = linearize(tm.dynamics, xd, ud, tm.h)
K, C = LQ(A, B, ad, bd, tm.Qn, tm.Rn, tm.N)
#zd, vd = convex_descent(A, B, ad, bd, N)
zd, vd = apply_LQ_gains(A, B, K, C)
step_size = .15 / sqrt(i)
#step_size = .25 / sqrt(i)
# printing statistics for testing
println("i = ",i)
dd = directional_derivative(ad, bd, zd, vd)
sdd = scaled_dd(ad, bd, zd, vd)
#es = ergodic_score(em, xd)
#ts = total_score(em, xd, ud, T)
#cs = control_score(ud, 0.01*eye(2), T)
#println("es = ", es)
#println("ts = ", ts)
#println("cs = ", cs)
println("dd = ", dd)
println("scaled_dd = ", sdd)
#println("alpha = ", step_size)
println("##################################")
descend!(xd, ud, zd, vd, step_size)
end
return xd, ud
end
export max_trajectory
# TODO: should really take in a bunch of gaussians here
# returns a and b: matrices of size (n, N+1) and (m, N) holding the gradient terms
function compute_gradients(tm::TrajectoryManager, xd::VVF, ud::VVF, mus::VVF, Sigmas::VMF, weights::VF)
#a = Array(VF, tm.N+1)
#b = Array(VF, tm.N)
a = zeros(tm.dynamics.n, tm.N+1)
b = zeros(tm.dynamics.m, tm.N)
for ni = 1:tm.N
a[1:2, ni] = compute_an(mus, Sigmas, weights, xd[ni], tm.h, tm.q)
b[:, ni] = tm.h * tm.R * ud[ni]
end
a[1:2,tm.N+1] = compute_an(mus, Sigmas, weights, xd[tm.N+1], tm.h, tm.q)
return a, b
end
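# Sketch of the math in compute_an (assuming phi is the weighted Gaussian sum used
# in this file): with phi(x) = sum_i w_i N(x; mu_i, Sigma_i), the derivative of the
# collected reward q*h*phi(x) involves terms
#   w_i * q*h * N(x; mu_i, Sigma_i) * Sigma_i^{-1} * (x - mu_i)
# (the gradient of the exponent, up to the sign convention of the descent
# direction), which is what the loop below accumulates.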
function compute_an(mus::VVF, Sigmas::VMF, weights::VF, x::VF, h::Float64, q::Float64)
num_gauss = length(weights)
an = zeros(2)
for i = 1:num_gauss
mu = mus[i]
Sigma = Sigmas[i]
xmu = x[1:2] - mu
inv_Sigma = inv(Sigma)
		c = q*h*exp(-0.5*dot(xmu, inv_Sigma*xmu)) / (2*pi*sqrt(det(Sigma)))
an += weights[i]*c*inv_Sigma*xmu
end
return an
end
|
{"hexsha": "5375f3d6f7c4ebb867d67500b66e40bdd87da918", "size": 2413, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/trajectory_generation/max.jl", "max_stars_repo_name": "josh0tt/ErgodicControl.jl", "max_stars_repo_head_hexsha": "aa9235ea6eca441e2239375f8745dfd2b30ef682", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-18T18:53:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-06T12:20:04.000Z", "max_issues_repo_path": "src/trajectory_generation/max.jl", "max_issues_repo_name": "josh0tt/ErgodicControl.jl", "max_issues_repo_head_hexsha": "aa9235ea6eca441e2239375f8745dfd2b30ef682", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trajectory_generation/max.jl", "max_forks_repo_name": "josh0tt/ErgodicControl.jl", "max_forks_repo_head_hexsha": "aa9235ea6eca441e2239375f8745dfd2b30ef682", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-22T20:31:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T16:53:45.000Z", "avg_line_length": 29.4268292683, "max_line_length": 135, "alphanum_fraction": 0.6000828844, "num_tokens": 817}
|
import pandas as pd
import numpy as np
from time import time
from itertools import combinations_with_replacement
import pickle
import mapping_career_causeways.compare_nodes_utils as compare_nodes_utils
import os
import boto3
from ast import literal_eval
import sys
## SETUP
# Get the skill integer to check
if len(sys.argv) < 2:
print('Core skill integer missing!')
    raise ValueError('Core skill integer missing!')
else:
j_skill = int(sys.argv[1])
# Set up AWS params
df_keys = pd.read_csv('../../private/karlisKanders_accessKeys.csv')
os.environ["AWS_ACCESS_KEY_ID"] = df_keys['Access key ID'].iloc[0]
os.environ["AWS_SECRET_ACCESS_KEY"] = df_keys['Secret access key'].iloc[0]
bucket_name = 'ojd-temp-storage'
s3_output_folder = 'outputs_Essential/'
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket(name=bucket_name)
# Set up folder for temporary data
data_folder = '../../data/temp_files/'
if not os.path.exists(data_folder):
    os.mkdir(data_folder)
# Load embeddings and lists of skills items of each occupation
files_to_download = [
'embeddings_skills_description_SBERT.npy',
'topOccupation_to_all_skills.pickle',
'topOccupation_to_essential_skills.pickle',
'sorted_core_skills_id.pickle']
for file in files_to_download:
    if not os.path.exists(data_folder + file):
        s3_resource.Object(bucket_name=bucket_name, key=file).download_file(data_folder + file)
embeddings = np.load(data_folder + files_to_download[0])
node_to_essential_items_Top = pickle.load(open(data_folder + files_to_download[2], 'rb'))
node_to_all_items_Top = pickle.load(open(data_folder + files_to_download[1], 'rb'))
sorted_core_skills = pickle.load(open(data_folder + files_to_download[3], 'rb'))
n_occupations = len(node_to_essential_items_Top)
## ANALYSIS
## Select skill to add to origin occupation's skill-set
skill_id = sorted_core_skills[j_skill]
## Set up "origin" sector and "destination" sectors
# Origin nodes: here, only ESSENTIAL items
from_node_to_items = node_to_essential_items_Top.copy()
from_node_to_items['sector'] = 'origin'
# Add the extra skill to job_i skillset
t = time()
skill_added_to = []
new_items_list = []
for job_i, row in from_node_to_items.iterrows():
# Original skillset of the origin occupation
origin_skillset = row.items_list.copy()
    # Check whether the skill is already in the skillset
if skill_id not in origin_skillset:
list_of_skills = sorted([skill_id] + origin_skillset)
new_items_list.append(str(list_of_skills))
skill_added_to.append(row.original_id)
else:
new_items_list.append(str(origin_skillset))
# Re-evaluate all items lists so that they are treated as lists
from_node_to_items['items_list'] = new_items_list
from_node_to_items['items_list'] = from_node_to_items['items_list'].apply(literal_eval)
t_elapsed = time()-t
print(f"Added skill #{skill_id} to {len(skill_added_to)} occupations in {t_elapsed:.2f} seconds")
# Destination nodes: only ESSENTIAL items
to_node_to_items = node_to_essential_items_Top.copy()
to_node_to_items['sector'] = 'destination'
to_node_to_items['id'] = to_node_to_items['id'] + n_occupations
# Combine all into one dataframe
node_to_items = pd.concat([from_node_to_items, to_node_to_items]).reset_index(drop=True)
# Set up the combination of sectors to check
combos = [('origin','destination')]
# Perform the comparison!
comp_all_to_essential = compare_nodes_utils.CompareSectors(
node_to_items,
embeddings,
combos,
metric='cosine',
symmetric=False)
t = time()
comp_all_to_essential.run_comparisons(dump=False)
comp_all_to_essential.collect_comparisons()
t_elapsed = time()-t
print('===============')
print(f"Total time elapsed: {t_elapsed:.0f} seconds")
# Select only the edges from origin to destination occupations
W_all_to_essential = comp_all_to_essential.D
print(W_all_to_essential.shape)
i_edges = [edge[0] for edge in comp_all_to_essential.real_edge_list]
from_edges = np.array(comp_all_to_essential.real_edge_list)[np.where(np.array(i_edges)<n_occupations)[0]]
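# Assemble the dense origin-by-destination matrix; destination node ids were offset by n_occupations above.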
W_perturbed = np.zeros((n_occupations,n_occupations))
for edge in from_edges:
W_perturbed[edge[0], edge[1]-n_occupations] = W_all_to_essential[edge[0],edge[1]]
# Take care of nulls
W_perturbed[np.isinf(W_perturbed)] = 0
# Save
output_file_name = f"W_perturbed_essential_{j_skill}_Skill_{skill_id}.npy"
np.save(data_folder + output_file_name, W_perturbed)
# Upload to S3
s3_resource.Object(bucket_name, s3_output_folder + output_file_name).upload_file(Filename=data_folder + output_file_name)
|
{"hexsha": "480f32d7663d1a84d46672f38b84da6f8c057278", "size": 4560, "ext": "py", "lang": "Python", "max_stars_repo_path": "mapping_career_causeways/scripts/upskilling_aws_scripts/skill_perturbation_essential_C.py", "max_stars_repo_name": "bb20417/mapping-career-causeways", "max_stars_repo_head_hexsha": "cb8c57a2c4a0ab40a2bba4eca6664601bcc24cad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-11-26T13:01:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T13:32:33.000Z", "max_issues_repo_path": "mapping_career_causeways/scripts/upskilling_aws_scripts/skill_perturbation_essential_C.py", "max_issues_repo_name": "bb20417/mapping-career-causeways", "max_issues_repo_head_hexsha": "cb8c57a2c4a0ab40a2bba4eca6664601bcc24cad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-01-22T10:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T14:17:49.000Z", "max_forks_repo_path": "mapping_career_causeways/scripts/upskilling_aws_scripts/skill_perturbation_essential_C.py", "max_forks_repo_name": "bb20417/mapping-career-causeways", "max_forks_repo_head_hexsha": "cb8c57a2c4a0ab40a2bba4eca6664601bcc24cad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-01-06T16:44:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T14:47:54.000Z", "avg_line_length": 33.7777777778, "max_line_length": 121, "alphanum_fraction": 0.7734649123, "include": true, "reason": "import numpy", "num_tokens": 1145}
|
```python
import psi4
from psi4 import *
from psi4.core import *
import numpy as np
import os
import sys
sys.path.append(os.getcwd())
from opt_helper import stre, bend, intcosMisc, linearAlgebra
```
## The Step Back-transformation
An optimization algorithm carried out in internal coordinates (see, e.g., the RFO tutorial) will generate a displacement step to be taken in internal coordinates. The conversion of the step into Cartesian coordinates is here called the "back-transformation" ("back", since the original gradient was computed in Cartesians).
As shown in the tutorial on coordinates and the B-matrix,
$$\textbf {B} \Delta x = \Delta q $$
and the $\textbf A^T$ matrix defined by
$$ \textbf A^T \equiv (\textbf{B} \textbf{u} \textbf {B}^T)^{-1} \textbf {B} \textbf{u}$$
was shown to be the left inverse of $\textbf B^T$ where __u__ is an arbitrary symmetric matrix. Attention must be paid to the non-square nature of __A__ and __B__. Here, we have
\begin{align}
\textbf B \Delta x &= \Delta q \\
\\
\textbf B \Delta x &= \big( \textbf{BuB}^T \big) \big( \textbf{BuB}^T\big)^{-1} \Delta q \\
\\
\textbf B \Delta x &= \textbf B \big[ \textbf{uB}^T \big( \textbf{BuB}^T\big)^{-1}\big] \Delta q \\
\\
\Delta x &= \textbf{uB}^T \big( \textbf{BuB}^T\big)^{-1} \Delta q = \textbf A \Delta q \\
\end{align}
The __u__ matrix may be chosen to be the unit matrix which gives
$$\Delta x = \textbf B^T (\textbf B \textbf B^T)^{-1} \Delta q$$
where redundant coordinates can be accommodated simply by using the generalized inverse. It is common to introduce $ \textbf{G} = \textbf B \textbf B^T $ and write the expression as
$$ \Delta x = \textbf{B}^T \textbf{G}^{-1} \Delta q$$
Note the __G__ matrix is a square matrix of dimension (number of internals) by (number of internals). This equation is exact only for infinitesimal displacements, because the B-matrix elements depend upon the molecular geometry (i.e., the Cartesian coordinates). Thus, the back-transformation is carried out iteratively.
To converge on a Cartesian geometry with the desired internal coordinate values, we repeatedly compute the difference between the current internal coordinate values and the desired ones (generating repeated $\Delta q$'s) and use the equation above to compute a new Cartesian geometry.
### Illustration of back-transformation
The back-transformation will now be demonstrated by taking a 0.2 au step increase in the bond lengths and a 5 degree increase in the bond angle of a water molecule.
```python
# Setup the water molecule and coordinates.
mol = psi4.geometry("""
O
H 1 1.7
H 1 1.7 2 104
unit au
""")
# We'll use cc-pVDZ RHF.
psi4.set_options({"basis": "cc-pvdz"})
mol.update_geometry()
xyz_0 = np.array( mol.geometry() )
# Generate the internal coordinates manually. Show their values.
intcos = [stre.STRE(0,1), stre.STRE(0,2), bend.BEND(1,0,2)]
print("%15s%15s" % ('Coordinate', 'Value'))
for I in intcos:
print("%15s = %15.8f %15.8f" % (I, I.q(xyz_0), I.qShow(xyz_0)))
# Handy variables for later.
Natom = mol.natom()
Nintco = len(intcos)
Ncart = 3*Natom
```
```python
# Create an internal coordinate displacement of +0.2au in bond lengths,
# and +5 degrees in the bond angle.
dq = np.array( [0.2, 0.2, 5.0/180*np.pi], float)
B = intcosMisc.Bmat(intcos, xyz_0)
G = np.dot(B, B.T)
G_inv = linearAlgebra.symmMatInv(G, redundant=True)
# Dx = B^T G^(-1) Dq
dx = np.dot(B.T, np.dot(G_inv, dq))
print("Displacement in Cartesians")
print(dx)
# Add Dx to original geometry.
xyz_1 = np.add(np.reshape(dx, (3, 3)), xyz_0)
print("New geometry in cartesians")
print(xyz_1)
# Compute internal coordinate values of new geometry.
print("\n%15s%15s" % ('Coordinate', 'Value'))
for I in intcos:
print("%15s = %15.8f %15.8f" % (I, I.q(xyz_1), I.qShow(xyz_1)))
```
You see that the desired internal coordinate values are not _exactly_ achieved. You can play with the desired displacement and observe more diverse behavior. For water, if you displace only the bond lengths, the result will be exact, because with the bond angle fixed, the directions of the displacements (_s_-vectors on each atom) are constant with respect to the bond lengths. On the other hand, the displacement directions for the bend depend upon the value of the angle, so if you displace only along the bend, the result will not be exact. In general, the result is reasonable but only approximate for small displacements.
### Illustration of iterative back-transformation
Finally, we demonstrate how convergence to the desired internal coordinate displacement can be achieved by an iterative process.
```python
# Create array of target internal coordinate values.
dq_target = np.array( [0.2, 0.2, 5.0/180*np.pi], float)
q_target = np.zeros( (len(intcos)), float)
for i, intco in enumerate(intcos):
q_target[i] = intco.q(xyz_0) + dq_target[i]
xyz = xyz_0.copy()
rms_dq = 1
niter = 1
while rms_dq > 1e-10:
print("Iteration %d" % niter)
dq = dq_target.copy()
# Compute distance from target in internal coordinates.
for i, intco in enumerate(intcos):
dq[i] = q_target[i] - intco.q(xyz)
rms_dq = np.sqrt(np.mean(dq**2))
print("\tRMS(dq) = %10.5e" % rms_dq)
# Dx = B^T G^(-1) Dq
B = intcosMisc.Bmat(intcos, xyz)
G = np.dot(B, B.T)
G_inv = linearAlgebra.symmMatInv(G, redundant=True)
dx = np.dot(B.T, np.dot(G_inv, dq))
print("\tRMS(dx) = %10.5e" % np.sqrt(np.mean(dx**2)))
# Compute new Cartesian geometry.
xyz[:] += np.reshape(dx, (3,3))
niter += 1
print("\nFinal converged geometry.")
print(xyz)
# Compute internal coordinate values of new geometry.
print("\n%15s%15s" % ('Coordinate', 'Value'))
for I in intcos:
print("%15s = %15.8f %15.8f" % (I, I.q(xyz), I.qShow(xyz)))
```
The exact desired displacement is achieved.
Due to the non-orthogonal nature of the coordinates, the iterations may not always converge. In this case, common tactics include using the Cartesian geometry generated by the first back-transformation step, or using the Cartesian geometry that was closest to the desired internal coordinates. Hopefully, as a geometry optimization proceeds, the forces and displacements get smaller and convergence occurs.
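One way to realize these tactics is sketched below, reusing the objects defined above (the iteration cap and the `best_xyz` bookkeeping are illustrative additions, not part of the tutorial code):
```python
# Hedged sketch: cap the iterations and remember the geometry closest to the target.
best_rms, best_xyz = np.inf, xyz_0.copy()
xyz = xyz_0.copy()
for _ in range(25):
    dq = q_target - np.array([intco.q(xyz) for intco in intcos])
    rms_dq = np.sqrt(np.mean(dq**2))
    if rms_dq < best_rms:
        best_rms, best_xyz = rms_dq, xyz.copy()
    if rms_dq < 1e-10:
        break
    B = intcosMisc.Bmat(intcos, xyz)
    G_inv = linearAlgebra.symmMatInv(np.dot(B, B.T), redundant=True)
    xyz = xyz + np.reshape(np.dot(B.T, np.dot(G_inv, dq)), (3, 3))
else:
    xyz = best_xyz  # not converged: fall back to the closest geometry found
```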
A serious complication in procedures such as this one is the presence of discontinuities in the values of the internal coordinates. In some way, the internal coordinate values must be canonicalized so that, e.g., an increase in a torsion from 179 degrees to -178 degrees is interpreted as an increase of 3 degrees. Similar problems arise for bond angles near 180 degrees. (The consistent computation of these changes in values and forces is also critical in Hessian update schemes.)
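For torsions, such canonicalization amounts to wrapping each raw change into a single period; a minimal sketch (the `wrap_angle` helper is illustrative):
```python
def wrap_angle(dq):
    """Map an angular change onto the equivalent value in [-pi, pi)."""
    return (dq + np.pi) % (2.0 * np.pi) - np.pi

# A torsion moving from 179 to -178 degrees: the raw change is -357 degrees,
# but the canonical change is +3 degrees.
raw = np.radians(-178.0) - np.radians(179.0)
print(np.degrees(wrap_angle(raw)))  # ~3.0
```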
|
{"hexsha": "2ffa88155d34e02cd25aa73d1c3f8a031c9991e2", "size": 9456, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Example/Psi4Numpy/13-GeometryOptimization/13e_Step-Backtransformation.ipynb", "max_stars_repo_name": "yychuang/109-2-compchem-lite", "max_stars_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 214, "max_stars_repo_stars_event_min_datetime": "2017-03-01T08:04:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:52:04.000Z", "max_issues_repo_path": "Example/Psi4Numpy/13-GeometryOptimization/13e_Step-Backtransformation.ipynb", "max_issues_repo_name": "yychuang/109-2-compchem-lite", "max_issues_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 100, "max_issues_repo_issues_event_min_datetime": "2017-03-03T13:20:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-05T18:20:27.000Z", "max_forks_repo_path": "Example/Psi4Numpy/13-GeometryOptimization/13e_Step-Backtransformation.ipynb", "max_forks_repo_name": "yychuang/109-2-compchem-lite", "max_forks_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 150, "max_forks_repo_forks_event_min_datetime": "2017-02-17T19:44:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T05:52:43.000Z", "avg_line_length": 39.2365145228, "max_line_length": 634, "alphanum_fraction": 0.5954949239, "converted": true, "num_tokens": 1907}
|
from scipy.spatial import cKDTree
import numpy as np
from matplotlib.tri import Triangulation,LinearTriInterpolator
from scipy import interpolate
import sys
import time
def _checkBounds(_datetimes,datetimes):
"""
"""
  dt_min = np.min(datetimes)
  dt__min = np.min(_datetimes)
  dt_max = np.max(datetimes)
  dt__max = np.max(_datetimes)
  if dt_min < dt__min: raise Exception("{} is below reference datetimes {}".format(dt_min, dt__min))
  if dt_max > dt__max: raise Exception("{} is above reference datetimes {}".format(dt_max, dt__max))
def timeSeries(_datetimes,datetimes,_data=None,bounds_error=True,kind='nearest'):
"""
"""
_datetimes=_datetimes.astype('datetime64[ms]')
datetimes=datetimes.astype('datetime64[ms]')
if bounds_error:
_checkBounds(_datetimes,datetimes)
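  # interp1d cannot handle datetime64 directly, so interpolate on the underlying milliseconds cast to float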
f = interpolate.interp1d(_datetimes.astype("f8"), _data,kind=kind,axis=0)
return f(datetimes.astype("f8"))
def mesh(x,y,elem,data,_x,_y):
"""
"""
tri = Triangulation(x, y, elem.astype("int32"))
trifinder = tri.get_trifinder()
if data.ndim==1:
    if len(data) != len(x): raise Exception("x, y and data must be equal-length 1-D arrays")
lti=LinearTriInterpolator(tri,data,trifinder)
return lti(_x,_y)
elif data.ndim==2:
intdata=np.zeros((len(_x),data.shape[1]))
for i in range(data.shape[1]):
lti=LinearTriInterpolator(tri,data[:,i],trifinder)
intdata[:,i]=lti(_x,_y)
return intdata
else:
raise Exception("Not programmed")
|
{"hexsha": "49cfe93b2c9f76b2c48c009bb5aad0706c75f05f", "size": 1464, "ext": "py", "lang": "Python", "max_stars_repo_path": "s3netcdfapi/data/interpolation.py", "max_stars_repo_name": "meracan/s3-netcdf-api", "max_stars_repo_head_hexsha": "920d09ef7b1a205230ea2c76eabcb4853616992c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-30T01:47:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-30T01:47:45.000Z", "max_issues_repo_path": "s3netcdfapi/data/interpolation.py", "max_issues_repo_name": "meracan/s3-netcdf-api", "max_issues_repo_head_hexsha": "920d09ef7b1a205230ea2c76eabcb4853616992c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "s3netcdfapi/data/interpolation.py", "max_forks_repo_name": "meracan/s3-netcdf-api", "max_forks_repo_head_hexsha": "920d09ef7b1a205230ea2c76eabcb4853616992c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5333333333, "max_line_length": 97, "alphanum_fraction": 0.712431694, "include": true, "reason": "import numpy,from scipy", "num_tokens": 421}
|
# TODO TIP: Segmentation is just one of many approaches to object localization.
import os
from argparse import ArgumentParser
import cv2
import numpy as np
import torch
import tqdm
from segmentation.models import get_model as get_segmentation_model
from inference_utils import prepare_for_segmentation, get_boxes_from_mask, prepare_for_recognition
from recognition.model import get_model as get_recognition_model
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("-d", "--data_path", dest="data_path", type=str, default=None, help="path to the data")
parser.add_argument("-t", "--seg_threshold", dest="seg_threshold", type=float, default=0.5,
help="decision threshold for segmentation model")
parser.add_argument("-s", "--seg_model", dest="seg_model", type=str, default=None,
help="path to a trained segmentation model")
parser.add_argument("-r", "--rec_model", dest="rec_model", type=str, default=None,
help="path to a trained recognition model")
parser.add_argument("--input_wh", "-wh", dest="input_wh", type=str, help="recognition model input size",
default="320x64")
parser.add_argument("-o", "--output_file", dest="output_file", default="baseline_submission.csv",
help="file to save predictions to")
return parser.parse_args()
def main(args):
print("Start inference")
w, h = list(map(int, args.input_wh.split('x')))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
segmentation_model = get_segmentation_model()
with open(args.seg_model, "rb") as fp:
state_dict = torch.load(fp, map_location="cpu")
segmentation_model.load_state_dict(state_dict)
segmentation_model.to(device)
segmentation_model.eval()
recognition_model = get_recognition_model()
with open(args.rec_model, "rb") as fp:
state_dict = torch.load(fp, map_location="cpu")
recognition_model.load_state_dict(state_dict)
recognition_model.to(device)
recognition_model.eval()
test_images_dirname = os.path.join(args.data_path, "test")
results = []
files = os.listdir(test_images_dirname)
for i, file_name in enumerate(tqdm.tqdm(files)):
image_src = cv2.imread(os.path.join(test_images_dirname, file_name))
image_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2RGB)
# 1. Segmentation.
        image, k, dw, dh = prepare_for_segmentation(image_src.astype(np.float64) / 255.,
(256, 256))
x = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
with torch.no_grad():
pred = torch.sigmoid(segmentation_model(x.to(device))).squeeze().cpu().numpy()
mask = (pred >= args.seg_threshold).astype(np.uint8) * 255
# 2. Extraction of detected regions.
boxes = get_boxes_from_mask(mask, margin=0., clip=False)
if len(boxes) == 0:
results.append((file_name, []))
continue
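        # Boxes appear to be in normalized [0, 1] coordinates (inferred from the scaling below); convert to mask pixels.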
boxes[:, [0, 2]] *= mask.shape[1]
boxes[:, [1, 3]] *= mask.shape[0]
        boxes = boxes.astype(np.float64)
# 3. Text recognition for every detected bbox.
texts = []
for box in boxes:
box[[0, 2]] -= dw
box[[1, 3]] -= dh
box /= k
box[[0, 2]] = box[[0, 2]].clip(0, image_src.shape[1] - 1)
box[[1, 3]] = box[[1, 3]].clip(0, image_src.shape[0] - 1)
            box = box.astype(np.int64)
x1, y1, x2, y2 = box
if x1 < 0 or y1 < 0 or x2 < 0 or y2 < 0:
                raise Exception(str(box))
crop = image_src[y1: y2, x1: x2, :]
tensor = prepare_for_recognition(crop, (w, h)).to(device)
with torch.no_grad():
text = recognition_model(tensor, decode=True)[0]
texts.append((x1, text))
# all predictions must be sorted by x1
texts.sort(key=lambda x: x[0])
        results.append((file_name, [text for _, text in texts]))
# Generate a submission file
with open(args.output_file, "wt") as wf:
wf.write("file_name,plates_string\n")
for file_name, texts in sorted(results, key=lambda x: int(os.path.splitext(x[0])[0])):
wf.write(f"test/{file_name},{' '.join(texts)}\n")
print('Done')
if __name__ == "__main__":
main(parse_arguments())
|
{"hexsha": "8491af2b0232e9e89bec867aa2a13e32d098a3c0", "size": 4443, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_term/made_2021_computer_vision/homeworks/02/scripts/create-submission.py", "max_stars_repo_name": "Ilyabasharov/made_mail.ru", "max_stars_repo_head_hexsha": "a81bfd874ab80eb8c7eaad8a4acf723f327f2f50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-15T06:20:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-15T06:20:30.000Z", "max_issues_repo_path": "2_term/made_2021_computer_vision/homeworks/02/scripts/create-submission.py", "max_issues_repo_name": "Ilyabasharov/made_mail.ru", "max_issues_repo_head_hexsha": "a81bfd874ab80eb8c7eaad8a4acf723f327f2f50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_term/made_2021_computer_vision/homeworks/02/scripts/create-submission.py", "max_forks_repo_name": "Ilyabasharov/made_mail.ru", "max_forks_repo_head_hexsha": "a81bfd874ab80eb8c7eaad8a4acf723f327f2f50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-31T08:47:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T08:47:24.000Z", "avg_line_length": 41.1388888889, "max_line_length": 111, "alphanum_fraction": 0.6198514517, "include": true, "reason": "import numpy", "num_tokens": 1083}
|
import neuroglancer
import numpy as np
import networkx as nx
from .graph_source import SkeletonSource
import logging
import copy
logger = logging.getLogger(__name__)
def add_trees_no_skeletonization(
s, trees, node_id, name, dimensions, visible=False, color=None
):
mst = []
for u, v in trees.edges():
pos_u = np.array(trees.nodes[u]["location"]) + 0.5
pos_v = np.array(trees.nodes[v]["location"]) + 0.5
mst.append(
neuroglancer.LineAnnotation(
point_a=pos_u[::-1], point_b=pos_v[::-1], id=next(node_id)
)
)
s.layers.append(
name="{}".format(name),
layer=neuroglancer.AnnotationLayer(annotations=mst),
annotationColor="#{:02X}{:02X}{:02X}".format(255, 125, 125),
visible=visible,
)
def add_graph(
s,
graph: nx.Graph,
name: str,
graph_dimensions,
visible=False,
):
array_dimensions = copy.deepcopy(graph_dimensions)
offset = np.min([attrs["location"] for attrs in graph.nodes.values()], axis=0)
voxel_size = (1,) * len(offset)
s.layers.append(
name="{}".format(name),
layer=neuroglancer.SegmentationLayer(
source=[
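                # Effectively a placeholder one-voxel volume; the visible content comes from the SkeletonSource below.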
neuroglancer.LocalVolume(
data=np.ones((1, 1, 1), dtype=np.uint32),
dimensions=array_dimensions,
voxel_offset=offset,
),
SkeletonSource(graph, graph_dimensions, voxel_size=voxel_size),
],
skeleton_shader="""
#uicontrol float showautapse slider(min=0, max=2)
void main() {
if (distance > showautapse) discard;
emitRGB(colormapJet(distance));
}
""",
selected_alpha=0,
not_selected_alpha=0,
),
)
def visualize_graph(graph: nx.Graph, name: str = "graph", dimensions=None):
if dimensions is None:
node, attrs = next(iter(graph.nodes.items()))
loc = attrs["location"]
n_dims = len(loc)
dims = ["t", "z", "y", "x"][-n_dims:]
units = "nm"
scales = (1,) * n_dims
        cs_attrs = {"names": dims, "units": units, "scales": scales}
        dimensions = neuroglancer.CoordinateSpace(**cs_attrs)
viewer = neuroglancer.Viewer()
viewer.dimensions = dimensions
with viewer.txn() as s:
add_graph(
s,
graph,
name=name,
visible=True,
graph_dimensions=dimensions,
)
print(viewer)
input("Hit ENTER to quit!")
|
{"hexsha": "4c746fb921d876e8bc870667f918266a34cba226", "size": 2524, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuroglancer_graphs/graphs.py", "max_stars_repo_name": "pattonw/neuroglancer_graphs", "max_stars_repo_head_hexsha": "3fb6dbfa8d4ef3e2012eb3007063b8bab4af8ebc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neuroglancer_graphs/graphs.py", "max_issues_repo_name": "pattonw/neuroglancer_graphs", "max_issues_repo_head_hexsha": "3fb6dbfa8d4ef3e2012eb3007063b8bab4af8ebc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neuroglancer_graphs/graphs.py", "max_forks_repo_name": "pattonw/neuroglancer_graphs", "max_forks_repo_head_hexsha": "3fb6dbfa8d4ef3e2012eb3007063b8bab4af8ebc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8510638298, "max_line_length": 82, "alphanum_fraction": 0.574881141, "include": true, "reason": "import numpy,import networkx", "num_tokens": 603}
|
function Base.parse(::Type{Rational{T}}, x::AbstractString) where {T <: Integer}
ms, ns = split(x, '/', keepempty = false)
m = parse(T, ms)
n = parse(T, ns)
return m // n
end
Base.parse(::Type{Rational}, x::AbstractString) = parse(Rational{Int32}, x)
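# Parse either a plain float string ("0.5") or a ratio string ("1/2") into a Float64.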
function get_ratio(x::AbstractString)
r = tryparse(Float64, x)
return r === nothing ? convert(Float64, parse(Rational, x)) : r
end
get_ratio(x::Float64) = x
|
{"hexsha": "dbffcb15edf0bf103003a2e9c9e0547f41dd207b", "size": 435, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/helper/Ratios.jl", "max_stars_repo_name": "ORNL/ProxyIO.jl", "max_stars_repo_head_hexsha": "ea1eedfe6acda0d7adea86f82f3aeb632cc6cfdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/helper/Ratios.jl", "max_issues_repo_name": "ORNL/ProxyIO.jl", "max_issues_repo_head_hexsha": "ea1eedfe6acda0d7adea86f82f3aeb632cc6cfdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/helper/Ratios.jl", "max_forks_repo_name": "ORNL/ProxyIO.jl", "max_forks_repo_head_hexsha": "ea1eedfe6acda0d7adea86f82f3aeb632cc6cfdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-18T02:50:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:50:04.000Z", "avg_line_length": 27.1875, "max_line_length": 80, "alphanum_fraction": 0.6413793103, "num_tokens": 132}
|
/* Copyright (c) 2010-2015, Delft University of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* - Neither the name of the Delft University of Technology nor the names of its contributors
* may be used to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Changelog
* YYMMDD Author Comment
* 121213 R.C.A. Boon Creation of code.
* 130117 R.C.A. Boon Added solved boolean flag to applicable member functions (with
* help from S. Billemont). Moved constructor to header file.
* Moved debugged getMaximumNumberOfRevolutions to source file.
* 130211 R.C.A. Boon Added hasSolution flag to root finder.
* 120227 S. Billemont Removed hasSolution in favor of an exception.
* 130325 R.C.A. Boon Removed superfluous sanity check of number of revolutions in
* execute() function, fixed bug in computation of maximumNumberOf-
* Revolutions.
*
* References
* PyKEP toolbox, Dario Izzo, ESA Advanced Concepts Team.
* Richard H. An Introduction to the Mathematics and Methods of Astrodynamics, Revised
* Edition.
* Battin, AIAA Education Series.
*
* Notes
*
*/
#include <cmath>
#include <boost/format.hpp>
#include <boost/math/special_functions.hpp> // for asinh and acosh
#include "Tudat/Mathematics/BasicMathematics/mathematicalConstants.h"
#include "Tudat/Astrodynamics/MissionSegments/multiRevolutionLambertTargeterIzzo.h"
#include "Tudat/Mathematics/BasicMathematics/convergenceException.h"
namespace tudat
{
namespace mission_segments
{
//! Compute solution for N revolutions and branch.
void MultiRevolutionLambertTargeterIzzo::computeForRevolutionsAndBranch(
const int aNumberOfRevolutions, const bool aIsRightBranch )
{
// Adjust parameters for new solution
numberOfRevolutions = aNumberOfRevolutions;
isRightBranch = aIsRightBranch;
// Check whether number of revolutions is possible
sanityCheckNumberOfRevolutions( );
// Execute problem solving for new solution
execute( );
}
//! Get maximum number of revolutions calculated.
int MultiRevolutionLambertTargeterIzzo::getMaximumNumberOfRevolutions( )
{
if ( !solved )
{
transformDimensions( );
sanityCheckNumberOfRevolutions( );
}
return maximumNumberOfRevolutions;
}
//! Sanity check number of revolutions.
void MultiRevolutionLambertTargeterIzzo::sanityCheckNumberOfRevolutions( )
{
// If not yet defined, calculate number of revolutions possible.
if ( maximumNumberOfRevolutions == NO_MAXIMUM_REVOLUTIONS )
{
// Temporarily store specified number, as numberOfRevolutions is needed to calculate max
// (this is a tricky way to work, but on the other hand this makes this approach decidedly
// different from PyKEP routines and it also happens only once per object).
int copyOfOriginalNumberOfRevolutions = numberOfRevolutions;
        // Calculate first guess of maximum, by dividing the normalized time of flight by the
        // time of flight of the minimum-energy ellipse.
numberOfRevolutions = static_cast< int >(
normalizedTimeOfFlight / (
mathematical_constants::PI / 2.0
* std::sqrt( 2.0 * normalizedSemiPerimeter
* normalizedSemiPerimeter
* normalizedSemiPerimeter ) ) );
// If the current guess for the maximum is non-zero, then additional analysis is required to
// determine the correct maximum.
if( numberOfRevolutions != 0)
{
// The following try-block is meant to check whether the solution converges or not. If
// the current guess for the maximum number of revolutions is correct, then the problem
// will converge. If it does not, an exception will be thrown stating that it did not
// converge. Catching this exception allows to decrease the guess only when the
// exception occurs, and not under other circumstances.
try
{
// Compute root (no further information is required)
computeRootTimeOfFlight();
}
catch( basic_mathematics::ConvergenceException )
{
// If the rootfinder did not converge, then the current guess is wrong and needs to
// be decreased
numberOfRevolutions--;
}
}
        // No further analysis is needed if the current guess is equal to zero.
// Maximum is now found.
maximumNumberOfRevolutions = numberOfRevolutions;
// Reinstating original number of revolutions specified.
numberOfRevolutions = copyOfOriginalNumberOfRevolutions;
}
// Default: compare maximum with specified number of revolutions.
// If specified is larger than maximum, no solution is possible.
if ( numberOfRevolutions > maximumNumberOfRevolutions )
{
// Throw exception.
BOOST_THROW_EXCEPTION( std::runtime_error(
( boost::format(
"Number of revolutions specified in Lambert problem is larger than possible.\n"
"Specified number of revolutions %d while the maximum is %d"
) % numberOfRevolutions % maximumNumberOfRevolutions).str( )
) );
}
// Else, nothing wrong.
}
//! Execute solving procedure (for multiple revolutions).
void MultiRevolutionLambertTargeterIzzo::execute( )
{
// Sanity checks.
sanityCheckTimeOfFlight( );
sanityCheckGravitationalParameter( );
// Transform dimensions.
transformDimensions( );
/*// Sanity check for number of revolutions (must be after dimension removal).
sanityCheckNumberOfRevolutions( );*/
if ( numberOfRevolutions == 0 )
{
// call base class function that works on zero revolutions.
ZeroRevolutionLambertTargeterIzzo::execute( );
}
else
{
// Solve multi-rev root.
double multipleRevolutionXParameter = computeRootTimeOfFlight( );
// Reconstruct velocities.
computeVelocities( multipleRevolutionXParameter );
}
solved = true;
}
//! Compute time-of-flight using Lagrange's equation (for multiple revolutions).
double MultiRevolutionLambertTargeterIzzo::computeTimeOfFlight( const double xParameter )
{
// Determine semi-major axis.
const double semiMajorAxis = normalizedMinimumEnergySemiMajorAxis
/ ( 1.0 - xParameter * xParameter );
// If x < 1, the solution is an ellipse.
if ( xParameter < 1.0 )
{
// Alpha parameter in Lagrange's equation (no explanation available).
const double alphaParameter = 2.0 * std::acos( xParameter );
// Beta parameter in Lagrange's equation (no explanation available).
double betaParameter;
// If long transfer arc.
if ( isLongway )
{
betaParameter = -2.0 * std::asin(
std::sqrt( ( normalizedSemiPerimeter - normalizedChord )
/ ( 2.0 * semiMajorAxis ) ) );
}
// Otherwise short transfer arc.
else
{
betaParameter = 2.0 * std::asin(
std::sqrt( ( normalizedSemiPerimeter - normalizedChord )
/ ( 2.0 * semiMajorAxis ) ) );
}
// Time-of-flight according to Lagrange including multiple revolutions.
const double timeOfFlight = semiMajorAxis * std::sqrt( semiMajorAxis ) *
( ( alphaParameter - std::sin( alphaParameter ) )
- ( betaParameter - std::sin( betaParameter ) )
+ 2.0 * mathematical_constants::PI
* numberOfRevolutions );
return timeOfFlight;
}
// Otherwise it is a hyperbola.
else
{
// Alpha parameter in Lagrange's equation (no explanation available).
const double alphaParameter = 2.0 * boost::math::acosh( xParameter );
// Beta parameter in Lagrange's equation (no explanation available).
double betaParameter;
// If long transfer arc.
if ( isLongway )
{
betaParameter = -2.0 * boost::math::asinh( std::sqrt( ( normalizedSemiPerimeter
- normalizedChord )
/ ( -2.0 * semiMajorAxis ) ) );
}
// Otherwise short transfer arc
else
{
betaParameter = 2.0 * boost::math::asinh( std::sqrt( ( normalizedSemiPerimeter
- normalizedChord )
/ ( -2.0 * semiMajorAxis ) ) );
}
// Time-of-flight according to Lagrange.
const double timeOfFlightLagrange = -semiMajorAxis * std::sqrt( -semiMajorAxis ) *
( ( std::sinh( alphaParameter ) - alphaParameter )
- ( std::sinh( betaParameter ) - betaParameter ) );
return timeOfFlightLagrange;
}
}
//! Solve the time of flight equation for x (for multiple revolutions).
double MultiRevolutionLambertTargeterIzzo::computeRootTimeOfFlight( )
{
using mathematical_constants::PI;
    // Define initial guesses for abscissae (x) and ordinates (y).
double x1, x2;
if ( isRightBranch )
{ // right branch solution.
x1 = std::tan( .7234 * PI / 2.0 );
x2 = std::tan( .5234 * PI / 2.0 );
}
else
{ // left branch solution.
x1 = std::tan( -.5234 * PI / 2.0 );
x2 = std::tan( -.2234 * PI / 2.0 );
}
double y1 = computeTimeOfFlight( std::atan( x1 ) * 2.0 / PI ) - normalizedTimeOfFlight;
double y2 = computeTimeOfFlight( std::atan( x2 ) * 2.0 / PI ) - normalizedTimeOfFlight;
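    // Note: the secant iteration below runs on the transformed variable tan( x * PI / 2.0 ),
    // which maps the physical parameter x in ( -1, 1 ) onto the entire real line.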
// Declare and initialize root-finding parameters.
double rootFindingError = 1.0, xNew = 0.0, yNew = 0.0;
int iterator = 0;
// Root-finding loop.
while ( ( rootFindingError > convergenceTolerance ) && ( y1 != y2 )
&& ( iterator < maximumNumberOfIterations ) )
{
// Update iterator.
iterator++;
// Compute new x-value.
xNew = ( x1 * y2 - y1 * x2 ) / ( y2 - y1 );
// Compute corresponding y-value.
yNew = computeTimeOfFlight( std::atan( xNew ) * 2.0 / PI ) - normalizedTimeOfFlight;
        // Update abscissae and ordinates.
x1 = x2;
y1 = y2;
x2 = xNew;
y2 = yNew;
// Compute root-finding error.
rootFindingError = std::fabs( x1 - xNew );
}
// Verify that root-finder has converged.
if ( iterator == maximumNumberOfIterations )
{
BOOST_THROW_EXCEPTION( basic_mathematics::ConvergenceException(
( boost::format(
"Multi-Revolution Lambert targeter failed to converge to a solution.\n"
"Reached the maximum number of iterations: %d"
) % maximumNumberOfIterations).str( )
) );
}
// Revert to x parameter.
double xParameter = std::atan( xNew ) * 2.0 / PI;
return xParameter;
}
// Add compute maximum number of revolutions routine?
} // namespace mission_segments
} // namespace tudat
|
{"hexsha": "051dc777deda98a9cd4b0fe8bb801b37d8f6bc3c", "size": 12932, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tudat/Astrodynamics/MissionSegments/multiRevolutionLambertTargeterIzzo.cpp", "max_stars_repo_name": "JPelamatti/ThesisTUDAT", "max_stars_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tudat/Astrodynamics/MissionSegments/multiRevolutionLambertTargeterIzzo.cpp", "max_issues_repo_name": "JPelamatti/ThesisTUDAT", "max_issues_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tudat/Astrodynamics/MissionSegments/multiRevolutionLambertTargeterIzzo.cpp", "max_forks_repo_name": "JPelamatti/ThesisTUDAT", "max_forks_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-05-30T03:42:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-30T03:42:22.000Z", "avg_line_length": 39.5474006116, "max_line_length": 100, "alphanum_fraction": 0.6236467677, "num_tokens": 2859}
|
$NetBSD: patch-src_pingus_components_slider__box.hpp,v 1.1 2019/05/12 06:17:30 triaxx Exp $
* Port to Boost.Signals2.
--- src/pingus/components/slider_box.hpp.orig 2011-12-24 21:46:47.000000000 +0000
+++ src/pingus/components/slider_box.hpp
@@ -17,7 +17,7 @@
#ifndef HEADER_PINGUS_PINGUS_COMPONENTS_SLIDER_BOX_HPP
#define HEADER_PINGUS_PINGUS_COMPONENTS_SLIDER_BOX_HPP
-#include <boost/signal.hpp>
+#include <boost/signals2.hpp>
#include "engine/gui/rect_component.hpp"
@@ -39,7 +39,7 @@ public:
void set_value(int v);
- boost::signal<void (int)> on_change;
+ boost::signals2::signal<void (int)> on_change;
private:
SliderBox (const SliderBox&);
|
{"hexsha": "4697dc4a6a963bae9a436b3c55c3ed816f613682", "size": 672, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "source/pkgsrc/games/pingus/patches/patch-src_pingus_components_slider__box.hpp", "max_stars_repo_name": "Scottx86-64/dotfiles-1", "max_stars_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-11-20T22:46:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T22:46:39.000Z", "max_issues_repo_path": "source/pkgsrc/games/pingus/patches/patch-src_pingus_components_slider__box.hpp", "max_issues_repo_name": "Scottx86-64/dotfiles-1", "max_issues_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/pkgsrc/games/pingus/patches/patch-src_pingus_components_slider__box.hpp", "max_forks_repo_name": "Scottx86-64/dotfiles-1", "max_forks_repo_head_hexsha": "51004b1e2b032664cce6b553d2052757c286087d", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.88, "max_line_length": 91, "alphanum_fraction": 0.7366071429, "num_tokens": 216}
|
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate, optimize
class polynomial():
"""class for a polynomial function
"""
def __init__(self, D=[0, -math.sqrt(3)/3, math.sqrt(3)/3, 0], chord = None,
color = 'k'):
self.D = D
self.chord = chord
self.color = color
def x1(self, x1, diff=None):
if diff is None:
return x1
elif diff == 'x1':
return np.ones(len(x1))
elif diff == 'x11':
return np.zeros(len(x1))
elif diff == 'theta3' or diff == 'theta33':
return np.zeros(len(x1))
elif diff == 'theta1':
dr = self.r(x1, diff='x1')
return 1/np.einsum('ij,ij->i',dr, dr)
elif diff == 'theta11':
return -self.x1(x1, 'theta1')**4*self.x3(x1, 'x1')*self.x3(x1, 'x11')
def x2(self, x1, diff=None):
return np.zeros(len(x1))
def x3(self, x1, diff=None, D=None):
""" z2 (checked)"""
if D is None:
D = self.D
        if len(D) != 5:
            # Pad the missing high-order coefficients with zeros (a quartic, length-5 D is expected).
            D_temp = np.zeros(5)
            D_temp[:len(D)] = D
            D = D_temp
if diff is None:
return(D[4]*x1**4 + D[3]*x1**3 + D[2]*x1**2 + D[1]*x1 + D[0])
elif diff == 'x1':
return(4*D[4]*x1**3 + 3*D[3]*x1**2 + 2*D[2]*x1 + D[1])
elif diff == 'x11':
return(12*D[4]*x1**2 + 6*D[3]*x1 + 2*D[2])
elif diff == 'x111':
return(24*D[4]*x1 + 6*D[3])
elif diff == 'theta3':
return(np.zeros(len(x1)))
def r(self, x1 = None, diff=None):
if x1 is None:
x1 = self.x1_grid
else:
if type(x1) == float:
x1 = np.array([x1])
if diff == 'theta1':
output = np.array([self.x1(x1, 'x1'),
self.x2(x1, 'x1'),
self.x3(x1, 'x1')]).T
output = np.einsum('ij,i->ij',output, self.x1(x1, 'theta1'))
else:
output = np.array([self.x1(x1, diff),
self.x2(x1, diff),
self.x3(x1, diff)]).T
self.position = output
return (output)
def basis(self, x1 = None, diff=None):
if x1 is None:
x1 = self.x1_grid
if diff is None:
self.a = np.zeros([3, len(x1), 3])
self.a[0,:,:] = np.array([self.x1(x1, 'x1')*self.x1(x1, 'theta1'),
[0]*len(x1),
self.x3(x1, 'x1')*self.x1(x1, 'theta1')]).T
self.a[1,:,:] = np.array([[0,1,0],]*len(x1))
self.a[2,:,:] = np.cross(self.a[0,:,:], self.a[1,:,:])
elif diff == 'theta':
# Most components are null
self.da = np.zeros([3,3,len(x1),3])
# a1 diff theta1
self.da[0,0,:,:] = np.einsum('ij,i->ij', self.r(x1, 'x11'), self.x1(x1, 'theta1')**2) + \
np.einsum('ij,i->ij', self.r(x1, 'theta1'), self.x1(x1, 'theta11'))
# a3 diff theta1
self.da[0,2,:,:] = np.einsum('ij,i->ij', self.r(x1, 'x11'), self.x1(x1, 'theta1')**2) + \
np.einsum('ij,i->ij', self.r(x1, 'theta3'), self.x1(x1, 'theta33'))
def christoffel(self, i, j, k, order=1):
if order == 1:
gik_j = self.dA[i,k,j]
gjk_i = self.dA[j,k,i]
gij_k = self.dA[i,j,k]
return .5*(gik_j + gjk_i - gij_k)
elif order == 2:
raise NotImplementedError
def metric_tensor(self, diff = None):
if diff is None:
self.A = np.zeros([3,3,len(self.x1_grid)])
for i in range(3):
for j in range(3):
self.A[i,j] = np.einsum('ij,ij->i',self.a[i,:], self.a[j,:])
elif diff == 'theta':
self.dA = np.zeros([3,3,3,len(self.x1_grid)])
for i in range(3):
for j in range(3):
for k in range(3):
self.dA[i,j,k] = np.einsum('ij,ij->i',self.da[i,k,:],
self.a[j,:]) + \
np.einsum('ij,ij->i',self.a[i,:],
self.da[j,k,:])
def curvature_tensor(self):
self.B = np.zeros([2,2,len(self.x1_grid)])
for alpha in range(2):
for beta in range(2):
self.B[alpha, beta] = self.christoffel(alpha, beta, 2)
def arclength(self, chord = None):
def integrand(x1):
dr = self.r(x1, 'x1')
return np.sqrt(np.inner(dr, dr))
if chord is None:
chord = self.chord
return integrate.quad(integrand, 0, chord)
def calculate_x1(self, length_target):
def f(c_c):
length_current, err = self.arclength(c_c)
return abs(target - length_current)
x1 = []
for target in length_target:
x1.append(optimize.minimize(f, target).x[0])
self.x1_grid = np.array(x1)
def plot(self, basis=False, label=None, linestyle = '-', color = None):
r = self.r(self.x1_grid)
if color is None:
color = self.color
if label is None:
plt.plot(r[:,0], r[:,2], color, linestyle = linestyle, lw = 4)
else:
plt.plot(r[:,0], r[:,2], color, linestyle = linestyle, lw = 4,
label=label)
if basis:
plt.quiver(r[:,0], r[:,2],
self.a[0,:,0], self.a[0,:,2],
angles='xy', color = self.color, scale_units='xy')
plt.quiver(r[:,0], r[:,2],
self.a[2,:,0], self.a[2,:,2],
angles='xy', color = self.color, scale_units='xy')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
class poly():
"""class for a polynomial function
"""
def __init__(self, a=[0, -math.sqrt(3)/3, math.sqrt(3)/3, 0]):
self.a = a
def z2(self, z1, diff=None, a=None):
""" z2 (checked)"""
if a is None:
a = self.a
if diff is None:
return(a[3]*z1**3 + a[2]*z1**2 + a[1]*z1 + a[0])
elif diff == 'z1':
return(3*a[3]*z1**2 + 2*a[2]*z1 + a[1])
elif diff == 'z11':
return(6*a[3]*z1 + 2*a[2])
elif diff == 'z111':
return(6*a[3])
elif diff == 'x1':
return(self.z2(z1, 'z1')*self.z1(z1, 'x1'))
elif diff == 'x11':
return(self.z2(z1, 'z11')*(self.z1(z1, 'x1'))**2 +
self.z2(z1, 'z1')*self.z1(z1, 'x11'))
def x1(self, z1, diff=None, a=None):
""" dx1/ dz1 (checked)"""
if diff is None:
output = []
try:
for z_final in z1:
output_i, err = integrate.quad(lambda x: self.x1(x, 'z1'),
0, z_final)
output.append(output_i)
output = np.array(output)
except(TypeError):
output, err = integrate.quad(lambda x: self.x1(x, 'z1'), 0, z1)
return(output)
elif diff == 'z1':
return(np.sqrt(1+(self.z2(z1, 'z1', a=a))**2))
elif diff == 'z11':
return(self.z1(z1, 'x1')*self.z2(z1, 'z1')*self.z2(z1, 'z11'))
def z1(self, input, diff=None, a=None):
""" dx1 / dz1 (all checked). For calculating z1 from x1, there is not
a numerical solution, but I can minimize the residual."""
if diff is None:
output = []
for x_final in input:
def _residual(x):
return abs(x_final - self.x1(x))
output_i = optimize.newton(_residual, x_final)
output.append(output_i)
output = np.array(output)
return(output)
if diff == 'x1':
if a is None:
return(1.0/self.x1(input, 'z1'))
else:
return(- self.z1(input, 'z1')**3*self.z2(input, 'z1') *
self.z2(input, 'z1', a=a))
elif diff == 'x11':
return(-self.z1(input, 'x1')**4*self.z2(input, 'z1') *
self.z2(input, 'z11'))
elif diff == 'x111':
return(-4*self.z1(input, 'x1')**3*self.z1(input, 'x11') *
self.z2(input, 'z1') * self.z2(input, 'z11') -
self.z1(input, 'x1')**5*(self.z2(input, 'z11')**2 -
self.z2(input, 'z1') *
self.z2(input, 'z111')))
def tangent(self, z1, diff=None):
"""Tangent vector r (checked)"""
if diff is None:
try:
output = self.z1(z1, 'x1')*np.array([np.ones(len(z1)),
self.z2(z1, 'z1')])
except(ValueError):
output = self.z1(z1, 'x1')*np.array([1, self.z2(z1, 'z1')])
            if len(output) > 2:
                raise ValueError('tangent: output has unexpected dimensions')
elif diff == 'x1':
try:
output = self.z1(z1, 'x1')**2*np.array([np.zeros(len(z1)),
self.z2(z1, 'z11')])
output += self.z1(z1, 'x11')*np.array([np.ones(len(z1)),
self.z2(z1, 'z1')])
except(ValueError):
output = self.z1(z1, 'x1')**2*np.array([0, self.z2(z1, 'z11')])
output += self.z1(z1, 'x11')*np.array([1, self.z2(z1, 'z1')])
return(output)
def normal(self, z1, diff=None):
"""Normal vector (checked)"""
if diff is None:
try:
output = self.z1(z1, 'x1')*np.array([- self.z2(z1, 'z1'),
np.ones(len(z1))])
except(TypeError):
output = self.z1(z1, 'x1')*np.array([- self.z2(z1, 'z1'), 1])
elif diff == 'z1':
try:
output = self.z1(z1, 'x1')*np.array([- self.z2(z1, 'z1'),
np.zeros(len(z1))])
except(TypeError):
output = self.z1(z1, 'x1')*np.array([- self.z2(z1, 'z11'), 0])
elif diff == 'x1':
output = self.z1(z1, 'x11')*np.array([- self.z2(z1, 'z1'),
np.ones(len(z1))])
output += self.z1(z1, 'x1')*self.normal(z1, 'z1')
elif diff == 'x11':
output = self.z1(z1, 'x111')*np.array([- self.z2(z1, 'z1'),
np.ones(len(z1))])
output += self.z1(z1, 'x11')*np.array([- self.z2(z1, 'z11'),
np.zeros(len(z1))])
output += self.z1(z1, 'x11')*self.normal(z1, 'z1')
output += self.z1(z1, 'x1')**2*self.normal(z1, 'z1')
elif type(diff) == list:
output = self.z1(z1, 'x1')*np.array([- self.z2(z1, 'z1', a=diff),
np.zeros(len(z1))])
output += self.z1(z1, 'x1', a=diff)*np.array([- self.z2(z1, 'z1'),
np.ones(len(z1))])
elif diff == 'x2':
output = np.array([[0], [0]])
return(output)
def neutral_line(self, z1, a=None):
""" Position along neutral line"""
if a is not None:
return(np.array([z1, self.z2(z1, a=a)]))
else:
return(np.array([z1, self.z2(z1)]))
def r(self, input, x2=0, diff=None, input_type='x1'):
""" Position anywhere along shell considering shell thickness """
z1 = self._process_input(input, input_type)
if diff is None:
output = self.neutral_line(z1) + x2*self.g(2, z1)
elif diff == 'x1':
output = self.g(1, z1, x2)
elif diff == 'x2':
output = self.g(2, z1, x2)
elif type(diff) == list:
output = self.neutral_line(z1, a=diff) + \
x2*self.normal(z1, diff=diff)
return(output)
def g(self, i, input, x2=0, diff=None, input_type='z1'):
"""Tangent vector r (checked)"""
z1 = self._process_input(input, input_type)
if i == 1:
if diff is None:
g_i = self.tangent(z1) + x2*self.normal(z1, diff='x1')
elif diff == 'x1':
g_i = self.tangent(z1, 'x1') + x2*self.normal(z1, diff='x2')
elif diff == 'x2':
g_i = self.normal(z1, diff='x1')
elif i == 2:
if diff is None:
g_i = self.normal(z1)
else:
g_i = np.array([[0], [0]])
return(g_i)
def gij(self, i, j, z1, x2=0, diff=None, covariant=True, orthogonal=True):
def dot(a, b):
a_1, a_2 = a
b_1, b_2 = b
return(a_1*b_1 + a_2*b_2)
if diff is None:
gi = self.g(i, z1, x2)
gj = self.g(j, z1, x2)
output = []
for n in range(len(z1)):
output.append(gi[0][n]*gj[0][n] +
gi[1][n]*gj[1][n])
output = np.array(output)
elif diff == 'x1':
# Calculating basic vectors
t = self.tangent(z1)
dt = self.tangent(z1, 'x1')
n = self.normal(z1)
dn = self.normal(z1, 'x1')
ddn = self.normal(z1, 'x11')
# calculate g
if i == 1 and j == 1:
output = 2*dot(t, dt) + 2*x2*(dot(dt, dn) + dot(t, ddn)) + \
2*x2**2*dot(dn, ddn)
if (i == 1 and j == 2) or (i == 2 and j == 1):
output = x2*(dot(dn, dn) + dot(n, ddn))
if i == 2 and j == 2:
output = 2*dot(n, dn)
elif diff == 'x2':
# Calculating basic vectors
t = self.tangent(z1)
n = self.normal(z1)
dn = self.normal(z1, 'x1')
# calculate g
if i == 1 and j == 1:
output = 2*dot(t, dn) + 2*x2**2*dot(dn, dn)
if (i == 1 and j == 2) or (i == 2 and j == 1):
output = dot(dn, n)
if i == 2 and j == 2:
output = np.zeros(len(z1))
if not covariant and orthogonal:
if i == j:
output = 1/output
else:
output = 0.0
return(output)
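    # Christoffel symbols of the second kind:
    #   Gamma^i_{kl} = 1/2 * g^{im} * (g_{mk,l} + g_{ml,k} - g_{kl,m})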
def christoffel(self, i, k, l, z1, x2, order='second'):
if order == 'second':
output = np.zeros(len(z1))
for m in range(1, 3):
gim = self.gij(i, m, z1, x2, covariant=False)
gmk_l = self.gij(m, k, z1, x2, diff='x%i' % (l))
gml_k = self.gij(m, l, z1, x2, diff='x%i' % (k))
gkl_m = self.gij(k, l, z1, x2, diff='x%i' % (m))
                output += .5*gim*(gmk_l + gml_k - gkl_m)
return(output)
def _process_input(self, input, input_type):
if input_type == 'z1':
z1 = input
elif input_type == 'x1':
z1 = self.z1(input)
return(z1)
|
{"hexsha": "4b9d8360a86a6dfe315ea55933a2bac0c3a1e271", "size": 15386, "ext": "py", "lang": "Python", "max_stars_repo_path": "aeropy/geometry/parametric.py", "max_stars_repo_name": "carsecond/AeroPy", "max_stars_repo_head_hexsha": "81685f364abd9536fc62dce114f14bef191dab8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-13T03:28:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-13T03:28:57.000Z", "max_issues_repo_path": "aeropy/geometry/parametric.py", "max_issues_repo_name": "carsecond/AeroPy", "max_issues_repo_head_hexsha": "81685f364abd9536fc62dce114f14bef191dab8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aeropy/geometry/parametric.py", "max_forks_repo_name": "carsecond/AeroPy", "max_forks_repo_head_hexsha": "81685f364abd9536fc62dce114f14bef191dab8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0841584158, "max_line_length": 101, "alphanum_fraction": 0.4262316392, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4417}
|
/*==============================================================================
Copyright (c) 2016, 2017, 2018 Matt Calabrese
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef ARGOT_RECEIVER_GRAPHVIZ_DETAIL_PROPERTY_DECL_HPP_
#define ARGOT_RECEIVER_GRAPHVIZ_DETAIL_PROPERTY_DECL_HPP_
#include <boost/graph/properties.hpp>
#define ARGOT_RECEIVER_GRAPHVIZ_DETAIL_PROPERTY_DECL( kind_, name_ ) \
struct name_ \
{ \
using kind = ::boost::kind_ ## _property_tag; \
static char const* name() { return #name_; } \
}
#endif // ARGOT_RECEIVER_GRAPHVIZ_DETAIL_PROPERTY_DECL_HPP_
|
{"hexsha": "500ae75bf945850719db5dba3b76a39fd378dc36", "size": 988, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/argot/receiver/graphviz/detail/property_decl.hpp", "max_stars_repo_name": "mattcalabrese/argot", "max_stars_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 49.0, "max_stars_repo_stars_event_min_datetime": "2018-05-09T23:17:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T10:05:19.000Z", "max_issues_repo_path": "include/argot/receiver/graphviz/detail/property_decl.hpp", "max_issues_repo_name": "mattcalabrese/argot", "max_issues_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/argot/receiver/graphviz/detail/property_decl.hpp", "max_forks_repo_name": "mattcalabrese/argot", "max_forks_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-08-04T03:51:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-28T06:53:29.000Z", "avg_line_length": 47.0476190476, "max_line_length": 80, "alphanum_fraction": 0.4858299595, "num_tokens": 175}
|
import numpy as np
from simplexlib.src.table import Table, Simplex, V
from .tree import Node, Edge
class BranchAndBound:
@classmethod
def resolve(cls, table: Table, prev: np.float64 = None) -> Node:
summary = Simplex.resolve(table)
if not summary.fixed or not summary.solved:
return Node(summary, True)
result: Table = summary.result
source: Table = summary.source
if prev is not None and result.F == prev:
return Node(summary, True)
node = Node(summary)
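        # Branch on the first fractional basic variable: add floor/ceil constraints to two subproblems.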
for idx, (value, label) in enumerate(zip(result.b, result.vlabels)):
            if value.is_integer():
continue
condition = np.zeros(len(result.c))
condition[idx] = 1
left, right = np.floor(value), np.ceil(value)
ltable = source.add_constraint(
V[condition] <= left
)
rtable = source.add_constraint(
V[condition].inv >= right
)
node.left = Edge(
[label, "≤", left],
node,
cls.resolve(ltable, result.F),
)
node.right = Edge(
[label, "≥", right],
node,
cls.resolve(rtable, result.F),
)
return node
return node
|
{"hexsha": "68f5c80c8fcf1836d363646710264a7b1ea7eb63", "size": 1372, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/branch_and_bound.py", "max_stars_repo_name": "mocurin/dt-lab-03", "max_stars_repo_head_hexsha": "575a547ac349fc1dce020ab4964025f90f199644", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/branch_and_bound.py", "max_issues_repo_name": "mocurin/dt-lab-03", "max_issues_repo_head_hexsha": "575a547ac349fc1dce020ab4964025f90f199644", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/branch_and_bound.py", "max_forks_repo_name": "mocurin/dt-lab-03", "max_forks_repo_head_hexsha": "575a547ac349fc1dce020ab4964025f90f199644", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8867924528, "max_line_length": 76, "alphanum_fraction": 0.5131195335, "include": true, "reason": "import numpy", "num_tokens": 288}
|
@everywhere using BioSequences
@everywhere using DataStructures: DefaultDict
@everywhere function twin(km)
DNAKmer(reverse_complement(DNASequence(km)))
end
function contig_to_string(c)
return "$(c[1])" * join(["$(x[end])" for x in c[2:end]])
end
@everywhere function get_contig(::Type{DNAKmer{k}}, kmers, km) where {k}
contig_fw = get_contig_forward(DNAKmer{k}, kmers, km)
contig_bw = get_contig_forward(DNAKmer{k}, kmers, twin(km))
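    # If the forward walk loops back to km, the contig is circular and already complete;
    # otherwise prepend the reverse complement of the backward walk (minus the shared start kmer).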
if km in neighbors(contig_fw[end])
return contig_fw
else
return [[twin(x) for x in reverse(contig_bw)[1:end-1]] ; contig_fw]
end
end
@everywhere function get_contig_forward(::Type{DNAKmer{k}}, kmers, km) where {k}
c_fw = DNAKmer[km]
while true
# check if forward exists, and only 1:
fw_neighbors = [kmer for kmer in neighbors(c_fw[end]) if canonical(kmer) in kmers]
if length(fw_neighbors) != 1
break
end
candidate = fw_neighbors[1]
if candidate == km || candidate == twin(km) || candidate == twin(c_fw[end])
break
end
bw_neighbors = [kmer for kmer in neighbors(twin(candidate)) if canonical(kmer) in kmers]
if length(bw_neighbors) != 1
break
end
push!(c_fw, candidate)
end
return c_fw
end
@everywhere function all_contigs(::Type{DNAKmer{k}}, kmers) where {k}
done = Set{DNAKmer}()
contigs = []
for kmer in kmers
if kmer in done
continue
end
contig = get_contig(DNAKmer{k}, kmers, kmer)
for c_kmer in contig
push!(done, canonical(c_kmer))
end
push!(contigs, contig)
end
return contigs
end
### classifies each kmer with its colors (alleles in which it is present)
@everywhere function find_all_kmer_colors(::Type{DNAKmer{k}}, fastafile) where {k}
# record = FASTASeqRecord{DNASequence}()
errors = 0
record = FASTA.Record()
kmer_class = DefaultDict{DNAKmer, Vector{Int16}}(() -> Int16[])
allele_ids = Int16[]
allele_idx::Int16 = 1
n_kmers = Int[]
seen = Set{DNAKmer}()
# reader = open(FASTA.Reader{DNASequence}, fastafile)
# reader = FASTA.Reader{DNASequence}, fastafile)
reader = open(FASTA.Reader, fastafile)
local seq_id, pos
while !eof(reader)
try
read!(reader, record)
seq_id = FASTA.identifier(record)
seen = Set{DNAKmer}()
for (pos, kmer) in each(DNAKmer{k}, FASTA.sequence(record))
can_kmer = canonical(kmer)
if !in(can_kmer, seen)
push!(kmer_class[can_kmer], allele_idx)
push!(seen, can_kmer)
end
end
push!(n_kmers, length(seen)) # number of unique kmers for this allele;
      # find the separator: assume "_" if present, otherwise "-";
      separator = in('_', seq_id) ? "_" : "-"
      # the counter allele_idx runs 1, 2, ... so the arrays stay sorted, but this does not
      # always match the allele numbering, so save the original id to restore it later;
allele_id = parse(Int16,split(FASTA.identifier(record), separator)[end])
push!(allele_ids, allele_id)
catch e
@error("Error parsing file $fastafile, around record $seq_id, most likely some unkown characters are present; this allele will be skipped. \nThis might make the results on this locus unreliable; please fix this FASTA file and rerun.")
errors += 1
if errors > 2
@error("Too many unrecoverable errors on this FASTA file; skipping this locus ... ")
exit(-1) # TODO: Could I try to skip just this locus?
end
end
allele_idx += 1
end
close(reader)
return kmer_class, allele_ids, n_kmers
end
@everywhere function build_db_graph(::Type{DNAKmer{k}}, fastafile, coverage_p=1) where {k}
# get kmers and colors (alleles)
kmer_class, allele_ids, n_kmers = find_all_kmer_colors(DNAKmer{k}, fastafile)
locus = splitext(basename(fastafile))[1]
# Solve a coverage ILP to find the kmers:
selected_kmers = DNAKmer[]
actual_coverages = []
# Coverage per allele:
coverages = [ Int(round(n_k * coverage_p)) for n_k in n_kmers]
# If coverage < 1, solve the ilp, otherwise just select all keys (all kmers)
selected_kmers, actual_coverages = coverage_p < 1 ? Base.invokelatest(kmer_coverage_ilp, locus, kmer_class, allele_ids, coverages) : (keys(kmer_class), coverages)
# filter more kmers with de Bruijn graph contigs
filtered_kmer_class, kmer_weights = filter_kmers_with_db_graph(DNAKmer{k}, kmer_class, selected_kmers)
# debug log:
sel_kmers, contig_kmers = length(selected_kmers), length(filtered_kmer_class)
# println("$locus\t$coverage_p\tAll kmers\t$sel_kmers")
# println("$locus\t$coverage_p\tdb Graph\t$contig_kmers")
return (filtered_kmer_class, allele_ids, kmer_weights, actual_coverages)
end
@everywhere function filter_kmers_with_db_graph(::Type{DNAKmer{k}}, kmer_class, selected_kmers) where {k}
# select 1 kmer per contig and set weight as length of contig; save kmers in filtered_kmer_class
contig_list = all_contigs(DNAKmer{k}, selected_kmers)
kmer_weights = Dict{UInt64, Int}() # Kmer is converted to UInt64, because DNAKmer apparently does not support write(), needed for the parallel pmap() call;
filtered_kmer_class = Dict{UInt64, Vector{Int16}}()
for contig in contig_list
kmer = canonical(contig[1])
kmer_uint = convert(UInt64,kmer)
kmer_weights[kmer_uint] = length(contig)
filtered_kmer_class[kmer_uint] = kmer_class[kmer]
end
return filtered_kmer_class, kmer_weights
end
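# Illustrative sketch of the same unitig walk on plain strings (the names here
# are mine, not MentaLiST's API): extend while the forward neighbour is unique
# in both directions, mirroring get_contig_forward above.
str_comp(b) = b == 'A' ? 'T' : b == 'C' ? 'G' : b == 'G' ? 'C' : 'A'
str_twin(km) = reverse(map(str_comp, km))
str_canonical(km) = min(km, str_twin(km))
str_fw(km, kmers) = [km[2:end] * b for b in ("A", "C", "G", "T") if str_canonical(km[2:end] * b) in kmers]

function str_extend(km, kmers)
    contig = [km]
    while true
        fw = str_fw(contig[end], kmers)
        length(fw) == 1 || break                                    # fork or dead end
        cand = fw[1]
        cand in (km, str_twin(km), str_twin(contig[end])) && break  # loop/palindrome guard
        length(str_fw(str_twin(cand), kmers)) == 1 || break         # predecessor must be unique too
        push!(contig, cand)
    end
    contig
end
# e.g. kmers = Set(str_canonical(seq[i:i+3]) for i in 1:length(seq)-3)  # canonical 4-mers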
|
{"hexsha": "21e6975b0d6f76ed684b5f781fb3a35c9e2c0533", "size": 5494, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/db_graph.jl", "max_stars_repo_name": "liberuser/MentaLiST", "max_stars_repo_head_hexsha": "fbfa6b8cbb8ef7fa823731d32ecfcb3e5af1bed5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-08-07T02:35:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T07:56:56.000Z", "max_issues_repo_path": "src/db_graph.jl", "max_issues_repo_name": "liberuser/MentaLiST", "max_issues_repo_head_hexsha": "fbfa6b8cbb8ef7fa823731d32ecfcb3e5af1bed5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 96, "max_issues_repo_issues_event_min_datetime": "2017-07-29T18:29:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T07:07:47.000Z", "max_forks_repo_path": "src/db_graph.jl", "max_forks_repo_name": "liberuser/MentaLiST", "max_forks_repo_head_hexsha": "fbfa6b8cbb8ef7fa823731d32ecfcb3e5af1bed5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-10-20T23:35:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-24T18:22:22.000Z", "avg_line_length": 38.1527777778, "max_line_length": 240, "alphanum_fraction": 0.6909355661, "num_tokens": 1584}
|
if lowercase(get(ENV, "CONCURRENTCOLLECTIONS_JL_ASSERTION", "false")) == "true"
import ConcurrentCollections
ConcurrentCollections.Implementations.enable_assertion()
@assert ConcurrentCollections.Implementations.assertion_enabled()
@info "ConcurrentCollections: Assertion enabled"
else
@info "ConcurrentCollections: Assertion disenabled (default)"
end
using TestFunctionRunner
TestFunctionRunner.@run(paths = ["../benchmark/ConcurrentCollectionsBenchmarks"])
|
{"hexsha": "f5a2dba92beb5bd1983d85d903727783e1975ead", "size": 480, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "JuliaConcurrent/ConcurrentCollections.jl", "max_stars_repo_head_hexsha": "09a8cbe25a1a0d3cb9d0fb0d03cad60a7d5ccebd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-10-05T01:45:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T02:29:04.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "tkf/ConcurrentCollections.jl", "max_issues_repo_head_hexsha": "4215b87ede80d0608b78e73b8e8f04f74e168551", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2021-05-10T03:01:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-22T04:00:49.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "tkf/ConcurrentCollections.jl", "max_forks_repo_head_hexsha": "4215b87ede80d0608b78e73b8e8f04f74e168551", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0, "max_line_length": 81, "alphanum_fraction": 0.8041666667, "num_tokens": 103}
|
program compare
integer a, b
read(*,*) a, b
if (a .lt. b) then
write(*, *) a, ' is less than ', b
else if (a .eq. b) then
write(*, *) a, ' is equal to ', b
else if (a .gt. b) then
write(*, *) a, ' is greater than ', b
end if
end
|
{"hexsha": "4685bb7c9b42d0cc26119f099a8c69fffe16b086", "size": 237, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Task/Integer-comparison/Fortran/integer-comparison-3.f", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Integer-comparison/Fortran/integer-comparison-3.f", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Integer-comparison/Fortran/integer-comparison-3.f", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 16.9285714286, "max_line_length": 39, "alphanum_fraction": 0.5400843882, "num_tokens": 95}
|
from similarities.similarity import Similarity
from scipy.spatial.distance import cosine
from pipeline.normalizer import words
class COSSimilarity(Similarity):
"""Cosine TF-based similarity."""
    def similarity(self, x, y):
        """Return 1 - cosine distance between the term-frequency vectors of x and y."""
        # shared vocabulary: one (count_x, count_y) slot per distinct word
        d = {}
wx = words(x)
wy = words(y)
for w in wx:
d[w] = [0, 0]
for w in wy:
d[w] = [0, 0]
for w in wx:
d[w][0] += 1
for w in wy:
d[w][1] += 1
e1 = [v[0] for v in d.values()]
e2 = [v[1] for v in d.values()]
return 1 - cosine(e1, e2)
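# Example usage (illustrative; assumes words() tokenizes on whitespace):
#
#   sim = COSSimilarity()
#   sim.similarity("the cat sat", "the cat sat")  # -> 1.0 (identical TF vectors)
#   sim.similarity("alpha beta", "gamma delta")   # -> 0.0 (orthogonal TF vectors)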
|
{"hexsha": "b1f3a6e763a6811bffbab14f03a8670233e8ae26", "size": 594, "ext": "py", "lang": "Python", "max_stars_repo_path": "similarities/cos.py", "max_stars_repo_name": "ekiuled/pairwise-similarities", "max_stars_repo_head_hexsha": "db2f909a74ed2d9296b1a3814facb3e5a0efe28d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "similarities/cos.py", "max_issues_repo_name": "ekiuled/pairwise-similarities", "max_issues_repo_head_hexsha": "db2f909a74ed2d9296b1a3814facb3e5a0efe28d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "similarities/cos.py", "max_forks_repo_name": "ekiuled/pairwise-similarities", "max_forks_repo_head_hexsha": "db2f909a74ed2d9296b1a3814facb3e5a0efe28d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8461538462, "max_line_length": 46, "alphanum_fraction": 0.5, "include": true, "reason": "from scipy", "num_tokens": 172}
|
function nonLinChem(dy,y,p,t)
dy[1] = -y[1]
dy[2] = y[1]-(y[2])^2
dy[3] = (y[2])^2
end
y0 = [1.0;0.0;0.0]
tspan = (0.0, 20.0)
# Analytic solution via modified Bessel functions; y3 = 1 - y1 - y2 by mass conservation.
function nlc_analytic(u0, p, t)
    s = sqrt(exp(-t))
    c = 2 * besselk(1, 2) / besseli(1, 2)
    y2 = (2 * s * besselk(1, 2 * s) - c * s * besseli(1, 2 * s)) /
         (2 * besselk(0, 2 * s) + c * besseli(0, 2 * s))
    [exp(-t); y2; 1 - exp(-t) - y2]
end
nonLinChem_f = ODEFunction(nonLinChem, analytic = nlc_analytic)
prob_ode_nonlinchem = ODEProblem(nonLinChem_f, y0, tspan)
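# Quick sanity check (illustrative, not part of this library): y1(t) = exp(-t)
# exactly, and y1 + y2 + y3 is conserved because the derivatives sum to zero.
#
#   using OrdinaryDiffEq
#   sol = solve(prob_ode_nonlinchem, Vern9(), abstol = 1e-12, reltol = 1e-12)
#   @assert all(t -> isapprox(sol(t)[1], exp(-t); atol = 1e-8), 0:0.5:20)
#   @assert all(t -> isapprox(sum(sol(t)), 1.0; atol = 1e-8), 0:0.5:20)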
|
{"hexsha": "e2406cf468f195dcc92b131f5acbaff3f659b5fb", "size": 681, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ode/nonlinchem.jl", "max_stars_repo_name": "devmotion/DiffEqProblemLibrary.jl", "max_stars_repo_head_hexsha": "7aa6e819b3aa58f1f6e42f7e2d2c69351e09f5c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ode/nonlinchem.jl", "max_issues_repo_name": "devmotion/DiffEqProblemLibrary.jl", "max_issues_repo_head_hexsha": "7aa6e819b3aa58f1f6e42f7e2d2c69351e09f5c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ode/nonlinchem.jl", "max_forks_repo_name": "devmotion/DiffEqProblemLibrary.jl", "max_forks_repo_head_hexsha": "7aa6e819b3aa58f1f6e42f7e2d2c69351e09f5c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.3846153846, "max_line_length": 208, "alphanum_fraction": 0.637298091, "num_tokens": 338}
|
import numpy as np
import pytest
import learnmolsim as lms
@pytest.fixture
def state():
s = lms.state.State(2,lms.state.Box(10.0),mass=10.0)
s.positions = [[1,1,1],[2,2,2]]
s.velocities = [[1,-1,1],[-2,2,-2]]
s.energies = [3,-4]
s.forces = [[1,2,3],[-1,-2,-3]]
return s
@pytest.fixture
def thermo():
return lms.analyze.Thermodynamics()
def test_kinetic_energy(state,thermo):
assert thermo.kinetic_energy(state) == pytest.approx(75.)
def test_potential_energy(state,thermo):
assert thermo.potential_energy(state) == pytest.approx(-1.)
def test_kT(state,thermo):
assert thermo.kT(state) == pytest.approx(25.)
def test_pressure(state,thermo):
# ideal gas + virial
pid = 2*25/10**3
pex = (6-12)/(3*10**3)
assert thermo.pressure(state) == pytest.approx(pid+pex)
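# For reference, the pressure assertion above is the standard virial expression
#
#     P = N*kT/V + (1 / (3*V)) * sum_i r_i . F_i
#
# with N = 2, kT = 25, V = 10**3 from the fixture, and the virial sum
# sum_i r_i . F_i = (1 + 2 + 3) + (-2 - 4 - 6) = 6 - 12.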
|
{"hexsha": "e7400a373093ae04fca36bcc8cd73ffd3939276a", "size": 820, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_thermodynamics.py", "max_stars_repo_name": "mayukh33/Learn_molecularsim", "max_stars_repo_head_hexsha": "020badd574287c8a89796efa05616762c5153e10", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_thermodynamics.py", "max_issues_repo_name": "mayukh33/Learn_molecularsim", "max_issues_repo_head_hexsha": "020badd574287c8a89796efa05616762c5153e10", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-12T20:28:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-12T20:28:20.000Z", "max_forks_repo_path": "tests/test_thermodynamics.py", "max_forks_repo_name": "mayukh33/Learn_molecularsim", "max_forks_repo_head_hexsha": "020badd574287c8a89796efa05616762c5153e10", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-26T16:55:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-05T03:32:42.000Z", "avg_line_length": 24.8484848485, "max_line_length": 63, "alphanum_fraction": 0.656097561, "include": true, "reason": "import numpy", "num_tokens": 266}
|
import numpy as np
from numpy.random import RandomState
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Module
from torch.nn import init
from hypercomplex_ops import *
import math
import sys
########################
## STANDARD PHM LAYER ##
########################
class PHMLinear(nn.Module):
def __init__(self, n, in_features, out_features, cuda=True):
super(PHMLinear, self).__init__()
self.n = n
self.in_features = in_features
self.out_features = out_features
self.cuda = cuda
self.bias = nn.Parameter(torch.Tensor(out_features))
self.A = nn.Parameter(torch.nn.init.xavier_uniform_(torch.zeros((n, n, n))))
self.S = nn.Parameter(torch.nn.init.xavier_uniform_(torch.zeros((n, self.out_features//n, self.in_features//n))))
self.weight = torch.zeros((self.out_features, self.in_features))
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kronecker_product1(self, a, b): #adapted from Bayer Research's implementation
siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(b.shape[-2:]))
res = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
siz0 = res.shape[:-4]
out = res.reshape(siz0 + siz1)
return out
def kronecker_product2(self):
H = torch.zeros((self.out_features, self.in_features))
for i in range(self.n):
H = H + torch.kron(self.A[i], self.S[i])
return H
def forward(self, input):
self.weight = torch.sum(self.kronecker_product1(self.A, self.S), dim=0)
# self.weight = self.kronecker_product2() <- SLOWER
input = input.type(dtype=self.weight.type())
return F.linear(input, weight=self.weight, bias=self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None)
  def reset_parameters(self) -> None:
    init.kaiming_uniform_(self.A, a=math.sqrt(5))
    init.kaiming_uniform_(self.S, a=math.sqrt(5))
    # fan-in is taken from the assembled weight, matching __init__
    fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
    bound = 1 / math.sqrt(fan_in)
    init.uniform_(self.bias, -bound, bound)
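# Minimal usage sketch (illustrative): with n = 4 the layer stores n^3 entries
# for A plus (out/n)*(in/n) entries per S slice instead of a full out*in weight,
# and the batched kronecker_product1 agrees with torch.kron slice by slice:
#
#   layer = PHMLinear(n=4, in_features=8, out_features=8, cuda=False)
#   y = layer(torch.randn(2, 8))                                   # shape (2, 8)
#   w1 = torch.sum(layer.kronecker_product1(layer.A, layer.S), dim=0)
#   w2 = sum(torch.kron(layer.A[i], layer.S[i]) for i in range(4))
#   assert torch.allclose(w1, w2)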
#############################
## CONVOLUTIONAL PH LAYER ##
#############################
class PHConv(Module):
def __init__(self, n, in_features, out_features, kernel_size, padding=0, stride=1, cuda=True):
super(PHConv, self).__init__()
self.n = n
self.in_features = in_features
self.out_features = out_features
self.padding = padding
self.stride = stride
self.cuda = cuda
self.bias = nn.Parameter(torch.Tensor(out_features))
self.A = nn.Parameter(torch.nn.init.xavier_uniform_(torch.zeros((n, n, n))))
self.F = nn.Parameter(torch.nn.init.xavier_uniform_(
torch.zeros((n, self.out_features//n, self.in_features//n, kernel_size, kernel_size))))
self.weight = torch.zeros((self.out_features, self.in_features))
self.kernel_size = kernel_size
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kronecker_product1(self, A, F):
siz1 = torch.Size(torch.tensor(A.shape[-2:]) * torch.tensor(F.shape[-4:-2]))
siz2 = torch.Size(torch.tensor(F.shape[-2:]))
res = A.unsqueeze(-1).unsqueeze(-3).unsqueeze(-1).unsqueeze(-1) * F.unsqueeze(-4).unsqueeze(-6)
siz0 = res.shape[:1]
out = res.reshape(siz0 + siz1 + siz2)
return out
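  # Shape walk-through (for reference): A is (n, n, n) and F is
  # (n, out/n, in/n, k, k); the unsqueeze chains broadcast the product to
  # (n, n, out/n, n, in/n, k, k) and the reshape merges (n, out/n) -> out and
  # (n, in/n) -> in, so slice i equals torch.kron(A[i], F[i]) per spatial tap.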
def kronecker_product2(self):
H = torch.zeros((self.out_features, self.in_features, self.kernel_size, self.kernel_size))
if self.cuda:
H = H.cuda()
for i in range(self.n):
kron_prod = torch.kron(self.A[i], self.F[i]).view(self.out_features, self.in_features, self.kernel_size, self.kernel_size)
H = H + kron_prod
return H
  def forward(self, input):
    self.weight = torch.sum(self.kronecker_product1(self.A, self.F), dim=0)
    # self.weight = self.kronecker_product2()
    if self.cuda:
      self.weight = self.weight.cuda()
    input = input.type(dtype=self.weight.type())
    return F.conv2d(input, weight=self.weight, bias=self.bias, stride=self.stride,
        padding=self.padding)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None)
  def reset_parameters(self) -> None:
    init.kaiming_uniform_(self.A, a=math.sqrt(5))
    init.kaiming_uniform_(self.F, a=math.sqrt(5))
    # fan-in is taken from the assembled weight, matching __init__
    fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
    bound = 1 / math.sqrt(fan_in)
    init.uniform_(self.bias, -bound, bound)
class KroneckerConv(Module):
r"""Applies a Quaternion Convolution to the incoming data.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
dilatation=1, padding=0, groups=1, bias=True, init_criterion='glorot',
weight_init='quaternion', seed=None, operation='convolution2d', rotation=False,
quaternion_format=True, scale=False, learn_A=False, cuda=True, first_layer=False):
super(KroneckerConv, self).__init__()
self.in_channels = in_channels // 4
self.out_channels = out_channels // 4
self.stride = stride
self.padding = padding
self.groups = groups
self.dilatation = dilatation
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0,1234)
self.rng = RandomState(self.seed)
self.operation = operation
self.rotation = rotation
self.quaternion_format = quaternion_format
self.winit = {'quaternion': quaternion_init,
'unitary' : unitary_init,
'random' : random_init}[self.weight_init]
self.scale = scale
self.learn_A = learn_A
self.cuda = cuda
self.first_layer = first_layer
(self.kernel_size, self.w_shape) = get_kernel_and_weight_shape( self.operation,
self.in_channels, self.out_channels, kernel_size )
self.r_weight = Parameter(torch.Tensor(*self.w_shape))
self.i_weight = Parameter(torch.Tensor(*self.w_shape))
self.j_weight = Parameter(torch.Tensor(*self.w_shape))
self.k_weight = Parameter(torch.Tensor(*self.w_shape))
if self.scale:
self.scale_param = Parameter(torch.Tensor(self.r_weight.shape))
else:
self.scale_param = None
if self.rotation:
self.zero_kernel = Parameter(torch.zeros(self.r_weight.shape), requires_grad=False)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
affect_init_conv(self.r_weight, self.i_weight, self.j_weight, self.k_weight,
self.kernel_size, self.winit, self.rng, self.init_criterion)
if self.scale_param is not None:
torch.nn.init.xavier_uniform_(self.scale_param.data)
if self.bias is not None:
self.bias.data.zero_()
def forward(self, input):
    if self.rotation:
      # quaternion rotation is not implemented for the Kronecker variant;
      # see quaternion_conv_rotation in hypercomplex_ops for the quaternion case
      raise NotImplementedError("rotation is not supported by KroneckerConv")
else:
return kronecker_conv(input, self.r_weight, self.i_weight, self.j_weight,
self.k_weight, self.bias, self.stride, self.padding, self.groups, self.dilatation, self.learn_A, self.cuda, self.first_layer)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_channels=' + str(self.in_channels) \
+ ', out_channels=' + str(self.out_channels) \
+ ', bias=' + str(self.bias is not None) \
+ ', kernel_size=' + str(self.kernel_size) \
+ ', stride=' + str(self.stride) \
+ ', padding=' + str(self.padding) \
+ ', init_criterion=' + str(self.init_criterion) \
+ ', weight_init=' + str(self.weight_init) \
+ ', seed=' + str(self.seed) \
+ ', rotation=' + str(self.rotation) \
+ ', q_format=' + str(self.quaternion_format) \
+ ', operation=' + str(self.operation) + ')'
class QuaternionTransposeConv(Module):
r"""Applies a Quaternion Transposed Convolution (or Deconvolution) to the incoming data.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
dilatation=1, padding=0, output_padding=0, groups=1, bias=True, init_criterion='he',
weight_init='quaternion', seed=None, operation='convolution2d', rotation=False,
quaternion_format=False):
super(QuaternionTransposeConv, self).__init__()
self.in_channels = in_channels // 4
self.out_channels = out_channels // 4
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.groups = groups
self.dilatation = dilatation
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0,1234)
self.rng = RandomState(self.seed)
self.operation = operation
self.rotation = rotation
self.quaternion_format = quaternion_format
self.winit = {'quaternion': quaternion_init,
'unitary' : unitary_init,
'random' : random_init}[self.weight_init]
(self.kernel_size, self.w_shape) = get_kernel_and_weight_shape( self.operation,
self.out_channels, self.in_channels, kernel_size )
self.r_weight = Parameter(torch.Tensor(*self.w_shape))
self.i_weight = Parameter(torch.Tensor(*self.w_shape))
self.j_weight = Parameter(torch.Tensor(*self.w_shape))
self.k_weight = Parameter(torch.Tensor(*self.w_shape))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
affect_init_conv(self.r_weight, self.i_weight, self.j_weight, self.k_weight,
self.kernel_size, self.winit, self.rng, self.init_criterion)
if self.bias is not None:
self.bias.data.zero_()
def forward(self, input):
if self.rotation:
return quaternion_tranpose_conv_rotation(input, self.r_weight, self.i_weight,
self.j_weight, self.k_weight, self.bias, self.stride, self.padding,
self.output_padding, self.groups, self.dilatation, self.quaternion_format)
else:
return quaternion_transpose_conv(input, self.r_weight, self.i_weight, self.j_weight,
self.k_weight, self.bias, self.stride, self.padding, self.output_padding,
self.groups, self.dilatation)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_channels=' + str(self.in_channels) \
+ ', out_channels=' + str(self.out_channels) \
+ ', bias=' + str(self.bias is not None) \
+ ', kernel_size=' + str(self.kernel_size) \
+ ', stride=' + str(self.stride) \
+ ', padding=' + str(self.padding) \
        + ', dilatation=' + str(self.dilatation) \
+ ', init_criterion=' + str(self.init_criterion) \
+ ', weight_init=' + str(self.weight_init) \
+ ', seed=' + str(self.seed) \
+ ', operation=' + str(self.operation) + ')'
class QuaternionConv(Module):
r"""Applies a Quaternion Convolution to the incoming data.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
dilatation=1, padding=0, groups=1, bias=True, init_criterion='glorot',
weight_init='quaternion', seed=None, operation='convolution2d', rotation=False, quaternion_format=True, scale=False):
super(QuaternionConv, self).__init__()
self.in_channels = in_channels // 4
self.out_channels = out_channels // 4
self.stride = stride
self.padding = padding
self.groups = groups
self.dilatation = dilatation
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0,1234)
self.rng = RandomState(self.seed)
self.operation = operation
self.rotation = rotation
self.quaternion_format = quaternion_format
self.winit = {'quaternion': quaternion_init,
'unitary' : unitary_init,
'random' : random_init}[self.weight_init]
self.scale = scale
(self.kernel_size, self.w_shape) = get_kernel_and_weight_shape( self.operation,
self.in_channels, self.out_channels, kernel_size )
self.r_weight = Parameter(torch.Tensor(*self.w_shape))
self.i_weight = Parameter(torch.Tensor(*self.w_shape))
self.j_weight = Parameter(torch.Tensor(*self.w_shape))
self.k_weight = Parameter(torch.Tensor(*self.w_shape))
if self.scale:
self.scale_param = Parameter(torch.Tensor(self.r_weight.shape))
else:
self.scale_param = None
if self.rotation:
self.zero_kernel = Parameter(torch.zeros(self.r_weight.shape), requires_grad=False)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
affect_init_conv(self.r_weight, self.i_weight, self.j_weight, self.k_weight,
self.kernel_size, self.winit, self.rng, self.init_criterion)
if self.scale_param is not None:
torch.nn.init.xavier_uniform_(self.scale_param.data)
if self.bias is not None:
self.bias.data.zero_()
def forward(self, input):
if self.rotation:
return quaternion_conv_rotation(input, self.zero_kernel, self.r_weight, self.i_weight, self.j_weight,
self.k_weight, self.bias, self.stride, self.padding, self.groups, self.dilatation,
self.quaternion_format, self.scale_param)
else:
return quaternion_conv(input, self.r_weight, self.i_weight, self.j_weight,
self.k_weight, self.bias, self.stride, self.padding, self.groups, self.dilatation)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_channels=' + str(self.in_channels) \
+ ', out_channels=' + str(self.out_channels) \
+ ', bias=' + str(self.bias is not None) \
+ ', kernel_size=' + str(self.kernel_size) \
+ ', stride=' + str(self.stride) \
+ ', padding=' + str(self.padding) \
+ ', init_criterion=' + str(self.init_criterion) \
+ ', weight_init=' + str(self.weight_init) \
+ ', seed=' + str(self.seed) \
+ ', rotation=' + str(self.rotation) \
+ ', q_format=' + str(self.quaternion_format) \
+ ', operation=' + str(self.operation) + ')'
class QuaternionLinearAutograd(Module):
r"""Applies a quaternion linear transformation to the incoming data. A custom
Autograd function is call to drastically reduce the VRAM consumption. Nonetheless, computing
time is also slower compared to QuaternionLinear().
"""
def __init__(self, in_features, out_features, bias=True,
init_criterion='glorot', weight_init='quaternion',
seed=None, rotation=False, quaternion_format=True, scale=False):
super(QuaternionLinearAutograd, self).__init__()
self.in_features = in_features//4
self.out_features = out_features//4
self.rotation = rotation
self.quaternion_format = quaternion_format
self.r_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.i_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.j_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.k_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.scale = scale
if self.scale:
self.scale_param = Parameter(torch.Tensor(self.in_features, self.out_features))
else:
self.scale_param = None
if self.rotation:
self.zero_kernel = Parameter(torch.zeros(self.r_weight.shape), requires_grad=False)
if bias:
self.bias = Parameter(torch.Tensor(self.out_features*4))
else:
self.register_parameter('bias', None)
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0,1234)
self.rng = RandomState(self.seed)
self.reset_parameters()
def reset_parameters(self):
winit = {'quaternion': quaternion_init, 'unitary': unitary_init, 'random': random_init}[self.weight_init]
if self.scale_param is not None:
torch.nn.init.xavier_uniform_(self.scale_param.data)
if self.bias is not None:
self.bias.data.fill_(0)
affect_init(self.r_weight, self.i_weight, self.j_weight, self.k_weight, winit,
self.rng, self.init_criterion)
def forward(self, input):
# See the autograd section for explanation of what happens here.
if self.rotation:
return quaternion_linear_rotation(input, self.zero_kernel, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias, self.quaternion_format, self.scale_param)
else:
return quaternion_linear(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) \
+ ', bias=' + str(self.bias is not None) \
+ ', init_criterion=' + str(self.init_criterion) \
+ ', weight_init=' + str(self.weight_init) \
+ ', rotation=' + str(self.rotation) \
+ ', seed=' + str(self.seed) + ')'
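# For reference, quaternion_linear / quaternion_conv (defined in
# hypercomplex_ops) follow the usual Hamilton-product construction
# (e.g. Parcollet et al.): the four component weights r, i, j, k are stacked
# into one real matrix
#
#     [ r  -i  -j  -k ]
#     [ i   r  -k   j ]
#     [ j   k   r  -i ]
#     [ k  -j   i   r ]
#
# acting on the concatenated (r, i, j, k) parts of the input, which is why
# in_features and out_features are divided by 4 in these classes.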
class QuaternionLinear(Module):
r"""Applies a quaternion linear transformation to the incoming data.
"""
def __init__(self, in_features, out_features, bias=True,
init_criterion='he', weight_init='quaternion',
seed=None):
super(QuaternionLinear, self).__init__()
self.in_features = in_features//4
self.out_features = out_features//4
self.r_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.i_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.j_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
self.k_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
if bias:
self.bias = Parameter(torch.Tensor(self.out_features*4))
else:
self.register_parameter('bias', None)
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0,1234)
self.rng = RandomState(self.seed)
self.reset_parameters()
def reset_parameters(self):
winit = {'quaternion': quaternion_init,
'unitary': unitary_init}[self.weight_init]
if self.bias is not None:
self.bias.data.fill_(0)
affect_init(self.r_weight, self.i_weight, self.j_weight, self.k_weight, winit,
self.rng, self.init_criterion)
def forward(self, input):
# See the autograd section for explanation of what happens here.
if input.dim() == 3:
T, N, C = input.size()
input = input.view(T * N, C)
output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)
output = output.view(T, N, output.size(1))
elif input.dim() == 2:
output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)
else:
raise NotImplementedError
return output
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) \
+ ', bias=' + str(self.bias is not None) \
+ ', init_criterion=' + str(self.init_criterion) \
+ ', weight_init=' + str(self.weight_init) \
+ ', seed=' + str(self.seed) + ')'
|
{"hexsha": "990820adf8a432ef8be4eddea8c16b8670202a6f", "size": 22229, "ext": "py", "lang": "Python", "max_stars_repo_path": "sound-event-detection/hypercomplex_layers.py", "max_stars_repo_name": "Javesun99/HyperNets", "max_stars_repo_head_hexsha": "6ebe64c440c7bc1e7e2ea39c898fe90cf850d61e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-10-09T20:11:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T18:02:57.000Z", "max_issues_repo_path": "sound-event-detection/hypercomplex_layers.py", "max_issues_repo_name": "Javesun99/HyperNets", "max_issues_repo_head_hexsha": "6ebe64c440c7bc1e7e2ea39c898fe90cf850d61e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-16T00:07:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-31T10:44:15.000Z", "max_forks_repo_path": "sound-event-detection/hypercomplex_layers.py", "max_forks_repo_name": "Javesun99/HyperNets", "max_forks_repo_head_hexsha": "6ebe64c440c7bc1e7e2ea39c898fe90cf850d61e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-28T09:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T11:52:17.000Z", "avg_line_length": 43.5009784736, "max_line_length": 183, "alphanum_fraction": 0.6078995906, "include": true, "reason": "import numpy", "num_tokens": 4952}
|
/* GStreamer
* Copyright (C) <2005> Thomas Vander Stichele <thomas at apestaart dot org>
* Copyright (C) <2006> Tim-Philipp Müller <tim centricular net>
*
* gstutils.c: Unit test for functions in gstutils
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gst/check/gstcheck.h>
#define SPECIAL_POINTER(x) ((void*)(19283847+(x)))
static int n_data_probes = 0;
static int n_buffer_probes = 0;
static int n_event_probes = 0;
static GstPadProbeReturn
probe_do_nothing (GstPad * pad, GstPadProbeInfo * info, gpointer data)
{
GstMiniObject *obj = GST_PAD_PROBE_INFO_DATA (info);
GST_DEBUG_OBJECT (pad, "is buffer:%d", GST_IS_BUFFER (obj));
return GST_PAD_PROBE_OK;
}
static GstPadProbeReturn
data_probe (GstPad * pad, GstPadProbeInfo * info, gpointer data)
{
GstMiniObject *obj = GST_PAD_PROBE_INFO_DATA (info);
n_data_probes++;
GST_DEBUG_OBJECT (pad, "data probe %d", n_data_probes);
g_assert (GST_IS_BUFFER (obj) || GST_IS_EVENT (obj));
g_assert (data == SPECIAL_POINTER (0));
return GST_PAD_PROBE_OK;
}
static GstPadProbeReturn
buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer data)
{
GstBuffer *obj = GST_PAD_PROBE_INFO_BUFFER (info);
n_buffer_probes++;
GST_DEBUG_OBJECT (pad, "buffer probe %d", n_buffer_probes);
g_assert (GST_IS_BUFFER (obj));
g_assert (data == SPECIAL_POINTER (1));
return GST_PAD_PROBE_OK;
}
static GstPadProbeReturn
event_probe (GstPad * pad, GstPadProbeInfo * info, gpointer data)
{
GstEvent *obj = GST_PAD_PROBE_INFO_EVENT (info);
n_event_probes++;
GST_DEBUG_OBJECT (pad, "event probe %d [%s]",
n_event_probes, GST_EVENT_TYPE_NAME (obj));
g_assert (GST_IS_EVENT (obj));
g_assert (data == SPECIAL_POINTER (2));
return GST_PAD_PROBE_OK;
}
GST_START_TEST (test_buffer_probe_n_times)
{
GstElement *pipeline, *fakesrc, *fakesink;
GstBus *bus;
GstMessage *message;
GstPad *pad;
pipeline = gst_element_factory_make ("pipeline", NULL);
fakesrc = gst_element_factory_make ("fakesrc", NULL);
fakesink = gst_element_factory_make ("fakesink", NULL);
g_object_set (fakesrc, "num-buffers", (int) 10, NULL);
gst_bin_add_many (GST_BIN (pipeline), fakesrc, fakesink, NULL);
gst_element_link (fakesrc, fakesink);
pad = gst_element_get_static_pad (fakesink, "sink");
/* add the probes we need for the test */
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_DATA_BOTH, data_probe,
SPECIAL_POINTER (0), NULL);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER, buffer_probe,
SPECIAL_POINTER (1), NULL);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_EVENT_BOTH, event_probe,
SPECIAL_POINTER (2), NULL);
/* add some string probes just to test that the data is free'd
* properly as it should be */
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_DATA_BOTH, probe_do_nothing,
g_strdup ("data probe string"), (GDestroyNotify) g_free);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER, probe_do_nothing,
g_strdup ("buffer probe string"), (GDestroyNotify) g_free);
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_EVENT_BOTH, probe_do_nothing,
g_strdup ("event probe string"), (GDestroyNotify) g_free);
gst_object_unref (pad);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
message = gst_bus_poll (bus, GST_MESSAGE_EOS, -1);
gst_message_unref (message);
gst_object_unref (bus);
g_assert (n_buffer_probes == 10); /* one for every buffer */
g_assert (n_event_probes == 4); /* stream-start, new segment, latency and eos */
g_assert (n_data_probes == 14); /* duh */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
/* make sure nothing was sent in addition to the above when shutting down */
g_assert (n_buffer_probes == 10); /* one for every buffer */
g_assert (n_event_probes == 4); /* stream-start, new segment, latency and eos */
g_assert (n_data_probes == 14); /* duh */
} GST_END_TEST;
static int n_data_probes_once = 0;
static int n_buffer_probes_once = 0;
static int n_event_probes_once = 0;
static GstPadProbeReturn
data_probe_once (GstPad * pad, GstPadProbeInfo * info, guint * data)
{
GstMiniObject *obj = GST_PAD_PROBE_INFO_DATA (info);
n_data_probes_once++;
g_assert (GST_IS_BUFFER (obj) || GST_IS_EVENT (obj));
gst_pad_remove_probe (pad, *data);
return GST_PAD_PROBE_OK;
}
static GstPadProbeReturn
buffer_probe_once (GstPad * pad, GstPadProbeInfo * info, guint * data)
{
GstBuffer *obj = GST_PAD_PROBE_INFO_BUFFER (info);
n_buffer_probes_once++;
g_assert (GST_IS_BUFFER (obj));
gst_pad_remove_probe (pad, *data);
return GST_PAD_PROBE_OK;
}
static GstPadProbeReturn
event_probe_once (GstPad * pad, GstPadProbeInfo * info, guint * data)
{
GstEvent *obj = GST_PAD_PROBE_INFO_EVENT (info);
n_event_probes_once++;
g_assert (GST_IS_EVENT (obj));
gst_pad_remove_probe (pad, *data);
return GST_PAD_PROBE_OK;
}
GST_START_TEST (test_buffer_probe_once)
{
GstElement *pipeline, *fakesrc, *fakesink;
GstBus *bus;
GstMessage *message;
GstPad *pad;
guint id1, id2, id3;
pipeline = gst_element_factory_make ("pipeline", NULL);
fakesrc = gst_element_factory_make ("fakesrc", NULL);
fakesink = gst_element_factory_make ("fakesink", NULL);
g_object_set (fakesrc, "num-buffers", (int) 10, NULL);
gst_bin_add_many (GST_BIN (pipeline), fakesrc, fakesink, NULL);
gst_element_link (fakesrc, fakesink);
pad = gst_element_get_static_pad (fakesink, "sink");
id1 =
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_DATA_BOTH,
(GstPadProbeCallback) data_probe_once, &id1, NULL);
id2 =
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
(GstPadProbeCallback) buffer_probe_once, &id2, NULL);
id3 =
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_EVENT_BOTH,
(GstPadProbeCallback) event_probe_once, &id3, NULL);
gst_object_unref (pad);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
message = gst_bus_poll (bus, GST_MESSAGE_EOS, -1);
gst_message_unref (message);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
g_assert (n_buffer_probes_once == 1); /* can we hit it and quit? */
g_assert (n_event_probes_once == 1); /* i said, can we hit it and quit? */
g_assert (n_data_probes_once == 1); /* let's hit it and quit!!! */
} GST_END_TEST;
GST_START_TEST (test_math_scale)
{
fail_if (gst_util_uint64_scale_int (1, 1, 1) != 1);
fail_if (gst_util_uint64_scale_int (10, 10, 1) != 100);
fail_if (gst_util_uint64_scale_int (10, 10, 2) != 50);
fail_if (gst_util_uint64_scale_int (0, 10, 2) != 0);
fail_if (gst_util_uint64_scale_int (0, 0, 2) != 0);
fail_if (gst_util_uint64_scale_int (G_MAXUINT32, 5, 1) != G_MAXUINT32 * 5LL);
fail_if (gst_util_uint64_scale_int (G_MAXUINT32, 10, 2) != G_MAXUINT32 * 5LL);
fail_if (gst_util_uint64_scale_int (G_MAXUINT32, 1, 5) != G_MAXUINT32 / 5LL);
fail_if (gst_util_uint64_scale_int (G_MAXUINT32, 2, 10) != G_MAXUINT32 / 5LL);
/* not quite overflow */
fail_if (gst_util_uint64_scale_int (G_MAXUINT64 - 1, 10,
10) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale_int (G_MAXUINT64 - 1, G_MAXINT32,
G_MAXINT32) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale_int (G_MAXUINT64 - 100, G_MAXINT32,
G_MAXINT32) != G_MAXUINT64 - 100);
/* overflow */
fail_if (gst_util_uint64_scale_int (G_MAXUINT64 - 1, 10, 1) != G_MAXUINT64);
fail_if (gst_util_uint64_scale_int (G_MAXUINT64 - 1, G_MAXINT32,
1) != G_MAXUINT64);
} GST_END_TEST;
GST_START_TEST (test_math_scale_round)
{
fail_if (gst_util_uint64_scale_int_round (2, 1, 2) != 1);
fail_if (gst_util_uint64_scale_int_round (3, 1, 2) != 2);
fail_if (gst_util_uint64_scale_int_round (4, 1, 2) != 2);
fail_if (gst_util_uint64_scale_int_round (200, 100, 20000) != 1);
fail_if (gst_util_uint64_scale_int_round (299, 100, 20000) != 1);
fail_if (gst_util_uint64_scale_int_round (300, 100, 20000) != 2);
fail_if (gst_util_uint64_scale_int_round (301, 100, 20000) != 2);
fail_if (gst_util_uint64_scale_int_round (400, 100, 20000) != 2);
} GST_END_TEST;
GST_START_TEST (test_math_scale_ceil)
{
fail_if (gst_util_uint64_scale_int_ceil (2, 1, 2) != 1);
fail_if (gst_util_uint64_scale_int_ceil (3, 1, 2) != 2);
fail_if (gst_util_uint64_scale_int_ceil (4, 1, 2) != 2);
fail_if (gst_util_uint64_scale_int_ceil (200, 100, 20000) != 1);
fail_if (gst_util_uint64_scale_int_ceil (299, 100, 20000) != 2);
fail_if (gst_util_uint64_scale_int_ceil (300, 100, 20000) != 2);
fail_if (gst_util_uint64_scale_int_ceil (301, 100, 20000) != 2);
fail_if (gst_util_uint64_scale_int_ceil (400, 100, 20000) != 2);
} GST_END_TEST;
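/* For reference, the three rounding modes exercised above reduce to the usual
 * integer idioms (sketch; the real implementations also guard against
 * overflow of val * num):
 *
 *   floor: (val * num) / denom
 *   round: (val * num + denom / 2) / denom
 *   ceil : (val * num + denom - 1) / denom
 *
 * e.g. val = 3, num = 1, denom = 2 gives 1, 2 and 2, matching the checks.
 */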
GST_START_TEST (test_math_scale_uint64)
{
fail_if (gst_util_uint64_scale (1, 1, 1) != 1);
fail_if (gst_util_uint64_scale (10, 10, 1) != 100);
fail_if (gst_util_uint64_scale (10, 10, 2) != 50);
fail_if (gst_util_uint64_scale (0, 10, 2) != 0);
fail_if (gst_util_uint64_scale (0, 0, 2) != 0);
fail_if (gst_util_uint64_scale (G_MAXUINT32, 5, 1) != G_MAXUINT32 * 5LL);
fail_if (gst_util_uint64_scale (G_MAXUINT32, 10, 2) != G_MAXUINT32 * 5LL);
fail_if (gst_util_uint64_scale (G_MAXUINT32, 1, 5) != G_MAXUINT32 / 5LL);
fail_if (gst_util_uint64_scale (G_MAXUINT32, 2, 10) != G_MAXUINT32 / 5LL);
/* not quite overflow */
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, 10, 10) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, G_MAXUINT32,
G_MAXUINT32) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 100, G_MAXUINT32,
G_MAXUINT32) != G_MAXUINT64 - 100);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, 10, 10) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, G_MAXUINT64,
G_MAXUINT64) != G_MAXUINT64 - 1);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 100, G_MAXUINT64,
G_MAXUINT64) != G_MAXUINT64 - 100);
/* overflow */
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, 10, 1) != G_MAXUINT64);
fail_if (gst_util_uint64_scale (G_MAXUINT64 - 1, G_MAXUINT64,
1) != G_MAXUINT64);
} GST_END_TEST;
GST_START_TEST (test_math_scale_random)
{
guint64 val, num, denom, res;
GRand *rand;
gint i;
rand = g_rand_new ();
i = 100000;
while (i--) {
guint64 check, diff;
val = ((guint64) g_rand_int (rand)) << 32 | g_rand_int (rand);
num = ((guint64) g_rand_int (rand)) << 32 | g_rand_int (rand);
denom = ((guint64) g_rand_int (rand)) << 32 | g_rand_int (rand);
res = gst_util_uint64_scale (val, num, denom);
check = gst_gdouble_to_guint64 (gst_guint64_to_gdouble (val) *
gst_guint64_to_gdouble (num) / gst_guint64_to_gdouble (denom));
if (res < G_MAXUINT64 && check < G_MAXUINT64) {
if (res > check)
diff = res - check;
else
diff = check - res;
      /* some arbitrary value, really.. someone do the proper math to get
       * the upper bound */
      fail_if (diff > 20000);
}
}
g_rand_free (rand);
}
GST_END_TEST;
GST_START_TEST (test_guint64_to_gdouble)
{
guint64 from[] = { 0, 1, 100, 10000, (guint64) (1) << 63,
((guint64) (1) << 63) + 1,
((guint64) (1) << 63) + (G_GINT64_CONSTANT (1) << 62)
};
gdouble to[] = { 0., 1., 100., 10000., 9223372036854775808.,
9223372036854775809., 13835058055282163712.
};
gdouble tolerance[] = { 0., 0., 0., 0., 0., 1., 1. };
gint i;
gdouble result;
gdouble delta;
for (i = 0; i < G_N_ELEMENTS (from); ++i) {
result = gst_util_guint64_to_gdouble (from[i]);
delta = ABS (to[i] - result);
fail_unless (delta <= tolerance[i],
"Could not convert %d: %" G_GUINT64_FORMAT
" -> %f, got %f instead, delta of %e with tolerance of %e",
i, from[i], to[i], result, delta, tolerance[i]);
}
}
GST_END_TEST;
GST_START_TEST (test_gdouble_to_guint64)
{
gdouble from[] = { 0., 1., 100., 10000., 9223372036854775808.,
9223372036854775809., 13835058055282163712.
};
guint64 to[] = { 0, 1, 100, 10000, (guint64) (1) << 63,
((guint64) (1) << 63) + 1,
((guint64) (1) << 63) + (G_GINT64_CONSTANT (1) << 62)
};
guint64 tolerance[] = { 0, 0, 0, 0, 0, 1, 1 };
gint i;
gdouble result;
guint64 delta;
for (i = 0; i < G_N_ELEMENTS (from); ++i) {
result = gst_util_gdouble_to_guint64 (from[i]);
delta = ABS (to[i] - result);
    fail_unless (delta <= tolerance[i],
        "Could not convert %d: %f -> %" G_GUINT64_FORMAT ", got %f instead, "
        "delta of %" G_GUINT64_FORMAT " with tolerance of %" G_GUINT64_FORMAT,
        i, from[i], to[i], result, delta, tolerance[i]);
}
}
GST_END_TEST;
#ifndef GST_DISABLE_PARSE
GST_START_TEST (test_parse_bin_from_description)
{
struct
{
const gchar *bin_desc;
const gchar *pad_names;
} bin_tests[] = {
{
"identity", "identity0/sink,identity0/src"}, {
"identity ! identity ! identity", "identity1/sink,identity3/src"}, {
"identity ! fakesink", "identity4/sink"}, {
"fakesrc ! identity", "identity5/src"}, {
"fakesrc ! fakesink", ""}
};
gint i;
for (i = 0; i < G_N_ELEMENTS (bin_tests); ++i) {
GstElement *bin, *parent;
GString *s;
GstPad *ghost_pad, *target_pad;
GError *err = NULL;
bin = gst_parse_bin_from_description (bin_tests[i].bin_desc, TRUE, &err);
if (err) {
g_error ("ERROR in gst_parse_bin_from_description (%s): %s",
bin_tests[i].bin_desc, err->message);
}
g_assert (bin != NULL);
s = g_string_new ("");
if ((ghost_pad = gst_element_get_static_pad (bin, "sink"))) {
g_assert (GST_IS_GHOST_PAD (ghost_pad));
target_pad = gst_ghost_pad_get_target (GST_GHOST_PAD (ghost_pad));
g_assert (target_pad != NULL);
g_assert (GST_IS_PAD (target_pad));
parent = gst_pad_get_parent_element (target_pad);
g_assert (parent != NULL);
g_string_append_printf (s, "%s/sink", GST_ELEMENT_NAME (parent));
gst_object_unref (parent);
gst_object_unref (target_pad);
gst_object_unref (ghost_pad);
}
if ((ghost_pad = gst_element_get_static_pad (bin, "src"))) {
g_assert (GST_IS_GHOST_PAD (ghost_pad));
target_pad = gst_ghost_pad_get_target (GST_GHOST_PAD (ghost_pad));
g_assert (target_pad != NULL);
g_assert (GST_IS_PAD (target_pad));
parent = gst_pad_get_parent_element (target_pad);
g_assert (parent != NULL);
if (s->len > 0) {
g_string_append (s, ",");
}
g_string_append_printf (s, "%s/src", GST_ELEMENT_NAME (parent));
gst_object_unref (parent);
gst_object_unref (target_pad);
gst_object_unref (ghost_pad);
}
if (strcmp (s->str, bin_tests[i].pad_names) != 0) {
g_error ("FAILED: expected '%s', got '%s' for bin '%s'",
bin_tests[i].pad_names, s->str, bin_tests[i].bin_desc);
}
g_string_free (s, TRUE);
gst_object_unref (bin);
}
}
GST_END_TEST;
#endif
GST_START_TEST (test_element_found_tags)
{
GstElement *pipeline, *fakesrc, *fakesink;
GstTagList *list;
GstBus *bus;
GstMessage *message;
GstPad *srcpad;
pipeline = gst_element_factory_make ("pipeline", NULL);
fakesrc = gst_element_factory_make ("fakesrc", NULL);
fakesink = gst_element_factory_make ("fakesink", NULL);
list = gst_tag_list_new_empty ();
g_object_set (fakesrc, "num-buffers", (int) 10, NULL);
gst_bin_add_many (GST_BIN (pipeline), fakesrc, fakesink, NULL);
gst_element_link (fakesrc, fakesink);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
srcpad = gst_element_get_static_pad (fakesrc, "src");
gst_pad_push_event (srcpad, gst_event_new_tag (list));
gst_object_unref (srcpad);
bus = gst_element_get_bus (pipeline);
message = gst_bus_poll (bus, GST_MESSAGE_EOS, -1);
gst_message_unref (message);
gst_object_unref (bus);
/* FIXME: maybe also check if the fakesink receives the message */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
}
GST_END_TEST;
GST_START_TEST (test_element_unlink)
{
GstElement *src, *sink;
src = gst_element_factory_make ("fakesrc", NULL);
sink = gst_element_factory_make ("fakesink", NULL);
fail_unless (gst_element_link (src, sink) != FALSE);
gst_element_unlink (src, sink);
gst_object_unref (src);
gst_object_unref (sink);
}
GST_END_TEST;
GST_START_TEST (test_set_value_from_string)
{
GValue val = { 0, };
/* g_return_if_fail */
ASSERT_CRITICAL (gst_util_set_value_from_string (NULL, "xyz"));
g_value_init (&val, G_TYPE_STRING);
ASSERT_CRITICAL (gst_util_set_value_from_string (&val, NULL));
g_value_unset (&val);
/* string => string */
g_value_init (&val, G_TYPE_STRING);
gst_util_set_value_from_string (&val, "Y00");
fail_unless (g_value_get_string (&val) != NULL);
fail_unless_equals_string (g_value_get_string (&val), "Y00");
g_value_unset (&val);
/* string => int */
g_value_init (&val, G_TYPE_INT);
gst_util_set_value_from_string (&val, "987654321");
fail_unless (g_value_get_int (&val) == 987654321);
g_value_unset (&val);
g_value_init (&val, G_TYPE_INT);
ASSERT_CRITICAL (gst_util_set_value_from_string (&val, "xyz"));
g_value_unset (&val);
/* string => uint */
g_value_init (&val, G_TYPE_UINT);
gst_util_set_value_from_string (&val, "987654321");
fail_unless (g_value_get_uint (&val) == 987654321);
g_value_unset (&val);
/* CHECKME: is this really desired behaviour? (tpm) */
g_value_init (&val, G_TYPE_UINT);
gst_util_set_value_from_string (&val, "-999");
fail_unless (g_value_get_uint (&val) == ((guint) 0 - (guint) 999));
g_value_unset (&val);
g_value_init (&val, G_TYPE_UINT);
ASSERT_CRITICAL (gst_util_set_value_from_string (&val, "xyz"));
g_value_unset (&val);
/* string => long */
g_value_init (&val, G_TYPE_LONG);
gst_util_set_value_from_string (&val, "987654321");
fail_unless (g_value_get_long (&val) == 987654321);
g_value_unset (&val);
g_value_init (&val, G_TYPE_LONG);
ASSERT_CRITICAL (gst_util_set_value_from_string (&val, "xyz"));
g_value_unset (&val);
/* string => ulong */
g_value_init (&val, G_TYPE_ULONG);
gst_util_set_value_from_string (&val, "987654321");
fail_unless (g_value_get_ulong (&val) == 987654321);
g_value_unset (&val);
/* CHECKME: is this really desired behaviour? (tpm) */
g_value_init (&val, G_TYPE_ULONG);
gst_util_set_value_from_string (&val, "-999");
fail_unless (g_value_get_ulong (&val) == ((gulong) 0 - (gulong) 999));
g_value_unset (&val);
g_value_init (&val, G_TYPE_ULONG);
ASSERT_CRITICAL (gst_util_set_value_from_string (&val, "xyz"));
g_value_unset (&val);
/* string => boolean */
g_value_init (&val, G_TYPE_BOOLEAN);
gst_util_set_value_from_string (&val, "true");
fail_unless_equals_int (g_value_get_boolean (&val), TRUE);
g_value_unset (&val);
g_value_init (&val, G_TYPE_BOOLEAN);
gst_util_set_value_from_string (&val, "TRUE");
fail_unless_equals_int (g_value_get_boolean (&val), TRUE);
g_value_unset (&val);
g_value_init (&val, G_TYPE_BOOLEAN);
gst_util_set_value_from_string (&val, "false");
fail_unless_equals_int (g_value_get_boolean (&val), FALSE);
g_value_unset (&val);
g_value_init (&val, G_TYPE_BOOLEAN);
gst_util_set_value_from_string (&val, "FALSE");
fail_unless_equals_int (g_value_get_boolean (&val), FALSE);
g_value_unset (&val);
g_value_init (&val, G_TYPE_BOOLEAN);
gst_util_set_value_from_string (&val, "bleh");
fail_unless_equals_int (g_value_get_boolean (&val), FALSE);
g_value_unset (&val);
#if 0
/* string => float (yay, localisation issues involved) */
g_value_init (&val, G_TYPE_FLOAT);
gst_util_set_value_from_string (&val, "987.654");
fail_unless (g_value_get_float (&val) >= 987.653 &&
g_value_get_float (&val) <= 987.655);
g_value_unset (&val);
g_value_init (&val, G_TYPE_FLOAT);
gst_util_set_value_from_string (&val, "987,654");
fail_unless (g_value_get_float (&val) >= 987.653 &&
g_value_get_float (&val) <= 987.655);
g_value_unset (&val);
/* string => double (yay, localisation issues involved) */
g_value_init (&val, G_TYPE_DOUBLE);
gst_util_set_value_from_string (&val, "987.654");
fail_unless (g_value_get_double (&val) >= 987.653 &&
g_value_get_double (&val) <= 987.655);
g_value_unset (&val);
g_value_init (&val, G_TYPE_DOUBLE);
gst_util_set_value_from_string (&val, "987,654");
fail_unless (g_value_get_double (&val) >= 987.653 &&
g_value_get_double (&val) <= 987.655);
g_value_unset (&val);
#endif
}
GST_END_TEST;
static gint
_binary_search_compare (guint32 * a, guint32 * b)
{
return *a - *b;
}
GST_START_TEST (test_binary_search)
{
guint32 data[257];
guint32 *match;
guint32 search_element = 121 * 2;
guint i;
for (i = 0; i < 257; i++)
data[i] = (i + 1) * 2;
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_EXACT,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 120);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_BEFORE,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 120);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_AFTER,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 120);
search_element = 0;
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_EXACT,
&search_element, NULL);
fail_unless (match == NULL);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_AFTER,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 0);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_BEFORE,
&search_element, NULL);
fail_unless (match == NULL);
search_element = 1000;
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_EXACT,
&search_element, NULL);
fail_unless (match == NULL);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_AFTER,
&search_element, NULL);
fail_unless (match == NULL);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_BEFORE,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 256);
search_element = 121 * 2 - 1;
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_EXACT,
&search_element, NULL);
fail_unless (match == NULL);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_AFTER,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 120);
match =
(guint32 *) gst_util_array_binary_search (data, 257, sizeof (guint32),
(GCompareDataFunc) _binary_search_compare, GST_SEARCH_MODE_BEFORE,
&search_element, NULL);
fail_unless (match != NULL);
fail_unless_equals_int (match - data, 119);
}
GST_END_TEST;
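/* To summarise the semantics exercised above: EXACT returns NULL unless the
 * key is present; BEFORE returns the largest element <= the key (NULL when
 * the key is below the first element); AFTER returns the smallest element
 * >= the key (NULL when the key is above the last element). */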
#ifdef HAVE_GSL
#ifdef HAVE_GMP
#include <gsl/gsl_rng.h>
#include <gmp.h>
static guint64
randguint64 (gsl_rng * rng, guint64 n)
{
union
{
guint64 x;
struct
{
guint16 a, b, c, d;
} parts;
} x;
x.parts.a = gsl_rng_uniform_int (rng, 1 << 16);
x.parts.b = gsl_rng_uniform_int (rng, 1 << 16);
x.parts.c = gsl_rng_uniform_int (rng, 1 << 16);
x.parts.d = gsl_rng_uniform_int (rng, 1 << 16);
return x.x % n;
}
enum round_t
{
ROUND_TONEAREST = 0,
ROUND_UP,
ROUND_DOWN
};
static void
gmp_set_uint64 (mpz_t mp, guint64 x)
{
mpz_t two_32, tmp;
mpz_init (two_32);
mpz_init (tmp);
mpz_ui_pow_ui (two_32, 2, 32);
mpz_set_ui (mp, (unsigned long) ((x >> 32) & G_MAXUINT32));
mpz_mul (tmp, mp, two_32);
mpz_add_ui (mp, tmp, (unsigned long) (x & G_MAXUINT32));
mpz_clear (two_32);
mpz_clear (tmp);
}
static guint64
gmp_get_uint64 (mpz_t mp)
{
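  /* Inverse of gmp_set_uint64(): read a guint64 back out of an mpz_t,
   * clamping values that no longer fit into 64 bits to G_MAXUINT64 */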
mpz_t two_64, two_32, tmp;
guint64 ret;
mpz_init (two_64);
mpz_init (two_32);
mpz_init (tmp);
mpz_ui_pow_ui (two_64, 2, 64);
mpz_ui_pow_ui (two_32, 2, 32);
  if (mpz_cmp (mp, two_64) >= 0) {
    mpz_clear (two_64);
    mpz_clear (two_32);
    mpz_clear (tmp);
    return G_MAXUINT64;
  }
mpz_clear (two_64);
mpz_tdiv_q (tmp, mp, two_32);
ret = mpz_get_ui (tmp);
ret <<= 32;
ret |= mpz_get_ui (mp);
mpz_clear (two_32);
mpz_clear (tmp);
return ret;
}
static guint64
gmp_scale (guint64 x, guint64 a, guint64 b, enum round_t mode)
{
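  /* Arbitrary-precision reference for the gst_util_uint64_scale*() family:
   * computes x * a / b with the requested rounding mode and no intermediate
   * overflow */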
mpz_t mp1, mp2, mp3;
if (!b)
/* overflow */
return G_MAXUINT64;
mpz_init (mp1);
mpz_init (mp2);
mpz_init (mp3);
gmp_set_uint64 (mp1, x);
gmp_set_uint64 (mp3, a);
mpz_mul (mp2, mp1, mp3);
switch (mode) {
case ROUND_TONEAREST:
gmp_set_uint64 (mp1, b);
mpz_tdiv_q_ui (mp3, mp1, 2);
mpz_add (mp1, mp2, mp3);
mpz_set (mp2, mp1);
break;
case ROUND_UP:
gmp_set_uint64 (mp1, b);
mpz_sub_ui (mp3, mp1, 1);
mpz_add (mp1, mp2, mp3);
mpz_set (mp2, mp1);
break;
case ROUND_DOWN:
break;
}
gmp_set_uint64 (mp3, b);
mpz_tdiv_q (mp1, mp2, mp3);
x = gmp_get_uint64 (mp1);
mpz_clear (mp1);
mpz_clear (mp2);
mpz_clear (mp3);
return x;
}
static void
_gmp_test_scale (gsl_rng * rng)
{
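  /* Cross-check gst_util_uint64_scale*() against the GMP reference on random
   * operands; val is drawn so that val * a / b still fits into 64 bits */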
guint64 bygst, bygmp;
guint64 a = randguint64 (rng, gsl_rng_uniform_int (rng,
2) ? G_MAXUINT64 : G_MAXUINT32);
guint64 b = randguint64 (rng, gsl_rng_uniform_int (rng, 2) ? G_MAXUINT64 - 1 : G_MAXUINT32 - 1) + 1; /* 0 not allowed */
guint64 val = randguint64 (rng, gmp_scale (G_MAXUINT64, b, a, ROUND_DOWN));
enum round_t mode = gsl_rng_uniform_int (rng, 3);
const char *func;
bygmp = gmp_scale (val, a, b, mode);
switch (mode) {
case ROUND_TONEAREST:
bygst = gst_util_uint64_scale_round (val, a, b);
func = "gst_util_uint64_scale_round";
break;
case ROUND_UP:
bygst = gst_util_uint64_scale_ceil (val, a, b);
func = "gst_util_uint64_scale_ceil";
break;
case ROUND_DOWN:
bygst = gst_util_uint64_scale (val, a, b);
func = "gst_util_uint64_scale";
break;
default:
g_assert_not_reached ();
break;
}
fail_unless (bygst == bygmp,
"error: %s(): %" G_GUINT64_FORMAT " * %" G_GUINT64_FORMAT " / %"
G_GUINT64_FORMAT " = %" G_GUINT64_FORMAT ", correct = %" G_GUINT64_FORMAT
"\n", func, val, a, b, bygst, bygmp);
}
static void
_gmp_test_scale_int (gsl_rng * rng)
{
guint64 bygst, bygmp;
gint32 a = randguint64 (rng, G_MAXINT32);
gint32 b = randguint64 (rng, G_MAXINT32 - 1) + 1; /* 0 not allowed */
guint64 val = randguint64 (rng, gmp_scale (G_MAXUINT64, b, a, ROUND_DOWN));
enum round_t mode = gsl_rng_uniform_int (rng, 3);
const char *func;
bygmp = gmp_scale (val, a, b, mode);
switch (mode) {
case ROUND_TONEAREST:
bygst = gst_util_uint64_scale_int_round (val, a, b);
func = "gst_util_uint64_scale_int_round";
break;
case ROUND_UP:
bygst = gst_util_uint64_scale_int_ceil (val, a, b);
func = "gst_util_uint64_scale_int_ceil";
break;
case ROUND_DOWN:
bygst = gst_util_uint64_scale_int (val, a, b);
func = "gst_util_uint64_scale_int";
break;
default:
g_assert_not_reached ();
break;
}
fail_unless (bygst == bygmp,
"error: %s(): %" G_GUINT64_FORMAT " * %d / %d = %" G_GUINT64_FORMAT
", correct = %" G_GUINT64_FORMAT "\n", func, val, a, b, bygst, bygmp);
}
#define GMP_TEST_RUNS 100000
GST_START_TEST (test_math_scale_gmp)
{
gsl_rng *rng = gsl_rng_alloc (gsl_rng_mt19937);
gint n;
for (n = 0; n < GMP_TEST_RUNS; n++)
_gmp_test_scale (rng);
gsl_rng_free (rng);
}
GST_END_TEST;
GST_START_TEST (test_math_scale_gmp_int)
{
gsl_rng *rng = gsl_rng_alloc (gsl_rng_mt19937);
gint n;
for (n = 0; n < GMP_TEST_RUNS; n++)
_gmp_test_scale_int (rng);
gsl_rng_free (rng);
}
GST_END_TEST;
#endif
#endif
GST_START_TEST (test_pad_proxy_query_caps_aggregation)
{
GstElement *tee, *sink1, *sink2;
GstCaps *caps;
GstPad *tee_src1, *tee_src2, *tee_sink, *sink1_sink, *sink2_sink;
tee = gst_element_factory_make ("tee", "tee");
sink1 = gst_element_factory_make ("fakesink", "sink1");
tee_src1 = gst_element_get_request_pad (tee, "src_%u");
sink1_sink = gst_element_get_static_pad (sink1, "sink");
fail_unless_equals_int (gst_pad_link (tee_src1, sink1_sink), GST_PAD_LINK_OK);
sink2 = gst_element_factory_make ("fakesink", "sink2");
tee_src2 = gst_element_get_request_pad (tee, "src_%u");
sink2_sink = gst_element_get_static_pad (sink2, "sink");
fail_unless_equals_int (gst_pad_link (tee_src2, sink2_sink), GST_PAD_LINK_OK);
tee_sink = gst_element_get_static_pad (tee, "sink");
gst_element_set_state (sink1, GST_STATE_PAUSED);
gst_element_set_state (sink2, GST_STATE_PAUSED);
gst_element_set_state (tee, GST_STATE_PAUSED);
/* by default, ANY caps should intersect to ANY */
caps = gst_pad_query_caps (tee_sink, NULL);
GST_INFO ("got caps: %" GST_PTR_FORMAT, caps);
fail_unless (caps != NULL);
fail_unless (gst_caps_is_any (caps));
gst_caps_unref (caps);
  /* these don't intersect, so we should get empty caps */
caps = gst_caps_new_empty_simple ("foo/bar");
fail_unless (gst_pad_set_caps (sink1_sink, caps));
gst_pad_use_fixed_caps (sink1_sink);
gst_caps_unref (caps);
caps = gst_caps_new_empty_simple ("bar/ter");
fail_unless (gst_pad_set_caps (sink2_sink, caps));
gst_pad_use_fixed_caps (sink2_sink);
gst_caps_unref (caps);
caps = gst_pad_query_caps (tee_sink, NULL);
GST_INFO ("got caps: %" GST_PTR_FORMAT, caps);
fail_unless (caps != NULL);
fail_unless (gst_caps_is_empty (caps));
gst_caps_unref (caps);
/* test intersection */
caps = gst_caps_new_simple ("foo/bar", "barversion", G_TYPE_INT, 1, NULL);
GST_OBJECT_FLAG_UNSET (sink2_sink, GST_PAD_FLAG_FIXED_CAPS);
fail_unless (gst_pad_set_caps (sink2_sink, caps));
gst_pad_use_fixed_caps (sink2_sink);
gst_caps_unref (caps);
caps = gst_pad_query_caps (tee_sink, NULL);
GST_INFO ("got caps: %" GST_PTR_FORMAT, caps);
fail_unless (caps != NULL);
fail_if (gst_caps_is_empty (caps));
{
GstStructure *s = gst_caps_get_structure (caps, 0);
fail_unless_equals_string (gst_structure_get_name (s), "foo/bar");
fail_unless (gst_structure_has_field_typed (s, "barversion", G_TYPE_INT));
}
gst_caps_unref (caps);
gst_element_set_state (sink1, GST_STATE_NULL);
gst_element_set_state (sink2, GST_STATE_NULL);
gst_element_set_state (tee, GST_STATE_NULL);
/* clean up */
gst_element_release_request_pad (tee, tee_src1);
gst_object_unref (tee_src1);
gst_element_release_request_pad (tee, tee_src2);
gst_object_unref (tee_src2);
gst_object_unref (tee_sink);
gst_object_unref (tee);
gst_object_unref (sink1_sink);
gst_object_unref (sink1);
gst_object_unref (sink2_sink);
gst_object_unref (sink2);
}
GST_END_TEST;
GST_START_TEST (test_greatest_common_divisor)
{
fail_if (gst_util_greatest_common_divisor (1, 1) != 1);
fail_if (gst_util_greatest_common_divisor (2, 3) != 1);
fail_if (gst_util_greatest_common_divisor (3, 5) != 1);
fail_if (gst_util_greatest_common_divisor (-1, 1) != 1);
fail_if (gst_util_greatest_common_divisor (-2, 3) != 1);
fail_if (gst_util_greatest_common_divisor (-3, 5) != 1);
fail_if (gst_util_greatest_common_divisor (-1, -1) != 1);
fail_if (gst_util_greatest_common_divisor (-2, -3) != 1);
fail_if (gst_util_greatest_common_divisor (-3, -5) != 1);
fail_if (gst_util_greatest_common_divisor (1, -1) != 1);
fail_if (gst_util_greatest_common_divisor (2, -3) != 1);
fail_if (gst_util_greatest_common_divisor (3, -5) != 1);
fail_if (gst_util_greatest_common_divisor (2, 2) != 2);
fail_if (gst_util_greatest_common_divisor (2, 4) != 2);
fail_if (gst_util_greatest_common_divisor (1001, 11) != 11);
}
GST_END_TEST;
GST_START_TEST (test_read_macros)
{
guint8 carray[] = "ABCDEFGH"; /* 0x41 ... 0x48 */
guint32 uarray[2];
guint8 *cpointer;
memcpy (uarray, carray, 8);
cpointer = carray;
/* 16 bit */
/* First try the standard pointer variants */
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer), 0x4142);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 1), 0x4243);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 2), 0x4344);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 3), 0x4445);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 4), 0x4546);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 5), 0x4647);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer + 6), 0x4748);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer), 0x4241);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 1), 0x4342);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 2), 0x4443);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 3), 0x4544);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 4), 0x4645);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 5), 0x4746);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (cpointer + 6), 0x4847);
/* On an array of guint8 */
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray), 0x4142);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 1), 0x4243);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 2), 0x4344);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 3), 0x4445);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 4), 0x4546);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 5), 0x4647);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (carray + 6), 0x4748);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray), 0x4241);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 1), 0x4342);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 2), 0x4443);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 3), 0x4544);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 4), 0x4645);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 5), 0x4746);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (carray + 6), 0x4847);
/* On an array of guint32 */
fail_unless_equals_int_hex (GST_READ_UINT16_BE (uarray), 0x4142);
fail_unless_equals_int_hex (GST_READ_UINT16_BE (uarray + 1), 0x4546);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (uarray), 0x4241);
fail_unless_equals_int_hex (GST_READ_UINT16_LE (uarray + 1), 0x4645);
/* 24bit */
/* First try the standard pointer variants */
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer), 0x414243);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer + 1), 0x424344);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer + 2), 0x434445);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer + 3), 0x444546);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer + 4), 0x454647);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (cpointer + 5), 0x464748);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer), 0x434241);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer + 1), 0x444342);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer + 2), 0x454443);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer + 3), 0x464544);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer + 4), 0x474645);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (cpointer + 5), 0x484746);
/* On an array of guint8 */
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray), 0x414243);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray + 1), 0x424344);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray + 2), 0x434445);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray + 3), 0x444546);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray + 4), 0x454647);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (carray + 5), 0x464748);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray), 0x434241);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray + 1), 0x444342);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray + 2), 0x454443);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray + 3), 0x464544);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray + 4), 0x474645);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (carray + 5), 0x484746);
/* On an array of guint32 */
fail_unless_equals_int_hex (GST_READ_UINT24_BE (uarray), 0x414243);
fail_unless_equals_int_hex (GST_READ_UINT24_BE (uarray + 1), 0x454647);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (uarray), 0x434241);
fail_unless_equals_int_hex (GST_READ_UINT24_LE (uarray + 1), 0x474645);
/* 32bit */
/* First try the standard pointer variants */
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer), 0x41424344);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer + 1), 0x42434445);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer + 2), 0x43444546);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer + 3), 0x44454647);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer + 4), 0x45464748);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (cpointer), 0x44434241);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (cpointer + 1), 0x45444342);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (cpointer + 2), 0x46454443);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (cpointer + 3), 0x47464544);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (cpointer + 4), 0x48474645);
/* On an array of guint8 */
fail_unless_equals_int_hex (GST_READ_UINT32_BE (carray), 0x41424344);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (carray + 1), 0x42434445);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (carray + 2), 0x43444546);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (carray + 3), 0x44454647);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (carray + 4), 0x45464748);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (carray), 0x44434241);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (carray + 1), 0x45444342);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (carray + 2), 0x46454443);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (carray + 3), 0x47464544);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (carray + 4), 0x48474645);
/* On an array of guint32 */
fail_unless_equals_int_hex (GST_READ_UINT32_BE (uarray), 0x41424344);
fail_unless_equals_int_hex (GST_READ_UINT32_BE (uarray + 1), 0x45464748);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (uarray), 0x44434241);
fail_unless_equals_int_hex (GST_READ_UINT32_LE (uarray + 1), 0x48474645);
/* 64bit */
fail_unless_equals_int64_hex (GST_READ_UINT64_BE (cpointer),
0x4142434445464748);
fail_unless_equals_int64_hex (GST_READ_UINT64_LE (cpointer),
0x4847464544434241);
fail_unless_equals_int64_hex (GST_READ_UINT64_BE (carray),
0x4142434445464748);
fail_unless_equals_int64_hex (GST_READ_UINT64_LE (carray),
0x4847464544434241);
fail_unless_equals_int64_hex (GST_READ_UINT64_BE (uarray),
0x4142434445464748);
fail_unless_equals_int64_hex (GST_READ_UINT64_LE (uarray),
0x4847464544434241);
/* make sure the data argument is not duplicated inside the macro
* with possibly unexpected side-effects */
cpointer = carray;
fail_unless_equals_int (GST_READ_UINT8 (cpointer++), 'A');
fail_unless (cpointer == carray + 1);
cpointer = carray;
fail_unless_equals_int_hex (GST_READ_UINT16_BE (cpointer++), 0x4142);
fail_unless (cpointer == carray + 1);
cpointer = carray;
fail_unless_equals_int_hex (GST_READ_UINT32_BE (cpointer++), 0x41424344);
fail_unless (cpointer == carray + 1);
cpointer = carray;
fail_unless_equals_int64_hex (GST_READ_UINT64_BE (cpointer++),
0x4142434445464748);
fail_unless (cpointer == carray + 1);
}
GST_END_TEST;
GST_START_TEST (test_write_macros)
{
guint8 carray[8];
guint8 *cpointer;
/* make sure the data argument is not duplicated inside the macro
* with possibly unexpected side-effects */
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT8 (cpointer++, 'A');
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'A');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT16_BE (cpointer++, 0x4142);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'A');
fail_unless_equals_int (carray[1], 'B');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT32_BE (cpointer++, 0x41424344);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'A');
fail_unless_equals_int (carray[3], 'D');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT64_BE (cpointer++, 0x4142434445464748);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'A');
fail_unless_equals_int (carray[7], 'H');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT16_LE (cpointer++, 0x4142);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'B');
fail_unless_equals_int (carray[1], 'A');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT32_LE (cpointer++, 0x41424344);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'D');
fail_unless_equals_int (carray[3], 'A');
memset (carray, 0, sizeof (carray));
cpointer = carray;
GST_WRITE_UINT64_LE (cpointer++, 0x4142434445464748);
fail_unless_equals_pointer (cpointer, carray + 1);
fail_unless_equals_int (carray[0], 'H');
fail_unless_equals_int (carray[7], 'A');
}
GST_END_TEST;
static void
count_request_pad (const GValue * item, gpointer user_data)
{
GstPad *pad = GST_PAD (g_value_get_object (item));
guint *count = (guint *) user_data;
if (GST_PAD_TEMPLATE_PRESENCE (GST_PAD_PAD_TEMPLATE (pad)) == GST_PAD_REQUEST)
(*count)++;
}
static guint
request_pads (GstElement * element)
{
GstIterator *iter;
guint pads = 0;
iter = gst_element_iterate_pads (element);
fail_unless (gst_iterator_foreach (iter, count_request_pad, &pads) ==
GST_ITERATOR_DONE);
gst_iterator_free (iter);
return pads;
}
static GstPadLinkReturn
refuse_to_link (GstPad * pad, GstObject * parent, GstPad * peer)
{
return GST_PAD_LINK_REFUSED;
}
typedef struct _GstFakeReqSink GstFakeReqSink;
typedef struct _GstFakeReqSinkClass GstFakeReqSinkClass;
struct _GstFakeReqSink
{
GstElement element;
};
struct _GstFakeReqSinkClass
{
GstElementClass parent_class;
};
G_GNUC_INTERNAL GType gst_fakereqsink_get_type (void);
static GstStaticPadTemplate fakereqsink_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS_ANY);
G_DEFINE_TYPE (GstFakeReqSink, gst_fakereqsink, GST_TYPE_ELEMENT);
static GstPad *
gst_fakereqsink_request_new_pad (GstElement * element, GstPadTemplate * templ,
const gchar * name, const GstCaps * caps)
{
GstPad *pad;
pad = gst_pad_new_from_static_template (&fakereqsink_sink_template, name);
gst_pad_set_link_function (pad, refuse_to_link);
gst_element_add_pad (GST_ELEMENT_CAST (element), pad);
return pad;
}
static void
gst_fakereqsink_release_pad (GstElement * element, GstPad * pad)
{
gst_pad_set_active (pad, FALSE);
gst_element_remove_pad (element, pad);
}
static void
gst_fakereqsink_class_init (GstFakeReqSinkClass * klass)
{
GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
gst_element_class_set_static_metadata (gstelement_class,
"Fake Request Sink", "Sink", "Fake sink with request pads",
"Sebastian Rasmussen <sebras@hotmail.com>");
gst_element_class_add_static_pad_template (gstelement_class,
&fakereqsink_sink_template);
gstelement_class->request_new_pad = gst_fakereqsink_request_new_pad;
gstelement_class->release_pad = gst_fakereqsink_release_pad;
}
static void
gst_fakereqsink_init (GstFakeReqSink * fakereqsink)
{
}
static void
test_link (const gchar * expectation, const gchar * srcname,
const gchar * srcpad, const gchar * srcstate, const gchar * sinkname,
const gchar * sinkpad, const gchar * sinkstate)
{
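  /* Build a src and a sink element of the requested kinds, optionally
   * pre-link or poison their pads, then verify that gst_element_link_pads()
   * succeeds or fails as expected and that request pads are only created on
   * success */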
GstElement *src, *sink, *othersrc, *othersink;
guint src_pads, sink_pads;
if (g_strcmp0 (srcname, "requestsrc") == 0)
src = gst_element_factory_make ("tee", NULL);
else if (g_strcmp0 (srcname, "requestsink") == 0)
src = gst_element_factory_make ("funnel", NULL);
else if (g_strcmp0 (srcname, "staticsrc") == 0)
src = gst_element_factory_make ("fakesrc", NULL);
else if (g_strcmp0 (srcname, "staticsink") == 0)
src = gst_element_factory_make ("fakesink", NULL);
else
g_assert_not_reached ();
if (g_strcmp0 (sinkname, "requestsink") == 0)
sink = gst_element_factory_make ("funnel", NULL);
else if (g_strcmp0 (sinkname, "requestsrc") == 0)
sink = gst_element_factory_make ("tee", NULL);
else if (g_strcmp0 (sinkname, "staticsink") == 0)
sink = gst_element_factory_make ("fakesink", NULL);
else if (g_strcmp0 (sinkname, "staticsrc") == 0)
sink = gst_element_factory_make ("fakesrc", NULL);
else if (g_strcmp0 (sinkname, "fakerequestsink") == 0)
sink = gst_element_factory_make ("fakereqsink", NULL);
else
g_assert_not_reached ();
othersrc = gst_element_factory_make ("fakesrc", NULL);
othersink = gst_element_factory_make ("fakesink", NULL);
if (g_strcmp0 (srcstate, "linked") == 0)
fail_unless (gst_element_link_pads (src, srcpad, othersink, NULL));
if (g_strcmp0 (sinkstate, "linked") == 0)
fail_unless (gst_element_link_pads (othersrc, NULL, sink, sinkpad));
if (g_strcmp0 (srcstate, "unlinkable") == 0) {
GstPad *pad = gst_element_get_static_pad (src, srcpad ? srcpad : "src");
gst_pad_set_link_function (pad, refuse_to_link);
gst_object_unref (pad);
}
if (g_strcmp0 (sinkstate, "unlinkable") == 0) {
GstPad *pad = gst_element_get_static_pad (sink, sinkpad ? sinkpad : "sink");
gst_pad_set_link_function (pad, refuse_to_link);
gst_object_unref (pad);
}
src_pads = request_pads (src);
sink_pads = request_pads (sink);
if (g_strcmp0 (expectation, "OK") == 0) {
fail_unless (gst_element_link_pads (src, srcpad, sink, sinkpad));
if (g_str_has_prefix (srcname, "request")) {
fail_unless_equals_int (request_pads (src), src_pads + 1);
} else {
fail_unless_equals_int (request_pads (src), src_pads);
}
if (g_str_has_prefix (sinkname, "request")) {
fail_unless_equals_int (request_pads (sink), sink_pads + 1);
} else {
fail_unless_equals_int (request_pads (sink), sink_pads);
}
} else {
fail_if (gst_element_link_pads (src, srcpad, sink, sinkpad));
fail_unless_equals_int (request_pads (src), src_pads);
fail_unless_equals_int (request_pads (sink), sink_pads);
}
gst_object_unref (othersrc);
gst_object_unref (othersink);
gst_object_unref (src);
gst_object_unref (sink);
}
GST_START_TEST (test_element_link)
{
/* Successful cases */
gst_element_register (NULL, "fakereqsink", GST_RANK_NONE,
gst_fakereqsink_get_type ());
test_link ("OK", "staticsrc", "src", "", "staticsink", "sink", "");
test_link ("OK", "staticsrc", "src", "", "requestsink", "sink_0", "");
test_link ("OK", "staticsrc", "src", "", "staticsink", NULL, "");
test_link ("OK", "staticsrc", "src", "", "requestsink", NULL, "");
test_link ("OK", "requestsrc", "src_0", "", "staticsink", "sink", "");
test_link ("OK", "requestsrc", "src_0", "", "requestsink", "sink_0", "");
test_link ("OK", "requestsrc", "src_0", "", "staticsink", NULL, "");
test_link ("OK", "requestsrc", "src_0", "", "requestsink", NULL, "");
test_link ("OK", "staticsrc", NULL, "", "staticsink", "sink", "");
test_link ("OK", "staticsrc", NULL, "", "requestsink", "sink_0", "");
test_link ("OK", "staticsrc", NULL, "", "staticsink", NULL, "");
test_link ("OK", "staticsrc", NULL, "", "requestsink", NULL, "");
test_link ("OK", "requestsrc", NULL, "", "staticsink", "sink", "");
test_link ("OK", "requestsrc", NULL, "", "requestsink", "sink_0", "");
test_link ("OK", "requestsrc", NULL, "", "staticsink", NULL, "");
test_link ("OK", "requestsrc", NULL, "", "requestsink", NULL, "");
/* Failure cases */
test_link ("NOK", "staticsrc", "missing", "", "staticsink", "sink", "");
test_link ("NOK", "staticsink", "sink", "", "staticsink", "sink", "");
test_link ("NOK", "staticsrc", "src", "linked", "staticsink", "sink", "");
test_link ("NOK", "staticsrc", "src", "", "staticsink", "missing", "");
test_link ("NOK", "staticsrc", "src", "", "staticsrc", "src", "");
test_link ("NOK", "staticsrc", "src", "", "staticsink", "sink", "linked");
test_link ("NOK", "staticsrc", "src", "", "staticsink", "sink", "unlinkable");
test_link ("NOK", "staticsrc", NULL, "", "staticsink", "sink", "unlinkable");
test_link ("NOK", "staticsrc", NULL, "", "staticsink", NULL, "unlinkable");
test_link ("NOK", "requestsrc", "missing", "", "staticsink", "sink", "");
test_link ("NOK", "requestsink", "sink_0", "", "staticsink", "sink", "");
test_link ("NOK", "requestsrc", "src_0", "linked", "staticsink", "sink", "");
test_link ("NOK", "requestsrc", "src_0", "", "staticsink", "missing", "");
test_link ("NOK", "requestsrc", "src_0", "", "staticsrc", "src", "");
test_link ("NOK", "requestsrc", "src_0", "", "staticsink", "sink", "linked");
test_link ("NOK", "requestsrc", "src_0", "", "staticsink", "sink",
"unlinkable");
test_link ("NOK", "requestsrc", NULL, "", "staticsink", "sink", "unlinkable");
test_link ("NOK", "requestsrc", NULL, "", "staticsink", NULL, "unlinkable");
test_link ("NOK", "staticsrc", "missing", "", "requestsink", "sink_0", "");
test_link ("NOK", "staticsink", "sink", "", "requestsink", "sink_0", "");
test_link ("NOK", "staticsrc", "src", "linked", "requestsink", "sink_0", "");
test_link ("NOK", "staticsrc", "src", "", "requestsink", "missing", "");
test_link ("NOK", "staticsrc", "src", "", "requestsrc", "src_0", "");
test_link ("NOK", "staticsrc", "src", "", "requestsink", "sink_0", "linked");
test_link ("NOK", "staticsrc", "src", "unlinkable", "requestsink",
"sink_0", "");
test_link ("NOK", "staticsrc", NULL, "unlinkable", "requestsink",
"sink_0", "");
test_link ("NOK", "staticsrc", NULL, "unlinkable", "requestsink", NULL, "");
test_link ("NOK", "requestsrc", "src_0", "", "staticsink", NULL,
"unlinkable");
test_link ("NOK", "requestsrc", NULL, "", "fakerequestsink", NULL, "");
}
GST_END_TEST;
typedef struct _GstTestPadReqSink GstTestPadReqSink;
typedef struct _GstTestPadReqSinkClass GstTestPadReqSinkClass;
struct _GstTestPadReqSink
{
GstElement element;
};
struct _GstTestPadReqSinkClass
{
GstElementClass parent_class;
};
G_GNUC_INTERNAL GType gst_testpadreqsink_get_type (void);
static GstStaticPadTemplate testpadreqsink_video_template =
GST_STATIC_PAD_TEMPLATE ("video_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS ("video/x-raw"));
static GstStaticPadTemplate testpadreqsink_audio_template =
GST_STATIC_PAD_TEMPLATE ("audio_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS ("audio/x-raw"));
G_DEFINE_TYPE (GstTestPadReqSink, gst_testpadreqsink, GST_TYPE_ELEMENT);
static GstPad *
gst_testpadreqsink_request_new_pad (GstElement * element,
GstPadTemplate * templ, const gchar * name, const GstCaps * caps)
{
GstPad *pad;
pad = gst_pad_new_from_template (templ, name);
gst_pad_set_active (pad, TRUE);
gst_element_add_pad (GST_ELEMENT_CAST (element), pad);
return pad;
}
static void
gst_testpadreqsink_release_pad (GstElement * element, GstPad * pad)
{
gst_pad_set_active (pad, FALSE);
gst_element_remove_pad (element, pad);
}
static void
gst_testpadreqsink_class_init (GstTestPadReqSinkClass * klass)
{
GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
gst_element_class_set_static_metadata (gstelement_class,
"Test Pad Request Sink", "Sink", "Sink for unit tests with request pads",
"Thiago Santos <thiagoss@osg.samsung.com>");
gst_element_class_add_static_pad_template (gstelement_class,
&testpadreqsink_video_template);
gst_element_class_add_static_pad_template (gstelement_class,
&testpadreqsink_audio_template);
gstelement_class->request_new_pad = gst_testpadreqsink_request_new_pad;
gstelement_class->release_pad = gst_testpadreqsink_release_pad;
}
static void
gst_testpadreqsink_init (GstTestPadReqSink * testpadeqsink)
{
}
static GstCaps *padreqsink_query_caps = NULL;
static gboolean
testpadreqsink_peer_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
gboolean res;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CAPS:
if (padreqsink_query_caps) {
gst_query_set_caps_result (query, padreqsink_query_caps);
res = TRUE;
break;
}
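      /* fall through to the default handler when no caps override is set */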
default:
res = gst_pad_query_default (pad, parent, query);
break;
}
return res;
}
static void
check_get_compatible_pad_request (GstElement * element, GstCaps * peer_caps,
GstCaps * filter, gboolean should_get_pad, const gchar * pad_tmpl_name)
{
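  /* Point a fresh src pad, whose caps query returns peer_caps, at the
   * element and verify that gst_element_get_compatible_pad() requests a pad
   * from the expected template (or no pad at all) */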
GstPad *peer, *requested;
GstPadTemplate *tmpl;
gst_caps_replace (&padreqsink_query_caps, peer_caps);
peer = gst_pad_new ("src", GST_PAD_SRC);
gst_pad_set_query_function (peer, testpadreqsink_peer_query);
requested = gst_element_get_compatible_pad (element, peer, filter);
if (should_get_pad) {
fail_unless (requested != NULL);
if (pad_tmpl_name) {
tmpl = gst_pad_get_pad_template (requested);
fail_unless (strcmp (GST_PAD_TEMPLATE_NAME_TEMPLATE (tmpl),
pad_tmpl_name) == 0);
gst_object_unref (tmpl);
}
gst_element_release_request_pad (element, requested);
gst_object_unref (requested);
} else {
fail_unless (requested == NULL);
}
if (peer_caps)
gst_caps_unref (peer_caps);
if (filter)
gst_caps_unref (filter);
gst_object_unref (peer);
}
GST_START_TEST (test_element_get_compatible_pad_request)
{
GstElement *element;
gst_element_register (NULL, "testpadreqsink", GST_RANK_NONE,
gst_testpadreqsink_get_type ());
element = gst_element_factory_make ("testpadreqsink", NULL);
/* Try with a peer pad with any caps and no filter,
* returning any pad is ok */
check_get_compatible_pad_request (element, NULL, NULL, TRUE, NULL);
/* Try with a peer pad with any caps and video as filter */
check_get_compatible_pad_request (element, NULL,
gst_caps_from_string ("video/x-raw"), TRUE, "video_%u");
/* Try with a peer pad with any caps and audio as filter */
check_get_compatible_pad_request (element, NULL,
gst_caps_from_string ("audio/x-raw"), TRUE, "audio_%u");
/* Try with a peer pad with any caps and fake caps as filter */
check_get_compatible_pad_request (element, NULL,
gst_caps_from_string ("foo/bar"), FALSE, NULL);
/* Try with a peer pad with video caps and no caps as filter */
check_get_compatible_pad_request (element,
gst_caps_from_string ("video/x-raw"), NULL, TRUE, "video_%u");
/* Try with a peer pad with audio caps and no caps as filter */
check_get_compatible_pad_request (element,
gst_caps_from_string ("audio/x-raw"), NULL, TRUE, "audio_%u");
/* Try with a peer pad with video caps and foo caps as filter */
check_get_compatible_pad_request (element,
gst_caps_from_string ("video/x-raw"), gst_caps_from_string ("foo/bar"),
FALSE, NULL);
gst_caps_replace (&padreqsink_query_caps, NULL);
gst_object_unref (element);
}
GST_END_TEST;
GST_START_TEST (test_element_link_with_ghost_pads)
{
GstElement *sink_bin, *sink2_bin, *pipeline;
GstElement *src, *tee, *queue, *queue2, *sink, *sink2;
GstMessage *message;
GstBus *bus;
fail_unless (pipeline = gst_pipeline_new (NULL));
fail_unless (sink_bin = gst_bin_new (NULL));
fail_unless (sink2_bin = gst_bin_new (NULL));
fail_unless (src = gst_element_factory_make ("fakesrc", NULL));
fail_unless (tee = gst_element_factory_make ("tee", NULL));
fail_unless (queue = gst_element_factory_make ("queue", NULL));
fail_unless (sink = gst_element_factory_make ("fakesink", NULL));
fail_unless (queue2 = gst_element_factory_make ("queue", NULL));
fail_unless (sink2 = gst_element_factory_make ("fakesink", NULL));
gst_bin_add_many (GST_BIN (pipeline), src, tee, queue, sink, sink2_bin, NULL);
fail_unless (gst_element_link_many (src, tee, queue, sink, NULL));
fail_unless (gst_element_set_state (pipeline,
GST_STATE_PLAYING) == GST_STATE_CHANGE_ASYNC);
/* wait for a buffer to arrive at the sink */
bus = gst_element_get_bus (pipeline);
message = gst_bus_poll (bus, GST_MESSAGE_ASYNC_DONE, -1);
gst_message_unref (message);
gst_object_unref (bus);
gst_bin_add_many (GST_BIN (sink_bin), queue2, sink2, NULL);
fail_unless (gst_element_link (queue2, sink2));
gst_bin_add (GST_BIN (sink2_bin), sink_bin);
  /* The two levels of bins, with the outer bin in the running state, are
   * important: when the second ghost pad is created (by this
   * gst_element_link ()) in the running bin, we need to activate the
   * newly created ghost pad */
fail_unless (gst_element_link (tee, queue2));
fail_unless (gst_element_set_state (pipeline,
GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS);
gst_object_unref (pipeline);
}
GST_END_TEST;
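/* Interleaved (x, y) pairs of clock-time observations fed to
 * gst_calculate_linear_regression () in test_regression below */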
static const GstClockTime times1[] = {
257116899087539, 120632754291904,
257117935914250, 120633825367344,
257119448289434, 120635306141271,
257120493671524, 120636384357825,
257121550784861, 120637417438878,
257123042669403, 120638895344150,
257124089184865, 120639971729651,
257125545836474, 120641406788243,
257127030618490, 120642885914220,
257128033712770, 120643888843907,
257129081768074, 120644981892002,
257130145383845, 120646016376867,
257131532530200, 120647389850987,
257132578136034, 120648472767247,
257134102475722, 120649953785315,
257135142994788, 120651028858556,
257136585079868, 120652441303624,
257137618260656, 120653491627112,
257139108694546, 120654963978184,
257140644022048, 120656500233068,
257141685671825, 120657578510655,
257142741238288, 120658610889805,
257144243633074, 120660093098060,
257145287962271, 120661172901525,
257146740596716, 120662591572179,
257147757607150, 120663622822179,
257149263992401, 120665135578527,
257150303719290, 120666176166905,
257151355569906, 120667217304601,
257152430578406, 120668326099768,
257153490501095, 120669360554111,
257154512360784, 120670365497960,
257155530610577, 120671399006259,
257156562091659, 120672432728185,
257157945388742, 120673800312414,
257159287547073, 120675142444983,
257160324912880, 120676215076817,
257345408328042, 120861261738196,
257346412270919, 120862265613926,
257347420532284, 120863278644933,
257348431187638, 120864284412754,
257349439018028, 120865293110265,
257351796217938, 120867651111973,
257352803038092, 120868659107578,
257354152688899, 120870008594883,
257355157088906, 120871011097327,
257356162439182, 120872016346348,
257357167872040, 120873021656407,
257358182440058, 120874048633945,
257359198881356, 120875052265538,
257100756525466, 120616619282139,
257101789337770, 120617655475988,
257102816323472, 120618674000157,
257103822485250, 120619679005039,
257104840760423, 120620710743321,
257105859459496, 120621715351476,
257106886662470, 120622764942539,
257108387497864, 120624244221106,
257109428859191, 120625321461096,
257110485892785, 120626356892003,
257111869872141, 120627726459874,
257112915903774, 120628813190830,
257114329982208, 120630187061682,
257115376666026, 120631271992101
};
static const GstClockTime times2[] = {
291678579009762, 162107345029507,
291679770464405, 162108597684538,
291680972924370, 162109745816863,
291682278949629, 162111000577605,
291683590706117, 162112357724822,
291684792322541, 162113613156950,
291685931362506, 162114760556854,
291687132156589, 162115909238493,
291688265012060, 162117120603240,
291689372183047, 162118126279508,
291705506022294, 162134329373992,
291667914301004, 162096795553658,
291668119537668, 162096949051905,
291668274671455, 162097049238371,
291668429435600, 162097256356719,
291668586128535, 162097355689763,
291668741306233, 162097565678460,
291668893789203, 162097661044916,
291669100256555, 162097865694145,
291669216417563, 162098069214693,
291669836394620, 162098677275530,
291669990447821, 162098792601263,
291670149426086, 162098916899184,
291670300232152, 162099114225621,
291670411261917, 162099236784112,
291670598483507, 162099402158751,
291671716582687, 162100558744122,
291672600759788, 162101499326359,
291673919988307, 162102751981384,
291675174441643, 162104005551939,
291676271562197, 162105105252898,
291677376345374, 162106195737516
};
static const GstClockTime times3[] = {
291881924291688, 162223997578228,
291883318122262, 162224167198360,
291884786394838, 162224335172501,
291886004374386, 162224503695531,
291887224353285, 162224673560021,
291888472403367, 162224843760361,
291889727977561, 162225014479362,
291890989982306, 162225174554558,
291892247875763, 162225339753039,
291893502163547, 162225673230987,
291894711382216, 162225829494101,
291895961021506, 162225964530832,
291897251690854, 162226127287981,
291898508630785, 162226303710406,
291899740172868, 162226472478047,
291900998878873, 162226637402085,
291902334919875, 162226797873245,
291903572196610, 162226964352963,
291904727342699, 162227125312525,
291906071189108, 162228361337153,
291907308146005, 162229560625638,
291908351925126, 162230604986650,
291909396411423, 162231653690543,
291910453965348, 162232698550995,
291912096870744, 162233475264947,
291913234148395, 162233606516855,
291915448096576, 162233921145559,
291916707748827, 162234047154298,
291918737451070, 162234370837425,
291919896016205, 162234705504337,
291921098663980, 162234872320397,
291922315691409, 162235031023366
};
static const GstClockTime times4[] = {
10, 0,
20, 20,
30, 40,
40, 60,
50, 80,
60, 100
};
struct test_entry
{
gint n;
const GstClockTime *v;
GstClockTime expect_internal;
GstClockTime expect_external;
guint64 expect_num;
guint64 expect_denom;
} times[] = {
{
32, times1, 257154512360784, 120670380469753, 4052622913376634109,
4052799313904261962}, {
64, times1, 257359198881356, 120875054227405, 2011895759027682422,
2012014931360215503}, {
32, times2, 291705506022294, 162134297192792, 2319535707505209857,
2321009753483354451}, {
32, times3, 291922315691409, 162234934150296, 1370930728180888261,
4392719527011673456}, {
6, times4, 60, 100, 2, 1}
};
GST_START_TEST (test_regression)
{
GstClockTime m_num, m_den, internal, external;
gdouble r_squared, rate, expect_rate;
gint i;
for (i = 0; i < G_N_ELEMENTS (times); i++) {
fail_unless (gst_calculate_linear_regression (times[i].v, NULL, times[i].n,
&m_num, &m_den, &external, &internal, &r_squared));
GST_LOG ("xbase %" G_GUINT64_FORMAT " ybase %" G_GUINT64_FORMAT " rate = %"
G_GUINT64_FORMAT " / %" G_GUINT64_FORMAT " = %.10f r_squared %f\n",
internal, external, m_num, m_den, (gdouble) (m_num) / (m_den),
r_squared);
/* Require high correlation */
fail_unless (r_squared >= 0.9);
fail_unless (internal == times[i].expect_internal,
"Regression params %d fail. internal %" G_GUINT64_FORMAT
" != expected %" G_GUINT64_FORMAT, i, internal,
times[i].expect_internal);
    /* Rate must be within an absolute tolerance of 0.1 */
expect_rate = ((gdouble) (times[i].expect_num) / times[i].expect_denom);
rate = ((gdouble) (m_num) / m_den);
fail_unless ((expect_rate - rate) >= -0.1 && (expect_rate - rate) <= 0.1,
"Regression params %d fail. Rate out of range. Expected %f, got %f",
i, expect_rate, rate);
fail_unless (external >= times[i].expect_external * 0.99 &&
external <= times[i].expect_external * 1.01,
"Regression params %d fail. external %" G_GUINT64_FORMAT
" != expected %" G_GUINT64_FORMAT, i, external,
times[i].expect_external);
}
}
GST_END_TEST;
static Suite *
gst_utils_suite (void)
{
Suite *s = suite_create ("GstUtils");
TCase *tc_chain = tcase_create ("general");
suite_add_tcase (s, tc_chain);
tcase_add_test (tc_chain, test_buffer_probe_n_times);
tcase_add_test (tc_chain, test_buffer_probe_once);
tcase_add_test (tc_chain, test_math_scale);
tcase_add_test (tc_chain, test_math_scale_round);
tcase_add_test (tc_chain, test_math_scale_ceil);
tcase_add_test (tc_chain, test_math_scale_uint64);
tcase_add_test (tc_chain, test_math_scale_random);
#ifdef HAVE_GSL
#ifdef HAVE_GMP
tcase_add_test (tc_chain, test_math_scale_gmp);
tcase_add_test (tc_chain, test_math_scale_gmp_int);
#endif
#endif
tcase_add_test (tc_chain, test_guint64_to_gdouble);
tcase_add_test (tc_chain, test_gdouble_to_guint64);
#ifndef GST_DISABLE_PARSE
tcase_add_test (tc_chain, test_parse_bin_from_description);
#endif
tcase_add_test (tc_chain, test_element_found_tags);
tcase_add_test (tc_chain, test_element_link);
tcase_add_test (tc_chain, test_element_link_with_ghost_pads);
tcase_add_test (tc_chain, test_element_unlink);
tcase_add_test (tc_chain, test_element_get_compatible_pad_request);
tcase_add_test (tc_chain, test_set_value_from_string);
tcase_add_test (tc_chain, test_binary_search);
tcase_add_test (tc_chain, test_pad_proxy_query_caps_aggregation);
tcase_add_test (tc_chain, test_greatest_common_divisor);
tcase_add_test (tc_chain, test_read_macros);
tcase_add_test (tc_chain, test_write_macros);
tcase_add_test (tc_chain, test_regression);
return s;
}
GST_CHECK_MAIN (gst_utils);
|
{"hexsha": "18ba199b98dc6926def327925da14cbc18b194ab", "size": 67607, "ext": "c", "lang": "C", "max_stars_repo_path": "third_party/gstreamer/tests/check/gst/gstutils.c", "max_stars_repo_name": "isabella232/aistreams", "max_stars_repo_head_hexsha": "209f4385425405676a581a749bb915e257dbc1c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2020-09-22T18:07:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-21T01:34:04.000Z", "max_issues_repo_path": "third_party/gstreamer/tests/check/gst/gstutils.c", "max_issues_repo_name": "isabella232/aistreams", "max_issues_repo_head_hexsha": "209f4385425405676a581a749bb915e257dbc1c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-11-10T13:17:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:22:14.000Z", "max_forks_repo_path": "third_party/gstreamer/tests/check/gst/gstutils.c", "max_forks_repo_name": "isabella232/aistreams", "max_forks_repo_head_hexsha": "209f4385425405676a581a749bb915e257dbc1c1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-09-26T08:40:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-21T01:33:56.000Z", "avg_line_length": 33.7866066967, "max_line_length": 123, "alphanum_fraction": 0.7175292499, "num_tokens": 21204}
|
#!/usr/bin/python
# coding:utf-8
'''
Created on 2017-05-18
Update on 2017-05-18
Author: Peter Harrington/1988/片刻
GitHub: https://github.com/apachecn/MachineLearning
'''
from numpy import random, mat, eye
'''
# Differences between NumPy matrices and arrays
NumPy has two distinct data types:
1. matrix
2. array
Similarity:
    both handle numeric elements arranged in rows and columns
Differences:
    1. The same operation performed on the two types can produce different results.
    2. NumPy's matrix type is the equivalent of MATLAB's matrices.
'''
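# Short illustration of difference 1 above (demoArr / demoMat are just
# illustrative names): the same "*" operator is element-wise on arrays but
# performs a true matrix product on matrices.
from numpy import array
demoArr = array([[1, 2], [3, 4]])
demoMat = mat(demoArr)
print 'array * array (element-wise) \n', demoArr * demoArr
print 'matrix * matrix (matrix product) \n', demoMat * demoMat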
# generate a 4x4 random array
randArray = random.rand(4, 4)
# conversion: turn the array into a matrix
randMat = mat(randArray)
'''
.I computes the matrix inverse (obtainable via elementary row operations)
Meaning: the inverse matrix is a tool for judging similarity. Multiplying the
inverse of A by a column vector p yields a column vector q whose i-th component
expresses how similar p is to the i-th column vector of A.
Reference links:
    https://www.zhihu.com/question/33258489
    http://blog.csdn.net/vernice/article/details/48506027
.T computes the matrix transpose (rows and columns swapped)
    equivalent to: .transpose()
.A returns the array underlying the matrix
Reference link:
    http://blog.csdn.net/qq403977698/article/details/47254539
'''
invRandMat = randMat.I
TraRandMat = randMat.T
ArrRandMat = randMat.A
# print the results
print 'randArray=(%s) \n' % type(randArray), randArray
print 'randMat=(%s) \n' % type(randMat), randMat
print 'invRandMat=(%s) \n' % type(invRandMat), invRandMat
print 'TraRandMat=(%s) \n' % type(TraRandMat), TraRandMat
print 'ArrRandMat=(%s) \n' % type(ArrRandMat), ArrRandMat
# multiply the matrix by its inverse (in theory this yields the 4x4 identity
# matrix: ones on the diagonal, zeros everywhere else)
myEye = randMat * invRandMat
# numerical error relative to the identity
print myEye - eye(4)
'''
If the code above runs without errors, NumPy is installed correctly.
'''
|
{"hexsha": "c45ad8fcecfaac94d1a17e9f44eb8ed62915e717", "size": 1312, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML-in-Action/MachineLearning-dev/src/py2.x/ML/1.MLFoundation/NumPy.py", "max_stars_repo_name": "cherisyu/ML_in_Action", "max_stars_repo_head_hexsha": "8c1019de911e7fb1bbab973067213f5f62ab9dcd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-23T01:47:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-23T01:47:31.000Z", "max_issues_repo_path": "ML-in-Action/MachineLearning-dev/src/py2.x/ML/1.MLFoundation/NumPy.py", "max_issues_repo_name": "cherisyu/ML_in_Action", "max_issues_repo_head_hexsha": "8c1019de911e7fb1bbab973067213f5f62ab9dcd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ML-in-Action/MachineLearning-dev/src/py2.x/ML/1.MLFoundation/NumPy.py", "max_forks_repo_name": "cherisyu/ML_in_Action", "max_forks_repo_head_hexsha": "8c1019de911e7fb1bbab973067213f5f62ab9dcd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2372881356, "max_line_length": 66, "alphanum_fraction": 0.7149390244, "include": true, "reason": "from numpy", "num_tokens": 683}
|
import os, subprocess, glob
import json
import numpy as np
week_id = "week2/kasina/"
outdir = f"./submissions/week_2/kasina/"
files = glob.glob(os.path.join(week_id, "**", "*.ipynb"), recursive=True)
for f in files:
subprocess.call(["jupyter-nbconvert",f"--output-dir={outdir}","--to","HTML",f])
|
{"hexsha": "2c4f527d876aff312a5ce5936d727800572b3223", "size": 287, "ext": "py", "lang": "Python", "max_stars_repo_path": "temp_grading.py", "max_stars_repo_name": "naveenmoto/lablet102", "max_stars_repo_head_hexsha": "24de9daa4ae75cbde93567a3239ede43c735cf03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-09T16:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T16:48:44.000Z", "max_issues_repo_path": "temp_grading.py", "max_issues_repo_name": "naveenmoto/lablet102", "max_issues_repo_head_hexsha": "24de9daa4ae75cbde93567a3239ede43c735cf03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "temp_grading.py", "max_forks_repo_name": "naveenmoto/lablet102", "max_forks_repo_head_hexsha": "24de9daa4ae75cbde93567a3239ede43c735cf03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7, "max_line_length": 83, "alphanum_fraction": 0.6759581882, "include": true, "reason": "import numpy", "num_tokens": 88}
|
module EMLcompressor
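# Compressor models for the EMSO model library; currently this pulls in the
# centrifugal compressor implementation.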
using DanaTypes
using DotPlusInheritance
using Reexport
@reexport using ...streams.EMLstreams
import EMLtypes.length
include("compressor/centrifugal_compressor.jl")
end
|
{"hexsha": "216beaecd20d5d3f1c7613b431273c5ec6002a72", "size": 195, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "JuliaEMSOModels/pressure_changers/compressor.jl", "max_stars_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_stars_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-18T02:32:44.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-18T02:32:44.000Z", "max_issues_repo_path": "JuliaEMSOModels/pressure_changers/compressor.jl", "max_issues_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_issues_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-01-21T16:35:07.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-21T16:35:07.000Z", "max_forks_repo_path": "JuliaEMSOModels/pressure_changers/compressor.jl", "max_forks_repo_name": "DANA-Laboratory/EMSOModelLibrary.jl", "max_forks_repo_head_hexsha": "e28904cc1bdf8f67c6839ad35b4658dd399c0e47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.375, "max_line_length": 48, "alphanum_fraction": 0.8358974359, "num_tokens": 51}
|
"""
Parallel Tests for the SimpleComm class
Copyright 2017, University Corporation for Atmospheric Research
See the LICENSE.txt file for details
"""
from __future__ import print_function, unicode_literals
import unittest
import numpy as np
from mpi4py import MPI
from asaptools import simplecomm
from asaptools.partition import Duplicate, EqualStride
MPI_COMM_WORLD = MPI.COMM_WORLD
class SimpleCommParTests(unittest.TestCase):
def setUp(self):
self.gcomm = simplecomm.create_comm()
self.size = MPI_COMM_WORLD.Get_size()
self.rank = MPI_COMM_WORLD.Get_rank()
def tearDown(self):
pass
def testGetSize(self):
actual = self.gcomm.get_size()
expected = self.size
self.assertEqual(actual, expected)
def testIsManager(self):
actual = self.gcomm.is_manager()
expected = self.rank == 0
self.assertEqual(actual, expected)
def testSumInt(self):
data = 5
actual = self.gcomm.allreduce(data, 'sum')
expected = self.size * 5
self.assertEqual(actual, expected)
def testSumList(self):
data = range(5)
actual = self.gcomm.allreduce(data, 'sum')
expected = self.size * sum(data)
self.assertEqual(actual, expected)
def testSumArray(self):
data = np.arange(5)
actual = self.gcomm.allreduce(data, 'sum')
expected = self.size * sum(data)
self.assertEqual(actual, expected)
def testSumDict(self):
data = {'a': range(3), 'b': [5, 7]}
actual = self.gcomm.allreduce(data, 'sum')
expected = {'a': self.size * sum(range(3)), 'b': self.size * sum([5, 7])}
self.assertEqual(actual, expected)
def testMaxInt(self):
data = self.rank
actual = self.gcomm.allreduce(data, 'max')
expected = self.size - 1
self.assertEqual(actual, expected)
def testMaxList(self):
data = range(2 + self.rank)
actual = self.gcomm.allreduce(data, 'max')
expected = self.size
self.assertEqual(actual, expected)
def testMaxArray(self):
data = np.arange(2 + self.rank)
actual = self.gcomm.allreduce(data, 'max')
expected = self.size
self.assertEqual(actual, expected)
def testMaxDict(self):
data = {'rank': self.rank, 'range': range(2 + self.rank)}
actual = self.gcomm.allreduce(data, 'max')
expected = {'rank': self.size - 1, 'range': self.size}
self.assertEqual(actual, expected)
def testPartitionInt(self):
if self.gcomm.is_manager():
data = 10
else:
data = None
actual = self.gcomm.partition(data, func=Duplicate())
if self.gcomm.is_manager():
expected = None
else:
expected = 10
self.assertEqual(actual, expected)
def testPartitionIntInvolved(self):
if self.gcomm.is_manager():
data = 10
else:
data = None
actual = self.gcomm.partition(data, func=Duplicate(), involved=True)
expected = 10
self.assertEqual(actual, expected)
def testPartitionList(self):
if self.gcomm.is_manager():
data = range(10)
else:
data = None
actual = self.gcomm.partition(data)
if self.gcomm.is_manager():
expected = None
else:
expected = range(self.rank - 1, 10, self.size - 1)
self.assertEqual(actual, expected)
def testPartitionListInvolved(self):
if self.gcomm.is_manager():
data = range(10)
else:
data = None
actual = self.gcomm.partition(data, involved=True)
expected = range(self.rank, 10, self.size)
self.assertEqual(actual, expected)
def testPartitionArray(self):
if self.gcomm.is_manager():
data = np.arange(10)
else:
data = None
actual = self.gcomm.partition(data, func=EqualStride())
if self.gcomm.is_manager():
expected = None
else:
expected = np.arange(self.rank - 1, 10, self.size - 1)
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
np.testing.assert_array_equal(actual, expected)
def testPartitionStrArray(self):
indata = list('abcdefghi')
if self.gcomm.is_manager():
data = np.array(indata)
else:
data = None
actual = self.gcomm.partition(data, func=EqualStride())
if self.gcomm.is_manager():
expected = None
else:
expected = np.array(indata[self.rank - 1 :: self.size - 1])
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
np.testing.assert_array_equal(actual, expected)
def testPartitionCharArray(self):
indata = list('abcdefghi')
if self.gcomm.is_manager():
data = np.array(indata, dtype='c')
else:
data = None
actual = self.gcomm.partition(data, func=EqualStride())
if self.gcomm.is_manager():
expected = None
else:
expected = np.array(indata[self.rank - 1 :: self.size - 1], dtype='c')
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
np.testing.assert_array_equal(actual, expected)
def testPartitionArrayInvolved(self):
if self.gcomm.is_manager():
data = np.arange(10)
else:
data = None
actual = self.gcomm.partition(data, func=EqualStride(), involved=True)
expected = np.arange(self.rank, 10, self.size)
np.testing.assert_array_equal(actual, expected)
def testCollectInt(self):
if self.gcomm.is_manager():
data = None
actual = [self.gcomm.collect() for _ in range(1, self.size)]
expected = [i for i in enumerate(range(1, self.size), 1)]
else:
data = self.rank
actual = self.gcomm.collect(data)
expected = None
self.gcomm.sync()
if self.gcomm.is_manager():
for a in actual:
self.assertTrue(a in expected)
else:
self.assertEqual(actual, expected)
def testCollectList(self):
if self.gcomm.is_manager():
data = None
actual = [self.gcomm.collect() for _ in range(1, self.size)]
expected = [(i, range(i)) for i in range(1, self.size)]
else:
data = range(self.rank)
actual = self.gcomm.collect(data)
expected = None
self.gcomm.sync()
if self.gcomm.is_manager():
for a in actual:
self.assertTrue(a in expected)
else:
self.assertEqual(actual, expected)
def testCollectArray(self):
if self.gcomm.is_manager():
data = None
actual = [
(i, list(x)) for (i, x) in [self.gcomm.collect() for _ in range(1, self.size)]
]
expected = [(i, list(np.arange(self.size) + i)) for i in range(1, self.size)]
else:
data = np.arange(self.size) + self.rank
actual = self.gcomm.collect(data)
expected = None
self.gcomm.sync()
if self.gcomm.is_manager():
for a in actual:
self.assertTrue(a in expected)
else:
self.assertEqual(actual, expected)
def testCollectStrArray(self):
if self.gcomm.is_manager():
data = None
actual = [
(i, list(x)) for (i, x) in [self.gcomm.collect() for _ in range(1, self.size)]
]
expected = [
(i, list(map(str, list(np.arange(self.size) + i)))) for i in range(1, self.size)
]
else:
data = np.array([str(i + self.rank) for i in range(self.size)])
actual = self.gcomm.collect(data)
expected = None
self.gcomm.sync()
if self.gcomm.is_manager():
for a in actual:
self.assertTrue(a in expected)
else:
self.assertEqual(actual, expected)
def testCollectCharArray(self):
if self.gcomm.is_manager():
data = None
actual = [
(i, list(x)) for (i, x) in [self.gcomm.collect() for _ in range(1, self.size)]
]
expected = [
(
i,
list(map(lambda c: str(c).encode(), list(np.arange(self.size) + i))),
)
for i in range(1, self.size)
]
else:
data = np.array([str(i + self.rank) for i in range(self.size)], dtype='c')
actual = self.gcomm.collect(data)
expected = None
self.gcomm.sync()
if self.gcomm.is_manager():
for a in actual:
self.assertTrue(a in expected)
else:
self.assertEqual(actual, expected)
def testRationInt(self):
if self.gcomm.is_manager():
data = range(1, self.size)
actual = [self.gcomm.ration(d) for d in data]
expected = [None] * (self.size - 1)
else:
data = None
actual = self.gcomm.ration()
expected = range(1, self.size)
self.gcomm.sync()
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
self.assertTrue(actual in expected)
def testRationArray(self):
if self.gcomm.is_manager():
data = np.arange(3 * (self.size - 1))
actual = [self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, self.size - 1)]
expected = [None] * (self.size - 1)
else:
data = None
actual = self.gcomm.ration()
expected = np.arange(3 * (self.size - 1))
self.gcomm.sync()
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
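            # the rationed chunk must appear as a contiguous slice of the
            # full expected array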
contained = any(
[
np.all(actual == expected[i : i + actual.size])
for i in range(expected.size - actual.size + 1)
]
)
self.assertTrue(contained)
def testRationStrArray(self):
if self.gcomm.is_manager():
data = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
actual = [
self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, (self.size - 1))
]
expected = [None] * (self.size - 1)
else:
data = None
actual = self.gcomm.ration()
expected = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
self.gcomm.sync()
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
contained = any(
[
np.all(actual == expected[i : i + actual.size])
for i in range(expected.size - actual.size + 1)
]
)
self.assertTrue(contained)
def testRationCharArray(self):
if self.gcomm.is_manager():
data = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
actual = [
self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, (self.size - 1))
]
expected = [None] * (self.size - 1)
else:
data = None
actual = self.gcomm.ration()
expected = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
self.gcomm.sync()
if self.gcomm.is_manager():
self.assertEqual(actual, expected)
else:
contained = any(
[
np.all(actual == expected[i : i + actual.size])
for i in range(expected.size - actual.size + 1)
]
)
self.assertTrue(contained)
if __name__ == '__main__':
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
mystream = StringIO()
tests = unittest.TestLoader().loadTestsFromTestCase(SimpleCommParTests)
unittest.TextTestRunner(stream=mystream).run(tests)
MPI_COMM_WORLD.Barrier()
results = MPI_COMM_WORLD.gather(mystream.getvalue())
if MPI_COMM_WORLD.Get_rank() == 0:
for rank, result in enumerate(results):
print('RESULTS FOR RANK ' + str(rank) + ':')
print(str(result))
|
{"hexsha": "c1585ff2eee12bb5f8a265d46344c9f88790242d", "size": 12701, "ext": "py", "lang": "Python", "max_stars_repo_path": "asaptools/tests/simpleCommParTests.py", "max_stars_repo_name": "NCAR/ASAPPyTools", "max_stars_repo_head_hexsha": "6501dce2d253a3305ce300d67f49b5dc930a7c08", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2016-01-10T14:47:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T08:24:17.000Z", "max_issues_repo_path": "asaptools/tests/simpleCommParTests.py", "max_issues_repo_name": "kmpaul/ASAPPyTools", "max_issues_repo_head_hexsha": "1117fd99a2f47905276b287bcede84a8421bc5cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2016-12-29T14:46:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-22T15:06:49.000Z", "max_forks_repo_path": "asaptools/tests/simpleCommParTests.py", "max_forks_repo_name": "kmpaul/ASAPPyTools", "max_forks_repo_head_hexsha": "1117fd99a2f47905276b287bcede84a8421bc5cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-05-18T05:10:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-15T17:19:56.000Z", "avg_line_length": 33.5118733509, "max_line_length": 100, "alphanum_fraction": 0.5426344382, "include": true, "reason": "import numpy", "num_tokens": 2848}
|