# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Qiskit's Operation class."""
import unittest
import numpy as np
from qiskit.test import QiskitTestCase
from qiskit.circuit import QuantumCircuit, Barrier, Measure, Reset, Gate
from qiskit.circuit.library import XGate, CXGate
from qiskit.quantum_info.operators import Clifford, CNOTDihedral, Pauli
from qiskit.extensions.quantum_initializer import Initialize, Isometry
class TestOperationClass(QiskitTestCase):
"""Testing qiskit.circuit.Operation"""
def test_measure_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.Measure` and that
it has the expected name, num_qubits and num_clbits.
"""
op = Measure()
self.assertTrue(op.name == "measure")
self.assertTrue(op.num_qubits == 1)
self.assertTrue(op.num_clbits == 1)
def test_reset_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.Reset` and that
it has the expected name, num_qubits and num_clbits.
"""
op = Reset()
self.assertTrue(op.name == "reset")
self.assertTrue(op.num_qubits == 1)
self.assertTrue(op.num_clbits == 0)
def test_barrier_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.Barrier` and that
it has the expected name, num_qubits and num_clbits.
"""
num_qubits = 4
op = Barrier(num_qubits)
self.assertTrue(op.name == "barrier")
self.assertTrue(op.num_qubits == num_qubits)
self.assertTrue(op.num_clbits == 0)
def test_clifford_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.quantum_info.operators.Clifford` and that
it has the expected name, num_qubits and num_clbits.
"""
num_qubits = 4
qc = QuantumCircuit(num_qubits, 0)
qc.h(2)
qc.cx(0, 1)
op = Clifford(qc)
self.assertTrue(op.name == "clifford")
self.assertTrue(op.num_qubits == num_qubits)
self.assertTrue(op.num_clbits == 0)
def test_cnotdihedral_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.quantum_info.operators.CNOTDihedral` and that
it has the expected name, num_qubits and num_clbits.
"""
num_qubits = 4
qc = QuantumCircuit(num_qubits)
qc.t(0)
qc.x(0)
qc.t(0)
op = CNOTDihedral(qc)
self.assertTrue(op.name == "cnotdihedral")
self.assertTrue(op.num_qubits == num_qubits)
self.assertTrue(op.num_clbits == 0)
def test_pauli_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.quantum_info.operators.Pauli` and that
it has the expected name, num_qubits and num_clbits.
"""
num_qubits = 4
op = Pauli("I" * num_qubits)
self.assertTrue(op.name == "pauli")
self.assertTrue(op.num_qubits == num_qubits)
self.assertTrue(op.num_clbits == 0)
def test_isometry_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.extensions.quantum_initializer.Isometry` and that
it has the expected name, num_qubits and num_clbits.
"""
op = Isometry(np.eye(4, 4), 3, 2)
self.assertTrue(op.name == "isometry")
self.assertTrue(op.num_qubits == 7)
self.assertTrue(op.num_clbits == 0)
def test_initialize_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.extensions.quantum_initializer.Initialize` and that
it has the expected name, num_qubits and num_clbits.
"""
desired_vector = [0.5, 0.5, 0.5, 0.5]
op = Initialize(desired_vector)
self.assertTrue(op.name == "initialize")
self.assertTrue(op.num_qubits == 2)
self.assertTrue(op.num_clbits == 0)
def test_gate_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.Gate` and that
it has the expected name, num_qubits and num_clbits.
"""
name = "test_gate_name"
num_qubits = 3
op = Gate(name, num_qubits, [])
self.assertTrue(op.name == name)
self.assertTrue(op.num_qubits == num_qubits)
self.assertTrue(op.num_clbits == 0)
def test_xgate_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.library.XGate` and that
it has the expected name, num_qubits and num_clbits.
"""
op = XGate()
self.assertTrue(op.name == "x")
self.assertTrue(op.num_qubits == 1)
self.assertTrue(op.num_clbits == 0)
def test_cxgate_as_operation(self):
"""Test that we can instantiate an object of class
:class:`~qiskit.circuit.library.CXGate` and that
it has the expected name, num_qubits and num_clbits.
"""
op = CXGate()
self.assertTrue(op.name == "cx")
self.assertTrue(op.num_qubits == 2)
self.assertTrue(op.num_clbits == 0)
def test_can_append_to_quantum_circuit(self):
"""Test that we can add various objects with Operation interface to a Quantum Circuit."""
qc = QuantumCircuit(6, 1)
qc.append(XGate(), [2])
qc.append(Barrier(3), [1, 2, 4])
qc.append(CXGate(), [0, 1])
qc.append(Measure(), [1], [0])
qc.append(Reset(), [0])
qc.cx(3, 4)
qc.append(Gate("some_gate", 3, []), [1, 2, 3])
qc.append(Initialize([0.5, 0.5, 0.5, 0.5]), [4, 5])
qc.append(Isometry(np.eye(4, 4), 0, 0), [3, 4])
qc.append(Pauli("II"), [0, 1])
# Appending Clifford
circ1 = QuantumCircuit(2)
circ1.h(1)
circ1.cx(0, 1)
qc.append(Clifford(circ1), [0, 1])
# Appending CNOTDihedral
circ2 = QuantumCircuit(2)
circ2.t(0)
circ2.x(0)
circ2.t(1)
qc.append(CNOTDihedral(circ2), [2, 3])
# If we got to here, we have successfully appended everything to qc
self.assertIsInstance(qc, QuantumCircuit)
if __name__ == "__main__":
unittest.main()
{"hexsha": "35f417a8aefd2c7b18ef3e560d612b567c647f0d", "size": 6797, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/python/circuit/test_operation.py", "max_stars_repo_name": "Roshan-Thomas/qiskit-terra", "max_stars_repo_head_hexsha": "77219b5c7b7146b1545c5e5190739b36f4064b2f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1456, "max_stars_repo_stars_event_min_datetime": "2017-08-05T16:33:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-05T04:15:35.000Z", "max_issues_repo_path": "test/python/circuit/test_operation.py", "max_issues_repo_name": "Roshan-Thomas/qiskit-terra", "max_issues_repo_head_hexsha": "77219b5c7b7146b1545c5e5190739b36f4064b2f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 365, "max_issues_repo_issues_event_min_datetime": "2017-08-04T06:09:16.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T08:33:37.000Z", "max_forks_repo_path": "test/python/circuit/test_operation.py", "max_forks_repo_name": "Roshan-Thomas/qiskit-terra", "max_forks_repo_head_hexsha": "77219b5c7b7146b1545c5e5190739b36f4064b2f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 463, "max_forks_repo_forks_event_min_datetime": "2017-08-05T04:10:01.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-05T06:43:21.000Z", "avg_line_length": 36.5430107527, "max_line_length": 97, "alphanum_fraction": 0.6308665588, "include": true, "reason": "import numpy", "num_tokens": 1766}
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z-critical score (note: q=0.95 gives the one-sided 95% critical value ~1.645; a two-sided 95% CI would use q=0.975 ~1.96)
z_critical = stats.norm.ppf(q = 0.95)
# path: file location variable (provided by the execution environment)
data = pd.read_csv(path)
#Code starts here
#Create a sample of 'data' using "sample()" with n=sample_size and random_state=0 and save it in a variable called 'data_sample'
data_sample = data.sample(n=sample_size, random_state = 0)
#Store the mean of the installment column of 'data_sample' in a variable called 'sample_mean'
sample_mean = data_sample['installment'].mean()
#Store the standard deviation of the installment column of 'data_sample' in a variable called 'sample_std'
sample_std = data_sample['installment'].std()
#Find the margin of error using 'z_critical'(given),'sample_std' and 'sample_size' and save it in a variable called 'margin_of_error'
margin_of_error = z_critical * (sample_std/math.sqrt(sample_size))
#Find the confidence interval using 'sample_mean' and 'margin_of_error' and save it in a variable called 'confidence_interval'.
upper = sample_mean + margin_of_error
lower = sample_mean - margin_of_error
confidence_interval = (lower, upper)
#Store the mean of installment column of 'data' in a variable called 'true_mean'
true_mean = data['installment'].mean()
#Print and check if 'true_mean' falls in the range of 'confidence_interval'
print("True mean = ", true_mean, "\n Confidence interval", confidence_interval)
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig, axes = plt.subplots(nrows=3, ncols=1)
for i in range(len(sample_size)):
m = []
for j in range(1000):
data_installment = data['installment'].sample(n=sample_size[i]).mean()
m.append(data_installment)
mean_series = pd.Series(m)
axes[i].plot(mean_series)
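# Illustrative check (added): by the central limit theorem, the spread of the
# sample means should shrink roughly as 1/sqrt(n).
for n in sample_size:
    print("n =", n, "theoretical SE ~", data['installment'].std() / np.sqrt(n))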
# --------------
#Importing header files
#The bank manager believes that people with purpose as 'small_business' have been given a higher int.rate due to the risk associated with them
#Let's do a one-sided hypothesis test on that belief
#Null Hypothesis H0: mu = 12%
#Meaning: There is no difference in the interest rate being given to people with purpose as 'small_business'
#Alternate Hypothesis H1: mu > 12%
#Meaning: The interest rate being given to people with purpose as 'small_business' is higher than the average interest rate
from statsmodels.stats.weightstats import ztest
#Code starts here
#From the column int.rate of 'data', remove the % character and convert the column into float.
data['int.rate'] = data['int.rate'].map(lambda x: x.rstrip('%'))
data['int.rate'] = data['int.rate'].astype('float')
#After that divide the values of int.rate with 100 and store the result back to the column 'int.rate'
data['int.rate'] = data['int.rate']/100
#Apply "ztest()" with x1 as data[data['purpose']=='small_business']['int.rate'] and value as data['int.rate'].mean(), alternative='larger'(WHY?) and save the results in 'z_statistic' and 'p_value' respectively
z_statistic, p_value = ztest(x1 = data[data['purpose']=='small_business']['int.rate'], value = data['int.rate'].mean(), alternative = 'larger')
if p_value < 0.05:
print('Reject')
else:
print('Accept')
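# Illustrative cross-check (added; mirrors the one-sample z-test above,
# with alternative='larger' because H1 is one-sided: mu > mu0):
sb_rate = data[data['purpose'] == 'small_business']['int.rate']
z_manual = (sb_rate.mean() - data['int.rate'].mean()) / (sb_rate.std() / math.sqrt(len(sb_rate)))
print("manual z-statistic:", z_manual) # should be close to z_statistic above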
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#The bank thinks that monthly installments (installment) customers have to pay might have some sort of effect on loan defaulters
#Let's do hypothesis testing(two-sided) on that
#Code starts here
z_statistic, p_value = ztest(x1=data[data['paid.back.loan']=='No']['installment'], x2=data[data['paid.back.loan']=='Yes']['installment'])
if p_value < 0.05:
print('Reject')
else:
print('Accept')
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # critical value for 95% confidence
df = 6) # df = number of categories of 'purpose' - 1
#Code starts here
#Create a variable 'yes' which is the value counts of purpose when paid.back.loan in 'data' is Yes
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
#Create a variable 'no' which is the value counts of purpose when paid.back.loan in 'data' is No
no = data[data['paid.back.loan']=='No']['purpose'].value_counts()
#Concat 'yes.transpose()'(transpose of 'yes') and 'no.transpose()'(transpose of 'no') along axis=1 with keys= ['Yes','No'] and store it in a variable called 'observed'
observed = pd.concat([yes.transpose(), no.transpose()], axis = 1, keys = ['Yes', 'No'])
#Apply "chi2_contingency()" on 'observed' and store the result in variables named chi2, p, dof, ex respectively.
chi2, p, dof, ex = chi2_contingency(observed)
#Compare chi2 with critical_value(given)
if chi2 > critical_value:
print('Reject')
else:
print('Accept')
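# Illustrative cross-check (added): recompute the statistic from the observed
# counts and the expected counts 'ex' returned by chi2_contingency.
chi2_manual = ((observed.values - ex) ** 2 / ex).sum()
print("manual chi-square statistic:", chi2_manual) # should match chi2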
{"hexsha": "646f5c6af173beb518164fd565b1eaf03dc33cda", "size": 5182, "ext": "py", "lang": "Python", "max_stars_repo_path": "Banking-Inferences-(Making-inferences-from-the-data)/code.py", "max_stars_repo_name": "tanup05/ga-learner-dsmp-repo", "max_stars_repo_head_hexsha": "8d5421587194101d18fbfff2ff8dd0ada4074c21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Banking-Inferences-(Making-inferences-from-the-data)/code.py", "max_issues_repo_name": "tanup05/ga-learner-dsmp-repo", "max_issues_repo_head_hexsha": "8d5421587194101d18fbfff2ff8dd0ada4074c21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Banking-Inferences-(Making-inferences-from-the-data)/code.py", "max_forks_repo_name": "tanup05/ga-learner-dsmp-repo", "max_forks_repo_head_hexsha": "8d5421587194101d18fbfff2ff8dd0ada4074c21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5975609756, "max_line_length": 210, "alphanum_fraction": 0.6972211501, "include": true, "reason": "import numpy,import scipy,from scipy,from statsmodels", "num_tokens": 1296}
# Autogenerated wrapper script for ALPS_jll for armv7l-linux-gnueabihf-cxx11
export libalps
using CoinUtils_jll
using Osi_jll
using Clp_jll
using Cgl_jll
using CompilerSupportLibraries_jll
JLLWrappers.@generate_wrapper_header("ALPS")
JLLWrappers.@declare_library_product(libalps, "libAlps.so.0")
function __init__()
JLLWrappers.@generate_init_header(CoinUtils_jll, Osi_jll, Clp_jll, Cgl_jll, CompilerSupportLibraries_jll)
JLLWrappers.@init_library_product(
libalps,
"lib/libAlps.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
{"hexsha": "86fc760d00544a132560be0659c26133d9963ad3", "size": 609, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx11.jl", "max_stars_repo_name": "JuliaBinaryWrappers/ALPS_jll.jl", "max_stars_repo_head_hexsha": "b61187ea7eae403e108c73cc028c9b02128e41b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx11.jl", "max_issues_repo_name": "JuliaBinaryWrappers/ALPS_jll.jl", "max_issues_repo_head_hexsha": "b61187ea7eae403e108c73cc028c9b02128e41b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx11.jl", "max_forks_repo_name": "JuliaBinaryWrappers/ALPS_jll.jl", "max_forks_repo_head_hexsha": "b61187ea7eae403e108c73cc028c9b02128e41b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 109, "alphanum_fraction": 0.7832512315, "num_tokens": 189}
import json
import os
from paver.easy import pushd
import numpy as np
import matplotlib
matplotlib.use('Agg') # non-interactive backend, needed when running on a headless server
import matplotlib.pyplot as plt
import pickle
import csv
from sklearn import metrics
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('directory')
# opts = parser.parse_args()
summary = Summary()
figs_dir = 'summary_figs'
os.path.exists(figs_dir) or os.mkdir(figs_dir)
with pushd(figs_dir):
summary.a_rand_index(summary.sample_letters,summary.input_data,'l')
summary.a_rand_index(summary.sample_states,summary.input_data2,'s')
with open('word_list.txt',"w") as f:
for num, key in enumerate(summary.word_list):
f.write("iter%d:: " % num)
for num2, key2 in enumerate(key):
f.write("%d:" % num2 + str(key2) + " ")
f.write("\n")
# plot sample states and letters
for idx in range(summary.data_size):
summary.plot_states(idx)
plt.savefig('sample_states_%d.png' % idx)
summary.plot_state_boundaries(idx)
plt.savefig('state_boundary_%d.png' % idx)
summary.plot_letters(idx)
plt.savefig('sample_letters_%d.png' % idx)
plt.clf()
class Summary(object):
def __init__(self, dirpath = '.'):
with open('parameter.json') as f:
params = self.params = json.load(f)
with open('fig_title.json') as f2:
fig_title = self.fig_title = json.load(f2)
with open('sample_word_list.txt', 'rb') as f3: # binary mode: the file is a pickle
self.word_list = pickle.load(f3)
self.data_size = params['DATA_N']
self.input_data=[]
self.input_data2=[]
for i in fig_title:
data_l = np.loadtxt(i + ".txt")
data_l2 = np.loadtxt(i + ".lab")
self.input_data.append(data_l[0])
self.input_data2.append(data_l2)
self.sample_states = [np.loadtxt('sample_states_%d.txt' % i)for i in range(params['DATA_N'])]
self.sample_letters = [np.loadtxt('sample_letters_%d.txt' % i)for i in range(params['DATA_N'])]
self.state_ranges = []
for i in range(params['DATA_N']):
with open('state_ranges_%d.txt' % i, 'rb') as f: # binary mode: the file is a pickle
self.state_ranges.append(pickle.load(f))
llist = np.loadtxt("loglikelihood.txt").tolist()
self.maxlikelihood = (max(llist), llist.index(max(llist)))
def a_rand_index(self,sample_data,true_data,char):
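# Note (added): adjusted_rand_score compares two labelings up to a permutation
# of label ids; 1.0 means identical clusterings, values near 0 mean chance-level agreement.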
RIs=[]
for idx in range(len(sample_data[0])):
true=[]
sample=[]
for key,key2 in zip(sample_data,true_data):
sample.extend(key[idx])
true.extend(key2)
ris=metrics.adjusted_rand_score(true, sample)
RIs.append(ris)
np.savetxt("aRIs_"+char+".txt",RIs)
true=[]
sample=[]
for key,key2 in zip(sample_data,true_data):
sample.extend(key[99])
true.extend(key2)
ri=metrics.adjusted_rand_score(true, sample)
str="max_adjusted_rand_index_"+char+".txt"
f = open(str,'w')
writer = csv.writer(f)
writer.writerow(["adjusted_rand_score",ri])
def _plot_discrete_sequence(self, true_data, title, sample_data, label = u'', plotopts = {}):
ax = plt.subplot2grid((10, 1), (1, 0))
plt.sca(ax)
ax.matshow([true_data], aspect = 'auto')
plt.ylabel('Truth Label')
# label matrix
ax = plt.subplot2grid((10, 1), (2, 0), rowspan = 8)
plt.suptitle(title)
plt.sca(ax)
ax.matshow(sample_data, aspect = 'auto', **plotopts)
plt.xlabel('Frame')
plt.ylabel('Iteration')
plt.xticks(())
def _plot_label_boundary(self, true_data, title, sample_data, label = u''):
boundaries = [[stop for state, (start, stop) in r] for r in sample_data]
size = boundaries[0][-1]
data = np.zeros((len(sample_data), size))
for i, b in enumerate(boundaries):
for x in b[:-1]:
data[i, x] = 1.0
self._plot_discrete_sequence(true_data, title, data, label, plotopts = {'cmap': 'Greys'})
def plot_letters(self, idx):
self._plot_discrete_sequence(
self.input_data[idx],
self.fig_title[idx],
self.sample_letters[idx],
label=self.sample_letters[idx]
)
def plot_states(self, idx):
self._plot_discrete_sequence(
self.input_data2[idx],
self.fig_title[idx],
self.sample_states[idx],
label=self.sample_states[idx]
)
def plot_state_boundaries(self, idx):
self._plot_label_boundary(
self.input_data2[idx],
self.fig_title[idx],
self.state_ranges[idx],
label=self.sample_states[idx]
)
if __name__ == '__main__':
main()
{"hexsha": "14df73b351d8dd2f8f3ef72f449f125746aaecb7", "size": 4994, "ext": "py", "lang": "Python", "max_stars_repo_path": "HDP_HLM/SAMPLE/summary.py", "max_stars_repo_name": "GUZHIXIANG/DAA_taguchi", "max_stars_repo_head_hexsha": "5c77f0a326b53e0cc908cf08714fd470870877ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HDP_HLM/SAMPLE/summary.py", "max_issues_repo_name": "GUZHIXIANG/DAA_taguchi", "max_issues_repo_head_hexsha": "5c77f0a326b53e0cc908cf08714fd470870877ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HDP_HLM/SAMPLE/summary.py", "max_forks_repo_name": "GUZHIXIANG/DAA_taguchi", "max_forks_repo_head_hexsha": "5c77f0a326b53e0cc908cf08714fd470870877ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1884057971, "max_line_length": 103, "alphanum_fraction": 0.5859030837, "include": true, "reason": "import numpy", "num_tokens": 1170}
\chapter{Lexicon and ontology}\label{a:lexicon}
This appendix presents the lexicon and ontology of the basic experiment (\chapref{ch:basic}). The lexicons (Tables~\ref{t:st:lexicon0} and \ref{t:st:lexicon1}) and ontologies (Tables~\ref{t:st:ontology0} and \ref{t:st:ontology1}) give weighted conditional probabilities based on co-occurrences of word and meaning, or of meaning and referent. These probabilities $P(c_i|b_j)$ are calculated as follows:
\begin{eqnarray}
P(c_i|b_j) = \frac{P(c_i)}{\sum_{j=1}^n P(c_i \wedge b_j)} \cdot \frac{P(c_i)}{P(b_j)}
\end{eqnarray}
where, when calculating the lexical entries $P(F|C)$, the $c_i$ are word-forms and the $b_j$ are concepts; when calculating the probabilities for the ontologies, the $c_i$ are referents and the $b_j$ are concepts, yielding $P(R|C)$. The tables show only part of the lexicon and ontology: entries with probabilities $\leq 0.01$ are left out.
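As a hypothetical illustration (these numbers are not taken from the experiment): if $P(c_i) = 0.2$, $\sum_{j=1}^n P(c_i \wedge b_j) = 0.2$ and $P(b_j) = 0.4$, then
\begin{eqnarray}
P(c_i|b_j) = \frac{0.2}{0.2} \cdot \frac{0.2}{0.4} = 0.5
\end{eqnarray}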
The legends of some of the occurring meanings for robots $r0$ and $r1$ are given in Tables~\ref{t:st:legend0a} and \ref{t:st:legend1a}.
\begin{table}[t]
\centering
{\footnotesize\begin{tabular}{ld{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}}
\lsptoprule
C-F & \multicolumn{1}{r}{\it huma} & \multicolumn{1}{r}{\it xomu} & \multicolumn{1}{r}{\it wosa} & \multicolumn{1}{r}{\it kyga} & \multicolumn{1}{r}{\it vyqa} & \multicolumn{1}{r}{\it guhu} & \multicolumn{1}{r}{\it lyzu} & \multicolumn{1}{r}{\it poma} & \multicolumn{1}{r}{\it pugu} & \multicolumn{1}{r}{\it wely}\\\midrule
M53 & 0.58 & - & - & - & - & - & - & - & - & -\\\hline
M67 & 0.08 & - & - & - & - & - & - & - & - & -\\\hline
M30 & 0.02 & 0.37 & - & - & - & - & - & - & - & -\\\hline
M39 & 0.01 & - & - & - & - & - & - & - & - & -\\\hline
M18 & - & 0.14 & - & - & - & - & - & - & - & -\\\hline
M20 & - & 0.09 & - & - & - & - & - & - & - & -\\\hline
M17 & - & 0.04 & - & - & - & - & - & - & - & -\\\hline
M22 & - & 0.03 & - & - & - & - & - & - & - & -\\\hline
M43 & - & 0.03 & - & - & - & - & - & - & - & -\\\hline
M16 & - & 0.02 & - & - & - & - & - & - & - & -\\\hline
M26 & - & - & 0.37 & - & - & - & - & 0.11 & - & -\\\hline
M5 & - & - & - & 0.40 & - & - & - & - & - & -\\\hline
M27 & - & - & - & 0.07 & - & - & - & - & - & -\\\hline
M33 & - & - & - & 0.04 & - & - & - & - & - & -\\\hline
M15 & - & - & - & 0.02 & - & - & - & - & - & -\\\hline
M58 & - & - & - & - & 0.51 & - & - & - & - & -\\\hline
M393 & - & - & - & - & 0.08 & - & - & - & - & -\\\hline
M211 & - & - & - & - & 0.04 & - & - & - & - & -\\\hline
M484 & - & - & - & - & 0.04 & - & - & - & - & -\\\hline
M23 & - & - & - & - & - & 0.80 & - & - & - & -\\\hline
M61 & - & - & - & 0.08 & - & - & 0.44 & 0.01 & - & -\\\hline
M55 & - & - & - & 0.01 & - & - & 0.11 & - & - & -\\\hline
M394 & - & - & - & - & - & - & 0.02 & - & - & -\\\hline
M46 & - & - & - & - & - & - & 0.01 & - & - & -\\\hline
M169 & 0.01 & - & - & - & - & - & - & - & 0.46 & -\\\hline
M238 & - & - & - & - & - & - & - & - & 0.26 & -\\\hline
M121 & - & - & - & - & - & - & - & - & - & 1.00\\
\lspbottomrule
\end{tabular}}
\caption{Lexicon of robot $r0$. The cells of the table give the weighted conditional probabilities that a word-form is used to name a meaning. These probabilities are based on the occurrence frequencies in one of the experiments after 5,000 language games. Associations with probabilities lower than 0.01 are left out for clarity.}
\label{t:st:lexicon0}
\end{table}
\begin{table}[h]
\centering
{\footnotesize\begin{tabular}{ld{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}d{3}}
\lsptoprule
C-F & \multicolumn{1}{r}{\it huma} & \multicolumn{1}{r}{\it xomu} & \multicolumn{1}{r}{\it wosa} & \multicolumn{1}{r}{\it kyga} & \multicolumn{1}{r}{\it vyqa} & \multicolumn{1}{r}{\it guhu} & \multicolumn{1}{r}{\it lyzu} & \multicolumn{1}{r}{\it poma} & \multicolumn{1}{r}{\it pugu} & \multicolumn{1}{r}{\it wely}\\\midrule
M4 & 0.44 & - & 0.02 & - & 0.01 & - & - & - & 0.01 & -\\\hline
M51 & 0.12 & - & - & - & - & - & - & - & - & -\\\hline
M55 & 0.08 & - & - & - & - & - & - & - & - & -\\\hline
M37 & 0.03 & - & - & - & - & - & - & - & - & -\\\hline
M1 & 0.02 & - & - & - & - & - & - & - & - & -\\\hline
M69 & 0.01 & - & - & - & - & - & - & - & - & -\\\hline
M91 & 0.01 & - & - & - & - & - & - & 0.03 & - & -\\\hline
M5 & - & 0.33 & - & - & - & - & - & - & - & -\\\hline
M39 & - & - & 0.40 & - & - & - & - & - & - & -\\\hline
M81 & - & - & 0.10 & - & - & - & - & 0.12 & - & -\\\hline
M13 & - & - & 0.05 & - & - & - & - & - & - & -\\\hline
M96 & - & - & 0.03 & - & - & - & - & - & - & -\\\hline
M40 & - & - & 0.02 & - & - & - & - & - & - & -\\\hline
M16 & - & - & 0.02 & - & - & - & - & - & - & -\\\hline
M65 & - & - & 0.02 & - & 0.07 & - & - & - & - & -\\\hline
M46 & - & - & 0.02 & - & - & - & - & - & - & -\\\hline
M21 & - & - & 0.01 & - & - & - & - & - & - & -\\\hline
M242 & - & - & 0.01 & - & 0.04 & - & - & - & - & -\\\hline
M0 & - & - & - & 0.30 & - & - & - & - & - & -\\\hline
M22 & - & - & - & 0.18 & - & - & - & - & - & 0.01\\\hline
M78 & - & - & - & 0.09 & - & - & - & - & - & -\\\hline
M68 & - & - & - & 0.08 & 0.01 & - & - & 0.01 & - & -\\\hline
M75 & - & - & - & 0.06 & - & - & - & - & - & -\\\hline
M85 & - & - & - & - & 0.04 & - & - & - & - & -\\\hline
M389 & - & - & - & - & - & 0.08 & - & - & - & -\\\hline
M42 & - & - & - & - & - & 0.07 & - & - & - & -\\\hline
M44 & - & - & - & - & - & - & 0.81 & - & - & -\\\hline
M363 & - & - & - & - & - & - & - & 0.12 & - & -\\\hline
M102 & - & - & - & - & - & - & - & 0.03 & - & -\\\hline
M287 & - & - & - & - & - & - & - & 0.01 & - & -\\\hline
M62 & - & - & - & - & - & - & - & - & 0.19 & -\\
\lspbottomrule
\end{tabular}}
\caption{Lexicon of robot $r1$.}
\label{t:st:lexicon1}
\end{table}
\begin{table}[h]
\centering
{\footnotesize\begin{tabular}{ld{3}d{3}d{3}d{3}}
\lsptoprule
C-B & \multicolumn{1}{c}{0} & \multicolumn{1}{c}{1} & \multicolumn{1}{c}{2} & \multicolumn{1}{c}{3}\\\midrule
M53 & 0.68 & - & - & -\\\hline
M55 & 0.10 & - & - & -\\\hline
M67 & 0.03 & - & - & -\\\hline
M169 & 0.03 & - & - & -\\\hline
M33 & 0.01 & - & 0.01 & -\\\hline
M128 & 0.01 & - & - & -\\\hline
M187 & 0.01 & - & - & -\\\hline
M43 & 0.01 & 0.01 & - & -\\\hline
M46 & 0.01 & - & - & -\\\hline
M30 & - & 0.68 & - & -\\\hline
M18 & - & 0.18 & - & -\\\hline
M16 & - & 0.03 & - & -\\\hline
M5 & - & - & 0.45 & -\\\hline
M20 & - & - & 0.21 & -\\\hline
M27 & - & - & 0.13 & -\\\hline
M23 & - & - & 0.03 & -\\\hline
M22 & - & - & 0.03 & -\\\hline
M26 & - & - & 0.02 & -\\\hline
M15 & - & - & 0.02 & -\\\hline
M89 & - & - & 0.02 & -\\\hline
M37 & - & - & 0.01 & -\\\hline
M233 & - & - & 0.01 & -\\\hline
M61 & - & - & - & 0.84\\\hline
M58 & - & - & - & 0.08\\\hline
M394 & - & - & - & 0.02\\\hline
M90 & - & - & - & 0.01\\\hline
M393 & - & - & - & 0.01\\
\lspbottomrule
\end{tabular}}
\caption{Ontology of robot $r0$ in relation to the referents for which they have been used. The weighted frequencies give the relative frequency that a given meaning co-occurs with the particular referent.\is{ontology}}
\label{t:st:ontology0}
\end{table}
\begin{table}[h]
\centering
{\footnotesize\begin{tabular}{ld{3}d{3}d{3}d{3}}
\lsptoprule
C-B & \multicolumn{1}{c}{0} & \multicolumn{1}{c}{1} & \multicolumn{1}{c}{2} & \multicolumn{1}{c}{3}\\\midrule
M4 & 0.70 & - & - & -\\\hline
M55 & 0.07 & - & - & -\\\hline
M65 & 0.05 & - & - & -\\\hline
M51 & 0.04 & 0.02 & - & -\\\hline
M37 & 0.03 & - & - & -\\\hline
M39 & - & 0.58 & - & -\\\hline
M81 & - & 0.17 & - & -\\\hline
M13 & - & 0.06 & - & -\\\hline
M96 & - & 0.03 & - & -\\\hline
M94 & - & 0.02 & - & -\\\hline
M16 & - & 0.01 & - & -\\\hline
M1 & - & 0.01 & - & -\\\hline
M0 & - & - & 0.46 & -\\\hline
M22 & - & - & 0.27 & -\\\hline
M75 & - & - & 0.08 & -\\\hline
M78 & - & - & 0.05 & 0.01\\\hline
M44 & - & - & - & 0.73\\\hline
M68 & - & - & - & 0.15\\\hline
M40 & - & - & - & 0.03\\\hline
M242 & - & - & 0.05 & -\\
\lspbottomrule
\end{tabular}}
\caption{Ontology of robot $r1$.\is{ontology}}
\label{t:st:ontology1}
\end{table}
\begin{table}[h]
\centering
{\footnotesize\begin{tabular}{rc}
\lsptoprule
M15 & $(0.02,0.31,1.00,0.02)_1$\\\hline
M16 & $(0.02,0.99,0.30,0.02)_1$\\\hline
M17 & $(0.56,0.99,0.30,0.02)_1$\\\hline
M22 & $(0.02,0.01,1.00,0.99)_1$\\\hline
M23 & $(0.56,0.31,1.00,0.44)_1$\\\hline
M26 & $(0.02,0.31,1.00,0.99)_1$\\\hline
M33 & $(1.00,0.99,1.00,0.99)_1$\\\hline
M39 & $(1.00,0.31,1.00,0.44)_1$\\\hline
M43 & $(1.00,0.99,0.30,0.02)_1$\\\hline
M46 & $(1.00,0.31,0.30,0.44)_1$\\\hline
M89 & $(0.00,0.00,0.01,0.00)_4$\\\hline
M121 & $(1.00,0.01,0.30,0.44)_1$\\\hline
M128 & $(1.00,0.01,0.30,0.02)_1$\\\hline
M169 & $(1.00,0.00,0.00,0.00)_2$\\\hline
M187 & $(0.00,0.00,0.00,0.00)_4$\\\hline
M211 & $(0.69,1.00,0.00,0.00)_2$\\\hline
M233 & $(0.00,0.00,1.00,0.00)_2$\\\hline
M238 & $(0.02,0.99,1.00,0.99)_1$\\\hline
M394 & $(0.00,0.00,0.00,0.01)_5$\\\hline
M484 & $(1.00,0.99,1.00,0.44)_1$\\
\lspbottomrule
\end{tabular}}
\caption{Additional legend of meanings of robot $r0$. See also \tabref{t:st:legend}.}
\label{t:st:legend0a}
\end{table}
\begin{table}[h]
\centering
{\footnotesize\begin{tabular}{rc}
\lsptoprule
M0 & $(0.02,0.02,1.00,0.01)_1$\\\hline
M1 & $(0.02,0.02,0.46,0.01)_1$\\\hline
M4 & $(1.00,0.02,0.03,0.01)_1$\\\hline
M5 & $(0.31,0.02,1.00,0.01)_1$\\\hline
M13 & $(0.00,0.00,0.00,0.00)_3$\\\hline
M16 & $(0.00,0.00,0.01,0.00)_3$\\\hline
M21 & $(0.00,0.00,0.00,0.00)_5$\\\hline
M22 & $(0.02,0.02,1.00,0.53)_1$\\\hline
M37 & $(0.00,0.00,0.00,0.00)_4$\\\hline
M39 & $(0.02,1.00,0.03,0.01)_1$\\\hline
M40 & $(0.00,0.00,0.00,0.00)_4$\\\hline
M42 & $(1.00,1.00,1.00,1.00)_1$\\\hline
M44 & $(0.02,0.02,0.03,1.00)_1$\\\hline
M46 & $(1.00,1.00,0.46,0.01)_1$\\\hline
M51 & $(1.00,1.00,0.03,0.01)_1$\\\hline
M55 & $(1.00,0.58,0.03,0.01)_1$\\\hline
M62 & $(0.02,1.00,1.00,0.53)_1$\\\hline
M65 & $(1.00,0.02,0.03,0.53)_1$\\\hline
M68 & $(0.02,0.02,0.46,1.00)_1$\\\hline
M69 & $(0.02,1.00,1.00,0.01)_1$\\\hline
M75 & $(0.02,0.58,1.00,0.01)_1$\\\hline
M78 & $(0.02,0.02,1.00,1.00)_1$\\\hline
M81 & $(0.31,1.00,0.03,0.01)_1$\\\hline
M85 & $(0.31,1.00,0.46,0.53)_1$\\\hline
M91 & $(1.00,1.00,0.46,1.00)_1$\\\hline
M94 & $(0.02,1.00,0.46,0.01)_1$\\\hline
M96 & $(0.31,1.00,0.46,0.01)_1$\\\hline
M102 & $(0.02,0.58,1.00,1.00)_1$\\\hline
M242 & $(0.02,0.58,1.00,0.53)_1$\\\hline
M287 & $(0.01,0.01,0.00,0.00)_4$\\\hline
M363 & $(0.31,0.02,0.03,1.00)_1$\\\hline
M389 & $(0.02,0.02,0.00,0.00)_5$\\
\lspbottomrule
\end{tabular}}
\caption{Legend of meanings of robot $r1$.}
\label{t:st:legend1a}
\end{table}
{"hexsha": "06e2d1e4e18dd3170612ee9689548255141cc152", "size": 10486, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "appLex.tex", "max_stars_repo_name": "langsci/Vogt", "max_stars_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-13T13:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-13T13:08:09.000Z", "max_issues_repo_path": "appLex.tex", "max_issues_repo_name": "langsci/Vogt", "max_issues_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appLex.tex", "max_forks_repo_name": "langsci/Vogt", "max_forks_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5913043478, "max_line_length": 504, "alphanum_fraction": 0.4776845318, "num_tokens": 5292}
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## yuyua@microsoft.com
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from lib.models.backbones.backbone_selector import BackboneSelector
from lib.models.tools.module_helper import ModuleHelper
class FcnNet(nn.Module):
def __init__(self, configer):
self.inplanes = 128
super(FcnNet, self).__init__()
self.configer = configer
self.num_classes = self.configer.get("data", "num_classes")
self.backbone = BackboneSelector(configer).get_backbone()
# extra added layers
if "wide_resnet38" in self.configer.get("network", "backbone"):
in_channels = [2048, 4096]
elif "mobilenetv2" in self.configer.get("network", "backbone"):
in_channels = [160, 320]
else:
in_channels = [1024, 2048]
self.cls_head = nn.Sequential(
nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
nn.Dropout2d(0.10),
nn.Conv2d(
512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
),
)
self.dsn_head = nn.Sequential(
nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
nn.Dropout2d(0.10),
nn.Conv2d(
512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=False
),
)
if "mobilenetv2" in self.configer.get("network", "backbone"):
self.cls_head = nn.Sequential(
nn.Conv2d(in_channels[1], 256, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(
256, bn_type=self.configer.get("network", "bn_type")
),
nn.Dropout2d(0.10),
nn.Conv2d(
256,
self.num_classes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
)
self.dsn_head = nn.Sequential(
nn.Conv2d(in_channels[0], 128, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(
128, bn_type=self.configer.get("network", "bn_type")
),
nn.Dropout2d(0.10),
nn.Conv2d(
128,
self.num_classes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
)
def forward(self, x_):
x = self.backbone(x_)  # backbone returns the per-stage feature maps
aux_x = self.dsn_head(x[-2])  # auxiliary (deep-supervision) prediction from the penultimate stage
x = self.cls_head(x[-1])  # main prediction from the last stage
# upsample both prediction maps back to the input resolution
aux_x = F.interpolate(
aux_x, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
)
x = F.interpolate(
x, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
)
return aux_x, x
class FcnNet_wo_dsn(nn.Module):
def __init__(self, configer):
self.inplanes = 128
super(FcnNet_wo_dsn, self).__init__()
self.configer = configer
self.num_classes = self.configer.get("data", "num_classes")
self.backbone = BackboneSelector(configer).get_backbone()
# extra added layers
if "wide_resnet38" in self.configer.get("network", "backbone"):
in_channels = [2048, 4096]
elif "mobilenetv2" in self.configer.get("network", "backbone"):
in_channels = [160, 320]
else:
in_channels = [1024, 2048]
self.cls_head = nn.Sequential(
nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(512, bn_type=self.configer.get("network", "bn_type")),
nn.Dropout2d(0.10),
nn.Conv2d(
512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True
),
)
if "mobilenetv2" in self.configer.get("network", "backbone"):
self.cls_head = nn.Sequential(
nn.Conv2d(in_channels[1], 256, kernel_size=3, stride=1, padding=1),
ModuleHelper.BNReLU(
256, bn_type=self.configer.get("network", "bn_type")
),
nn.Dropout2d(0.10),
nn.Conv2d(
256,
self.num_classes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
),
)
def forward(self, x_):
x = self.backbone(x_)
x = self.cls_head(x[-1])
x = F.interpolate(
x, size=(x_.size(2), x_.size(3)), mode="bilinear", align_corners=True
)
return x
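# Shape sketch for FcnNet (added, illustrative; assumes a dilated ResNet-101
# backbone with output stride 8 and a 512x512 input):
#   x_    : (B, 3, 512, 512)
#   x[-2] : (B, 1024, 64, 64) -> dsn_head -> (B, num_classes, 64, 64)
#   x[-1] : (B, 2048, 64, 64) -> cls_head -> (B, num_classes, 64, 64)
# both maps are then bilinearly upsampled to (B, num_classes, 512, 512);
# FcnNet_wo_dsn follows the same path without the auxiliary dsn_head branch.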
{"hexsha": "c371e6617a9041f96377af9eeb02b83b94cd8daa", "size": 5471, "ext": "py", "lang": "Python", "max_stars_repo_path": "seg/lib/models/nets/fcnet.py", "max_stars_repo_name": "Frank-Abagnal/HRFormer", "max_stars_repo_head_hexsha": "d7d362770de8648f8e0a379a71cee25f42954503", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 254, "max_stars_repo_stars_event_min_datetime": "2021-08-13T10:05:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:21:45.000Z", "max_issues_repo_path": "seg/lib/models/nets/fcnet.py", "max_issues_repo_name": "Sense-X/HRFormer", "max_issues_repo_head_hexsha": "1245b88b5824fbd8cdb358b5ee909a4e537a2ef5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-09-08T01:40:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T10:53:47.000Z", "max_forks_repo_path": "seg/lib/models/nets/fcnet.py", "max_forks_repo_name": "Sense-X/HRFormer", "max_forks_repo_head_hexsha": "1245b88b5824fbd8cdb358b5ee909a4e537a2ef5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2021-08-13T14:06:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T02:41:26.000Z", "avg_line_length": 37.4726027397, "max_line_length": 87, "alphanum_fraction": 0.5026503381, "include": true, "reason": "import numpy", "num_tokens": 1290}
using BasisFunctions, LinearAlgebra, DomainSets, GridArrays, Test, StaticArrays, FrameFun
@testset begin
B = (Fourier(11) → -1..1)^2
Dom = Disk(0.8)
@test support(dictionary(∂x(random_expansion(extensionframe(B, Dom)))))≈Dom
# @test SamplingStyle(ExtensionFramePlatform(FrameFun.ProductPlatform(FourierPlatform(),FourierPlatform()),(0.0..0.5)^2)) ==
# ProductSamplingStyle(OversamplingStyle(),OversamplingStyle())
@test dictionary(_ap(Fourier(100),0.0..0.5)) == extensionframe(Fourier(100),0.0..0.5)
# Test defaults
@test AZ_A(Fourier(100),0.0..0.5; samplingstyle=OversamplingStyle(),oversamplingfactor=2,normalizedsampling=false)≈
AZ_A(Fourier(100),0.0..0.5)
ap0 = _ap(Fourier(100), 0.0..0.5; samplingstyle=OversamplingStyle(), oversamplingfactor=2, normalizedsampling=false)
A = AZ_A(ap0)
L = samplingparameter(ap0)
a1 = evaluation(Fourier(L),interpolation_grid(Fourier(L)))
a2 = IndexRestriction(GridBasis(interpolation_grid(Fourier(L))),1:200)
a3 = BasisFunctions.FourierIndexExtension(Fourier(100),Fourier(L))
@test A≈a2*a1*a3
ap1 = approximationproblem(Fourier(100), 0.0..0.5; samplingstyle=OversamplingStyle(), oversamplingfactor=2, normalizedsampling=true)
A = AZ_A(ap1)
L = samplingparameter(ap1)
a1 = evaluation(Fourier(L),interpolation_grid(Fourier(L)))
a2 = IndexRestriction(GridBasis(interpolation_grid(Fourier(L))),1:200)
a3 = BasisFunctions.FourierIndexExtension(Fourier(100),Fourier(L))
a4 = ScalingOperator(dest(a2),1/sqrt(L))
@test A≈a4*a2*a1*a3
# Test the difference made by sampling normalization
ap2 = approximationproblem(Fourier(100),0.0..0.5;
samplingstyle=OversamplingStyle(), oversamplingfactor=2, normalizedsampling=false)
s = svdvals(AZ_A(ap2))
N = sampling_weights(FrameFun.NormalizedSampling(OversamplingStyle()),approximationproblem(Fourier(100),0.0..0.5))
L = samplingparameter(ap2)
@test L == 399
@test N[1] ≈ 1/sqrt(L)
@test 47==sum(s.<.1*sqrt(L))
@test 49==sum(s.>.9*sqrt(L))
s = svdvals(AZ_A(Fourier(100),0.0..0.5;oversamplingfactor=2,normalizedsampling=true))
@test 47==sum(s.<.1)
@test 49==sum(1+1e-10 .> s .>.9)
# right side should be appropriately normalized
@test sample_data(exp, Fourier(100), 0.0..0.5; oversamplingfactor=2,normalizedsampling=true)≈
1/sqrt(399)*GridSampling(subgrid(interpolation_grid(Fourier(399)),0.0..0.5))*exp
# left side should be appropriately normalized
@test AZ_A(Fourier(100),0.0..0.5;samplingstyle=OversamplingStyle(),oversamplingfactor=2,normalizedsampling=true)≈
discretization(exp, Fourier(100), 0.0..0.5; oversamplingfactor=2,normalizedsampling=true)
@test sample_data(exp, Fourier(100),0.0..0.5; oversamplingfactor=2,normalizedsampling=false)≈
GridSampling(subgrid(interpolation_grid(Fourier(399)),0.0..0.5))*exp
@test AZ_A(Fourier(100), 0.0..0.5; samplingstyle=OversamplingStyle(),oversamplingfactor=2,normalizedsampling=false)≈
discretization(exp, Fourier(100), 0.0..0.5; oversamplingfactor=2, normalizedsampling=false)
F = approximate(exp, Fourier(100), 0.0..0.5; samplingstyle=OversamplingStyle(),oversamplingfactor=2,normalizedsampling=true)
@test abs(F[1](.2)-exp(.2) )< 1e-12
F = approximate(exp, Fourier(100), 0.0..0.5; samplingstyle=OversamplingStyle(),oversamplingfactor=2,normalizedsampling=false)
@test abs(F[1](.2)-exp(.2) )< 1e-12
P = WeightedSumPlatform(FourierPlatform(),x->1,x->sqrt(x))
ap = approximationproblem(P,(10,10))
azdual(ap)
end
{"hexsha": "b5a8cde62f65b02d9884207a8ad679bf601707b7", "size": 3583, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_scenariolist.jl", "max_stars_repo_name": "JuliaApproximation/FrameFun.jl", "max_stars_repo_head_hexsha": "aa4247015d1bc8528514f86d8b6d82e4886b1976", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-07-09T08:33:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:04:47.000Z", "max_issues_repo_path": "test/test_scenariolist.jl", "max_issues_repo_name": "JuliaApproximation/FrameFun.jl", "max_issues_repo_head_hexsha": "aa4247015d1bc8528514f86d8b6d82e4886b1976", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-10-21T13:41:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T19:35:05.000Z", "max_forks_repo_path": "test/test_scenariolist.jl", "max_forks_repo_name": "JuliaApproximation/FrameFun.jl", "max_forks_repo_head_hexsha": "aa4247015d1bc8528514f86d8b6d82e4886b1976", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:51:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T14:46:56.000Z", "avg_line_length": 49.0821917808, "max_line_length": 136, "alphanum_fraction": 0.7100195367, "num_tokens": 1154}
# Copyright (c) OpenMMLab. All rights reserved.
# In this example, we convert babel120_train to MMAction2 format
# The required files can be downloaded from the homepage of BABEL project
import numpy as np
from mmcv import dump, load
def gen_babel(x, y):
data = []
for i, xx in enumerate(x):
sample = dict()
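# assumed layout (added): each xx is (C, T, V, M) = (coords, frames, joints,
# persons); the transpose below yields (M, T, V, C), MMAction2's keypoint format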
sample['keypoint'] = xx.transpose(3, 1, 2, 0).astype(np.float16)
sample['label'] = y[1][0][i]
names = [y[0][i], y[1][1][i], y[1][2][i], y[1][3][i]]
sample['frame_dir'] = '_'.join([str(k) for k in names])
sample['total_frames'] = 150
data.append(sample)
return data
x = np.load('train_ntu_sk_120.npy')
y = load('train_label_120.pkl')
data = gen_babel(x, y)
dump(data, 'babel120_train.pkl')
{"hexsha": "3dedc1b31eb316d00722709aa1f2e9e27f419c4d", "size": 770, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/data/skeleton/babel2mma2.py", "max_stars_repo_name": "vineethbabu/mmaction2", "max_stars_repo_head_hexsha": "f2e4289807c95bad7dd83757a49c5d9ebd2f881e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1870, "max_stars_repo_stars_event_min_datetime": "2020-07-11T09:33:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:21:36.000Z", "max_issues_repo_path": "tools/data/skeleton/babel2mma2.py", "max_issues_repo_name": "wuyy258/mmaction2", "max_issues_repo_head_hexsha": "3f3ad9cae291c991b822cbc2ecfb88c1188e87c5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1285, "max_issues_repo_issues_event_min_datetime": "2020-07-11T11:18:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T08:41:17.000Z", "max_forks_repo_path": "tools/data/skeleton/babel2mma2.py", "max_forks_repo_name": "wuyy258/mmaction2", "max_forks_repo_head_hexsha": "3f3ad9cae291c991b822cbc2ecfb88c1188e87c5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 557, "max_forks_repo_forks_event_min_datetime": "2020-07-11T09:51:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:21:35.000Z", "avg_line_length": 29.6153846154, "max_line_length": 73, "alphanum_fraction": 0.625974026, "include": true, "reason": "import numpy", "num_tokens": 237}
import argparse
import math
import os
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, MultipleLocator
from PIL import Image
from models.enet import ENet
import transforms as ext_transforms
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_false', help="If set, showcase how the model works on a sample image; otherwise measure latency")
parser.add_argument('--iter_batch', action='store_true', help="If True, iterate from batch size of 1 to maximum batch size; else, only run for current batch size")
parser.add_argument('--plot', action='store_true', help="If True, plot the latency result")
parser.add_argument('-i', '--iter', type=int, default=100, help="Number of iterations to run for latency")
parser.add_argument('-b', '--batch_size', type=int, default=1, help="Batch size for inference")
args = parser.parse_args()
# Set for GPU
device = torch.device('cuda')
# Load the sample data
data_dir = "../data/cityscapes"
image_path = "berlin_000000_000019_leftImg8bit.png"
image_path = os.path.join(data_dir, image_path)
sample_image = Image.open(image_path)
print("Original sample image dimension:", sample_image.size)
# Preprocess the image per model requirement and load onto the GPU
height, width = 512, 1024
image_transform = transforms.Compose(
[transforms.Resize((height, width)),
transforms.ToTensor()])
sample_image = image_transform(sample_image).to(device)
print("Preprocessed sample image dimension:", sample_image.shape)
# Load the required parameters for inference
color_encoding = OrderedDict([
('unlabeled', (0, 0, 0)),
('road', (128, 64, 128)),
('sidewalk', (244, 35, 232)),
('building', (70, 70, 70)),
('wall', (102, 102, 156)),
('fence', (190, 153, 153)),
('pole', (153, 153, 153)),
('traffic_light', (250, 170, 30)),
('traffic_sign', (220, 220, 0)),
('vegetation', (107, 142, 35)),
('terrain', (152, 251, 152)),
('sky', (70, 130, 180)),
('person', (220, 20, 60)),
('rider', (255, 0, 0)),
('car', (0, 0, 142)),
('truck', (0, 0, 70)),
('bus', (0, 60, 100)),
('train', (0, 80, 100)),
('motorcycle', (0, 0, 230)),
('bicycle', (119, 11, 32))
])
num_classes = len(color_encoding)
model = ENet(num_classes).to(device)
# Load the pre-trained weights
model_path = "./save/ENet_Cityscapes/ENet"
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['state_dict'])
print('Model loaded successfully!')
# Run the inference
# If args.test, then showcase how this model works
if not args.test:
model.eval()
sample_image = torch.unsqueeze(sample_image, 0)
with torch.no_grad():
output = model(sample_image)
print("Model output dimension:", output.shape)
# Convert it to a single int using the indices where the maximum (1) occurs
_, predictions = torch.max(output.data, 1)
label_to_rgb = transforms.Compose([
ext_transforms.LongTensorToRGBPIL(color_encoding),
transforms.ToTensor()
])
color_predictions = utils.batch_transform(predictions.cpu(), label_to_rgb)
utils.imshow_batch(sample_image.data.cpu(), color_predictions)
# Run several iterations for each batch size to determine the latency statistics
else:
model.eval()
with torch.no_grad():
if args.iter_batch:
batch_size = [int(2**i) for i in range(int(math.log2(args.batch_size)+1))]
else:
batch_size = [args.batch_size]
means = []
stds = []
percentile_90 = []
percentile_99 = []
fps = []
for bs in batch_size:
print("Batch size: {}".format(bs))
batched_image = torch.stack([sample_image]*bs, 0)
latencies = np.zeros(args.iter)
# Warm up round
for _ in range(5):
# start = time.time()
output = model(batched_image)
# end = time.time()
# print("Cold start latency: {:.3f} ms".format((end-start)*1000))
for i in range(args.iter):
start = time.time()
output = model(batched_image)
end = time.time()
latencies[i] = end - start
latencies.sort()
mean_latency = np.mean(latencies) * 1000
std_latency = np.std(latencies) * 1000
p90 = latencies[int(args.iter * 0.9 - 1)] * 1000
p99 = latencies[int(args.iter * 0.99 - 1)] * 1000
# print("Latency Total: mean: {:.3f} ms, std: {:.3f} ms".format(mean_latency, std_latency))
print("Latency: mean: {:.3f}ms ({:.2f} FPS), std: {:.3f}ms, P90: {:.3f}ms, P99: {:.3f}ms".format(
mean_latency/bs, 1000/mean_latency*bs, std_latency/bs, p90/bs, p99/bs))
means.append(mean_latency/bs)
stds.append(std_latency/bs)
fps.append(1000/mean_latency*bs)
percentile_90.append(p90/bs)
percentile_99.append(p99/bs)
fig = plt.figure(figsize=(16, 9))
fig.suptitle("PyTorch-ENet Latency Test on Cityscapes Dataset", fontsize='xx-large', fontweight='bold')
axs = fig.subplots(2, 1)
axs[0].errorbar(batch_size, means, stds, c='b')
axs[0].set_xlabel('Batch Size')
axs[0].set_ylabel('Latency (ms)', c='b')
axs[0].set_ylim(0, 30)
axs[0].set_xscale('log', basex=2)
axs[0].xaxis.set_major_formatter(ScalarFormatter())
axs[0].set_xticks(batch_size)
# axs[0].set_title("Latency vs Batch Size")
axs[0].grid(True)
axs[0].yaxis.set_major_locator(MultipleLocator(5))
axs[0].tick_params(axis='y', labelcolor='b')
for x, y in zip(batch_size, means):
axs[0].annotate('{:.1f}'.format(y), xy=(x, y))
ax_fps = axs[0].twinx()
ax_fps.plot(batch_size, fps, c='r', marker='o')
# ax_fps.set_xlabel('Batch Size')
ax_fps.set_ylabel('FPS', c='r')
ax_fps.set_ylim(0, 150)
ax_fps.yaxis.set_major_locator(MultipleLocator(30))
ax_fps.tick_params(axis='y', labelcolor='r')
# ax_fps.set_xscale('log', basex=2); ax_fps.xaxis.set_major_formatter(ScalarFormatter()); ax_fps.set_xticks(batch_size)
# ax_fps.grid(True)
for x, y in zip(batch_size, fps):
ax_fps.annotate('{:.1f}'.format(y), xy=(x, y))
labels = [str(bs) for bs in batch_size]
x = np.arange(len(labels))
print(labels, x)
width = 0.2
rects1 = axs[1].bar(x - width, means, width, label='mean')
rects2 = axs[1].bar(x, percentile_90, width, label='P90')
rects3 = axs[1].bar(x + width, percentile_99, width, label='P99')
axs[1].set_ylabel('Latency (ms)')
axs[1].set_xlabel('Batch Size')
# axs[1].set_title("Latency across percentile")
# axs[1].set_xscale('log', basex=2); axs[0].xaxis.set_major_formatter(ScalarFormatter())
axs[1].set_xticks(x)
axs[1].set_xticklabels(labels)
axs[1].set_ylim(0, 20)
axs[1].legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
axs[1].annotate('{:.1f}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
# fig.tight_layout()
if args.plot:
plt.show()
else:
plt.savefig('enet')
{"hexsha": "03910f43b63baf4d078255fc79d07d429344246f", "size": 7752, "ext": "py", "lang": "Python", "max_stars_repo_path": "profiling.py", "max_stars_repo_name": "jtang10/PyTorch-ENet", "max_stars_repo_head_hexsha": "d407eb6444e12ca5dd0fbe60145ed17440d31db2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "profiling.py", "max_issues_repo_name": "jtang10/PyTorch-ENet", "max_issues_repo_head_hexsha": "d407eb6444e12ca5dd0fbe60145ed17440d31db2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "profiling.py", "max_forks_repo_name": "jtang10/PyTorch-ENet", "max_forks_repo_head_hexsha": "d407eb6444e12ca5dd0fbe60145ed17440d31db2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0, "max_line_length": 163, "alphanum_fraction": 0.6185500516, "include": true, "reason": "import numpy", "num_tokens": 2098}
[STATEMENT]
lemma normalize_field [simp]: "normalize (a :: 'a :: {field, semiring_gcd}) = (if a = 0 then 0 else 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. normalize a = (if a = (0::'a) then 0::'a else (1::'a))
[PROOF STEP]
using unit_factor_normalize
[PROOF STATE]
proof (prove)
using this:
?a \<noteq> (0::?'a) \<Longrightarrow> unit_factor (normalize ?a) = (1::?'a)
goal (1 subgoal):
1. normalize a = (if a = (0::'a) then 0::'a else (1::'a))
[PROOF STEP]
by fastforce
{"llama_tokens": 212, "file": "LLL_Basis_Reduction_Missing_Lemmas", "length": 2}
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 17:31:54 2017
@author: ning
"""
import os
import numpy as np
#from sklearn.preprocessing import MinMaxScaler
from mne.decoding import Vectorizer
from sklearn import metrics
import pandas as pd
import pickle
from matplotlib import pyplot as plt
from keras.utils import np_utils
#os.chdir('D:/Ning - spindle/variational_autoencoder_spindles')
#from DataGenerator import DataGenerator
os.chdir('D:/Ning - spindle/training set')
working_dir='D:\\NING - spindle\\Spindle_by_Graphical_Features\\eventRelated_12_20_2017\\'
saving_dir = 'D:\\NING - spindle\\Spindle_by_Graphical_Features\\CNN vae\\'
saving_dir_weight = 'D:\\NING - spindle\\Spindle_by_Graphical_Features\\inverse\\'
def cos_similarity(x,y):
x = Vectorizer().fit_transform(x)
y = Vectorizer().fit_transform(y)
metrics_ = np.mean(metrics.pairwise.cosine_similarity(x,y))
return metrics_
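# Note (added): Vectorizer flattens each trial to a row, so this returns the
# mean pairwise cosine similarity between all rows of x and y; it is used in the
# (commented-out) training loop below as a rough reconstruction-quality score.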
if not os.path.exists(saving_dir):
os.mkdir(saving_dir)
if not os.path.exists(saving_dir_weight):
os.mkdir(saving_dir_weight)
#X_train, y_train = [],[]
#for ii in range(10):
# temp = pickle.load(open('D:\\NING - spindle\\Spindle_by_Graphical_Features\\data\\train\\train%d.p'%(ii),'rb'))
# X_train_,y_train_ = temp
# X_train.append(X_train_)
# y_train.append(y_train_)
#
#X_train = np.concatenate(X_train,axis=0)
#y_train = np.concatenate(y_train,axis=0)
temp = pickle.load(open('D:\\NING - spindle\\Spindle_by_Graphical_Features\\data\\validation\\validation.p','rb'))
X_validation,y_validation = temp
#X_train = np.concatenate([X_train,X_validation],axis=0)
#y_train = np.concatenate([y_train,y_validation],axis=0)
#del X_validation,y_validation
#########################################################
############## U-net model ##############################
#########################################################
#from keras.layers import Input, concatenate, Conv2D, MaxPool2D, UpSampling2D, Dropout
#from keras.models import Model
#import keras
#from keras.callbacks import ModelCheckpoint
#inputs = Input(shape=(32, 16, 192), batch_shape=(None,32,16,192),name='input',dtype='float64')
#conv1 = Conv2D(64, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(inputs)
#print('conv1 shape:',conv1.shape)
##crop1 = Cropping2D(cropping=((3,3),(3,3)),data_format='channels_first')(conv1)
##print('crop1 shape:', crop1.shape)
#conv1_2 = Conv2D(64, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(conv1)
#print('conv1 shape:',conv1_2.shape)
#drop1 = Dropout(0.5)(conv1_2)
#pool1 = MaxPool2D(pool_size=(2,2),data_format='channels_first')(drop1)
#print('pool1 shape:',pool1.shape)
#
#conv2 = Conv2D(128, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(pool1)
#print('conv2 shape:', conv2.shape)
#conv2_2 = Conv2D(128, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(conv2)
#print('conv2_2 shape:', conv2_2.shape)
#drop2 = Dropout(0.5)(conv2_2)
#pool2 = MaxPool2D(pool_size=(2,2),data_format='channels_first')(drop2)
#print('pool2 shape:',pool2.shape)
#
#conv3 = Conv2D(256, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(pool2)
#print('conv2 shape:', conv3.shape)
#conv3_2 = Conv2D(256, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(conv3)
#print('conv3_2 shape:', conv3_2.shape)
#drop3 = Dropout(0.5)(conv3_2)
#pool3 = MaxPool2D(pool_size=(2,2),data_format='channels_first')(drop3)
#print('pool3 shape:',pool3.shape)
#
#up4 = Conv2D(128, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(UpSampling2D(size=(2,2),data_format='channels_first')(pool3))
#print('up4 shape:', up4.shape)
#merge4 = concatenate([drop3, up4],axis=1)
#print('merge4 shape:',merge4.shape)
#conv4 = Conv2D(128, (4,4), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(merge4)
#print('conv4 shape:', conv4.shape)
#conv4_2 = Conv2D(128, (4,4), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(conv4)
#print('conv6 shape:', conv4_2.shape)
#
#up5 = Conv2D(64, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(UpSampling2D(size=(2,2),data_format='channels_first')(conv4_2))
#print('up5 shape:', up5.shape)
#merge5 = concatenate([drop2, up5],axis=1)
#print('merge4 shape:',merge5.shape)
#conv5 = Conv2D(128, (4,4), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(merge5)
#print('conv5 shape:', conv5.shape)
#conv5_2 = Conv2D(128, (4,4), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(conv5)
#print('conv6 shape:', conv5_2.shape)
#
#up6 = Conv2D(32, (3,3), activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(UpSampling2D(size=(4,4),data_format='channels_first')(conv4_2))
#print('up5 shape:', up6.shape)
#merge6 = concatenate([drop1,up6],axis=1)
#print('merge6 shape:',merge6.shape)
#conv6 = Conv2D(32,3, activation='relu',padding='same',kernel_initializer='he_normal',
# data_format='channels_first',dilation_rate=2,use_bias=True)(merge6)
#print('conv6 shape:',conv6.shape)
#
#model_U_net = Model(inputs = inputs, outputs = conv6)
#model_U_net.compile(optimizer=keras.optimizers.Adam(),
# loss=keras.losses.binary_crossentropy,metrics=['accuracy'])
#
#breaks = 500
#batch_size = 50
#file_path = saving_dir_weight+'weights.2D_u_net.best.hdf5'
#checkPoint = ModelCheckpoint(file_path,monitor='val_loss',save_best_only=True,mode='min',period=1,verbose=1)
#callback_list = [checkPoint]
#
#temp_results = []
#if os.path.exists('D:\\NING - spindle\\Spindle_by_Graphical_Features\\weights.2D_u_net.best.hdf5'):
# model_U_net.load_weights('D:\\NING - spindle\\Spindle_by_Graphical_Features\\weights.2D_u_net.best.hdf5')
#for ii in range(breaks):
# model_U_net.fit(x=X_train,y=X_train,batch_size=batch_size,epochs=50,
# validation_data=(X_validation,X_validation),shuffle=True,callbacks=callback_list)
# X_predict = model_U_net.predict(X_validation)
# validation_measure = [cos_similarity(a,b) for a,b in zip(X_validation, X_predict)]
# print('mean similarity: %.4f +/- %.4f'%(np.mean(validation_measure),np.std(validation_measure)))
# temp_results.append([(ii+1)*50,np.mean(validation_measure),np.std(validation_measure)])
# results_for_saving = pd.DataFrame(np.array(temp_results).reshape(-1,3),columns=['epochs','mean score','score std'])
# if os.path.exists(saving_dir_weight + 'scores_u_net.csv'):
# temp_result_for_saving = pd.read_csv(saving_dir_weight + 'scores_u_net.csv')
# results_for_saving = pd.concat([temp_result_for_saving,results_for_saving])
# results_for_saving.to_csv(saving_dir_weight + 'scores_u_net.csv',index=False)
#########################################################
############## conv autoencoder model ##################
#########################################################
from keras.layers import Input, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D,Dropout,BatchNormalization
from keras.layers import Flatten,Dense
from keras.models import Model
import keras
from keras.callbacks import ModelCheckpoint
inputs = Input(shape=(32,16,192),batch_shape=(None,32,16,192),name='input',dtype='float64',)
conv1 = Conv2D(32,(4,48),strides=(1,1),activation='relu',padding='valid',data_format='channels_first',
kernel_initializer='he_normal')(inputs)
print('conv1 shape:',conv1.shape)
drop1 = Dropout(0.5)(conv1)
norm1 = BatchNormalization()(drop1)
print('norm1 shape:',norm1.shape)
#down1 = MaxPooling2D((2,4),(1,2),padding='valid',data_format='channels_first',)(norm1)
#print('down1 shape:', down1.shape)
conv2 = Conv2D(64,(4,48),strides=(1,1),activation='relu',padding='valid',data_format='channels_first',
kernel_initializer='he_normal')(norm1)
print('conv2 shape:',conv2.shape)
drop2 = Dropout(0.5)(conv2)
norm2 = BatchNormalization()(drop2)
print('norm2 shape:',norm2.shape)
#down2 = MaxPooling2D((2,4),(1,2),padding='valid',data_format='channels_first',)(norm2)
#print('down2 shape:',down2.shape)
conv3 = Conv2D(128,(4,48),strides=(1,1),activation='relu',padding='valid',data_format='channels_first',
kernel_initializer='he_normal')(norm2)
print('conv3 shape:',conv3.shape)
drop3 = Dropout(0.5)(conv3)
norm3 = BatchNormalization()(drop3)
print('norm3 shape:',norm3.shape)
#down3 = MaxPooling2D((2,4),(1,2),padding='valid',data_format='channels_first',)(norm3)
#print('down3 shape:',down3.shape)
conv4 = Conv2D(256,(4,48),strides=(1,1),activation='relu',padding='valid',data_format='channels_first',
kernel_initializer='he_normal')(norm3)
print('conv4 shape:',conv4.shape)
drop4 = Dropout(0.5)(conv4)
norm4 = BatchNormalization()(drop4)
print('norm4 shape:',norm4.shape)
#down4 = MaxPooling2D((2,4),(1,2),padding='valid',data_format='channels_first',)(norm4)
#print('down4 shape:',down4.shape)
#conv5 = Conv2D(256,(2,2),strides=(1,1),activation='relu',padding='same',data_format='channels_first',
# kernel_initializer='he_normal')(norm4)
#print('conv5 shape:',conv5.shape)
#drop5 = Dropout(0.5)(conv5)
#norm5 = BatchNormalization()(drop5)
#print('norm5 shape:',norm5.shape)
#down5 = MaxPooling2D((2,2),(1,1),padding='valid',data_format='channels_first',)(norm5)
#print('down5 shape:',down5.shape)
flat6 = Flatten()(norm4)
drop6 = Dropout(0.5)(flat6)
print('flatten 6 shape:',drop6.shape)
dens7 = Dense(kernel_initializer='he_normal',units=2,activation='softmax')(drop6) # feed the dropped-out features (drop6) into the classifier head
drop7 = Dropout(0.5)(dens7)
print('dense 7 shape:',drop7.shape)
#decov4 = Conv2DTranspose(128,(4,8),strides=(2,4),activation='relu',padding='same',data_format='channels_first',
# kernel_initializer='he_normal')(down3)
#print('decov4 shape:',decov4.shape)
#drop4 = Dropout(0.5)(decov4)
#norm4 = BatchNormalization()(drop4)
#
#decov5 = Conv2DTranspose(64,(4,8),strides=(2,4),activation='relu',padding='same',data_format='channels_first',
# kernel_initializer='he_normal')(norm4)
#print('decov5 shape:',decov5.shape)
#drop5 = Dropout(0.5)(decov5)
#norm5 = BatchNormalization()(drop5)
#
#decov6 = Conv2DTranspose(32,(4,8),strides=(4,4),activation='relu',padding='same',data_format='channels_first',
# kernel_initializer='he_normal')(norm5)
#print('decov6 shape:',decov6.shape)
def AUC_(y_true, y_pred):
return metrics.roc_auc_score(y_true,y_pred)
model_auto = Model(inputs = inputs,outputs=drop7)
model_auto.compile(optimizer=keras.optimizers.SGD(),loss=keras.losses.mse,metrics=['accuracy',
'categorical_accuracy'])
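# note: mean-squared error on a 2-unit softmax is an unusual pairing for classification;
# a more conventional compile (a sketch, not what was run here) would be:
#   model_auto.compile(optimizer=keras.optimizers.SGD(),
#                      loss=keras.losses.categorical_crossentropy,
#                      metrics=['accuracy'])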
#data = np.random.rand(10,32,16,192).astype(np.float64)
#predict = model_auto.predict(data)
#predict.shape
""" auto encoder"""
#breaks = 500
#batch_size = 50
#file_path = saving_dir_weight+'weights.2D_auto_encoder.best.hdf5'
#checkPoint = ModelCheckpoint(file_path,monitor='val_loss',save_best_only=True,mode='min',period=1,verbose=1)
#callback_list = [checkPoint]
#
#temp_results = []
#if os.path.exists('D:\\NING - spindle\\Spindle_by_Graphical_Features\\weights.2D_auto_encoder.best.hdf5'):
# model_auto.load_weights('D:\\NING - spindle\\Spindle_by_Graphical_Features\\weights.2D_auto_encoder.best.hdf5')
#for ii in range(breaks):
# model_auto.fit(x=X_train,y=X_train,batch_size=batch_size,epochs=50,
# validation_data=(X_validation,X_validation),shuffle=True,callbacks=callback_list)
# X_predict = model_auto.predict(X_validation)
# validation_measure = [cos_similarity(a,b) for a,b in zip(X_validation, X_predict)]
# print('mean similarity: %.4f +/- %.4f'%(np.mean(validation_measure),np.std(validation_measure)))
# temp_results.append([(ii+1)*50,np.mean(validation_measure),np.std(validation_measure)])
# results_for_saving = pd.DataFrame(np.array(temp_results).reshape(-1,3),columns=['epochs','mean score','score std'])
# if os.path.exists(saving_dir_weight + 'scores_autoencoder.csv'):
# temp_result_for_saving = pd.read_csv(saving_dir_weight + 'scores_autoencoder.csv')
# results_for_saving = pd.concat([temp_result_for_saving,results_for_saving])
# results_for_saving.to_csv(saving_dir_weight + 'scores_autoencoder.csv',index=False)
"""classification"""
breaks = 500
batch_size = 100
through = 5
file_path = saving_dir_weight+'weights.2D_classification_small_to_large.best.hdf5'
checkPoint = ModelCheckpoint(file_path,monitor='val_loss',save_best_only=True,mode='min',period=1,verbose=1)
callback_list = [checkPoint]
temp_results = []
if os.path.exists(saving_dir_weight+'weights.2D_classification_small_to_large.best.hdf5'):
model_auto.load_weights(saving_dir_weight+'weights.2D_classification_small_to_large.best.hdf5')
for ii in range(breaks):
labels = []
for jj in range(through):# going through the training data 5 times
step_idx = np.random.choice(np.arange(10),size=10,replace=False)
        for kk in step_idx: # going through the 10 training-data splits
temp = pickle.load(open('D:\\NING - spindle\\Spindle_by_Graphical_Features\\data\\train\\train%d.p'%(kk),'rb'))
X_train_,y_train_ = temp
random_inputs = np.random.rand(X_train_.shape[0],32,16,192)
random_labels = [0]*X_train_.shape[0]
random_labels = np_utils.to_categorical(random_labels,2)
X_train_ = np.concatenate([X_train_,random_inputs],axis=0)
y_train_ = np.concatenate([y_train_,random_labels],axis=0)
labels.append(y_train_)
model_auto.fit(x=X_train_,y=y_train_,batch_size=batch_size,epochs=2,
validation_data=(X_validation,y_validation),shuffle=True,callbacks=callback_list)
labels = np.concatenate(labels,axis=0)
model_auto.load_weights(saving_dir_weight+'weights.2D_classification_small_to_large.best.hdf5')
    X_predict_prob = model_auto.predict(X_validation)[:,-1]
    X_predict = X_predict_prob > np.mean(labels[:,-1]) # threshold at the mean positive rate of the training labels
print(metrics.classification_report(y_validation[:,-1],X_predict))
AUC = metrics.roc_auc_score(y_validation[:,-1], X_predict_prob)
fpr,tpr,th = metrics.roc_curve(y_validation[:,-1], X_predict_prob,pos_label=1)
    # note: "sensitivity" here is weighted precision and "selectivity" is weighted recall;
    # conventionally sensitivity *is* recall, so read the saved columns accordingly
    sensitivity = metrics.precision_score(y_validation[:,-1],X_predict,average='weighted')
    selectivity = metrics.recall_score(y_validation[:,-1],X_predict,average='weighted')
plt.close('all')
fig,ax = plt.subplots(figsize=(8,8))
ax.plot(fpr,tpr,label='AUC = %.3f'%(AUC))
    ax.set(xlabel='false positive rate',ylabel='true positive rate',title='%dth 5 epochs'%(ii+1),
           xlim=(0,1),ylim=(0,1))
ax.legend(loc='best')
fig.savefig(saving_dir_weight + 'AUC plot_%d.png'%(ii+1),dpi=400)
plt.close('all')
# validation_measure = [cos_similarity(a,b) for a,b in zip(X_validation, X_predict)]
# print('mean similarity: %.4f +/- %.4f'%(np.mean(validation_measure),np.std(validation_measure)))
    temp_results.append([(ii+1)*50,AUC,sensitivity,selectivity]) # nominal epoch counter (each break actually runs through*10 fits of 2 epochs)
results_for_saving = pd.DataFrame(np.array(temp_results).reshape(-1,4),columns=['epochs','AUC','sensitivity','selectivity'])
if os.path.exists(saving_dir_weight + 'scores_classification.csv'):
temp_result_for_saving = pd.read_csv(saving_dir_weight + 'scores_classification.csv')
results_for_saving = pd.concat([temp_result_for_saving,results_for_saving])
results_for_saving.to_csv(saving_dir_weight + 'scores_classification.csv',index=False)
X_test, y_test = pickle.load(open('D:\\NING - spindle\\Spindle_by_Graphical_Features\\data\\test\\test.p','rb'))
X_predict_prob_ = model_auto.predict(X_test)[:,-1]
X_predict_ = X_predict_prob_ > 0.5
print(metrics.classification_report(y_test[:,-1],X_predict_))
AUC = metrics.roc_auc_score(y_test[:,-1], X_predict_prob_)
fpr,tpr,th = metrics.roc_curve(y_test[:,-1], X_predict_prob_,pos_label=1)
sensitivity = metrics.precision_score(y_test[:,-1],X_predict_,average='weighted') # weighted precision (saved as "sensitivity")
selectivity = metrics.recall_score(y_test[:,-1],X_predict_,average='weighted') # weighted recall (saved as "selectivity")
plt.close('all')
fig,ax = plt.subplots(figsize=(8,8))
ax.plot(fpr,tpr,label='AUC = %.3f\nSensitivity = %.3f\nSelectivity = %.3f'%(AUC,sensitivity,selectivity))
ax.set(xlabel='false positive rate',ylabel='true positive rate',title='test data\nsmall to large',
       xlim=(0,1),ylim=(0,1))
ax.legend(loc='best')
fig.savefig(saving_dir_weight + 'test data AUC plot.png',dpi=400)
plt.close('all')
cf =metrics.confusion_matrix(y_test[:,-1],X_predict_)
cf = cf / cf.sum(1)[:, np.newaxis]
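# row-normalization example: [[8, 2], [1, 9]] -> [[0.8, 0.2], [0.1, 0.9]] (each row sums to 1)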
import seaborn as sns
plt.close('all')
fig,ax = plt.subplots(figsize=(8,8))
ax = sns.heatmap(cf,vmin=0.,vmax=1.,cmap=plt.cm.Blues,annot=False,ax=ax)
coors = np.array([[0,0],[1,0],[0,1],[1,1],])+ 0.5
for ii,(m,coor) in enumerate(zip(cf.flatten(),coors)):
ax.annotate('%.2f'%(m),xy = coor,size=25,weight='bold',ha='center')
ax.set(xticks=(0.5,1.5),yticks=(0.25,1.25),
xticklabels=['non spindle','spindle'],
yticklabels=['non spindle','spindle'])
ax.set_title('Confusion matrix\nDCNN small to large',fontweight='bold',fontsize=20)
ax.set_ylabel('True label',fontsize=20,fontweight='bold')
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
fig.savefig(saving_dir_weight+'confusion matrix.png',dpi=400)
|
{"hexsha": "8a93ba4271a526010891d5c5462f0c7c04c8561c", "size": 18176, "ext": "py", "lang": "Python", "max_stars_repo_path": "encoder only 3 (inverse small to large).py", "max_stars_repo_name": "adowaconan/variational_autoencoder_spindles", "max_stars_repo_head_hexsha": "0410fe86372ed50c5d136e7bbb13bbdf4dc4cc7b", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "encoder only 3 (inverse small to large).py", "max_issues_repo_name": "adowaconan/variational_autoencoder_spindles", "max_issues_repo_head_hexsha": "0410fe86372ed50c5d136e7bbb13bbdf4dc4cc7b", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "encoder only 3 (inverse small to large).py", "max_forks_repo_name": "adowaconan/variational_autoencoder_spindles", "max_forks_repo_head_hexsha": "0410fe86372ed50c5d136e7bbb13bbdf4dc4cc7b", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.4693333333, "max_line_length": 137, "alphanum_fraction": 0.7109925176, "include": true, "reason": "import numpy", "num_tokens": 5044}
|
[STATEMENT]
lemma gmctxt_cl_refl:
"funas_gterm t \<subseteq> \<F> \<Longrightarrow> (t, t) \<in> gmctxt_cl \<F> \<R>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. funas_gterm t \<subseteq> \<F> \<Longrightarrow> (t, t) \<in> gmctxt_cl \<F> \<R>
[PROOF STEP]
by (induct t) (auto simp: SUP_le_iff intro!: gmctxt_cl.step)
|
{"llama_tokens": 150, "file": "Regular_Tree_Relations_Util_Ground_Closure", "length": 1}
|
# -*- coding: utf-8 -*-
import numpy as np
from copy import deepcopy
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeRegressor
from utils.ensemble_model import EnsembleModel
from utils.model_io import save_model
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
def calc_R_2_score(y_true, y_pred):
return r2_score(y_true, y_pred)
def initialize_kmeans_model(n_cluster = 3, save_path = 'kmeans.joblib'):
kmeans = MiniBatchKMeans(n_clusters = n_cluster)
save_model(kmeans, path_name = save_path)
pass
def incremental_clustering(X_train, kmeans):
# kmeans = MiniBatchKMeans(n_clusters= cnt_cluster)
kmeans.partial_fit(X_train)
return kmeans
pass
def clustered_learning_predict(input_X, kmeans_model, clfs):
# 1. which cluster
c = kmeans_model.predict([input_X])[0]
# 2. get the COP result
res = clfs[c].predict([input_X])[0]
return res
pass
def kmeans_train(X_train, n_cluster = 5):
kmeans = KMeans(n_clusters= n_cluster)
kmeans.fit(X_train)
return kmeans
pass
def clustered_learning_train(X_train, y_train, kmeans, model = 'DT', **parameters):
# return
# the prediction models
# length = len(X_train) # why use length ??
clfs = {}
cnt_cluster = kmeans.n_clusters
# the performance seems good
C_Xs = kmeans.predict(X_train)
X_dict = {}
y_dict = {}
# split data into different cluster
for c in range(cnt_cluster):
X_dict[c] = []
y_dict[c] = []
pass
for i in range(len(X_train)):
c = C_Xs[i]
X_dict[c].append(X_train[i])
y_dict[c].append(y_train[i])
pass
for c in range(cnt_cluster):
#clf = DecisionTreeRegressor() # can use parameter to modify
# clf.fit(X_dict[c], y_dict[c])
clf = train_model( X_dict[c], y_dict[c], model, **parameters)
clfs[c] = clf
pass
return clfs # which is a dict
pass # end of function
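# end-to-end sketch tying the helpers together (hypothetical data; 'dt' selects the
# decision-tree branch of train_model below):
#   X = np.random.rand(200, 4)
#   y = np.random.rand(200)
#   km = kmeans_train(X, n_cluster=3)
#   clfs = clustered_learning_train(X, y, km, model='dt', max_depth=5)
#   print(clustered_learning_predict(X[0], km, clfs))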
def calc_loss_RMSE(reals, predictions):
# calculate loss
predictions = np.array(predictions).flatten()
reals = np.array(reals ).flatten()
assert predictions.shape == reals.shape
return np.sqrt(((predictions - reals) ** 2).mean())
pass
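# worked example: reals=[1, 2], predictions=[2, 2] -> sqrt((1 + 0) / 2) = sqrt(0.5) ~= 0.707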
def calc_loss_MAPE(reals, predictions):
predictions = np.array(predictions).flatten()
reals = np.array(reals ).flatten()
assert predictions.shape == reals.shape
diff = np.abs(np.array(reals) - np.array(predictions))
ratio_list = []
length = len(diff)
for i in range(length):
each_diff = diff[i]
each_real = reals[i]
        if each_real == 0: # skip zero targets to avoid division by zero
continue
each_ratio = each_diff / each_real
ratio_list.append(each_ratio)
pass
return np.mean(ratio_list)
pass
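# worked example: reals=[2, 4, 0], predictions=[1, 5, 1]
#   -> absolute errors [1, 1, 1]; the zero target is skipped,
#   -> ratios [1/2, 1/4] -> MAPE = 0.375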
# for centralized mode
def train_model( X_train, y_train, model, task_type = 'regression' , **parameters): # currently we only support regression models ...
    # print(parameters)
    assert isinstance(model, str)  # the model is selected by its string name
    # print(model)
    model = model.lower()
    assert model in [
        'dt', 'nn', 'mlp',
        'decision tree', 'neural network', 'multi-layer perceptron',
        'svr',
        'adaboost', 'ada',
        'rf', 'random forest',
    ]
    if model in ['dt', 'decision tree']:
        return train_model_DT(X_train, y_train, **parameters)
    elif model in ['nn', 'mlp', 'neural network', 'multi-layer perceptron']:
        return train_model_NN(X_train, y_train, **parameters) # currently implemented in pytorch
    elif model == 'svr':
        return train_model_SVR(X_train, y_train, **parameters)
        pass
    elif model in ['ada', 'adaboost']:
        return train_model_Adaboost(X_train, y_train, **parameters)
        pass
    elif model in ['rf', 'random forest']:
        return train_model_RF(X_train, y_train, **parameters)
        pass
    pass
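# dispatcher usage sketch (hypothetical data):
#   clf = train_model(X_train, y_train, 'rf', n_estimators=50, max_depth=8)
#   predictions = clf.predict(X_test)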
# detailed machine learning model training func
def train_model_RF(X_train, y_train, **parameters):
if 'n_estimators' in parameters.keys():
n_estimators = parameters['n_estimators']
else:
n_estimators = 10
pass
max_depth = None
if 'max_depth' in parameters.keys():
max_depth = parameters['max_depth']
pass
clf = RandomForestRegressor(n_estimators= n_estimators, max_depth= max_depth)
clf.fit(X_train, y_train)
return clf
pass
def train_model_Adaboost(X_train, y_train, **parameters):
from sklearn.ensemble import AdaBoostRegressor
if 'n_estimators' in parameters.keys():
n_estimators = parameters['n_estimators']
else:
n_estimators = 10
pass
clf = AdaBoostRegressor(n_estimators= n_estimators)
clf.fit(X_train, y_train)
return clf
pass
def train_model_DT(X_train, y_train, **parameters):
max_depth = 10
if 'max_depth' in parameters.keys():
max_depth = parameters['max_depth']
pass
sample_w = None
if 'sample_weight' in parameters.keys():
sample_w = parameters['sample_weight']
pass
clf = DecisionTreeRegressor(max_depth= max_depth)
# for now we use the default parameter
# it acts good
clf.fit(X_train, y_train, sample_weight= sample_w)
return clf
# pass
def train_model_NN(X_train, y_train, **paramater_dict):
y_train = deepcopy(y_train)
X_train = deepcopy(X_train)
# print(paramater_dict)
# default parameter
# print('NN')
import torch
use_cuda = torch.cuda.is_available()
lr = 0.001
epoch = 2000
if 'lr' in paramater_dict.keys():
lr = paramater_dict['lr']
pass
if 'epoch' in paramater_dict.keys():
epoch = paramater_dict['epoch']
pass
# print(lr)
# now use pytorch model, has [predict] function
# 1. get feature number
feature_number = np.array(X_train).shape[-1]
# print('feature number', feature_number)
# 2. initial model
from models.NN import RegressionNN
from torch import nn
model = RegressionNN(feature_number)
# 3. convert to tensor
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape( (len(y_train),1) )
X_train = torch.from_numpy(X_train).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.FloatTensor)
if use_cuda:
X_train, y_train = X_train.cuda(), y_train.cuda()
model = model.cuda()
# 4. define loss
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr )
# 5. train
    for i in range(epoch):
        y_pred = model.forward(X_train)   # forward pass: compute predictions
        loss = criterion(y_pred,y_train)  # compute the MSE loss
        optimizer.zero_grad()             # clear accumulated gradients
        loss.backward()                   # backpropagate
        optimizer.step()                  # update parameters
        pass
# print('Done')
# 6. return
    if use_cuda:
        model = model.cpu()
        # move the model back to the CPU; the GPU is not needed for testing afterwards
return model
pass
def train_model_SVR(X_train, y_train, **parameters): # accepts **parameters so the dispatcher can call it uniformly
from sklearn.svm import SVR
model = SVR(gamma = 'scale')
model.fit(X_train, y_train)
return model
pass
def NN_incremental_train( X_train, y_train,nn_model , **paramater_dict):
import torch
y_train = deepcopy(y_train)
X_train = deepcopy(X_train)
use_cuda = torch.cuda.is_available()
lr = 0.001
epoch = 2000
if 'lr' in paramater_dict.keys():
lr = paramater_dict['lr']
pass
if 'epoch' in paramater_dict.keys():
epoch = paramater_dict['epoch']
pass
# print(lr)
# now use pytorch model, has [predict] function
# 1. get feature number
# feature_number = np.array(X_train).shape[-1]
# print('feature number', feature_number)
# 2. get model
# from models.NN import RegressionNN
from torch import nn
# model = RegressionNN(feature_number)
model = nn_model
# 3. convert to tensor
X_train = np.array(X_train)
y_train = np.array(y_train)
# print( y_train.shape )
# print( y_train )
y_train = y_train.reshape( (len(y_train), 1) )
X_train = torch.from_numpy(X_train).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.FloatTensor)
if use_cuda:
X_train, y_train = X_train.cuda(), y_train.cuda()
model = model.cuda()
# 4. define loss
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr )
# 5. train
    for i in range(epoch):
        y_pred = model.forward(X_train)   # forward pass: compute predictions
        loss = criterion(y_pred,y_train)  # compute the MSE loss
        optimizer.zero_grad()             # clear accumulated gradients
        loss.backward()                   # backpropagate
        optimizer.step()                  # update parameters
        pass
# print('Done')
# 6. return
    if use_cuda:
        model = model.cpu()
        # move the model back to the CPU; the GPU is not needed for testing afterwards
return model
pass
|
{"hexsha": "6a2768c59ff47cd60d67c8962f03461802ba7df1", "size": 8938, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/learning.py", "max_stars_repo_name": "BuildFL/BuildFL", "max_stars_repo_head_hexsha": "2b9fb786c9655b52d54b53e3efaf25e033a5b532", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-14T15:02:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-10T03:35:44.000Z", "max_issues_repo_path": "utils/learning.py", "max_issues_repo_name": "onsB/BuildFL", "max_issues_repo_head_hexsha": "2b9fb786c9655b52d54b53e3efaf25e033a5b532", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/learning.py", "max_forks_repo_name": "onsB/BuildFL", "max_forks_repo_head_hexsha": "2b9fb786c9655b52d54b53e3efaf25e033a5b532", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-17T02:05:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-19T02:16:13.000Z", "avg_line_length": 31.4718309859, "max_line_length": 128, "alphanum_fraction": 0.63526516, "include": true, "reason": "import numpy", "num_tokens": 2315}
|
#! /usr/bin/env python
"""
Script for creating a histogram of the time difference between two triggers
Usage: python plot_trigger_time_differences.py PULSEFILE
"""
import numpy as n
import pylab as p
import sys
f = open(sys.argv[1])
triggers = []
last_line = None
this_line = None
ini = True     # waiting for the first parsable trigger line
iniini = True  # used to skip the header line
for line in f.readlines():
if iniini:
iniini = False
continue
try:
if ini:
last_line = line.split()
last_trigger = last_line[0].split('(')[1]
last_trigger = last_trigger.split(',')[0]
ini = False
continue
this_line = line.split()
this_trigger = this_line[0].split('(')[1]
this_trigger = this_trigger.split(',')[0]
difference = (float(this_trigger) - float(last_trigger))/1000000
        print(difference)
triggers.append(difference)
last_trigger = this_trigger
    except Exception:  # skip lines that do not parse as triggers
        pass
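# expected input shape (a hypothetical example, inferred from the parsing above): the
# first whitespace-separated token of each line looks like "(1234567890,...)", and the
# number before the comma is read as the trigger timestamp in microseconds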
xmax = max(triggers)
pulsebins = n.linspace(0,xmax,int(xmax)+1)
#pulsebins = n.linspace(0,xmax,1000)
hist0 = n.histogram(n.array(triggers),bins=pulsebins)
#print xmax
#print pulsebins
baredges0 = n.linspace(0,hist0[1][-1],len(hist0[0]))
p.ylim(ymax=1.2*max(hist0[0]))
#pulsebins = hist0[1]
p.bar(baredges0,hist0[0],width=pulsebins[1]-pulsebins[0],color='b',label='triggers')
p.grid()
p.legend()
p.xlabel('Trigger distance (s)')
p.ylabel('Events')
p.show()
|
{"hexsha": "6ff42c14acd781ebac8e7358879b633421448b97", "size": 1396, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_scripts/plot_trigger_time_differences.py", "max_stars_repo_name": "LambdaDigamma/muonic", "max_stars_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-05-12T14:11:08.000Z", "max_stars_repo_stars_event_max_datetime": "2015-08-09T17:57:59.000Z", "max_issues_repo_path": "analysis_scripts/plot_trigger_time_differences.py", "max_issues_repo_name": "LambdaDigamma/muonic", "max_issues_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-12T14:07:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T10:17:39.000Z", "max_forks_repo_path": "analysis_scripts/plot_trigger_time_differences.py", "max_forks_repo_name": "LambdaDigamma/muonic", "max_forks_repo_head_hexsha": "cc242582168101f1ab444ffdc915f8a007078bc4", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-06-10T08:34:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T21:41:08.000Z", "avg_line_length": 20.231884058, "max_line_length": 84, "alphanum_fraction": 0.6425501433, "include": true, "reason": "import numpy", "num_tokens": 371}
|
import time
import pickle
from numpy import diff, sort, median, array, zeros, linspace
import numpy as np
import matplotlib
matplotlib.use('Agg')
from pystorm.hal import HAL
from pystorm.hal.neuromorph import graph # to describe HAL/neuromorph network
from pystorm.PyDriver import bddriver as bd
HAL = HAL()  # instantiate the HAL (shadows the imported class name)
CORE_ID = 0
print("Setting Core_id to 0")
# Set all the DACs to some default values
# Use this function to determine what the default DAC values should be
def SetDefaultDACValues():
print("[INFO] Setting Default DAC Values")
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_ADC_BIAS_1 , 512)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_ADC_BIAS_2 , 512)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_EXC , 512)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_DC , 544)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_INH , 512)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_LK , 10)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_PD , 100)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_PU , 1024)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_DIFF_G , 1024)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_DIFF_R , 1)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SOMA_OFFSET , 1024)
HAL.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SOMA_REF , 3)
#Disabling all DAC to ADC connections
def DisconnectAllDACtoADCConnections():
print("[INFO] Disconnecting all DAC to ADC connections")
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_ADC_BIAS_1 , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_ADC_BIAS_2 , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_EXC , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_DC , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_INH , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_LK , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_PD , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SYN_PU , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_DIFF_G , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_DIFF_R , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET , False)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, bd.bdpars.BDHornEP.DAC_SOMA_REF , False)
# Toggle between two current values to see frequency shifting
def toggle_DAC_values(toggleCount, toggleSleepTime, DAC_ID, DACval1, DACval2):
for i in range(toggleCount):
# print("Setting DAC Value to %d" % 10**i)
print("[INFO] Switching to low DAC setting: %d" % DACval1)
HAL.driver.SetDACCount(CORE_ID, DAC_ID, DACval1)
time.sleep(toggleSleepTime)
print("[INFO] Switching to high DAC setting %d" % DACval2)
HAL.driver.SetDACCount(CORE_ID, DAC_ID, DACval2)
time.sleep(toggleSleepTime)
# Toggle the ADC between its large- and small-current ranges for a given DAC setting
def toggle_small_large_adc(adc_id, toggleCount, toggleSleepTime):
HAL.driver.SetADCScale(CORE_ID, adc_id, "large")
for i in range(toggleCount):
HAL.driver.SetADCScale(CORE_ID, adc_id, "small")
print("[INFO] Using ADC %d, Small Current" % adc_id)
time.sleep(toggleSleepTime)
HAL.driver.SetADCScale(CORE_ID, adc_id, "large")
print("[INFO] Using ADC %d, Large Current" % adc_id)
time.sleep(toggleSleepTime)
# Sweep the given DAC for the full range between 1 and 1024
def sweep_DAC_range(DAC_ID):
for i in range(1024):
print("{INFO} Setting DAC to %d" % (i+1))
HAL.driver.SetDACCount(CORE_ID, DAC_ID, i+1)
time.sleep(0.1)
# Reset the board to a known reset/refresh state
SetDefaultDACValues()
DisconnectAllDACtoADCConnections()
# Disable all Synapse to ADC connections
print("[INFO] Disconnecting all Synapse to ADC connections")
for addr in range(1024):
HAL.driver.DisableSynapseADC(CORE_ID, addr)
# Disable all Somas and set them to minimum gain
print("[INFO] Disabling all Somas")
for addr in range(4096):
HAL.driver.DisableSoma(CORE_ID, addr)
HAL.driver.SetSomaGain(CORE_ID, addr, bd.bdpars.SomaGainId.ONE)
HAL.driver.SetSomaOffsetSign(CORE_ID, addr, bd.bdpars.SomaOffsetSignId.POSITIVE)
HAL.driver.SetSomaOffsetMultiplier(CORE_ID, addr, bd.bdpars.SomaOffsetMultiplierId.ONE)
# Turn on ADCs, assuming they are off
HAL.driver.SetADCTrafficState(CORE_ID, True)
time.sleep(3)
def SetADCsScale(scale):
if(scale=="large" or scale=="small"):
print("[INFO] Setting ADC Scale to %s for ADC: 0" % scale)
HAL.driver.SetADCScale(CORE_ID, 0, scale)
print("[INFO] Setting ADC Scale to %s for ADC: 1" % scale)
HAL.driver.SetADCScale(CORE_ID, 1, scale)
else:
print("Error: Invalid Scale value; Doing Nothing")
SetADCsScale("large")
#adc_id = 0
#print("[INFO] Setting ADC Scale to large for ADC: %d" % adc_id)
#HAL.driver.SetADCScale(CORE_ID, adc_id, "large")
#adc_id = 1
#print("[INFO] Setting ADC Scale to small for ADC: %d" % adc_id)
#HAL.driver.SetADCScale(CORE_ID, adc_id, "small")
# ADC Biases don't connect to the ADCs, so we shouldn't see changing them affect
# ADC output at all
# bd.bdpars.BDHornEP.DAC_ADC_BIAS_1 1pA to 1nA
# bd.bdpars.BDHornEP.DAC_ADC_BIAS_2 1pA to 1nA
# Approximate current values for each of the 12 DACs to the Neuron Array
# Current values to the ADC should be approximately 1pA and 1nA, subject to mismatch
# bd.bdpars.BDHornEP.DAC_SYN_EXC 250fA to 250pA
# bd.bdpars.BDHornEP.DAC_SYN_DC 125fA to 125pA
# bd.bdpars.BDHornEP.DAC_SYN_INH 8fA to 8pA
# bd.bdpars.BDHornEP.DAC_SYN_LK 6.25fA to 6.25pA
# bd.bdpars.BDHornEP.DAC_SYN_PD 1pA to 1nA
# bd.bdpars.BDHornEP.DAC_SYN_PU 1pA to 1nA
# bd.bdpars.BDHornEP.DAC_DIFF_G 1pA to 1nA
# bd.bdpars.BDHornEP.DAC_DIFF_R 1pA to 1nA
# bd.bdpars.BDHornEP.DAC_SOMA_OFFSET 250fA to 250pA
# bd.bdpars.BDHornEP.DAC_SOMA_REF 1pA to 1nA
DAC_ID = bd.bdpars.BDHornEP.DAC_SYN_EXC
tsleep = 4
# Enable the one DAC to ADC Connection that we want
def ConnectDACtoADC(DAC_ID):
DisconnectAllDACtoADCConnections()
print("Connecting to %s" % DAC_ID)
HAL.driver.SetDACtoADCConnectionState(CORE_ID, DAC_ID, True)
ConnectDACtoADC(DAC_ID)
# Sweep the DAC through its full range of 1024 values
#sweep_DAC_range(DAC_ID)
# Toggle between two DAC values
#toggle_DAC_values(5, tsleep, DAC_ID, 10, 100)
# Toggle the ADC between its large- and small-current ranges for a given DAC setting
#adc_id = 0
print("[INFO] Setting DAC Count to 10")
HAL.driver.SetDACCount(CORE_ID, DAC_ID, 10)
#toggle_small_large_adc(adc_id, 7, tsleep)
#HAL.driver.SetADCTrafficState(CORE_ID, False)
|
{"hexsha": "fe3fb49b6906d466a73492f3887717f6cd8a5668", "size": 7166, "ext": "py", "lang": "Python", "max_stars_repo_path": "pystorm/examples/test_adc.py", "max_stars_repo_name": "Stanford-BIS/pystorm", "max_stars_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-12-19T06:46:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-08T18:53:20.000Z", "max_issues_repo_path": "pystorm/examples/test_adc.py", "max_issues_repo_name": "Stanford-BIS/pystorm", "max_issues_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-13T00:30:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-14T18:55:44.000Z", "max_forks_repo_path": "pystorm/examples/test_adc.py", "max_forks_repo_name": "Stanford-BIS/pystorm", "max_forks_repo_head_hexsha": "4acaaee78a04b69ad17554126018016800e5a140", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0691823899, "max_line_length": 94, "alphanum_fraction": 0.7326262908, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2255}
|
# Install python 3, duh!
# Run the command below in a cmd window to install the needed packages, without the #, duh!
# pip install bs4 requests pandas openpyxl lxml html5lib
# Run the python file with the included batch file, DUH!
try:
# Error handling if something happens during script initialisation
from csv import QUOTE_ALL # Needed to export data to CSV
from bs4 import BeautifulSoup # Needed to parse the dynamic webpage of the Ducanator
from requests import get # Needed to get the webpage of the Ducanator
from re import search # Needed to find the json string to import into pandas
from pandas import read_csv, set_option, concat, DataFrame, read_json, read_html, ExcelWriter # Needed to convert the json string into a usable dataframe object for manipulation
from traceback import format_exc # Needed for more friendly error messages.
from openpyxl import load_workbook
from numpy import arange
from os import path
except ModuleNotFoundError:
print('OOPSIE WOOPSIE!! Uwu We made a fucky wucky!! A wittle fucko boingo! The code monkeys at our headquarters are working VEWY HAWD to fix this!')
    print('You didn\'t install the packages like I told you to. Please run \"pip install bs4 requests pandas openpyxl lxml html5lib\" in a cmd window to install the required packages!')
print('\033[1;31m' + format_exc())
exit(1)
try:
#User Variables
workbook_name = 'Prime_Relic_Data.xlsx'
csv_name = 'Prime-Relic Data.csv'
sheet_name_day = 'Day'
sheet_name_hour = 'Hour'
sheet_name_relic = 'Relic_Data'
retry_attempts = 10
# Sets the URL to scrape, because hard-coding is bad
print('Downloading Ducat Data')
url_ducats = "https://warframe.market/tools/ducats"
# Scrapes the given URL
soup = str(BeautifulSoup(get(url_ducats).content, "html.parser")).replace('\n', '')
print('Ducat Data Downloaded')
print('Processing Ducat Data')
# Finds the needed json string for item data, previous hour data, and previous day data.
# Slices off the first bit to make a valid json string for pandas later
items = search('"items": (\[(?:\[??[^\[]*?\]))', soup).group(0)[9:]
previous_hour = search('"previous_hour": (\[(?:\[??[^\[]*?\]))', soup).group(0)[17:]
previous_day = search('"previous_day": (\[(?:\[??[^\[]*?\]))', soup).group(0)[16:]
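    # each pattern grabs the first JSON array that follows its key, e.g. '"items": [...]';
    # the slice offsets (9, 17, 16) strip the '"key": ' prefix from the matched string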
# Reads and sanitises the item data into a pandas dataframe
df_items = read_json(items)
df_items = df_items.drop(columns=['url_name', 'thumb'])
df_items = df_items.reindex(columns=['id', 'item_name'])
# Reads and sanitises the previous day data into a pandas dataframe
df_previous_day = read_json(previous_day)
df_previous_day = df_previous_day.drop(columns=['id', 'plat_worth', 'median'])
df_previous_day = df_previous_day.rename(columns={'item': 'id'})
# Merges the item data and previous day data on the id column, drops the redundant id column, then renames the column names for export
df_previous_day_merged = df_items.merge(df_previous_day, how='inner', on='id')
df_previous_day_merged = df_previous_day_merged.drop(columns=['id'])
df_previous_day_merged = df_previous_day_merged.reindex(columns=['item_name', 'datetime', 'ducats_per_platinum', 'ducats', 'wa_price','ducats_per_platinum_wa', 'position_change_month', 'position_change_week', 'position_change_day', 'volume'])
df_previous_day_merged = df_previous_day_merged.sort_values(by='item_name')
df_previous_day_merged['datetime'] = df_previous_day_merged['datetime'].astype(str).str[:-6]
# Reads and sanitises the previous hour data into a pandas dataframe
df_previous_hour = read_json(previous_hour)
df_previous_hour = df_previous_hour.drop(columns=['id', 'plat_worth', 'median'])
df_previous_hour = df_previous_hour.rename(columns={'item': 'id'})
# Merges the item data and previous hour data on the id column, drops the redundant id column, then renames the column names for export
df_previous_hour_merged = df_items.merge(df_previous_hour, how='inner', on='id')
df_previous_hour_merged = df_previous_hour_merged.drop(columns=['id'])
df_previous_hour_merged = df_previous_hour_merged.reindex(columns=['item_name', 'datetime', 'ducats_per_platinum', 'ducats', 'wa_price','ducats_per_platinum_wa', 'position_change_month', 'position_change_week', 'position_change_day', 'volume'])
df_previous_hour_merged = df_previous_hour_merged.sort_values(by='item_name')
df_previous_hour_merged['datetime'] = df_previous_hour_merged['datetime'].astype(str).str[:-6]
df_previous_hour_merged = df_previous_hour_merged.reset_index(drop=True)
print('Ducat Data Processed')
    # Relic data: download (cached locally) and parse the drop-table HTML
print('Downloading Relic Data')
url_relics = "https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html"
relic_data_txt_name = 'RelicData.txt'
if path.isfile(relic_data_txt_name):
with open(relic_data_txt_name) as f:
soup = str(f.readlines())
print("Loaded Local Relic Data")
else:
print("Loading Remote Item Data")
for x in range(0, retry_attempts):
try:
soup = str(BeautifulSoup(get(url_relics).content, "html.parser")).replace('\n', '')
print('Saving Local Data')
with open(relic_data_txt_name, 'w') as f:
f.write(soup)
break
except Exception:
print('Relic data download failed, retrying... ' + str(retry_attempts - x - 1) + ' attempts left...', end='\r')
print('Relic Data Downloaded')
print('Processing Relic Data')
parsed_relics = search('<h3 id="relicRewards">Relics:</h3><table>.*?</table>', soup).group(0)[34:].replace('th>', 'td>').replace(r'<th colspan="2">', r'<td>').replace('X Kuva', 'x Kuva')
df_parsed_relics = read_html(parsed_relics, header=None)
df_parsed_relics = df_parsed_relics[0].replace(to_replace=r'.+\((.+)\%\)', value=r'\1', regex=True)
df_parsed_relics[1] = df_parsed_relics[1].astype(float)
df_parsed_relics = df_parsed_relics.dropna(how='all').fillna(999)
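    # the drop table flattens each relic into 7 consecutive rows (name + 6 rewards);
    # grouping on arange(len)//7 buckets the rows per relic so each relic's rewards
    # can be sorted by drop chance on their own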
groups = df_parsed_relics.groupby(arange(len(df_parsed_relics.index)) // 7, sort=False).apply(lambda x: x.sort_values(by=1, ascending=False))
groups[1] = ' (' + groups[1].astype(str) + '%)'
groups = groups[0] + groups[1]
groups = groups.replace(to_replace=r'\(999.0\%\)', value=r'', regex=True)
templist = []
templist2 = []
for count, value in enumerate(groups):
if count % 7 == 0 and count != 0:
templist2.append(templist)
templist = []
templist.append(value)
df_even_more_parsed_relics = DataFrame(templist2, columns=['Relic_Name', 'C1', 'C2', 'C3', 'U1', 'U2', 'Rare'])
df_relic_class = df_even_more_parsed_relics['Relic_Name'].str.split().str[0]
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Class', df_relic_class, allow_duplicates=True)
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Type', df_even_more_parsed_relics['Relic_Name'].str.upper().str.split().str[1], allow_duplicates=True)
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Refinement', df_even_more_parsed_relics['Relic_Name'].str.split().str[3].replace(to_replace=r'[\(\)]', value=r'', regex=True), allow_duplicates=True)
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C1_Raw', df_even_more_parsed_relics['C1'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C2_Raw', df_even_more_parsed_relics['C2'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C3_Raw', df_even_more_parsed_relics['C3'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U1_Raw', df_even_more_parsed_relics['U1'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U2_Raw', df_even_more_parsed_relics['U2'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Rare_Raw', df_even_more_parsed_relics['Rare'].replace(to_replace=r' \(.+\)',value='',regex=True))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C1_Odds', df_even_more_parsed_relics['C1'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C2_Odds', df_even_more_parsed_relics['C2'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C3_Odds', df_even_more_parsed_relics['C3'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U1_Odds', df_even_more_parsed_relics['U1'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U2_Odds', df_even_more_parsed_relics['U2'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Rare_Odds', df_even_more_parsed_relics['Rare'].replace(to_replace=r'.+\((.+)\%\)',value=r'\1',regex=True).astype(float))
df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Systems Blueprint',value=r'Systems', regex=True)
df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Neuroptics Blueprint',value=r'Neuroptics', regex=True)
df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Chassis Blueprint',value=r'Chassis', regex=True)
#print(df_even_more_parsed_relics.head(5))
#df_even_more_parsed_relics['Relic_Name'] = df_even_more_parsed_relics['Relic_Name'].str.split(n=1).str[1]
#df_axi = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Axi'].reset_index(drop=True)
#df_lith = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Lith'].reset_index(drop=True)
#df_meso = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Meso'].reset_index(drop=True)
#df_neo = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Neo'].reset_index(drop=True)
#df_requiem = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Requiem'].reset_index(drop=True)
#df_final_export_relic = concat([df_axi,df_lith,df_meso,df_neo,df_requiem], axis=1, ignore_index=True)
#print(df_even_more_parsed_relics)
print('Relic Data Processed')
# Export data
print('Exporting Worksheet')
df_even_more_parsed_relics.to_csv(csv_name, index=None, quoting=QUOTE_ALL)
df_previous_day_merged.to_csv('DayPrices.csv', index=None, quoting=QUOTE_ALL)
with ExcelWriter(workbook_name, mode='a', engine='openpyxl', if_sheet_exists='replace') as writer:
df_previous_day_merged.to_excel(writer, sheet_name=sheet_name_day)
df_previous_hour_merged.to_excel(writer, sheet_name=sheet_name_hour)
df_even_more_parsed_relics.to_excel(writer, sheet_name=sheet_name_relic)
#df_final_export_relic.to_excel(writer, sheet_name=sheet_name_relic)
book = load_workbook(workbook_name)
sheet = book[sheet_name_day]
sheet.delete_cols(1,1)
sheet = book[sheet_name_hour]
sheet.delete_cols(1,1)
sheet = book[sheet_name_relic]
sheet.delete_cols(1,1)
book.save(workbook_name)
print('If you see this message, things should have worked correctly. Remove the \"pause\" from the batch script to automatically close this window after use.')
except Exception:
# Error handling if something happens during the main script
print('OOPSIE WOOPSIE!! Uwu We made a fucky wucky!! A wittle fucko boingo! The code monkeys at our headquarters are working VEWY HAWD to fix this!')
print('\033[1;31m' + format_exc())
exit(1)
|
{"hexsha": "61827a2b6f82e9372ae78b5733250a95b4b1c740", "size": 12314, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scrape the Ducanator.py", "max_stars_repo_name": "BaconCatBug/Scrape-the-Ducanator", "max_stars_repo_head_hexsha": "14c0a3e1ac9a78c57a4bce331f8dfbab79ec90cd", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scrape the Ducanator.py", "max_issues_repo_name": "BaconCatBug/Scrape-the-Ducanator", "max_issues_repo_head_hexsha": "14c0a3e1ac9a78c57a4bce331f8dfbab79ec90cd", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scrape the Ducanator.py", "max_forks_repo_name": "BaconCatBug/Scrape-the-Ducanator", "max_forks_repo_head_hexsha": "14c0a3e1ac9a78c57a4bce331f8dfbab79ec90cd", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.4352941176, "max_line_length": 248, "alphanum_fraction": 0.729738509, "include": true, "reason": "from numpy", "num_tokens": 3278}
|
import numpy
from scipy.ndimage import shift
from skimage.exposure import rescale_intensity
from aydin.features.groups.translations import TranslationFeatures
from aydin.io.datasets import camera
def n(image):
return rescale_intensity(
image.astype(numpy.float32), in_range='image', out_range=(0, 1)
)
def test_translation_feature_group():
# get image:
image = n(camera().astype(numpy.float32))
# Instantiates translation features:
vectors = [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]
translations = TranslationFeatures(translations=vectors)
assert translations.num_features(image.ndim) == 8
# Check receptive field radius:
assert translations.receptive_field_radius == 1
# Set image:
translations.prepare(image)
# compute features and check their valididty:
feature = numpy.empty_like(image)
for index in range(translations.num_features(image.ndim)):
translations.compute_feature(index=index, feature=feature)
vector = vectors[index]
translated = shift(
image,
shift=list(vector),
output=feature,
order=0,
mode='constant',
cval=0.0,
prefilter=False,
)
assert (feature == translated).all()
|
{"hexsha": "948c3b4484f019f4e52b2c150e7ab813aa5a5180", "size": 1319, "ext": "py", "lang": "Python", "max_stars_repo_path": "aydin/features/groups/test/test_translation_feature_group.py", "max_stars_repo_name": "royerloic/aydin", "max_stars_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 78, "max_stars_repo_stars_event_min_datetime": "2021-11-08T16:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T17:51:04.000Z", "max_issues_repo_path": "aydin/features/groups/test/test_translation_feature_group.py", "max_issues_repo_name": "royerloic/aydin", "max_issues_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-11-08T17:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T17:46:55.000Z", "max_forks_repo_path": "aydin/features/groups/test/test_translation_feature_group.py", "max_forks_repo_name": "royerloic/aydin", "max_forks_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-11-09T17:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T00:37:57.000Z", "avg_line_length": 29.3111111111, "max_line_length": 84, "alphanum_fraction": 0.6489764973, "include": true, "reason": "import numpy,from scipy", "num_tokens": 300}
|
#include <cstdlib>
#include <ctime>
#include <chrono>
#include <iostream>
#include <unordered_set>
#include <boost/program_options.hpp>
#include "../yche_refactor/bprw_yche.h"
#include "../yche_refactor/simrank.h"
using namespace std;
using namespace std::chrono;
using namespace boost::program_options;
void test_bp(string data_name, double c, double epsilon, double delta, int x, int y) {
string path = get_edge_list_path(data_name);
GraphYche g(path);
// 1st: init
BackPush bprw(data_name, g, c, epsilon, delta);
size_t n = static_cast<size_t>(g.n);
NodePair q{x, y};
TruthSim ts(data_name, g, c, epsilon);
auto max_err = 0.0;
auto min_err = 1.0;
// 2nd: query sim(x,y)
auto failure_count = 0;
auto start = std::chrono::high_resolution_clock::now();
for (auto i = 0; i < 2000; i++) {
double result = bprw.query_one2one(q);
auto cur_err = abs(result - ts.sim(x, y));
max_err = max(max_err, cur_err);
min_err = min(min_err, cur_err);
if (cur_err > 0.01) {
cout << result << " ," << ts.sim(x, y) << endl;
failure_count++;
}
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = end - start;
cout << "failure count:" << failure_count << endl;
cout << "max err:" << max_err << endl;
cout << "min err:" << min_err << endl;
cout << format("memory:%s KB") % getValue() << endl;
cout << format("total query cost: %s s") % elapsed.count() << endl; // record the pre-processing time
}
int main(int args, char *argv[]) {
string data_name(argv[1]);
double c = 0.6;
double epsilon = 0.01;
double delta = 0.01;
int x = atoi(argv[2]), y = atoi(argv[3]);
test_bp(data_name, c, epsilon, delta, x, y);
}
|
{"hexsha": "ee607c646da2a3ffd18dd72ec37bcbbecde1ee07", "size": 1819, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "LPMC-Profile/playground/main_bprw.cpp", "max_stars_repo_name": "CheYulin/SimRankRelease", "max_stars_repo_head_hexsha": "f05cce8664d0ba754020abb39405ae49857c3b0d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2020-04-14T23:17:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T12:34:04.000Z", "max_issues_repo_path": "LPMC-Profile/playground/main_bprw.cpp", "max_issues_repo_name": "KeithYue/SimRankRelease", "max_issues_repo_head_hexsha": "f05cce8664d0ba754020abb39405ae49857c3b0d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LPMC-Profile/playground/main_bprw.cpp", "max_forks_repo_name": "KeithYue/SimRankRelease", "max_forks_repo_head_hexsha": "f05cce8664d0ba754020abb39405ae49857c3b0d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-01-17T16:26:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-17T16:26:50.000Z", "avg_line_length": 30.3166666667, "max_line_length": 105, "alphanum_fraction": 0.6107751512, "num_tokens": 516}
|
Eric Price
yes, that is his name.
He is a student of music at UC Davis. He's active in many of the musical efforts that this town puts forth.
His instrument is the bass.
Currently Eric is working with the University Symphony Orchestra UC Davis Symphony and serving as Music Manager. He also plays with the local band The Keystones.
Earlier this year Eric performed in the production of the Rocky Horror Show. He played bass in the pit band and wore a pink wig and makeup. The title of pit band is somewhat misplaced because the band was actually set into the stage and was an integral part of the show.
Please say something. say hi. talk to me. meeting people rocks! and talking to old friends is awesome as well.
|
{"hexsha": "c4de1af36eed9cfdd6f7cfe012c281e7755e9e80", "size": 720, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/EricPrice.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/EricPrice.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/EricPrice.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0, "max_line_length": 270, "alphanum_fraction": 0.7805555556, "num_tokens": 152}
|
%%*************************************************************************
%% mybicgstab
%%
%% [xx,resnrm,flag] = mybicgstab(A,b,M1,tol,maxit)
%%
%% iterates on b - M1*A*x
%%
%% r = b-A*xtrue;
%%
%%*************************************************************************
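%% example call (a sketch: M1 must be the preconditioner struct consumed by
%% linsysolvefun, and A either a plain matrix or the block struct used by matvec):
%%   [x,resnrm,flag] = mybicgstab(A,b,M1,1e-10,50,1);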
function [xx,resnrm,flag] = mybicgstab(A,b,M1,tol,maxit,printlevel)
N = length(b);
if (nargin < 6); printlevel = 1; end
if (nargin < 5) || isempty(maxit); maxit = max(30,length(A.mat22)); end;
if (nargin < 4) || isempty(tol); tol = 1e-10; end;
tolb = min(1e-4,tol*norm(b));
flag = 1;
x = zeros(N,1);
if (norm(x))
if isstruct(A); r = b-matvec(A,x); else r = b-mexMatvec(A,x); end;
else
r =b;
end
err = norm(r); resnrm(1) = err; minresnrm = err; xx = x;
%%if (err < 1e-3*tolb); return; end
omega = 1.0;
r_tld = r;
%%
%%
%%
breakyes = 0;
smtol = 1e-40;
for iter = 1:maxit,
rho = (r_tld'*r);
if (abs(rho) < smtol)
flag = 2;
if (printlevel); fprintf('*'); end;
breakyes = 1;
break;
end
if (iter > 1)
beta = (rho/rho_1)* (alp/omega);
p = r + beta*(p - omega*v);
else
p = r;
end
p_hat = precond(A,M1,p);
if isstruct(A); v = matvec(A,p_hat); else v = mexMatvec(A,p_hat); end;
alp = rho / (r_tld'*v);
s = r - alp*v;
%%
s_hat = precond(A,M1,s);
if isstruct(A); t = matvec(A,s_hat); else t = mexMatvec(A,s_hat); end;
omega = (t'*s) / (t'*t);
x = x + alp*p_hat + omega*s_hat;
r = s - omega*t;
rho_1 = rho;
%%
%% check convergence
%%
err = norm(r); resnrm(iter+1) = err; %#ok
if (err < minresnrm);
xx = x; minresnrm = err;
end
if (err < tolb)
break;
end
if (err > 10*minresnrm)
if (printlevel); fprintf('^'); end
breakyes = 2;
break;
end
if (abs(omega) < smtol)
flag = 2;
if (printlevel); fprintf('*'); end
breakyes = 1;
break;
end
end
if (~breakyes) && (printlevel >=3); fprintf(' '); end
%%
%%*************************************************************************
%%*************************************************************************
%% precond:
%%*************************************************************************
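%% Applies one sweep of a block preconditioner: when a trailing block is
%% present (m2 > 0), the second-block residual is eliminated through the
%% stored factors L.Mp, L.Ml, L.Mu before linsysolvefun is applied to the
%% leading block (a Schur-complement-style step, inferred from the code).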
function Mx = precond(A,L,x)
m = L.matdim; m2 = length(x)-m;
Mx = zeros(length(x),1);
for iter = 1  % single correction sweep
if norm(Mx); r = x - matvec(A,Mx); else r = x; end
if (m2 > 0)
r1 = full(r(1:m));
else
r1 = full(r);
end
if (m2 > 0)
r2 = r(m+1:m+m2);
w = linsysolvefun(L,r1);
z = mexMatvec(A.mat12,w,1) - r2;
z = L.Mu \ (L.Ml \ (L.Mp*z));
r1 = r1 - mexMatvec(A.mat12,z);
end
d = linsysolvefun(L,r1);
if (m2 > 0)
d = [d; z]; %#ok
end
Mx = Mx + d;
end
%%*************************************************************************
%%*************************************************************************
%% matvec: matrix-vector multiply.
%% matrix = [A.mat11, A.mat12; A.mat12', A.mat22]
%%*************************************************************************
function Ax = matvec(A,x)
m = length(A.mat11); m2 = length(x)-m;
if issparse(x); x = full(x); end
if (m2 > 0)
x1 = x(1:m);
else
x1 = x;
end
Ax = mexMatvec(A.mat11,x1);
if (m2 > 0)
x2 = x(m+1:m+m2);
Ax = Ax + mexMatvec(A.mat12,x2);
Ax2 = mexMatvec(A.mat12,x1,1) + mexMatvec(A.mat22,x2);
Ax = [full(Ax); full(Ax2)];
end
%%*************************************************************************
|
{"author": "zarathustr", "repo": "LibQPEP", "sha": "99e5c23e746ace0bac4a86742c31db6fcf7297ba", "save_path": "github-repos/MATLAB/zarathustr-LibQPEP", "path": "github-repos/MATLAB/zarathustr-LibQPEP/LibQPEP-99e5c23e746ace0bac4a86742c31db6fcf7297ba/MATLAB/sdpt3/Solver/mybicgstab.m"}
|
from typing import List
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from oolearning.enums.Metric import Metric
from oolearning.evaluators.CostFunctionMixin import CostFunctionMixin
from oolearning.evaluators.ScoreBase import ScoreBase
from oolearning.model_processors.GridSearchTunerResults import GridSearchTunerResults
class SearcherResults:
def __init__(self,
model_descriptions: List[str],
model_names: List[str],
tuner_results: List[GridSearchTunerResults],
holdout_scores: List[List[ScoreBase]]):
"""
:param tuner_results: list of GridSearchTunerResults (one per model)
:param holdout_scores: list of (list of Scores). Each outer list item (one per model),
contains a list of Scores (same Scores as the Tuner)
"""
self._model_descriptions = model_descriptions
self._model_names = model_names
self._tuner_results = tuner_results
self._holdout_scores = holdout_scores
@property
def tuner_results(self) -> List[GridSearchTunerResults]:
"""
:return: a list of TunerResult objects
"""
return self._tuner_results
@property
def holdout_score_objects(self) -> List[List[ScoreBase]]:
"""
:return: List of Lists of Scores. Each item in the outer list corresponds to a specific model/
model-description. Each of those items has a list of Scores, corresponding to the
Scores passed into the Searcher.
"""
return self._holdout_scores
@property
def model_names(self) -> List[str]:
return self._model_names
@property
def model_descriptions(self) -> List[str]:
return self._model_descriptions
@property
def holdout_scores(self) -> pd.DataFrame:
"""
Score values for the holdout sets
# TODO: update documentation
"""
# get all the scores for each model (each model will have the same Evaluators)
scores = self._holdout_scores[0]
# get all the columns that the tuner_results will have
score_columns = [x.name for x in scores]
holdout_accuracies = [[score.value for score in score_list] for score_list in self._holdout_scores]
return pd.DataFrame(holdout_accuracies, columns=score_columns, index=self._model_descriptions)
# noinspection PyUnresolvedReferences
@property
def best_tuned_results(self):
"""
:return: a dataframe with each model + best tuned result as a row
"""
# get all the scores for each model (each model will have the same Evaluators)
scores = self.holdout_score_objects[0]
# get all the columns that the tuner_results will have
score_columns = [(x.name + '_mean', x.name + '_st_dev', x.name + '_cv')
for x in scores]
score_columns = list(sum(score_columns, ())) # flatten out list
# get the best resampling results each model that was tuned
best_model_series = [x.best_model for x in self.tuner_results]
# since each model will have different tune results, we need to separate and put in its own column
hyper_params = [x.loc[list(set(x.index.values).difference(score_columns))].to_dict()
for x in best_model_series] # gets the hyper-params for each model (as a dict)
assert len(self._model_names) == len(hyper_params)
assert len(self._model_names) == len(best_model_series)
best_tune_results_df = pd.DataFrame(columns=['model', 'hyper_params'] + score_columns)
for index in range(len(self._model_names)):
tune_results_series = best_model_series[index].\
drop(list(set(best_model_series[index].index.values).difference(score_columns)))
tune_results_series['hyper_params'] = hyper_params[index]
tune_results_series['model'] = self._model_names[index]
best_tune_results_df = best_tune_results_df.\
append(tune_results_series.loc[['model', 'hyper_params'] + score_columns])
best_tune_results_df.index = self._model_descriptions
return best_tune_results_df
@property
def best_model_index(self):
"""
:return: returns the index of the best model based on the holdout accuracies (i.e. based on the first
Score specified when creating the Resampler passed into the ModelSearcher constructor.
This index can then be used to index on the `tuner_results` and `holdout_score_objects` properties
"""
# get the first score in each list (1 list per model)
score_list = [x[0] for x in self._holdout_scores]
# should all be the same type of score
assert all([type(score_list[0]) == type(x) for x in score_list])
# since Scores implement the __lt__ function based on the better_than function, sorting the
# Scores gets the indexes of the best sorted
indexes = np.argsort(score_list)
return indexes[0] # return the index of the first i.e. "best" model based on the sorted Scores
# Terminology notes:
# - a single item is a "model"; each tuned model (at least those with hyper-params) has multiple
#   sub-models, one per hyper-param combination
# - each model has a "best" hyper-param sub-model based on the tuned/resampled results; these are
#   found in `best_tuned_results`, the best sub-model per model with its resampled statistics
# - after each model has been tuned across all hyper-param combinations, the best sub-model is
#   refit on all of the training data and scored on a holdout set; those scores are found in
#   `holdout_scores` and are single values, so no mean/standard deviation is associated with them
# - each sub-model has been resampled, so the "best" sub-model also has the associated resampled
#   data (i.e. all of the cross-validation scores for each Score)
def plot_resampled_scores(self,
metric: Metric = None,
score_name: str = None,
x_axis_limits: tuple = (0.0, 1.0),
show_one_ste_rule: bool = False):
"""
for each "best" model, show the resamples via boxplot
:param metric: the metric (corresponding to the Score object) to display (use this parameter or
`score_name`)
:param score_name: alternative to the `metric` parameter; the name of the score to retrieve
(i.e. the `name` property of the Score object). While the `metric` parameter is a
convenience when dealing with built-in Scores, `score_name` can be used for custom
score objects.
:param x_axis_limits: limits for the x-axis
:param show_one_ste_rule: show a blue line one standard error below the mean of the best model.
"""
metric_name = metric.value if score_name is None else score_name
# build the dataframe that will be used to generate the boxplot; 1 column per "best" model
resamples = pd.DataFrame()
for index in range(len(self.model_names)):
cross_val_scores = self.tuner_results[index].best_model_resampler_object.resampled_scores[metric_name]
column_name = '{0}: {1}'.format(self.model_names[index],
self.model_descriptions[index])
resamples[column_name] = pd.Series(data=cross_val_scores)
# ensure the correct number of models (columns in `resamples`, and rows in `resampled_stats`)
assert resamples.shape[1] == len(self.model_names)
# ensure the correct number of resamples (rows in `resamples` should match the number of
# underlying cross-validation scores of the best resampled hyper-param combination)
assert resamples.shape[0] == len(self.tuner_results[0].best_model_resampler_object.resampled_scores[metric_name]) # noqa
# get the means to determine the 'best' hyper-param combo
resample_means = [resamples[column].mean() for column in resamples.columns.values]
assert len(resample_means) == resamples.shape[1]
# get the current score object so we can determine if it is a minimizer or maximizer
score = [x for x in self.holdout_score_objects[0] if x.name == metric_name]
assert len(score) == 1 # we should just get the current score
# if the `better_than` function returns True, 0 is "better than" 1 and we have a minimizer
# for minimizers, we want to return the min, which is the best value, otherwise, return the max
minimizer = isinstance(score[0], CostFunctionMixin)
best = min if minimizer else max
index_of_best_mean = resample_means.index(best(resample_means))
resamples.boxplot(vert=False, figsize=(10, 10))
resample_medians = [resamples[column].median() for column in resamples.columns.values]
plt.axvline(x=max(resample_medians), color='red', linewidth=1)
if show_one_ste_rule:
# using means rather than medians because we are calculating standard error (from the mean)
resamples_of_best_mean = resamples[resamples.columns.values[index_of_best_mean]].values
one_standard_error_of_best = resamples_of_best_mean.std() / math.sqrt(len(resamples_of_best_mean))
# noinspection PyUnresolvedReferences
one_standard_error_rule = resamples_of_best_mean.mean() - one_standard_error_of_best
plt.axvline(x=one_standard_error_rule, color='blue', linewidth=1)
# noinspection PyTypeChecker,PyUnresolvedReferences
if (resamples <= 1).all().all():
plt.xlim(x_axis_limits[0], x_axis_limits[1])
# plt.xticks(np.arange(start=x_axis_limits[0], stop=x_axis_limits[1], step=0.05))
# ax = plt.gca()
# ax.set_xticklabels(labels=['0']+['{0:.2f}'.format(x) for x in np.arange(start=x_axis_limits[0] + 0.05,
# stop=x_axis_limits[1] + 0.05, step=0.05)]+['1'], # noqa
# rotation=20,
# ha='right')
plt.title('{0} ({1})'.format('Resampling Scores Per `Best` Models',
metric.name if score_name is None else score_name),
loc='right')
plt.tight_layout()
plt.gca().get_yticklabels()[index_of_best_mean].set_color('red')
plt.gca().invert_yaxis()
def plot_holdout_scores(self):
"""
NOTE: only shows the "tuned" hyper-params i.e. hyper-params that were tuned over >1 values.
:return:
"""
scores = self.holdout_score_objects[0]
# if the Score is a Cost Function it is a 'minimizer'
minimizers = [isinstance(x, CostFunctionMixin) for x in scores]
score_columns = [x.name for x in scores]
score_values = self.holdout_scores.loc[:, score_columns]
num_rows = len(score_values)
num_cols = len(score_values.columns)
fig, ax = plt.subplots(figsize=(10, 10))
for i in range(num_cols):
truths = [True] * num_cols
truths[i] = False
mask = np.array(num_rows * [truths], dtype=bool)
color_values = np.ma.masked_where(mask, score_values)
# "_r" value after color means invert colors (small values are darker)
ax.pcolormesh(color_values, cmap='Blues_r' if minimizers[i] else 'Greens')
for y in range(score_values.shape[0]):
for x in range(score_values.shape[1]):
plt.text(x + .5, y + .5, '%.3f' % score_values.iloc[y, x],
horizontalalignment='center',
verticalalignment='center')
ax.set_xticks(np.arange(start=0.5, stop=len(score_columns), step=1))
ax.set_xticklabels(score_columns, rotation=35, ha='right')
labels = self.holdout_scores.index.values
y_tick_positions = np.arange(start=0, stop=len(labels)) + 0.5
ax.set_yticks(y_tick_positions)
ax.set_yticklabels(labels)
ax.invert_yaxis()
plt.tight_layout()
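# Illustrative sketch (not part of this module): `best_model_index` relies on the
# Score objects implementing `__lt__` via their `better_than` logic, so that
# `np.argsort` orders them best-first. A minimal stand-in:
#
#     class MinimalScore:
#         def __init__(self, value):
#             self.value = value
#         def __lt__(self, other):  # lower cost is "better", as for a CostFunctionMixin
#             return self.value < other.value
#
#     scores = [MinimalScore(v) for v in (0.30, 0.12, 0.25)]
#     best_index = np.argsort(scores)[0]  # -> 1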
|
{"hexsha": "37b6e93d7628670937f01a5d0e8529f6c496f6f1", "size": 12350, "ext": "py", "lang": "Python", "max_stars_repo_path": "oolearning/model_processors/SearcherResults.py", "max_stars_repo_name": "shane-kercheval/oo-learning", "max_stars_repo_head_hexsha": "9e3ebe5f7460179e23f6801bc01f1114bb896dea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-09T09:11:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-09T09:11:46.000Z", "max_issues_repo_path": "oolearning/model_processors/SearcherResults.py", "max_issues_repo_name": "shane-kercheval/oo-learning", "max_issues_repo_head_hexsha": "9e3ebe5f7460179e23f6801bc01f1114bb896dea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 48, "max_issues_repo_issues_event_min_datetime": "2018-04-09T01:30:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-13T03:25:59.000Z", "max_forks_repo_path": "oolearning/model_processors/SearcherResults.py", "max_forks_repo_name": "shane-kercheval/oo-learning", "max_forks_repo_head_hexsha": "9e3ebe5f7460179e23f6801bc01f1114bb896dea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.4583333333, "max_line_length": 202, "alphanum_fraction": 0.6524696356, "include": true, "reason": "import numpy", "num_tokens": 2785}
|
SUBROUTINE getfsq_par(gcr, gcz, gnormr, gnormz, gnorm, medge)
USE vmec_main, ONLY: rprec, ns, ns1, mnsize
USE vmec_params, ONLY: ntmax
USE parallel_include_module
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER, INTENT(in) :: medge
REAL(dp), INTENT(out) :: gnormr, gnormz
REAL(dp), INTENT(in) :: gnorm
REAL(dp), DIMENSION(mnsize,ns,ntmax), INTENT(IN) :: gcr, gcz
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: jsmax, nsmin, nsmax, l
REAL(dp) :: tmpgcx(ns,2), totalgcx(2)
!-----------------------------------------------
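!     Accumulate the sums of squared R and Z force residuals over the
!     radial surfaces up to jsmax, then scale by gnorm.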
IF (.NOT. lactive) RETURN
jsmax = ns1 + medge
nsmin=tlglob; nsmax=MIN(trglob,jsmax)
IF (trglob .GT. jsmax) tmpgcx(jsmax+1:trglob,1:2) = 0
DO l = nsmin, nsmax
tmpgcx(l,1) = SUM(gcr(:,l,:)**2)
tmpgcx(l,2) = SUM(gcz(:,l,:)**2)
END DO
DO l = 1, 2
CALL Gather1XArray(tmpgcx(:,l))
totalgcx(l) = SUM(tmpgcx(:,l))
END DO
gnormr = gnorm * totalgcx(1)
gnormz = gnorm * totalgcx(2)
END SUBROUTINE getfsq_par
SUBROUTINE getfsq(gcr, gcz, gnormr, gnormz, gnorm, medge)
USE vmec_main, ONLY: rprec, ns, ns1, mnsize
USE vmec_params, ONLY: ntmax
USE parallel_include_module
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER, INTENT(in) :: medge
REAL(dp), INTENT(out) :: gnormr, gnormz
REAL(dp), INTENT(in) :: gnorm
REAL(dp), DIMENSION(ns,mnsize*ntmax), INTENT(in) :: gcr, gcz
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: jsmax
!-----------------------------------------------
jsmax = ns1 + medge
gnormr = gnorm * SUM(gcr(:jsmax,:)**2)
gnormz = gnorm * SUM(gcz(:jsmax,:)**2)
END SUBROUTINE getfsq
|
{"hexsha": "8661c6fe4ee48292a4291b49d085fba99b228cc7", "size": 2160, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "VMEC2000/Sources/General/getfsq.f", "max_stars_repo_name": "joseluisvelasco/STELLOPT", "max_stars_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-05-08T01:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T10:35:28.000Z", "max_issues_repo_path": "VMEC2000/Sources/General/getfsq.f", "max_issues_repo_name": "joseluisvelasco/STELLOPT", "max_issues_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 77, "max_issues_repo_issues_event_min_datetime": "2020-05-08T07:18:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:20:33.000Z", "max_forks_repo_path": "Sources/General/getfsq.f", "max_forks_repo_name": "mbkumar/VMEC2000", "max_forks_repo_head_hexsha": "334e3bd478f2432b6fe8cbb321f0d81d9a952152", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T13:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T12:53:43.000Z", "avg_line_length": 35.4098360656, "max_line_length": 67, "alphanum_fraction": 0.4398148148, "num_tokens": 660}
|
\subsection{Commands}\label{subsec:steps_commands}
Commands are wrappers for the programs normally used in the pipeline: \shell{g++}, \shell{diff}, etc.
They take care of making sure that every dependency is properly set up, and reporting the execution status back to
the \hyperref[sec:environments]{environment}.\\
The base class of every command step is \python{CommandBase}.
\subsection*{\python{CommandBase}}\label{subsec:CommandBase}
\begin{itemize}[label={}]
\item \docfunc{__init__(self, limits, user, group)}
\docfuncdesc{
Constructor.
The \python{limits} argument allows the caller to specify the limits for the command execution without
having to override the \hyperref[sec:get_limits]{\python{get_limits()}} method.
The \python{user} and \python{group} arguments perform the same function, as per
\hyperref[sec:get_user]{\python{get_user()}} and \hyperref[sec:get_group]{\python{get_group()}}
methods, respectively.
}
\item \phantomsection \label{sec:command_get_env} \docfunc{get_env(self)}
\docfuncdesc{
Used to specify step-specific environment variables that should be present during the command execution.
Values specified here take precedence over those from the
\hyperref[sec:env_get_env]{\python{ExecutionEnvironment.get_env()}} method.
}
\item \phantomsection \label{sec:get_limits} \docfunc{get_limits(self)}
\docfuncdesc{
Defines the dictionary containing the limits active during the command execution.
It will be called by the
\hyperref[sec:run_command_step]{\python{ExecutionEnvironment.run_command_step()}} method.
The default implementation simply returns the limits passed to the constructor.
}
\item \docfunc{get_command(self)}
\docfuncdesc{
Abstract method.
Returns a list consisting of an executable and its arguments that will be run.
}
\item \docfunc{get_stdin_file(self)}
\docfuncdesc{
Returns a path to the file that will be redirected as the \shell{stdin} stream to the program.
Default: \python{None}.
}
\item \docfunc{get_stdout_file(self)}
\docfuncdesc{
Returns a path to the file to which the \shell{stdout} of the program will be redirected.
Default: \code{logs/<step_name>_stdout.txt}.
}
\item \docfunc{get_stderr_file(self)}
\docfuncdesc{
Returns a path to the file to which the \shell{stderr} of the program will be redirected.
Default: \code{logs/<step_name>_stderr.txt}.
}
\item \phantomsection \label{sec:postconditions} \docfunc{postconditions(self)}
\docfuncdesc{
Returns a list of validators, given as 2-tuples: \python{(validator, exit_status)}.
Each of the validators will be called after the command execution.
Refer to the \hyperref[sec:postconditions_desc]{postconditions description} for more information about their
interaction.\\
Default: \python{[]}
}
\item \docfunc{verify_postconditions(self, result)}
\docfuncdesc{
Iterates over the list returned from the \hyperref[sec:postconditions]{\python{postconditions()}} method,
calls each one of them with the execution statistics as an argument, and returns the appropriate
exit status if they return \python{False}.
}
\item \phantomsection \label{sec:prerequisites_command} \docfunc{prerequisites(self)}
\docfuncdesc{
Returns a list of validators that will be called before the command execution.
See \hyperref[sec:prerequisites_desc]{prerequisites description} for more information.\\
Default: \python{[]}
}
\item \docfunc{verify_prerequisites(self, environment)}
\docfuncdesc{
Iterates over the list returned from the \hyperref[sec:prerequisites_command]{\python{prerequisites()}}
method, calls each one of them with the environment as an argument, and raises the
\python{PrerequisiteException} if they return \python{False}.
}
\item \docfunc{set_name(self, name)}
\docfuncdesc{
Allows the step to store the name that it was assigned at
\hyperref[sec:add_steps]{\python{Checking.add_steps()}} call.
It can then be used to better identify the outputs of the command execution (e.g. prefixing filenames).
}
\item \docfunc{get_configuration_status(self)}
\docfuncdesc{
Returns a 2-tuple indicating whether the command can be run, and an exit status if it cannot.
Useful for the aggregate steps, where the command that needs to be run may turn out to be undefined
(e.g. because the file type passed as an argument is not recognized).\\
Default: \python{(True, None)}
}
\item \phantomsection \label{sec:get_user} \docfunc{get_user(self)}
\docfuncdesc{
Returns the name of the user that the command should be run as.
}
\item \phantomsection \label{sec:get_group} \docfunc{get_group(self)}
\docfuncdesc{
Returns the name of the group that the command should be run as.
}
\end{itemize}
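As a minimal sketch of a user-defined command (the class name and the command itself are illustrative, not part of the API):
\begin{verbatim}
class EchoCommand(CommandBase):
    def get_command(self):
        # An executable and its arguments, as required by get_command().
        return ['echo', 'hello']
\end{verbatim}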
\subsubsection{Pre-defined commands}\label{subsec:predefined_commands}
\subsubsection*{\python{CompileBase}}\label{subsec:CompileBase}
\begin{itemize}[label={}]
\item Base class of every compilation command.
Receives the compiler, files to be compiled, and compilation options as the constructor arguments, and prepares
appropriate prerequisites
(\hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}},
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}},
\hyperref[subsec:NonEmptyListPrerequisite]{\python{NonEmptyListPrerequisite}}
), postconditions (
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{CME}
), and a command to be called.
\end{itemize}
\subsubsection*{\python{CompileNasm}}\label{subsec:CompileNasm}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{nasm}
compiler and \code{-felf64} as the default compilation options.
\end{itemize}
\subsubsection*{\python{CompileC}}\label{subsec:CompileC}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{gcc}
compiler.
\end{itemize}
\subsubsection*{\python{CompileCpp}}\label{subsec:CompileCpp}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{g++}
compiler.
\end{itemize}
\subsubsection*{\python{CompileCSharp}}\label{subsec:CompileCSharp}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{mcs}
compiler and \code{-t:exe -out:main.exe} as the default compilation options.
\end{itemize}
\subsubsection*{\python{CompileGo}}\label{subsec:CompileGo}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{gccgo}
compiler.
\end{itemize}
\subsubsection*{\python{CompileHaskell}}\label{subsec:CompileHaskell}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{ghc}
compiler.
\end{itemize}
\subsubsection*{\python{CompileJava}}\label{subsec:CompileJava}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:CompileBase]{\python{CompileBase}} class, specifies the \code{javac}
compiler.
\end{itemize}
\subsubsection*{\python{CreateJar}}\label{subsec:CreateJar}
\begin{itemize}[label={}]
\item Takes the files that will be packed into a JAR archive, output file, manifest and an entrypoint as the
constructor arguments.
Prepares the standard prerequisites
(\hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}},
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}},
\hyperref[subsec:NonEmptyListPrerequisite]{\python{NonEmptyListPrerequisite}}
), postconditions (
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{CME}
), and uses the \code{jar} program in the returned command.
\end{itemize}
\subsubsection*{\python{Link}}\label{subsec:Link}
\begin{itemize}[label={}]
\item Takes the object files that will be linked together (using the \code{ld} program) and the output file as the
constructor arguments.
Specifies the standard prerequisites
(\hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}},
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}},
\hyperref[subsec:NonEmptyListPrerequisite]{\python{NonEmptyListPrerequisite}}
), and postconditions (
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{CME}
).
\end{itemize}
\subsubsection*{\python{Make}}\label{subsec:Make}
\begin{itemize}[label={}]
\item Takes the target and the build directory as the constructor arguments.
Specifies only the \hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}}
prerequisite, and sets the
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{CME}
postcondition.
\end{itemize}
\subsubsection*{\python{CMake}}\label{subsec:CMake}
\begin{itemize}[label={}]
\item Takes the source and the build directories as the constructor arguments.
Specifies only the \hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}}
prerequisite, and sets the
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{CME}
postcondition.
\end{itemize}
\subsubsection*{\python{Run}}\label{subsec:Run}
\begin{itemize}[label={}]
\item Base class for all of the commands that are running a program.
Receives the executable, its command line arguments, and paths to the files representing the standard I/O
streams.
Sets the \hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}} on the
executable, and \hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on the
\shell{stdin}.
No postconditions are set.
\end{itemize}
\subsubsection*{\python{RunSolution}}\label{subsec:RunSolution}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{./a.out} as the default
executable.
This class, and all others following the \code{*Solution} convention, define three postconditions:
\hyperref[subsec:UsedTimePostcondition]{\python{UsedTimePostcondition}} $\rightarrow$ \code{TLE},
\hyperref[subsec:UsedMemoryPostcondition]{\python{UsedMemoryPostcondition}} $\rightarrow$ \code{MEM}, and
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{RTE}.
\end{itemize}
\subsubsection*{\python{RunCSharp}}\label{subsec:RunCSharp}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{mono} as the default
executable.
Takes an EXE file to be run and the interpreter options as the arguments, and sets the
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on that EXE file.
\end{itemize}
\subsubsection*{\python{RunPSQL}}\label{subsec:RunPSQL}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{psql} as the default
executable.
Receives a SQL file and the connection configuration: user, password, host and database name as the
arguments.
\end{itemize}
\subsubsection*{\python{RunJavaClass}}\label{subsec:RunJavaClass}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{java} as the default
executable.
Takes a class file to be run and the interpreter options as the arguments, and sets the
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on that class file.
\end{itemize}
\subsubsection*{\python{RunJar}}\label{subsec:RunJar}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{java} as the default
executable.
Takes a JAR file to be run and the interpreter options as the arguments, and sets the
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on that JAR file.
\end{itemize}
\subsubsection*{\python{RunPython}}\label{subsec:RunPython}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets \code{python} as the default
executable.
Takes a Python script to be run and the interpreter options as the arguments, and sets the
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on that Python script.
\end{itemize}
\subsubsection*{\python{RunShell}}\label{subsec:RunShell}
\begin{itemize}[label={}]
\item This class is synonymous with the \hyperref[subsec:Run]{\python{Run}} class.
\end{itemize}
\subsubsection*{\python{Diff}}\label{subsec:Diff}
\begin{itemize}[label={}]
\item Command to compare two files while ignoring trailing spaces.
Takes these two files as the arguments, sets the
\hyperref[subsec:FileExistsPrerequisite]{\python{FileExistsPrerequisite}} on them, and defines the
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{ANS}
postcondition.
\end{itemize}
\subsubsection*{\python{RunChecker}}\label{subsec:RunChecker}
\begin{itemize}[label={}]
\item Inherits from the \hyperref[subsec:Run]{\python{Run}} class and sets the
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{ANS}
postcondition.
\end{itemize}
\subsubsection*{\python{ExtractArchive}}\label{subsec:ExtractArchive}
\begin{itemize}[label={}]
\item Command to extract various types of archives.
Receives the archive, optionally its type and a directory to extract to as the arguments.
Declares the \hyperref[subsec:ProgramExistsPrerequisite]{\python{ProgramExistsPrerequisite}} on the
appropriate executable (e.g. \code{unzip} or \code{tar}), and the
\hyperref[subsec:ExitCodePostcondition]{\python{ExitCodePostcondition}} $\rightarrow$ \code{EXT}
postcondition.
Returns the \code{EXT} exit status if the archive type is not recognized.
\end{itemize}
|
{"hexsha": "5d2fa484a923b6b9e90233cb9dadaa43ffc87442", "size": 15443, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/sections/steps_commands.tex", "max_stars_repo_name": "zielinskit/kolejka-judge", "max_stars_repo_head_hexsha": "571df05b12c5a4748d7a2ca4c217b0042acf6b48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-29T11:00:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-08T19:27:58.000Z", "max_issues_repo_path": "doc/sections/steps_commands.tex", "max_issues_repo_name": "zielinskit/kolejka-judge", "max_issues_repo_head_hexsha": "571df05b12c5a4748d7a2ca4c217b0042acf6b48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-01T08:10:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-01T10:09:57.000Z", "max_forks_repo_path": "doc/sections/steps_commands.tex", "max_forks_repo_name": "Raalsky/kolejka-judge", "max_forks_repo_head_hexsha": "4fa42d9b9a52a94cd8dc57a99218b32d0e8fc18f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-08T19:32:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T19:32:09.000Z", "avg_line_length": 46.515060241, "max_line_length": 121, "alphanum_fraction": 0.7012886097, "num_tokens": 3808}
|
from datetime import datetime
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Question 1
df = pd.read_excel('Covid19IndiaData_30032020.xlsx')
MAX = max(df['Age']) + 1
infected = [0] * MAX
recovered = [0] * MAX
dead = [0] * MAX
infected_males = [0] * MAX
infected_females = [0] * MAX
avg_inf = var_inf = avg_recd = var_recd = avg_dead = var_dead = 0
for i in df.index:
infected[df['Age'][i]] += 1
if df['StatusCode'][i] == 'Recovered':
recovered[df['Age'][i]] += 1
elif df['StatusCode'][i] == 'Dead':
dead[df['Age'][i]] += 1
if df['GenderCode0F1M'][i] == 1:
infected_males[df['Age'][i]] += 1
else:
infected_females[df['Age'][i]] += 1
sinc = sum(infected)
srec = sum(recovered)
sd = sum(dead)
sim = sum(infected_males)
sif = sum(infected_females)
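# Normalize the per-age counts into probability mass functions, accumulating
# E[X] and E[X^2] along the way; the variance then follows as E[X^2] - E[X]^2.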
for i in range(MAX):
infected[i] /= sinc
recovered[i] /= srec
dead[i] /= sd
infected_males[i] /= sim
infected_females[i] /= sif
avg_inf += infected[i] * i
avg_recd += recovered[i] * i
avg_dead += dead[i] * i
var_inf += infected[i] * i ** 2
var_recd += recovered[i] * i ** 2
var_dead += dead[i] * i ** 2
var_inf -= avg_inf ** 2
var_recd -= avg_recd ** 2
var_dead -= avg_dead ** 2
print('Expected Age of Infected Patients =', avg_inf, 'years')
print('Variance of Age of Infected Patients =', var_inf)
print('Expected Age of Recovered Patients =', avg_recd, 'years')
print('Variance of Age of Recovered Patients =', var_recd)
print('Expected Age of Dead Patients =', avg_dead, 'years')
print('Variance of Age of Dead Patients =', var_dead)
plt.bar(range(MAX), infected)
plt.title('Infected Cases')
plt.xlabel('Age')
plt.ylabel('P(Infected Cases)')
plt.show()
plt.bar(range(MAX), recovered)
plt.title('Recovered Cases')
plt.xlabel('Age')
plt.ylabel('P(Recovered Cases)')
plt.show()
plt.bar(range(MAX), dead)
plt.title('Death Cases')
plt.xlabel('Age')
plt.ylabel('P(Death Cases)')
plt.show()
fig, ax = plt.subplots()
X = np.arange(MAX)
W = 0.5
ax.bar(X - W / 2, infected_males, W, label='Male')
ax.bar(X + W / 2, infected_females, W, label='Female')
ax.set_title('Infected Cases w.r.t Gender')
ax.set_xlabel('Age')
ax.set_ylabel('P(Infected Cases)')
ax.legend()
plt.show()
# Question 2
df = pd.read_excel('linton_supp_tableS1_S2_8Feb2020.xlsx')
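# The first spreadsheet row holds the real column names; promote it to the
# header and drop it from the data.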
df.columns = df.iloc[0]
df = df.reindex(df.index.drop(0)).reset_index(drop=True)
df.columns.name = None
incubation_period = {}
excluded_incubation = {}
onset_hospitalization = {}
hospitalization_death = {}
onset_death = {}
avg_inc = 0
var_inc = 0
avg_inc_ex = 0
var_inc_ex = 0
for i in df.index:
if type(df['Onset'][i]) is datetime:
if type(df['ExposureL'][i]) is datetime:
try:
incubation_period[(df['Onset'][i] - df['ExposureL'][i]).days] += 1
except KeyError:
incubation_period[(df['Onset'][i] - df['ExposureL'][i]).days] = 1
if df['ExposureType'][i] in {'Contact with Hubei', 'Contact with case', 'Travel to Hubei',
'Travel to Wuhan', 'Contact with Wuhan resident'}:
try:
excluded_incubation[(df['Onset'][i] - df['ExposureL'][i]).days] += 1
except KeyError:
excluded_incubation[(df['Onset'][i] - df['ExposureL'][i]).days] = 1
if type(df['DateHospitalizedIsolated'][i]) is datetime:
try:
onset_hospitalization[(df['DateHospitalizedIsolated'][i] - df['Onset'][i]).days] += 1
except KeyError:
onset_hospitalization[(df['DateHospitalizedIsolated'][i] - df['Onset'][i]).days] = 1
if type(df['DateReportedConfirmed'][i]) is datetime:
try:
onset_death[(df['DateReportedConfirmed'][i] - df['Onset'][i]).days] += 1
except KeyError:
onset_death[(df['DateReportedConfirmed'][i] - df['Onset'][i]).days] = 1
try:
hospitalization_death[
(df['DateReportedConfirmed'][i] - df['DateHospitalizedIsolated'][i]).days] += 1
except KeyError:
hospitalization_death[(df['DateReportedConfirmed'][i] - df['DateHospitalizedIsolated'][i]).days] = 1
sinp = sum(incubation_period.values())
sine = sum(excluded_incubation.values())
soh = sum(onset_hospitalization.values())
sod = sum(onset_death.values())
shd = sum(hospitalization_death.values())
for i in incubation_period:
incubation_period[i] /= sinp
avg_inc += incubation_period[i] * i
var_inc += incubation_period[i] * i ** 2
for i in excluded_incubation:
excluded_incubation[i] /= sine
avg_inc_ex += excluded_incubation[i] * i
var_inc_ex += excluded_incubation[i] * i ** 2
for i in onset_hospitalization:
onset_hospitalization[i] /= soh
for i in onset_death:
onset_death[i] /= sod
for i in hospitalization_death:
hospitalization_death[i] /= shd
var_inc -= avg_inc ** 2
var_inc_ex -= avg_inc_ex ** 2
print('Expected Incubation Period =', avg_inc, 'days')
print('Variance of Incubation Period =', var_inc)
print('Expected Incubation Period by Excluding Wuhan Residents =', avg_inc_ex, 'days')
print('Variance of Incubation Period by Excluding Wuhan Residents =', var_inc_ex)
plt.bar(incubation_period.keys(), incubation_period.values())
plt.title('Incubation Period')
plt.xlabel('Incubation Period')
plt.ylabel('P(Incubation Period)')
plt.show()
plt.bar(onset_hospitalization.keys(), onset_hospitalization.values())
plt.title('Onset to Hospitalization')
plt.xlabel('Days')
plt.ylabel('P(Onset to Hospitalization)')
plt.show()
plt.bar(onset_death.keys(), onset_death.values())
plt.title('Onset to Death')
plt.xlabel('Days')
plt.ylabel('P(Onset to Death)')
plt.show()
plt.bar(hospitalization_death.keys(), hospitalization_death.values())
plt.title('Hospitalization to Death')
plt.xlabel('Days')
plt.ylabel('P(Hospitalization to Death)')
plt.show()
|
{"hexsha": "c10d638a43ac88945bba27909add110f01321c2f", "size": 5983, "ext": "py", "lang": "Python", "max_stars_repo_path": "IC252/Lab 6/Analyse_Covid_Data.py", "max_stars_repo_name": "anu2kool/MyPythonScripts", "max_stars_repo_head_hexsha": "954312e3a9422620056af145faa041cba5624329", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-11T23:06:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-11T23:06:34.000Z", "max_issues_repo_path": "IC252/Lab 6/Analyse_Covid_Data.py", "max_issues_repo_name": "anu2kool/MyPythonScripts", "max_issues_repo_head_hexsha": "954312e3a9422620056af145faa041cba5624329", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IC252/Lab 6/Analyse_Covid_Data.py", "max_forks_repo_name": "anu2kool/MyPythonScripts", "max_forks_repo_head_hexsha": "954312e3a9422620056af145faa041cba5624329", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-01T06:57:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T06:57:12.000Z", "avg_line_length": 31.9946524064, "max_line_length": 120, "alphanum_fraction": 0.6439913087, "include": true, "reason": "import numpy", "num_tokens": 1693}
|
# State-dependent version of the Q(\sigma) algorithm in the control task
# The Stochastic Windy Grid world from DeAsis et al.(2018)
import numpy as np
gamma, epsilon, N_x, N_y, N_a, Reward, N_episodes, N_runs=1, 0.1, 6, 9, 4, -1, 100, 100
i_start,j_start,i_end,j_end=3,0,3,7
wind=np.array([0,0,0,1,1,1,2,2,1,0])
actions=np.array([0,1,2,3])
state_start=np.array([i_start,j_start])
state_end=np.array([i_end,j_end])
N_models=5 # Includes static versions of the Q(\sigma) algorithm Q(0), Q(0.5), Q(1) and dynamic versions of the Q(\sigma) algorithm
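# Model index mapping (see the sigma/beta settings below): 0 -> Q(0) (tree-backup),
# 1 -> Q(0.5), 2 -> Q(1) (Sarsa), 3 -> dynamic episode-dependent sigma,
# 4 -> dynamic state-dependent sigma.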
alpha_param=np.arange(0.1,1.1,0.1)
n=3
# Define the stochastic Grid World Environment
def move(state,action):
i=state[0]
j=state[1]
if (np.random.binomial(1,0.9)):
#Shift by action
if (action==0):
i=np.max([i-1,0])
if (action==1):
j=np.min([j+1,N_y])
if (action==2):
i=np.min([i+1,N_x])
if (action==3):
j=np.max([j-1,0])
else:
#Random shift to
r_i=np.random.choice([-1,0,1])
r_j=np.random.choice([-1,0,1])
while (r_i==0 and r_j==0):
r_i=np.random.choice([-1,0,1])
r_j=np.random.choice([-1,0,1])
i=np.max([np.min([i+r_i,N_x]),0])
j=np.max([np.min([j+r_j,N_y]),0])
#Shift by wind blow
i=np.max([i-wind[j],0])
return np.array([i,j])
def select_action(Q,state):
# Simulate action using the epsilon-greedy policy
if (np.random.binomial(1,epsilon)):
return np.random.choice(actions)
else:
return np.random.choice(np.where(Q[state[0],state[1],:]==np.max(Q[state[0],state[1],:]))[0])
def expected_value(Q,state):
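# Expected Q-value of the state under the epsilon-greedy behaviour policy;
# this is the expectation term weighted by (1 - sigma) in the Q(sigma) update.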
# Find optimal actions
optimal_actions=np.where(Q[state[0],state[1],:]==np.max(Q[state[0],state[1],:]))[0]
probs=np.ones(len(actions))*epsilon/len(actions)
probs[optimal_actions]+=(1-epsilon)/len(optimal_actions)
return np.dot(Q[state[0],state[1],:],probs)
Average_reward=np.zeros((N_models,len(alpha_param),N_runs))
for i in range(3,N_models):
for j in range(len(alpha_param)):
alpha=alpha_param[j]
if (i==2 and j>6): continue
if (i==3 and j>6): continue
print('Model=',i,'alpha=',alpha)
np.random.seed(1)
for run in range(N_runs):
Average_reward_per_episode=0
Q=np.zeros((N_x+1,N_y+1,N_a))
if (i==0):
sigma_param=0
beta=1
elif (i==1):
sigma_param=0.5
beta=1
elif (i==2):
sigma_param=1
beta=1
elif (i==3):
sigma_param=1
beta=0.99
elif (i==4):
sigma_param=np.ones((N_x+1,N_y+1))
beta=0.99
for episode in range(N_episodes):
state=np.copy(state_start)
action=select_action(Q,state)
S=[state]
A=np.array([action],dtype=int)
TD_delta=np.array([])
Total_reward=0
T=np.Inf
t=0
stop_tau=0
while not stop_tau:
if (t<T):
next_state=move(state,action)
S.append(next_state)
if (next_state[0]==state_end[0] and next_state[1]==state_end[1]):
T=t+1
R=0
TD_delta=np.append(TD_delta,R-Q[state[0],state[1],action])
else:
R=-1
V=expected_value(Q,next_state)
next_action=select_action(Q,next_state)
A=np.append(A,next_action)
if (i!=4):
sigma=sigma_param
else:
sigma=sigma_param[next_state[0],next_state[1]]
sigma_param[next_state[0],next_state[1]]*=beta
TD_delta=np.append(TD_delta,R+gamma*(sigma*Q[next_state[0],next_state[1],next_action]+(1-sigma)*V)-Q[state[0],state[1],action])
action=next_action
state=next_state
Total_reward+=R
tau=t-n+1
if (tau>=0):
E=1
G=Q[S[tau][0],S[tau][1],A[tau]]
for k in range(tau,min([t,T-1])+1):
G+=E*TD_delta[k]
if (k<T-1):
if (i!=4):
sigma=sigma_param
else:
sigma=sigma_param[S[k][0],S[k][1]]
E*=gamma*((1-sigma)*0.5+sigma)
Q[S[tau][0],S[tau][1],A[tau]]+=alpha*(G-Q[S[tau][0],S[tau][1],A[tau]])
if (tau==T-1): stop_tau=1
t+=1
if (i!=4): sigma_param*=beta
Average_reward_per_episode+=(Total_reward-Average_reward_per_episode)/(episode+1)
Average_reward[i,j,run]=Average_reward_per_episode
import matplotlib.pyplot as plt
fig, ax= plt.subplots()
leg="Dynamic episode-dependent $\sigma$"
ax.plot(alpha_param[0:7],np.mean(Average_reward[3,0:8,],axis=1), color='blue', label=leg)
leg="Dynamic state-dependent $\sigma$"
ax.plot(alpha_param,np.mean(Average_reward[4,:],axis=1), color='red', label=leg)
ax.legend()
ax.set_ylim(-110, -60)
ax.set_xlabel('Step size')
ax.set_ylabel('Average Return per Episode')
ax.set_title('Stochastic Windy Gridworld')
print('Average standard deviation dynamic sigma(episode):', np.mean(np.std(Average_reward[3,0:7,],axis=1)))
print('Average standard deviation dynamic sigma(state):',np.mean(np.std(Average_reward[4,0:8,],axis=1)))
#leg="Q(0), Tree-backup"
#ax.plot(alpha_param,Average_reward[0,:], color='black', label=leg)
#leg="Q(0.5)"
#ax.plot(alpha_param,Average_reward[1,:], color='green', label=leg)
#leg="Q(1), Sarsa"
#ax.plot(alpha_param[0:7],Average_reward[2,0:7], color='orange', label=leg)
|
{"hexsha": "54988d1256b82ce93239953ef66cc2b844fd354f", "size": 6328, "ext": "py", "lang": "Python", "max_stars_repo_path": "Stochastic_Windy_Gridworld.py", "max_stars_repo_name": "NikolayGudkov/Unifying-algorithms-for-multi-step-reinforcement-learning", "max_stars_repo_head_hexsha": "4195234e1f89413a0b63c83c656e1cbed5e0d118", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Stochastic_Windy_Gridworld.py", "max_issues_repo_name": "NikolayGudkov/Unifying-algorithms-for-multi-step-reinforcement-learning", "max_issues_repo_head_hexsha": "4195234e1f89413a0b63c83c656e1cbed5e0d118", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stochastic_Windy_Gridworld.py", "max_forks_repo_name": "NikolayGudkov/Unifying-algorithms-for-multi-step-reinforcement-learning", "max_forks_repo_head_hexsha": "4195234e1f89413a0b63c83c656e1cbed5e0d118", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3515151515, "max_line_length": 155, "alphanum_fraction": 0.5052149178, "include": true, "reason": "import numpy", "num_tokens": 1664}
|
import numpy as np
import pandas as pd
from matplotlib import gridspec
from matplotlib import pyplot as plt
from abc import ABCMeta, abstractmethod
from sklearn.utils.extmath import softmax
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_X_y
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
import rpy2
from rpy2 import robjects as ro
from rpy2.robjects import Formula
from rpy2.robjects.packages import importr
from rpy2.robjects import numpy2ri, pandas2ri
numpy2ri.activate()
pandas2ri.activate()
try:
bigsplines = importr("bigsplines")
except Exception:
utils = importr("utils")
utils.install_packages("bigsplines")
bigsplines = importr("bigsplines")
EPSILON = 1e-7
__all__ = ["SMSplineRegressor"]
class BaseSMSpline(BaseEstimator, metaclass=ABCMeta):
@abstractmethod
def __init__(self, knot_num=10, knot_dist="quantile", degree=3, reg_gamma=0.1, xmin=-1, xmax=1):
self.knot_num = knot_num
self.knot_dist = knot_dist
self.degree = degree
self.reg_gamma = reg_gamma
self.xmin = xmin
self.xmax = xmax
def _estimate_density(self, x):
"""method to estimate the density of input data
Parameters
---------
x : array-like of shape (n_samples, n_features)
containing the input dataset
"""
self.density_, self.bins_ = np.histogram(x, bins=10, density=True)
def _validate_hyperparameters(self):
"""method to validate model parameters
"""
if not isinstance(self.knot_num, int):
raise ValueError("knot_num must be an integer, got %s." % self.knot_num)
if self.knot_num <= 0:
raise ValueError("knot_num must be > 0, got" % self.knot_num)
if self.knot_dist not in ["uniform", "quantile"]:
raise ValueError("method must be an element of [uniform, quantile], got %s." % self.knot_dist)
if not isinstance(self.degree, int):
raise ValueError("degree must be an integer, got %s." % self.degree)
elif self.degree not in [1, 3]:
raise ValueError("degree must be 1 or 3, got" % self.degree)
if not isinstance(self.reg_gamma, str):
if (self.reg_gamma < 0) or (self.reg_gamma > 1):
raise ValueError("reg_gamma must be GCV or >= 0 and <1, got %s" % self.reg_gamma)
elif self.reg_gamma not in ["GCV"]:
raise ValueError("reg_gamma must be GCV or >= 0 and <1, got %s." % self.reg_gamma)
if self.xmin > self.xmax:
raise ValueError("xmin must be <= xmax, got %s and %s." % (self.xmin, self.xmax))
def diff(self, x, order=1):
"""method to calculate derivatives of the fitted adaptive spline to the input
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
order : int
order of derivative
"""
modelspec = self.sm_[int(np.where(self.sm_.names == "modelspec")[0][0])]
knots = np.array(modelspec[0])
coefs = np.array(modelspec[11]).reshape(-1, 1)
basis = bigsplines.ssBasis((x - self.xmin) / (self.xmax - self.xmin), knots, d=order,
xmin=0, xmax=1, periodic=False, intercept=True)
derivative = np.dot(basis[0], coefs).ravel()
return derivative
def visualize(self):
"""draw the fitted shape function
"""
check_is_fitted(self, "sm_")
fig = plt.figure(figsize=(6, 4))
inner = gridspec.GridSpec(2, 1, hspace=0.1, height_ratios=[6, 1])
ax1_main = plt.Subplot(fig, inner[0])
xgrid = np.linspace(self.xmin, self.xmax, 100).reshape([-1, 1])
ygrid = self.decision_function(xgrid)
ax1_main.plot(xgrid, ygrid)
ax1_main.set_xticklabels([])
ax1_main.set_title("Shape Function", fontsize=12)
fig.add_subplot(ax1_main)
ax1_density = plt.Subplot(fig, inner[1])
xint = ((np.array(self.bins_[1:]) + np.array(self.bins_[:-1])) / 2).reshape([-1, 1]).reshape([-1])
ax1_density.bar(xint, self.density_, width=xint[1] - xint[0])
ax1_main.get_shared_x_axes().join(ax1_main, ax1_density)
ax1_density.set_yticklabels([])
ax1_density.autoscale()
fig.add_subplot(ax1_density)
plt.show()
def decision_function(self, x):
"""output f(x) for given samples
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
Returns
-------
np.array of shape (n_samples,)
containing f(x)
"""
check_is_fitted(self, "sm_")
x = x.copy()
x[x < self.xmin] = self.xmin
x[x > self.xmax] = self.xmax
if isinstance(self.sm_, np.ndarray):
pred = self.sm_ * np.ones(x.shape[0])
elif isinstance(self.sm_, float):
pred = self.sm_ * np.ones(x.shape[0])
else:
if "family" in self.sm_.names:
pred = bigsplines.predict_bigssg(self.sm_, ro.r("data.frame")(x=x))[1]
if "family" not in self.sm_.names:
pred = bigsplines.predict_bigssa(self.sm_, ro.r("data.frame")(x=x))
return pred
class SMSplineRegressor(BaseSMSpline, RegressorMixin):
"""Base class for Smoothing Spline regression.
Details:
1. This is an API for the well-known R package `bigsplines`, and we call the function bigssa through rpy2 interface.
2. During prediction, the data which is outside of the given `xmin` and `xmax` will be clipped to the boundary.
Parameters
----------
knot_num : int, optional. default=10
the number of knots
knot_dist : str, optional. default="quantile"
the distribution of knots
"uniform": uniformly over the domain
"quantile": uniform quantiles of the given input data
degree : int, optional. default=3
the order of the spline, possible values include 1 and 3
reg_gamma : float, optional. default=0.1
the roughness penalty strength of the spline algorithm, range from 0 to 1; it can also be set to "GCV".
xmin : float, optional. default=-1
the min boundary of the input
xmax : float, optional. default=1
the max boundary of the input
"""
def __init__(self, knot_num=10, knot_dist="quantile", degree=3, reg_gamma=0.1, xmin=-1, xmax=1):
super(SMSplineRegressor, self).__init__(knot_num=knot_num,
knot_dist=knot_dist,
degree=degree,
reg_gamma=reg_gamma,
xmin=xmin,
xmax=xmax)
def _validate_input(self, x, y):
"""method to validate data
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
y : array-like of shape (n_samples,)
containing the output dataset
"""
x, y = check_X_y(x, y, accept_sparse=["csr", "csc", "coo"],
multi_output=True, y_numeric=True)
return x, y.ravel()
def get_loss(self, label, pred, sample_weight=None):
"""method to calculate the cross entropy loss
Parameters
---------
label : array-like of shape (n_samples,)
containing the input dataset
pred : array-like of shape (n_samples,)
containing the output dataset
sample_weight : array-like of shape (n_samples,), optional
containing sample weights
Returns
-------
float
the mean squared error loss
"""
loss = np.average((label - pred) ** 2, axis=0, weights=sample_weight)
return loss
def fit(self, x, y, sample_weight=None):
"""fit the smoothing spline
Parameters
---------
x : array-like of shape (n_samples, n_features)
containing the input dataset
y : array-like of shape (n_samples,)
containing target values
sample_weight : array-like of shape (n_samples,), optional
containing sample weights
Returns
-------
object
self : Estimator instance.
"""
self._validate_hyperparameters()
x, y = self._validate_input(x, y)
self._estimate_density(x)
n_samples = x.shape[0]
if sample_weight is None:
sample_weight = np.ones(n_samples)
else:
sample_weight = np.round(sample_weight / np.sum(sample_weight) * n_samples, 4)
# The minimal value of sample weight in bigsplines is 0.005.
sample_weight[sample_weight <= 0.005] = 0.0051
if self.knot_dist == "uniform":
knots = list(np.linspace(self.xmin, self.xmax, self.knot_num + 2, dtype=np.float32))[1:-1]
knot_idx = [(np.abs(x - i)).argmin() + 1 for i in knots]
elif self.knot_dist == "quantile":
knots = np.quantile(x, list(np.linspace(0, 1, self.knot_num + 2, dtype=np.float32)))[1:-1]
knot_idx = [(np.abs(x - i)).argmin() + 1 for i in knots]
unique_num = len(np.unique(x.round(decimals=6)))
if unique_num <= 1:
self.sm_ = np.mean(y)
else:
kwargs = {"formula": Formula('y ~ x'),
"nknots": knot_idx,
"lambdas": ro.r("NULL") if self.reg_gamma == "GCV" else self.reg_gamma,
"rparm": 1e-6,
"type": "lin" if self.degree==1 else "cub",
"data": pd.DataFrame({"x":x.ravel(), "y":y.ravel()}),
"weights": pd.DataFrame({"w":sample_weight})["w"]}
self.sm_ = bigsplines.bigssa(**kwargs)
return self
def predict(self, x):
"""output f(x) for given samples
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
Returns
-------
np.array of shape (n_samples,)
containing f(x)
"""
pred = self.decision_function(x)
return pred
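# Illustrative usage sketch (assumes R and the `bigsplines` package are
# installed and reachable through rpy2; the data and settings are made up):
#
#     x = np.linspace(-1, 1, 200).reshape(-1, 1)
#     y = np.sin(np.pi * x).ravel() + np.random.normal(0, 0.1, 200)
#     model = SMSplineRegressor(knot_num=10, reg_gamma="GCV")
#     model.fit(x, y)
#     y_hat = model.predict(x)
#     model.visualize()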
|
{"hexsha": "b454731de7f6aa7a081b7b13d3ba9938fcbc2f41", "size": 10510, "ext": "py", "lang": "Python", "max_stars_repo_path": "statsgaim/smspline_bigspline.py", "max_stars_repo_name": "SelfExplainML/GAIM", "max_stars_repo_head_hexsha": "320184ff3e0ddd9bc031dfddfd3d30c342421d8f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-10T07:10:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-28T16:49:53.000Z", "max_issues_repo_path": "statsgaim/smspline_bigspline.py", "max_issues_repo_name": "kiminh/GAIM", "max_issues_repo_head_hexsha": "320184ff3e0ddd9bc031dfddfd3d30c342421d8f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "statsgaim/smspline_bigspline.py", "max_forks_repo_name": "kiminh/GAIM", "max_forks_repo_head_hexsha": "320184ff3e0ddd9bc031dfddfd3d30c342421d8f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-16T11:38:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-27T07:05:11.000Z", "avg_line_length": 34.1233766234, "max_line_length": 120, "alphanum_fraction": 0.5776403425, "include": true, "reason": "import numpy", "num_tokens": 2564}
|
n = parse(Int, readline())
arr = parse.(Int,split(readline()))
numswaps = 0
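# Bubble sort: repeatedly swap adjacent out-of-order elements, counting the swaps.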
for i = 1:n
for j = 1:n-1
if arr[j] > arr[j+1]
arr[j], arr[j+1] = arr[j+1], arr[j]  # swap the adjacent out-of-order pair
numswaps += 1
end
end
end
print("Array is sorted in $(numswaps) swaps.",'\n')
print("First Element: $(arr[1])",'\n')
print("Last Element: $(arr[end])",'\n')
|
{"hexsha": "1ba4eb6aa25d59fc713b80f74c531bc7a639408f", "size": 424, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Hackerrank/30 Days of Code/Julia/day 20.jl", "max_stars_repo_name": "Next-Gen-UI/Code-Dynamics", "max_stars_repo_head_hexsha": "a9b9d5e3f27e870b3e030c75a1060d88292de01c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Hackerrank/30 Days of Code/Julia/day 20.jl", "max_issues_repo_name": "Next-Gen-UI/Code-Dynamics", "max_issues_repo_head_hexsha": "a9b9d5e3f27e870b3e030c75a1060d88292de01c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Hackerrank/30 Days of Code/Julia/day 20.jl", "max_forks_repo_name": "Next-Gen-UI/Code-Dynamics", "max_forks_repo_head_hexsha": "a9b9d5e3f27e870b3e030c75a1060d88292de01c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5555555556, "max_line_length": 51, "alphanum_fraction": 0.5094339623, "num_tokens": 141}
|
'''
'''
import json
import logging
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from PIL import Image
import numpy as np
import cv2
from ..baseStrategy import baseStrategy
from ....common import id2rgb, write_to_json
logger = logging.getLogger("superannotate-python-sdk")
class CocoBaseStrategy(baseStrategy):
project_type_to_json_ending = {
'pixel': '___pixel.json',
'vector': '___objects.json'
}
def __init__(self, args):
self.total_images_num = 0
super().__init__(args)
def set_num_total_images(self, num):
self.total_images_num = num
def get_num_total_images(self):
return self.total_images_num
def _create_categories(self, path_to_classes):
classes = None
s_class = namedtuple('Class', ['class_name', 'id'])
with open(path_to_classes, 'r') as fp:
classes = json.load(fp)
categories = [
self._create_single_category(s_class(item, classes[item]))
for item in classes
]
return categories
def _create_single_category(self, item):
category = {
'id': item.id,
'name': item.class_name,
'supercategory': item.class_name,
'isthing': 1,
'color': id2rgb(item.id)
}
return category
def _make_id_generator(self):
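"""Yield consecutive integer ids starting from 1."""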
cur_id = 0
while True:
cur_id += 1
yield cur_id
def _create_skeleton(self):
out_json = {
'info':
{
'description':
'This is {} dataset.'.format(self.dataset_name),
'url':
'https://superannotate.ai',
'version':
'1.0',
'year':
datetime.now().year,
'contributor':
'Superannotate AI',
'date_created':
datetime.now().strftime("%d/%m/%Y")
},
'licenses':
[
{
'url': 'https://superannotate.ai',
'id': 1,
'name': 'Superannotate AI'
}
],
'images': [],
'annotations': [],
'categories': []
}
return out_json
def convert_from_old_sa_to_new(self, old_json_data, project_type):
new_json_data = {
"metadata": {},
"instances": [],
"tags": [],
"comments": []
}
meta_keys = [
"name", "width", "height", "status", "pinned", "isPredicted",
"projectId", "annotatorEmail", "qaEmail"
]
if project_type == "pixel":
meta_keys.append("isSegmented")
new_json_data["metadata"] = dict.fromkeys(meta_keys)
metadata = new_json_data["metadata"]
for item in old_json_data:
object_type = item.get("type")
#add metadata
if object_type == "meta":
meta_name = item["name"]
if meta_name == "imageAttributes":
metadata["height"] = item.get("height")
metadata["width"] = item.get("width")
metadata["status"] = item.get("status")
metadata["pinned"] = item.get("pinned")
if meta_name == "lastAction":
metadata["lastAction"] = dict.fromkeys(
["email", "timestamp"]
)
metadata["lastAction"]["email"] = item.get("userId")
metadata["lastAction"]["timestamp"] = item.get("timestamp")
#add tags
elif object_type == "tag":
new_json_data["tags"].append(item.get("name"))
#add comments
elif object_type == "comment":
item.pop("type")
item["correspondence"] = item["comments"]
for comment in item["correspondence"]:
comment["email"] = comment["id"]
comment.pop("id")
item.pop("comments")
new_json_data["comments"].append(item)
#add instances
else:
new_json_data["instances"].append(item)
return new_json_data
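# Hedged example of the old -> new mapping (values illustrative): a list such as
#   [{"type": "meta", "name": "imageAttributes", "height": 600, "width": 800},
#    {"type": "tag", "name": "vehicle"},
#    {"type": "bbox", "className": "car", ...}]
# becomes {"metadata": {..., "height": 600, "width": 800, ...}, "tags": ["vehicle"],
#          "comments": [], "instances": [{"type": "bbox", "className": "car", ...}]}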
def _parse_json_into_common_format(self, sa_annotation_json, fpath):
"""
If the annotation format ever changes this function will handle it and
return something optimal for the converters. Additionally, if anything
important is absent from the current json, this function fills it.
"""
if isinstance(sa_annotation_json, list):
sa_annotation_json = self.convert_from_old_sa_to_new(
sa_annotation_json, self.project_type
)
if 'metadata' not in sa_annotation_json:
sa_annotation_json['metadata'] = {}
if 'tags' not in sa_annotation_json:
sa_annotation_json['tags'] = []
if 'instances' not in sa_annotation_json:
sa_annotation_json['instances'] = []
if 'comments' not in sa_annotation_json:
sa_annotation_json['comments'] = []
# derive the image name from the json file name when it is missing
if sa_annotation_json['metadata'].get('name') is None:
ending = self.project_type_to_json_ending[self.project_type.lower()]
sa_annotation_json['metadata']['name'] = fpath.name[:-len(ending)]
sa_annotation_json['metadata']['image_path'] = str(
Path(fpath).parent / sa_annotation_json['metadata']['name']
)
sa_annotation_json['metadata']['annotation_json'] = fpath
if self.task == 'panoptic_segmentation':
panoptic_mask = str(
Path(self.export_root) /
(sa_annotation_json['metadata']['name'] + '.png')
)
sa_annotation_json['metadata']['panoptic_mask'] = panoptic_mask
if self.project_type == 'Pixel':
sa_annotation_json['metadata']['sa_bluemask_path'] = str(
Path(self.export_root) /
(sa_annotation_json['metadata']['name'] + '___save.png')
)
if not isinstance(
sa_annotation_json['metadata'].get('height', None), int
) or not isinstance(
sa_annotation_json['metadata'].get('width', None), int
):
image_height, image_width = self.get_image_dimensions(
sa_annotation_json['metadata']['image_path']
)
sa_annotation_json['metadata']['height'] = image_height
sa_annotation_json['metadata']['width'] = image_width
return sa_annotation_json
def get_image_dimensions(self, image_path):
img_height = None
img_width = None
img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
if img is not None:
dimensions = img.shape
img_height, img_width = (dimensions[0], dimensions[1])
else:
try:
img = Image.open(image_path)
img_width, img_height = img.size  # PIL's Image.size is a property, not a method
except Exception:
raise
return img_height, img_width
def _prepare_single_image_commons_pixel(self, id_, metadata):
ImgCommons = namedtuple(
'ImgCommons',
['image_info', 'ann_mask', 'sa_bluemask_rgb', 'flat_mask']
)
sa_bluemask_path = metadata['sa_bluemask_path']
image_info = self._make_image_info(
metadata['name'], metadata['height'], metadata['width'], id_
)
sa_bluemask_rgb = np.asarray(
Image.open(sa_bluemask_path).convert('RGB'), dtype=np.uint32
)
ann_mask = np.zeros(
(image_info['height'], image_info['width']), dtype=np.uint32
)
# pack the RGB channels into a single 24-bit integer per pixel (inverse of id2rgb)
flat_mask = (
(sa_bluemask_rgb[:, :, 0] << 16)
| (sa_bluemask_rgb[:, :, 1] << 8)
| sa_bluemask_rgb[:, :, 2]
)
res = ImgCommons(image_info, ann_mask, sa_bluemask_rgb, flat_mask)
return res
def _prepare_single_image_commons_vector(self, id_, metadata):
ImgCommons = namedtuple('ImgCommons', ['image_info'])
image_info = self._make_image_info(
metadata['name'], metadata['height'], metadata['width'], id_
)
res = ImgCommons(image_info)
return res
def _prepare_single_image_commons(self, id_, metadata):
res = None
if self.project_type == 'Pixel':
res = self._prepare_single_image_commons_pixel(id_, metadata)
elif self.project_type == 'Vector':
res = self._prepare_single_image_commons_vector(id_, metadata)
return res
def _make_image_info(self, pname, pheight, pwidth, id_):
image_info = {
'id': id_,
'file_name': pname,
'height': pheight,
'width': pwidth,
'license': 1
}
return image_info
def _create_sa_classes(self, json_path):
with open(json_path) as fp:
json_data = json.load(fp)
classes_list = json_data["categories"]
classes = []
for data in classes_list:
color = np.random.choice(range(256), size=3)
hexcolor = "#%02x%02x%02x" % tuple(color)
classes_dict = {
'name': data["name"],
'color': hexcolor,
'attribute_groups': []
}
classes.append(classes_dict)
return classes
def to_sa_format(self):
json_data = self.export_root / (self.dataset_name + ".json")
sa_classes = self._create_sa_classes(json_data)
(self.output_dir / 'classes').mkdir(parents=True, exist_ok=True)
write_to_json(self.output_dir / 'classes' / 'classes.json', sa_classes)
self.conversion_algorithm(json_data, self.output_dir)
def make_anno_json_generator(self):
json_data = None
if self.project_type == 'Pixel':
jsons = list(Path(self.export_root).glob('*pixel.json'))
elif self.project_type == 'Vector':
jsons = list(Path(self.export_root).glob('*objects.json'))
self.set_num_total_images(len(jsons))
print()
for fpath in jsons:
with open(fpath, 'r') as fp:
json_data = json.load(fp)
json_data = self._parse_json_into_common_format(
json_data, fpath
)
yield json_data
|
{"hexsha": "9ece458cd6b6c334cdeb47abca3acdae468b2dbe", "size": 10803, "ext": "py", "lang": "Python", "max_stars_repo_path": "superannotate/input_converters/converters/coco_converters/coco_converter.py", "max_stars_repo_name": "dskkato/superannotate-python-sdk", "max_stars_repo_head_hexsha": "67eece2d7d06375ad2e502c2282e3b29c9b82631", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "superannotate/input_converters/converters/coco_converters/coco_converter.py", "max_issues_repo_name": "dskkato/superannotate-python-sdk", "max_issues_repo_head_hexsha": "67eece2d7d06375ad2e502c2282e3b29c9b82631", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "superannotate/input_converters/converters/coco_converters/coco_converter.py", "max_forks_repo_name": "dskkato/superannotate-python-sdk", "max_forks_repo_head_hexsha": "67eece2d7d06375ad2e502c2282e3b29c9b82631", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0366972477, "max_line_length": 81, "alphanum_fraction": 0.5406831436, "include": true, "reason": "import numpy", "num_tokens": 2260}
|
import matplotlib.pyplot as plt
'''
import numpy as np
x=np.array([10,15,20,22.5,30],float)
y=np.array([227.04,362.78,517.35,602.97,901.67],float)
# plt.plot(x,y)
# plt.show()
x1=[-1,0,1,2]
y1=[3,-4,5,6]
plt.plot(x1,y1)
plt.show()
x2=[1,2,3,4,5,6,7]
y2=[-1.5,-1,0.5,0.25,1,1.65,2.5]
plt.plot(x2,y2)
# plt.show()
plt.show()
'''
'''
n=len(x)
# p=np.zeros([n,5])
p=np.zeros([n,n+1])
# p[:,0]=y
nk=60
xi=0
xf=30
xdiff=(xf-xi)/nk
for i in range(n):
# for i in range(n):
p[i,0]=x[i]
p[i,1]=y[i]
# print(p)
'''
'''
## If you want to use this piece of code for creating the table for NDDP
##then use 1 instead of 2 while defining k and also remove p[i,0]=x[i] above.
# print(p)
for l in range(n):
for k in range(2,n-l):
p[l,k]=(p[l+1,k-1]-p[l,k-1])/(x[l+1]-x[l])
'''
'''
for i in range(2,n+1): #column
for j in range(n+1-i):# defines row
p[j,i]=(p[j+1,i-1]-p[j,i-1])/(x[j+i-1]-x[j])
'''
'''
b=p[0][1:n+1]
m=len(b)
l=len(x)
# x_new=x[0:n+1]
def product(l,value,x):
prod=1
for k in range(l):
prod= prod*(value-x[k])
return prod
def fun(value):
f =b[0]
for i in range(1,m):
f+=b[i]*(value-x[i-1])
return f
# print("Newton divided difference tree table is ")
np.set_printoptions(suppress=True)
# print(p)
## b contains all the differences b0,b1,b2,b3(these are unknown coefficients)
print("The coefficients' vector is ")
print(b)
print(product(4,20,x))
# print(b)
# value=float(input("Enter the value of data point "))
# func= fun(value)
# print("The value of the function at ", value, " is", fun(20))
print(y)
print(fun(20))
'''
## Newton Divided Difference Polynomial Interpolation Method
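## Background: for data points (x0,y0),...,(x_{n-1},y_{n-1}) the Newton form of the
## interpolating polynomial is
##   f(x) = b0 + b1*(x-x0) + b2*(x-x0)(x-x1) + ... + b_{n-1}*(x-x0)...(x-x_{n-2})
## where bk = f[x0,...,xk] is the k-th divided difference, built recursively as
##   f[xi,...,xj] = (f[x_{i+1},...,xj] - f[xi,...,x_{j-1}]) / (xj - xi)
## The tree table below stores exactly these differences; its top row gives the bk.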
import numpy as np
a = input("Enter the choice of array(input a or b or c): ")
if a=='a':
x=np.array([10,15,20,22.5,30],float)
y=np.array([227.04,362.78,517.35,602.97,901.67],float)
elif a=='b':
x=np.array([-1,0,1,2],float)
y=np.array([3,-4,5,6],float)
elif a=='c':
x=np.array([1,2,2.5,3,4,4.5,5],float)
y=np.array([-1.5,-1,0.5,0.25,1,4.5,5],float)
else:
raise SystemExit("Array doesn't exist!")  # abort here, otherwise x and y are undefined below
n=len(x)
p=np.zeros([n,n+1])#creating a Tree table (n x n+1 array)
value =float(input("Enter the point at which you want to calculate the value of the polynomial: "))
# first two columns of the table are filled with x and y data points
for i in range(n):
p[i,0]=x[i]
p[i,1]=y[i]
## algorithm for tree table from column 2 two n+1
for i in range(2,n+1): #column
for j in range(n+1-i):# defines row
p[j,i]=(p[j+1,i-1]-p[j,i-1])/(x[j+i-1]-x[j])#Tree Table
np.set_printoptions(suppress=True)  ## suppresses scientific (e) notation so values print as plain decimals
print("Newton Divided Difference Tree Table")
print(p)
# print(p) ## can check the complete Tree table here for NDDP
print("----------------------------------")
print("Coefficients of the polynomial are: ")
b=p[0][1:]#This vector contains the unknown coefficients in the polynomial which are the top elements of each column.
print("Coefficient vector= ",b)
print("Data points: ")
print("x= ",x)
print("------------------------------------")
lst=[]  # list where we will append the values of the product terms
t=1
for i in range(len(x)):
t*=(value-x[i]) ##(x-x0), (x-x0)(x-x1), (x-x0)(x-x1)(x-x2) etc..
lst.append(t)
print("The list of product elements: ",lst)
## creating a general function
f=b[0]
for k in range(1,len(b)):
f+=b[k]*lst[k-1]  ## important: index [k-1], not k, because the product list is one step behind
# e.g. b1 pairs with (x-x0), b2 with (x-x0)(x-x1), b3 with (x-x0)(x-x1)(x-x2)
print("\nThe value of polynomial at ",value," of data set ",a,"is: ","%.2f"%f)
'''
def polynomial(value):
coef=b[0]
for k in range(len(b)):
coef+=b[k-1]*(value-x[k-1])
return coef
print(polynomial(value))
'''
def polynomial(value):
n=len(x)
coef=b[n-1]
for i in range(n-2,-1,-1):  # Horner form of the Newton polynomial; starting at n-2 avoids double-counting b[n-1]
coef=coef*(value-x[i])+b[i]
return coef
print(polynomial(5))
##---------Plotting-----------
from shapely.geometry import LineString
# x=np.array([10,15,20,22.5,30],float)
x_axis=np.linspace(max(x), min(x),100)
y_axis=polynomial(x_axis)
# plt.subplot()
plt.plot(x_axis,y_axis, 'k',label="Interpolation")
plt.title("Newton's Divided Difference Polynomial Interpolation")
plt.plot(x,y,'c', label="Given Data-set")
plt.xlabel("x")
plt.ylabel("f(x)")
plt.legend(loc='best')
##this is for marking the points of intersection
first_line = LineString(np.column_stack((x_axis, y_axis)))
second_line = LineString(np.column_stack((x, y)))
intersection = first_line.intersection(second_line)
plt.plot(*LineString(intersection).xy, 'o')
plt.show()
'''
## Another method of creating the tree table
import numpy as np
x=np.array([10,15,20,22.5,30],float)
y=np.array([227.04,362.78,517.35,602.97,901.67],float)
n=len(x)
p=np.zeros([n,n])
p[:,0]=y
np.set_printoptions(suppress=True)
# print(p)
for i in range(1,n):
for j in range(n-i):
p[j,i]=(p[j+1,i-1]-p[j,i-1])/(x[j+i]-x[j])
print(p)
'''
|
{"hexsha": "6edc2723b0afa886cd02f4ca24d9d0f24fe38b76", "size": 5247, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numerical_Methods_Physics/Newton_div_Diff_Poly_Method.py", "max_stars_repo_name": "Simba2805/Computational_Physics_Python", "max_stars_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numerical_Methods_Physics/Newton_div_Diff_Poly_Method.py", "max_issues_repo_name": "Simba2805/Computational_Physics_Python", "max_issues_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numerical_Methods_Physics/Newton_div_Diff_Poly_Method.py", "max_forks_repo_name": "Simba2805/Computational_Physics_Python", "max_forks_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2916666667, "max_line_length": 118, "alphanum_fraction": 0.5948160854, "include": true, "reason": "import numpy", "num_tokens": 1698}
|
import xarray as xr
import numpy as np
from scipy import stats
from os.path import join
from ..settings import *
# compute lat-lon average on both icefields at the same time
def average_icefields_data(npi_dataarray, spi_dataarray):
# reshape arrays for averaging
x = npi_dataarray.values
m2d_npi = np.moveaxis(x,0,-1).reshape((-1,x.shape[0]))
y = spi_dataarray.values
m2d_spi = np.moveaxis(y,0,-1).reshape((-1,y.shape[0]))
m2d = np.concatenate((m2d_npi,m2d_spi),axis=0)
# compute average
data = np.nanmean(m2d,axis=0)
# create and return xarray with timeseries
time = npi_dataarray.time
return xr.DataArray(data, coords=[time], dims=['time'])
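# Shape sketch (assuming inputs of shape (time, lat, lon)): moveaxis + reshape turns
# each field into (lat*lon, time); concatenating stacks the grid cells of both
# icefields, and nanmean over axis 0 collapses them to a single (time,) series while
# ignoring NaN cells (e.g. points outside the ice mask).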
# return monthly series of varname from icefield (npi, spi or both)
# units are: [tas: C], [rsds: Wm2], [mb, pr, acc, abl: mm/day]
# monthly values correspond to monthly mean values of data with 3H resolution
def monthly_series(filename_npi, filename_spi, varname, icefield):
basedir=dict()
basedir['npi'] = join(NPI_MODEL_RESULTS_ROOT, filename_npi)
basedir['spi'] = join(SPI_MODEL_RESULTS_ROOT, filename_spi)
# declare topography dictionaries
# topo_real_full={}
# topo_model={}
topo_real={}
# npi topography data
# topo_real_full['npi'] = xr.open_dataset(NPI_TOPO_ROOT+'npi_dem_from_pat5x5ave_full.nc')['z'].values
# topo_model['npi'] = xr.open_dataset(NPI_TOPO_ROOT+'topo.nc')['topo_model'].values
topo_real['npi'] = xr.open_dataset(join(NPI_TOPO_ROOT, 'topo.nc'))['topo_real'].values
# spi topography data
# topo_real_full['spi'] = xr.open_dataset(SPI_TOPO_ROOT+'spi_dem_from_pat5x5ave_full.nc')['z'].values
# topo_model['spi'] = xr.open_dataset(SPI_TOPO_ROOT+'topo.nc')['topo_model'].values
topo_real['spi'] = xr.open_dataset(join(SPI_TOPO_ROOT, 'topo.nc'))['topo_real'].values
# declare data dictionaries
icefield_var={}
icefield_var_masked={}
# load data from both icefields
for icefield_name in ['npi','spi']:
# load full data from icefield
icefield_var[icefield_name] = xr.open_dataset(basedir[icefield_name])[varname]
# transform units
if varname == 'tas':
icefield_var[icefield_name] = icefield_var[icefield_name] - 273.15
if varname in ['pr','mb','abl','acc']:
icefield_var[icefield_name] = icefield_var[icefield_name] * 8.0
# mask with topography
icefield_var_masked[icefield_name] = icefield_var[icefield_name]+topo_real[icefield_name]*0
# if only one icefield is needed, return lat-lon average
if icefield in ['npi','spi']:
ans = icefield_var_masked[icefield].mean(['lat','lon'])
# if both icefields are needed, join data
else:
ans = average_icefields_data(icefield_var_masked['npi'],icefield_var_masked['spi'])
return ans
|
{"hexsha": "c85797a1d3a18b3c0f687a52778fb98a429103de", "size": 2873, "ext": "py", "lang": "Python", "max_stars_repo_path": "processing/processing/utils/icefields.py", "max_stars_repo_name": "tomescaff/patagonia", "max_stars_repo_head_hexsha": "4bcb1ad38e87a58db6ea60bf36bc01a76ed930a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "processing/processing/utils/icefields.py", "max_issues_repo_name": "tomescaff/patagonia", "max_issues_repo_head_hexsha": "4bcb1ad38e87a58db6ea60bf36bc01a76ed930a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "processing/processing/utils/icefields.py", "max_forks_repo_name": "tomescaff/patagonia", "max_forks_repo_head_hexsha": "4bcb1ad38e87a58db6ea60bf36bc01a76ed930a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8243243243, "max_line_length": 105, "alphanum_fraction": 0.6856943961, "include": true, "reason": "import numpy,from scipy", "num_tokens": 822}
|
#!/usr/bin/python
# developer: Ahmed Taha Elthakeb
# email: (a1yousse@eng.ucsd.edu)
"""
[21-oct-2018]
- test case: alexnet
- changing reward function to be func(val_acc + train_acc) on 10k images
"""
from __future__ import division
import pandas as pd
import numpy as np
import tensorflow as tf
import pickle
import os
#import dataset
#from . import dataset, networks, load
import sys
sys.path.append("..")
import dataset, networks, load, quantize
#import networks
#from networks import *
from networks import helper
from networks import alexnet, resnet18, svhn_net, lenet
from tensorflow.examples.tutorials.mnist import input_data as mnist_input
#import load
import json
from quantize import quantize_network
import six
import csv
import time
from datetime import datetime
import math
pi = math.pi
"""
from __future__ import division
import pandas as pd
import numpy as np
import tensorflow as tf
import pickle
import os
#import dataset
#from . import dataset, networks, load
import sys
sys.path.append("..")
import dataset, networks, load, quantize
#import networks
#from networks import *
from networks import helper
from networks import alexnet, resnet18
from tensorflow.examples.tutorials.mnist import input_data as mnist_input
#import load
import json
from quantize import quantize_network
import six
"""
file_idx = 1
# remove!!
#setattr(tf.contrib.rnn.GRUCell, 'deepcopy', lambda self, _: self)
#setattr(tf.contrib.rnn.BasicLSTMCell, 'deepcopy', lambda self, _: self)
#setattr(tf.contrib.rnn.MultiRNNCell, 'deepcopy', lambda self, _: self)
#NETWORKS = ['alexnet', 'googlenet', 'nin', 'resnet18', 'resnet50', 'squeezenet', 'vgg16net', 'lenet']
NETWORKS = ['lenet']
IMAGE_PATH_TRAIN = '/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K/'
IMAGE_PATH_TEST = '/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K/'
IMAGE_LABLE = '/home/ahmed/projects/NN_quant/rlbitwidth.code/val.txt'
CKPT_PATH = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/'
NET_ACC = {'alexnet': 79.918, 'googlenet': 89.002, 'nin': 81.218, 'resnet18': 85.016,
'resnet50': 91.984, 'squeezenet': 80.346, 'vgg16net': 89.816, 'lenet': 99.06}
num_train_examples = 212382
num_val_examples = 23372
num_test_examples = 13068
batch_size_val = 128
batch_size_train = 32
class Donkey(object):
@staticmethod
def _preprocess(image):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.multiply(tf.subtract(image, 0.5), 2)
image = tf.reshape(image, [64, 64, 3])
image = tf.random_crop(image, [54, 54, 3])
return image
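# _preprocess maps uint8 pixels to roughly [-1, 1] (convert_image_dtype gives [0, 1],
# then shift and scale by 2) and random-crops the 64x64 record to the 54x54 input
# size used by the SVHN network.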
@staticmethod
def _read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'length': tf.FixedLenFeature([], tf.int64),
'digits': tf.FixedLenFeature([5], tf.int64)
})
image = Donkey._preprocess(tf.decode_raw(features['image'], tf.uint8))
length = tf.cast(features['length'], tf.int32)
digits = tf.cast(features['digits'], tf.int32)
return image, length, digits
@staticmethod
def build_batch(path_to_tfrecords_file, num_examples, batch_size, shuffled):
assert tf.gfile.Exists(path_to_tfrecords_file), '%s not found' % path_to_tfrecords_file
filename_queue = tf.train.string_input_producer([path_to_tfrecords_file], num_epochs=None)
image, length, digits = Donkey._read_and_decode(filename_queue)
min_queue_examples = int(0.4 * num_examples)
if shuffled:
image_batch, length_batch, digits_batch = tf.train.shuffle_batch([image, length, digits],
batch_size=batch_size,
num_threads=2,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
image_batch, length_batch, digits_batch = tf.train.batch([image, length, digits],
batch_size=batch_size,
num_threads=2,
capacity=min_queue_examples + 3 * batch_size)
return image_batch, length_batch, digits_batch
def eval_imagenet(net_name, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1):
"""all layers are trainable in the conventional retraining procedure"""
if '.ckpt' in param_path:
netparams = load.load_netparams_tf(param_path, trainable=True)
else:
netparams = load.load_netparams_tf_q(param_path, trainable=True)
data_spec = helper.get_data_spec(net_name)
input_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size, data_spec.crop_size, data_spec.channels))
label_node = tf.placeholder(tf.int32)
if net_name == 'alexnet_noisy':
logits_, err_w, err_b, err_lyr = networks.alexnet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'alexnet':
if trainable:
logits_, weights_conv4_tmp = alexnet.alexnet_q_RL(input_node, netparams, qbits, layer_index)
else:
logits_ , _ , _ = alexnet.alexnet(input_node, netparams)
elif net_name == 'alexnet_shift':
logits_ = networks.alexnet_shift(input_node, netparams)
elif net_name == 'googlenet':
logits_, err_w, err_b, err_lyr = networks.googlenet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'nin':
logits_, err_w, err_b, err_lyr = networks.nin_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'resnet18':
logits_ = resnet18.resnet18(input_node, netparams)
#logits_, err_w, err_b, err_lyr = networks.resnet18_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'resnet18_shift':
logits_ = networks.resnet18_shift(input_node, netparams, shift_back)
elif net_name == 'resnet50':
logits_, err_w, err_b, err_lyr = networks.resnet50_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'squeezenet':
logits_, err_w, err_b, err_lyr = networks.squeezenet_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
elif net_name == 'vgg16net':
logits_, err_w, err_b, err_lyr = networks.vgg16net_noisy(input_node, netparams, err_mean, err_stddev, train_vars)
#square = [tf.nn.l2_loss(err_w[layer]) for layer in err_w]
#square_sum = tf.reduce_sum(square)
#loss_op = tf.reduce_mean(tf.nn.oftmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor / (1. + square_sum)
# ======== calculating the quantization error of a certain layer ==========
if trainable:
""" read the quantized weights (quantized version of the most recent retrained) """
w_q_pickle = param_q_path
with open(w_q_pickle, 'rb') as f:
params_quantized = pickle.load(f)
layer = layer_name
params_quantized_layer = tf.get_variable(name='params_quantized_layer', initializer=tf.constant(params_quantized[0][layer]), trainable=False)
q_diff = tf.subtract(params_quantized_layer, netparams['weights'][layer])
q_diff_cost = tf.nn.l2_loss(q_diff)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor*q_diff_cost
#loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node))
probs = helper.softmax(logits_)
top_k_op = tf.nn.in_top_k(probs, label_node, 5)
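# in_top_k(probs, labels, 5) counts a sample as correct when the true label is among
# the five highest-probability classes, so the running accuracy printed below is
# top-5 accuracy.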
#optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, epsilon=0.1)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-8)  # `10-8` evaluated to 2; epsilon should be 1e-8
if trainable:
train_op = optimizer.minimize(loss_op)
correct_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if trainable:
count = 0
correct = 0
cur_accuracy = 0
for i in range(0, n_epoch):
#if cur_accuracy >= NET_ACC[net_name]:
#break
#image_producer = dataset.ImageNetProducer(val_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K/val_10.txt', data_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_10K', data_spec=data_spec)
path_train = '/home/ahmed/projects/NN_quant/imageNet_training'
image_producer = dataset.ImageNetProducer(val_path=path_train + '/train_shuf_'+str(file_idx)+'.txt', data_path=path_train, data_spec=data_spec)
#image_producer = dataset.ImageNetProducer(val_path=path_train + '/train_shuf_100images.txt', data_path=path_train, data_spec=data_spec)
total = len(image_producer) * n_epoch
coordinator = tf.train.Coordinator()
threads = image_producer.start(session=sess, coordinator=coordinator)
for (labels, images) in image_producer.batches(sess):
one_hot_labels = np.zeros((len(labels), 1000))
for k in range(len(labels)):
one_hot_labels[k][labels[k]] = 1
sess.run(train_op, feed_dict={input_node: images, label_node: one_hot_labels})
# AHMED: debug
#netparams_tmp = sess.run(netparams)
#print('train = ', np.amax(netparams_tmp['weights']['conv2']))
#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))
# ------------
#correct += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))
# AHMED: modify
#top, logits_tmp, loss_op_tmp = sess.run([top_k_op, logits_q, loss_op], feed_dict={input_node: images, label_node: labels})
#top, act_q_tmp, weights_fp_tmp, weights_q_tmp = sess.run([top_k_op, act_, weights_fp, weights_q], feed_dict={input_node: images, label_node: labels})
top, weights_conv4_tmp_ret = sess.run([top_k_op, weights_conv4_tmp], feed_dict={input_node: images, label_node: labels})
correct += np.sum(top)
#print(np.amax(weights_q_tmp))
#print(len(set(weights_q_tmp.ravel())))
# --------
count += len(labels)
cur_accuracy = float(correct) * 100 / count
write_to_csv([count, total, cur_accuracy])
print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
coordinator.request_stop()
coordinator.join(threads, stop_grace_period_secs=2)
#return sess.run(err_w), cur_accuracy
# "sess.run" returns the netparams as normal value (converts it from tf to normal python variable)
return cur_accuracy, sess.run(netparams)
else:
count = 0
correct = 0
cur_accuracy = 0
path_val = './nn_quant_and_run_code_train/ILSVRC2012_img_val'
image_producer = dataset.ImageNetProducer(val_path=path_val + '/val_1k.txt', data_path=path_val, data_spec=data_spec)
#image_producer = dataset.ImageNetProducer(val_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K/val_40.txt', data_path='/home/ahmed/projects/NN_quant/ILSVRC2012_img_val_40K', data_spec=data_spec)
total = len(image_producer)
coordinator = tf.train.Coordinator()
threads = image_producer.start(session=sess, coordinator=coordinator)
for (labels, images) in image_producer.batches(sess):
one_hot_labels = np.zeros((len(labels), 1000))
for k in range(len(labels)):
one_hot_labels[k][labels[k]] = 1
#correct += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))
top = sess.run([top_k_op], feed_dict={input_node: images, label_node: labels})
correct += np.sum(top)
count += len(labels)
cur_accuracy = float(correct) * 100 / count
print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
coordinator.request_stop()
coordinator.join(threads, stop_grace_period_secs=2)
return cur_accuracy, 0
def eval_imagenet_q(net_name, param_pickle_path):
netparams = load.load_netparams_tf_q(param_pickle_path)
data_spec = helper.get_data_spec(net_name)
input_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size, data_spec.crop_size, data_spec.channels))
label_node = tf.placeholder(tf.int32)
if net_name == 'alexnet':
logits_, _, _ = alexnet.alexnet(input_node, netparams)  # alexnet.alexnet returns three values (see eval_imagenet above)
elif net_name == 'googlenet':
logits_ = networks.googlenet(input_node, netparams)
elif net_name == 'nin':
logits_ = networks.nin(input_node, netparams)
elif net_name == 'resnet18':
logits_ = networks.resnet18(input_node, netparams)
elif net_name == 'resnet50':
logits_ = networks.resnet50(input_node, netparams)
elif net_name == 'squeezenet':
logits_ = networks.squeezenet(input_node, netparams)
elif net_name == 'vgg16net':
logits_ = networks.vgg16net(input_node, netparams)  # plain (non-noisy) variant, matching the other branches here
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) #
probs = helper.softmax(logits_)
top_k_op = tf.nn.in_top_k(probs, label_node, 5)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.1)
correct_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
count = 0
correct = 0
cur_accuracy = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
image_producer = dataset.ImageNetProducer(val_path=IMAGE_LABLE, data_path=IMAGE_PATH_TEST, data_spec=data_spec)  # IMAGE_PATH was undefined; assuming the 10K validation split here
total = len(image_producer)
coordinator = tf.train.Coordinator()
threads = image_producer.start(session=sess, coordinator=coordinator)
for (labels, images) in image_producer.batches(sess):
correct += np.sum(sess.run(top_k_op, feed_dict={input_node: images, label_node: labels}))
count += len(labels)
cur_accuracy = float(correct) * 100 / count
print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
print(cur_accuracy)
coordinator.request_stop()
coordinator.join(threads, stop_grace_period_secs=2)
return cur_accuracy
def evaluator_svhn(param_path, qbits_dict):
print('=> Evaluating trained model on val data ...')
# ------------------------------------------------------------
# ------- evaluator ------------------------------------------
batch_size = batch_size_val
with tf.Graph().as_default():
#ckpt_path = '/home/ahmed/projects/SVHNClassifier/logs/train/latest.ckpt'
#netparams = load.load_svhn_netparams_tf_q(path, trainable=False)
if '.ckpt' in param_path:
netparams_eval = load.load_svhn_netparams_tf(param_path, trainable=False)
else:
netparams_eval = load.load_svhn_netparams_tf_q(param_path, trainable=False)
#print(netparams['weights']['hidden1'])
path_to_val_tfrecords_file = '/home/ahmed/projects/SVHNClassifier/data/val.tfrecords'
vimage_batch, vlength_batch, vdigits_batch = Donkey.build_batch(path_to_val_tfrecords_file,
num_examples=num_val_examples,
batch_size=batch_size,
shuffled=False)
vinput_node = vimage_batch
#vlength_logits, vdigits_logits = svhn_net.svhn_net(vinput_node, netparams_eval)
vlength_logits, vdigits_logits = svhn_net.svhn_net_q(vinput_node, netparams_eval, qbits_dict)
length_predictions = tf.argmax(vlength_logits, axis=1)
digits_predictions = tf.argmax(vdigits_logits, axis=2)
needs_include_length = False
if needs_include_length:
labels = tf.concat([tf.reshape(vlength_batch, [-1, 1]), vdigits_batch], axis=1)
predictions = tf.concat([tf.reshape(length_predictions, [-1, 1]), digits_predictions], axis=1)
else:
labels = vdigits_batch
predictions = digits_predictions
labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
predictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)
accuracy, update_accuracy = tf.metrics.accuracy(
labels=labels_string,
predictions=predictions_string
)
print(' debug # 2')
num_batches = num_val_examples / batch_size_val
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(int(num_batches)):
sess.run(update_accuracy)
#accuracy_val = sess.run([accuracy])
#self.summary_writer.add_summary(summary_val, global_step=global_step)
accuracy_val = sess.run([accuracy])
coord.request_stop()
coord.join(threads)
return accuracy_val
def eval_svhn_net(net_name, qbits_dict={}, layer_index=[], layer_name=[], trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=10):
ckpt_path = '/home/ahmed/projects/SVHNClassifier/logs/train/latest.ckpt'
#print("net parameters: ###########################")
#print(netparams['weights']['hidden1'])
data_spec = helper.get_data_spec(net_name)
input_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size * data_spec.crop_size * data_spec.channels))
input_node_2d = tf.reshape(input_node, shape=(-1, data_spec.crop_size, data_spec.crop_size, data_spec.channels))
label_node = tf.placeholder(tf.float32, [None, 10])
# -----
if trainable:
num_steps_to_show_loss = 100
num_steps_to_check = 1000
with tf.Graph().as_default():
netparams = load.load_svhn_netparams_tf(ckpt_path, trainable=True)
print('loading checkpoint model params ..')
path_to_train_tfrecords_file = '/home/ahmed/projects/SVHNClassifier/data/train.tfrecords'
batch_size = batch_size_train
image_batch, length_batch, digits_batch = Donkey.build_batch(path_to_train_tfrecords_file,
num_examples=num_train_examples,
batch_size=batch_size,
shuffled=True)
# forward pass
length_logits, digits_logits = svhn_net.svhn_net(image_batch, netparams)
#length_logits, digits_logits = svhn_net.svhn_net_q(image_batch, netparams, qbits_dict)
""" sin regularization """
cost_factor = 00.0
sin2_func_1 = tf.constant(0.0)
sin2_func_2 = tf.constant(0.0)
sin2_func_3 = tf.constant(0.0)
sin2_func_4 = tf.constant(0.0)
layer_name = 'hidden2'
qbits = qbits_dict[layer_name]
if qbits < 8:
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))
sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))
layer_name = 'hidden3'
qbits = qbits_dict[layer_name]
if qbits < 8:
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))
sin2_func_2 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))
layer_name = 'hidden4'
qbits = qbits_dict[layer_name]
if qbits < 8:
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))
sin2_func_3 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))
layer_name = 'hidden5'
qbits = qbits_dict[layer_name]
if qbits < 8:
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights']['conv2']/(2**(-(qbits[1]-1))))))
sin2_func_4 = tf.reduce_mean(tf.square(tf.sin(pi*netparams['weights'][layer_name]/(2**(-(qbits))))))
#sin2_func_1 = tf.reduce_mean(tf.square(tf.sin(pi*(netparams['weights']['conv2']+2**-(qbits[1]))/(2**(-(qbits[1]-1))))))
""" ------------------------------------------------ """
# loss calculation
length_labels = length_batch
digits_labels = digits_batch
length_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=length_labels, logits=length_logits))
digit1_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 0], logits=digits_logits[:, 0, :]))
digit2_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 1], logits=digits_logits[:, 1, :]))
digit3_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 2], logits=digits_logits[:, 2, :]))
digit4_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 3], logits=digits_logits[:, 3, :]))
digit5_cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=digits_labels[:, 4], logits=digits_logits[:, 4, :]))
loss_sin2_reg = cost_factor*(sin2_func_1 + sin2_func_2 + sin2_func_3 + sin2_func_4)
loss_op = length_cross_entropy + digit1_cross_entropy + digit2_cross_entropy + digit3_cross_entropy + digit4_cross_entropy + digit5_cross_entropy + loss_sin2_reg
global_step = tf.Variable(0, name='global_step', trainable=False)
training_options = {}
training_options['learning_rate'] = 1e-3
training_options['decay_steps'] = 10000
training_options['decay_rate'] = 0.9
learning_rate = tf.train.exponential_decay(training_options['learning_rate'], global_step=global_step,
decay_steps=training_options['decay_steps'], decay_rate=training_options['decay_rate'], staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
#optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op, global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
saver = tf.train.Saver()
print('=> Start training ..')
print('########################################')
#cur_accuracy = 0
#patience = initial_patience
best_accuracy = 0.0
duration = 0.0
for i in range(0, n_epoch):
print(' debug # 0')
start_time = time.time()
#_, loss_val, global_step_val = sess.run([train_op, loss_op, global_step])
_, loss_val, loss_sin2_reg_val, global_step_val = sess.run([train_op, loss_op, loss_sin2_reg, global_step])
duration += time.time() - start_time
#print('=> %s: step %d, loss = %f ' % (
# datetime.now(), global_step_val, loss_val))
print('=> %s: step %d, total_loss = %f, sin2_reg_loss = %f ' % (
datetime.now(), global_step_val, loss_val, loss_sin2_reg_val))
"""
if global_step_val % num_steps_to_show_loss == 0:
examples_per_sec = batch_size * num_steps_to_show_loss / duration
duration = 0.0
print('=> %s: step %d, loss = %f (%.1f examples/sec)' % (
datetime.now(), global_step_val, loss_val, examples_per_sec))
if global_step_val % num_steps_to_check != 0:
continue
"""
#_, loss_val = sess.run([train_op, loss_op])
print('---------------- finished epoch# ', i)
netparams_save = sess.run(netparams)
print(' Training finished')
""" path for saving the retrained model """
network_name = 'svhn_net'
path_save = '../nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_save_params = path_save + '_retrained.pickle'
# AHMED: debug
#print('retrained = ', np.amax(netparams['weights']['conv2']))
#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))
# ------------
#print('=================================================')
print('=> Writing trained model parameters ...')
#print('=================================================')
print(len(netparams_save['weights']))
print(netparams_save['weights'].keys())
with open(path_save_params, 'wb') as f:
pickle.dump(netparams_save, f)
print('=> Evaluating on validation dataset...')
accuracy_val = evaluator_svhn(path_save_params, qbits_dict)
print('epoch #', i)
print('accuracy', accuracy_val)
coord.request_stop()
coord.join(threads)
else: # inference
netparams = load.load_svhn_netparams_tf(ckpt_path, trainable=False)
path_to_val_tfrecords_file = '/home/ahmed/projects/SVHNClassifier/data/val.tfrecords'
batch_size = batch_size_val
accuracy_val = evaluator_svhn(ckpt_path, qbits_dict)
return accuracy_val
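# NOTE: everything below this return is unreachable with the current control flow;
# it appears to be an earlier inline inference path kept for reference.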
image_batch, length_batch, digits_batch = Donkey.build_batch(path_to_val_tfrecords_file,
num_examples=num_val_examples,
batch_size=batch_size,
shuffled=False)
print('digits_batch : ########################################')
print(digits_batch)
#print(digits_batch)
#print(digits_batch)
#length_logits, digits_logits = Model.inference(image_batch, drop_rate=0.0)
input_node = image_batch
#length_logits, digits_logits = svhn_net.svhn_net(input_node, netparams)
length_logits, digits_logits = svhn_net.svhn_net_q(input_node, netparams, qbits_dict)
#print('digits_logits : ########################################')
#print(digits_logits)
#print('input_node : ########################################')
#print(input_node)
length_predictions = tf.argmax(length_logits, axis=1)
digits_predictions = tf.argmax(digits_logits, axis=2)
needs_include_length = False
if needs_include_length:
labels = tf.concat([tf.reshape(length_batch, [-1, 1]), digits_batch], axis=1)
predictions = tf.concat([tf.reshape(length_predictions, [-1, 1]), digits_predictions], axis=1)
else:
labels = digits_batch
predictions = digits_predictions
labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
predictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)
accuracy, update_accuracy = tf.metrics.accuracy(
labels=labels_string,
predictions=predictions_string
)
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#restorer = tf.train.Saver()
# one epoch ---------------------
num_batches = num_val_examples / batch_size
for _ in range(int(num_batches)):
sess.run(update_accuracy)
# -------------------------------
#accuracy_val, summary_val = sess.run([accuracy, summary], feed_dict={input_node: image_batch, label_node: batch_y})
accuracy_val = sess.run([accuracy])
#self.summary_writer.add_summary(summary_val, global_step=global_step)
coord.request_stop()
coord.join(threads)
return accuracy_val
def eval_lenet(net_name, param_path, qbits, layer_index, layer_name=[], trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1):
#netparams = load.load_netparams_tf(ckpt_path, trainable=False)
if '.ckpt' in param_path:
netparams = load.load_netparams_tf(param_path, trainable=trainable)
else:
netparams = load.load_netparams_tf_q(param_path, trainable=trainable)
data_spec = helper.get_data_spec(net_name)
input_node = tf.placeholder(tf.float32, shape=(None, data_spec.crop_size * data_spec.crop_size * data_spec.channels))
input_node_2d = tf.reshape(input_node, shape=(-1, data_spec.crop_size, data_spec.crop_size, data_spec.channels))
label_node = tf.placeholder(tf.float32, [None, 10])
#logits_, err_w, err_b, err_lyr = lenet.lenet_noisy(input_node_2d, netparams, err_mean, err_stddev, train_vars)
#logits_ = lenet.lenet_quantized(input_node_2d, netparams, qbits)
if trainable:
#logits_ = lenet.lenet_q_RL(input_node_2d, netparams, qbits, layer_index)
logits_, ret = lenet.lenet_quantized(input_node_2d, netparams, qbits)
else:
#logits_, ret = lenet.lenet_quantized(input_node_2d, netparams, qbits)
logits_, ret = lenet.lenet(input_node_2d, netparams)
#square = [tf.nn.l2_loss(err_w[layer]) for layer in err_w]
#square_sum = tf.reduce_sum(square)
#loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node)) + cost_factor / (1. + square_sum)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_, labels=label_node))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
if trainable:
train_op = optimizer.minimize(loss_op)
probs = helper.softmax(logits_)
correct_pred = tf.equal(tf.argmax(probs, 1), tf.argmax(label_node, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#mnist = mnist_input.read_data_sets("/tmp/data/", one_hot=True)
mnist = mnist_input.read_data_sets("/home/ahmed/mnist", one_hot=True)
#print('############################################')
#[print(n.name) for n in tf.get_default_graph().as_graph_def().node]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# saving a checkpoint --------------------------------------------------------------------
saver.save(sess, 'lenet_save_ckpt/my-model-10000') # will generate my-model-10000.meta
saver.export_meta_graph('lenet_save_ckpt/my-model-10000.meta')  # not strictly needed; saver.save already writes the .meta file
# ----------------------------------------------------------------------------------------
cur_accuracy = 0
for i in range(0, n_epoch):
#if cur_accuracy >= NET_ACC[net_name]:
#break
if trainable:
for step in range(0, int(mnist.train.num_examples/data_spec.batch_size)):
batch_x, batch_y = mnist.train.next_batch(data_spec.batch_size)
#print("batch_x -------------------------------")
#print(batch_x)
#print("batch_y -------------------------------")
#print(batch_y)
sess.run(train_op, feed_dict={input_node: batch_x, label_node: batch_y})
#loss, acc = sess.run([loss_op_1, accuracy], feed_dict={input_node: batch_x, label_node: batch_y})
print('epoch# {:>6} finished\n'.format(i))
#cur_accuracy = 100 * (sess.run(accuracy, feed_dict={input_node: mnist.test.images[:], label_node: mnist.test.labels[:]}))
#cur_accuracy, ret_tf = (sess.run([accuracy,ret], feed_dict={input_node: mnist.test.images[:], label_node: mnist.test.labels[:]}))
print("mnist.test.images --------------------------------")
print((mnist.test.images[:]).shape)
np.save("image_1x781",mnist.test.images[0])
#print("mnist.test.labels --------------------------------")
#print(mnist.test.labels[0])
cur_accuracy, ret_tf = (sess.run([accuracy,ret], feed_dict={input_node: mnist.test.images[0:1], label_node: mnist.test.labels[0:1]}))
cur_accuracy = 100 * cur_accuracy
print('################################')
#print(set(ret_tf.ravel()))
np.set_printoptions(precision=4)
print((ret_tf))
np.save("image_test", ret_tf)
#print((ret_tf[0].shape))
#print(np.sum(ret_tf[0]))
#print(np.max(ret_tf[0]))
#print(np.min(ret_tf[0]))
#print(np.mean(ret_tf[0]))
#print(np.var(ret_tf[0]))
print('{:>6}/{:<6} {:>6.2f}%'.format(i, n_epoch, cur_accuracy))
print('Final Test Accuracy = \t' + str(cur_accuracy))
return cur_accuracy, sess.run(netparams)
def run_network(net_name, cost_factor, n_epoch):
ckpt_path = CKPT_PATH + net_name + '/' + net_name + '.ckpt'
err_mean = [0.0, 0.0, 0.0, 0.0] #order: input, weights, biases, layers
err_stddev = [0.0, 0.0, 0.0, 0.0]
train_vars = [False, True, False, False]
istrain = True
if net_name == 'lenet':
return eval_lenet(net_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)
else:
return eval_imagenet(net_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)
def gen_max_noise_dist():
max_epoch = 5
for net_name in NETWORKS:
directory = '/home/ahmed/projects/NN_quant/results/networks/' + net_name
if not os.path.exists(directory):
os.makedirs(directory)
current_factor = 10
largest_correct = 0
smallest_wrong = 0
for i in range(0, 10):
tf.reset_default_graph()
err_w, accuracy = run_network(net_name, current_factor, max_epoch)
if accuracy >= NET_ACC[net_name]:
save_path = directory + '/' + (str)(current_factor) + '_' + (str)(accuracy)
with open(save_path, 'w') as f:
pickle.dump(err_w, f)
largest_correct = current_factor
if smallest_wrong == 0:
current_factor = current_factor * 2
else:
current_factor = (current_factor + smallest_wrong) / 2.
else:
smallest_wrong = current_factor
current_factor = (current_factor + largest_correct) / 2.
def gen_noise_dist(net_name, cost_factor, count, n_epoch):
directory = '/home/ahmed/projects/NN_quant/results/deltas/' + net_name
if not os.path.exists(directory):
os.makedirs(directory)
for i in range(0, count):
tf.reset_default_graph()
err_w, accuracy = run_network(net_name, cost_factor, n_epoch)
#save_path = directory + '/' + (str)(cost_factor) + '_' + (str)(accuracy)
save_path = directory + '/' + (str)(i) + '_' + (str)(accuracy)
with open(save_path, 'w') as f:
pickle.dump(err_w, f)
'''
path_net = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/resnet18/resnet18.py'
layers = load.get_layers(path_net)
acc = {}
for i in range(0, len(layers)):
path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_10_' + layers[i] + '_7.pickle'
tf.reset_default_graph()
acc[layers[i]] = eval_imagenet_q('resnet18', path)
print("\n\n")
print(str(i) + "/" + str(len(layers)))
print(acc)
print(acc)
'''
def eval_normalized_layers():
acc = {}
count = 1
for dirpath, subdirs, fileList in os.walk('/home/ahmed/projects/NN_quant/results/normalized/resnet18/'):
for filename in fileList:
addr = (os.path.join(dirpath, filename))
tf.reset_default_graph()
print()
print(count)
print(filename)
print()
count = count + 1
acc[filename] = eval_imagenet_q('resnet18', addr)
print(acc)
with open('out.txt', 'w') as outfile:
outfile.write(json.dumps(acc))
#layers_sorted = load.get_layers('/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/resnet18/resnet18.py')
shift_back = {}
#for layer in layers_sorted:
# shift_back[layer] = 0
#print('==================================================================')
#print('TRAINING')
#print('==================================================================')
'''
# this is for phase I training - retrain a little bit on new dataset 40K - get Wo'
#param_path = '/home/ahmed/projects/NN_quant/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.ckpt' # = {Wo}
# this is for phase II training - retrain to minimize the quantization error - >> get W1
#path_save = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18'
#path_save_q = path_save + '_layers_shift_quant_10May.pickle'
#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/May12_resnet18_10_fc1000_5_bits.pickle'
param_path = Wo_resent18
save_path_params = path_save + '_layers_shift_quant_retrain_A_10May.pickle'
acc, netparams = eval_imagenet('resnet18', param_path, shift_back, trainable=True, err_mean=None, err_stddev=None, train_vars=None, cost_factor=800., n_epoch=1)
print(acc)
with open(save_path_params, 'w') as f:
pickle.dump(netparams, f)
'''
def get_stats(network_name):
# get weights
netparams = load.get_netparams('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt')
weights = netparams['weights']
# get layers
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.py')
tot_num_layers = len(layers_sorted)
cols = ['layer_idx_norm', 'n', 'c', 'k', 'std']
tmp_lst = []
for i, layer in enumerate(layers_sorted, start=1):
layer_shape = weights[layer].shape
if len(layer_shape) == 2:
k = 0
n, c = layer_shape
else:
k, _, n, c = layer_shape
weights_layer = weights[layer].ravel()
idx_norm = i/tot_num_layers
std = np.var(weights_layer)
tmp_lst.append([idx_norm, n, c, k, std])
df = pd.DataFrame(tmp_lst, columns=cols)
return df # to access --> df.loc[i, 'std']
def quantize_and_run(qbits):
input_file = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.ckpt'
""" Quantization """
nbits = 16
path_save = './nn_quant_and_run_code/results/quantized/alexnet/'
path_save_q = path_save + 'alexnet_layers_quant_'+ str(nbits) +'-bits_23Sep.pickle'
#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')
#bits_q = [nbits] * len(layers_sorted)
bits_q = qbits
path_params = input_file
quantize_network(path_params, layers_sorted, path_save_q, bits_q)
print('==================================================================')
print('INFERENCE')
print('==================================================================')
""" Run Inference """
#path_save_q = path_save + '_layers_shift_quant_10May.pickle'
#param_path = save_path_params
#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_layers_shift_quant_retrain_A_10May.pickle'
param_path = path_save_q
with tf.Graph().as_default():
acc, netparams = eval_imagenet('alexnet', param_path, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)
return acc
def quantize_and_train(network_name, layer_index, layer_name, qbits, init_params, file_idx):
""" full precision """
#input_file = './rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network_name + '/' + network_name + '.ckpt'
print('==================================================================')
print('Quantization')
print('==================================================================')
""" Quantization """
""" 1) we initialize based on the quantized input pattern (?) """
path_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/'
path_save_q = path_save + 'train_1_init_' + network_name + '_layers_quant_17Oct.pickle'
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network_name + '/' + network_name + '.py')
""" always start with the most recent retrained model """
path_params = init_params
quantize_network(path_params, layers_sorted, path_save_q, qbits)
print('==================================================================')
print('TRAINING')
print('==================================================================')
""" Run retraining """
""" use the full precision weights for initialization, or the most recent retrained """
""" this is used to calculate the quantization difference regularizer """
param_path = init_params
param_q_path = path_save_q
with tf.Graph().as_default():
acc, netparams = eval_imagenet(network_name, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=True, err_mean=None, err_stddev=None, train_vars=None, cost_factor=200., n_epoch=1)
print(acc)
""" path for saving the retrained model """
path_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_save_params = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'
# AHMED: debug
#print('retrained = ', np.amax(netparams['weights']['conv2']))
#print('len set = ', len(set(np.array(netparams['weights']['conv2']))))
# ------------
with open(path_save_params, 'wb') as f:
pickle.dump(netparams, f)
print('==================================================================')
print('TRAINING DONE!')
print('==================================================================')
def quantize_and_run_any(network, qbits):
print('network:', network)
input_file = './nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/' + network +'/' + network +'.ckpt'
print('==================================================================')
print('Quantization')
print('==================================================================')
""" Quantization """
nbits = 10
path_save = './nn_quant_and_run_code/results/quantized/'+ network +'/'
path_save_q = path_save + network +'_layers_quant_'+ str(nbits) +'-bits_date.pickle'
#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network +'/'+ network +'.py')
#bits_q = [nbits] * len(layers_sorted)
bits_q = qbits
path_params = input_file
quantize_network(path_params, layers_sorted, path_save_q, bits_q)
print('==================================================================')
print('INFERENCE')
print('==================================================================')
""" Run Inference """
#path_save_q = path_save + '_layers_shift_quant_10May.pickle'
#param_path = save_path_params
#param_path = '/home/ahmed/projects/NN_quant/results/quantized/resnet18/resnet18_layers_shift_quant_retrain_A_10May.pickle'
param_path = path_save_q
with tf.Graph().as_default():
acc, netparams = eval_imagenet(network, param_path, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)
return acc
def run_inference(network, input_param_path, qbits):
print('==================================================================')
print('Quantization')
print('==================================================================')
""" Quantization """
nbits = 10
path_save = './nn_quant_and_run_code/results/quantized/'+ network +'/'
path_save_q = path_save + network +'train_1_test_retrained_quantized.pickle'
#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network +'/'+ network +'.py')
#bits_q = [nbits] * len(layers_sorted)
bits_q = qbits
path_params = input_param_path
quantize_network(path_params, layers_sorted, path_save_q, bits_q)
print('==================================================================')
print('INFERENCE')
print('==================================================================')
#param_path = input_param_path
param_path = path_save_q
param_q_path = ''
layer_index = 0
layer_name = 0
file_idx = 0
shift_back = {}
with tf.Graph().as_default():
acc, netparams = eval_imagenet(network, param_path, param_q_path, qbits, layer_index, layer_name, file_idx, shift_back, trainable=False, err_mean=None, err_stddev=None, train_vars=None, cost_factor=0., n_epoch=1)
return acc
def train_test_svhn_net(network_name, params, istrain, cost_factor, n_epoch, qbits, layer_index):
err_mean = [0.0, 0.0, 0.0, 0.0] #order: input, weights, biases, layers
err_stddev = [0.0, 0.0, 0.0, 0.0]
train_vars = [False, True, False, False]
ckpt_path = CKPT_PATH + network_name + '/' + network_name + '.ckpt'
ckpt_path = '/home/ahmed/projects/SVHNClassifier/logs/train/latest.ckpt' # hard-coded override of the default checkpoint
print('==================================================================')
print('Training')
print('==================================================================')
param_path = params
if network_name == 'lenet':
with tf.Graph().as_default():
acc, netparams = eval_lenet(net_name=network_name, param_path=param_path, qbits=qbits, layer_index=layer_index, trainable=True, n_epoch=n_epoch)
else:
return eval_imagenet(network_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)
#save_path_params = 'lenet_retrained.pickle'
path_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'
with open(path_params_retrained, 'wb') as f:
pickle.dump(netparams, f)
return acc
def train_test_lenet(network_name, params, istrain, cost_factor, n_epoch, qbits, layer_index):
ckpt_path = CKPT_PATH + network_name + '/' + network_name + '.ckpt'
err_mean = [0.0, 0.0, 0.0, 0.0] #order: input, weights, biases, layers
err_stddev = [0.0, 0.0, 0.0, 0.0]
train_vars = [False, True, False, False]
#istrain = True
"""
print('==================================================================')
print('Training')
print('==================================================================')
#param_path = params
param_path = ckpt_path
if network_name == 'lenet':
with tf.Graph().as_default():
acc, netparams = eval_lenet(net_name=network_name, param_path=param_path, qbits=qbits, layer_index=layer_index, trainable=True, n_epoch=n_epoch)
else:
return eval_imagenet(network_name, ckpt_path, trainable=istrain, err_mean=err_mean, err_stddev=err_stddev, train_vars=train_vars, cost_factor=cost_factor, n_epoch=n_epoch)
#save_path_params = 'lenet_retrained.pickle'
path_save = '../nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'
with open(path_params_retrained, 'wb') as f:
pickle.dump(netparams, f)
print('==================================================================')
print('Quantization')
print('==================================================================')
path_save = '../nn_quant_and_run_code/results/quantized/'+ network_name +'/'
path_save_q = path_save + network_name +'train_1_test_retrained_quantized.pickle'
#layers_sorted = load.get_layers('/backup/amir-tc/rl_quantization/rl_quantization.code/nn_quant_and_run_code/rlbitwidth.tfmodels/caffe2tf/tfmodels/alexnet/alexnet.py')
layers_sorted = load.get_layers('../nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+ network_name +'/'+ network_name +'.py')
#bits_q = [nbits] * len(layers_sorted)
bits_q = qbits
path_params = path_params_retrained
quantize_network(path_params, layers_sorted, path_save_q, bits_q)
"""
print('==================================================================')
print('INFERENCE')
print('==================================================================')
#param_path = input_param_path
#param_path = path_save_q
path_pytorch_model = "./pytorch_models/lenet_mnist.pickle"
acc_test, _ = eval_lenet(net_name=network_name, param_path=path_pytorch_model , qbits=qbits, layer_index=layer_index, trainable=False, n_epoch=1)
#acc_test, _ = eval_lenet(net_name=network_name, param_path=path_save_q , qbits=qbits, layer_index=layer_index, trainable=False, n_epoch=1)
return 100, acc_test
#return acc, acc_test
def write_to_csv(step_data):
with open('train_1_acc.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(step_data)
# def main():
# csv file initialization:
headers = ['acc']
with open('train_1_acc.csv', 'w') as writeFile:
writer = csv.writer(writeFile)
writer.writerow(headers)
def retrain(network_name, episode_num, layer_index, qbits):
"""
1- read initial model (or the one from previous iteration) --> PARAMS(1)
2- From RL:
- read layer index
- read #bits
3- quantize: starting from "FP" (@ start of each episode) ->- quantize --> PARAMS_q
3''- quantize: starting from "recent_retrained" ->- quantize --> "PARAMS_q"
4- calculate the quantization error of the input layer: ||(FP(layer) - PARAMS_q(layer))||^2
4''- calculate the quantization error of the input layer: ||(recent_retrained(layer) - PARAMS_q(layer))||^2
5- add this quantization error to the objective function (see the sketch after this function)
6- initialize with PARAMS(1), fix previous layers (except 1st and last) and run retraining ... --> PARAMS_retrained
7- (caching!!)
* assume independent retraining for independent episodes
"""
"""
- init_params = is the parameter file for retraining initialization
- if starting the episode, then init_params comes from the full precision ckpt,
otherwise, it comes from the most recent retrained file
"""
global file_idx
path_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'
#if path_params_retrained.is_file():
""" - init_params = is the parameter file for retraining initialization
- if starting the episode, then init_params comes from the full precision ckpt,
otherwise, it comes from the most recent retrained file """
#if (episode_num==0) and (layer_index==1):
if (layer_index==1):
init_params = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt'
""" randomly pick 100k images to retrain on """
#file_idx = random.randint(1,13)
file_idx = 1
else:
init_params = path_params_retrained
if network_name=='lenet':
acc = train_test_lenet(network_name='lenet', params=init_params, istrain=True, cost_factor=0, n_epoch=5, qbits=qbits, layer_index=layer_index)
return acc
elif network_name=='svhn_net':
acc = train_test_svhn_net(network_name='svhn_net', params=init_params, istrain=True, cost_factor=0, n_epoch=5, qbits=qbits, layer_index=layer_index)
return acc
else:
""" accelerated fine-tuning """
layers_sorted = load.get_layers('./nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.py')
layer_name = layers_sorted[layer_index]
init_params = './nn_quant_and_run_code_train/rlbitwidth.tfmodels/caffe2tf/tfmodels/'+network_name+'/'+network_name+'.ckpt'
quantize_and_train(network_name, layer_index, layer_name, qbits, init_params, file_idx)
""" validation accuracy after fine-tuning """
path_save = './nn_quant_and_run_code/results/quantized/' + network_name + '/' + network_name
path_params_retrained = path_save + '_train_1_layers_quant_retrained_17Oct_RL.pickle'
acc = run_inference(network_name, path_params_retrained, qbits)
return acc
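""" A minimal sketch (hypothetical helper, not the regularizer used inside eval_imagenet)
of the quantization-difference term from steps 4/4'' and 5 in retrain's docstring above:
||reference(layer) - PARAMS_q(layer)||^2, scaled by a cost factor. `ref_params` and
`q_params` are assumed to be dicts mapping layer names to numpy weight arrays (the
pickled netparams format), and numpy is assumed to be imported as np in this file. """
def quant_diff_regularizer(ref_params, q_params, layer_name, cost_factor=200.):
    # squared L2 distance between the reference and quantized weights of one layer
    diff = np.asarray(ref_params[layer_name]) - np.asarray(q_params[layer_name])
    return cost_factor * float(np.sum(diff ** 2))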
""" SVHN NET layers """
# 16 LAYERS: dict_keys(['digit1', 'digit2', 'digit3', 'digit4', 'digit5', 'digit_length', 'hidden1', 'hidden10', 'hidden2', 'hidden3', 'hidden4', 'hidden5', 'hidden6', 'hidden7', 'hidden8', 'hidden9'])
svhn_num_layers = 16
qbits = {}
# keep Full Precision
qbits['hidden1'] = 16
qbits['hidden2'] = 4
qbits['hidden3'] = 4
qbits['hidden4'] = 4
qbits['hidden5'] = 4
qbits['hidden6'] = 8
qbits['hidden7'] = 8
qbits['hidden8'] = 8
qbits['hidden9'] = 8
qbits['hidden10'] = 16
# keep Full Precision
qbits['digit_length'] = 16
qbits['digit1'] = 16
qbits['digit2'] = 16
qbits['digit3'] = 16
qbits['digit4'] = 16
qbits['digit5'] = 16
#for key, _ in qbits.items():
# qbits[key] = 4
print(qbits)
#acc = eval_svhn_net(net_name='svhn_net', qbits_dict=qbits, trainable=False, n_epoch=100)
acc = eval_svhn_net(net_name='svhn_net', qbits_dict=qbits, trainable=True, n_epoch=400)
print(acc)
|
{"hexsha": "435447166cfbbbf89de904411a4a1f8161cbebe4", "size": 52939, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/examples/classifier_compression/sinreq_v2_svhn_runcode/evaluate_svhn_sin2.py", "max_stars_repo_name": "he-actlab/waveq.code", "max_stars_repo_head_hexsha": "024d55af6d989d4074d3e555d03b76a2f7eac209", "max_stars_repo_licenses": ["CNRI-Python"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-09T03:21:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-09T03:21:32.000Z", "max_issues_repo_path": "code/examples/classifier_compression/sinreq_v2_svhn_runcode/evaluate_svhn_sin2.py", "max_issues_repo_name": "he-actlab/waveq.code", "max_issues_repo_head_hexsha": "024d55af6d989d4074d3e555d03b76a2f7eac209", "max_issues_repo_licenses": ["CNRI-Python"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-09-26T00:53:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:23:34.000Z", "max_forks_repo_path": "code/examples/classifier_compression/sinreq_v2_svhn_runcode/evaluate_svhn_sin2.py", "max_forks_repo_name": "sinreq-learn/sinreq-learn.code", "max_forks_repo_head_hexsha": "a205d3fa22a41d5f4fc1ef1e5698c4f1dbb11e6a", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8743500867, "max_line_length": 221, "alphanum_fraction": 0.6642928654, "include": true, "reason": "import numpy", "num_tokens": 13528}
|
Jack Zwald is a Sophomore International Relations major with a Chinese minor through the UC Davis Chinese Program. He is also the current Campaign Director for the Davis College Democrats, a former intern for ASUCD Senator Andrew Peake, the former Voter Registration Coordinator for the Office of University Affairs, and the Vice Chairman of the Academic Affairs Commission.
During the Fall 2007 ASUCD Election campaign season, Jack was allegedly solicited by a campaign representative of GO candidate John Dreyer in the Residence Halls, and consequently filed a complaint with the Elections Committee since such action is disallowed in the Residence Halls.
I also like ice cream.
Currently, Jack is a candidate for the ASUCD Senate, running with the L.E.A.D. slate in this Fall's 2008 ASUCD election.
20071106 00:30:15 Jack is also the Freshman Outreach Coordinator for DCD. And I might add that he is such a funny guy and loved by all. Users/GregWebb
20080216 00:19:44 Jack caused the 2008 Valentines Day blackout. Users/MattBlair
|
{"hexsha": "526a4ec9e21f8b6f085dc7fd02598d52c6f418de", "size": 1094, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/JackZwald.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/JackZwald.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/JackZwald.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 78.1428571429, "max_line_length": 396, "alphanum_fraction": 0.8117001828, "num_tokens": 244}
|
\chapter{Introduction}
\label{chap:intro}
This document is intended both as a thesis template and as a written tutorial on typesetting a professional-looking academic document. The style of the template is designed to mimic an equivalent LaTeX document template that is commonly used within the Computer Vision and Visual Analytics group here at Swansea. This LaTeX template is itself based on a LaTeX template named Custard.
\section{Motivations}
\label{sec:intro_motivation}
Large documents can become cumbersome to work with and format consistently. Sensibly chosen aesthetic cues are important to help imply structure and can greatly aid the reader in understanding your work. The accompanying LaTeX template uses abstraction to hide the formatting from the author during content preparation, allowing for consistent styling to be applied automatically during document compilation. In this Google Docs theme it is the responsibility of the author to manually adhere to the styling laid out in this template.
\subsection{Objective}
\label{sec:intro_objective}
In this document we present a tutorial on thesis creation and typesetting, and discuss topics such as literature surveying and proper citation.
\section{Overview}
\label{sec:intro_overview}
The remainder of chapter \ref{chap:intro} outlines the key contributions of this work; the rest of the document is organized as follows. Chapter \ref{chap:resources} reviews techniques for finding and properly citing external resources from the academic literature and online. In chapter \ref{chap:typesetting} we show examples of how to typeset different types of content, such as internal references, figures, code listings, and tables. And lastly in chapter \ref{chap:conclusion} we summarize the main contributions and key points to take away from this template.
\section{Contributions}
\label{sec:intro_contribs}
The main contributions of this work can be seen as follows:
\begin{description}
\item[$\bullet$ A LaTeX thesis template]\hfill
Modify this document by adding additional TeX files for your top level content chapters.
\item[$\bullet$ A typesetting guide of useful primitive elements]\hfill
Use the building blocks within this template to typeset each part of your document. Aim to use simple and reusable elements to keep your LaTeX code neat and to make your document consistently styled throughout.
\item[$\bullet$ A review of how to find and cite external resources]\hfill
We review techniques and resources for finding and properly citing resources from the prior academic literature and from online resources.
\end{description}
|
{"hexsha": "594cf74df3ed2d9c65e5df1139af8117aed49a31", "size": 2714, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis-templates/LaTeX/chapter/thesis_intro.tex", "max_stars_repo_name": "CS-Swansea/Computer-Vision-and-Machine-Learning-Wiki", "max_stars_repo_head_hexsha": "490cb0bdbf0ae62dc541b743a1e48cf530be34a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2019-06-12T20:41:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T01:17:07.000Z", "max_issues_repo_path": "thesis-templates/LaTeX/chapter/thesis_intro.tex", "max_issues_repo_name": "CS-Swansea/Computer-Vision-and-Machine-Learning-Wiki", "max_issues_repo_head_hexsha": "490cb0bdbf0ae62dc541b743a1e48cf530be34a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-10-21T14:14:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-09T19:27:02.000Z", "max_forks_repo_path": "thesis-templates/LaTeX/chapter/thesis_intro.tex", "max_forks_repo_name": "CS-Swansea/Computer-Vision-and-Machine-Learning-Wiki", "max_forks_repo_head_hexsha": "490cb0bdbf0ae62dc541b743a1e48cf530be34a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2019-04-26T10:08:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T15:28:59.000Z", "avg_line_length": 67.85, "max_line_length": 569, "alphanum_fraction": 0.7896094326, "num_tokens": 566}
|
module failing_case_test
use example_asserts_m, only: &
FAILURE_MESSAGE, &
NUM_ASSERTS_IN_FAILING, &
NUM_FAILING_ASSERTS_IN_FAILING, &
SUCCESS_MESSAGE
use example_cases_m, only: &
example_failing_test_case, &
EXAMPLE_DESCRIPTION
use helpers_m, only: test_item_input_t, test_result_item_input_t, run_test
use vegetables, only: &
input_t, &
result_t, &
test_item_t, &
test_result_item_t, &
assert_doesnt_include, &
assert_equals, &
assert_includes, &
assert_not, &
fail, &
given, &
then__, &
when
implicit none
private
public :: test_failing_case_behaviors
contains
function test_failing_case_behaviors() result(test)
type(test_item_t) :: test
test = given( &
"a failing test case", &
test_item_input_t(example_failing_test_case()), &
[ when( &
"it is run", &
run_test, &
[ then__("it knows it failed", check_case_fails) &
, then__("it has 1 test case", check_num_cases) &
, then__("it has 1 failing case", check_num_failing_cases) &
, then__( &
"it's verbose description includes the given description", &
check_verbose_for_given_description) &
, then__( &
"it's verbose description includes the success message", &
check_verbose_for_success_message) &
, then__( &
"it's verbose description includes the failure message", &
check_verbose_for_failure_message) &
, then__( &
"it's failure description includes the given description", &
check_failure_for_given_description) &
, then__( &
"it's failure description includes the failure message", &
check_failure_for_failure_message) &
, then__( &
"it's failure description doesn't include the success message", &
check_failure_no_success_message) &
, then__("it knows how many asserts there were", check_num_asserts) &
, then__("it knows how many asserts failed", check_num_failing_asserts) &
]) &
])
end function
function check_case_fails(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_not(example_result%passed())
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_num_cases(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_equals(1, example_result%num_cases())
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_num_failing_cases(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_equals(1, example_result%num_failing_cases())
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_verbose_for_given_description(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_includes(EXAMPLE_DESCRIPTION, example_result%verbose_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_verbose_for_success_message(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_includes(SUCCESS_MESSAGE, example_result%verbose_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_verbose_for_failure_message(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_includes(FAILURE_MESSAGE, example_result%verbose_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_failure_for_given_description(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_includes(EXAMPLE_DESCRIPTION, example_result%failure_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_failure_for_failure_message(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_includes(FAILURE_MESSAGE, example_result%failure_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_failure_no_success_message(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_doesnt_include(SUCCESS_MESSAGE, example_result%failure_description(.false.))
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_num_asserts(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_equals(NUM_ASSERTS_IN_FAILING, example_result%num_asserts())
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
function check_num_failing_asserts(input) result(result_)
class(input_t), intent(in) :: input
type(result_t) :: result_
type(test_result_item_t) :: example_result
select type (input)
type is (test_result_item_input_t)
example_result = input%input()
result_ = assert_equals( &
NUM_FAILING_ASSERTS_IN_FAILING, example_result%num_failing_asserts())
class default
result_ = fail("Expected to get a test_result_item_input_t")
end select
end function
end module
|
{"hexsha": "18fc31ce5bb3464dd6e5ce61fed909085ccb90db", "size": 8709, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/failing_case_test.f90", "max_stars_repo_name": "everythingfunctional/vegetables", "max_stars_repo_head_hexsha": "5625f1f3e318fb301d654e7875e254fa3e0cc4a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/failing_case_test.f90", "max_issues_repo_name": "everythingfunctional/vegetables", "max_issues_repo_head_hexsha": "5625f1f3e318fb301d654e7875e254fa3e0cc4a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/failing_case_test.f90", "max_forks_repo_name": "everythingfunctional/vegetables", "max_forks_repo_head_hexsha": "5625f1f3e318fb301d654e7875e254fa3e0cc4a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5387931034, "max_line_length": 105, "alphanum_fraction": 0.6016764267, "num_tokens": 1729}
|
[STATEMENT]
lemma seq_meas_props:
shows "incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and>
\<Squnion> pos_img = \<Squnion> range seq_meas"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
have ex: "\<exists>f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f
[PROOF STEP]
proof (rule Extended_Real.Sup_countable_SUP)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
show "pos_img \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
have "{} \<in> pos_sets"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<in> pos_sets
[PROOF STEP]
using empty_pos_meas_set
[PROOF STATE]
proof (prove)
using this:
pos_meas_set {}
goal (1 subgoal):
1. {} \<in> pos_sets
[PROOF STEP]
unfolding pos_sets_def
[PROOF STATE]
proof (prove)
using this:
pos_meas_set {}
goal (1 subgoal):
1. {} \<in> {A \<in> sets M. pos_meas_set A}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{} \<in> pos_sets
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
hence "\<mu> {} \<in> pos_img"
[PROOF STATE]
proof (prove)
using this:
{} \<in> pos_sets
goal (1 subgoal):
1. \<mu> {} \<in> pos_img
[PROOF STEP]
unfolding pos_img_def
[PROOF STATE]
proof (prove)
using this:
{} \<in> pos_sets
goal (1 subgoal):
1. \<mu> {} \<in> {\<mu> A |A. A \<in> pos_sets}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<mu> {} \<in> pos_img
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<mu> {} \<in> pos_img
goal (1 subgoal):
1. pos_img \<noteq> {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pos_img \<noteq> {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
pos_img \<noteq> {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
let ?V = "SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
have vprop: "incseq ?V \<and> range ?V \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range ?V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. incseq (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<and> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f)
[PROOF STEP]
using someI_ex[of "\<lambda>f. incseq f \<and> range f \<subseteq> pos_img \<and>
\<Squnion> pos_img = \<Squnion> range f"] ex
[PROOF STATE]
proof (prove)
using this:
\<exists>x. incseq x \<and> range x \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range x \<Longrightarrow> incseq (SOME x. incseq x \<and> range x \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range x) \<and> range (SOME x. incseq x \<and> range x \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range x) \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range (SOME x. incseq x \<and> range x \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range x)
\<exists>f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f
goal (1 subgoal):
1. incseq (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<and> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
incseq (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<and> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
using seq_meas_def vprop
[PROOF STATE]
proof (prove)
using this:
seq_meas = (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f)
incseq (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<and> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f) \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range (SOME f. incseq f \<and> range f \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range f)
goal (1 subgoal):
1. incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
[PROOF STEP]
by presburger
[PROOF STATE]
proof (state)
this:
incseq seq_meas \<and> range seq_meas \<subseteq> pos_img \<and> \<Squnion> pos_img = \<Squnion> range seq_meas
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2743, "file": "Hahn_Jordan_Decomposition_Hahn_Jordan_Decomposition", "length": 24}
|
# -*- coding: utf-8 -*-
"""main_rungekutta_multivar.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sSdGdMNuQTa5rDS_zCCKfyVvoElMFITh
"""
from sympy import *
from math import *
import sys
import numpy as np
import matplotlib.pyplot as plt
from lib_rungekutta import *
"""# Phương pháp Runge - Kutta hiện giải bài toán Cauchy cho hệ phương trình vi phân
## 0. Bài toán
Giải hệ phương trình vi phân $r$ phương trình sau:
$$\begin{cases} \frac{dy_{1}}{dx} = f_{1}(x, y_{1}, y_{2}, ...., y_{r})\\\frac{dy_{2}}{dx} = f_{2}(x, y_{1}, y_{2}, ...., y_{r})\\.... \\\frac{dy_{r}}{dx} = f_{r}(x, y_{1}, y_{2}, ...., y_{r})\end{cases}$$
Ta có thể xây dựng lại như sau:
$$\frac{dY}{dx} = F(x, Y)$$
## 1. Nhập dữ liệu
Nhập hệ phương trình $\frac{dY}{dx} = F(x, Y)$, với $x$ thuộc giá trị thực, $Y$ là vector các nghiệm ${y_{i}}$
"""
def expr(x, y):
#{
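    # Lotka-Volterra predator-prey model: y[0] = prey population n, y[1] = predator population p;
    # K = carrying capacity, r = prey growth rate, a = interaction rate, muy = predator death rate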
    dy = np.zeros((len(y)))
    n = y[0]
    p = y[1]
    K = 100
    r = 0.6
    a = 0.04
    muy = 1.2
    dy[0] = r * n * (1 - n / K) - a * n * p
    dy[1] = -muy * p + a * n * p
    return dy
#}
"""Nhập giá trị ban đầu $x_{0}$ và $Y(x_{0})$"""
x_0 = 0
y_0 = [70, 20]
"""Nhập khoảng cách giữa 2 điểm liên tiếp $h = x_{i} - x_{i-1}$"""
h = 0.1
"""Nhập số mốc của lưới điểm $n$ """
n = 1000
"""## 2. Giải PTVP và in ra kết quả
Cấp chính xác:
"""
precision_x = 3;
precision_y = 7;
precision_eps = 12;
"""Giải và in kết quả. Kết quả trả về là dạng `list`, mỗi phần tử có cấu trúc sau:
$$ [x_{i}, Y_{i} = [y_{1}(x_{i}), y_{2}(x_{i}), ..., y_{r}(x_{i})]] $$
với $y_{k}(x_{i})$ là giá trị hàm số $y_{k}$ tại điểm $x_{i}$
"""
uu = rungekutta_multivariate_oop(expr, x_0, y_0, h, n);
ketqua = (uu.Solve());
print(f"Phương pháp Runge-Kutta {4} nấc với hoàn tất với lưới điểm sau, sai số toàn cục O(h^{4}) = {round(h**4, precision_eps)}:");
#}
for x in ketqua: print(round(x[0], precision_x), x[1]);
"""## 3. Đồ thị
### 3.1. Đồ thị liên hệ giữa các biến $y_{i}$ và $x$
"""
y1 = [];
y2 = [];
y3 = [];
y4 = [];
x = [];
for xx in ketqua: x.append(xx[0]);
for xx in ketqua: y1.append(xx[1][0]), y2.append(xx[1][1]);
plt.plot(x, y1, 'r')
plt.plot(x, y2, 'b')
plt.legend(["Con mồi", "Thú săn mồi"], loc=1)
plt.xlabel('Thời gian', fontsize=17)
plt.ylabel('Số lượng thú', fontsize=17)
plt.tight_layout()
plt.show()
"""### 3.2.Đồ thị liên hệ giữa các $y_{i}$:"""
y1 = [];
y2 = [];
y3 = [];
y4 = [];
x = [];
for xx in ketqua: x.append(xx[0]);
for xx in ketqua: y1.append(xx[1][0]), y2.append(xx[1][1]);
plt.xlabel('prey', fontsize=15)
plt.ylabel('predator', fontsize=15)
plt.plot(y1, y2, 'r')
plt.tight_layout()
plt.show()
|
{"hexsha": "0ae38e6d8f361d0d84272941ee2986325321b5df", "size": 2628, "ext": "py", "lang": "Python", "max_stars_repo_path": "Topic 5 - Solving Differential Equations/28.Runge_Kutta/R-K system of equation/main_rungekutta_multivar.py", "max_stars_repo_name": "Talented-K64MI/MI3040-Numerical-Analysis", "max_stars_repo_head_hexsha": "c7a173a7f2107f490a41cd8640952c001232d6fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-23T17:00:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T06:28:40.000Z", "max_issues_repo_path": "Topic 5 - Solving Differential Equations/28.Runge_Kutta/R-K system of equation/main_rungekutta_multivar.py", "max_issues_repo_name": "Talented-K64MI/MI3040-Numerical-Analysis", "max_issues_repo_head_hexsha": "c7a173a7f2107f490a41cd8640952c001232d6fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-22T17:08:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-20T12:00:59.000Z", "max_forks_repo_path": "Topic 5 - Solving Differential Equations/28.Runge_Kutta/R-K system of equation/main_rungekutta_multivar.py", "max_forks_repo_name": "Talented-K64MI/MI3040-Numerical-Analysis", "max_forks_repo_head_hexsha": "c7a173a7f2107f490a41cd8640952c001232d6fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-03T05:11:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T03:33:35.000Z", "avg_line_length": 21.3658536585, "max_line_length": 205, "alphanum_fraction": 0.5665905632, "include": true, "reason": "from sympy", "num_tokens": 1156}
|
'''
Copyright 2017 TensorFlow Authors and Kent Sommer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np
# Sys
import warnings
# Keras Core
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers import Input, Dropout, Dense, Flatten, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras import regularizers
from keras import initializers
from keras.models import Model
# Backend
from keras import backend as K
# Utils
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
#########################################################################################
# Implements the Inception Network v4 (http://arxiv.org/pdf/1602.07261v1.pdf) in Keras. #
#########################################################################################
WEIGHTS_PATH = 'https://github.com/kentsommer/keras-inceptionV4/releases/download/2.1/inception-v4_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/kentsommer/keras-inceptionV4/releases/download/2.1/inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5'
def preprocess_input(x):
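    # scale pixel values from [0, 255] to [-1, 1], the input range this network expects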
x = np.divide(x, 255.0)
x = np.subtract(x, 0.5)
x = np.multiply(x, 2.0)
return x
def conv2d_bn(x, nb_filter, num_row, num_col,
padding='same', strides=(1, 1), use_bias=False):
"""
Utility function to apply conv + BN.
(Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
"""
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
x = Convolution2D(nb_filter, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=use_bias,
kernel_regularizer=regularizers.l2(0.00004),
kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
x = Activation('relu')(x)
return x
def block_inception_a(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
branch_0 = conv2d_bn(input, 96, 1, 1)
branch_1 = conv2d_bn(input, 64, 1, 1)
branch_1 = conv2d_bn(branch_1, 96, 3, 3)
branch_2 = conv2d_bn(input, 64, 1, 1)
branch_2 = conv2d_bn(branch_2, 96, 3, 3)
branch_2 = conv2d_bn(branch_2, 96, 3, 3)
branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
branch_3 = conv2d_bn(branch_3, 96, 1, 1)
x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
return x
def block_reduction_a(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2,2), padding='valid')
branch_1 = conv2d_bn(input, 192, 1, 1)
branch_1 = conv2d_bn(branch_1, 224, 3, 3)
branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2,2), padding='valid')
branch_2 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(input)
x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
return x
def block_inception_b(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
branch_0 = conv2d_bn(input, 384, 1, 1)
branch_1 = conv2d_bn(input, 192, 1, 1)
branch_1 = conv2d_bn(branch_1, 224, 1, 7)
branch_1 = conv2d_bn(branch_1, 256, 7, 1)
branch_2 = conv2d_bn(input, 192, 1, 1)
branch_2 = conv2d_bn(branch_2, 192, 7, 1)
branch_2 = conv2d_bn(branch_2, 224, 1, 7)
branch_2 = conv2d_bn(branch_2, 224, 7, 1)
branch_2 = conv2d_bn(branch_2, 256, 1, 7)
branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
branch_3 = conv2d_bn(branch_3, 128, 1, 1)
x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
return x
def block_reduction_b(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
branch_0 = conv2d_bn(input, 192, 1, 1)
branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')
branch_1 = conv2d_bn(input, 256, 1, 1)
branch_1 = conv2d_bn(branch_1, 256, 1, 7)
branch_1 = conv2d_bn(branch_1, 320, 7, 1)
branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2,2), padding='valid')
branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
return x
def block_inception_c(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
branch_0 = conv2d_bn(input, 256, 1, 1)
branch_1 = conv2d_bn(input, 384, 1, 1)
branch_10 = conv2d_bn(branch_1, 256, 1, 3)
branch_11 = conv2d_bn(branch_1, 256, 3, 1)
branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)
branch_2 = conv2d_bn(input, 384, 1, 1)
branch_2 = conv2d_bn(branch_2, 448, 3, 1)
branch_2 = conv2d_bn(branch_2, 512, 1, 3)
branch_20 = conv2d_bn(branch_2, 256, 1, 3)
branch_21 = conv2d_bn(branch_2, 256, 3, 1)
branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)
branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
branch_3 = conv2d_bn(branch_3, 256, 1, 1)
x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
return x
def inception_v4_base(input):
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
# Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
net = conv2d_bn(input, 32, 3, 3, strides=(2,2), padding='valid')
net = conv2d_bn(net, 32, 3, 3, padding='valid')
net = conv2d_bn(net, 64, 3, 3)
branch_0 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
branch_1 = conv2d_bn(net, 96, 3, 3, strides=(2,2), padding='valid')
net = concatenate([branch_0, branch_1], axis=channel_axis)
branch_0 = conv2d_bn(net, 64, 1, 1)
branch_0 = conv2d_bn(branch_0, 96, 3, 3, padding='valid')
branch_1 = conv2d_bn(net, 64, 1, 1)
branch_1 = conv2d_bn(branch_1, 64, 1, 7)
branch_1 = conv2d_bn(branch_1, 64, 7, 1)
branch_1 = conv2d_bn(branch_1, 96, 3, 3, padding='valid')
net = concatenate([branch_0, branch_1], axis=channel_axis)
branch_0 = conv2d_bn(net, 192, 3, 3, strides=(2,2), padding='valid')
branch_1 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
net = concatenate([branch_0, branch_1], axis=channel_axis)
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):#4
net = block_inception_a(net)
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net)
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):#7
net = block_inception_b(net)
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net)
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
net = block_inception_c(net)
return net
def inception_v4(num_classes, dropout_keep_prob, weights, include_top,width):
'''
Creates the inception v4 network
Args:
num_classes: number of classes
dropout_keep_prob: float, the fraction to keep before final layer.
weights: 'imagenet' to load the pretrained ImageNet weights, or None.
include_top: whether to include the final pooling and classification layers.
width: input image width/height in pixels (299 for the pretrained weights).
Returns:
logits: the logits outputs of the model.
'''
# Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
if K.image_data_format() == 'channels_first':
inputs = Input((3, width, width))
else:
inputs = Input((width, width, 3))
# Make inception base
x = inception_v4_base(inputs)
# Final pooling and prediction
if include_top:
# 1 x 1 x 1536
x = AveragePooling2D((8,8), padding='valid')(x)
x = Dropout(dropout_keep_prob)(x)
x = Flatten()(x)
# 1536
x = Dense(units=num_classes, activation='softmax')(x)
model = Model(inputs, x, name='inception_v4')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
else:
weights_path = get_file(
'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='9296b46b5971573064d12e4669110969')
model.load_weights(weights_path, by_name=True)
return model
def create_model(num_classes=1001, dropout_prob=0.2, weights=None, include_top=True,width=299):
return inception_v4(num_classes, dropout_prob, weights, include_top,width)
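# Example (hypothetical usage, not from the original repo): build the pretrained
# ImageNet network with its classification head:
#   model = create_model(num_classes=1001, weights='imagenet', include_top=True, width=299)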
|
{"hexsha": "e68c7c7aac4290e56067a0892a962e239de8623a", "size": 10206, "ext": "py", "lang": "Python", "max_stars_repo_path": "inception_v4.py", "max_stars_repo_name": "lvwuyunlifan/crop", "max_stars_repo_head_hexsha": "7392d007a8271ff384c5c66ed5717afbc4172b4d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inception_v4.py", "max_issues_repo_name": "lvwuyunlifan/crop", "max_issues_repo_head_hexsha": "7392d007a8271ff384c5c66ed5717afbc4172b4d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inception_v4.py", "max_forks_repo_name": "lvwuyunlifan/crop", "max_forks_repo_head_hexsha": "7392d007a8271ff384c5c66ed5717afbc4172b4d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.462295082, "max_line_length": 150, "alphanum_fraction": 0.637468156, "include": true, "reason": "import numpy", "num_tokens": 3112}
|
[STATEMENT]
lemma residue_simple_pole:
assumes "isolated_singularity_at f z0"
assumes "is_pole f z0" "zorder f z0 = - 1"
shows "residue f z0 = zor_poly f z0 z0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. residue f z0 = zor_poly f z0 z0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
isolated_singularity_at f z0
is_pole f z0
zorder f z0 = - 1
goal (1 subgoal):
1. residue f z0 = zor_poly f z0 z0
[PROOF STEP]
by (subst residue_pole_order) simp_all
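(Informal gloss, not part of the proof: at a simple pole, zor_poly f z0 is the analytic function g with f(z) = g(z) / (z - z0) near z0, so the lemma is the classical formula residue f z0 = lim (z -> z0) of (z - z0) * f(z) = g(z0).)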
|
{"llama_tokens": 221, "file": null, "length": 2}
|
#!/usr/bin/python
import numpy as np
from numpy import argsort, loadtxt, savetxt
import scipy
from scipy import interpolate
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import rc, rcParams
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
rc('text', usetex=False)
rc('font', family='serif')
rcParams["xtick.labelsize"] = 14
rcParams["ytick.labelsize"] = 14
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(10)
yminorLocator2 = MultipleLocator(25)
xminorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(5)
ymajorLocator = MultipleLocator(50)
xmajorLocator = MultipleLocator(10)
rcParams['figure.figsize'] = 15.0, 10.0
#x, median_y, t_y, g_y,feh_y,chi_y = loadtxt('data_test.txt', usecols = (0,1,2,3,4,5), unpack =1)
#fig1 = pyplot.figure()
#ax0 = fig1.add_subplot(111)
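# NOTE: this script assumes `dataall` (spectra), `metaall` (labels), and `chis`
# (per-pixel scaled residuals) are already defined in the calling session; they
# are not loaded or computed here.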
fig, ax = plt.subplots()
sortindx = 2
sortname = ["Teff", "logg", "Fe/H"]
index_use = argsort(metaall[:,sortindx])
ax.set_title("Per-pixel scaled residuals ($\chi$); spectra ordered by %s" % (sortname[sortindx]),fontsize = 20 )
ax.set_xlabel("Wavelength, $\AA$",fontsize = 20,labelpad = 10 )
ax.set_ylabel("Star Number",fontsize = 20)
print "Ordered by %s" % (sortname[sortindx])
wl = dataall[:,0,0]
image = np.arcsinh(chis)
#image2 = np.insert(image[index_use].T, name_ind, values=-10, axis =1)
#test = ax.imshow(image[:,index_use].t, cmap=plt.cm.pink_r, interpolation="nearest", vmin = -5, vmax = 5 ,aspect = 'auto',origin = 'lower', extent = (wl.min(), wl.max(), 0, len(image.t)))
test = ax.imshow(image[:,index_use].T, cmap=plt.cm.pink_r, interpolation="nearest", vmin = -5, vmax = 5 ,aspect = 'auto',origin = 'lower', extent = (wl.min(), wl.max(), 0, len(image.T)))
cb = fig.colorbar(test)
cb.set_label("arcsinh($\chi$)", fontsize = 20 )
|
{"hexsha": "6a5543c78ac8505938c79da43ebe10a5031b2452", "size": 2287, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/makeplot_chi_general.py", "max_stars_repo_name": "HWRix/TheCannon", "max_stars_repo_head_hexsha": "d4c059e63b61be8cf9327b51970041898a4f4212", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/makeplot_chi_general.py", "max_issues_repo_name": "HWRix/TheCannon", "max_issues_repo_head_hexsha": "d4c059e63b61be8cf9327b51970041898a4f4212", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/makeplot_chi_general.py", "max_forks_repo_name": "HWRix/TheCannon", "max_forks_repo_head_hexsha": "d4c059e63b61be8cf9327b51970041898a4f4212", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8392857143, "max_line_length": 188, "alphanum_fraction": 0.7358985571, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 657}
|
import os
import argparse
import re
from glob import glob
import numpy
from matplotlib import pyplot
class DataObject(object):
    def __init__(self, file_pattern, log_pattern):
        self.files = glob(file_pattern)
        self.regex = re.compile(log_pattern)
        self.data_dict = {}
        for file in self.files:
            with open(file, 'r') as f:
                lines = f.readlines()
            matches = numpy.float32(numpy.array([self.regex.findall(line)[0] for line in lines if self.regex.findall(line)]))
            self.data_dict[file] = matches
    def plot(self):
        for key, value in self.data_dict.items():
            pyplot.plot(value, label=key)
        pyplot.legend()
        pyplot.show()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='log plotter',
                                     description='Script to plot log data.')
    parser.add_argument('file_pattern', help='Pattern for matching log files.')
    parser.add_argument('log_pattern', help='Pattern for getting data values.')
    args = parser.parse_args()
    # file_pattern = '*.txt'
    # log_pattern = '[0-9] (.*)'
    do = DataObject(args.file_pattern, args.log_pattern)
    do.plot()
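# Example invocation (hypothetical log format):
#   python log_plotter.py 'train_*.log' 'loss: ([0-9.]+)'
# plots the captured loss values from every matching log file on a single figure.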
|
{"hexsha": "ba5224f5fde55ce746d3917975bcd58482825d7f", "size": 1225, "ext": "py", "lang": "Python", "max_stars_repo_path": "ifp_toolbox/scripts/log_plotter.py", "max_stars_repo_name": "ifp-uiuc/ifp_toolbox", "max_stars_repo_head_hexsha": "e03472d06329aad1ba86e0d037e16cf7af195cd3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-02-13T19:14:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-02-13T19:14:27.000Z", "max_issues_repo_path": "ifp_toolbox/scripts/log_plotter.py", "max_issues_repo_name": "ifp-uiuc/ifp_toolbox", "max_issues_repo_head_hexsha": "e03472d06329aad1ba86e0d037e16cf7af195cd3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ifp_toolbox/scripts/log_plotter.py", "max_forks_repo_name": "ifp-uiuc/ifp_toolbox", "max_forks_repo_head_hexsha": "e03472d06329aad1ba86e0d037e16cf7af195cd3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8780487805, "max_line_length": 125, "alphanum_fraction": 0.6269387755, "include": true, "reason": "import numpy", "num_tokens": 260}
|
/*
* phold.hpp
*
* Copyright (c) 2016 Masatoshi Hanai
*
* This software is released under MIT License.
* See LICENSE.
*
*/
#ifndef PHOLD_PHOLD_HPP_
#define PHOLD_PHOLD_HPP_
#include <random>
#include <string>
#include <boost/serialization/serialization.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <glog/logging.h>
#include "scalesim/simulation.hpp"
#include "scalesim/util.hpp"
/**
* Benchmark program based on phold
*
* References
* "Warp Speed: Executing Time Warp on 1,966,080 Cores" (PADS '14)
* - http://dl.acm.org/citation.cfm?id=2486134
* "On Deciding Between Conservative and Optimistic Approaches
* on Massively Parallel Platforms" (WSC '10)
* - http://dl.acm.org/citation.cfm?id=2433588
* "Scalable Time Warp on Blue Gene Supercomputers" (PADS '09)
* - http://dl.acm.org/citation.cfm?id=1577971
*/
/* Parameter of PHOLD */
static long NUM_LP = 100;
static long NUM_INIT_MSG = 1600;
static double REMOTE_COM_RATIO = 0.1;
static double LAMBDA = 1.0;
static long RANDOM_SEED = 1;
/* Parameter of what-if */
static long NUM_WHAT_IF = 1;
static std::vector<long> TIME_OF_WHATIF;
static std::vector<long> LP_OF_WHATIF;
/* Parameter of System */
static long GSYNC_INTERVAL = 10;
static long LP_INTERVAL = 4;
static int GTW_CUT_INTERVAL = 50;
static const int EFFECTIVE_DECIMAL = 100000;
static const long LOOK_AHEAD = 0.1 * EFFECTIVE_DECIMAL;
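/* Simulation time is stored as a long in units of 1/EFFECTIVE_DECIMAL, so the
 * exponentially distributed latencies can be pre-tabulated as integers;
 * LOOK_AHEAD is the minimum extra delay (0.1 time units) added to every event. */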
static const int RAND_TABLE_SIZE = 10000000;
static long LATENCY_TABLE[RAND_TABLE_SIZE];
/* If value is 1, send to remote. Else if value is 0, send to local */
static int REMOTE_COM_TABLE[RAND_TABLE_SIZE];
class phold: public scalesim::application {
public:
class Event: public scalesim::sim_event {
friend class phold;
private:
long id_; long src_id_; long dst_id_;
long receive_time_; long send_time_; int num_hops_;
mutable scalesim::sim_event_base base_;
public:
Event(): id_(-1), src_id_(-1), dst_id_(-1),
receive_time_(-1), send_time_(-1), num_hops_(0) {};
virtual ~Event(){};
Event(long event_id, long src_id, long dst_id,
long receive_time, long send_time, int num_hops):
id_(event_id), src_id_(src_id), dst_id_(dst_id),
receive_time_(receive_time), send_time_(send_time),
num_hops_(num_hops) {};
Event(const Event& event) {
id_ = event.id_; src_id_ = event.src_id_; dst_id_ = event.dst_id_;
receive_time_ = event.receive_time_; send_time_ = event.send_time_;
num_hops_ = event.num_hops_;
base_ = event.base_;
};
public:
scalesim::sim_event_base* base() const { return &base_; };
long id() const { return id_; };
long source() const { return src_id_; };
long destination() const { return dst_id_; };
bool end() const { return true; };
long receive_time() const { return receive_time_; };
long send_time() const { return send_time_; };
int size() const { return sizeof(*this); };
int num_hops() const { return num_hops_; };
friend class boost::serialization::access;
private:
template<class Archive>
void serialize(Archive& ar, unsigned int version) {
ar & id_;
ar & src_id_;
ar & dst_id_;
ar & receive_time_;
ar & send_time_;
ar & num_hops_;
ar & base_;
}
}; /* class event */
class State : public scalesim::sim_state {
friend class phold;
private:
long id_;
public:
State(): id_(-1) {};
virtual ~State() {};
State(long id): id_(id){};
long id() const { return id_; }
int size() const { return sizeof(*this); }
void out_put() const {
std::cout << "state id: " << id_ << std::endl;
};
friend class boost::serialization::access;
private:
template<class Archive>
void serialize(Archive& ar, unsigned int version) {
ar & id_;
}
}; /* class State */
public:
/* System configuration */
static long gsync_interval() { return GSYNC_INTERVAL; };
static long switch_lp_interval() { return LP_INTERVAL; };
static int global_cut_interval() { return GTW_CUT_INTERVAL; };
static int num_thr() { return boost::thread::physical_concurrency(); };
/* Application configuration */
static long finish_time() { return 5 * EFFECTIVE_DECIMAL; }
/*
* Initiation function for application.
* It is invoked before all initiation functions.
*/
void init() {
/* Make random numbers table for deciding latency */
int ex_seed = RANDOM_SEED;
std::default_random_engine ex_generator(ex_seed);
std::exponential_distribution<double> ex_distribution(LAMBDA);
for (int i = 0; i < RAND_TABLE_SIZE; ++i) {
LATENCY_TABLE[i] = (long) (ex_distribution(ex_generator) * EFFECTIVE_DECIMAL);
}
/* Make random numbers table for deciding next logical process */
int uni_seed = RANDOM_SEED;
std::default_random_engine uni_generator(uni_seed);
std::uniform_real_distribution<double> uni_distribution(0.0, 1.0);
for (int i = 0; i < RAND_TABLE_SIZE; ++i) {
if (uni_distribution(uni_generator) < REMOTE_COM_RATIO) {
REMOTE_COM_TABLE[i] = 1; /* case of remote */
} else {
REMOTE_COM_TABLE[i] = 0; /* case of local */
}
}
};
/*
* Initialization function for partition and index.
* Partition format:
* - type: boost::shared_ptr<std::vector<long> >
* - value: the Nth entry holds the rank number owning ID=N
* Index format:
* - type: boost::shared_ptr<boost::unordered_multimap<long, long> >
* - key: rank number
* - value: the IDs assigned to that rank
std::pair<parti_ptr, parti_indx_ptr> init_partition_index(int rank_size) {
auto partition_ = boost::make_shared<std::vector<long> > (std::vector<long>());
auto index_ = boost::make_shared<boost::unordered_multimap<long, long> >(
boost::unordered_multimap<long, long>());
/* Naive round robin partitioning based on remainder. */
for (long i = 0; i < NUM_LP; ++i) {
partition_->push_back(i % rank_size);
auto kv = std::pair<int, long>(i % rank_size, i);
index_->insert(kv);
}
return std::pair<parti_ptr, parti_indx_ptr>(partition_, index_);
};
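/*
 * Worked example (hypothetical sizes): with NUM_LP = 4 and rank_size = 2,
 * the round-robin loop above yields
 *   partition = [0, 1, 0, 1]                (Nth entry = rank owning LP N)
 *   index     = {0 -> {0, 2}, 1 -> {1, 3}}  (rank -> LPs on that rank)
 * i.e. even-numbered LPs live on rank 0 and odd-numbered LPs on rank 1.
 */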
/*
* Initialization function for events.
* Initialized events are shuffled to their starting points after this function,
* so it is fine to read them in any order.
* For example, use the modulo operator based on rank_id and rank_size.
*/
void init_events(ev_vec<phold>& ret,
const int rank,
const int rank_size) {
for (long id = 0; id < NUM_INIT_MSG; ++id) {
if (id % rank_size == rank) {
ret.push_back(boost::make_shared<event<phold> > (
event<phold>(id, id % NUM_LP, id % NUM_LP,
LATENCY_TABLE[id % RAND_TABLE_SIZE],
LATENCY_TABLE[id % RAND_TABLE_SIZE],
0)));
}
}
};
/*
* Initialization function for states.
* Initialized states are NOT shuffled after this function,
* so you must initialize only the states owned by this rank, based on the partition.
*/
void init_states_in_this_rank(st_vec<phold>& new_state,
const int rank,
const int rank_size,
parti_ptr partition) {
for (long id = 0; id < NUM_LP; ++id) {
if ((*partition)[id] == rank) {
new_state.push_back(boost::make_shared<state<phold> > (state<phold>(id)));
}
}
};
/*
* Initialization function for what_if events.
* Initialized events are shuffled to their starting points after this function,
* so it is fine to read them in any order.
*/
void init_what_if(
std::vector<boost::shared_ptr<const scalesim::what_if<phold> > >& ret,
const int rank,
const int rank_size) {
/* Init what-if scenario */
for (long i = 0; i < NUM_WHAT_IF; ++i) {
if (i % rank_size == rank) {
long lp_id = i; /* Just select LP0, LP1, LP2, ... */
long time = 1; /* Every what-if starts at time 1 */
event<phold> add_event(NUM_INIT_MSG + i, lp_id, lp_id, time, time, 0);
ret.push_back(boost::make_shared<scalesim::what_if<phold> >(
scalesim::what_if<phold>(lp_id, time, add_event)));
}
}
};
/*
* Event handling function.
* The arguments (receive_event, state) are the previous values in the simulation.
* The return value (optional<pair<ev_vec, st_ptr> >) should include the
* new events and state derived from the arguments and your simulation model.
*
* If no new event or state is generated, return an empty option.
*/
boost::optional<std::pair<ev_vec<phold>, st_ptr<phold> > >
event_handler(ev_ptr<phold> receive_event, st_ptr<phold> state) {
ev_vec<phold> new_events;
long ev_id = receive_event->id();
long src_id = receive_event->destination();
long send_time = receive_event->receive_time();
int num_hops = 1 + receive_event->num_hops();
int rand_table_id = (int) (ev_id + (long) num_hops) % RAND_TABLE_SIZE;
long receive_time = send_time + LATENCY_TABLE[rand_table_id] + LOOK_AHEAD;
long dst_id = -1;
if (REMOTE_COM_TABLE[rand_table_id] == 1) {
/* Case of remote (send to different LP) */
dst_id = LATENCY_TABLE[rand_table_id] % NUM_LP;
} else { /* REMOTE_COM_TABLE[rand_table_id] == 0 */
/* Case of local (send to this LP) */
dst_id = receive_event->destination();
}
new_events.push_back(
boost::make_shared<event<phold> >(
event<phold>(ev_id, src_id, dst_id, receive_time, send_time, num_hops)));
return boost::optional<std::pair<ev_vec<phold>, st_ptr<phold> > > (
std::pair<ev_vec<phold>, st_ptr<phold> >(new_events, state));
};
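/*
 * Example of one hop (table values are runtime-dependent): an event handled
 * at LP d with receive time t is re-sent with send_time = t and
 * receive_time = t + LATENCY_TABLE[k] + LOOK_AHEAD, where
 * k = (event_id + num_hops) % RAND_TABLE_SIZE. REMOTE_COM_TABLE[k] decides
 * whether the destination stays at LP d (local) or becomes
 * LATENCY_TABLE[k] % NUM_LP (remote).
 */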
};
#endif /* PHOLD_PHOLD_HPP_ */
|
{"hexsha": "65d45fce27c239419e7da186e9ce5a093343f856", "size": 9813, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/phold/phold.hpp", "max_stars_repo_name": "asia-lab-sustech/ScaleSim", "max_stars_repo_head_hexsha": "614869fe9ff2092e6c1f219cbcf44391118517d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-06-26T15:11:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:38:47.000Z", "max_issues_repo_path": "src/phold/phold.hpp", "max_issues_repo_name": "asia-lab-sustech/ScaleSim", "max_issues_repo_head_hexsha": "614869fe9ff2092e6c1f219cbcf44391118517d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/phold/phold.hpp", "max_forks_repo_name": "asia-lab-sustech/ScaleSim", "max_forks_repo_head_hexsha": "614869fe9ff2092e6c1f219cbcf44391118517d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2019-05-02T12:21:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T07:45:07.000Z", "avg_line_length": 34.4315789474, "max_line_length": 85, "alphanum_fraction": 0.6448588607, "num_tokens": 2605}
|
#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
__author__ = 'Yaroslav Halchenko'
__copyright__ = 'Copyright (c) 2013 Yaroslav Halchenko'
__license__ = 'MIT'
import numpy as np
test_variable = "just so we could check if things are loaded/available correctly"
|
{"hexsha": "c30188960ee11e8fb6e3b236d895631670aa0efb", "size": 338, "ext": "py", "lang": "Python", "max_stars_repo_path": "vbench/tests/vbenchtest/vb_common.py", "max_stars_repo_name": "DataDog/vbench", "max_stars_repo_head_hexsha": "a4e4497bed2778989fb714c2537cff03438e9ae6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2015-01-11T23:50:01.000Z", "max_stars_repo_stars_event_max_datetime": "2016-04-13T03:41:45.000Z", "max_issues_repo_path": "vbench/tests/vbenchtest/vb_common.py", "max_issues_repo_name": "pydata/vbench", "max_issues_repo_head_hexsha": "78bf5ff1972ae4ef3573222c739fa97a1e289984", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-10-12T19:28:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T13:53:32.000Z", "max_forks_repo_path": "vbench/tests/vbenchtest/vb_common.py", "max_forks_repo_name": "pydata/vbench", "max_forks_repo_head_hexsha": "78bf5ff1972ae4ef3573222c739fa97a1e289984", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-03-15T19:21:44.000Z", "max_forks_repo_forks_event_max_datetime": "2016-03-14T11:35:18.000Z", "avg_line_length": 30.7272727273, "max_line_length": 92, "alphanum_fraction": 0.724852071, "include": true, "reason": "import numpy", "num_tokens": 107}
|
{-# OPTIONS --without-K #-}
module PathStructure.Coproduct {a b} {A : Set a} {B : Set b} where
open import Equivalence
open import PathOperations
open import Types
-- We need to use Lift here, because Agda doesn't have
-- cumulative universes.
F : A ⊎ B → A ⊎ B → Set (a ⊔ b)
F = case (λ _ → A ⊎ B → Set _)
(λ a₁ → case (λ _ → Set _)
(λ a₂ → Lift b (a₁ ≡ a₂))
(λ _ → Lift _ ⊥))
(λ b₁ → case (λ _ → Set _)
(λ _ → Lift _ ⊥)
(λ b₂ → Lift a (b₁ ≡ b₂)))
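-- Unfolding F: F (inl a₁) (inl a₂) is Lift b (a₁ ≡ a₂), F (inr b₁) (inr b₂)
-- is Lift a (b₁ ≡ b₂), and both mixed cases are Lift _ ⊥. This is the usual
-- encode-decode characterisation of path spaces in a coproduct.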
F-lemma : (x : A ⊎ B) → F x x
F-lemma = case (λ x → F x x) (λ _ → lift refl) (λ _ → lift refl)
split-path : {x y : A ⊎ B} → x ≡ y → F x y
split-path {x = x} p = tr (F x) p (F-lemma x)
merge-path : {x y : A ⊎ B} → F x y → x ≡ y
merge-path = case (λ x → ∀ y → F x y → x ≡ y)
(λ a → case (λ y → F (inl a) y → inl a ≡ y)
(λ _ → ap inl ∘ lower)
(λ _ → 0-elim ∘ lower))
(λ b → case (λ y → F (inr b) y → inr b ≡ y)
(λ _ → 0-elim ∘ lower)
(λ _ → ap inr ∘ lower))
_ _
split-merge-eq : {x y : A ⊎ B} → (x ≡ y) ≃ F x y
split-merge-eq {x = x} {y = y}
= split-path
, (merge-path , λ f → case
(λ x → ∀ y (f : F x y) → split-path (merge-path {x} {y} f) ≡ f)
(λ a → case
(λ y → (f : F (inl a) y) →
split-path (merge-path {inl a} {y} f) ≡ f)
(λ a′ p → J
(λ a a′ p →
split-path (merge-path {inl a} {inl a′} (lift p)) ≡ lift p)
(λ _ → refl) _ _ (lower p))
(λ _ → 0-elim ∘ lower))
(λ b → case
(λ y → (f : F (inr b) y) →
split-path (merge-path {inr b} {y} f) ≡ f)
(λ _ → 0-elim ∘ lower)
(λ b′ p → J
(λ b b′ p →
split-path (merge-path {inr b} {inr b′} (lift p)) ≡ lift p)
(λ _ → refl) _ _ (lower p)))
x y f)
, (merge-path , J (λ _ _ p → merge-path (split-path p) ≡ p)
(case
(λ x → merge-path {x} {x} (split-path {x} {x} refl) ≡ refl)
(λ _ → refl)
(λ _ → refl))
_ _)
|
{"hexsha": "98cf757193b5f86df3ebd2355b3d622fe1455360", "size": 1939, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/PathStructure/Coproduct.agda", "max_stars_repo_name": "vituscze/HoTT-lectures", "max_stars_repo_head_hexsha": "7730385adfdbdda38ee8b124be3cdeebb7312c65", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PathStructure/Coproduct.agda", "max_issues_repo_name": "vituscze/HoTT-lectures", "max_issues_repo_head_hexsha": "7730385adfdbdda38ee8b124be3cdeebb7312c65", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PathStructure/Coproduct.agda", "max_forks_repo_name": "vituscze/HoTT-lectures", "max_forks_repo_head_hexsha": "7730385adfdbdda38ee8b124be3cdeebb7312c65", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7777777778, "max_line_length": 71, "alphanum_fraction": 0.4533264569, "num_tokens": 810}
|
abstract type Ordering end
Base.iterate(ordering::Ordering, state = 0) = state > 0 ? nothing : (ordering, state + 1)
Base.length(ordering::Ordering) = 1
Base.show(io::IO, ordering::Ordering) = print(io, string(ordering))
Base.show(io::IO, ::MIME"application/prs.juno.inline", ordering::Ordering) = print(io, string(ordering))
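# Note: the iterate/length definitions above make any Ordering collect to a
# one-element vector, so it broadcasts like a scalar in calls such as
# more_extreme.(XX[:,1], XX[:,2], x1, x2, ordering, design) without Ref().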
struct EstimatorOrdering{TE<:Estimator} <: Ordering
estimator::TE
orientation::Symbol
end
function EstimatorOrdering(estimator::Estimator; orientation::Symbol = :superiority)
EstimatorOrdering{typeof(estimator)}(estimator, orientation)
end
Base.string(ordering::EstimatorOrdering) = "EstimatorOrdering"
function more_extreme(x1::TI, x2::TI, x1_::TI, x2_::TI,
ordering::EstimatorOrdering, design::TD; orientation = nothing) where {TI<:Integer,TD<:AbstractDesign}
orientation = (orientation == nothing) ? ordering.orientation : orientation
if orientation == :superiority
return ordering.estimator(x1, x2, design) >= ordering.estimator(x1_, x2_, design)
end
if orientation == :inferiority
return ordering.estimator(x1, x2, design) <= ordering.estimator(x1_, x2_, design)
end
error("orientation must be :superiororit or :inferiority")
end
function more_extreme(x1::TI, x2::TI, x1_::TI, x2_::TI, estimator::TE,
design::TD; orientation = nothing) where {TI<:Integer,TE<:Estimator,TD<:AbstractDesign}
return more_extreme(x1, x2, x1_, x2_, EstimatorOrdering(estimator), design,
orientation = orientation)
end
function p_value(x1::TI, x2::TI, p0::TR, ordering::TO, design::TD; orientation = nothing) where {TI<:Integer,TR<:Real,TO<:Ordering,TD<:AbstractDesign}
XX = sample_space(design)
inds = more_extreme.(XX[:,1], XX[:,2], x1, x2, ordering, design, orientation = orientation)
return min(1, max(0, sum(
pmf.(XX[inds,2], n2.(design, XX[inds,1]), p0) .*
pmf.(XX[inds,1], n1(design), p0)
) ) )
end
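# In formula form, the returned p-value is
#   p(x1, x2) = clip( Σ_{(y1,y2) at least as extreme as (x1,x2)}
#                     pmf(y1; n1, p0) * pmf(y2; n2(y1), p0), 0, 1 ),
# where "at least as extreme" is decided by more_extreme under the chosen
# ordering and orientation.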
function p_value(x1::TI, x2::TI, p0::TR, estimator::TE, design::TD; orientation::Symbol = :superiority) where {TI<:Integer,TR<:Real,TE<:Estimator,TD<:AbstractDesign}
return p_value(x1, x2, p0, EstimatorOrdering(estimator, orientation), design)
end
function compatible(ordering::Ordering, design::AbstractDesign, pnull::Real, α::Real)
@assert (0 <= pnull <= 1) "pnull must be in [0,1]"
XX = sample_space(design)
design_rejects = reject.(XX[:,1], XX[:,2], design)
pvals = p_value.(XX[:,1], XX[:,2], pnull, ordering, design)
ordering_rejects = pvals .< α
incompatibility_degree = sum(design_rejects .& .!ordering_rejects)
df = DataFrames.DataFrame(
x1 = XX[:,1],
x2 = XX[:,2],
design_rejects = design_rejects,
ordering_rejects = ordering_rejects,
p_value = pvals
)
return Dict{String,Any}(
"compatible" => incompatibility_degree == 0,
"incompatibility degree" => incompatibility_degree,
"details" => df
)
end
function mlecompatible(design::AbstractDesign, pnull::Real, α::Real)
return compatible(EstimatorOrdering(MaximumLikelihoodEstimator()), design, pnull, α)
end
|
{"hexsha": "72cf1bda44e1c1b19cb71716618be780ae8705f7", "size": 3176, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/orderings/Ordering.jl", "max_stars_repo_name": "JuliaTagBot/bad.jl", "max_stars_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/orderings/Ordering.jl", "max_issues_repo_name": "JuliaTagBot/bad.jl", "max_issues_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-10T17:25:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-06T10:33:25.000Z", "max_forks_repo_path": "src/orderings/Ordering.jl", "max_forks_repo_name": "JuliaTagBot/bad.jl", "max_forks_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:43:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:43:02.000Z", "avg_line_length": 35.2888888889, "max_line_length": 165, "alphanum_fraction": 0.6722292191, "num_tokens": 950}
|
! MAIN.F
! **********************************************************************
! "PARAMETERS"
! R= GAS CONSTANT KCAL/(MOL-K)
! D0= FREQUENCY FACTOR (1/SEC)
! "INPUT"
! NUSA=# OF SAMPLES
! NSAMP=# OF DIFFERENT DIFF. DOMAINs
! E= ACTIVATION ENERGY (KCAL/MOL)
! ORD = LOG (Doi/Ro**2)
! C(J)= VOL. FRAC. OF Jth DOMAIN
! RP(J)= PLATEAU SIZE (LOG(R/Ro) PLOT)
! NI=# OF HEATING STEPS
! TELAB = TEMP. STEP (K)
! TILAB= STEP HEATING DURATION (MIN)
! "OUTPUT"
! TINV = INVERSE LAB. TEMP. (10000/K)
! DZX = - LOG(D/R**2) (1/SEC)
! F(J)*100 = CUMULATIVE % 39-AR RELEASED
! AVT = (F(J)+F(J-1))*50
! XLOGR= LOG(R/Ro)
! RAD(J) = SIZE OF THE Jth DIFF. DOMAIN
! C(J) = VOL. FRAC. OF Jth DOMAIN
! "INPUT FILES" "UNIT" "PROGRAM"
! TEMSTEP.IN 10 MAIN
! FJ.IN 12 MAIN
! "OUTPUT FILES" "UNIT" "PROGRAM"
! ARR-ME.IN 14 MAIN
! PARAM.OUT 28 MAIN
! DIST.DAT 32 MAIN
! ARR.SAMP 20 DIFF
! LOGR.SAMP 22 DIFF
! ENER.OUT 30 PARAM
! ARR.DAT 16 ARR
! LOGR.DAT 18 ARR
! ***********************************************************************
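! "HYPOTHETICAL INPUT LAYOUT" (INFERRED FROM THE READ STATEMENTS BELOW):
! TEMSTEP.IN: FIRST LINE IS NI, THEN ONE "TELAB TILAB" PAIR PER STEP, E.G.
! 3
! 723. 10.
! 773. 10.
! 823. 10.
! FJ.IN: ONE CUMULATIVE 39-AR FRACTION F39 PER STEP, E.G. 0.05, 0.21, 0.55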
implicit double precision (a-h, o-z)
parameter(nca = 20, ns = 200)
dimension a1(nca), a2(nca), auxal(nca, nca), auxa2(nca)
dimension telab(ns), tilab(ns), auxa1(nca), dyda(nca)
dimension zi(0:ns), wf(ns), xlogd(0:ns), xlogr(0:ns)
dimension covar(nca, nca), alpha(nca, nca), lista(nca), da(nca)
dimension f39(0:ns), f(0:ns), wksp(nca), iwksp(nca)
character tab1*9, yes*1
tab1 = char(09)
open(unit = 10, file = 'temstep.in', status = 'old')
open(unit = 12, file = 'fj.in', status = 'old')
open(unit = 14, file = 'arr-me.in', status = 'unknown')
open(unit = 28, file = 'param.out', status = 'unknown')
open(unit = 32, file = 'dist.dat', status = 'unknown')
pi = 3.141592654
ee = dexp(1.d00)
R = 1.987E-3
f(0) = 0.
zi(0) = 0.
b = 8.
imp = 2.
acut = .60
dchmin = 0.01
ncons = 0.
ndom = 8
mdom = 3
open(unit = 42, file = 'autoarr.cl', status = 'old')
!read (42, *) yes
! print *,'This program calculates all the parameters
! $ (i.e. E, Do, etc) necessary to model the 39Ar data.'
! print *, 'However, If you want to introduce your own parameters
! $ enter "y" now, otherwise type "n" and relax.'
read (42, *) yes
If(yes.eq.'1') then
! print *, 'if you still want to use the default
! $ value of a specific parameter, type 0 at the prompt'
! print *
! print *, ' type number of max domains, <= 10, (Default is 8)'
read (42,*) naux
if(naux.ne.0)ndom = naux
! print *, ' type number of min domains, > 2 (Default is 3)'
read (42,*) naux
if(naux.ne.0)mdom = naux
! print *, 'to keep Do fix type 1, otherwise type 0'
read (42,*) ncons
endif
read(10, *)ni
nimax = ni
do 10 nt = 1, ni
read(10, *)telab(nt), tilab(nt)
read(12, *)f39(nt)
if(f39(nt).gt.0.2.and.nt.eq.1)then
read(12, *)f39(nt)
read(12, *)f39(nt)
endif
tilab(nt) = 60. * tilab(nt)
if(ni.eq.nimax.and.telab(nt).gt.1373)nimax = nt - 1
10 continue
call diff(ord, E, f39, telab, tilab, xlogr, xlogd, wf, ni, xro, yes)
print *, 'E=', e, ' Ordinate=', ord
write(28, *)'E=', e, ' Ordinate=', ord
write(28, *)
ni = nimax
call zita(ni, zi, e, ord, tilab, telab)
ckchisq = 1.0e30
ckchin = 1.0e30
mct = 0
iseed = xro
70 call guess(ndom, a1, a2, xro, iseed)
if(mct.gt.30) then
print *, 'Warning: Too many iterations, &
consult your computer voodoo'
if(ncicle.gt.0)ncicle = 4
amax = 0.
mct = 0
chisq = 1.0e30
goto 54
endif
nc = 0
na = 2. * ndom - 1
do 12 j = 1, na
lista(j) = j
12 continue
mfit = na
if(ncons.eq.1)mfit = na - 1
alamda = -1.
kc = 0.
ch = -1.
alam = 0.001
26 call mrqmin(zi, f39, wf, ni, a2, na, lista, mfit, covar, alpha, &
nca, chisq, alamda, amax)
do 52 j = 1, na, 2
if(a2(j + 1).lt.-14) amax = -1.
do 52 k = 1, na, 2
if(j.eq.k) goto 52
if(a2(j).eq.a2(k)) amax = -1
52 continue
if(amax.eq.-1.) then
mct = mct + 1
goto 70
endif
if(alam.gt.alamda)then
nc = 0
else
nc = nc + 1
if (nc.le.50) goto 38
mct = mct + 1
goto 70
endif
chisqn = chisq
if(chisq.gt.1.) chisqn = 1.
dchisq = abs((chisq - ch) / chisqn)
kc = kc + 1
if(dchisq.ge.dchmin.and.kc.le.100.or.kc.lt.5) goto 38
84 write(28, *)'# dom=', ndom, ' Isteps=', kc, &
' nc=', nc, ' chisq=', chisq
goto 54
72 alamda = 0.
ndom = (na + 1) / 2
call mrqmin(zi, f39, wf, ni, a2, na, lista, mfit, covar, &
alpha, nca, chisq, alamda, amax)
if (amax.eq.-1)stop 'stop 1: Consult your voodoo'
do 24 nt = 1, ni
call funcs(zi(nt), a2, y, dyda, na, a1, amax)
f(nt) = y
if(amax.eq.-1)stop 'stop 3: Consult your voodoo'
24 continue
call sort3(2 * ndom, a1, a2, da, wksp, iwksp)
rpmax = a1(na)
xlog = ord - 2. * dlog10(rpmax)
write(14, 120)ndom
write(28, 140)
sumc = 0.
do 28 j = 1, na + 1, 2
write(14, 100)e
ordj = xlog - 2. * dlog10(a1(j) / rpmax)
write(14, 100)ordj
write(14, 105)a1(j + 1)
write(28, 115)(j + 1) / 2, a1(j + 1), a1(j) / rpmax
write(32, *)sumc, log(a1(j))
sumc = sumc + a1(j + 1)
write(32, *)sumc, log(a1(j))
28 continue
write(28, *)
write(28, *)ckchisq
slop = e * dlog10(ee) / 10000. / r
write(14, *)slop
write(14, *)ord
call arr(f, tilab, telab, ni, e, ord)
stop 'end iteration'
54 continue
if(ckchisq.gt.chisq) then
do 64 j = 1, na + 1
auxa2(j) = a2(j)
do 64 k = 1, na + 1
auxal(j, k) = alpha(j, k)
64 continue
auxna = na
ckchisq = chisq
endif
if(ckchin.gt.chisq) then
call funcs(zi(1), a2, y, dyda, na, auxa1, amax)
if(amax.eq.-1)stop 'stop 2: Consult your voodoo'
ckchin = chisq
endif
if(ncicle.lt.4)then
ncicle = ncicle + 1
else
call sort3(2 * ndom, auxa1, a2, da, wksp, iwksp)
ndom = ndom - 1
mct = 0
ncicle = 0
ckchin = 1.0e30
sumc = 0.
do 68 j = 1, na, 2
write(32, 100)sumc, log(auxa1(j))
sumc = sumc + auxa1(j + 1)
write(32, *)sumc, log(auxa1(j))
68 continue
write(32, 150)
if(ndom.eq.mdom - 1)then
do 66 j = 1, auxna + 1
a2(j) = auxa2(j)
do 66 k = 1, auxna + 1
alpha(j, k) = auxal(j, k)
66 continue
na = auxna
mfit = na
if(ncons.eq.1)mfit = na - 1
print *, '# of domains =', (na + 1) / 2
amax = 0.
goto 72
endif
endif
goto 70
38 ch = chisq
alam = alamda
goto 26
100 format(G20.8)
105 format(f12.8)
110 format(1X, 5(F12.8, A1))
115 format(1X, I4, 3x, f9.5, 7x, f9.5)
120 format(I5)
130 format(6x, "tinv", 8x, "Log(D/r2)", 7x, "f(k)*100", 8x, &
"Log(r/ro)", 8x, "39Ar-av")
140 format(1x, 'domain #', 10x, 'volume fraction', 15x, 'domain size')
150 format(1x, '&
&')
160 format(A7)
170 format(1x, i4, 4(a1, g20.8))
end
! SUBROUTINE ARR.F
! **********************************************************************
! "PARAMETERS"
! R= GAS CONSTANT KCAL/(MOL-K)
! D0= FREQUENCY FACTOR (1/SEC)
! "INPUT"
! NUSA=# OF SAMPLES
! NSAMP=# OF DIFFERENT DIFF. DOMAINs
! E= ACTIVATION ENERGY (KCAL/MOL)
! ORD = LOG (Doi/Ro**2)
! C(J)= VOL. FRAC. OF Jth DOMAIN
! RP(J)= PLATEAU SIZE (LOG(R/Ro) PLOT)
! NI=# OF HEATING STEPS
! TELAB = TEMP. STEP (K)
! TILAB= STEP HEATING DURATION (MIN)
! "OUTPUT"
! TINV = INVERSE LAB. TEMP. (10000/K)
! DZX = - LOG(D/R**2) (1/SEC)
! F(J)*100 = CUMULATIVE % 39-AR RELEASED
! AVT = (F(J)+F(J-1))*50
! XLOGR= LOG(R/Ro)
! RAD(J) = SIZE OF THE Jth DIFF. DOMAIN
! C(J) = VOL. FRAC. OF Jth DOMAIN
! ***********************************************************************
subroutine arr(f, tilab, telab, ni, e, ord)
implicit double precision (a-h, o-z)
parameter(ns = 200)
dimension avt(ns)
dimension telab(ni), f(0:ni), zx(ns), tilab(ni)
character tab1*9, mo*3
tab1 = char(09)
open(unit = 16, file = 'arr.dat', status = 'unknown')
open(unit = 18, file = 'logr.dat', status = 'unknown')
pi = 3.14159265
ee = dexp(1.d00)
R = 1.987E-3
b = 8.
imp = 2
acut = 0.50
mo = 'sla'
! INVERSION OF 39-F
do 20 k = 1, ni
if (f(k).gt.acut) goto 22
if (mo.eq.'sph') goto 30
zx(k + 1) = pi * (f(k) / 4.)**2
goto 20
30 zx(k + 1) = (2. - pi / 3. * f(k) - 2. * dsqrt(1. - pi / 3. * f(k))) / pi
goto 20
22 zx(k + 1) = -dlog(pi**2 / b * (1. - f(k))) / pi**2
20 continue
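! The two branches above are the standard early/late-time approximations
! for fractional loss: for f below acut, zita = pi*(f/4)**2; above acut,
! zita = -log(pi**2/b*(1-f))/pi**2, the leading term of the exact series
! (the 'sph' branch is the spherical-geometry analogue).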
zx(1) = 0.
slop = E * dlog10(ee) / 10000. / R
do 26 k = 1, ni
avt(k) = (f(k) + f(k - 1)) / 2. * 100.
dzx = dlog10((zx(k + 1) - zx(k)) / tilab(k) * imp**2)
tinv = 1. / telab(k) * 10000.
xlogr = (ord - slop * tinv - dzx) / 2.
write(18, 110)f(k - 1) * 100., tab1, xlogr
write(18, 110)f(k) * 100., tab1, xlogr
26 write(16, 110)tinv, tab1, dzx
write(16, *)'&
&'
write(18, *)'&
&'
2 continue
110 format(1X, 5(F12.8, A1))
return
end
! SUBROUTINE DIFF.F
! SUBROUTINE PARAM.F
! SUBROUTINES FIT, GSER, and GCF - FUNCTIONS GAMMQ and GAMMLN
subroutine diff(ord, E, f, telab, tilab, xlogr, xlogd, wt, ni, xro, yes)
implicit double precision (a-h, o-z)
parameter(ns = 200)
dimension f(0:ni), telab(ni), tilab(ni), xlogr(0:ni)
dimension xlogd(0:ni), tinv(ns), wt(ni)
character tab1*9, yes*1
tab1 = char(09)
open(unit = 20, file = 'arr.samp', status = 'unknown')
open(unit = 22, file = 'logr.samp', status = 'unknown')
acut = 0.5
imp = 2.
b = 8.
xlogr(0) = 0.
pi = 3.141592654
ee = dlog10(dexp(1.d00))
r = 1.987e-3
! CALCULATION OF LOG(D/R^2)
do 10 k = 1, ni
if (f(k).le.acut) then
xlogr(k) = pi * (f(k) / 4.)**2
else
xlogr(k) = -dlog(pi**2 / b * (1. - f(k))) / pi**2
endif
10 continue
sumwt = 0.
nix = ni
do 20 k = 1, ni
if(nix.eq.ni.and.telab(k).gt.1423)nix = k - 1
wt(k) = 1. / dsqrt(f(k) - f(k - 1))
xlogd(k) = dlog10((xlogr(k) - xlogr(k - 1)) / tilab(k) * imp**2)
tinv(k) = 1. / telab(k) * 10000.
write(20, 110)tinv(k), tab1, xlogd(k)
sumwt = sumwt + wt(k)
20 continue
do 25 k = 1, ni
wt(k) = wt(k) / sumwt
25 continue
! CALCULATION OF E AND Do/Ro^2
call param(ni, tinv, xlogd, wt, e, ord)
if(yes.eq.'y')then
print *, 'Type activation energy in kcal/mol, E='
read *, auxe
if(auxe.ne.0) then
e = auxe
print *, 'Enter ordinate of log.vs.10000/t plot, &
log(Do/ro^2)='
read *, ord
endif
endif
slop = e * ee / (r * 10000)
xro = (ord - slop * tinv(nix) - xlogd(nix)) / 2. * (1. + (1. - f(nix)) / 2.)
if(yes.eq.'y')then
print *, 'type the max plateau of log(r/ro)'
read *, auxro
if(auxro.ne.0)xro = auxro
endif
print *, 'all the parameters are set now, relax'
do 30 k = 1, ni
xlogr(k) = (ord - slop * tinv(k) - xlogd(k)) / 2.
write(22, 110)f(k - 1) * 100., tab1, xlogr(k)
write(22, 110)f(k) * 100., tab1, xlogr(k)
30 continue
return
110 format(1X, 5(F12.8, A1))
end
! SUBROUTINE PARAM.F
subroutine param(ni, tinv, xlogd, wt, e, ord)
implicit double precision (a-h, o-z)
parameter (ns = 200, nstop = 20, mwt = 1)
dimension tinv(ni), xlogd(0:ni), wt(ni), y(ns), alog(ns)
open(unit = 30, file = 'ener.out', status = 'unknown')
nst = nstop
kmax = 2
dymin = 100.
ee = dlog10(dexp(1.d00))
r = 1.987e-3
y(2) = 0.
if(ni.lt.nstop) then
nst = ni
endif
do 10 k = 3, nst
call fit(tinv, xlogd, k, wt, mwt, a, b, siga, sigb, chi2, q)
y(k) = -r * b * 10000. / ee
alog(k) = a
write(30, 100)k, y(k), alog(k)
10 continue
do 20 k = 3, nst
dy = y(k + 1) - y(k)
ddy = y(k - 1) - 2. * y(k) + y(k + 1)
if(y(k).gt.y(kmax))kmax = k
if(abs(dy).le.dymin.and.ddy.le.0.)then
dymin = abs(dy)
kmin = k
endif
if(dy.lt.0.) then
ndec = ndec + 1
else
ndec = 0.
endif
if(ndec.gt.4) then
e = (y(kmin) + y(kmin + 1)) / 2.
ord = (alog(kmin) + alog(kmin + 1)) / 2
kmax = 0
return
endif
20 continue
if(kmax.gt.0)then
e = y(kmax)
ord = alog(kmax)
print *, 'Warning: auto did not get a real maximum for E.'
print *, 'You should check the ener.out output file'
print *, 'and calculate E manually if necessary.'
endif
100 format(1x, i5, 5(f16.8))
return
end
! SUBROUTINES FIT, GSER, and GCF - FUNCTIONS GAMMQ and GAMMLN
subroutine fit(x, y, ndata, sig, mwt, a, b, siga, sigb, chi2, q)
implicit double precision (a-h, o-z)
dimension x(ndata), y(0:ndata), sig(ndata)
sx = 0.
sy = 0.
st2 = 0.
b = 0.
if(mwt.ne.0) then
ss = 0.
do 11 i = 1, ndata
wt = 1. / (sig(i)**2)
ss = ss + wt
sx = sx + x(i) * wt
sy = sy + y(i) * wt
11 continue
else
do 12 i = 1, ndata
sx = sx + x(i)
sy = sy + y(i)
12 continue
ss = float(ndata)
endif
sxoss = sx / ss
if(mwt.ne.0) then
do 13 i = 1, ndata
t = (x(i) - sxoss) / sig(i)
st2 = st2 + t * t
b = b + t * y(i) / sig(i)
13 continue
else
do 14 i = 1, ndata
t = x(i) - sxoss
st2 = st2 + t * t
b = b + t * y(i)
14 continue
endif
b = b / st2
a = (sy - sx * b) / ss
siga = dsqrt((1. + sx * sx / (ss * st2)) / ss)
sigb = dsqrt(1. / st2)
chi2 = 0.
if(mwt.eq.0) then
do 15 i = 1, ndata
chi2 = chi2 + (y(i) - a - b * x(i))**2
15 continue
q = 1.
sigdat = dsqrt(chi2 / (ndata - 2))
siga = siga * sigdat
sigb = sigb * sigdat
else
do 16 i = 1, ndata
chi2 = chi2 + ((y(i) - a - b * x(i)) / sig(i))**2
16 continue
! q=gammq(0.5*(ndata-2),0.5*chi2)
endif
return
end
function gammq(a, x)
implicit double precision (a-h, o-z)
if(x.lt.0..or.a.le.0.)stop 'ERROR(GAMMQ): (x.lt.0..or.a.le.0.)'
if(x.lt.a + 1.)then
call gser(gamser, a, x, gln)
gammq = 1. - gamser
else
call gcf(gammcf, a, x, gln)
gammq = gammcf
endif
return
end
subroutine gser(gamser, a, x, gln)
implicit double precision (a-h, o-z)
parameter (itmax = 100, eps = 3.e-7)
gln = gammln(a)
if(x.le.0.)then
if(x.lt.0.)stop 'ERROR(GSER): (x.lt.0.)'
gamser = 0.
return
endif
ap = a
sum = 1. / a
del = sum
do 11 n = 1, itmax
ap = ap + 1.
del = del * x / ap
sum = sum + del
if(abs(del).lt.abs(sum) * eps)go to 1
11 continue
stop 'ERROR(GSER): a too large, itmax too small'
1 gamser = sum * dexp(-x + a * dlog(x) - gln)
return
end
subroutine gcf(gammcf, a, x, gln)
implicit double precision (a-h, o-z)
parameter (itmax = 100, eps = 3.e-7)
gln = gammln(a)
gold = 0.
a0 = 1.
a1 = x
b0 = 0.
b1 = 1.
fac = 1.
do 11 n = 1, itmax
an = float(n)
ana = an - a
a0 = (a1 + a0 * ana) * fac
b0 = (b1 + b0 * ana) * fac
anf = an * fac
a1 = x * a0 + anf * a1
b1 = x * b0 + anf * b1
if(a1.ne.0.)then
fac = 1. / a1
g = b1 * fac
if(abs((g - gold) / g).lt.eps)go to 1
gold = g
endif
11 continue
stop 'ERROR(GCF): a too large, itmax too small'
1 gammcf = dexp(-x + a * dlog(x) - gln) * g
return
end
double precision function gammln(xx)
real*8 cof(6), stp, half, one, fpf, x, xx, tmp, ser
data cof, stp/76.18009173d0, -86.50532033d0, 24.01409822d0, &
-1.231739516d0, .120858003d-2, -.536382d-5, 2.50662827465d0/
data half, one, fpf/0.5d0, 1.0d0, 5.5d0/
x = xx - one
tmp = x + fpf
tmp = (x + half) * dlog(tmp) - tmp
ser = one
do 11 j = 1, 6
x = x + one
ser = ser + cof(j) / x
11 continue
gammln = tmp + dlog(stp * ser)
return
end
! SUBROUTINE FUNCS
Subroutine funcs(x, b, y, dyda, na, a, amax)
implicit double precision (a-h, o-z)
parameter (nmax = 21)
dimension a(na + 1), dyda(na + 1), b(na + 1), csh(nmax)
! the multiplication by 4 stands for the division of Do
if(na.eq.0)return
pi = 3.141592654
y = 0.
as = 1.
do 5 j = 1, na, 2
if(b(j).lt.-50.)then
a(j) = 0
else
a(j) = dexp(b(j))
endif
if(b(j + 1).lt.-20.)then
a(j + 1) = 0.
csh(j + 1) = 0.
else
a(j + 1) = (1. + dtanh(b(j + 1))) / 2.
csh(j + 1) = 0.5 / dcosh(b(j + 1))**2
endif
as = as - a(j + 1)
5 continue
a(na + 1) = as + a(na + 1)
if(a(na + 1).le.0.)then
amax = -1.
return
endif
b(na + 1) = dlog(a(na + 1))
do 10 i = 1, na, 2
arg = x / a(i) * 4.
if (arg.le.0.2827) then
gf = 2. * dsqrt(arg / pi)
else
if ((pi / 2)**2 * arg.gt.80) then
gf = 1.
else
gf = 1. - 8. / pi**2 * dexp(-(pi / 2)**2 * arg)
endif
endif
dgf = 0
do 20 j = 1, 50000, 2
arg1 = (j * pi / 2.)**2 * arg
if (arg1.gt.25.) goto 21
dgf = dgf + 2. * dexp(-arg1)
20 continue
21 y = y + a(i + 1) * gf
dyda(i + 1) = gf * csh(i + 1)
dyda(i) = -a(i + 1) * dgf * arg
a(i) = dsqrt(a(i))
10 continue
return
end
! SUBROUTINE GUESS.F
subroutine guess(ndom, a1, a2, xro, iseed)
implicit double precision (a-h, o-z)
dimension a1(2 * ndom), a2(2 * ndom)
! SUBROUTINE TO GUESS C(J),R(J)
na = 2 * ndom
sum = 0.
do 8 j = 1, na, 2
a1(j + 1) = 1. + 10. * ran(iseed)
sum = sum + a1(j + 1)
8 continue
do 9 j = 1, na, 2
a1(j + 1) = a1(j + 1) / sum
9 continue
sum = 0
do 10 j = 1, na - 2, 2
sum = 1 + 10. * ran(iseed) + sum
a1(na - 2 - j) = sum
10 continue
do 11 j = 1, na - 2, 2
a1(j) = a1(j) / sum * xro
11 continue
sum = a1(na) + a1(na - 2)
do 12 j = 3, na - 3, 2
a1(j) = a1(j) - dlog10(sum)
sum = sum + a1(na - (j + 1))
12 continue
! 'ro' IS THE INVERSE OF Ro (1/Ro)
ro = 10.**(a1(1))
do 15 j = 3, na - 3, 2
a2(j) = a1(1) - a1(na - j)
15 continue
do 17 j = 3, na - 3, 2
a1(j) = a2(j)
17 continue
a1(na - 1) = dlog10(a1(na))
do 20 j = 1, na - 3, 2
a1(j) = a1(j + 1) / (10.**a1(j) - 10.**a1(j + 2))
20 continue
a1(na - 1) = 1.
nloop = 0
29 continue
ncont = 0
nloop = nloop + 1
do 35 j = 1, na - 3, 2
rom = 0.
if (a1(j).gt.1.) stop 'a1(j) > 1.'
if(a1(j + 2).lt.a1(j))then
ncont = 0
do 27 k = j, j + 2, 2
rom = rom + a1(k + 1) / a1(k)
27 continue
a1(j + 2) = a1(j)
a1(j) = a1(j + 1) / (rom - a1(j + 3) / a1(j + 2))
else
ncont = ncont + 1
endif
35 continue
if (nloop.gt.30) stop 'nloop greater than 30 on guess'
if (ncont.lt.(ndom - 1)) goto 29
sumro = 0.
do 30 j = 1, na - 1, 2
sumro = sumro + a1(j + 1) / a1(j)
30 continue
! CALCULATION OF A2
do 25 j = 1, na - 1, 2
a2(j) = 2. * dlog(a1(j) * ro)
z = 2. * a1(j + 1) - 1.
a2(j + 1) = 0.5 * dlog((z + 1) / abs(z - 1))
25 continue
return
end
Double precision function ran(iseed)
parameter(ia = 7141, ic = 54773, im = 259200)
iseed = mod(iseed * ia + ic, im)
ran = float(iseed) / float(im)
return
end
! SUBROUTINE INDEXX
subroutine indexx(n, arrin, indx)
implicit double precision (a-h, o-z)
dimension arrin(n), indx(n)
do 11 j = 1, n, 2
indx(j) = j
11 continue
l = n / 4 * 2 + 1
ir = n - 1
10 continue
if(l.gt.1)then
l = l - 2
indxt = indx(l)
q = arrin(indxt)
else
indxt = indx(ir)
q = arrin(indxt)
indx(ir) = indx(1)
ir = ir - 2
if(ir.eq.1)then
indx(1) = indxt
return
endif
endif
i = l
j = l + l + 1
20 if(j.le.ir)then
if(j.lt.ir)then
if(arrin(indx(j)).lt.arrin(indx(j + 2)))j = j + 2
endif
if(q.lt.arrin(indx(j)))then
indx(i) = indx(j)
i = j
j = j + j + 1
else
j = ir + 2
endif
go to 20
endif
indx(i) = indxt
go to 10
end
! SUBROUTINES MRQMIN, MRQCOF, GAUSSJ, COVSRT
subroutine mrqmin(x, y, sig, ndata, a, ma, lista, mfit, &
covar, alpha, nca, chisq, alamda, amax)
implicit double precision (a-h, o-z)
parameter (mmax = 20)
dimension x(0:ndata), y(0:ndata), sig(ndata), a(ma + 1), lista(ma), &
covar(nca, nca), alpha(nca, nca), atry(mmax), beta(mmax), da(mmax)
save beta, atry, da, ochisq
if(alamda.lt.0.)then
amax = 0.
do 25 j = 1, ma, 2
if(a(j) * 3..gt.amax)amax = a(j) * 3.
25 continue
kk = mfit + 1
do 12 j = 1, ma
ihit = 0
do 11 k = 1, mfit
if(lista(k).eq.j)ihit = ihit + 1
11 continue
if (ihit.eq.0) then
lista(kk) = j
kk = kk + 1
else if (ihit.gt.1) then
stop 'ERROR(MRQMIN): improper permutation in lista'
endif
12 continue
if (kk.ne.(ma + 1))stop 'ERROR(MRQMIN): improper perm. in lista'
alamda = 0.001
call mrqcof(x, y, sig, ndata, a, ma, lista, mfit, alpha, beta, nca, chisq&
, amax)
if(amax.eq.-1)return
ochisq = chisq
do 13 j = 1, ma
atry(j) = a(j)
13 continue
endif
do 15 j = 1, mfit
do 14 k = 1, mfit
covar(j, k) = alpha(j, k)
14 continue
covar(j, j) = alpha(j, j) * (1. + alamda)
da(j) = beta(j)
15 continue
call gaussj(covar, mfit, nca, da, 1, 1, amax)
if(amax.eq.-1.)return
if(alamda.eq.0.)then
call covsrt(covar, nca, ma, lista, mfit)
return
endif
21 sum = 0.
do 16 j = 1, mfit
atry(lista(j)) = a(lista(j)) + da(j)
16 continue
if(ma.ne.mfit) then
do 22 k = 1, mfit - 1, 2
if(atry(k).ge.atry(ma).or.dabs(atry(k)).gt.amax) then
da(k) = da(k) / 2.
goto 21
endif
22 continue
else
do 26 j = 1, mfit, 2
if(dabs(atry(j)).gt.amax)then
da(j) = da(j) / 2.
goto 21
endif
26 continue
endif
do 19 k = 1, mfit - 1, 2
sum = sum + (1. + dtanh(atry(k + 1))) / 2.
19 continue
if (sum.ge.1.)then
do 20 k = 1, mfit, 2
da(k + 1) = da(k + 1) / 2.
20 continue
goto 21
endif
call mrqcof(x, y, sig, ndata, atry, ma, lista, mfit, covar, da, nca, chisq&
, amax)
if(amax.eq.-1)return
if(chisq.lt.ochisq)then
alamda = 0.1 * alamda
ochisq = chisq
do 18 j = 1, mfit
do 17 k = 1, mfit
alpha(j, k) = covar(j, k)
17 continue
beta(j) = da(j)
a(lista(j)) = atry(lista(j))
18 continue
else
alamda = 10. * alamda
chisq = ochisq
endif
return
end
subroutine mrqcof(x, y, sig, ndata, a, ma, lista, mfit, alpha, beta, nalp, &
chisq, amax)
implicit double precision (a-h, o-z)
parameter (mmax = 20)
dimension x(0:ndata), y(0:ndata), sig(ndata), alpha(nalp, nalp), &
beta(ma), dyda(mmax), lista(mfit), a(ma + 1), a1(mmax)
do 12 j = 1, mfit
do 11 k = 1, j
alpha(j, k) = 0.
11 continue
beta(j) = 0.
12 continue
chisq = 0.
do 15 i = 1, ndata
call funcs(x(i), a, ymod, dyda, ma, a1, amax)
if(amax.eq.-1) then
chisq = 1000000.
return
endif
sig2i = 1. / (sig(i) * sig(i))
dy = y(i) - ymod
do 14 j = 1, mfit
wt = dyda(lista(j)) * sig2i
do 13 k = 1, j
alpha(j, k) = alpha(j, k) + wt * dyda(lista(k))
13 continue
beta(j) = beta(j) + dy * wt
14 continue
chisq = chisq + dy * dy * sig2i
15 continue
do 17 j = 2, mfit
do 16 k = 1, j - 1
alpha(k, j) = alpha(j, k)
16 continue
17 continue
return
end
subroutine gaussj(a, n, np, b, m, mp, amax)
implicit double precision (a-h, o-z)
parameter (nmax = 50)
dimension a(np, np), b(np, mp), ipiv(nmax), indxr(nmax), indxc(nmax)
do 11 j = 1, n
ipiv(j) = 0
11 continue
do 22 i = 1, n
big = 0.
do 13 j = 1, n
if(ipiv(j).ne.1)then
do 12 k = 1, n
if (ipiv(k).eq.0) then
if (abs(a(j, k)).ge.big)then
big = abs(a(j, k))
irow = j
icol = k
endif
else if (ipiv(k).gt.1) then
amax = -1.
return
endif
12 continue
endif
13 continue
ipiv(icol) = ipiv(icol) + 1
if (irow.ne.icol) then
do 14 l = 1, n
dum = a(irow, l)
a(irow, l) = a(icol, l)
a(icol, l) = dum
14 continue
do 15 l = 1, m
dum = b(irow, l)
b(irow, l) = b(icol, l)
b(icol, l) = dum
15 continue
endif
indxr(i) = irow
indxc(i) = icol
if (a(icol, icol).eq.0.) then
amax = -1.
return
endif
pivinv = 1. / a(icol, icol)
a(icol, icol) = 1.
do 16 l = 1, n
a(icol, l) = a(icol, l) * pivinv
16 continue
do 17 l = 1, m
b(icol, l) = b(icol, l) * pivinv
17 continue
do 21 ll = 1, n
if(ll.ne.icol)then
dum = a(ll, icol)
a(ll, icol) = 0.
do 18 l = 1, n
a(ll, l) = a(ll, l) - a(icol, l) * dum
18 continue
do 19 l = 1, m
b(ll, l) = b(ll, l) - b(icol, l) * dum
19 continue
endif
21 continue
22 continue
do 24 l = n, 1, -1
if(indxr(l).ne.indxc(l))then
do 23 k = 1, n
dum = a(k, indxr(l))
a(k, indxr(l)) = a(k, indxc(l))
a(k, indxc(l)) = dum
23 continue
endif
24 continue
return
end
subroutine covsrt(covar, ncvm, ma, lista, mfit)
implicit double precision (a-h, o-z)
dimension covar(ncvm, ncvm), lista(mfit)
do 12 j = 1, ma - 1
do 11 i = j + 1, ma
covar(i, j) = 0.
11 continue
12 continue
do 14 i = 1, mfit - 1
do 13 j = i + 1, mfit
if(lista(j).gt.lista(i)) then
covar(lista(j), lista(i)) = covar(i, j)
else
covar(lista(i), lista(j)) = covar(i, j)
endif
13 continue
14 continue
swap = covar(1, 1)
do 15 j = 1, ma
covar(1, j) = covar(j, j)
covar(j, j) = 0.
15 continue
covar(lista(1), lista(1)) = swap
do 16 j = 2, mfit
covar(lista(j), lista(j)) = covar(1, j)
16 continue
do 18 j = 2, ma
do 17 i = 1, j - 1
covar(i, j) = covar(j, i)
17 continue
18 continue
return
end
! SUBROUTINE SORT3
subroutine sort3(n, ra, rb, rc, wksp, iwksp)
implicit double precision (a-h, o-z)
dimension ra(n), rb(n), rc(n), wksp(n), iwksp(n)
call indexx(n, ra, iwksp)
do 11 j = 1, n
wksp(j) = ra(j)
11 continue
do 12 j = 1, n, 2
ra(j) = wksp(iwksp(j))
ra(j + 1) = wksp(iwksp(j) + 1)
12 continue
do 13 j = 1, n
wksp(j) = rb(j)
13 continue
do 14 j = 1, n, 2
rb(j) = wksp(iwksp(j))
rb(j + 1) = wksp(iwksp(j) + 1)
14 continue
do 15 j = 1, n
wksp(j) = rc(j)
15 continue
do 16 j = 1, n, 2
rc(j) = wksp(iwksp(j))
rc(j + 1) = wksp(iwksp(j) + 1)
16 continue
return
end
! SUBROUTINE ZITA.F
subroutine zita(ni, zi, e, ord, tilab, telab)
implicit double precision (a-h, o-z)
dimension zi(0:ni)
dimension telab(ni), tilab(ni)
pi = 3.141592654d00
R = 1.987d-3
zi(0) = 0.d00
d0 = 10.d00**ord / 4.d00
do 10 nt = 1, ni
zi(nt) = d0 * tilab(nt) * dexp(-E / R / telab(nt)) + zi(nt - 1)
10 continue
return
end
|
{"hexsha": "117f665c235bdaff9de3d7dcf6e85eb4bf944d2e", "size": 29649, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "resources/lovera/src/py3/autoarr_py.f90", "max_stars_repo_name": "ASUPychron/pychron", "max_stars_repo_head_hexsha": "dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2016-03-07T02:38:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T18:23:43.000Z", "max_issues_repo_path": "resources/lovera/src/py3/autoarr_py.f90", "max_issues_repo_name": "ASUPychron/pychron", "max_issues_repo_head_hexsha": "dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1626, "max_issues_repo_issues_event_min_datetime": "2015-01-07T04:52:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T19:15:59.000Z", "max_forks_repo_path": "resources/lovera/src/py3/autoarr_py.f90", "max_forks_repo_name": "UIllinoisHALPychron/pychron", "max_forks_repo_head_hexsha": "f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-05-23T00:10:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T16:51:57.000Z", "avg_line_length": 26.4959785523, "max_line_length": 86, "alphanum_fraction": 0.4667611049, "num_tokens": 11628}
|
[STATEMENT]
lemma analz_insert_freshK:
"[| evs \<in> recur; KAB \<notin> range shrK |]
==> (Key K \<in> analz (insert (Key KAB) (spies evs))) =
(K = KAB | Key K \<in> analz (spies evs))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>evs \<in> recur; KAB \<notin> range shrK\<rbrakk> \<Longrightarrow> (Key K \<in> analz (insert (Key KAB) (knows Spy evs))) = (K = KAB \<or> Key K \<in> analz (knows Spy evs))
[PROOF STEP]
by (simp del: image_insert
add: analz_image_freshK_simps raw_analz_image_freshK)
|
{"llama_tokens": 228, "file": null, "length": 1}
|
module independent
using QuantumOpticsBase
using ..interaction, ..system
import ..integrate
# Define Spin 1/2 operators
spinbasis = SpinBasis(1//2)
sigmax_ = sigmax(spinbasis)
sigmay_ = sigmay(spinbasis)
sigmaz_ = sigmaz(spinbasis)
sigmap_ = sigmap(spinbasis)
sigmam_ = sigmam(spinbasis)
I_spin = identityoperator(spinbasis)
"""
independent.blochstate(phi, theta[, N=1])
Product state of `N` single spin Bloch states.
All spins have the same azimuthal angle `phi` and polar angle `theta`.
"""
function blochstate(phi::Vector{T1}, theta::Vector{T2}) where {T1<:Real, T2<:Real}
N = length(phi)
@assert length(theta)==N
state = zeros(Float64, 3*N)
state[0*N+1:1*N] = cos.(phi).*sin.(theta)
state[1*N+1:2*N] = sin.(phi).*sin.(theta)
state[2*N+1:3*N] = cos.(theta)
return state
end
function blochstate(phi::Real, theta::Real, N::Int=1)
state = zeros(Float64, 3*N)
state[0*N+1:1*N] = ones(Float64, N)*cos(phi)*sin(theta)
state[1*N+1:2*N] = ones(Float64, N)*sin(phi)*sin(theta)
state[2*N+1:3*N] = ones(Float64, N)*cos(theta)
return state
end
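# Usage sketch: blochstate(0., pi/2) ≈ [1.0, 0.0, 0.0] places a single spin
# along +x, and blochstate(0., 0., 3) returns [0,0,0, 0,0,0, 1,1,1], i.e.
# three spins at the north pole (sz = +1 each).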
"""
independent.dim(state)
Number of spins described by this state.
"""
function dim(state::Vector{Float64})
N, rem = divrem(length(state), 3)
@assert rem==0
return N
end
"""
independent.splitstate(state)
Split state into sx, sy and sz parts.
"""
function splitstate(state::Vector{Float64})
N = dim(state)
return view(state, 0*N+1:1*N), view(state, 1*N+1:2*N), view(state, 2*N+1:3*N)
end
"""
independent.densityoperator(sx, sy, sz)
independent.densityoperator(state)
Create density operator from independent sigma expectation values.
"""
function densityoperator(sx::Number, sy::Number, sz::Number)
return 0.5*(identityoperator(spinbasis) + sx*sigmax_ + sy*sigmay_ + sz*sigmaz_)
end
function densityoperator(state::Vector{Float64})
N = dim(state)
sx, sy, sz = splitstate(state)
if N>1
return DenseOperator(reduce(tensor, [densityoperator(sx[i], sy[i], sz[i]) for i=1:N]))
else
return DenseOperator(densityoperator(sx[1], sy[1], sz[1]))
end
end
"""
independent.sx(state)
Sigma x expectation values of state.
"""
sx(state::Vector{Float64}) = view(state, 1:dim(state))
"""
independent.sy(state)
Sigma y expectation values of state.
"""
sy(state::Vector{Float64}) = view(state, dim(state)+1:2*dim(state))
"""
independent.sz(state)
Sigma z expectation values of state.
"""
sz(state::Vector{Float64}) = view(state, 2*dim(state)+1:3*dim(state))
"""
independent.timeevolution(T, gamma, state0)
Independent time evolution.
# Arguments
* `T`: Points of time for which output will be generated.
* `gamma`: Single spin decay rate.
* `state0`: Initial state.
"""
function timeevolution(T, gamma::Number, state0::Vector{Float64}; kwargs...)
N = dim(state0)
γ = gamma
function f(ds::Vector{Float64}, s::Vector{Float64}, p, t)
sx, sy, sz = splitstate(s)
dsx, dsy, dsz = splitstate(ds)
@inbounds for k=1:N
dsx[k] = -0.5*γ*sx[k]
dsy[k] = -0.5*γ*sy[k]
dsz[k] = -γ*(1+sz[k])
end
end
fout_(t::Float64, u::Vector{Float64}) = deepcopy(u)
return integrate(T, f, state0, fout_; kwargs...)
end
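# Minimal usage sketch (the return shape follows this module's `integrate`
# helper):
#   state0 = blochstate(0., pi/2, 2)          # two spins along +x
#   tout, states = timeevolution([0.0:0.1:1.0;], 1.0, state0)
# Each spin then decays independently with rate γ = 1.0.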
"""
independent.timeevolution(T, S::SpinCollection, state0)
Independent time evolution.
# Arguments
* `T`: Points of time for which output will be generated.
* `S`: SpinCollection describing the system.
* `state0`: Initial state.
"""
timeevolution(T, S::system.SpinCollection, state0::Vector{Float64}) = timeevolution(T, S.gamma, state0)
end # module
|
{"hexsha": "cbe6edd9c96c5f0907338f797bbc64454d52e288", "size": 3619, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/independent.jl", "max_stars_repo_name": "taylorpatti/CollectiveSpins.jl", "max_stars_repo_head_hexsha": "ef3bcd8f4efcf87165c44f2bd9dd21b574f55755", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/independent.jl", "max_issues_repo_name": "taylorpatti/CollectiveSpins.jl", "max_issues_repo_head_hexsha": "ef3bcd8f4efcf87165c44f2bd9dd21b574f55755", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/independent.jl", "max_forks_repo_name": "taylorpatti/CollectiveSpins.jl", "max_forks_repo_head_hexsha": "ef3bcd8f4efcf87165c44f2bd9dd21b574f55755", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7876712329, "max_line_length": 103, "alphanum_fraction": 0.6626139818, "num_tokens": 1081}
|
[STATEMENT]
lemma diffconst_result_correct:"proof_result DiffConstProof = ([], ([],[Equals (Differential (Const 0)) (Const 0)]))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. proof_result DiffConstProof = ([], [], [Equals (Differential (Const 0)) (Const 0)])
[PROOF STEP]
by(auto simp add: prover DiffConstProof_def)
|
{"llama_tokens": 118, "file": "Differential_Dynamic_Logic_Proof_Checker", "length": 1}
|
/*
* smack-ms - split mapping check "Multisplice Edition"
*
* Created by David Brawand on 04.05.10.
* Copyright 2010 UNIL. All rights reserved.
*
*/
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include <unistd.h>
#include <time.h>
#include <stdio.h>
#include <set>
#include <map>
#include <string>
#include <cmath> // for sqrt() used in the insert-size statistics
#include <boost/progress.hpp> // BOOST PROGRESS
#include "smack-ms.hpp"
//namespace
using namespace std;
/* MAIN */
int main(int argc, char* argv[]) {
/* USAGE */
if (argc != 11) {
// 1 2 3 4 5 6 7 8 9 10
std::cerr << "Usage: " << argv[0] << " pairs genomemap splicemap cigars introns readlength minDist maxDist mapoutput_full mapoutput_spliced" << std::endl;
exit(1);
}
std::cerr << std::endl << " _______ __ __ _______ _______ ___ _ __ ";
std::cerr << std::endl << " | || |_| || _ || || | | || | ";
std::cerr << std::endl << " | _____|| || |_| || || |_| || | Split";
std::cerr << std::endl << " | |_____ | || || || _|| | Map";
std::cerr << std::endl << " |_____ || || || _|| |_ |__| Check";
std::cerr << std::endl << " _____| || ||_|| || _ || |_ | _ | __ MS";
std::cerr << std::endl << " |_______||_| |_||__| |__||_______||___| |_||__| " << std::endl << std::endl;
/* DECLARATIONS */
char line[200];
int scan[7];
// the infolibs
regiomap regions;
intmap pairs;
intmap mc;
time_t startTime, endTime;
int lico = 0;
/* DATA READ STARTS HERE */
int readl = atoi(argv[6]);
int minDist = atoi(argv[7]);
int maxDist = atoi(argv[8]);
/**************/
/* READ MATES */
/**************/
// READ MATES
std::ifstream pairFile(argv[1]);
if (pairFile.fail()) {
std::cerr << "Error: could not read from pairFile " << argv[1] << std::endl;
exit(1);
}
lico = 0;
std::cerr << "Reading matePair info from file " << argv[1] << std::endl;
while (!pairFile.eof() && !pairFile.fail()) {
pairFile.getline(line, 200);
if (pairFile.eof()) break;
sscanf(line, "%d%d", &scan[0], &scan[1]);
if (++lico % 10000 == 0) std::cerr << "\r" << lico;
pairs.insert(pair<int,int>(scan[0], scan[1]));
pairs.insert(pair<int,int>(scan[1], scan[0]));
}
pairFile.close();
std::cerr << "\r" << "pairFile: " << pairs.size() / 2 << " pairs (in " << lico << " lines)" << std::endl;
/*****************/
/* READ MAPPINGS */
/*****************/
// READ FULL MAPPINGS
std::ifstream mappingFile(argv[2]);
lico = 0;
std::cerr << "Reading genomeMappings from file " << argv[2] << std::endl;
while (!mappingFile.eof() && !mappingFile.fail()) {
mappingFile.getline(line, 200);
if (mappingFile.eof()) break;
sscanf(line, "%d%d%d%d%d", &scan[0], &scan[1], &scan[2], &scan[3], &scan[4]); // readid, strand, region, start, mismatches
if (++lico % 1000 == 0) std::cerr << "\r" << lico;
if (regions.find(scan[2]) == regions.end()) regions.insert(pair<int,Region*>(scan[2], new Region(scan[2],minDist,maxDist))); // create region
if (pairs.find(scan[0]) == pairs.end()) pairs.insert(pair<int,int>(scan[0],0));
regions.find(scan[2])->second->addMapping(scan, readl, pairs.find(scan[0])->second);
}
std::cerr << "\r" << "mappingFile: " << lico << " lines" << std::endl;
mappingFile.close();
// READ SPLICE INDEX
splicemap spm;
std::ifstream spliceFile(argv[4]);
lico = 0;
int dummy;
std::cerr << "Reading Splice Index " << argv[4] << std::endl;
while (!spliceFile.eof() && !spliceFile.fail()) {
spliceFile.getline(line, 200);
if (spliceFile.eof()) break;
sscanf(line, "%d%d%d%d%d%d%d", &scan[0], &dummy, &scan[1], &scan[2], &scan[3], &scan[4], &scan[5]); //splice, centersplice, seq_region, strand, start, end, rightside intron (0 if end)
if (++lico % 1000 == 0) std::cerr << "\r" << lico;
if (spm.find(scan[0]) == spm.end()) {
spm[scan[0]] = new Splice(scan[0],scan[1],scan[2]); // splice,seqRegion,strand
}
spm.find(scan[0])->second->addSlice(scan[3], scan[4], scan[5]);
}
std::cerr << "\r" << "spliceFile: " << lico << " lines" << std::endl;
spliceFile.close();
// READ SPLICE MAPPINGS
std::ifstream splicemappingFile(argv[3]);
lico = 0;
std::cerr << "Reading spliceMappings from file " << argv[3] << std::endl;
while (!splicemappingFile.eof() && !splicemappingFile.fail()) {
splicemappingFile.getline(line, 200);
if (splicemappingFile.eof()) break;
sscanf(line, "%d%d%d%d%d", &scan[0], &scan[1], &scan[2], &scan[3], &scan[4]); // readid, strand, region, start, mismatches
if (++lico % 1000 == 0) std::cerr << "\r" << lico;
int sreg = spm.find(scan[2])->second->region();
if (regions.find(sreg) == regions.end()) regions.insert(pair<int,Region*>(sreg, new Region(sreg,minDist,maxDist))); // create region
if (pairs.find(scan[0]) == pairs.end()) pairs.insert(pair<int,int>(scan[0],0));
regions.find(sreg)->second->addMapping(scan, readl, pairs.find(scan[0])->second, spm.find(scan[2])->second);
}
std::cerr << "\r" << "splicemappingFile: " << lico << " lines" << std::endl;
splicemappingFile.close();
/***************/
/* unify reads */
/***************/
std::cerr << std::endl << "Unifying read mappings..." << std::endl;
time(&startTime);
boost::progress_display show_progress( regions.size() );
int map_c = 0, kill_c = 0;
for (regiomap::const_iterator rg = regions.begin(); rg != regions.end(); ++rg) {
++show_progress;
for(readmap::const_iterator rd = rg->second->rmap.begin(); rd != rg->second->rmap.end(); ++rd) {
map_c += rd->second->countMappings();
kill_c += rd->second->unify(); // will only keep best matching
mc[rd->second->rid()] += rd->second->countMappings(); // count read mappings AFTER unify
}
rg->second->indexMappings();
}
int acceptedMappings = map_c - kill_c;
time(&endTime);
double finaltime = difftime (endTime,startTime);
std::cerr << "\r " << (int)finaltime << " seconds elapsed (" << kill_c << " mappings out of " << map_c << " were rejected, " << acceptedMappings << " were kept)" << std::endl;
// READ INTRONS
std::ifstream intronFile(argv[5]);
if (intronFile.fail()) {
std::cerr << "Error: could not read from intronFile " << argv[5] << std::endl;
exit(1);
}
lico = 0;
std::cerr << "Reading introns from file " << argv[5] << std::endl;
while (!intronFile.eof() && !intronFile.fail()) {
intronFile.getline(line, 200);
if (intronFile.eof()) break;
sscanf(line, "%d%d%d", &scan[0], &scan[1], &scan[2]);
if (regions.find(scan[0]) != regions.end()) {
regions.find(scan[0])->second->addIntron(scan[1], scan[2]); // add mapping check mapping compatibility (overwrites mapping)
if (++lico % 10000 == 0) std::cerr << "\r" << lico;
}
}
intronFile.close();
std::cerr << "\r" << "intronFile: " << regions.size() << " regions with " << lico << " introns" << std::endl;
// INIT STATS
int statsize = abs(maxDist-minDist)+1;
int * sumdist = new int[statsize];
for (int i=0; i < statsize; i++) sumdist[i] = 0;
float totalMap = 0, totalAcc = 0, totalSingles = 0;
std::ofstream fullOut(argv[9]);
std::ofstream spliceOut(argv[10]);
std::cerr << std::endl << std::endl << "CHR\tMAPS\tACC\tINSERT\tSINGLES" << std::endl;
for (regiomap::const_iterator rg = regions.begin(); rg != regions.end(); ++rg) {
// RESOLVE PAIRS
rg->second->markInRangeMappings(readl,&mc); // max and min are stored in region
// OUTPUT accepted mappings
for (imap::const_iterator it = rg->second->mmm.begin(); it != rg->second->mmm.end(); ++it) {
if (it->second->isAccepted()) {
if (it->second->spliced()) spliceOut << it->second->getString() << endl;
else fullOut << it->second->getString() << endl;
}
}
// STATS
std::cout << rg->second->rid() << "\t" << rg->second->totalMappings() << "\t" << rg->second->acceptedMappings() << "\t" << minDist + rg->second->getDistanceMode(statsize) << "\t" << rg->second->singles() << std::endl;
intvec rdist = rg->second->getDistanceDistributon();
for (int i=0; i < statsize; i++) sumdist[i] += rdist[i];
totalAcc += rg->second->acceptedMappings();
totalMap += rg->second->totalMappings();
totalSingles += rg->second->singles();
}
// print global distribution (normalized by max mode)
double distMean = 0, distSD = 0, distCount = 0;
int maxd = 0, maxm = 0;
for (int i=0; i <= maxDist - minDist; i++) {
//cerr << ">>" << i << "=" << sumdist[i] << endl;
distMean += (i+minDist)*sumdist[i];
distCount += sumdist[i];
if (sumdist[i] > maxd) {
maxd = sumdist[i];
maxm = i;
}
}
std::cout << setprecision(1);
std::cout << "TOTAL\t";
std::cout << totalMap/1000 << "k\t";
std::cout << totalAcc/1000 << "k\t";
std::cout << minDist+maxm << "\t";
std::cout << totalSingles << std::endl;
// calc STDEV
distMean /= distCount;
for (int i=0; i <= maxDist - minDist; i++) {
distSD += sumdist[i] * (i+minDist - distMean) * (i+minDist - distMean);
}
distSD /= distCount;
double stdev = sqrt(distSD);
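// Weighted moments over the insert-size histogram: mean = sum_i d_i*n_i / N
// and var = sum_i n_i*(d_i - mean)^2 / N with d_i = i + minDist and
// N = sum_i n_i, so stdev is the population standard deviation.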
std::cout << setprecision(2);
std::cout << "Mate pair distance (relative to end): " << distMean << "±" << stdev << endl;
//if (4*stdev+distMean > maxDist) cerr << endl << "WARNING: Consider increasing insert size threshold to at least " << int(4*stdev+distMean) << "!" << endl << endl;
//if (distMean-4*stdev < minDist && distMean-4*stdev > 0) cerr << endl << "WARNING: Consider decreasing insert size threshold to at least " << int(distMean-4*stdev) << "!" << endl << endl;
float stars = 0;
std::cout << std::endl << "Insert size distribution" << std::endl;
for (int i=0; i <= maxDist - minDist; i++) {
std::cout << i+minDist << "\t";
stars = (float)50*sumdist[i]/maxd;
for (int j=0; j < (int)stars; j++) std::cout << "*";
std::cout << std::endl;
}
exit(0); // SUCCESS
} // end of main
|
{"hexsha": "e0ba309ae2868e791465181850285608081cc431", "size": 10333, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/smack-ms/main.cpp", "max_stars_repo_name": "preciserobot/rex", "max_stars_repo_head_hexsha": "91b58e22ea45b56b01a2cdd2ea63b253c9edc467", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/smack-ms/main.cpp", "max_issues_repo_name": "preciserobot/rex", "max_issues_repo_head_hexsha": "91b58e22ea45b56b01a2cdd2ea63b253c9edc467", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/smack-ms/main.cpp", "max_forks_repo_name": "preciserobot/rex", "max_forks_repo_head_hexsha": "91b58e22ea45b56b01a2cdd2ea63b253c9edc467", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7722419929, "max_line_length": 219, "alphanum_fraction": 0.571953934, "num_tokens": 3313}
|
/*=============================================================================
Copyright (c) 2016 Paul Fultz II
noexcept.hpp
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef BOOST_HOF_GUARD_DETAIL_NOEXCEPT_HPP
#define BOOST_HOF_GUARD_DETAIL_NOEXCEPT_HPP
#include <boost/hof/config.hpp>
#if BOOST_HOF_HAS_NOEXCEPT_DEDUCTION
#define BOOST_HOF_NOEXCEPT(...) noexcept(__VA_ARGS__)
#else
#define BOOST_HOF_NOEXCEPT(...)
#endif
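// Usage sketch (hypothetical function, not part of this header): propagate
// the noexcept-ness of a wrapped call when deduction is supported:
//
//   template<class F, class... Ts>
//   auto invoke(F f, Ts... xs)
//   BOOST_HOF_NOEXCEPT(noexcept(f(xs...))) -> decltype(f(xs...))
//   { return f(xs...); }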
#endif
|
{"hexsha": "c7078a8b279f79c1adad5e9cdcb670731cf8e906", "size": 637, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/hof/detail/noexcept.hpp", "max_stars_repo_name": "Harshitha91/Tmdb-react-native-node", "max_stars_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/hof/detail/noexcept.hpp", "max_issues_repo_name": "Harshitha91/Tmdb-react-native-node", "max_issues_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/hof/detail/noexcept.hpp", "max_forks_repo_name": "Harshitha91/Tmdb-react-native-node", "max_forks_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 31.85, "max_line_length": 80, "alphanum_fraction": 0.5886970173, "num_tokens": 133}
|
# This code overlaps in part with prep_sent.py
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import StanfordPOSTagger
from tqdm import tqdm
import numpy as np
import os
import csv
import sys
import math
from sentence_transformers import SentenceTransformer
from nltk.stem import WordNetLemmatizer
model = SentenceTransformer('sentence-transformers/bert-base-nli-mean-tokens')
eng_tag=StanfordPOSTagger(model_filename='corenlp/postagger/models/english-left3words-distsim.tagger', \
path_to_jar='corenlp/postagger/stanford-postagger-3.9.2.jar')
pos_tag_ids_map = {'v':0, 'a':1, 'r':2, 'n':3, 'u':4}
lemmatizer = WordNetLemmatizer()
def load_sentinet(senti_file_name, gloss_file_name):
# load sentiwordnet
f = open(senti_file_name, 'r')
line_id = 0
sentinet = {}
for line in f.readlines():
        # skip the 26-line header of the SentiWordNet file
        if line_id < 26:
            line_id += 1
            continue
if line_id == 26:
print(line)
        # line 117685 is past the last data entry (26 header lines + 117659 synsets)
        if line_id == 117685:
            print(line)
            break
line_split = line.strip().split('\t')
pos, pscore, nscore, term, gloss = line_split[0], float(line_split[2]), float(line_split[3]), line_split[4], \
line_split[5]
if "\"" in gloss:
shop_pos = gloss.index('\"')
gloss = gloss[: shop_pos - 2]
each_term = term.split(' ')
for ele in each_term:
ele_split = ele.split('#')
assert len(ele_split) == 2
word, sn = ele_split[0], int(ele_split[1])
if word not in sentinet:
sentinet[word] = {}
if pos not in sentinet[word]:
sentinet[word][pos] = []
sentinet[word][pos].append([sn, pscore, nscore, gloss, line_id - 26])
line_id += 1
f.close()
# load gloss embedding
gloss_embedding = np.load(gloss_file_name)
gloss_emb_norm = [np.linalg.norm(gloss_embedding[id]) for id in range(len(gloss_embedding))]
gloss_emb_norm = np.array(gloss_emb_norm)
return sentinet, gloss_embedding, gloss_emb_norm
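# Illustrative note (not in the original file): after load_sentinet() returns,
# `sentinet` maps word -> POS -> a list of [sense_number, pos_score, neg_score,
# gloss, gloss_line] entries, e.g. (values hypothetical):
#   sentinet['good']['a'] == [[1, 0.75, 0.0, 'having desirable ...', 4207], ...]
# `gloss_embedding[gloss_line]` is the sentence embedding of that gloss, and
# `gloss_emb_norm[gloss_line]` caches its L2 norm for the cosine similarity below.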
def convert_postag(pos):
"""Convert NLTK POS tags to SWN's POS tags."""
if pos in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:
return 'v'
elif pos in ['JJ', 'JJR', 'JJS']:
return 'a'
elif pos in ['RB', 'RBR', 'RBS']:
return 'r'
elif pos in ['NNS', 'NN', 'NNP', 'NNPS']:
return 'n'
else:
return 'u'
def cos_sim(a, b, norm_a, norm_b):
dot_prod = np.dot(a,b)
return dot_prod / (norm_a * norm_b)
def process_text(id_list, text_list, label_list, sentinet, gloss_embedding, gloss_emb_norm, senti_form):
sent_list = []
sent_list_str = []
data_cnt = 0
for text in text_list:
        try:
            token_list = word_tokenize(text.strip())
        except Exception:
            # fall back to whitespace splitting if NLTK tokenization fails
            token_list = text.strip().split()
if len(token_list) == 0:
continue
sent_list.append(token_list)
sent_list_str.append(text.strip())
data_cnt += 1
print('original number of data = ', data_cnt)
# pos tagging
sent_split = eng_tag.tag_sents(sent_list)
# sentence embedding
corpus_embedding = model.encode(sent_list_str, batch_size=128)
corpus_embedding = np.array(corpus_embedding)
corpus_emb_norm = [np.linalg.norm(corpus_embedding[id]) for id in range(len(corpus_embedding))]
corpus_emb_norm = np.array(corpus_emb_norm)
assert len(corpus_embedding) == len(sent_split)
clean_id_list, clean_text_list, clean_sent_list, pos_list, senti_list, clean_label_list = [], [], [], [], [], []
for sent_id in range(len(sent_split)):
sent_list_ele, pos_list_ele, senti_list_ele = [], [], []
for pair in sent_split[sent_id]:
if len(pair[0]) != 0:
word, pos = pair[0], convert_postag(pair[1])
sent_list_ele.append(word)
pos_list_ele.append(pos)
if pos != 'u':
word = lemmatizer.lemmatize(word.lower(), pos=pos)
# gloss-aware sentiment attention
if word in sentinet and pos in sentinet[word]:
sim_list = []
score_list = []
for ele_term in sentinet[word][pos]:
gloss_line = ele_term[4]
gloss_emb, gloss_norm = gloss_embedding[gloss_line], gloss_emb_norm[gloss_line]
sent_emb, sent_norm = corpus_embedding[sent_id], corpus_emb_norm[sent_id]
sim_score = cos_sim(gloss_emb, sent_emb, gloss_norm, sent_norm)
                        # map cosine similarity from [-1, 1] to [0, 1] and
                        # down-weight senses with a higher sense number
                        sim_list.append((sim_score + 1) / (2 * ele_term[0]))
score_list.append(ele_term[1] - ele_term[2])
sim_exp = [math.exp(sim_list[id]) for id in range(len(sim_list))]
sum_sim_exp = sum(sim_exp)
sim_exp = np.array([sim_exp[id] / sum_sim_exp for id in range(len(sim_exp))])
score_list = np.array(score_list)
final_score = np.dot(sim_exp, score_list)
senti_list_ele.append(final_score)
else:
senti_list_ele.append(0.0)
assert len(sent_list_ele) == len(pos_list_ele)
assert len(sent_list_ele) == len(senti_list_ele)
        if len(sent_list_ele) != 0:
clean_sent_list.append(sent_list_ele)
# transform pos_tag (str) to integer
pos_list.append([pos_tag_ids_map[ele] for ele in pos_list_ele])
            # discretize the sentiment score, or keep it as a float
            if senti_form == 'discrete':
                # 1 = positive, 0 = negative, 2 = neutral
                senti_list.append([1 if ele > 0 else 0 if ele < 0 else 2
                                   for ele in senti_list_ele])
else:
senti_list.append(senti_list_ele)
clean_label_list.append(label_list[sent_id])
clean_id_list.append(id_list[sent_id])
clean_text_list.append(sent_list_str[sent_id])
assert len(clean_sent_list) == len(clean_label_list)
assert len(clean_sent_list) == len(clean_text_list)
assert len(clean_sent_list) == len(clean_id_list)
assert len(clean_sent_list) == len(pos_list)
assert len(clean_sent_list) == len(senti_list)
print('number after processing = ', len(clean_label_list))
return clean_id_list, clean_text_list, clean_sent_list, pos_list, senti_list, clean_label_list
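# Usage sketch (illustrative, not part of the original script): the file names
# below are hypothetical, and running it also requires the Stanford POS tagger
# jars configured at the top of this module.
if __name__ == '__main__':
    sentinet, gloss_emb, gloss_norm = load_sentinet('SentiWordNet_3.0.0.txt',
                                                    'gloss_embedding.npy')
    ids = [0, 1]
    texts = ['the food was great', 'the service was slow']
    labels = [1, 0]
    (clean_ids, clean_texts, sents,
     pos_ids, senti_scores, clean_labels) = process_text(
        ids, texts, labels, sentinet, gloss_emb, gloss_norm,
        senti_form='continuous')
    print(sents, pos_ids, senti_scores)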
|
{"hexsha": "b2e80f50be78ce277b6525a086426436a01de14c", "size": 6663, "ext": "py", "lang": "Python", "max_stars_repo_path": "SentiLARE/preprocess/aspect_utils.py", "max_stars_repo_name": "authorAnonymousGit/WOCEL", "max_stars_repo_head_hexsha": "5edcf1c0cce07c8280ef3c10c9e01ad0d2643885", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SentiLARE/preprocess/aspect_utils.py", "max_issues_repo_name": "authorAnonymousGit/WOCEL", "max_issues_repo_head_hexsha": "5edcf1c0cce07c8280ef3c10c9e01ad0d2643885", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SentiLARE/preprocess/aspect_utils.py", "max_forks_repo_name": "authorAnonymousGit/WOCEL", "max_forks_repo_head_hexsha": "5edcf1c0cce07c8280ef3c10c9e01ad0d2643885", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0742857143, "max_line_length": 119, "alphanum_fraction": 0.5865225874, "include": true, "reason": "import numpy", "num_tokens": 1573}
|
"""
Augmenters that apply artistic image filters.
List of augmenters:
* :class:`Cartoon`
Added in 0.4.0.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import cv2
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from . import color as colorlib
from .. import dtypes as iadt
from .. import parameters as iap
def stylize_cartoon(image, blur_ksize=3, segmentation_size=1.0,
saturation=2.0, edge_prevalence=1.0,
suppress_edges=True,
from_colorspace=colorlib.CSPACE_RGB):
"""Convert the style of an image to a more cartoonish one.
This function was primarily designed for images with a size of ``200``
to ``800`` pixels. Smaller or larger images may cause issues.
    Note that the quality of the results currently cannot compete with
    learned style transfer, let alone human-made images. Too few or too
    many detected edges are probably the most significant drawbacks.
This method is loosely based on the one proposed in
https://stackoverflow.com/a/11614479/3760780
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
Parameters
----------
image : ndarray
A ``(H,W,3) uint8`` image array.
blur_ksize : int, optional
Kernel size of the median blur filter applied initially to the input
        image. Expected to be an odd value and ``>=0``. If an even value,
        it is automatically increased to an odd one. If ``<=1``, no blur
        will be applied.
segmentation_size : float, optional
Size multiplier to decrease/increase the base size of the initial
mean-shift segmentation of the image. Expected to be ``>=0``.
Note that the base size is increased by roughly a factor of two for
images with height and/or width ``>=400``.
    saturation : float, optional
        Multiplier for the saturation. Set to ``1.0`` to not change the
        image's saturation.
    edge_prevalence : float, optional
        Multiplier for the prevalence of edges. Higher values lead to more
        edges. Note that the default value of ``1.0`` is already fairly
        conservative, so there is limited effect from lowering it further.
suppress_edges : bool, optional
Whether to run edge suppression to remove blobs containing too many
or too few edge pixels.
from_colorspace : str, optional
The source colorspace. Use one of ``imgaug.augmenters.color.CSPACE_*``.
Defaults to ``RGB``.
Returns
-------
ndarray
Image in cartoonish style.
"""
iadt.gate_dtypes(
image,
allowed=["uint8"],
disallowed=["bool",
"uint16", "uint32", "uint64", "uint128", "uint256",
"int8", "int16", "int32", "int64", "int128", "int256",
"float16", "float32", "float64", "float96", "float128",
"float256"],
augmenter=None)
assert image.ndim == 3 and image.shape[2] == 3, (
"Expected to get a (H,W,C) image, got shape %s." % (image.shape,))
blur_ksize = max(int(np.round(blur_ksize)), 1)
segmentation_size = max(segmentation_size, 0.0)
saturation = max(saturation, 0.0)
is_small_image = max(image.shape[0:2]) < 400
image = _blur_median(image, blur_ksize)
image_seg = np.zeros_like(image)
if is_small_image:
spatial_window_radius = int(10 * segmentation_size)
color_window_radius = int(20 * segmentation_size)
else:
spatial_window_radius = int(15 * segmentation_size)
color_window_radius = int(40 * segmentation_size)
if segmentation_size <= 0:
image_seg = image
else:
cv2.pyrMeanShiftFiltering(_normalize_cv2_input_arr_(image),
sp=spatial_window_radius,
sr=color_window_radius,
dst=image_seg)
if is_small_image:
edges_raw = _find_edges_canny(image_seg,
edge_prevalence,
from_colorspace)
else:
edges_raw = _find_edges_laplacian(image_seg,
edge_prevalence,
from_colorspace)
edges = edges_raw
edges = ((edges > 100) * 255).astype(np.uint8)
if suppress_edges:
# Suppress dense 3x3 blobs full of detected edges. They are visually
# ugly.
edges = _suppress_edge_blobs(edges, 3, 8, inverse=False)
# Suppress spurious few-pixel edges (5x5 size with <=3 edge pixels).
edges = _suppress_edge_blobs(edges, 5, 3, inverse=True)
return _saturate(_blend_edges(image_seg, edges),
saturation,
from_colorspace)
# Added in 0.4.0.
def _find_edges_canny(image, edge_multiplier, from_colorspace):
image_gray = colorlib.change_colorspace_(np.copy(image),
to_colorspace=colorlib.CSPACE_GRAY,
from_colorspace=from_colorspace)
image_gray = image_gray[..., 0]
thresh = min(int(200 * (1/edge_multiplier)), 254)
edges = cv2.Canny(_normalize_cv2_input_arr_(image_gray), thresh, thresh)
return edges
# Added in 0.4.0.
def _find_edges_laplacian(image, edge_multiplier, from_colorspace):
image_gray = colorlib.change_colorspace_(np.copy(image),
to_colorspace=colorlib.CSPACE_GRAY,
from_colorspace=from_colorspace)
image_gray = image_gray[..., 0]
edges_f = cv2.Laplacian(_normalize_cv2_input_arr_(image_gray / 255.0),
cv2.CV_64F)
edges_f = np.abs(edges_f)
edges_f = edges_f ** 2
vmax = np.percentile(edges_f, min(int(90 * (1/edge_multiplier)), 99))
edges_f = np.clip(edges_f, 0.0, vmax) / vmax
edges_uint8 = np.clip(np.round(edges_f * 255), 0, 255.0).astype(np.uint8)
edges_uint8 = _blur_median(edges_uint8, 3)
edges_uint8 = _threshold(edges_uint8, 50)
return edges_uint8
# Added in 0.4.0.
def _blur_median(image, ksize):
if ksize % 2 == 0:
ksize += 1
if ksize <= 1:
return image
return cv2.medianBlur(_normalize_cv2_input_arr_(image), ksize)
# Added in 0.4.0.
def _threshold(image, thresh):
mask = (image < thresh)
result = np.copy(image)
result[mask] = 0
return result
# Added in 0.4.0.
def _suppress_edge_blobs(edges, size, thresh, inverse):
kernel = np.ones((size, size), dtype=np.float32)
counts = cv2.filter2D(_normalize_cv2_input_arr_(edges / 255.0), -1, kernel)
if inverse:
mask = (counts < thresh)
else:
mask = (counts >= thresh)
edges = np.copy(edges)
edges[mask] = 0
return edges
# Added in 0.4.0.
def _saturate(image, factor, from_colorspace):
image = np.copy(image)
if np.isclose(factor, 1.0, atol=1e-2):
return image
hsv = colorlib.change_colorspace_(image,
to_colorspace=colorlib.CSPACE_HSV,
from_colorspace=from_colorspace)
sat = hsv[:, :, 1]
sat = np.clip(sat.astype(np.int32) * factor, 0, 255).astype(np.uint8)
hsv[:, :, 1] = sat
image_sat = colorlib.change_colorspace_(hsv,
to_colorspace=from_colorspace,
from_colorspace=colorlib.CSPACE_HSV)
return image_sat
# Added in 0.4.0.
def _blend_edges(image, image_edges):
image_edges = 1.0 - (image_edges / 255.0)
image_edges = np.tile(image_edges[..., np.newaxis], (1, 1, 3))
return np.clip(
np.round(image * image_edges),
0.0, 255.0
).astype(np.uint8)
class Cartoon(meta.Augmenter):
"""Convert the style of images to a more cartoonish one.
This augmenter was primarily designed for images with a size of ``200``
to ``800`` pixels. Smaller or larger images may cause issues.
    Note that the quality of the results currently cannot compete with
    learned style transfer, let alone human-made images. Too few or too
    many detected edges are probably the most significant drawbacks.
Added in 0.4.0.
**Supported dtypes**:
See :func:`~imgaug.augmenters.artistic.stylize_cartoon`.
Parameters
----------
blur_ksize : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Median filter kernel size.
See :func:`~imgaug.augmenters.artistic.stylize_cartoon` for details.
* If ``number``: That value will be used for all images.
* If ``tuple (a, b) of number``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be picked per image from the
``list``.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values, where ``N`` is the number of
images.
segmentation_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean-Shift segmentation size multiplier.
See :func:`~imgaug.augmenters.artistic.stylize_cartoon` for details.
* If ``number``: That value will be used for all images.
* If ``tuple (a, b) of number``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be picked per image from the
``list``.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values, where ``N`` is the number of
images.
saturation : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Saturation multiplier.
See :func:`~imgaug.augmenters.artistic.stylize_cartoon` for details.
* If ``number``: That value will be used for all images.
* If ``tuple (a, b) of number``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be picked per image from the
``list``.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values, where ``N`` is the number of
images.
edge_prevalence : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Multiplier for the prevalence of edges.
See :func:`~imgaug.augmenters.artistic.stylize_cartoon` for details.
* If ``number``: That value will be used for all images.
* If ``tuple (a, b) of number``: A random value will be uniformly
sampled per image from the interval ``[a, b)``.
* If ``list``: A random value will be picked per image from the
``list``.
* If ``StochasticParameter``: The parameter will be queried once
per batch for ``(N,)`` values, where ``N`` is the number of
images.
from_colorspace : str, optional
The source colorspace. Use one of ``imgaug.augmenters.color.CSPACE_*``.
Defaults to ``RGB``.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Cartoon()
    Create a cartoon augmenter with default (stochastic) parameters.
>>> aug = iaa.Cartoon(blur_ksize=3, segmentation_size=1.0,
>>> saturation=2.0, edge_prevalence=1.0)
Create a non-stochastic cartoon augmenter that produces decent-looking
images.
"""
# Added in 0.4.0.
def __init__(self, blur_ksize=(1, 5), segmentation_size=(0.8, 1.2),
saturation=(1.5, 2.5), edge_prevalence=(0.9, 1.1),
from_colorspace=colorlib.CSPACE_RGB,
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(Cartoon, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.blur_ksize = iap.handle_continuous_param(
blur_ksize, "blur_ksize", value_range=(0, None),
tuple_to_uniform=True, list_to_choice=True)
self.segmentation_size = iap.handle_continuous_param(
segmentation_size, "segmentation_size", value_range=(0.0, None),
tuple_to_uniform=True, list_to_choice=True)
self.saturation = iap.handle_continuous_param(
saturation, "saturation", value_range=(0.0, None),
tuple_to_uniform=True, list_to_choice=True)
self.edge_prevalence = iap.handle_continuous_param(
edge_prevalence, "edge_prevalence", value_range=(0.0, None),
tuple_to_uniform=True, list_to_choice=True)
self.from_colorspace = from_colorspace
# Added in 0.4.0.
def _augment_batch_(self, batch, random_state, parents, hooks):
if batch.images is not None:
samples = self._draw_samples(batch, random_state)
for i, image in enumerate(batch.images):
image[...] = stylize_cartoon(
image,
blur_ksize=samples[0][i],
segmentation_size=samples[1][i],
saturation=samples[2][i],
edge_prevalence=samples[3][i],
from_colorspace=self.from_colorspace
)
return batch
# Added in 0.4.0.
def _draw_samples(self, batch, random_state):
nb_rows = batch.nb_rows
return (
self.blur_ksize.draw_samples((nb_rows,), random_state=random_state),
self.segmentation_size.draw_samples((nb_rows,),
random_state=random_state),
self.saturation.draw_samples((nb_rows,), random_state=random_state),
self.edge_prevalence.draw_samples((nb_rows,),
random_state=random_state)
)
# Added in 0.4.0.
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.blur_ksize, self.segmentation_size, self.saturation,
self.edge_prevalence, self.from_colorspace]
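# Usage sketch (illustrative, not part of the original module): exercises both
# the functional and the augmenter interface on a synthetic uint8 RGB image.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    image = rng.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    # deterministic, single image
    cartoon = stylize_cartoon(image, blur_ksize=3, segmentation_size=1.0,
                              saturation=2.0, edge_prevalence=1.0)
    # stochastic, per-image parameter sampling over a batch
    aug = Cartoon(seed=0)
    images_aug = aug(images=[image, image])
    print(cartoon.shape, len(images_aug))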
|
{"hexsha": "0a84d473c2253d7e00a71093203efd0c63ece40d", "size": 15813, "ext": "py", "lang": "Python", "max_stars_repo_path": "imgaug/augmenters/artistic.py", "max_stars_repo_name": "Darktex/imgaug", "max_stars_repo_head_hexsha": "2bbe47eff8c2ec8b9ee1360474de25a786a9ec9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 134, "max_stars_repo_stars_event_min_datetime": "2019-01-27T12:34:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:58:24.000Z", "max_issues_repo_path": "imgaug/augmenters/artistic.py", "max_issues_repo_name": "Darktex/imgaug", "max_issues_repo_head_hexsha": "2bbe47eff8c2ec8b9ee1360474de25a786a9ec9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-02-09T06:00:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T06:28:27.000Z", "max_forks_repo_path": "imgaug/augmenters/artistic.py", "max_forks_repo_name": "Darktex/imgaug", "max_forks_repo_head_hexsha": "2bbe47eff8c2ec8b9ee1360474de25a786a9ec9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2019-01-31T14:17:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T16:16:45.000Z", "avg_line_length": 38.1036144578, "max_line_length": 173, "alphanum_fraction": 0.6114589262, "include": true, "reason": "import numpy", "num_tokens": 3816}
|
# -*- coding: utf-8 -*-
import os
import pickle
import numpy as np
import cv2
import torch
from torch.utils import data
import torchvision.transforms as transforms
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self):
self.alphastd = 0.1
self.eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
self.eigvec = torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
# print(rgb.view(3, 1, 1).expand_as(img))
# exit()
return img.add(rgb.view(3, 1, 1).expand_as(img))
class KITTI15Mask(data.Dataset):
def __init__(self, root, split="train", is_transform=True, img_size=(375, 1242), is_check=False, scale=3, downsampling_iteration=3, is_training=True, is_eval=False, thold=0.5):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
"""
super(KITTI15Mask, self).__init__()
print("using Data Loader KITTI15Mask-{}".format(is_training))
self.is_transform = is_transform
self.img_size = img_size if isinstance(img_size, tuple) else (375, 1242)
self.stats={'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
self.thold = thold
self.pca = Lighting()
self.files = {}
self.datapath = root
self.files = os.listdir(os.path.join(self.datapath,split))
self.files.sort()
# if is_training :
# # add_files = ["34.npy","35.npy","36.npy","37.npy","38.npy","39.npy","40.npy","41.npy","42.npy",
# # "84.npy", "111.npy", "111.npy", "115.npy", "119.npy", "119.npy", "119.npy", "130.npy", "131.npy", "132.npy",
# # "120.npy", "120.npy", "120.npy", "122.npy", "122.npy", "122.npy", "123.npy", "127.npy",
# # "104.npy", "104.npy", "104.npy", "104.npy", "104.npy", "104.npy", "104.npy", "104.npy", "104.npy", "104.npy",
# # "14.npy", "14.npy", "14.npy", "14.npy", "14.npy", "14.npy", "142.npy", "142.npy", "142.npy", "147.npy", "149.npy",
# # "15.npy", "15.npy", "15.npy", "151.npy", "151.npy", "151.npy", "151.npy", "151.npy", "152.npy", "152.npy", "152.npy",
# # "154.npy", "154.npy", "154.npy", "156.npy", "156.npy", "156.npy", "158.npy", "158.npy",
# # "16.npy", "160.npy", "160.npy", "160.npy", "161.npy", "161.npy", "161.npy",
# # "164.npy", "164.npy", "164.npy", "165.npy", "165.npy", "165.npy", "166.npy", "166.npy", "166.npy", "168.npy",
# # "17.npy", "17.npy", "17.npy", "170.npy", "173.npy", "173.npy", "173.npy", "176.npy", "176.npy", "176.npy", "179.npy", "179.npy", "179.npy",
# # "180.npy", "182.npy", "182.npy", "182.npy", "187.npy", "187.npy", "187.npy", "188.npy", "189.npy", "189.npy", "189.npy",
# # "190.npy", "190.npy", "190.npy", "196.npy", "196.npy", "196.npy", "199.npy", "199.npy", "199.npy",
# # "20.npy", "30.npy",
# # ]
# add_files = ["104.npy"]*50
# self.files += add_files
# # self.files = [file for file in self.files if file.find("addition_000")!=-1]
self.split = split
self.scale = scale
self.downsampling_iteration = downsampling_iteration
self.is_check = is_check
self.is_training = is_training
self.is_eval = is_eval
        if len(self.files) < 1:
            raise Exception("No files for split=[%s] found in %s" % (split, self.datapath))
        self.length = self.__len__()
        print("Found %d files in %s" % (len(self.files), self.datapath))
def __len__(self):
"""__len__"""
return len(self.files)
def __getitem__(self, index):
"""__getitem__
:param index:
"""
data = np.load(os.path.join(self.datapath, self.split, self.files[index]))
h,w,c = data.shape
ori_h, ori_w, _ = data.shape
# print(self.files[index], h,w,c)
# make sure the shape of data is proper
residual_h, residual_w = 0, 0
interval = np.power(self.scale, self.downsampling_iteration)
if h%interval != 0 :
residual_h = interval - h%interval
if w%interval != 0 :
residual_w = interval - w%interval
tmp_data = np.zeros((h+residual_h, w+residual_w, c), dtype=np.float32)
tmp_data[residual_h:, residual_w:] = data
data = np.copy(tmp_data)
h,w,c = data.shape
del tmp_data
# print(self.files[index], h,w,c)
if self.is_training :
th, tw = self.img_size
th = int(np.ceil(th/interval)*interval)
tw = int(np.ceil(tw/interval)*interval)
if (th,tw) != (h,w) :
x1 = np.random.randint(0, h-th+1)
y1 = np.random.randint(0, w-tw+1)
# print(x1,th,y1,tw)
data = data[x1:x1+th, y1:y1+tw, :]
left = data[...,0:3]
right = data[...,3:6]
disparity = data[...,6]
if self.is_training :
# randomly add the reflected light
if np.random.binomial(1,0.8) :
left, right = self.add_paralex_noise(left, right)
if np.random.binomial(1,0.5) :
left, right = self.add_paralex_noise(left, right)
left = left/255
right = right/255
if self.is_training :
# randomly occlude a region
if np.random.binomial(1,0.5) :
sh = int(np.random.uniform(30,80))
sw = int(np.random.uniform(10,80))
ch = int(np.random.uniform(sh,right.shape[0]-sh))
cw = int(np.random.uniform(sw,right.shape[1]-sw))
right[ch-sh:ch+sh,cw-sw:cw+sw] = np.mean(np.mean(right,0),0)[np.newaxis,np.newaxis]
        # optionally mask the disparity with the extra object-mask channel
if data.shape[-1] == 8 :
if np.random.rand() < 0.3 :
disparity = disparity*data[...,7]
if not self.is_training and self.split=="train_eval" :
disparity[:130,:] = 0
# if self.is_training and self.split=="train_total_dense" :
# disparity[:108,:] = 0
left_image = data[...,0:3]
left_image = transforms.ToTensor()(left_image)
right_image = data[...,3:6]
right_image = transforms.ToTensor()(right_image)
mask_path = os.path.join(self.datapath, self.split+"_mask", self.files[index].split(".")[0])
with open(mask_path, "rb") as f :
mask_data = pickle.load(f)
if self.is_training and (th,tw) != (h,w) :
for idx in np.arange(len(mask_data)) :
# print(self.downsampling_iteration-1-(idx%3), idx, idx%3)
down_scale = self.scale**(idx%3)
mask_data[idx] = mask_data[idx][x1//down_scale:(x1+th)//down_scale, y1//down_scale:(y1+tw)//down_scale]
# print(down_scale, mask_data[idx].shape, x1//down_scale, (x1+th)//down_scale, mask_data[idx].shape)
left_mask3 = torch.from_numpy(mask_data[0]).float()
left_mask2 = torch.from_numpy(mask_data[1]).float()
left_mask1 = torch.from_numpy(mask_data[2]).float()
# left_mask3 = torch.from_numpy(mask_data[0]*(1-occ3)).float()
# left_mask2 = torch.from_numpy(mask_data[1]*(1-occ2)).float()
# left_mask1 = torch.from_numpy(mask_data[2]*(1-occ1)).float()
right_mask3 = torch.from_numpy(mask_data[3]).float()
right_mask2 = torch.from_numpy(mask_data[4]).float()
right_mask1 = torch.from_numpy(mask_data[5]).float()
# right_mask3 = torch.from_numpy(np.ones(mask_data[3].shape)).float()
# right_mask2 = torch.from_numpy(np.ones(mask_data[4].shape)).float()
# right_mask1 = torch.from_numpy(np.ones(mask_data[5].shape)).float()
# print(left_mask1.shape, left_mask2.shape, left_mask3.shape, right_mask1.shape, right_mask2.shape, right_mask3.shape)
if self.is_transform:
left, right, disparity = self.transform(left, right, disparity)
if self.is_check :
return left, right, disparity, left_image, right_image, left_mask1, left_mask2, left_mask3, right_mask1, right_mask2, right_mask3, ori_h, ori_w, self.files[index].split(".")[0]
if self.is_training :
return left, right, disparity, left_image, left_mask1, left_mask2, left_mask3, right_mask1, right_mask2, right_mask3
if self.is_eval :
return left, right, disparity, left_image, left_mask1, left_mask2, left_mask3, right_mask1, right_mask2, right_mask3, ori_h, ori_w, self.files[index].split(".")[0], 192
return left, right, disparity, left_image, left_mask1, left_mask2, left_mask3, right_mask1, right_mask2, right_mask3, ori_h, ori_w, self.files[index].split(".")[0], 192
def transform(self, left, right, disparity):
"""transform
"""
trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
if self.is_training == False :
left = trans(left).float()
right = trans(right).float()
else :
train_transform = transforms.Compose([
transforms.ToTensor(),
RandomPhotometric(
noise_stddev=0.0,
min_contrast=-0.37,
max_contrast=0.37,
brightness_stddev=0.02,
min_color=0.9,
max_color=1.1,
min_gamma=0.7,
max_gamma=1.7),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
left = train_transform(left)
right = train_transform(right)
# left = trans(left).float()
# right = trans(right).float()
disparity = torch.from_numpy(disparity).float()
return left, right, disparity
def add_paralex_noise(self, left_img, right_img) :
h,w,c = left_img.shape
sel_h = np.random.randint(100, 180)
sel_w = np.random.randint(30, 70)
# print(sel_h, sel_w)
parallel_d = np.random.randint(60,200)
# print(parallel_d)
sta_h = int(np.random.uniform(0, h-sel_h))
sta_w = int(np.random.uniform(0, w-sel_w-parallel_d))
# print(sta_h, sta_w)
x = np.arange(sel_w)
u = sel_w//2
sig = 7
# noise = np.exp(-(x-u)**2 / (2*sig**2)) / (np.sqrt(2*np.pi)*sig) * 400 * np.random.uniform(0.3, 1.2)
# noise = np.repeat(noise[np.newaxis], sel_h, axis=0)
# noise = np.repeat(noise[...,np.newaxis], 3, axis=-1)
noise_r = np.exp(-(x-u)**2 / (2*sig**2)) / (np.sqrt(2*np.pi)*sig) * 400
noise_r = np.repeat(noise_r[np.newaxis], sel_h, axis=0)
noise_g = np.exp(-(x-u)**2 / (2*sig**2)) / (np.sqrt(2*np.pi)*sig) * 300
noise_g = np.repeat(noise_g[np.newaxis], sel_h, axis=0)
noise_b = np.exp(-(x-u)**2 / (2*sig**2)) / (np.sqrt(2*np.pi)*sig) * 500
noise_b = np.repeat(noise_b[np.newaxis], sel_h, axis=0)
noise = np.stack((noise_r,noise_g,noise_b), axis=-1)
noise = noise.reshape(-1,3)
pos_w = np.arange(sta_w, sta_w+sel_w)
pos_h = np.arange(sta_h, sta_h+sel_h)
pos_h = np.repeat(pos_h, sel_w)
pos_w = np.repeat(pos_w[...,np.newaxis],sel_h,axis=1).transpose().reshape(-1)
step = np.random.rand() * 0.3
pos_shift = (np.arange(sel_h) - sel_h//2) * step
        pos_shift = pos_shift.astype(np.int64)  # np.int was removed in NumPy >= 1.24
pos_shift = np.repeat(pos_shift[...,np.newaxis],sel_w,axis=1).reshape(-1)
pos_w = pos_w + pos_shift
pos_w = np.clip(pos_w, a_min=0, a_max=w-parallel_d-1)
right_img_noise = right_img.copy()
right_img_noise[pos_h, pos_w] = right_img_noise[pos_h, pos_w] + noise
right_img_noise[right_img_noise>255] = 255.
left_img_noise = left_img.copy()
left_img_noise[pos_h, pos_w+parallel_d] = left_img_noise[pos_h, pos_w+parallel_d] + noise
left_img_noise[left_img_noise>255] = 255.
return left_img_noise, right_img_noise
class RandomPhotometric(object):
"""Applies photometric augmentations to a list of image tensors.
Each image in the list is augmented in the same way.
Args:
ims: list of 3-channel images normalized to [0, 1].
Returns:
normalized images with photometric augmentations. Has the same
shape as the input.
"""
def __init__(self,
noise_stddev=0.0,
min_contrast=0.0,
max_contrast=0.0,
brightness_stddev=0.0,
min_color=1.0,
max_color=1.0,
min_gamma=1.0,
max_gamma=1.0):
self.noise_stddev = noise_stddev
self.min_contrast = min_contrast
self.max_contrast = max_contrast
self.brightness_stddev = brightness_stddev
self.min_color = min_color
self.max_color = max_color
self.min_gamma = min_gamma
self.max_gamma = max_gamma
def __call__(self, im):
contrast = np.random.uniform(self.min_contrast, self.max_contrast)
gamma = np.random.uniform(self.min_gamma, self.max_gamma)
gamma_inv = 1.0 / gamma
color = torch.from_numpy(
np.random.uniform(self.min_color, self.max_color, (3))).float()
if self.noise_stddev > 0.0:
noise = np.random.normal(scale=self.noise_stddev)
else:
noise = 0
if self.brightness_stddev > 0.0:
brightness = np.random.normal(scale=self.brightness_stddev)
else:
brightness = 0
im_re = im.permute(1, 2, 0)
im_re = (im_re * (contrast + 1.0) + brightness) * color
im_re = torch.clamp(im_re, min=0.0, max=1.0)
im_re = torch.pow(im_re, gamma_inv)
im_re += noise
im_re = im_re.permute(2, 0, 1)
return im_re
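# Usage sketch (illustrative, not part of the original loader): the dataset
# root below is a hypothetical path that must contain the preprocessed .npy
# files and the matching "<split>_mask" pickle directory.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = KITTI15Mask(root="/path/to/kitti15_npy", split="train",
                          is_training=True)
    loader = DataLoader(dataset, batch_size=1, shuffle=True)
    left, right, disparity = next(iter(loader))[:3]
    print(left.shape, right.shape, disparity.shape)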
|
{"hexsha": "9aa717c60b36a2dbe762be6693fc05c148ae5f9e", "size": 15465, "ext": "py", "lang": "Python", "max_stars_repo_path": "loader/KITTI15Mask.py", "max_stars_repo_name": "YaoChengTang/DecNet", "max_stars_repo_head_hexsha": "b623ac8d0505ec68eb930ad7a21fe9d84dd07543", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-04-16T02:24:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T10:55:27.000Z", "max_issues_repo_path": "loader/KITTI15Mask.py", "max_issues_repo_name": "YaoChengTang/DecNet", "max_issues_repo_head_hexsha": "b623ac8d0505ec68eb930ad7a21fe9d84dd07543", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-29T09:05:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-06T08:35:55.000Z", "max_forks_repo_path": "loader/KITTI15Mask.py", "max_forks_repo_name": "YaoChengTang/DecNet", "max_forks_repo_head_hexsha": "b623ac8d0505ec68eb930ad7a21fe9d84dd07543", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-07T08:00:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T06:14:52.000Z", "avg_line_length": 42.4862637363, "max_line_length": 188, "alphanum_fraction": 0.5280310378, "include": true, "reason": "import numpy", "num_tokens": 4086}
|
[STATEMENT]
lemma has_derivative_imp_has_field_derivative:
"(f has_derivative D) F \<Longrightarrow> (\<And>x. x * D' = D x) \<Longrightarrow> (f has_field_derivative D') F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(f has_derivative D) F; \<And>x. x * D' = D x\<rbrakk> \<Longrightarrow> (f has_field_derivative D') F
[PROOF STEP]
unfolding has_field_derivative_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(f has_derivative D) F; \<And>x. x * D' = D x\<rbrakk> \<Longrightarrow> (f has_derivative (*) D') F
[PROOF STEP]
by (rule has_derivative_eq_rhs[of f D]) (simp_all add: fun_eq_iff mult.commute)
|
{"llama_tokens": 272, "file": null, "length": 2}
|
# coding: UTF-8
"""
@author: samuel ko
@date: 2019.05.03
@func: style loss(ssim and its multiple variants.)
"""
import os
from math import exp
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Conv2d
from tools.prnet_loss import preprocess
def tile(a, dim, n_tile):
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = n_tile
a = a.repeat(*(repeat_idx))
order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
return torch.index_select(a, dim, order_index)
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def _fspecial_gauss(window_size, sigma=1.5):
# Function to mimic the 'fspecial' gaussian MATLAB function.
coords = np.arange(0, window_size, dtype=np.float32)
coords -= (window_size - 1) / 2.0
g = coords ** 2
g *= (-0.5 / (sigma ** 2))
g = np.reshape(g, (1, -1)) + np.reshape(g, (-1, 1))
g = torch.from_numpy(np.reshape(g, (1, -1)))
g = torch.softmax(g, dim=1)
g = g / g.sum()
return g
# 2019.05.26. butterworth filter.
# ref: http://www.cnblogs.com/laumians-notes/p/8592968.html
def butterworth(window_size, sigma=1.5, n=2):
nn = 2 * n
bw = torch.Tensor([1 / (1 + ((x - window_size // 2) / sigma) ** nn) for x in range(window_size)])
return bw / bw.sum()
def create_window(window_size, channel=3, sigma=1.5, gauss='original', n=2):
if gauss == 'original':
_1D_window = gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
elif gauss == 'butterworth':
_1D_window = butterworth(window_size, sigma, n).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
else:
g = _fspecial_gauss(window_size, sigma)
g = torch.reshape(g, (1, 1, window_size, window_size))
# 2019.06.05.
# https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853
g = tile(g, 0, 3)
return g
def _ssim(img1, img2, window_size=11, window=None, val_range=2, size_average=True):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
# padd = window_size//2
padd = 0
(batch, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
    # 2019.05.05
    # PyTorch's default memory layout is NCHW, the same as Caffe.
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_square = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_square = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12_square = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * val_range) ** 2
C2 = (0.03 * val_range) ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12_square + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_square + sigma2_square + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class ORIGINAL_SSIM(torch.nn.Module):
def __init__(self, window_size=11, val_range=2, size_average=True):
super(ORIGINAL_SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
self.channel = 3
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return 1 - _ssim(img1, img2, self.window_size, window, self.val_range, self.size_average)
def dfl_ssim(img1, img2, mask, window_size=11, val_range=1, gauss='original'):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
# padd = window_size//2
padd = 0
(batch, channel, height, width) = img1.size()
img1, img2 = torch.mul(img1, mask), torch.mul(img2, mask)
real_size = min(window_size, height, width)
window = create_window(real_size, gauss=gauss).to(img1.device)
# 2019.05.07.
c1 = (0.01 * val_range) ** 2
c2 = (0.03 * val_range) ** 2
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
num0 = mu1 * mu2 * 2.0
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
den0 = mu1_sq + mu2_sq
luminance = (num0 + c1) / (den0 + c1)
num1 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) * 2.0
den1 = F.conv2d(img1 * img1 + img2 * img2, window, padding=padd, groups=channel)
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
ssim_val = torch.mean(luminance * cs, dim=(-3, -2))
return torch.mean((1.0 - ssim_val) / 2.0)
# Classes to re-use window
class SSIM(torch.nn.Module):
def __init__(self, mask_path, window_size=11, alpha=0.8, gauss='original'):
super(SSIM, self).__init__()
self.window_size = window_size
self.window = None
self.channel = None
self.gauss = gauss
self.alpha = alpha
if os.path.exists(mask_path):
self.mask = cv2.imread(mask_path, 0)
self.mask = torch.from_numpy(preprocess(self.mask)).float().to("cuda")
else:
raise FileNotFoundError("Mask File Not Found! Please Check your Settings!")
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
self.channel = channel
return 10 * dfl_ssim(img1, img2, mask=self.mask, window_size=self.window_size, gauss=self.gauss)
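# Smoke test (illustrative, not part of the original module): ORIGINAL_SSIM is
# self-contained, so it can be checked on random tensors without a mask file.
if __name__ == "__main__":
    img_a = torch.rand(1, 3, 64, 64)
    img_b = torch.rand(1, 3, 64, 64)
    criterion = ORIGINAL_SSIM(window_size=11, val_range=1)
    print(float(criterion(img_a, img_b)))  # close to 1 for unrelated noise
    print(float(criterion(img_a, img_a)))  # ~0 for identical inputs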
|
{"hexsha": "0bd50c4ef65ffc3ab819bd9b05f5369719bf4295", "size": 6593, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/face/utils/losses.py", "max_stars_repo_name": "shachargluska/centerpose", "max_stars_repo_head_hexsha": "01c2c8bfa9d3ee91807f2ffdcc48728d104265bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 245, "max_stars_repo_stars_event_min_datetime": "2019-11-29T02:55:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:30:18.000Z", "max_issues_repo_path": "demo/face/utils/losses.py", "max_issues_repo_name": "shachargluska/centerpose", "max_issues_repo_head_hexsha": "01c2c8bfa9d3ee91807f2ffdcc48728d104265bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2019-11-29T10:05:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:16:06.000Z", "max_forks_repo_path": "demo/face/utils/losses.py", "max_forks_repo_name": "FishLiuabc/centerpose", "max_forks_repo_head_hexsha": "555d753cd82693476f91f78c53aa4147f5a83015", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2019-11-29T05:12:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T02:20:36.000Z", "avg_line_length": 34.1606217617, "max_line_length": 113, "alphanum_fraction": 0.6329440315, "include": true, "reason": "import numpy", "num_tokens": 1983}
|
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from ..utils.config import cfg
from .bbox_transform import bbox_overlaps_batch, bbox_transform_batch
import pdb
class _ProposalTargetLayer(nn.Module):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def __init__(self, nclasses):
super(_ProposalTargetLayer, self).__init__()
self._num_classes = nclasses
self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)
self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)
def forward(self, all_rois, gt_boxes, num_boxes):
self.BBOX_NORMALIZE_MEANS = self.BBOX_NORMALIZE_MEANS.type_as(gt_boxes)
self.BBOX_NORMALIZE_STDS = self.BBOX_NORMALIZE_STDS.type_as(gt_boxes)
self.BBOX_INSIDE_WEIGHTS = self.BBOX_INSIDE_WEIGHTS.type_as(gt_boxes)
'''
gt_boxes_append = gt_boxes.new(gt_boxes.size()).zero_()
for i in range(gt_boxes.size(0)):
gt_boxes_append[i,:,1:5] = gt_boxes[i,:,:4]
gt_boxes_append[i,:,0] = i
# Include ground-truth boxes in the set of candidate rois
all_rois = torch.cat([all_rois, gt_boxes_append], 1)
'''
num_images = 1
rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
fg_rois_per_image = 1 if fg_rois_per_image == 0 else fg_rois_per_image
labels, rois, bbox_targets, bbox_inside_weights, weights_batch = self._sample_rois_pytorch(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, self._num_classes)
bbox_outside_weights = (bbox_inside_weights > 0).float()
return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, weights_batch
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _get_bbox_regression_labels_pytorch(self, bbox_target_data, labels_batch, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form b x N x (class, tx, ty, tw, th)
        This function expands those targets into the per-RoI form used by the
        network; in this class-agnostic variant only the 4 box deltas are kept,
        and only RoIs with a positive class get non-zero targets.
        Returns:
            bbox_target (ndarray): b x N x 4 blob of regression targets
            bbox_inside_weights (ndarray): b x N x 4 blob of loss weights
"""
batch_size = labels_batch.size(0)
rois_per_image = labels_batch.size(1)
clss = labels_batch
bbox_targets = bbox_target_data.new(batch_size, rois_per_image, 4).zero_()
bbox_inside_weights = bbox_target_data.new(bbox_targets.size()).zero_()
for b in range(batch_size):
# assert clss[b].sum() > 0
if clss[b].sum() == 0:
continue
inds = torch.nonzero(clss[b] > 0).view(-1)
for i in range(inds.numel()):
ind = inds[i]
bbox_targets[b, ind, :] = bbox_target_data[b, ind, :]
bbox_inside_weights[b, ind, :] = self.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
def _compute_targets_pytorch(self, ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.size(1) == gt_rois.size(1)
assert ex_rois.size(2) == 4
assert gt_rois.size(2) == 4
batch_size = ex_rois.size(0)
rois_per_image = ex_rois.size(1)
targets = bbox_transform_batch(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - self.BBOX_NORMALIZE_MEANS.expand_as(targets))
/ self.BBOX_NORMALIZE_STDS.expand_as(targets))
return targets
def _sample_rois_pytorch(self, all_rois, gt_boxes, fg_rois_per_image, rois_per_image, \
num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps_batch(all_rois, gt_boxes)
max_overlaps, gt_assignment = torch.max(overlaps, 2)
batch_size = overlaps.size(0)
num_proposal = overlaps.size(1)
num_boxes_per_img = overlaps.size(2)
offset = torch.arange(0, batch_size)*gt_boxes.size(1)
offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment
# changed indexing way for pytorch 1.0
labels = gt_boxes[:,:,4].contiguous().view(-1)[(offset.view(-1),)].view(batch_size, -1)
labels_batch = labels.new(batch_size, rois_per_image).zero_()
rois_batch = all_rois.new(batch_size, rois_per_image, 5).zero_()
gt_rois_batch = all_rois.new(batch_size, rois_per_image, 5).zero_()
weights_batch = all_rois.new(batch_size, rois_per_image).zero_().fill_(1)
# Guard against the case when an image has fewer than max_fg_rois_per_image
# foreground RoIs
for i in range(batch_size):
fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.FG_THRESH).view(-1)
fg_num_rois = fg_inds.numel()
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps[i] >= cfg.TRAIN.BG_THRESH_LO)).view(-1)
bg_num_rois = bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
                # torch.randperm seems to have a bug in multi-GPU settings that
                # causes a segfault; see
                # https://github.com/pytorch/pytorch/issues/1868 for details.
                # Use numpy instead.
#rand_num = torch.randperm(fg_num_rois).long().cuda()
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(gt_boxes).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
                # torch.rand seems to have a bug: it can generate very large
                # numbers and cause an error, so we use numpy's rand instead.
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(gt_boxes).long()
bg_inds = bg_inds[rand_num]
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
#rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(gt_boxes).long()
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = rois_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
#rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(gt_boxes).long()
bg_inds = bg_inds[rand_num]
bg_rois_per_this_image = rois_per_image
fg_rois_per_this_image = 0
else:
print(max_overlaps, all_rois, gt_boxes)
pdb.set_trace()
raise ValueError("bg_num_rois = 0 and fg_num_rois = 0, this should not happen!")
# The indices that we're selecting (both fg and bg)
keep_inds = torch.cat([fg_inds, bg_inds], 0)
# Select sampled values from various arrays:
labels_batch[i].copy_(labels[i][keep_inds])
# Clamp labels for the background RoIs to 0
if fg_rois_per_this_image < rois_per_image:
labels_batch[i][fg_rois_per_this_image:] = 0
rois_batch[i] = all_rois[i][keep_inds]
rois_batch[i,:,0] = i
gt_rois_batch[i] = gt_boxes[i][gt_assignment[i][keep_inds]]
bbox_target_data = self._compute_targets_pytorch(
rois_batch[:,:,1:5], gt_rois_batch[:,:,:4])
bbox_targets, bbox_inside_weights = \
self._get_bbox_regression_labels_pytorch(bbox_target_data, labels_batch, num_classes)
return labels_batch, rois_batch, bbox_targets, bbox_inside_weights, weights_batch
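# Illustrative sketch (not part of the original layer): the fg/bg split in
# _sample_rois_pytorch boils down to thresholding each RoI's max IoU and then
# random sampling; the thresholds below stand in for cfg.TRAIN.FG_THRESH and
# cfg.TRAIN.BG_THRESH_HI/LO.
if __name__ == "__main__":
    max_overlaps = np.array([0.9, 0.55, 0.3, 0.05, 0.45])
    fg_inds = np.where(max_overlaps >= 0.5)[0]                 # -> [0, 1]
    bg_inds = np.where((max_overlaps < 0.5) &
                       (max_overlaps >= 0.1))[0]               # -> [2, 4]
    fg_keep = np.random.permutation(fg_inds)[:1]               # without replacement
    bg_keep = bg_inds[np.floor(np.random.rand(3) * len(bg_inds)).astype(np.int64)]
    print(fg_keep, bg_keep)                                    # bg sampled with replacement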
|
{"hexsha": "d9f462e0c208249634bfb47316eef3da3a13b337", "size": 9368, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/model/rpn/proposal_target_layer.py", "max_stars_repo_name": "strongwolf/CDG", "max_stars_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-04-15T11:35:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T12:24:25.000Z", "max_issues_repo_path": "lib/model/rpn/proposal_target_layer.py", "max_issues_repo_name": "strongwolf/CDG", "max_issues_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-04-29T06:26:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:06:12.000Z", "max_forks_repo_path": "lib/model/rpn/proposal_target_layer.py", "max_forks_repo_name": "strongwolf/CDG", "max_forks_repo_head_hexsha": "a78864ca3519de77deb60a11f68059b76e076b5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-29T06:26:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T06:26:42.000Z", "avg_line_length": 43.5720930233, "max_line_length": 104, "alphanum_fraction": 0.6341801879, "include": true, "reason": "import numpy", "num_tokens": 2243}
|
#!/usr/bin/env python
import os
import numpy as np
from gmprocess.io.renadic.core import is_renadic, read_renadic
from gmprocess.utils.test_utils import read_data_dir
def test_renadic():
datafiles, origin = read_data_dir("renadic", "official20100227063411530_30")
# make sure format checker works
assert is_renadic(datafiles[0])
raw_streams = []
for dfile in datafiles:
print(f"Reading file {dfile}...")
raw_streams += read_renadic(dfile)
    # the following PGA values (in g) are taken from the file headers
peaks = {
"672": (-0.030, -0.016, -0.008),
"5014": (0.295, -0.155, 0.421),
"0": (0.020, -0.019, -0.010),
}
for stream in raw_streams:
if stream[0].stats.station not in peaks:
continue
cmp_value = np.abs(np.array(peaks[stream[0].stats.station]))
pga1 = np.abs(stream[0].max())
pga2 = np.abs(stream[1].max())
pga3 = np.abs(stream[2].max())
        tpl = np.array((pga1, pga2, pga3)) / 980  # convert cm/s^2 (gal) to g
np.testing.assert_almost_equal(cmp_value, tpl, decimal=3)
if __name__ == "__main__":
os.environ["CALLED_FROM_PYTEST"] = "True"
test_renadic()
|
{"hexsha": "12dc2bc32cfdfe2ec2fbe7f3276dc15434cf275c", "size": 1174, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/gmprocess/io/renadic/renadic_test.py", "max_stars_repo_name": "baagaard-usgs/groundmotion-processing", "max_stars_repo_head_hexsha": "6be2b4460d598bba0935135efa85af2655578565", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/gmprocess/io/renadic/renadic_test.py", "max_issues_repo_name": "baagaard-usgs/groundmotion-processing", "max_issues_repo_head_hexsha": "6be2b4460d598bba0935135efa85af2655578565", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/gmprocess/io/renadic/renadic_test.py", "max_forks_repo_name": "baagaard-usgs/groundmotion-processing", "max_forks_repo_head_hexsha": "6be2b4460d598bba0935135efa85af2655578565", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9523809524, "max_line_length": 80, "alphanum_fraction": 0.6269165247, "include": true, "reason": "import numpy", "num_tokens": 348}
|
__author__ = 'mangalbhaskar'
__version__ = '1.0'
"""
# Utility functions
# --------------------------------------------------------
# Copyright (c) 2020 mangalbhaskar
# Licensed under [see LICENSE for details]
# Written by mangalbhaskar
# --------------------------------------------------------
"""
import os
import sys
import glob
import re
import time
import datetime
from collections import defaultdict
from importlib import import_module
import logging
# custom imports
import common
import apputil
import viz
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.append(this_dir)
APP_ROOT_DIR = os.getenv('AI_APP')
ROOT_DIR = os.getenv('AI_HOME')
BASE_PATH_CFG = os.getenv('AI_CFG')
if APP_ROOT_DIR not in sys.path:
sys.path.append(APP_ROOT_DIR)
if BASE_PATH_CFG not in sys.path:
sys.path.append(BASE_PATH_CFG)
this = sys.modules[__name__]
log = logging.getLogger('__main__.'+__name__)
import motor.motor_asyncio
import asyncio
import json
import aiofiles
def visualize(args, mode, appcfg):
"""Load and display given image_ids
"""
log.debug("-------------------------------->")
log.debug("visualizing annotations...")
from falcon.utils import compute
from falcon.utils import visualize as _visualize
subset = args.eval_on
log.debug("subset: {}".format(subset))
datacfg = apputil.get_datacfg(appcfg)
dbcfg = apputil.get_dbcfg(appcfg)
dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
colors = viz.random_colors(len(class_names))
log.debug("class_names: {}".format(class_names))
log.debug("len(class_names): {}".format(len(class_names)))
log.debug("len(colors), colors: {},{}".format(len(colors), colors))
log.debug("num_classes: {}".format(num_classes))
log.debug("num_images: {}".format(num_images))
name = dataset.name
datacfg.name = name
datacfg.classes = class_names
datacfg.num_classes = num_classes
image_ids = dataset.image_ids
# log.debug("dataset: {}".format(vars(dataset)))
# log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
class_names = dataset.class_names
log.debug("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
log.debug("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))
for image_id in image_ids:
image = dataset.load_image(image_id, datacfg)
if image is not None:
mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
log.debug("keys: {}".format(keys))
log.debug("values: {}".format(values))
log.debug("class_ids: {}".format(class_ids))
## Display image and instances
# _visualize.display_top_masks(image, mask, class_ids, class_names)
## Compute Bounding box
bbox = compute.extract_bboxes(mask)
log.debug("bbox: {}".format(bbox))
# _visualize.display_instances(image, bbox, mask, class_ids, class_names, show_bbox=False)
_visualize.display_instances(image, bbox, mask, class_ids, class_names)
# return image, bbox, mask, class_ids, class_names
else:
log.error("error reading image with image_id: {}".format(image_id))
def inspect_annon(args, mode, appcfg):
"""inspection of data from command line for quick verification of data sanity
"""
log.debug("---------------------------->")
log.debug("Inspecting annotations...")
subset = args.eval_on
log.debug("subset: {}".format(subset))
datacfg = apputil.get_datacfg(appcfg)
dbcfg = apputil.get_dbcfg(appcfg)
dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
colors = viz.random_colors(len(class_names))
log.debug("class_names: {}".format(class_names))
log.debug("len(class_names): {}".format(len(class_names)))
log.debug("len(colors), colors: {},{}".format(len(colors), colors))
log.debug("num_classes: {}".format(num_classes))
log.debug("num_images: {}".format(num_images))
name = dataset.name
datacfg.name = name
datacfg.classes = class_names
datacfg.num_classes = num_classes
# log.debug("dataset: {}".format(vars(dataset)))
log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
log.debug("len(dataset.image_ids): {}".format(len(dataset.image_ids)))
mod = apputil.get_module('inspect_annon')
archcfg = apputil.get_archcfg(appcfg)
log.debug("archcfg: {}".format(archcfg))
cmdcfg = archcfg
cmdcfg.name = name
cmdcfg.config.NAME = name
cmdcfg.config.NUM_CLASSES = num_classes
dnnmod = apputil.get_module(cmdcfg.dnnarch)
get_dnncfg = apputil.get_module_fn(dnnmod, "get_dnncfg")
dnncfg = get_dnncfg(cmdcfg.config)
log.debug("config.MINI_MASK_SHAPE: {}".format(dnncfg.MINI_MASK_SHAPE))
log.debug("type(dnncfg.MINI_MASK_SHAPE): {}".format(type(dnncfg.MINI_MASK_SHAPE)))
mod.all_steps(dataset, datacfg, dnncfg)
return
def train(args, mode, appcfg):
log.debug("train---------------------------->")
datacfg = apputil.get_datacfg(appcfg)
## Training dataset.
subset = "train"
log.info("subset: {}".format(subset))
dbcfg = apputil.get_dbcfg(appcfg)
dataset_train, num_classes_train, num_images_train, class_names_train, total_stats_train, total_verify_train = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
colors = viz.random_colors(len(class_names_train))
log.info("-------")
log.info("len(colors), colors: {},{}".format(len(colors), colors))
log.info("subset, class_names_train: {}, {}".format(subset, class_names_train))
log.info("subset, len(class_names_train): {}, {}".format(subset, len(class_names_train)))
log.info("subset, num_classes_train: {}, {}".format(subset, num_classes_train))
log.info("subset, num_images_train: {}, {}".format(subset, num_images_train))
log.info("subset, len(dataset_train.image_info): {}, {}".format(subset, len(dataset_train.image_info)))
log.info("subset, len(dataset_train.image_ids): {}, {}".format(subset, len(dataset_train.image_ids)))
## Validation dataset
subset = "val"
log.info("subset: {}".format(subset))
dataset_val, num_classes_val, num_images_val, class_names_val, total_stats_val, total_verify_val = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
log.info("-------")
log.info("subset, class_names_val: {}, {}".format(subset, class_names_val))
log.info("subset, len(class_names_val): {}, {}".format(subset, len(class_names_val)))
log.info("subset, num_classes_val: {}, {}".format(subset, num_classes_val))
log.info("subset, num_images_val: {}, {}".format(subset, num_images_val))
log.info("subset, len(dataset_val.image_info): {}, {}".format(subset, len(dataset_val.image_info)))
log.info("subset, len(dataset_val.image_ids): {}, {}".format(subset, len(dataset_val.image_ids)))
log.info("-------")
## Ensure the label sequence and class_names of the train and val datasets are exactly the same; if not, abort training
assert class_names_train == class_names_val
archcfg = apputil.get_archcfg(appcfg)
log.debug("archcfg: {}".format(archcfg))
cmdcfg = archcfg
name = dataset_train.name
## generate the modelinfo template to be used for evaluate and prediction
modelinfocfg = {
'classes': class_names_train.copy()
,'classinfo': None
,'config': cmdcfg.config.copy()
,'dataset': cmdcfg.dbname
,'dbname': cmdcfg.dbname
,'dnnarch': cmdcfg.dnnarch
,'framework_type': cmdcfg.framework_type
,'id': None
,'load_weights': cmdcfg.load_weights.copy()
,'name': name
,'num_classes': num_classes_train
,'problem_id': None
,'rel_num': None
,'weights': None
,'weights_path': None
,'log_dir': None
,'checkpoint_path': None
,'model_info': None
,'timestamp': None
,'creator': None
}
datacfg.name = name
datacfg.classes = class_names_train
datacfg.num_classes = num_classes_train
cmdcfg.name = name
cmdcfg.config.NAME = name
cmdcfg.config.NUM_CLASSES = num_classes_train
modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
log.info("modelcfg_path: {}".format(modelcfg_path))
modelcfg = apputil.get_modelcfg(modelcfg_path)
log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
cmdcfg['log_dir_path'] = log_dir_path
weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
cmdcfg['weights_path'] = weights_path
dnnmod = apputil.get_module(cmdcfg.dnnarch)
load_model_and_weights = apputil.get_module_fn(dnnmod, "load_model_and_weights")
model = load_model_and_weights(mode, cmdcfg, appcfg)
modelinfocfg['log_dir'] = model.log_dir
modelinfocfg['checkpoint_path'] = model.checkpoint_path
if 'creator' in cmdcfg:
modelinfocfg['creator'] = cmdcfg['creator']
log.info("modelinfocfg: {}".format(modelinfocfg))
fn_create_modelinfo = apputil.get_module_fn(dnnmod, "create_modelinfo")
modelinfo = fn_create_modelinfo(modelinfocfg)
create_modelinfo = args.create_modelinfo
try:
if not create_modelinfo:
log.info("Training...")
fn_train = apputil.get_module_fn(dnnmod, "train")
fn_train(model, dataset_train, dataset_val, cmdcfg)
log.info("Training Completed!!!")
finally:
## save modelinfo
## populate the relative weights_path of the last model from the training if any model was generated, otherwise None
logs_path = appcfg['PATHS']['AI_LOGS']
dnn = cmdcfg.dnnarch
##TODO
list_of_files = glob.glob(os.path.join(model.log_dir, dnn+'*')) # '*' matches any checkpoint; use '*.h5' if a specific format is needed
latest_file = max(list_of_files, key=os.path.getctime)
new_weights_path = re.sub(re.escape(logs_path + '/'), '', latest_file) # strip the logs root to get a relative path
modelinfo['weights_path'] = new_weights_path
modelinfo_filepath = apputil.get_abs_path(appcfg, modelinfo, 'AI_MODEL_CFG_PATH')
common.yaml_safe_dump(modelinfo_filepath, modelinfo)
log.info("TRAIN:MODELINFO_FILEPATH: {}".format(modelinfo_filepath))
log.info("---x--x--x---")
return modelinfo_filepath
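## A cleaner equivalent of the checkpoint lookup in the finally-block above
## (a sketch; assumes checkpoints are written as <dnnarch>* files under
## model.log_dir, and raises ValueError like the original if none exist):
def _latest_relative_checkpoint(log_dir, dnnarch, logs_path):
    import glob
    import os
    candidates = glob.glob(os.path.join(log_dir, dnnarch + '*'))
    latest = max(candidates, key=os.path.getctime)
    return os.path.relpath(latest, logs_path)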
def predict(args, mode, appcfg):
"""Executes the prediction and stores the generated results
TODO:
1. create the prediction configuration
2. PDB specification
"""
log.debug("predict---------------------------->")
archcfg = apputil.get_archcfg(appcfg)
log.debug("cmdcfg/archcfg: {}".format(archcfg))
cmdcfg = archcfg
if 'save_viz_and_json' not in cmdcfg:
cmdcfg.save_viz_and_json = False
save_viz = args.save_viz
show_bbox = args.show_bbox
log.debug("save_viz: {}".format(save_viz))
cmdcfg.save_viz_and_json = save_viz
modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
log.info("modelcfg_path: {}".format(modelcfg_path))
modelcfg = apputil.get_modelcfg(modelcfg_path)
log.debug("modelcfg: {}".format(modelcfg))
api_model_key = apputil.get_api_model_key(modelcfg)
log.debug("api_model_key: {}".format(api_model_key))
## for prediction, get the label information from the model information
class_names = apputil.get_class_names(modelcfg)
log.debug("class_names: {}".format(class_names))
num_classes = len(class_names)
name = modelcfg.name
cmdcfg.name = name
cmdcfg.config.NAME = name
cmdcfg.config.NUM_CLASSES = num_classes
dnnmod = apputil.get_module(cmdcfg.dnnarch)
## todo: hard-coding clear up
cmdcfg['log_dir'] = 'predict'
log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
cmdcfg['log_dir_path'] = log_dir_path
weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
cmdcfg['weights_path'] = weights_path
load_model_and_weights = apputil.get_module_fn(dnnmod, "load_model_and_weights")
model = load_model_and_weights(mode, cmdcfg, appcfg)
path_dtls = apputil.get_path_dtls(args, appcfg)
log.debug("path_dtls: {}".format(path_dtls))
for t in ["images", "videos"]:
if path_dtls[t] and len(path_dtls[t]) > 0:
fname = "detect_from_"+t
log.info("fname: {}".format(fname))
fn = getattr(this, fname, None) # default None keeps the error branch below reachable
if fn:
file_names, res = fn(appcfg, dnnmod, path_dtls[t], path_dtls['path'], model, class_names, cmdcfg, api_model_key, show_bbox)
# log.debug("len(file_names), file_names: {}, {}".format(len(file_names), file_names))
else:
log.error("Unkown fn: {}".format(fname))
# return file_names, res
return
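## Minimal sketch of the getattr-based dispatch used in predict() above:
## resolving "detect_from_<kind>" on this module keeps the loop generic over
## input kinds without an explicit if/elif chain (hypothetical call shown):
# fn = getattr(this, "detect_from_" + "images", None)
# if fn is not None:
#     fn(appcfg, dnnmod, images, path, model, class_names, cmdcfg, api_model_key)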
def evaluate(args, mode, appcfg):
"""prepare the report configuration like paths, report names etc. and calls the report generation function
"""
log.debug("evaluate---------------------------->")
subset = args.eval_on
iou_threshold = args.iou
log.debug("subset: {}".format(subset))
log.debug("iou_threshold: {}".format(iou_threshold))
get_mask = True
auto_show = False
datacfg = apputil.get_datacfg(appcfg)
dbcfg = apputil.get_dbcfg(appcfg)
log.debug("appcfg: {}".format(appcfg))
log.debug("datacfg: {}".format(datacfg))
dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
colors = viz.random_colors(len(class_names))
log.debug("-------")
log.debug("len(colors), colors: {},{}".format(len(colors), colors))
log.debug("class_names: {}".format(class_names))
log.debug("len(class_names): {}".format(len(class_names)))
log.debug("num_classes: {}".format(num_classes))
log.debug("num_images: {}".format(num_images))
log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
log.debug("len(dataset.image_ids): {}".format(len(dataset.image_ids)))
# log.debug("dataset: {}".format(vars(dataset)))
log.debug("-------")
# log.debug("TODO: color: cc")
# cc = dict(zip(class_names,colors))
name = dataset.name
datacfg.name = name
datacfg.classes = class_names
datacfg.num_classes = num_classes
archcfg = apputil.get_archcfg(appcfg)
log.debug("archcfg: {}".format(archcfg))
cmdcfg = archcfg
if 'save_viz_and_json' not in cmdcfg:
cmdcfg.save_viz_and_json = False
save_viz = args.save_viz
log.debug("save_viz: {}".format(save_viz))
cmdcfg.save_viz_and_json = save_viz
modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
log.info("modelcfg_path: {}".format(modelcfg_path))
modelcfg = apputil.get_modelcfg(modelcfg_path)
## for prediction, get the label information from the model information
class_names_model = apputil.get_class_names(modelcfg)
log.debug("class_names_model: {}".format(class_names_model))
cmdcfg.name = name
cmdcfg.config.NAME = modelcfg.name
cmdcfg.config.NUM_CLASSES = len(class_names_model)
# class_names = apputil.get_class_names(datacfg)
# log.debug("class_names: {}".format(class_names))
weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
cmdcfg['weights_path'] = weights_path
## Prepare directory structure and filenames for reporting the evaluation results
now = datetime.datetime.now()
## create log directory based on timestamp for evaluation reporting
timestamp = "{:%d%m%y_%H%M%S}".format(now)
datacfg_ts = datacfg.timestamp if 'timestamp' in datacfg else timestamp
save_viz_and_json = cmdcfg.save_viz_and_json
# iou_threshold = cmdcfg.iou_threshold
if 'evaluate_no_of_result' not in cmdcfg:
evaluate_no_of_result = -1
else:
evaluate_no_of_result = cmdcfg.evaluate_no_of_result
def clean_iou(iou):
return str("{:f}".format(iou)).replace('.','')[:3]
path = appcfg['PATHS']['AI_LOGS']
# evaluate_dir = datacfg_ts+"-evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
evaluate_dir = "evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
filepath = os.path.join(path, cmdcfg.dnnarch, evaluate_dir)
log.debug("filepath: {}".format(filepath))
common.mkdir_p(filepath)
for d in ['splash', 'mask', 'annotations', 'viz']:
common.mkdir_p(os.path.join(filepath,d))
## gt - ground truth
## pr/pred - prediction
def get_cfgfilename(cfg_filepath):
return cfg_filepath.split(os.path.sep)[-1]
## generate the summary on the evaluation run
evaluate_run_summary = defaultdict(list)
evaluate_run_summary['name'] = name
evaluate_run_summary['execution_start_time'] = timestamp
evaluate_run_summary['subset'] = subset
evaluate_run_summary['total_labels'] = num_classes
evaluate_run_summary['total_images'] = num_images
evaluate_run_summary['evaluate_no_of_result'] = evaluate_no_of_result
evaluate_run_summary['evaluate_dir'] = evaluate_dir
evaluate_run_summary['dataset'] = get_cfgfilename(appcfg.DATASET[appcfg.ACTIVE.DATASET].cfg_file)
evaluate_run_summary['arch'] = get_cfgfilename(appcfg.ARCH[appcfg.ACTIVE.ARCH].cfg_file)
evaluate_run_summary['model'] = cmdcfg['model_info']
## classification report and confusion matrix - json and csv
## generate the filenames for what reports to be generated
reportcfg = {
'filepath':filepath
,'evaluate_run_summary_reportfile':os.path.join(filepath, "evaluate_run_summary_rpt-"+subset)
,'classification_reportfile':os.path.join(filepath, "classification_rpt-"+subset)
,'confusionmatrix_reportfile':os.path.join(filepath, "confusionmatrix_rpt-"+subset)
,'iou_threshold':iou_threshold
,'evaluate_run_summary':evaluate_run_summary
,'save_viz_and_json':save_viz_and_json
,'evaluate_no_of_result':evaluate_no_of_result
}
log.debug("reportcfg: {}".format(reportcfg))
dnnmod = apputil.get_module(cmdcfg.dnnarch)
fn_evaluate = apputil.get_module_fn(dnnmod, "evaluate")
evaluate_run_summary = fn_evaluate(mode, cmdcfg, appcfg, modelcfg, dataset, datacfg, class_names, reportcfg, get_mask)
return evaluate_run_summary
def tdd(args, mode, appcfg):
log.info("---------------------------->")
from falcon import test
status = test.main(args, mode, appcfg)
return status
async def do_insert(c, doc):
result = await c.insert_one(doc)
print('result %s' % repr(result.inserted_id))
async def do_insert_many(c, docs):
result = await c.insert_many(docs)
print('inserted %d docs' % (len(result.inserted_ids),))
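## Usage sketch for the motor helpers above (hypothetical wiring; the real
## client setup further below is commented out):
# async def _example_inserts():
#     mclient = motor.motor_asyncio.AsyncIOMotorClient('mongodb://localhost:27017')
#     coll = mclient['IMAGES_DB']['IMAGES']
#     await do_insert(coll, {'image_name': 'example.png'})
#     await do_insert_many(coll, [{'i': i} for i in range(3)])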
# import numpy as np
async def do_save_to_file(filepath, data, feature_vector=None):
## Save the VIA Json response asynchronously
##---------------------------------------------
# np.save('%s.npy' % filepath, feature_vector)
async with aiofiles.open(filepath,'w') as afw:
await afw.write(json.dumps(data))
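## Usage sketch (assumes Python 3.7+ for asyncio.run):
# asyncio.run(do_save_to_file('/tmp/out.json', {'status': 'ok'}))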
async def _create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
save_viz_and_json = cmdcfg.save_viz_and_json if 'save_viz_and_json' in cmdcfg else False
## TODO: move to cmdcfg configuration
get_mask = True
file_names = []
res = []
colors = viz.random_colors(len(class_names))
log.debug("class_names: {}".format(class_names))
log.debug("len(class_names), class_names: {},{}".format(len(class_names), class_names))
log.debug("len(colors), colors: {},{}".format(len(colors), colors))
cc = dict(zip(class_names, colors))
## TODO: highly inefficient and should be switched to batch processing mode
# im_arr = [ viz.imread(os.path.join(path, image_filename)) for image_filename in images ]
for image_filename in images:
# Run model detection and save the outputs
log.debug("-------")
log.debug("Running on {}".format(image_filename))
# Read image
##------------------------------
## TODO: file or filepath or url
filepath_image_in = os.path.join(path, image_filename)
fext = ".png"
# file_name = image_filename
file_name = image_filename+fext
t0 = time.time()
## TODO: 3. to verify
# im = skimage.io.imread(filepath_image_in)
im = viz.imread(filepath_image_in)
# im_arr.append[im]
t1 = time.time()
time_taken_imread = (t1 - t0)
log.debug('Total time taken in time_taken_imread: %f seconds' %(time_taken_imread))
# Detect objects
##---------------------------------------------
t2 = time.time()
r = detect(model, im=im, verbose=1)[0]
pred_boxes = r['rois']
pred_masks = r['masks']
pred_class_ids = r['class_ids']
pred_scores = r['scores']
log.debug("Prediction on Groud Truth-------->")
log.debug('len(r): {}'.format(len(r)))
log.debug("len(pred_class_ids), pred_class_ids: {},{}".format(len(pred_class_ids), pred_class_ids))
log.debug("len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}".format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
log.debug("len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}".format(len(pred_masks), pred_masks.shape, type(pred_masks)))
log.debug("--------")
t3 = time.time()
time_taken_in_detect = (t3 - t2)
log.debug('Total time taken in detect: %f seconds' %(time_taken_in_detect))
t4 = time.time()
## TODO: batchify
time_taken_save_viz_and_json = -1
if save_viz_and_json:
jsonres = viz.get_display_instances(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
colors=cc, show_bbox=show_bbox, get_mask=get_mask, filepath=filepath, filename=file_name)
t7 = time.time()
time_taken_save_viz_and_json = (t7 - t4)
else:
jsonres = viz.get_detections(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
colors=cc, get_mask=get_mask)
log.debug("jsonres: {}".format(jsonres))
## Convert Json response to VIA Json response
##---------------------------------------------
## https://stackoverflow.com/questions/11904083/how-to-get-image-size-bytes-using-pil
# size_image = 0
size_image = os.path.getsize(filepath_image_in)
jsonres["filename"] = image_filename
jsonres["size"] = size_image
via_jsonres = {}
## TODO: if we want to store in MongoDB, '.' (dot) must not be present in a key of the json data
## but to visualize the results in the VIA tool, the dot and the size suffix are expected
via_jsonres[image_filename.replace('.','-')+str(size_image)] = jsonres
# via_jsonres[image_filename+str(size_image)] = jsonres
json_str = common.numpy_to_json(via_jsonres)
# log.debug("json_str:\n{}".format(json_str))
# file_names.append(file_name)
t5 = time.time()
time_taken_res_preparation = (t5 - t4)
log.debug('Total time taken in time_taken_res_preparation: %f seconds' %(time_taken_res_preparation))
## Create Visualisations & Save output
## TODO: resize the annotation to match the original image size, not the min or max image dimension from cfg
##---------------------------------------------
# time_taken_save_viz_and_json = -1
# if save_viz_and_json:
# t6 = time.time()
# ## Color Splash Effect & Save image
# ##---------------------------------------------
# # viz.imsave(os.path.join(filepath, 'splash', file_name), viz.color_splash(im, pred_masks))
# ## Color Mask Effect & Save image
# ##---------------------------------------------
# # viz.imsave(os.path.join(filepath, 'mask', file_name), viz.color_mask(im, pred_masks))
# ## Annotation Visualisation & Save image
# ##---------------------------------------------
# # viz.imsave(os.path.join(filepath, 'viz', file_name), imgviz)
# t7 = time.time()
# time_taken_save_viz_and_json = (t6 - t7)
# log.debug('Total time taken in save_viz_and_json: %f seconds' %(time_taken_save_viz_and_json))
t8 = time.time()
tt_turnaround = (t8 - t0)
log.debug('Total time taken in tt_turnaround: %f seconds' %(tt_turnaround))
res_code = 200
dnnarch = cmdcfg.dnnarch
modelkeys = api_model_key.split('-')
# feature_vector = json.loads(common.numpy_to_json(r))
feature_vector = r
apires = {
"api": None
,"type": api_model_key
,"dnnarch": dnnarch
,"org_name": modelkeys[0]
,"problem_id": modelkeys[1]
,"rel_num": modelkeys[2]
,"image_name": image_filename
,"result": json.loads(json_str)
,'status_code': res_code
,'timings': {
'image_read': time_taken_imread
,'detect': time_taken_in_detect
,'res_preparation': time_taken_res_preparation
,'time_taken_save_viz_and_json': time_taken_save_viz_and_json
,'tt_turnaround': tt_turnaround
}
}
filepath_jsonres = os.path.join(filepath, 'annotations', image_filename+".json")
log.debug("filepath_jsonres: {}".format(filepath_jsonres))
## Always Save the VIA Json response
await asyncio.gather(
do_save_to_file(filepath_jsonres, apires, feature_vector)
)
# res.append(apires)
log.debug("-------")
def detect_from_images(appcfg, dnnmod, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
"""detections from the images
Convention:
image - image filename
filepath - the absolute path of the image input file location
im - binary data after reading the image file
TODO:
1. Prediction details log:
- model details (path), copy of configuration, arch used, all class_names used in predictions, execution time etc.
2. Verify that masks are properly scaled to the original image dimensions
3. Impact on prediction of replacing skimage.io.imread with imread wrapper
4. call response providing the pointer to the saved files
5. viz from jsonres
6. memory leak in reading image as read time increases
7. async file and DB operation. MongoDB limit of 16 MB datasize
"""
## always create abs filepaths and respective directories
timestamp = "{:%d%m%y_%H%M%S}".format(datetime.datetime.now())
filepath = os.path.join(path, "predict-"+timestamp)
common.mkdir_p(filepath)
for d in ['splash', 'mask', 'annotations', 'viz', 'mmask', 'oframe']:
common.mkdir_p(os.path.join(filepath,d))
detect = apputil.get_module_fn(dnnmod, "detect")
DBCFG = appcfg['APP']['DBCFG']
CBIRCFG = DBCFG['CBIRCFG']
# mclient = motor.motor_asyncio.AsyncIOMotorClient('mongodb://'+CBIRCFG['host']+':'+str(CBIRCFG['port']))
# dbname = CBIRCFG['dbname']
# db = mclient[dbname]
# collection = db['IMAGES']
# _create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(_create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=show_bbox))
finally:
# shut down and close file descriptors after an interrupt
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
file_names = []
res = []
return file_names,res
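## Generic form of the event-loop handling used above: a sketch that runs any
## coroutine to completion on a fresh loop and closes it afterwards.
def _run_to_completion(coro):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(coro)
    finally:
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()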
def detect_from_videos(appcfg, dnnmod, videos, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
"""detect_from_videos
Code adapted from:
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Originally written by Waleed Abdulla
---
Key contribution:
* saving the annotated results directly
* saving the annotated mask only
* annotation results as json response for consumption in API, VGG VIA compatible results
Copyright (c) 2020 mangalbhaskar
Licensed under [see LICENSE for details]
Written by mangalbhaskar
---
Conventions:
video - video filename
filepath - the absolute path of the video input file location
vid - binary data after reading the video file
"""
import cv2
save_viz_and_json = cmdcfg.save_viz_and_json if 'save_viz_and_json' in cmdcfg else False
if save_viz_and_json:
timestamp = "{:%d%m%y_%H%M%S}".format(datetime.datetime.now())
filepath = os.path.join(path,"predict-"+timestamp)
log.debug("filepath: {}".format(filepath))
common.mkdir_p(filepath)
file_names = []
res = []
detect = apputil.get_module_fn(dnnmod, "detect")
colors = viz.random_colors(len(class_names))
log.debug("class_names: {}".format(class_names))
log.debug("len(class_names), class_names: {},{}".format(len(class_names), class_names))
log.debug("len(colors), colors: {},{}".format(len(colors), colors))
cc = dict(zip(class_names,colors))
for video in videos:
## Run model detection and save the outputs
log.debug("Running on {}".format(video))
## Read Video
##---------------------------------------------
filepath_video = os.path.join(path, video)
log.debug("Processing video with filepath_video: {}".format(filepath_video))
vid = cv2.VideoCapture(filepath_video)
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = vid.get(cv2.CAP_PROP_FPS)
vname, vext = os.path.splitext(video)
file_name = video
if save_viz_and_json:
## oframe - original image frame from the video
## pframe or viz - annotations visualization frame from the video
## annotations - annotations json per frame
video_viz_basepath = os.path.join(filepath,vname)
path_oframe = os.path.join(video_viz_basepath,"oframe")
path_pframe = os.path.join(video_viz_basepath,"pframe")
path_sframe = os.path.join(video_viz_basepath,"splash")
path_mframe = os.path.join(video_viz_basepath,"mask")
path_mmframe = os.path.join(video_viz_basepath,"mmask")
path_viz = os.path.join(video_viz_basepath,"viz")
path_annotations = os.path.join(video_viz_basepath,"annotations")
for d in [path_oframe, path_pframe, path_annotations, path_sframe, path_mframe, path_mmframe, path_viz]:
log.debug("videos dirs: {}".format(d))
common.mkdir_p(d)
## Define codec and create video writer
##---------------------------------------------
# file_name = "{:%d%m%y_%H%M%S}.avi".format(datetime.datetime.now())
fext = ".avi"
file_name = vname+fext
filepath_pvideo = os.path.join(filepath, vname, file_name)
log.debug("filepath_pvideo: {}".format(filepath_pvideo))
count = 0
success = True
frame_cutoff = 0
from_frame = 0
while success:
log.debug("-------")
log.debug("frame: {}".format(count))
if frame_cutoff and count >= frame_cutoff:
break
## start predictions from the specified frame number
if from_frame and count < from_frame:
count += 1
continue
## Read next image
success, oframe_im = vid.read()
if success:
oframe_name = str(count)+"_"+video+".png"
## OpenCV returns images as BGR, convert to RGB
oframe_im_rgb = oframe_im[..., ::-1]
## Detect objects
t1 = time.time()
# r = detect(model, im=oframe_im_rgb, verbose=0)
r = detect(model, im=oframe_im_rgb, verbose=1)[0]
t2 = time.time()
time_taken = (t2 - t1)
log.debug('Total time taken in detect: %f seconds' %(time_taken))
## Convert Json response to VIA Json response
##---------------------------------------------
t1 = time.time()
if save_viz_and_json:
# pframe_im, jsonres = viz.get_display_instances(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc, show_bbox=False)
jsonres = viz.get_display_instances(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc, show_bbox=False, auto_show=False, filepath=video_viz_basepath, filename=oframe_name)
else:
jsonres = viz.get_detections(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc)
t2 = time.time()
time_taken = (t2 - t1)
log.debug('Total time taken in detections: %f seconds' %(time_taken))
## Convert Json response to VIA Json response
##---------------------------------------------
t1 = time.time()
size_oframe = 0
jsonres["filename"] = oframe_name
jsonres["size"] = size_oframe
via_jsonres = {}
via_jsonres[oframe_name+str(size_oframe)] = jsonres
json_str = common.numpy_to_json(via_jsonres)
# log.debug("json_str:\n{}".format(json_str))
t2 = time.time()
time_taken = (t2 - t1)
log.debug('Total time taken in json_str: %f seconds' %(time_taken))
## Create Visualisations & Save output
##---------------------------------------------
if save_viz_and_json:
t1 = time.time()
## Color Splash Effect
## Save vframe and video buffer
##---------------------------------------------
# splash = viz.color_splash(oframe_im_rgb, r['masks'])
# # RGB -> BGR to save image to video
# splash = splash[..., ::-1]
# # Add image to video writer
# vwriter_splash.write(splash)
## Color Mask Effect
## Save vframe and video buffer
##---------------------------------------------
# mframe_im = viz.color_mask(oframe_im_rgb, r['masks'])
# ## RGB -> BGR to save image to video
# ## mframe_im = mframe_im[..., ::-1]
# filepath_mframe = os.path.join(path_mframe, oframe_name)
# viz.imsave(filepath_mframe, mframe_im)
## Annotation Visualisation
## Save vframe and video buffer
##---------------------------------------------
# filepath_pframe = os.path.join(path_pframe, oframe_name)
# viz.imsave(filepath_pframe, pframe_im)
# filepath_oframe = os.path.join(path_oframe, oframe_name)
# viz.imsave(filepath_oframe, oframe_im_rgb)
# # size_oframe = os.path.getsize(filepath_oframe)
filepath_jsonres = os.path.join(path_annotations, oframe_name+".json")
log.debug("filepath_jsonres: {}".format(filepath_jsonres))
with open(filepath_jsonres,'w') as fw:
fw.write(json_str)
## TODO: using the opencv itself created visualisation video from individual frames
# pframe_im_bgr = pframe_im[..., ::-1]
# height, width = pframe_im_bgr.shape[:2]
# ## int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
# ## height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
# ## vwriter_splash = cv2.VideoWriter(os.path.join(filepath, 'splash_'+file_name), cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
# vwriter_viz = cv2.VideoWriter(filepath_pvideo, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
# vwriter_viz.write(pframe_im_bgr)
# ## Add image to video writer
# ## vwriter_mask.write(mframe_im)
res.append(json_str)
count += 1
# if save_viz_and_json:
# ## vwriter_splash.release()
# vwriter_viz.release()
file_names.append(file_name)
## https://stackoverflow.com/questions/36643139/python-and-opencv-cannot-write-readable-avi-video-files
## ffmpeg -framerate 29 -i MAH04240.mp4-%d.png -c:v libx264 -r 30 MAH04240-maskrcnn-viz.mp4
## ffmpeg -framerate 29 -i %d_MAH04240.mp4.png -c:v libx264 -r 30 MAH04240-maskrcnn-viz.mp4
return file_names,res
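## Sketch of the ffmpeg stitching noted above, driven from Python (assumes
## ffmpeg is on PATH and frames named like "%d_MAH04240.mp4.png"):
# import subprocess
# def _frames_to_video(frame_pattern, out_path, framerate=29):
#     subprocess.run(['ffmpeg', '-framerate', str(framerate), '-i', frame_pattern,
#                     '-c:v', 'libx264', '-r', '30', out_path], check=True)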
def detect_from_webcam(appcfg, dnnmod, videos, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
"""TODO: stub for detect_from_webcam
Ref:
* https://github.com/SrikanthVelpuri/Mask_RCNN/blob/master/webcam.py
"""
file_names = []
res = []
return file_names,res
|
{"hexsha": "37085c0cc972018fc7be682a0ddf761825f034d1", "size": 35465, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/falcon/arch/Model.py", "max_stars_repo_name": "Roy-Tuhin/maskrcnn_sophisticate-", "max_stars_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/falcon/arch/Model.py", "max_issues_repo_name": "Roy-Tuhin/maskrcnn_sophisticate-", "max_issues_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-02-02T22:32:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:20:40.000Z", "max_forks_repo_path": "apps/falcon/arch/Model.py", "max_forks_repo_name": "Boyetuhin/maskrcnn_sophisticate-", "max_forks_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-03T22:48:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T22:48:36.000Z", "avg_line_length": 36.5242018538, "max_line_length": 221, "alphanum_fraction": 0.6721838432, "include": true, "reason": "import numpy", "num_tokens": 8973}
|
from logging import getLogger, StreamHandler, INFO
import unittest
import numpy as np
#import openjij as oj
import cxxjij.graph as G
import cxxjij.system as S
import cxxjij.algorithm as A
import cxxjij.utility as U
import cxxjij.result as R
class CXXTest(unittest.TestCase):
def setUp(self):
self.size = 8
#dense graph
self.dense = G.Dense(self.size)
self.dense = self.gen_testcase(self.dense)
#sparse graph
self.sparse = G.Sparse(self.size)
self.sparse = self.gen_testcase(self.sparse)
#chimera graph
#Note: make sure to use ChimeraGPU (not Chimera) when using the GPU, since FloatType and GPUFloatType are in general different types.
self.chimera = G.ChimeraGPU(2,2)
self.chimera = self.gen_chimera_testcase(self.chimera)
self.seed_for_spin = 1234
self.seed_for_mc = 5678
def gen_testcase(self, J):
J[0,0]=-0.1
J[0,1]=-0.9
J[0,2]=0.2
J[0,3]=0.1
J[0,4]=1.3
J[0,5]=0.8
J[0,6]=0.9
J[0,7]=0.4
J[1,1]=-0.7
J[1,2]=-1.6
J[1,3]=1.5
J[1,4]=1.5
J[1,5]=1.2
J[1,6]=-1.5
J[1,7]=-1.7
J[2,2]=-0.6
J[2,3]=1.2
J[2,4]=-1.3
J[2,5]=-0.5
J[2,6]=-1.9
J[2,7]=1.2
J[3,3]=0.8
J[3,4]=-0.5
J[3,5]=-0.4
J[3,6]=-1.8
J[3,7]=-2.0
J[4,4]=0.6
J[4,5]=-2.0
J[4,6]=-1.9
J[4,7]=0.5
J[5,5]=-1.8
J[5,6]=-1.2
J[5,7]=1.8
J[6,6]=0.3
J[6,7]=1.4
J[7,7]=1.8
self.true_groundstate = [-1, -1, 1, 1, 1, 1, 1, -1]
return J
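# Convention assumed here: off-diagonal J[i,j] are pairwise couplings and the
# diagonal J[i,i] act as local fields; true_groundstate is the reference
# configuration the annealing tests below are checked against.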
def gen_testcase_polynomial(self, J):
J[0, 1, 2, 3, 4] = 0.0686616367121328
J[0, 2, 3, 4] = 0.0682112165613232
J[2, 3, 4] = -0.1763027211493039
J[0, 1, 3, 4] = -0.0907800090462850
J[1, 3, 4] = 0.1318413458843757
J[0, 3, 4] = 0.1316587643599703
J[3, 4] = 0.1460080982070779
J[4,] = -0.0171180762893237
J[1, 2, 3] = 0.0137655628870602
J[0, 2, 4] = 0.1211030013829714
J[1,] = -0.1487502208910776
J[0, 1, 2] = 0.0678984161788189
J[0, 1, 2, 3] = 0.1655848090229992
J[1, 2, 4] = -0.1628796758769616
J[3,] = 0.1742156290818721
J[0, 2, 3] = -0.1081691119002069
J[1, 4] = 0.1756511179861042
J[0, 1, 3] = 0.0098192651462946
J[1, 3] = -0.0746905947645014
J[0, 3] = 0.1385243673379363
J[0, 4] = -0.0277205719092218
J[0, 1, 4] = 0.1113556942155680
J[0, 2] = -0.0413677095349563
J[0, 1, 2, 4] = 0.0072610193576964
J[2,] = -0.1055644094807323
J[0, 1] = 0.1996162061861095
J[2, 3] = -0.0226188424784269
J[1, 2, 3, 4] = 0.0372262067253093
J[0,] = 0.1730229445472662
J[2, 4] = 0.0863882044144668
J[1, 2] = -0.0448357038957756
J[[]] = 0.198873923292106
self.true_energy = -1.3422641349549371
return J
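# The polynomial graph encodes E(s) = sum_S J[S] * prod_{i in S} s_i over the
# index sets assigned above (J[[]] being the constant offset); true_energy is
# the reference ground-state energy checked by the polynomial test below.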
def gen_chimera_testcase(self, J):
J[0,0,0,G.ChimeraDir.IN_0or4] = +0.25
J[0,0,0,G.ChimeraDir.IN_1or5] = +0.25
J[0,0,0,G.ChimeraDir.IN_2or6] = +0.25
J[0,0,0,G.ChimeraDir.IN_3or7] = +0.25
J[0,0,1,G.ChimeraDir.IN_0or4] = +0.25
J[0,0,1,G.ChimeraDir.IN_1or5] = +0.25
J[0,0,1,G.ChimeraDir.IN_2or6] = +0.25
J[0,0,1,G.ChimeraDir.IN_3or7] = +0.25
J[0,0,2,G.ChimeraDir.IN_0or4] = +0.25
J[0,0,2,G.ChimeraDir.IN_1or5] = +0.25
J[0,0,2,G.ChimeraDir.IN_2or6] = +0.25
J[0,0,2,G.ChimeraDir.IN_3or7] = +0.25
J[0,0,3,G.ChimeraDir.IN_0or4] = +0.25
J[0,0,3,G.ChimeraDir.IN_1or5] = +0.25
J[0,0,3,G.ChimeraDir.IN_2or6] = +0.25
J[0,0,3,G.ChimeraDir.IN_3or7] = +0.25
J[0,1,0,G.ChimeraDir.IN_0or4] = +0.25
J[0,1,0,G.ChimeraDir.IN_1or5] = +0.25
J[0,1,0,G.ChimeraDir.IN_2or6] = +0.25
J[0,1,0,G.ChimeraDir.IN_3or7] = +0.25
J[0,1,1,G.ChimeraDir.IN_0or4] = +0.25
J[0,1,1,G.ChimeraDir.IN_1or5] = +0.25
J[0,1,1,G.ChimeraDir.IN_2or6] = +0.25
J[0,1,1,G.ChimeraDir.IN_3or7] = +0.25
J[0,1,2,G.ChimeraDir.IN_0or4] = +0.25
J[0,1,2,G.ChimeraDir.IN_1or5] = +0.25
J[0,1,2,G.ChimeraDir.IN_2or6] = +0.25
J[0,1,2,G.ChimeraDir.IN_3or7] = +0.25
J[0,1,3,G.ChimeraDir.IN_0or4] = +0.25
J[0,1,3,G.ChimeraDir.IN_1or5] = +0.25
J[0,1,3,G.ChimeraDir.IN_2or6] = +0.25
J[0,1,3,G.ChimeraDir.IN_3or7] = +0.25
J[1,0,0,G.ChimeraDir.IN_0or4] = +0.25
J[1,0,0,G.ChimeraDir.IN_1or5] = +0.25
J[1,0,0,G.ChimeraDir.IN_2or6] = +0.25
J[1,0,0,G.ChimeraDir.IN_3or7] = +0.25
J[1,0,1,G.ChimeraDir.IN_0or4] = +0.25
J[1,0,1,G.ChimeraDir.IN_1or5] = +0.25
J[1,0,1,G.ChimeraDir.IN_2or6] = +0.25
J[1,0,1,G.ChimeraDir.IN_3or7] = +0.25
J[1,0,2,G.ChimeraDir.IN_0or4] = +0.25
J[1,0,2,G.ChimeraDir.IN_1or5] = +0.25
J[1,0,2,G.ChimeraDir.IN_2or6] = +0.25
J[1,0,2,G.ChimeraDir.IN_3or7] = +0.25
J[1,0,3,G.ChimeraDir.IN_0or4] = +0.25
J[1,0,3,G.ChimeraDir.IN_1or5] = +0.25
J[1,0,3,G.ChimeraDir.IN_2or6] = +0.25
J[1,0,3,G.ChimeraDir.IN_3or7] = +0.25
J[1,1,0,G.ChimeraDir.IN_0or4] = +0.25
J[1,1,0,G.ChimeraDir.IN_1or5] = +0.25
J[1,1,0,G.ChimeraDir.IN_2or6] = +0.25
J[1,1,0,G.ChimeraDir.IN_3or7] = +0.25
J[1,1,1,G.ChimeraDir.IN_0or4] = +0.25
J[1,1,1,G.ChimeraDir.IN_1or5] = +0.25
J[1,1,1,G.ChimeraDir.IN_2or6] = +0.25
J[1,1,1,G.ChimeraDir.IN_3or7] = +0.25
J[1,1,2,G.ChimeraDir.IN_0or4] = +0.25
J[1,1,2,G.ChimeraDir.IN_1or5] = +0.25
J[1,1,2,G.ChimeraDir.IN_2or6] = +0.25
J[1,1,2,G.ChimeraDir.IN_3or7] = +0.25
J[1,1,3,G.ChimeraDir.IN_0or4] = +0.25
J[1,1,3,G.ChimeraDir.IN_1or5] = +0.25
J[1,1,3,G.ChimeraDir.IN_2or6] = +0.25
J[1,1,3,G.ChimeraDir.IN_3or7] = +0.2
J[0,0,0] = +1
J[0,0,6,G.ChimeraDir.PLUS_C] = +1
J[0,0,3,G.ChimeraDir.PLUS_R] = -1
J[1,0,5,G.ChimeraDir.PLUS_C] = +1
self.true_chimera_spin = [0] * J.size()
self.true_chimera_spin[J.to_ind(0,0,0)] = -1
self.true_chimera_spin[J.to_ind(0,0,1)] = -1
self.true_chimera_spin[J.to_ind(0,0,2)] = -1
self.true_chimera_spin[J.to_ind(0,0,3)] = -1
self.true_chimera_spin[J.to_ind(0,0,4)] = +1
self.true_chimera_spin[J.to_ind(0,0,5)] = +1
self.true_chimera_spin[J.to_ind(0,0,6)] = +1
self.true_chimera_spin[J.to_ind(0,0,7)] = +1
self.true_chimera_spin[J.to_ind(0,1,0)] = +1
self.true_chimera_spin[J.to_ind(0,1,1)] = +1
self.true_chimera_spin[J.to_ind(0,1,2)] = +1
self.true_chimera_spin[J.to_ind(0,1,3)] = +1
self.true_chimera_spin[J.to_ind(0,1,4)] = -1
self.true_chimera_spin[J.to_ind(0,1,5)] = -1
self.true_chimera_spin[J.to_ind(0,1,6)] = -1
self.true_chimera_spin[J.to_ind(0,1,7)] = -1
self.true_chimera_spin[J.to_ind(1,0,0)] = -1
self.true_chimera_spin[J.to_ind(1,0,1)] = -1
self.true_chimera_spin[J.to_ind(1,0,2)] = -1
self.true_chimera_spin[J.to_ind(1,0,3)] = -1
self.true_chimera_spin[J.to_ind(1,0,4)] = +1
self.true_chimera_spin[J.to_ind(1,0,5)] = +1
self.true_chimera_spin[J.to_ind(1,0,6)] = +1
self.true_chimera_spin[J.to_ind(1,0,7)] = +1
self.true_chimera_spin[J.to_ind(1,1,0)] = +1
self.true_chimera_spin[J.to_ind(1,1,1)] = +1
self.true_chimera_spin[J.to_ind(1,1,2)] = +1
self.true_chimera_spin[J.to_ind(1,1,3)] = +1
self.true_chimera_spin[J.to_ind(1,1,4)] = -1
self.true_chimera_spin[J.to_ind(1,1,5)] = -1
self.true_chimera_spin[J.to_ind(1,1,6)] = -1
self.true_chimera_spin[J.to_ind(1,1,7)] = -1
return J
def test_SingleSpinFlip_ClassicalIsing_Dense(self):
#classical ising (dense)
system = S.make_classical_ising(self.dense.gen_spin(self.seed_for_spin), self.dense)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 100, 100)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertTrue(self.true_groundstate == result_spin)
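# The make-system -> make-schedule -> run -> read-out flow above repeats in
# every test; a generic sketch of the same pattern for classical systems:
# def _anneal_classical_sketch(graph, seed_for_spin, seed_for_mc):
#     system = S.make_classical_ising(graph.gen_spin(seed_for_spin), graph)
#     schedule_list = U.make_classical_schedule_list(0.1, 100.0, 100, 100)
#     A.Algorithm_SingleSpinFlip_run(system, seed_for_mc, schedule_list)
#     return R.get_solution(system)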
def test_SingleSpinFlip_ClassicalIsing_Sparse(self):
#classical ising (sparse)
system = S.make_classical_ising(self.sparse.gen_spin(self.seed_for_spin), self.sparse)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 100, 100)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertTrue(self.true_groundstate == result_spin)
def test_SingleSpinFlip_Polynomial_Spin(self):
system_size = 5
self.polynomial = G.Polynomial(system_size)
self.polynomial = self.gen_testcase_polynomial(self.polynomial)
#classical ising (Polynomial)
system = S.make_classical_ising_polynomial(self.polynomial.gen_spin(), self.polynomial, "SPIN")
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = system.variables
#compare
self.assertAlmostEqual(self.true_energy, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_Polynomial_Binary(self):
system_size = 5
self.polynomial = G.Polynomial(system_size)
self.polynomial[0] = +1
self.polynomial[0,1] = -1
self.polynomial[0,2] = +1.5
self.polynomial[0,3] = -1.6
self.polynomial[0,4] = -1.7
self.polynomial[1,3] = +2.3
self.polynomial[1,4] = -0.3
self.polynomial[2,3] = +3.4
self.polynomial[2,4] = +3.7
self.polynomial[3,4] = -0.8
self.polynomial[0,1,2] = -0.5
self.polynomial[1,2,3] = -1.0
self.polynomial[2,3,4] = +0.9
#classical ising (Polynomial)
system = S.make_classical_ising_polynomial(self.polynomial.gen_binary(), self.polynomial, "BINARY")
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = system.variables
#compare
self.assertAlmostEqual(-3.1, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_KLocal_1(self):
system_size = 5
self.polynomial = G.Polynomial(system_size)
self.polynomial[0] = +1
self.polynomial[0,1] = -1
self.polynomial[0,2] = +1.5
self.polynomial[0,3] = -1.6
self.polynomial[0,4] = -1.7
self.polynomial[1,3] = +2.3
self.polynomial[1,4] = -0.3
self.polynomial[2,3] = +3.4
self.polynomial[2,4] = +3.7
self.polynomial[3,4] = -0.8
self.polynomial[0,1,2] = -0.5
self.polynomial[1,2,3] = -1.0
self.polynomial[2,3,4] = +0.9
#classical ising (Polynomial)
system = S.make_k_local_polynomial(self.polynomial.gen_binary(), self.polynomial)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_KLocal_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertAlmostEqual(-3.1, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_KLocal_2(self):
system_size = 30
self.polynomial = G.Polynomial(system_size)
self.polynomial[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29] = -1
#classical ising (Polynomial)
system = S.make_k_local_polynomial(self.polynomial.gen_binary(), self.polynomial)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_KLocal_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertAlmostEqual(-1, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_KLocal_3(self):
system_size = 30
self.polynomial = G.Polynomial(system_size)
self.polynomial[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29] = +1
self.polynomial[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 17,18,19,20,21,22,23,24,25,26,27,28,29] = -1
#classical ising (Polynomial)
system = S.make_k_local_polynomial(self.polynomial.gen_binary(), self.polynomial)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_KLocal_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertAlmostEqual(-1, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_KLocal_4(self):
system_size = 30
self.polynomial = G.Polynomial(system_size)
self.polynomial[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29] = -1
self.polynomial[0,1,2,3,4,5,6,7,8, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29] = +1
self.polynomial[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22, 24,25,26,27,28,29] = +1
self.polynomial[0,1,2,3,4,5,6,7,8, 10,11,12,13,14,15,16,17,18,19,20,21,22, 24,25,26,27,28,29] = -1
#classical ising (Polynomial)
system = S.make_k_local_polynomial(self.polynomial.gen_binary(), self.polynomial)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 200, 200)
#anneal
A.Algorithm_KLocal_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertAlmostEqual(-1, self.polynomial.calc_energy(result_spin))
def test_SingleSpinFlip_TransverseIsing_Dense(self):
#transverse ising (dense)
system = S.make_transverse_ising(self.dense.gen_spin(self.seed_for_spin), self.dense, 1.0, 10)
#schedulelist
schedule_list = U.make_transverse_field_schedule_list(10, 100, 100)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertTrue(self.true_groundstate == result_spin)
def test_SingleSpinFlip_TransverseIsing_Sparse(self):
#transverse ising (sparse)
system = S.make_transverse_ising(self.sparse.gen_spin(self.seed_for_spin), self.sparse, 1.0, 10)
#schedulelist
schedule_list = U.make_transverse_field_schedule_list(10, 100, 100)
#anneal
A.Algorithm_SingleSpinFlip_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertTrue(self.true_groundstate == result_spin)
def test_SwendsenWang_ClassicalIsing_Sparse(self):
#classical ising (sparse)
system = S.make_classical_ising(self.sparse.gen_spin(self.seed_for_spin), self.sparse)
#schedulelist
schedule_list = U.make_classical_schedule_list(0.1, 100.0, 100, 2000)
#anneal
A.Algorithm_SwendsenWang_run(system, self.seed_for_mc, schedule_list)
#result spin
result_spin = R.get_solution(system)
#compare
self.assertTrue(self.true_groundstate == result_spin)
# currently disabled
#def test_ContinuousTimeSwendsenWang_ContinuousTimeIsing_Sparse(self):
# #classical ising (sparse)
# system = S.make_continuous_time_ising(self.sparse.gen_spin(self.seed_for_spin), self.sparse, 1.0)
# #schedulelist (TODO: why is it so hard?)
# schedule_list = U.make_transverse_field_schedule_list(10, 500, 3000)
# #anneal
# A.Algorithm_ContinuousTimeSwendsenWang_run(system, self.seed_for_mc, schedule_list)
# #result spin
# result_spin = R.get_solution(system)
# #compare
# self.assertTrue(self.true_groundstate == result_spin)
# GPU Test is currently disabled.
# def test_GPU_ChimeraTransverseGPU(self):
# #classical ising (sparse)
# system = S.make_chimera_transverse_gpu(self.chimera.gen_spin(self.seed_for_spin), self.chimera, 1.0, 10)
# #schedulelist
# schedule_list = U.make_transverse_field_schedule_list(10, 100, 100)
# #anneal
# A.Algorithm_GPU_run(system, self.seed_for_mc, schedule_list)
# #result spin
# result_spin = R.get_solution(system)
# #compare
# self.assertTrue(self.true_chimera_spin == result_spin)
# def test_GPU_ChimeraClassicalGPU(self):
# #classical ising (sparse)
# system = S.make_chimera_classical_gpu(self.chimera.gen_spin(self.seed_for_spin), self.chimera)
# #schedulelist
# schedule_list = U.make_classical_schedule_list(0.1, 100.0, 100, 100)
# #anneal
# A.Algorithm_GPU_run(system, self.seed_for_mc, schedule_list)
# #result spin
# result_spin = R.get_solution(system)
# #compare
# self.assertTrue(self.true_chimera_spin == result_spin)
#class UtilsTest(unittest.TestCase):
#
# def test_benchmark(self):
# h = {0: 1}
# J = {(0, 1):-1.0, (1,2): -1.0}
#
# def solver(time_param, iteration):
# sa_samp = oj.SASampler()
# sa_samp.step_num = time_param
# sa_samp.iteration = iteration
# return sa_samp.sample_ising(h, J)
#
# # logger setting
# logger = getLogger('openjij')
# stream_handler = StreamHandler()
# stream_handler.setLevel(INFO)
# logger.addHandler(stream_handler)
#
# ground_state = [-1, -1, -1]
# ground_energy = oj.BinaryQuadraticModel(h, J).calc_energy(ground_state)
# step_num_list = np.linspace(1, 5, 5, dtype=np.int)
# bm_res = oj.benchmark([ground_state], ground_energy, solver, time_param_list=step_num_list)
# self.assertTrue(set(bm_res) >= {'time', 'error', 'e_res', 'tts', 'tts_threshold_prob'})
#
# self.assertEqual(len(bm_res) ,len(step_num_list))
#
# def test_response_converter(self):
# try:
# from dimod.sampleset import SampleSet
# import neal
# except ImportError:
# print(' skip')
# return
#
# neal_sampler = neal.SimulatedAnnealingSampler()
# Q = {(1,2):-1, (2,3):-1}
# response = neal_sampler.sample_qubo(Q)
# oj_res = oj.convert_response(response)
#
#class CXXTest(unittest.TestCase):
# def setUp(self):
# self.N = 10
# self.dense = cj.graph.Dense(self.N)
# for i in range(self.N):
# for j in range(i+1, self.N):
# self.dense[i, j] = -1
# def test_cxx_sa(self):
# sa = cj.system.ClassicalIsing(self.dense)
# sa.simulated_annealing(beta_min=0.1, beta_max=10.0, step_length=10, step_num=10)
# ground_spins = sa.get_spins()
#
# sa.simulated_annealing(schedule=[[0.01, 20]])
# spins = sa.get_spins()
#
# self.assertNotEqual(ground_spins, spins)
#
# def test_cxx_sqa(self):
# # 1-d model
# one_d = cj.graph.Dense(self.N)
# for i in range(self.N):
# one_d[i, (i+1)%self.N] = -1
# one_d[i, i] = -1
# sqa = cj.system.QuantumIsing(one_d, num_trotter_slices=5)
# sqa.simulated_quantum_annealing(beta=1.0, gamma=2.0, step_length=10, step_num=10)
# ground_spins = sqa.get_spins()
#
# sqa.simulated_quantum_annealing(beta=1.0, gamma=2.0, schedule=[[0.5, 200]])
# spins = sqa.get_spins()
#
# self.assertNotEqual(ground_spins, spins)
#
#
#
#class ModelTest(unittest.TestCase):
# def test_bqm(self):
# h = {}
# J = {(0,1): -1.0, (1,2): -3.0}
# bqm = oj.BinaryQuadraticModel(h=h, J=J)
#
# self.assertEqual(type(bqm.ising_interactions()), np.ndarray)
# correct_mat = np.array([[0, -1, 0,],[-1, 0, -3],[0, -3, 0]])
# np.testing.assert_array_equal(bqm.ising_interactions(), correct_mat.astype(np.float))
#
# def test_chimera_converter(self):
# h = {}
# J = {(0,4): -1.0, (6,2): -3.0, (16, 0): 4}
# chimera = oj.ChimeraModel(h=h, J=J, unit_num_L=2)
# self.assertEqual(chimera.chimera_coordinate(4, unit_num_L=2), (0,0,4))
# self.assertEqual(chimera.chimera_coordinate(12, unit_num_L=2), (0,1,4))
# self.assertEqual(chimera.chimera_coordinate(16, unit_num_L=2), (1,0,0))
#
#
# def test_chimera(self):
# h = {}
# J = {(0,4): -1.0, (6,2): -3.0}
# bqm = oj.ChimeraModel(h=h, J=J, unit_num_L=3)
# self.assertTrue(bqm.validate_chimera())
#
# J = {(0, 1): -1}
# bqm = oj.ChimeraModel(h=h, J=J, unit_num_L=3)
# with self.assertRaises(ValueError):
# bqm.validate_chimera()
#
# J = {(4, 12): -1}
# bqm = oj.ChimeraModel(h=h, J=J, unit_num_L=2)
# self.assertTrue(bqm.validate_chimera())
#
# J = {(0,4): -1, (5, 13):1, (24, 8):2, (18,20): 1, (16,0):0.5, (19, 23): -2}
# h = {13: 2}
# chimera = oj.ChimeraModel(h, J, unit_num_L=2)
# self.assertEqual(chimera.to_index(1,1,1, unit_num_L=2), 25)
#
# self.assertTrue(chimera.validate_chimera())
#
#
#
# def test_ising_dict(self):
# Q = {(0,4): -1.0, (6,2): -3.0}
# bqm = oj.ChimeraModel(Q=Q, vartype='BINARY', unit_num_L=3)
#
# def test_king_graph(self):
# h = {}
# J = {(0,1): -1.0, (1,2): -3.0}
# king_interaction = [[0,0, 1,0, -1.0], [1,0, 2,0, -3.0]]
#
# king_graph = oj.KingGraph(machine_type="ASIC", h=h, J=J)
# correct_mat = np.array([[0, -1, 0,],[-1, 0, -3],[0, -3, 0]])
# np.testing.assert_array_equal(king_graph.ising_interactions(), correct_mat.astype(np.float))
# np.testing.assert_array_equal(king_interaction, king_graph._ising_king_graph)
#
# king_graph = oj.KingGraph(machine_type="ASIC", king_graph=king_interaction)
# np.testing.assert_array_equal(king_interaction, king_graph._ising_king_graph)
#
#
# king_graph = oj.KingGraph(machine_type="ASIC", Q={(0,1): -1}, vartype="BINARY")
# king_interaction = [[0, 0, 0, 0, -0.25], [0,0,1,0,-0.25], [1,0,1,0,-0.25]]
# np.testing.assert_array_equal(king_interaction, king_graph._ising_king_graph)
#
#class TestChimeraGraph(unittest.TestCase):
# def full_chimera_qubo(self, L):
#
# left_side = [0,1,2,3]
# right_side = [4,5,6,7]
# to_ind = lambda r,c,i: 8*L*r + 8*c + i
# Q = {}
# # Set to -1 for all bonds in each chimera unit
# for c in range(L):
# for r in range(L):
# for z_l in left_side:
# for z_r in right_side:
# Q[to_ind(r,c,z_l), to_ind(r,c,z_r)] = -1
#
# # linear term
# Q[to_ind(r,c,z_l), to_ind(r,c,z_l)] = -1
# #linear term
# Q[to_ind(r,c,z_r), to_ind(r,c,z_r)] = -1
#
# # connect all chimera unit
# # column direction
# for c in range(L-1):
# for r in range(L):
# for z_r in right_side:
# Q[to_ind(r,c,z_r), to_ind(r,c+1,z_r)] = +0.49
# # row direction
# for r in range(L-1):
# for c in range(L):
# for z_l in left_side:
# Q[to_ind(r,c,z_l), to_ind(r+1,c,z_l)] = 0.49
# return Q
#
# def full_chimera_ising(self, L):
# Q = self.full_chimera_qubo(L)
# h, J = {}, {}
# for (i, j), value in Q.items():
# if i == j:
# h[i] = value
# else:
# J[i, j] = value
# return h, J
#
# def test_chimera_validate(self):
# L = 4
# Q = self.full_chimera_qubo(L=L)
# chimera = oj.ChimeraModel(Q=Q, unit_num_L=L, vartype='BINARY')
#
# self.assertTrue(chimera._validate((0,0,0),(0,0,4),L))
# self.assertFalse(chimera._validate((0,0,0),(96,0,0),L))
#
#
#
# def test_chimera_connect(self):
# Q = self.full_chimera_qubo(L=2)
# chimera = oj.ChimeraModel(Q=Q, unit_num_L=2, vartype='BINARY')
# self.assertTrue(chimera.validate_chimera())
#
# Q = self.full_chimera_qubo(L=4)
# chimera = oj.ChimeraModel(Q=Q, unit_num_L=4, vartype='BINARY')
# self.assertTrue(chimera.validate_chimera())
#
#
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "812b6492167dd49de5d5485a89a3b56c8ab1df45", "size": 25187, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test.py", "max_stars_repo_name": "OpenJij/OpenJij", "max_stars_repo_head_hexsha": "9ed58500ef47583bc472410d470bb2dd4bfec74a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-01-05T13:37:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T02:11:08.000Z", "max_issues_repo_path": "tests/test.py", "max_issues_repo_name": "OpenJij/OpenJij", "max_issues_repo_head_hexsha": "9ed58500ef47583bc472410d470bb2dd4bfec74a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 79, "max_issues_repo_issues_event_min_datetime": "2019-01-29T09:55:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-19T04:06:20.000Z", "max_forks_repo_path": "tests/test.py", "max_forks_repo_name": "OpenJij/OpenJij", "max_forks_repo_head_hexsha": "9ed58500ef47583bc472410d470bb2dd4bfec74a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-01-07T07:55:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T14:27:23.000Z", "avg_line_length": 35.375, "max_line_length": 146, "alphanum_fraction": 0.5903045222, "include": true, "reason": "import numpy", "num_tokens": 9042}
|
import os, sys
import numpy as np
import csv
def load_data(seed=0):
d = os.path.dirname(sys.modules['jpdatasets'].__file__)
file_path = os.path.join(d, 'data/polarity.csv')
with open(file_path) as f:
r = csv.reader(f, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
next(r) # skip header
data = [row for row in r]
np.random.seed(seed=seed)
np.random.shuffle(data)
train, test = np.split(data, [int(len(data) * 0.7)])
x_train = train[:, 0]
y_train = train[:, 1].astype(int)
x_test = test[:, 0]
y_test = test[:, 1].astype(int)
return (x_train, y_train), (x_test, y_test)
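## Usage sketch (assumes the package is importable as 'jpdatasets' and ships
## data/polarity.csv next to this module):
# (x_train, y_train), (x_test, y_test) = load_data(seed=0)
# print(len(x_train), len(x_test))  # deterministic 70/30 split for a fixed seed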
|
{"hexsha": "b60a979d37de9264f82cac56661e58376a2e88a1", "size": 695, "ext": "py", "lang": "Python", "max_stars_repo_path": "jpdatasets/polarity.py", "max_stars_repo_name": "harada4atsushi/jp-datasets", "max_stars_repo_head_hexsha": "d5649f3de67a9df28666671c349cd7bebdebe1fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jpdatasets/polarity.py", "max_issues_repo_name": "harada4atsushi/jp-datasets", "max_issues_repo_head_hexsha": "d5649f3de67a9df28666671c349cd7bebdebe1fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jpdatasets/polarity.py", "max_forks_repo_name": "harada4atsushi/jp-datasets", "max_forks_repo_head_hexsha": "d5649f3de67a9df28666671c349cd7bebdebe1fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7307692308, "max_line_length": 119, "alphanum_fraction": 0.6172661871, "include": true, "reason": "import numpy", "num_tokens": 201}
|
module dg2d_problem
use fsystem
use storage
implicit none
real(dp), parameter :: g = 1.0_dp
contains
! This function returns the Roe mean values
function calculateQroe(Ql, Qr) result(Qroe)
! The left and right Q values
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Ql, Qr
! The computed Roe values
real(DP), dimension(3) :: Qroe
! temp variables
real(DP) :: whl, whr, denom, hl, hr
! Choose kind of mean value
integer, parameter :: mvk = 0
select case (mvk)
case (0) ! Roe-meanvalues
! Set the height variables
hl=Ql(1)
hr=Qr(1)
denom = sqrt(hl)+sqrt(hr)
whl = 1.0_DP/sqrt(hl)
whr = 1.0_DP/sqrt(hr)
Qroe(1) = sqrt(hl*hr)
Qroe(2) = Qroe(1)*(whl*Ql(2)+whr*Qr(2))/denom
Qroe(3) = Qroe(1)*(whl*Ql(3)+whr*Qr(3))/denom
case (1) ! Arithmetic mean
Qroe = 0.5_dp*(Ql+Qr)
case (2) ! Roe velocities with arithmetic mean height
! Set the height variables
hl=Ql(1)
hr=Qr(1)
denom = sqrt(hl)+sqrt(hr)
whl = 1.0_DP/sqrt(hl)
whr = 1.0_DP/sqrt(hr)
Qroe(1) = 0.5_dp*(hl+hr)
Qroe(2) = Qroe(1)*(whl*Ql(2)+whr*Qr(2))/denom
Qroe(3) = Qroe(1)*(whl*Ql(3)+whr*Qr(3))/denom
end select
end function calculateQroe
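! Note: with Ql(2) = hl*ul and Ql(3) = hl*vl, case (0) reduces to the classical
! Roe averages u_roe = (sqrt(hl)*ul + sqrt(hr)*ur)/(sqrt(hl) + sqrt(hr)) (and
! likewise for v), scaled by the mean height h_roe = sqrt(hl*hr).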
! This routine builds the jacobi matrix DF of the flux in direction d
! d=1: x-direction, d=2: y-direction
function buildJacobi(Q,d) result(J)
! The jacobi matrix in direction d
real(DP), dimension(3,3) :: J
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: q
integer, intent(IN) :: d
! primitive variables
real(DP) :: h, u, v, c
! Calculate primitive variables
h=Q(1)
! if (h<clipwater) then
! h=0.0_dp
! u=0.0_dp
! v=0.0_dp
! else
u=Q(2)/Q(1)
v=Q(3)/Q(1)
! end if
if (d==1) then
! build Jacobi matrix in x direction
J(1,1) = 0.0_DP
J(2,1) = -u**2.0_DP + g*h ! -u^2+gh
J(3,1) = -u*v ! -uv
J(1,2) = 1.0_DP
J(2,2) = 2.0_DP*u ! 2u
J(3,2) = v ! v
J(1,3) = 0.0_DP
J(2,3) = 0.0_DP
J(3,3) = u ! u
else
! build Jacobi matrix in y direction
J(1,1) = 0.0_DP
J(2,1) = -u*v ! -uv
J(3,1) = -v**2.0_DP + g*h ! -v^2+gh
J(1,2) = 0.0_DP
J(2,2) = v ! v
J(3,2) = 0.0_DP
J(1,3) = 1.0_DP
J(2,3) = u ! u
J(3,3) = 2.0_DP*v ! 2v
end if
end function buildJacobi
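! The eigenvalues of this Jacobian are u-c, u, u+c in the x-direction and
! v-c, v, v+c in the y-direction, with c = sqrt(g*h); these are assembled
! explicitly in buildLambda below.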
! This routine builds the trafo matrix T for the jacobian DF of the flux in direction d (right eigenvectors)
! This means : invTrafo * DF * Trafo = D (diagonal matrix)
! or : Trafo * D * invTrafo = DF
! So to get the characteristic variables, the conservative variables have to be multiplied by
! invTrafo
! d=1: x-direction, d=2: y-direction
function buildTrafo(Q,d) result(Rij)
! The jacobi matrix in direction d
real(DP), dimension(3,3) :: Rij
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
h=Q(1)
u=Q(2)/Q(1)
v=Q(3)/Q(1)
! compute c = sqrt(g*h)
c = sqrt(g*h)
if (d==1) then
! build trafo matrix in x direction
Rij(1,1) = 1.0_DP
Rij(2,1) = u-c
Rij(3,1) = v
Rij(1,2) = 0.0_DP
Rij(2,2) = 0.0_DP
Rij(3,2) = 1.0_DP
Rij(1,3) = 1.0_DP
Rij(2,3) = u+c
Rij(3,3) = v
else
! build trafo matrix in y direction
Rij(1,1) = 1.0_DP
Rij(2,1) = u
Rij(3,1) = v-c
Rij(1,2) = 0.0_DP
Rij(2,2) = 1.0_DP
Rij(3,2) = 0.0_DP
Rij(1,3) = 1.0_DP
Rij(2,3) = u
Rij(3,3) = v+c
end if
end function buildTrafo
! This routine builds the inverse of the trafo matrix T for the jacobian DF of the flux in direction d (left eigenvalues)
! This means : invTrafo * DF * Trafo = D (diagonal matrix)
! or : Trafo * D * invTrafo = DF
! So to get the characteristic variables, the conservative variables have to be multiplied by
! invTrafo
! d=1: x-direction, d=2: y-direction
function buildInvTrafo(Q,d) result(invRij)
! The inverse trafo matrix (left eigenvectors) in direction d
real(DP), dimension(3,3) :: invRij
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
h=Q(1)
u=Q(2)/Q(1)
v=Q(3)/Q(1)
! compute c = sqrt(g*h)
c = sqrt(g*h)
! temporary variable
coeff = 1.0_DP/(2.0_DP*c)
if (d==1) then
! build inv trafo matrix in x direction
invRij(1,1) = coeff*(u+c)
invRij(2,1) = -v
invRij(3,1) = coeff*(c-u)
invRij(1,2) = -coeff
invRij(2,2) = 0.0_DP
invRij(3,2) = coeff
invRij(1,3) = 0.0_DP
invRij(2,3) = 1.0_DP
invRij(3,3) = 0.0_DP
else
! build inv trafo matrix in y direction
invRij(1,1) = coeff*(v+c)
invRij(2,1) = -u
invRij(3,1) = coeff*(c-v)
invRij(1,2) = 0
invRij(2,2) = 1.0_DP
invRij(3,2) = 0
invRij(1,3) = -coeff
invRij(2,3) = 0.0_DP
invRij(3,3) = coeff
end if
end function buildInvTrafo
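! Sanity check (sketch): for an admissible state Q with h > 0 the product
! matmul(buildInvTrafo(Q,d), matmul(buildJacobi(Q,d), buildTrafo(Q,d)))
! should reproduce buildLambda(Q,d) up to rounding errors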
! This routine builds the diagonalised jacobi matrix Lambda in direction d
! d=1: x-direction, d=2: y-direction
function buildLambda(Q,d) result(Lambda)
! The diagonalised jacobi matrix in direction d
real(DP), dimension(3,3) :: Lambda
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
h=Q(1)
u=Q(2)/Q(1)
v=Q(3)/Q(1)
! compute c = sqrt(g*h)
c = sqrt(g*h)
if (d==1) then
! build Lambda in x direction
Lambda(1,1) = u-c
Lambda(2,1) = 0.0_DP
Lambda(3,1) = 0.0_DP
Lambda(1,2) = 0.0_DP
Lambda(2,2) = u
Lambda(3,2) = 0.0_DP
Lambda(1,3) = 0.0_DP
Lambda(2,3) = 0.0_DP
Lambda(3,3) = u+c
else
! build Lambda in y direction
Lambda(1,1) = v-c
Lambda(2,1) = 0.0_DP
Lambda(3,1) = 0.0_DP
Lambda(1,2) = 0.0_DP
Lambda(2,2) = v
Lambda(3,2) = 0.0_DP
Lambda(1,3) = 0.0_DP
Lambda(2,3) = 0.0_DP
Lambda(3,3) = v+c
end if
end function buildLambda
! This routine builds the absolute value of the diagonalised jacobi
! matrix aLambda in direction d
! d=1: x-direction, d=2: y-direction
function buildaLambda(Q,d) result(aLambda)
! The absolute value of the diagonalised jacobi matrix in direction d
real(DP), dimension(3,3) :: aLambda
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
h=Q(1)
u=Q(2)/Q(1)
v=Q(3)/Q(1)
! compute c = sqrt(g*h)
c = sqrt(g*h)
if (d==1) then
! build aLambda in x direction
aLambda(1,1) = abs(u-c)
aLambda(2,1) = 0.0_DP
aLambda(3,1) = 0.0_DP
aLambda(1,2) = 0.0_DP
aLambda(2,2) = abs(u)
aLambda(3,2) = 0.0_DP
aLambda(1,3) = 0.0_DP
aLambda(2,3) = 0.0_DP
aLambda(3,3) = abs(u+c)
else
! build aLambda in y direction
aLambda(1,1) = abs(v-c)
aLambda(2,1) = 0.0_DP
aLambda(3,1) = 0.0_DP
aLambda(1,2) = 0.0_DP
aLambda(2,2) = abs(v)
aLambda(3,2) = 0.0_DP
aLambda(1,3) = 0.0_DP
aLambda(2,3) = 0.0_DP
aLambda(3,3) = abs(v+c)
end if
! ! With entropy fix
! if (d==1) then
! ! build aLambda in x direction
! aLambda(1,1) = min(0.1_dp,abs(u-c))
! aLambda(2,1) = 0.0_DP
! aLambda(3,1) = 0.0_DP
! aLambda(1,2) = 0.0_DP
! aLambda(2,2) = min(0.1_dp,abs(u))
! aLambda(3,2) = 0.0_DP
! aLambda(1,3) = 0.0_DP
! aLambda(2,3) = 0.0_DP
! aLambda(3,3) = min(0.1_dp,abs(u+c))
! else
! ! build aLambda in y direction
! aLambda(1,1) = min(0.1_dp,abs(v-c))
! aLambda(2,1) = 0.0_DP
! aLambda(3,1) = 0.0_DP
! aLambda(1,2) = 0.0_DP
! aLambda(2,2) = min(0.1_dp,abs(v))
! aLambda(3,2) = 0.0_DP
! aLambda(1,3) = 0.0_DP
! aLambda(2,3) = 0.0_DP
! aLambda(3,3) = min(0.1_dp,abs(v+c))
! end if
end function buildaLambda
! This routine returns the eigenvalues of the jacobi matrix in direction d
! d=1: x-direction, d=2: y-direction
function buildEigenvalues(Q,d) result(Eigenvalues)
! The eigenvalues of the jacobi matrix in direction d
real(DP), dimension(3) :: Eigenvalues
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
h=Q(1)
! if (h<clipwater) then
! h = 0.0_dp
! u = 0.0_dp
! v = 0.0_dp
! else
u = Q(2)/Q(1)
v = Q(3)/Q(1)
! end if
! compute c = sqrt(g*h)
c = sqrt(g*h)
if (d==1) then
! build eigenvalues in x direction
Eigenvalues(1) = u-c
Eigenvalues(2) = u
Eigenvalues(3) = u+c
else
! build eigenvalues in y direction
Eigenvalues(1) = v-c
Eigenvalues(2) = v
Eigenvalues(3) = v+c
end if
end function buildEigenvalues
! This routine returns the flux vector of the shallow water equations in direction d
! d=1: x-direction, d=2: y-direction
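! F_x(Q) = (uh, u^2*h + 0.5*g*h^2, u*v*h)^T and
! F_y(Q) = (vh, u*v*h, v^2*h + 0.5*g*h^2)^T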
function buildFlux(Q,d) result(Flux)
! The flux vector in direction d at Q
real(DP), dimension(3) :: Flux
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v
! Calculate primitive variables
!h=Q(1)
!u=Q(2)/Q(1)
!v=Q(3)/Q(1)
! compute c = sqrt(g*h)
!c = sqrt(g*h)
! Test for dry bed case
! if (Q(1)<clipwater) then
! !dry bed case
! Flux=0
! else
!wet bed case
if (d==1) then
! build Flux in x direction
Flux(1) = Q(2)
Flux(2) = Q(2)*Q(2)/Q(1)+0.5_DP*g*Q(1)*Q(1)
Flux(3) = Q(2)*Q(3)/Q(1)
else
! build Flux in y direction
Flux(1) = Q(3)
Flux(2) = Q(2)*Q(3)/Q(1)
Flux(3) = Q(3)*Q(3)/Q(1)+0.5_DP*g*Q(1)*Q(1)
end if
! end if ! dry or wet bed
end function buildFlux
! This routine builds the right eigenvectors for the mixed jacobian
function buildMixedR(Q,a,b) result(R)
! Right eigenvectors
real(DP), dimension(3,3) :: R
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3
! Temp array
real(dp), dimension(3) :: T
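! For a ~ 0 the generic formulas below degenerate, so fall back to the
! y-direction eigenvectors from buildTrafo, with the columns reordered to
! match the eigenvalue ordering used in buildMixedaLambda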
if (abs(a)<10*SYS_EPSREAL_DP) then
R = buildTrafo(Q,2)
T = R(:,2)
R(:,2) = R(:,3)
R(:,3) = R(:,1)
R(:,1) = T
else
c1 = a*Q(2)+b*Q(3)
c2 = Q(1)*(a*a+b*b)
c3 = sqrt(c2*Q(1)*Q(1)*g)
! Build matrix of right eigenvectors
R(1,1) = 0.0_DP
R(2,1) = -b/a
R(3,1) = 1.0_dp
R(1,2) = 1.0_DP
R(2,2) = ((c1+c3)*a+b*b*Q(2)-a*b*Q(3))/c2
R(3,2) = ((c1+c3)*b+a*a*Q(3)-a*b*Q(2))/c2
R(1,3) = 1.0_DP
R(2,3) = ((c1-c3)*a+b*b*Q(2)-a*b*Q(3))/c2
R(3,3) = ((c1-c3)*b+a*a*Q(3)-a*b*Q(2))/c2
end if
end function buildMixedR
! This routine builds the diagonal matrix of the absolute values of the eigenvalues
function buildMixedaLambda(Q,a,b) result(aLambda)
! Diagonal matrix of the absolute values of the eigenvalues
real(DP), dimension(3,3) :: aLambda
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, lambda1, lambda2, lambda3
c1 = a*Q(2)+b*Q(3)
c2 = Q(1)*(a*a+b*b)
c3 = sqrt(c2*Q(1)*Q(1)*g)
! Calculate eigenvalues
lambda1 = c1/Q(1)
lambda2 = (c1+c3)/Q(1)
lambda3 = (c1-c3)/Q(1)
aLambda = 0.0_dp
! Build diagonal matrix of the absolute eigenvalues
aLambda(1,1) = abs(lambda1)
aLambda(2,2) = abs(lambda2)
aLambda(3,3) = abs(lambda3)
end function buildMixedaLambda
! This routine builds the left eigenvectors for the mixed jacobian
function buildMixedL(Q,a,b) result(L)
! Left eigenvectors
real(DP), dimension(3,3) :: L
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3
c1 = a*Q(2)+b*Q(3)
c2 = Q(1)*(a*a+b*b)
c3 = sqrt(c2*Q(1)*Q(1)*g)
! Build matrix of left eigenvectors
L(1,1) = -a*(a*Q(3)-b*Q(2))/c2
L(2,1) = -0.5_dp*(c1-c3)/c3
L(3,1) = 0.5_dp*(c1+c3)/c3
L(1,2) = -a*b/(a*a+b*b)
L(2,2) = 0.5_dp*a*c2/(c3*(a*a+b*b))
L(3,2) = -0.5_dp*a*c2/(c3*(a*a+b*b))
L(1,3) = a*a/(a*a+b*b)
L(2,3) = 0.5_dp*b*c2/(c3*(a*a+b*b))
L(3,3) = -0.5_dp*b*c2/(c3*(a*a+b*b))
end function buildMixedL
! This routine builds the right eigenvectors for the mixed jacobian
function buildMixedR2(Q,a,b) result(R)
! Right eigenvectors
real(DP), dimension(3,3) :: R
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, h, u, v, c
! Temp array
real(dp), dimension(3) :: T
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = sqrt(h*g)
! Build matrix of right eigenvectors
R(1,1) = 1.0_DP
R(2,1) = u-c*a
R(3,1) = v-c*b
R(1,2) = 0.0_dp
R(2,2) = b
R(3,2) = -a
R(1,3) = 1.0_DP
R(2,3) = u+c*a
R(3,3) = v+c*b
end function buildMixedR2
! This routine builds the diagonal matrix of the absolute values of the eigenvalues
function buildMixedaLambda2(Q,a,b) result(aLambda)
! Diagonal matrix of the absolute values of the eigenvalues
real(DP), dimension(3,3) :: aLambda
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, lambda1, lambda2, lambda3, u, v, h, c
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = sqrt(h*g)
! Calculate eigenvalues
lambda2 = u*a+v*b
lambda1 = lambda2-c
lambda3 = lambda2+c
aLambda = 0.0_dp
! Build diagonal matrix of the absolute eigenvalues
aLambda(1,1) = abs(lambda1)
aLambda(2,2) = abs(lambda2)
aLambda(3,3) = abs(lambda3)
end function buildMixedaLambda2
! This routine builds the left eigenvectors for the mixed jacobian
function buildMixedL2(Q,a,b) result(L)
! Left eigenvectors
real(DP), dimension(3,3) :: L
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, h, u, v, c
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = sqrt(h*g)
c1=0.5_dp/c
! Build matrix of left eigenvectors
L(1,1) = c1*(u*a+v*b)+0.5_dp
L(2,1) = v*a-u*b
L(3,1) = -c1*(u*a+v*b)+0.5_dp
L(1,2) = -c1*a
L(2,2) = b
L(3,2) = c1*a
L(1,3) = -c1*b
L(2,3) = -a
L(3,3) = c1*b
end function buildMixedL2
! This routine returns the eigenvalues of the jacobi matrix
! in the direction of the normal vector (a,b)
function buildEigenvalues2(Q,a,b) result(Eigenvalues)
! The eigenvalues of the jacobi matrix
real(DP), dimension(3) :: Eigenvalues
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v, lambda1, lambda2, lambda3
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = sqrt(h*g)
! Calculate eigenvalues
lambda2 = u*a+v*b
lambda1 = lambda2-c
lambda3 = lambda2+c
! build eigenvalues in normal direction
Eigenvalues(1) = lambda1
Eigenvalues(2) = lambda2
Eigenvalues(3) = lambda3
end function buildEigenvalues2
! This function returns the Roe mean values
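! In contrast to calculateQroe, a 4th component is appended that carries
! the averaged wave speed c = sqrt(0.5*g*(hl+hr))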
function calculateQroec(Ql, Qr) result(Qroe)
! The left and right Q values
! The solution components q1 = h, q2 = uh, q3 = vh
real(DP), dimension(3), intent(IN) :: Ql, Qr
! The computed Roe values
real(DP), dimension(4) :: Qroe
! temp variables
real(DP) :: whl, whr, denom, hl, hr
! Set the height variables
hl=Ql(1)
hr=Qr(1)
denom = sqrt(hl)+sqrt(hr)
whl = 1.0_DP/sqrt(hl)
whr = 1.0_DP/sqrt(hr)
Qroe(1) = sqrt(hl*hr)
Qroe(2) = Qroe(1)*(whl*Ql(2)+whr*Qr(2))/denom
Qroe(3) = Qroe(1)*(whl*Ql(3)+whr*Qr(3))/denom
Qroe(4) = sqrt(0.5_dp*g*(hl+hr))
end function calculateQroec
! This routine builds the right eigenvectors for the mixed jacobian
function buildMixedR2c(Q,a,b) result(R)
! Right eigenvectors
real(DP), dimension(3,3) :: R
! The solution components q1 = h, q2 = uh, q3 = vh, q4 = c
real(DP), dimension(4), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, h, u, v, c
! Temp array
real(dp), dimension(3) :: T
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = Q(4)
! Build matrix of right eigenvectors
R(1,1) = 1.0_DP
R(2,1) = u-c*a
R(3,1) = v-c*b
R(1,2) = 0.0_dp
R(2,2) = b
R(3,2) = -a
R(1,3) = 1.0_DP
R(2,3) = u+c*a
R(3,3) = v+c*b
end function buildMixedR2c
! This routine builds the diagonal matrix of the absolute values of the eigenvalues
function buildMixedaLambda2c(Q,a,b) result(aLambda)
! Diagonal matrix of the absolute values of the eigenvalues
real(DP), dimension(3,3) :: aLambda
! The solution components q1 = h, q2 = uh, q3 = vh, q4 = c
real(DP), dimension(4), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, lambda1, lambda2, lambda3, u, v, h, c
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = Q(4)
! Calculate eigenvalues
lambda2 = u*a+v*b
lambda1 = lambda2-c
lambda3 = lambda2+c
aLambda = 0.0_dp
! Build diagonal matrix of the absolute eigenvalues
aLambda(1,1) = abs(lambda1)
aLambda(2,2) = abs(lambda2)
aLambda(3,3) = abs(lambda3)
end function buildMixedaLambda2c
! This routine builds the left eigenvectors for the mixed jacobian
function buildMixedL2c(Q,a,b) result(L)
! Left eigenvectors
real(DP), dimension(3,3) :: L
! The solution components q1 = h, q2 = uh, q3 = vh, q4 = c
real(DP), dimension(4), intent(IN) :: Q
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: c1, c2, c3, h, u, v, c
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = Q(4)
c1=0.5_dp/c
! Build matrix of left eigenvectors
L(1,1) = c1*(u*a+v*b)+0.5_dp
L(2,1) = v*a-u*b
L(3,1) = -c1*(u*a+v*b)+0.5_dp
L(1,2) = -c1*a
L(2,2) = b
L(3,2) = c1*a
L(1,3) = -c1*b
L(2,3) = -a
L(3,3) = c1*b
end function buildMixedL2c
! This routine returns the eigenvalues of the jacobi matrix
! in the direction of the normal vector (a,b)
function buildEigenvalues2c(Q,a,b) result(Eigenvalues)
! The eigenvalues of the jacobi matrix
real(DP), dimension(3) :: Eigenvalues
! The solution components q1 = h, q2 = uh, q3 = vh, q4 = c
real(DP), dimension(4), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! speed of gravitational waves
real(DP) :: c
! temporary variable
real(DP) :: coeff, h, u, v, lambda1, lambda2, lambda3
h = Q(1)
u = Q(2)/h
v = Q(3)/h
c = Q(4)
! Calculate eigenvalues
lambda2 = u*a+v*b
lambda1 = lambda2-c
lambda3 = lambda2+c
! build eigenvalues in normal direction
Eigenvalues(1) = lambda1
Eigenvalues(2) = lambda2
Eigenvalues(3) = lambda3
end function buildEigenvalues2c
! This routine returns the flux vector for the 2d compressible euler
! equations of gas dynamics in direction d
! d=1: x-direction, d=2: y-direction
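! With the pressure p = (gamma-1)*(rho*E - 0.5*rho*(u^2+v^2)) the flux in
! x direction is F_x = (rho*u, rho*u^2 + p, rho*u*v, u*(rho*E + p))^T;
! the expressions below are this flux rewritten in conservative variables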
function Euler_buildFlux(Q,d) result(Flux)
! The flux vector in direction d at Q
real(DP), dimension(4) :: Flux
! The solution components q1 = rho, q2 = rho*u, q3 = rho*v, q4 = rho*E
real(DP), dimension(4), intent(IN) :: Q
! the direction: d=1: x-direction, d=2: y-direction
integer, intent(IN) :: d
! pressure, stagnation enthalpy
real(DP) :: p, H
! temporary variable
real(DP) :: rho, u, v, E
! Constant Gamma
real(dp) :: gamma = 1.4_dp
! ! Calculate primitive variables
rho=Q(1)
u=Q(2)/rho
v=Q(3)/rho
! E=Q(4)/rho
!
! ! Compute the pressure
! p = (gamma - 1.0_dp)*rho*(E-0.5_dp*(u*u+v*v))
!
! ! Compute H, the stagnation enthalpy
! H = E + p/rho
if (d==1) then
! ! build Flux in x direction
! Flux(1) = Q(2)
! Flux(2) = Q(2)*u+p
! Flux(3) = Q(3)*u
! Flux(4) = Q(2)*H
! Stolen from Matthias :)
Flux(1) = Q(2)
Flux(2) = (gamma - 1.0_dp)*Q(4)-0.5_dp*(gamma-3.0_dp)*u*Q(2)-0.5_dp*(gamma-1.0_dp)*v*Q(3)
Flux(3) = Q(3)*u
Flux(4) = (gamma*Q(4)-0.5_dp*(gamma-1.0_dp)*(u*Q(2)+v*Q(3)))*u
else
! ! build Flux in y direction
! Flux(1) = Q(3)
! Flux(2) = Q(2)*v
! Flux(3) = Q(3)*v+p
! Flux(4) = Q(3)*H
! Stolen from Matthias :)
Flux(1) = Q(3)
Flux(2) = Q(3)*u
Flux(3) = (gamma - 1.0_dp)*Q(4)-0.5_dp*(gamma-3.0_dp)*v*Q(3)-0.5_dp*(gamma-1.0_dp)*u*Q(2)
Flux(4) = (gamma*Q(4)-0.5_dp*(gamma-1.0_dp)*(u*Q(2)+v*Q(3)))*v
end if
end function Euler_buildFlux
! This function returns the Roe mean values in PRIMITIVE
! variables + the speed of sound waves
! Qroe(1) = rho
! Qroe(2) = u
! Qroe(3) = v
! Qroe(4) = H
! Qroe(5) = c
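! where c is recovered from H via c^2 = (gamma-1)*(H - 0.5*(u^2+v^2))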
function Euler_calculateQroec(Ql, Qr) result(Qroe)
! The left and right solution values
real(DP), dimension(4), intent(IN) :: Ql, Qr
! The computed Roe values
real(DP), dimension(5) :: Qroe
! temp variables
real(DP) :: rhol, rhor, denom, Hl, Hr, El, Er, pl, pr, velnorm, aux, ul, ur, vl, vr
! Gamma
real(dp) :: gamma = 1.4_dp
! Get densities
rhol = Ql(1)
rhor = Qr(1)
! Calculate auxiliary variable
denom = (sqrt(rhol)+sqrt(rhor))
! Set Roe-density
Qroe(1) = sqrt(rhol*rhor)
! Set Roe-x-velocity
Qroe(2) = (Ql(2)/sqrt(rhol) + Qr(2)/sqrt(rhor))/denom
! Set Roe-y-velocity
Qroe(3) = (Ql(3)/sqrt(rhol) + Qr(3)/sqrt(rhor))/denom
! Get left and right energy states
El = Ql(4)/rhol
Er = Qr(4)/rhor
! Calculate left and right pressure
pl = (gamma-1.0_dp)*rhol*(El-0.5_dp*( (Ql(2)/rhol)**2.0_dp + (Ql(3)/rhol)**2.0_dp ) )
pr = (gamma-1.0_dp)*rhor*(Er-0.5_dp*( (Qr(2)/rhor)**2.0_dp + (Qr(3)/rhor)**2.0_dp ) )
! Calculate left and right stagnation enthalpy
Hl = El + pl/rhol
Hr = Er + pr/rhor
! Calculate Roe-stagnation enthalpy
Qroe(4) = (sqrt(rhol)*Hl + sqrt(rhor)*Hr)/denom
! Calculate the speed of sound for the Roe-values
Qroe(5) = sqrt( max((gamma-1.0_dp)*(Qroe(4) - 0.5_dp*(Qroe(2)*Qroe(2)+Qroe(3)*Qroe(3)) ),0.0_dp) )
! ! Stolen from Matthias :)
! ! Compute Roe mean values
! ul = Ql(2)/Ql(1)
! ur = Qr(2)/Qr(1)
! vl = Ql(3)/Ql(1)
! vr = Qr(3)/Qr(1)
! Qroe(1) = sqrt(rhol*rhor)
! aux = sqrt(max(rhol/rhor, SYS_EPSREAL_DP))
! Qroe(2) = (aux*ul+ur)/(aux+1.0_DP)
! Qroe(3) = (aux*vl+vr)/(aux+1.0_DP)
! hl = GAMMA*El-0.5_dp*(gamma-1.0_dp)*(ul*ul+vl*vl)
! hr = GAMMA*Er-0.5_dp*(gamma-1.0_dp)*(ur*ur+vr*vr)
! Qroe(4) = (aux*hl+hr)/(aux+1.0_DP)
end function Euler_calculateQroec
! This routine builds the right eigenvectors for the mixed jacobian
function Euler_buildMixedRcfromRoe(Q,a,b) result(R)
! Right eigenvectors
real(DP), dimension(4,4) :: R
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: rho, u, v, H, c, ve, velnorm
rho = Q(1)
u = Q(2)
v = Q(3)
H = Q(4)
c = Q(5)
ve = a*u+b*v
velnorm = 0.5_dp*sqrt(u*u+v*v)
! Build matrix of right eigenvectors
R(1,1) = 1.0_dp
R(2,1) = u-c*a
R(3,1) = v-c*b
R(4,1) = H-c*ve
R(1,2) = 1.0_dp
R(2,2) = u
R(3,2) = v
R(4,2) = velnorm
R(1,3) = 1.0_dp
R(2,3) = u+c*a
R(3,3) = v+c*b
R(4,3) = H+c*ve
R(1,4) = 0.0_dp
R(2,4) = b
R(3,4) = -a
R(4,4) = u*b-v*a
end function Euler_buildMixedRcfromRoe
! This routine builds the diagonal matrix of the absolute values of the eigenvalues
function Euler_buildMixedaLambdacfromRoe(Q,a,b) result(aLambda)
! Diagonal matrix of the absolute values of the eigenvalues
real(DP), dimension(4,4) :: aLambda
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: u, v, c, ve
! Constant Gamma
real(dp) :: gamma = 1.4_dp
u = Q(2)
v = Q(3)
c = Q(5)
ve = a*u+b*v
! Build matrix with the absolute values of the eigenvalues
aLambda = 0.0_dp
aLambda(1,1) = abs(ve-c)
aLambda(2,2) = abs(ve)
aLambda(3,3) = abs(ve+c)
aLambda(4,4) = abs(ve)
end function Euler_buildMixedaLambdacfromRoe
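! This function returns the absolute value of the largest eigenvalue,
! |a*u + b*v| + c, evaluated directly from the conservative variables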
function Euler_buildMixedalambda(Q,a,b) result(alambda)
! Absolute value of the max eigenvalue
real(DP) :: alambda
! The solution components
real(DP), dimension(4), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: u, v, c, ve, rho, E
! Constant Gamma
real(dp) :: gamma = 1.4_dp
rho = Q(1)
u = Q(2)/rho
v = Q(3)/rho
E = Q(4)/rho
ve = a*u+b*v
c = sqrt(max(gamma*(gamma-1.0_dp)*(E-0.5_dp*(u*u+v*v)),0.0_dp))
! Build absolute value of largest eigenvalue
alambda = abs(ve) + c
!alambda = sqrt(u*u+v*v) + c
end function
! This routine builds the left eigenvectors for the mixed jacobian
function Euler_buildMixedLcfromRoe(Q,a,b) result(L)
! Left eigenvectors
real(DP), dimension(4,4) :: L
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: rho, u, v, H, c, ve, velnorm, b1, b2
! Constant Gamma
real(dp) :: gamma = 1.4_dp
rho = Q(1)
u = Q(2)
v = Q(3)
H = Q(4)
c = Q(5)
ve = a*u+b*v
velnorm = 0.5_dp*sqrt(u*u+v*v)
b2 = (gamma-1.0_dp)/c/c
b1= b2*velnorm
! Build matrix of left eigenvectors
L(1,1) = 0.5_dp*(b1+ve/c)
L(2,1) = 1.0_dp-b1
L(3,1) = 0.5_dp*(b1-ve/c)
L(4,1) = a*v-b*u
L(1,2) = 0.5_dp*(-b2*u-a/c)
L(2,2) = b2*u
L(3,2) = 0.5_dp*(-b2*u+a/c)
L(4,2) = b
L(1,3) = 0.5_dp*(-b2*v-b/c)
L(2,3) = b2*v
L(3,3) = 0.5_dp*(-b2*v+b/c)
L(4,3) = -a
L(1,4) = 0.5_dp*b2
L(2,4) = -b2
L(3,4) = 0.5_dp*b2
L(4,4) = 0.0_dp
end function Euler_buildMixedLcfromRoe
! This routine builds the Euler-Jacobian in x direction
function Euler_buildJacobixcfromRoe(Q) result(Jx)
! The jacobi matrix in x direction
real(DP), dimension(4,4) :: Jx
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Local variables
real(dp) :: rho, u, v, H, c, E
! Constant Gamma
real(dp) :: gamma = 1.4_dp
rho = Q(1)
u = Q(2)
v = Q(3)
H = Q(4)
c = Q(5)
E = (H+(gamma-1.0_dp)*0.5_dp*(u*u+v*v))/gamma
! Build matrix
Jx(1,1) = 0.0_dp
Jx(2,1) = 0.5_dp*(gamma-3.0_dp)*u*u+0.5_dp*(gamma-1.0_dp)*v*v
Jx(3,1) = -u*v
Jx(4,1) = -gamma*u*E+(gamma-1.0_dp)*u*(u*u+v*v)
Jx(1,2) = 1.0_dp
Jx(2,2) = (3.0_dp-gamma)*u
Jx(3,2) = v
Jx(4,2) = gamma*E-0.5_dp*(gamma-1.0_dp)*(v*v+3.0_dp*u*u)
Jx(1,3) = 0.0_dp
Jx(2,3) = -(gamma-1.0_dp)*v
Jx(3,3) = u
Jx(4,3) = -(gamma-1.0_dp)*u*v
Jx(1,4) = 0.0_dp
Jx(2,4) = (gamma-1.0_dp)
Jx(3,4) = 0.0_dp
Jx(4,4) = gamma*u
end function Euler_buildJacobixcfromRoe
! This routine builds the Euler-Jacobian in y direction
function Euler_buildJacobiycfromRoe(Q) result(Jy)
! The jacobi matrix in y direction
real(DP), dimension(4,4) :: Jy
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Local variables
real(dp) :: rho, u, v, H, c, E
! Constant Gamma
real(dp) :: gamma = 1.4_dp
rho = Q(1)
u = Q(2)
v = Q(3)
H = Q(4)
c = Q(5)
E = (H+(gamma-1.0_dp)*0.5_dp*(u*u+v*v))/gamma
! Build matrix
Jy(1,1) = 0.0_dp
Jy(2,1) = -u*v
Jy(3,1) = 0.5_dp*(gamma-3.0_dp)*v*v+0.5_dp*(gamma-1.0_dp)*u*u
Jy(4,1) = -gamma*v*E+(gamma-1.0_dp)*v*(u*u+v*v)
Jy(1,2) = 0.0_dp
Jy(2,2) = v
Jy(3,2) = -(gamma-1.0_dp)*u
Jy(4,2) = -(gamma-1.0_dp)*u*v
Jy(1,3) = 1.0_dp
Jy(2,3) = u
Jy(3,3) = (3.0_dp-gamma)*v
Jy(4,3) = gamma*E-0.5_dp*(gamma-1.0_dp)*(u*u+3.0_dp*v*v)
Jy(1,4) = 0.0_dp
Jy(2,4) = 0.0_dp
Jy(3,4) = (gamma-1.0_dp)
Jy(4,4) = gamma*v
end function Euler_buildJacobiycfromRoe
! This routine returns the eigenvalues of the jacobi matrix
function Euler_buildEigenvaluescfromRoe(Q,a,b) result(Eigenvalues)
real(DP), dimension(4) :: Eigenvalues
! The solution components q1 = rho, q2 = u, q3 = v, q4 = H, q5 = c
real(DP), dimension(5), intent(IN) :: Q
! Components of the normal vector
real(dp), intent(IN) :: a,b
! Local variables
real(dp) :: u, v, c, ve
! Constant Gamma
real(dp) :: gamma = 1.4_dp
u = Q(2)
v = Q(3)
c = Q(5)
ve = a*u+b*v
! build eigenvalues
Eigenvalues(1) = ve-c
Eigenvalues(2) = ve
Eigenvalues(3) = ve+c
Eigenvalues(4) = ve
end function Euler_buildEigenvaluescfromRoe
! This function transforms a vector of conserved variables
! Q(1) = rho
! Q(2) = rho*u
! Q(3) = rho*v
! Q(4) = rho*E
! into a vector consisting of primitive variables + the speed of sound waves
! Q(1) = rho
! Q(2) = u
! Q(3) = v
! Q(4) = H
! Q(5) = c
function Euler_transformVector(Qin) result(Qout)
! The vector of conserved variables
real(DP), dimension(4), intent(IN) :: Qin
! The transformed vector of primitive variables + speed of sound
real(DP), dimension(5) :: Qout
! temp variables
real(DP) :: rho, H, E, p
! Gamma
real(dp) :: gamma = 1.4_dp
! Get density
rho = Qin(1)
! Set Roe-density
Qout(1) = rho
! Set x-velocity
Qout(2) = Qin(2)/rho
! Set y-velocity
Qout(3) = Qin(3)/rho
! Get energy state
E = Qin(4)/rho
! Calculate pressure
p = (gamma-1.0_dp)*rho*(E-0.5_dp*(Qout(2)*Qout(2)+Qout(3)*Qout(3) ) )
! Calculate stagnation enthalpy
H = E + p/rho
! Save stagnation enthalpy
Qout(4) = H
! Calculate the speed of sound for the Roe-values
!Qout(5) = sqrt( (gamma-1.0_dp)*(H - 0.5_dp*(Qout(2)*Qout(2)+Qout(3)*Qout(3)) ) )
Qout(5) = sqrt( max(sys_EPSREAL_DP*sys_EPSREAL_DP , gamma*p/rho) )
end function Euler_transformVector
! This routine returns the HLL flux for the 2d compressible euler
! equations of gas dynamics
function Euler_buildFlux_HLL2D(Qii,Qaa,a,b) result(Flux)
! The flux vector in direction d at Q
real(DP), dimension(4) :: Flux
! The inner and outer solution values q1 = rho, q2 = rho*u, q3 = rho*v, q4 = rho*E
real(DP), dimension(4), intent(IN) :: Qii, Qaa
! Components of the normal vector
real(dp), intent(IN) :: a, b
! Rotated traces and the flux in normal direction
real(DP), dimension(4) :: Qi, Qa, FX
! pressure, stagnation enthalpy
real(DP) :: pi, pa, Hi, Ha, Ei, Ea, ui, ua, vi, va, rhoi, rhoa, t1, t2, t3, SL, SR
! temporary variable
real(DP) :: rho, u, v, E, ci, ca, denom, H, c
! Constant Gamma
real(dp) :: gamma = 1.4_dp
Qi=Qii
Qa=Qaa
! Rotate inner trace
t2 = Qi(2)
t3 = Qi(3)
Qi(2) = a*t2 + b*t3
Qi(3) = -b*t2 + a*t3
! Rotate outer trace
t2 = Qa(2)
t3 = Qa(3)
Qa(2) = a*t2 + b*t3
Qa(3) = -b*t2 + a*t3
! Calculate primitive variables
rhoi=Qi(1)
ui=Qi(2)/rhoi
vi=Qi(3)/rhoi
Ei=Qi(4)/rhoi
rhoa=Qa(1)
ua=Qa(2)/rhoa
va=Qa(3)/rhoa
Ea=Qa(4)/rhoa
! Compute the pressure
pi = (gamma - 1.0_dp)*rhoi*(Ei-0.5_dp*(ui*ui+vi*vi))
pa = (gamma - 1.0_dp)*rhoa*(Ea-0.5_dp*(ua*ua+va*va))
! Compute H, the stagnation enthalpy
Hi = Ei + pi/rhoi
Ha = Ea + pa/rhoa
! Compute speed of sound
ci = sqrt(max( (gamma-1.0_dp)*(Hi - 0.5_dp*(ui*ui+vi*vi) ),0.0_dp) )
ca = sqrt(max( (gamma-1.0_dp)*(Ha - 0.5_dp*(ua*ua+va*va) ),0.0_dp) )
! Compute Roe-average variables
! Calculate auxiliary variable
denom = (sqrt(rhoi)+sqrt(rhoa))
! Set Roe-density
rho = sqrt(rhoi*rhoa)
! Set Roe-x-velocity
u = (ui*sqrt(rhoi) + ua*sqrt(rhoa))/denom
! Set Roe-y-velocity
v = (vi*sqrt(rhoi) + va*sqrt(rhoa))/denom
! Calculate Roe-stagnation enthalpy
H = (sqrt(rhoi)*Hi + sqrt(rhoa)*Ha)/denom
! Calculate the speed of sound for the Roe-values
c = sqrt(max( (gamma-1.0_dp)*(H - 0.5_dp*(u*u+v*v) ),0.0_dp) )
! Compute estimate wave speeds
SL = min(ui - ci,u-c)
SR = max(ua + ca,u+c)
if (SL.ge.SR) then
write(*,*) 'Warning'
end if
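! The coefficients t1, t2, t3 below realise the standard HLL flux
! F = (SR*F_i - SL*F_a + SL*SR*(Qa-Qi))/(SR-SL) for SL < 0 < SR,
! written such that it reduces to the pure upwind flux whenever
! 0 lies outside the interval [SL,SR]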
! Compute HLL flux
t1 = (min(SR,0.0_dp)-min(0.0_dp,SL))/(SR-SL)
t2 = 1.0_dp - t1
t3 = (SR*abs(SL)-SL*abs(SR))/(2.0_dp*(SR-SL))
FX = t1*Euler_buildFlux(Qa,1) + t2*Euler_buildFlux(Qi,1) - t3*(Qa-Qi)
! Rotate back
Flux(1) = FX(1)
Flux(2) = a*FX(2)-b*FX(3)
Flux(3) = b*FX(2)+a*FX(3)
Flux(4) = FX(4)
end function Euler_buildFlux_HLL2D
! This routine returns the HLLC flux for the 2d compressible euler
! equations of gas dynamics
function Euler_buildFlux_HLLC2D(Qii,Qaa,a,b) result(Flux)
! The flux vector in direction d at Q
real(DP), dimension(4) :: Flux
! The inner and outer solution values q1 = rho, q2 = rho*u, q3 = rho*v, q4 = rho*E
real(DP), dimension(4), intent(IN) :: Qii, Qaa
! Components of the normal vector
real(dp), intent(IN) :: a, b
! Rotated traces and the flux in normal direction
real(DP), dimension(4) :: Qi, Qa, FX
! pressure, stagnation enthalpy
real(DP) :: pi, pa, Hi, Ha, Ei, Ea, ui, ua, vi, va, rhoi, rhoa, t1, t2, t3, SL, SR, SS
! temporary variable
real(DP) :: rho, u, v, E, ci, ca, denom, H, c, ps, us, ql, qr
! Constant Gamma
real(dp) :: gamma = 1.4_dp
Qi=Qii
Qa=Qaa
! Rotate inner trace
t2 = Qi(2)
t3 = Qi(3)
Qi(2) = a*t2 + b*t3
Qi(3) = -b*t2 + a*t3
! Rotate outer trace
t2 = Qa(2)
t3 = Qa(3)
Qa(2) = a*t2 + b*t3
Qa(3) = -b*t2 + a*t3
! Calculate primitive variables
rhoi=Qi(1)
ui=Qi(2)/rhoi
vi=Qi(3)/rhoi
Ei=Qi(4)/rhoi
rhoa=Qa(1)
ua=Qa(2)/rhoa
va=Qa(3)/rhoa
Ea=Qa(4)/rhoa
! Compute the pressure
pi = (gamma - 1.0_dp)*rhoi*(Ei-0.5_dp*(ui*ui+vi*vi))
pa = (gamma - 1.0_dp)*rhoa*(Ea-0.5_dp*(ua*ua+va*va))
! Compute H, the stagnation enthalpy
Hi = Ei + pi/rhoi
Ha = Ea + pa/rhoa
! Compute speed of sound
ci = sqrt( max((gamma-1.0_dp)*(Hi - 0.5_dp*(ui*ui+vi*vi) ),0.0_dp ))
ca = sqrt( max((gamma-1.0_dp)*(Ha - 0.5_dp*(ua*ua+va*va) ),0.0_dp ))
ci = max(ci,1.0e-12_dp)
ca = max(ca,1.0e-12_dp)
! Compute Roe-average variables
! Calculate auxiliary variable
denom = (sqrt(rhoi)+sqrt(rhoa))
! Set Roe-density
rho = sqrt(rhoi*rhoa)
! Set Roe-x-velocity
u = (ui*sqrt(rhoi) + ua*sqrt(rhoa))/denom
! Set Roe-y-velocity
v = (vi*sqrt(rhoi) + va*sqrt(rhoa))/denom
! Calculate Roe-stagnation enthalpy
H = (sqrt(rhoi)*Hi + sqrt(rhoa)*Ha)/denom
! Calculate the speed of sound for the Roe-values
c = sqrt(max( (gamma-1.0_dp)*(H - 0.5_dp*(u*u+v*v) ),0.0_dp) )
! !!! Type 1
!
! ! Compute estimate wave speeds
! SL = min(ui - ci,ua-ca)
! SR = max(ua + ca,ui+ci)
! SS = 0.5_dp*(ui+ua) + (pi-pa)/(0.25_dp*(rhoi+rhoa)*(ci+ca))
!
!! SS = max(SS,SL)
!! SS = min(SS,SR)
!!
!! SS = 0.5_dp*(SL+SR)
!
!
!
!
!
! if(SL>SR) then
! write(*,*) 'Warning**************'
! end if
! if (SL>SS) then
! write(*,*) 'Warning'
! end if
! if (SS>SR) then
! write(*,*) 'Warning'
! end if
!
! FX = Euler_buildFlux(Qa+Qi,1)
!
! if (SL.ge.0.0_dp) then
! FX = Euler_buildFlux(Qi,1)
! elseif ((SL<0.0_dp).and.(SS.ge.0.0_dp)) then
! FX = Euler_buildFlux(Qi,1) + SL*( rhoi*(SL-ui)/min((SL-SS),-SYS_EPSREAL_DP)*(/ 1.0_dp, SS, vi, Ei/rhoi+(SS-ui)*(SS+pi/(rhoi*(SL-ui))) /) -Qi)
! elseif ((SS<0.0_dp).and.(SR>0.0_dp)) then
! FX = Euler_buildFlux(Qa,1) + SR*( rhoa*(SR-ua)/max((SR-SS),SYS_EPSREAL_DP)*(/ 1.0_dp, SS, va, Ea/rhoa+(SS-ua)*(SS+pa/(rhoa*(SR-ua))) /) -Qa)
! elseif (SR.le.0.0_dp) then
! FX = Euler_buildFlux(Qa,1)
! end if
! !!! Type 2 !!!
!
!
! ps = 0.5_dp*(pi+pa)+0.5_dp*(ui-ua)*0.5_dp*(rhoi+rhoa)*0.5_dp*(ci+ca)
! us = 0.5_dp*(ui+ua)+0.5_dp*(pi-pa)/(0.5_dp*(rhoi+rhoa)*0.5_dp*(ci+ca))
!
! if (ps>pi) then
! ql = sqrt(max(1.0_dp+(gamma+1.0_dp)/(2.0_dp*gamma)*(ps/pi-1.0_dp),0.0_dp))
! else
! ql = 1.0_dp
! end if
!
! if (ps>pa) then
! qr = sqrt(max(1.0_dp+(gamma+1.0_dp)/(2.0_dp*gamma)*(ps/pa-1.0_dp),0.0_dp))
! else
! qr = 1.0_dp
! end if
!
!
!
!
! ! Compute estimate wave speeds
! SL = ui-ci*ql
! SR = ua+ca*qr
! SS = us
!
!! SS = max(SS,SL)
!! SS = min(SS,SR)
!
!
!
! if (SL.ge.0.0_dp) then
! FX = Euler_buildFlux(Qi,1)
! elseif ((SL<0.0_dp).and.(SS.ge.0.0_dp)) then
! FX = Euler_buildFlux(Qi,1) + SL*( rhoi*(SL-ui)/min((SL-SS),-SYS_EPSREAL_DP)*(/ 1.0_dp, SS, vi, Ei/rhoi+(SS-ui)*(SS+pi/(rhoi*(SL-ui))) /) -Qi)
! elseif ((SS<0.0_dp).and.(SR>0.0_dp)) then
! FX = Euler_buildFlux(Qa,1) + SR*( rhoa*(SR-ua)/max((SR-SS),SYS_EPSREAL_DP)*(/ 1.0_dp, SS, va, Ea/rhoa+(SS-ua)*(SS+pa/(rhoa*(SR-ua))) /) -Qa)
! elseif (SR.le.0.0_dp) then
! FX = Euler_buildFlux(Qa,1)
! else
! FX = 0.5_dp*Euler_buildFlux(0.5_dp*(Qi+Qa),1)
! end if
!
!
! if(SL>SR) then
! write(*,*) 'Warning**************'
! end if
! if (SL>SS) then
! write(*,*) 'Warning'
! end if
! if (SS>SR) then
! write(*,*) 'Warning'
! end if
! Type 3
SL = min(ui - ci,ua-ca)
SR = max(ua + ca,ui+ci)
SS = ( rhoa*ua*(SR-ua)-rhoi*ui*(SL-ui)+pi-pa )/(rhoa*(SR-ua)-rhoi*(SL-ui))
ps = rhoi*(SL-ui)*(SS-ui)+pi
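! SS is the contact wave speed estimate and ps the star-region pressure,
! as in Toro's pressure-based HLLC wave speed estimates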
FX = 0.5_dp*(Euler_buildFlux(Qi,1)+Euler_buildFlux(Qa,1) -ui*Qi+(/ 0.0_dp,ps-pi,0.0_dp,ps*SS-pi*ui /) &
-ua*Qa+(/ 0.0_dp,ps-pa,0.0_dp,ps*SS-pa*ua /) )
! Rotate back
Flux(1) = FX(1)
Flux(2) = a*FX(2)-b*FX(3)
Flux(3) = b*FX(2)+a*FX(3)
Flux(4) = FX(4)
end function Euler_buildFlux_HLLC2D
!*****************************************************************************
!* -- Roe's Flux Function ---
!*
!* P. L. Roe, Approximate Riemann Solvers, Parameter Vectors and Difference
!* Schemes, Journal of Computational Physics, 43, pp. 357-372.
!*
!* Katate Masatsuka, February 2009. http://www.cfdbooks.com
!*****************************************************************************
function Roe(uL, uR, nx, ny)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny] (Left-to-Right)
real(dp) :: Roe(4) ! Output: Roe flux function (upwind)
!Local constants
real(dp) :: gamma ! Ratio of specific heat.
real(dp) :: zero, fifth, half, one, two ! Numbers
!Local variables
real(dp) :: tx, ty ! Tangent vector (perpendicular to the face normal)
real(dp) :: vxL, vxR, vyL, vyR ! Velocity components.
real(dp) :: rhoL, rhoR, pL, pR ! Primitive variables.
real(dp) :: vnL, vnR, vtL, vtR ! Normal and tangent velocities
real(dp) :: aL, aR, HL, HR ! Speeds of sound and total enthalpies.
real(dp) :: RT,rho,vx,vy,H,a,vn, vt ! Roe-averages
real(dp) :: drho,dvx,dvy,dvn,dvt,dpp,dV(4) ! Wave strengths
real(dp) :: ws(4),dws(4), Rv(4,4) ! Wave speeds and right-eigenvectors
real(dp) :: fL(4), fR(4), diss(4) ! Fluxes and dissipation term
integer :: i, j
!Constants.
gamma = 1.4_dp
zero = 0.0_dp
fifth = 0.2_dp
half = 0.5_dp
one = 1.0_dp
two = 2.0_dp
!Tangent vector (Do you like it? Actually, Roe flux can be implemented
! without any tangent vector. See "I do like CFD, VOL.1" for details.)
tx = -ny
ty = nx
!Primitive and other variables.
! Left state
rhoL = uL(1)
vxL = uL(2)/uL(1)
vyL = uL(3)/uL(1)
vnL = vxL*nx+vyL*ny
vtL = vxL*tx+vyL*ty
pL = (gamma-one)*( uL(4) - half*rhoL*(vxL*vxL+vyL*vyL) )
aL = sqrt(gamma*pL/rhoL)
HL = ( uL(4) + pL ) / rhoL
! Right state
rhoR = uR(1)
vxR = uR(2)/uR(1)
vyR = uR(3)/uR(1)
vnR = vxR*nx+vyR*ny
vtR = vxR*tx+vyR*ty
pR = (gamma-one)*( uR(4) - half*rhoR*(vxR*vxR+vyR*vyR) )
aR = sqrt(gamma*pR/rhoR)
HR = ( uR(4) + pR ) / rhoR
!First compute the Roe Averages
RT = sqrt(rhoR/rhoL)
rho = RT*rhoL
vx = (vxL+RT*vxR)/(one+RT)
vy = (vyL+RT*vyR)/(one+RT)
H = ( HL+RT* HR)/(one+RT)
a = sqrt( (gamma-one)*(H-half*(vx*vx+vy*vy)) )
vn = vx*nx+vy*ny
vt = vx*tx+vy*ty
!Wave Strengths
drho = rhoR - rhoL
dpp = pR - pL
dvn = vnR - vnL
dvt = vtR - vtL
dV(1) = (dpp - rho*a*dvn )/(two*a*a)
dV(2) = rho*dvt/a
dV(3) = drho - dpp/(a*a)
dV(4) = (dpp + rho*a*dvn )/(two*a*a)
!Wave Speed
ws(1) = abs(vn-a)
ws(2) = abs(vn)
ws(3) = abs(vn)
ws(4) = abs(vn+a)
!Harten's Entropy Fix JCP(1983), 49, pp357-393:
! only for the nonlinear fields.
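! |lambda| is replaced by 0.5*(lambda^2/delta + delta) whenever
! |lambda| < delta, here with delta = fifth = 0.2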
dws(1) = fifth
if ( ws(1) < dws(1) ) ws(1) = half * ( ws(1)*ws(1)/dws(1)+dws(1) )
dws(4) = fifth
if ( ws(4) < dws(4) ) ws(4) = half * ( ws(4)*ws(4)/dws(4)+dws(4) )
!Right Eigenvectors
Rv(1,1) = one
Rv(2,1) = vx - a*nx
Rv(3,1) = vy - a*ny
Rv(4,1) = H - vn*a
Rv(1,2) = zero
Rv(2,2) = a*tx
Rv(3,2) = a*ty
Rv(4,2) = vt*a
Rv(1,3) = one
Rv(2,3) = vx
Rv(3,3) = vy
Rv(4,3) = half*(vx*vx+vy*vy)
Rv(1,4) = one
Rv(2,4) = vx + a*nx
Rv(3,4) = vy + a*ny
Rv(4,4) = H + vn*a
!Dissipation Term
diss = zero
do i=1,4
do j=1,4
diss(i) = diss(i) + ws(j)*dV(j)*Rv(i,j)
end do
end do
!Compute the flux.
fL(1) = rhoL*vnL
fL(2) = rhoL*vnL * vxL + pL*nx
fL(3) = rhoL*vnL * vyL + pL*ny
fL(4) = rhoL*vnL * HL
fR(1) = rhoR*vnR
fR(2) = rhoR*vnR * vxR + pR*nx
fR(3) = rhoR*vnR * vyR + pR*ny
fR(4) = rhoR*vnR * HR
Roe = half * (fL + fR - diss)
end function Roe
function DRoe(uL, uR, nx, ny, h)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny]
real(dp) :: h ! Input: infinitesimal constant
real(dp), dimension(4,8) :: DRoe ! Output: Approximate differential of Roe flux function
real(dp), dimension(4) :: F, U
integer :: i
F = Roe(uL, uR, nx, ny)
! First order approx
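! dRoe/duL(i) is approximated by the forward difference
! (Roe(uL + h*e_i, uR) - Roe(uL, uR))/h, analogously for uR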
do i = 1,4
U = uL
U(i) = U(i) + h
DRoe(:,i) = (Roe(U, uR, nx, ny) - F)/h
end do
do i = 1,4
U = uR
U(i) = U(i) + h
DRoe(:,i+4) = (Roe(uL, U, nx, ny) - F)/h
end do
! ! Second order approx
! do i = 1,4
! U = uL
! U(i) = U(i) + h
! DRoe(:,i) = Roe(U, uR, nx, ny)
! U = uL
! U(i) = U(i) - h
! DRoe(:,i) = 0.5_dp*(DRoe(:,i)-Roe(U, uR, nx, ny))/h
! end do
!
! do i = 1,4
! U = uR
! U(i) = U(i) + h
! DRoe(:,i+4) = Roe(uL, U, nx, ny)
! U = uR
! U(i) = U(i) - h
! DRoe(:,i+4) = 0.5_dp*(DRoe(:,i+4)-Roe(uL, U, nx, ny))/h
! end do
end function DRoe
!*****************************************************************************
!* -- Rotated-Roe-HLL Flux Function ---
!*
!* H. Nishikawa and K. Kitamura, Very Simple, Carbuncle-Free, Boundary-Layer
!* Resolving, Rotated-Hybrid Riemann Solvers,
!* Journal of Computational Physics, 227, pp. 2560-2581, 2008.
!*
!* The most robust Riemann solver known to the author (in terms of nonlinear
!* instability such as carbuncle).
!*
!* NB: At a boundary face, need to switch to a geometric normal vector:
!* (nx2,ny2)=(nx, ny), (nx1,ny1)=(-ny,nx).
!* This is not implemented here. It requires information on whether
!* the geometric normal, (nx,ny), is on a boundary face or not.
!* It shouldn't be difficult for you to implement it.
!*
!* Katate Masatsuka, February 2010. http://www.cfdbooks.com
!*****************************************************************************
function Rotated_RHLL(uL, uR, nx, ny)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny] (Left-to-Right)
real(dp) :: Rotated_RHLL(4) ! Output: Rotated_RHLL flux function.
!Local constants
real(dp) :: gamma ! Ratio of specific heat.
real(dp) :: zero, fifth, half, one, two ! Numbers
real(dp) :: eps !
!Local variables
real(dp) :: nx1, ny1, nx2, ny2 ! Rotated normals, n1 and n2
real(dp) :: tx, ty ! Tangent vector (taken as n1)
real(dp) :: alpha1, alpha2 ! Projections of the new normals
real(dp) :: vxL, vxR, vyL, vyR ! Velocity components.
real(dp) :: rhoL, rhoR, pL, pR ! Primitive variables.
real(dp) :: vnL, vnR, vtL, vtR ! Normal and tangent velocities
real(dp) :: aL, aR, HL, HR ! Speeds of sound and total enthalpy
real(dp) :: RT,rho,vx,vy,H,a ! Roe-averages
real(dp) :: vn, vt ! Normal and tangent velocities (Roe-average)
real(dp) :: drho,dvx,dvy,dvn,dvt,dpp,dV(4) ! Wave strengths
real(dp) :: abs_dq ! Magnitude of the velocity difference
real(dp) :: abs_ws(4),ws(4),dws(4), Rv(4,4)! Wave speeds and right-eigenvectors
real(dp) :: SRp,SLm ! Wave speeds for the HLL part
real(dp) :: fL(4), fR(4), diss(4) ! Fluxes and dissipation term
real(dp) :: temp
integer :: i, j
!Constants.
gamma = 1.4_dp
zero = 0.0_dp
fifth = 0.2_dp
half = 0.5_dp
one = 1.0_dp
two = 2.0_dp
eps = 1.0e-12_dp ! 1.0e-12 in the original paper (double precision)
!Primitive and other variables.
! Left state
rhoL = uL(1)
vxL = uL(2)/uL(1)
vyL = uL(3)/uL(1)
pL = (gamma-one)*( uL(4) - half*rhoL*(vxL*vxL+vyL*vyL) )
pL = max(0.0_dp,pL)
aL = sqrt(gamma*pL/rhoL)
HL = ( uL(4) + pL ) / rhoL
! Right state
rhoR = uR(1)
vxR = uR(2)/uR(1)
vyR = uR(3)/uR(1)
pR = (gamma-one)*( uR(4) - half*rhoR*(vxR*vxR+vyR*vyR) )
pR = max(0.0_dp,pR)
aR = sqrt(gamma*pR/rhoR)
HR = ( uR(4) + pR ) / rhoR
vnL = vxL*nx + vyL*ny
vnR = vxR*nx + vyR*ny
!Compute the flux.
fL(1) = rhoL*vnL
fL(2) = rhoL*vnL * vxL + pL*nx
fL(3) = rhoL*vnL * vyL + pL*ny
fL(4) = rhoL*vnL * HL
fR(1) = rhoR*vnR
fR(2) = rhoR*vnR * vxR + pR*nx
fR(3) = rhoR*vnR * vyR + pR*ny
fR(4) = rhoR*vnR * HR
!Define n1 and n2, and compute alpha1 and alpha2: (4.2) in the original paper.
!(NB: n1 and n2 may need to be frozen at some point during
! a steady calculation to fully make it converge. For time-accurate
! calculation, this is fine.)
! NB: For a boundary face, set (nx2,ny2)=(nx,ny), (nx1,ny1)=(-ny,nx).
abs_dq = sqrt( (vxR-vxL)**2+(vyR-vyL)**2 )
if ( abs_dq > eps) then
nx1 = (vxR-vxL)/abs_dq
ny1 = (vyR-vyL)/abs_dq
else
nx1 = -ny
ny1 = nx
endif
alpha1 = nx * nx1 + ny * ny1
! To make alpha1 always positive.
temp = sign(one,alpha1)
nx1 = temp * nx1
ny1 = temp * ny1
alpha1 = temp * alpha1
! Take n2 as perpendicular to n1.
nx2 = -ny1
ny2 = nx1
alpha2 = nx * nx2 + ny * ny2
! To make alpha2 always positive.
temp = sign(one,alpha2)
nx2 = temp * nx2
ny2 = temp * ny2
alpha2 = temp * alpha2
!Now we are going to compute the Roe flux with n2 as the normal
!and n1 as the tangent vector, with modified wave speeds (5.12)
!Compute the Roe Averages
RT = sqrt(rhoR/rhoL)
rho = RT*rhoL
vx = (vxL+RT*vxR)/(one+RT)
vy = (vyL+RT*vyR)/(one+RT)
H = ( HL+RT* HR)/(one+RT)
a = sqrt( max(0.0_dp,(gamma-one)*(H-half*(vx*vx+vy*vy))) )
vn = vx*nx2+vy*ny2
vt = vx*nx1+vy*ny1
!Wave Strengths (remember that n2 is the normal and n1 is the tangent.)
vnL = vxL*nx2 + vyL*ny2
vnR = vxR*nx2 + vyR*ny2
vtL = vxL*nx1 + vyL*ny1
vtR = vxR*nx1 + vyR*ny1
drho = rhoR - rhoL
dpp = pR - pL
dvn = vnR - vnL
dvt = vtR - vtL
a = max(a,1.0e-12_dp)
dV(1) = (dpp - rho*a*dvn )/(two*a*a)
dV(2) = rho*dvt/a
dV(3) = drho - dpp/(a*a)
dV(4) = (dpp + rho*a*dvn )/(two*a*a)
!Wave Speeds for Roe flux part.
ws(1) = vn-a
ws(2) = vn
ws(3) = vn
ws(4) = vn+a
abs_ws = abs(ws)
!Harten's Entropy Fix JCP(1983), 49, pp357-393:
!only for the nonlinear fields.
dws(1) = fifth
if (abs_ws(1)<dws(1)) abs_ws(1) = half*(abs_ws(1)*abs_ws(1)/dws(1)+dws(1))
dws(4) = fifth
if (abs_ws(4)<dws(4)) abs_ws(4) = half*(abs_ws(4)*abs_ws(4)/dws(4)+dws(4))
!HLL wave speeds, evaluated with [nx1,ny1] (=tangent wrt n2).
SRp = max( zero, vtR + aR, vt + a)
SLm = min( zero, vtL - aL, vt - a)
!Modified wave speeds for the Rotated-RHLL flux: (5.12) in the original paper.
ws = alpha2*abs_ws - ( alpha2*(SRp+SLm)*ws + two*alpha1*SRp*SLm )/ (SRp-SLm)
!Right Eigenvectors: with n2 as normal and n1 as tangent.
tx = nx1
ty = ny1
Rv(1,1) = one
Rv(2,1) = vx - a*nx2
Rv(3,1) = vy - a*ny2
Rv(4,1) = H - vn*a
Rv(1,2) = zero
Rv(2,2) = a*tx
Rv(3,2) = a*ty
Rv(4,2) = a*vt
Rv(1,3) = one
Rv(2,3) = vx
Rv(3,3) = vy
Rv(4,3) = half*(vx*vx+vy*vy)
Rv(1,4) = one
Rv(2,4) = vx + a*nx2
Rv(3,4) = vy + a*ny2
Rv(4,4) = H + vn*a
!Dissipation Term: Roe dissipation with the modified wave speeds.
diss = zero
do i=1,4
do j=1,4
diss(i) = diss(i) + ws(j)*dV(j)*Rv(i,j)
end do
end do
!Compute the Rotated-RHLL flux.
Rotated_RHLL = (SRp*fL - SLm*fR)/(SRp-SLm) - half*diss
end function Rotated_RHLL
! This function builds the derivatives of the absolute value of the
! largest eigenvalue with respect to the left and right states,
! approximated by first-order finite differences of Euler_buildlambda
function Euler_buildDlambda(Qll, Qrr, a, b) result(Ddlambda)
! The left and right solution values
real(DP), dimension(4), intent(IN) :: Qll, Qrr
! The components of the normal vector
real(dp), intent(in) :: a, b
! The computed derivatives of max abs eigenvalue
real(DP), dimension(8) :: Ddlambda
! Delta for the finite-difference approximation
real(dp) :: ddelta = 0.0000001_dp
! Lambda and perturbed lambda
real(dp) :: dlambda, dtlambda
! Temporary left and right solution values
real(DP), dimension(4) :: Ql, Qr
! Loop counter
integer :: i
! Unperturbed max abs eigenvalue
dlambda = Euler_buildlambda(Qll, Qrr, a, b)
! First-order differences with respect to the left state
do i = 1, 4
Ql = Qll
Ql(i) = Ql(i) + ddelta
dtlambda = Euler_buildlambda(Ql, Qrr, a, b)
Ddlambda(i) = (dtlambda-dlambda)/ddelta
end do
! First-order differences with respect to the right state
do i = 1, 4
Qr = Qrr
Qr(i) = Qr(i) + ddelta
dtlambda = Euler_buildlambda(Qll, Qr, a, b)
Ddlambda(i+4) = (dtlambda-dlambda)/ddelta
end do
end function
! This function builds the absolute value of the largest eigenvalue
function Euler_buildlambda(Qll, Qrr, a, b) result(dlambda)
! The left and right solution values
real(DP), dimension(4), intent(IN) :: Qll, Qrr
! The max abs eigenvalue
real(dp) :: dlambda
! The components of the normal vector
real(dp), intent(in) :: a, b
! temp variables
real(DP) :: rhol, rhor, denom, Hl, Hr, El, Er, pl, pr, velnorm, aux, ul, ur, vl, vr
! Roe-values and speed of sound waves
real(dp), dimension(5) :: Qroe
! Gamma
real(dp) :: gamma = 1.4_dp
! Temporary left and right solution values
real(DP), dimension(4) :: Ql, Qr
! Copy solution values to their temporary opposites
Ql = Qll
Qr = Qrr
! Compute Roe mean values
rhol = Ql(1)
rhor = Qr(1)
ul = Ql(2)/Ql(1)
ur = Qr(2)/Qr(1)
vl = Ql(3)/Ql(1)
vr = Qr(3)/Qr(1)
El = Ql(4)/rhol
Er = Qr(4)/rhor
Qroe(1) = sqrt(rhol*rhor)
aux = sqrt(max(rhol/rhor, SYS_EPSREAL_DP))
Qroe(2) = (aux*ul+ur)/(aux+1.0_DP)
Qroe(3) = (aux*vl+vr)/(aux+1.0_DP)
hl = GAMMA*El-0.5_dp*(gamma-1.0_dp)*(ul*ul+vl*vl)
hr = GAMMA*Er-0.5_dp*(gamma-1.0_dp)*(ur*ur+vr*vr)
Qroe(4) = (aux*hl+hr)/(aux+1.0_DP)
Qroe(5) = sqrt( max((gamma-1.0_dp)*(Qroe(4) - 0.5_dp*(Qroe(2)*Qroe(2)+Qroe(3)*Qroe(3)) ),0.0_dp) )
! Compute the max abs eigenvalue
dlambda = abs(a*Qroe(2)+b*Qroe(3)) + Qroe(5)
end function
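! Local Lax-Friedrichs (Rusanov) flux: the average of the two one-sided
! fluxes in normal direction minus the maximum-eigenvalue dissipation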
function LLF(uL, uR, nx, ny)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny] (Left-to-Right)
real(dp) :: LLF(4) ! Output: local Lax-Friedrichs flux
LLF = 0.5_dp*(nx*Euler_buildFlux(uL,1)+ny*Euler_buildFlux(uL,2) + nx*Euler_buildFlux(uR,1)+ny*Euler_buildFlux(uR,2) )&
-0.5_dp*Euler_buildlambda(uL, uR, nx, ny)*(uR-uL)
end function
function DLLF(uL, uR, nx, ny, h)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny]
real(dp) :: h ! Input: infinitesimal constant
real(dp), dimension(4,8) :: DLLF ! Output: Approximate differential of the LLF flux function
real(dp), dimension(4) :: F, U
integer :: i
F = LLF(uL, uR, nx, ny)
! First order approx
do i = 1,4
U = uL
U(i) = U(i) + h
DLLF(:,i) = (LLF(U, uR, nx, ny) - F)/h
end do
do i = 1,4
U = uR
U(i) = U(i) + h
DLLF(:,i+4) = (LLF(uL, U, nx, ny) - F)/h
end do
! ! Second order approx
! do i = 1,4
! U = uL
! U(i) = U(i) + h
! DLLF(:,i) = LLF(U, uR, nx, ny)
! U = uL
! U(i) = U(i) - h
! DLLF(:,i) = 0.5_dp*(DLLF(:,i)-LLF(U, uR, nx, ny))/h
! end do
!
! do i = 1,4
! U = uR
! U(i) = U(i) + h
! DLLF(:,i+4) = LLF(uL, U, nx, ny)
! U = uR
! U(i) = U(i) - h
! DLLF(:,i+4) = 0.5_dp*(DLLF(:,i+4)-LLF(uL, U, nx, ny))/h
! end do
end function DLLF
function DRotated_RHLL_2nd(uL, uR, nx, ny, h)
real(dp) :: uL(4), uR(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny]
real(dp) :: h ! Input: infinitesimal constant
real(dp), dimension(4,8) :: DRotated_RHLL_2nd ! Output: Approximate differential of the Rotated-RHLL flux function
real(dp), dimension(4) :: F, U
integer :: i
! F = Rotated_RHLL(uL, uR, nx, ny)
!
! ! First order approx
! do i = 1,4
! U = uL
! U(i) = U(i) + h
! DRotated_RHLL_2nd(:,i) = (Rotated_RHLL(U, uR, nx, ny) - F)/h
! end do
!
! do i = 1,4
! U = uR
! U(i) = U(i) + h
! DRotated_RHLL_2nd(:,i+4) = (Rotated_RHLL(uL, U, nx, ny) - F)/h
! end do
! Second order approx
do i = 1,4
U = uL
U(i) = U(i) + h
DRotated_RHLL_2nd(:,i) = Rotated_RHLL(U, uR, nx, ny)
U = uL
U(i) = U(i) - h
DRotated_RHLL_2nd(:,i) = 0.5_dp*(DRotated_RHLL_2nd(:,i)-Rotated_RHLL(U, uR, nx, ny))/h
end do
do i = 1,4
U = uR
U(i) = U(i) + h
DRotated_RHLL_2nd(:,i+4) = Rotated_RHLL(uL, U, nx, ny)
U = uR
U(i) = U(i) - h
DRotated_RHLL_2nd(:,i+4) = 0.5_dp*(DRotated_RHLL_2nd(:,i+4)-Rotated_RHLL(uL, U, nx, ny))/h
end do
end function
function Euler_WallFlux(Qi,nx,ny) result(Flux)
! The flux vector
real(DP), dimension(4) :: Flux
! The solution components q1 = rho, q2 = rho u, q3 = rho v, q4 = rho E
real(DP), dimension(4), intent(IN) :: Qi
! Components of the normal vector
real(dp), intent(in) :: nx, ny
! pressure, stagnation enthalpy
real(DP) :: p, H
! temporary variable
real(DP) :: rho, u, v, E
! Constant Gamma
real(dp) :: gamma = 1.4_dp
! Calculate primitive variables
rho=Qi(1)
u=Qi(2)/rho
v=Qi(3)/rho
E=Qi(4)/rho
! Compute the pressure
p = (gamma - 1.0_dp)*rho*(E-0.5_dp*(u*u+v*v))
! ! Compute H, the stagnation enthalpy
! H = E + p/rho
! Build the flux vector
Flux(1) = 0.0_dp
Flux(2) = nx*p
Flux(3) = ny*p
Flux(4) = 0.0_dp
end function
function dEuler_WallFlux(uL, nx, ny, h) result(DFlux)
real(dp) :: uL(4) ! Input: conservative variables rho*[1, u, v, E]
real(dp) :: nx, ny ! Input: face normal vector, [nx, ny]
real(dp) :: h ! Input: infinitesimal constant
real(dp), dimension(4,8) :: DFlux ! Output: Approximate differential of the wall flux
real(dp), dimension(4) :: U
integer :: i
! Second order approx
do i = 1,4
U = uL
U(i) = U(i) + h
DFlux(:,i) = Euler_WallFlux(U,nx,ny)
U = uL
U(i) = U(i) - h
DFlux(:,i) = 0.5_dp*(DFlux(:,i)-Euler_WallFlux(U,nx,ny))/h
end do
DFlux(:,5:8) = 0.0_dp
end function
function calculatePenalty(dpolgrad, dedgeLength) result(dpenalty)
real(dp), intent(in) :: dpolgrad, dedgeLength
real(dp) :: dPenalty
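! The usual interior-penalty scaling for DG methods: the penalty grows
! like p*(p+1)/h_edge with polynomial degree p and edge length h_edge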
dPenalty = (dpolgrad)*(dpolgrad+1.0_dp)/dedgeLength
end function
end module dg2d_problem
|
{"hexsha": "aa53a94c79572a3b2d15adb50091a996ef241523", "size": 69157, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "area51/dg_conslaw/src/dg2d_problem.f90", "max_stars_repo_name": "tudo-math-ls3/FeatFlow2", "max_stars_repo_head_hexsha": "56159aff28f161aca513bc7c5e2014a2d11ff1b3", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-09T15:48:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T15:48:37.000Z", "max_issues_repo_path": "area51/dg_conslaw/src/dg2d_problem.f90", "max_issues_repo_name": "tudo-math-ls3/FeatFlow2", "max_issues_repo_head_hexsha": "56159aff28f161aca513bc7c5e2014a2d11ff1b3", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "area51/dg_conslaw/src/dg2d_problem.f90", "max_forks_repo_name": "tudo-math-ls3/FeatFlow2", "max_forks_repo_head_hexsha": "56159aff28f161aca513bc7c5e2014a2d11ff1b3", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6636947218, "max_line_length": 152, "alphanum_fraction": 0.5250806137, "num_tokens": 27708}
|
using PyCall, Compat
using Compat.Test, Compat.Dates, Compat.Serialization
filter(f, itr) = collect(Iterators.filter(f, itr))
filter(f, d::AbstractDict) = Base.filter(f, d)
PYTHONPATH=get(ENV,"PYTHONPATH","")
PYTHONHOME=get(ENV,"PYTHONHOME","")
PYTHONEXECUTABLE=get(ENV,"PYTHONEXECUTABLE","")
Compat.@info "Python version $pyversion from $(PyCall.libpython), PYTHONHOME=$(PyCall.PYTHONHOME)\nENV[PYTHONPATH]=$PYTHONPATH\nENV[PYTHONHOME]=$PYTHONHOME\nENV[PYTHONEXECUTABLE]=$PYTHONEXECUTABLE"
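# Helpers: roundtrip converts a value to a PyObject and back; roundtripeq
# checks that the value survives the conversion unchanged.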
roundtrip(T, x) = convert(T, PyObject(x))
roundtrip(x) = roundtrip(PyAny, x)
roundtripeq(T, x) = roundtrip(T, x) == x
roundtripeq(x) = roundtrip(x) == x
@pyimport math
struct TestConstruct
x
end
pymodule_exists(s::AbstractString) = !ispynull(pyimport_e(s))
# default integer type for PyAny conversions
const PyInt = pyversion < v"3" ? Int : Clonglong
@testset "PyCall" begin
# conversion of NumPy scalars before npy_initialized by array conversions (#481)
np = pyimport_e("numpy")
if !ispynull(np) # numpy is installed, so test
let o = get(pycall(np["array"], PyObject, 1:3), PyObject, 2)
@test convert(Int32, o) === Int32(3)
@test convert(Int64, o) === Int64(3)
@test convert(Float64, o) === Float64(3)
@test convert(Complex{Int}, o) === 3+0im
end
end
# test handling of type-tuple changes in Julia 0.4
import PyCall.pyany_toany
@test pyany_toany(Int) == Int
@test pyany_toany(PyAny) == Any
@test pyany_toany(Tuple{Int,PyAny}) == Tuple{Int,Any}
@test pyany_toany(Tuple{Int,Tuple{PyAny,Int8}}) == Tuple{Int,Tuple{Any,Int8}}
@test pyany_toany(Tuple{PyAny,Int,Vararg{PyAny}}) == Tuple{Any,Int,Vararg{Any}}
@test roundtripeq(17)
@test roundtripeq(0x39)
@test roundtripeq(true) && roundtripeq(false)
@test roundtripeq(3.14159)
@test roundtripeq(1.3+4.5im)
@test roundtripeq(nothing)
@test roundtripeq("Hello world")
@test roundtripeq("Hëllö")
@test roundtripeq("Hello \0\0\0world")
@test roundtripeq("Hël\0\0lö")
@test roundtripeq(Symbol, :Hello)
@test roundtripeq(C_NULL) && roundtripeq(convert(Ptr{Cvoid}, 12345))
@test roundtripeq([1,3,4,5]) && roundtripeq([1,3.2,"hello",true])
@test roundtripeq([1 2 3;4 5 6]) && roundtripeq([1. 2 3;4 5 6])
@test roundtripeq((1,(3.2,"hello"),true)) && roundtripeq(())
@test roundtripeq(Int32)
@test roundtripeq(Dict(1 => "hello", 2 => "goodbye")) && roundtripeq(Dict())
@test roundtripeq(UInt8[1,3,4,5])
@test roundtrip(3 => 4) == (3,4)
@test roundtrip(Pair{Int,Int}, 3 => 4) == Pair(3,4)
@test eltype(roundtrip([Ref(1), Ref(2)])) == typeof(Ref(1))
@test pycall(PyObject(x -> x + 1), PyAny, 314158) == 314159
@test PyObject(x -> x + 1)(314158) == 314159
@test PyAny(PyObject(3)) == 3
@test roundtrip(x -> x + 1)(314158) == 314159
testkw(x; y=0) = x + 2*y
@test pycall(PyObject(testkw), PyAny, 314157) == 314157
@test pycall(PyObject(testkw), PyAny, 314157, y=1) == 314159
@test roundtrip(testkw)(314157) == 314157
@test roundtrip(testkw)(314157, y=1) == 314159
# check type stability of pycall with an explicit return type
@inferred pycall(PyObject(1)[:__add__], Int, 2)
if PyCall.npy_initialized
@test PyArray(PyObject([1. 2 3;4 5 6])) == [1. 2 3;4 5 6]
let A = rand(Int, 2,3,4), B = rand(Bool, 2,3,4)
@test convert(PyAny, PyReverseDims(A)) == permutedims(A, [3,2,1])
@test convert(PyAny, PyReverseDims(BitArray(B))) == permutedims(B, [3,2,1])
end
end
@test PyVector(PyObject([1,3.2,"hello",true])) == [1,3.2,"hello",true]
@test PyDict(PyObject(Dict(1 => "hello", 2 => "goodbye"))) == Dict(1 => "hello", 2 => "goodbye")
@test roundtripeq(BitArray([true, false, true, true]))
let d = PyDict(Dict(1 => "hello", 34 => "yes" ))
@test get(d.o, 1) == "hello"
set!(d.o, 34, "goodbye")
@test d[34] == "goodbye"
@test sort!(keys(Int, d)) == sort!(collect(d.o[:keys]())) == sort!(collect(keys(d))) == [1, 34]
@test eltype(d) == eltype(typeof(d)) == Pair{Int, String}
end
let d = Dict(zip(1:1000, 1:1000)), f
f(k,v) = iseven(k) # For 0.6
f(kv) = iseven(kv[1]) # For 0.7
@test filter(f, d) == filter(f, PyDict(d)) == filter!(f, PyDict(d)) ==
Dict(zip(2:2:1000, 2:2:1000))
end
@test roundtripeq(Any[1 2 3; 4 5 6])
@test roundtripeq([])
@test convert(Array{PyAny,1}, PyObject(Any[1 2 3; 4 5 6])) == Any[Any[1,2,3],Any[4,5,6]]
if PyCall.npy_initialized
@test roundtripeq(begin A = Array{Int}(undef); A[1] = 3; A; end)
end
@test convert(PyAny, PyObject(begin A = Array{Any}(undef); A[1] = 3; A; end)) == 3
array2py2arrayeq(x) = PyCall.py2array(Float64,PyCall.array2py(x)) == x
@test array2py2arrayeq(rand(3))
@test array2py2arrayeq(rand(3,4))
@test array2py2arrayeq(rand(3,4,5))
@test roundtripeq(2:10) && roundtripeq(10:-1:2)
@test roundtrip(2:2.0:10) == convert(Vector{Float64}, 2:2.0:10)
@test math.sin(3) ≈ sin(3)
@test collect(PyObject([1,"hello",5])) == [1,"hello",5]
@test try @eval (@pyimport os.path) catch ex
if VERSION >= v"0.7.0-DEV.1729"
ex = (ex::LoadError).error
end
isa(ex, ArgumentError)
end
@test PyObject("hello") == PyObject("hello")
@test PyObject("hello") != PyObject("hellö")
@test PyObject(hash) == PyObject(hash)
@test PyObject(hash) != PyObject(println)
@test hash(PyObject("hello")) == hash(PyObject("hello"))
@test hash(PyObject("hello")) != hash(PyObject("hellö"))
@test hash(PyObject("hello")) != hash("hellö")
@test hash(PyObject(hash)) == hash(PyObject(hash))
@test hash(PyObject(hash)) != hash(PyObject(println))
@test hash(PyObject(hash)) != hash(hash)
# issue #92:
let x = PyVector(PyAny[])
py"lambda x: x.append(\"bar\")"(x)
@test x == ["bar"]
end
@test roundtripeq(Dates.Date(2012,3,4))
@test roundtripeq(Dates.DateTime(2012,3,4, 7,8,9,11))
@test roundtripeq(Dates.Millisecond(typemax(Int32)))
@test roundtripeq(Dates.Millisecond(typemin(Int32)))
@test roundtripeq(Dates.Second, Dates.Second(typemax(Int32)))
@test roundtripeq(Dates.Second, Dates.Second(typemin(Int32)))
@test roundtripeq(Dates.Day, Dates.Day(999999999)) # max allowed day timedelta
@test roundtripeq(Dates.Day, Dates.Day(-999999999)) # min allowed day timedelta
# fixme: is there any nontrivial showable test we can do?
@test !showable("text/html", PyObject(1))
    # in Python 3, strings must be explicitly encoded to bytes before being
    # written to a binary buffer
    # (http://stackoverflow.com/questions/5471158/typeerror-str-does-not-support-the-buffer-interface)
pyutf8(s::PyObject) = pycall(s["encode"], PyObject, "utf-8")
pyutf8(s::String) = pyutf8(PyObject(s))
# IO (issue #107)
#@test roundtripeq(stdout) # No longer true since #250
let buf = Compat.IOBuffer(read=false, write=true), obuf = PyObject(buf)
@test !obuf[:isatty]()
@test obuf[:writable]()
@test !obuf[:readable]()
@test obuf[:seekable]()
obuf[:write](pyutf8("hello"))
obuf[:flush]() # should be a no-op, since there's no flushing IOBuffer
@test position(buf) == obuf[:tell]() == 5
let p = obuf[:seek](-2, 1)
@test p == position(buf) == 3
end
let p = obuf[:seek](0, 0)
@test p == position(buf) == 0
end
@test String(take!(buf)) == "hello"
obuf[:writelines](["first\n", "second\n", "third"])
@test String(take!(buf)) == "first\nsecond\nthird"
obuf[:write](b"möre stuff")
@test String(take!(buf)) == "möre stuff"
@test isopen(buf) == !obuf[:closed] == true
obuf[:close]()
@test isopen(buf) == !obuf[:closed] == false
end
let buf = IOBuffer("hello\nagain"), obuf = PyObject(buf)
@test !obuf[:writable]()
@test obuf[:readable]()
@test obuf[:readlines]() == ["hello\n", "again"]
end
let buf = IOBuffer("hello\nagain"), obuf = PyObject(buf)
@test codeunits(obuf[:read](5)) == b"hello"
@test codeunits(obuf[:readall]()) == b"\nagain"
end
let buf = IOBuffer("hello\nagain"), obuf = PyTextIO(buf)
@test obuf[:encoding] == "UTF-8"
@test obuf[:read](3) == "hel"
@test obuf[:readall]() == "lo\nagain"
end
let nm = tempname()
open(nm, "w") do f
# @test roundtripeq(f) # PR #250
pf = PyObject(f)
@test pf[:fileno]() == fd(f)
@test pf[:writable]()
@test !pf[:readable]()
pf[:write](pyutf8(nm))
pf[:flush]()
end
@test read(nm, String) == nm
end
# issue #112
@test roundtripeq(Array, [1,2,3,4])
@test roundtripeq(Array{Int8}, [1,2,3,4])
# conversion of numpy scalars
pyanycheck(x::Any) = pyanycheck(typeof(x), PyObject(x))
pyanycheck(T, o::PyObject) = isa(convert(PyAny, o), T)
@test pyanycheck(PyInt, PyVector{PyObject}(PyObject([1]))[1])
@test pyanycheck(Float64, PyVector{PyObject}(PyObject([1.3]))[1])
@test pyanycheck(ComplexF64, PyVector{PyObject}(PyObject([1.3+1im]))[1])
@test pyanycheck(Bool, PyVector{PyObject}(PyObject([true]))[1])
# conversions of Int128 and BigInt
let i = 1234567890123456789 # Int64
@test PyObject(i) - i == 0
end
let i = 12345678901234567890 # Int128
@test PyObject(i) - i == 0
end
let i = BigInt(12345678901234567890), o = PyObject(i) # BigInt
@test o - i == 0
@test BigInt(o) == i
if pyversion >= v"3.2"
@test PyAny(o) == i == convert(Integer, o)
@test_throws InexactError Int64(o)
end
end
# bigfloat conversion
if pymodule_exists("mpmath")
for x in (big(pi), big(pi) + im/big(pi))
@test pyanycheck(x)
# conversion may not be exact since it goes through a decimal string
@test abs(roundtrip(x) - x) < eps(BigFloat) * 1e3 * abs(x)
end
end
@test convert(BigInt, PyObject(1234)) == 1234
# buffers
let b = PyCall.PyBuffer(pyutf8("test string"))
@test ndims(b) == 1
@test (length(b),) == (length("test string"),) == (size(b, 1),) == size(b)
@test stride(b, 1) == 1
@test PyCall.iscontiguous(b) == true
end
let o = PyObject(1+2im)
@test haskey(o, :real)
@test :real in keys(o)
@test o[:real] == 1
end
# []-based sequence access
let a1=[5,8,6], a2=rand(3,4), a3=rand(3,4,5), o1=PyObject(a1), o2=PyObject(a2), o3=PyObject(a3)
@test [o1[i] for i in eachindex(a1)] == a1
@test [o1[end-(i-1)] for i in eachindex(a1)] == reverse(a1)
@test o2[1] == collect(a2[1,:])
@test length(o1) == length(o2) == length(o3) == 3
o1[end-1] = 7
@test o1[2] == 7
# multiple indices are passed as tuples, but this is apparently
# only supported by numpy arrays.
if PyCall.npy_initialized
@test [o2[i,j] for i=1:3, j=1:4] == a2
@test [o3[i,j,k] for i=1:3, j=1:4, k=1:5] == a3
@test o3[2,3] == collect(a3[2,3,:])
o2[2,3] = 8
@test o2[2,3] == 8
o3[2,3,4] = 9
@test o3[2,3,4] == 9
end
end
# list operations:
let o = PyObject(Any[8,3])
@test collect(push!(o, 5)) == [8,3,5]
@test pop!(o) == 5 && collect(o) == [8,3]
@test popfirst!(o) == 8 && collect(o) == [3]
@test collect(pushfirst!(o, 9)) == [9,3]
@test collect(prepend!(o, [5,4,2])) == [5,4,2,9,3]
@test collect(append!(o, [1,6,8])) == [5,4,2,9,3,1,6,8]
@test isempty(empty!(o))
end
let o = PyObject(Any[8,3])
@test collect(append!(o, o)) == [8,3,8,3]
push!(o, 1)
@test collect(prepend!(o, o)) == [8,3,8,3,1,8,3,8,3,1]
end
# issue #216:
@test length(collect(pyimport("itertools")[:combinations]([1,2,3],2))) == 3
# PyNULL and copy!
let x = PyNULL(), y = copy!(x, PyObject(314159))
@test Int(x) == Int(y) == 314159
end
@test ispynull(PyNULL())
@test !ispynull(PyObject(3))
@test ispynull(pydecref(PyObject(3)))
@test !ispynull(pyimport_conda("inspect", "not a conda package"))
import Conda
if PyCall.conda
# import pyzmq to test PR #294
let already_installed = "pyzmq" ∈ Conda._installed_packages()
@test !ispynull(pyimport_conda("zmq", "pyzmq"))
@test "pyzmq" ∈ Conda._installed_packages()
if !already_installed
Conda.rm("pyzmq")
end
end
end
let x = 7
py"""
def myfun(x):
return x + $x
"""
@test py"1 + 2" == 3
@test py"1 + $x" == 8
@test py"1 + $(x^2)" == 50
@test py"myfun"(10) == 17
end
# issue #352
let x = "1+1"
@test py"$x" == "1+1"
@test py"$$x" == py"$$(x)" == 2
@test py"7 - $$x - 7" == 0 # evaluates "7 - 1 + 1 - 7"
@test py"7 - ($$x) - 7" == -2 # evaluates "7 - (1 + 1) - 7"
@test py"1 + $$(x[1:2]) 3" == 5 # evals 1 + 1+ 3
end
# Float16 support:
if PyCall.npy_initialized
@test roundtripeq(Float16[17 18 Inf -Inf -0.0 0.0])
@test isa(roundtrip(Float16[17]), Vector{Float16})
end
"""
foobar doc
"""
foobar(x) = x+1
# function attributes
let o = PyObject(foobar)
@test o[:__name__] == o[:func_name] == string(foobar)
@test o[:__doc__] == o[:func_doc] == "foobar doc\n"
@test o[:__module__] == o[:__defaults__] == o[:func_defaults] ==
o[:__closure__] == o[:func_closure] == nothing
end
# issue #345
let weakdict = pyimport("weakref")["WeakValueDictionary"]
# (use weakdict for the value, since Python supports
# weak references to type objects)
@test convert(Dict{Int,PyObject}, weakdict(Dict(3=>weakdict))) == Dict(3=>weakdict)
@test get(weakdict(Dict(3=>weakdict)),3) == weakdict
end
# Expose python docs to Julia doc system
py"""
def foo():
"foo docstring"
return 0
"""
global foo354 = py"foo"
# use 'content' since `Text` objects test equality by object identity
@test @doc(foo354).content == "foo docstring"
# binary operators
for b in (4, PyObject(4))
for op in (+, -, *, /, %, &, |, ^, <<, >>, ⊻)
let x = op(PyObject(111), b)
@test isa(x, PyObject)
@test convert(PyAny, x) == op(111, 4)
end
@test convert(PyAny, op(b, PyObject(3))) == op(4, 3)
end
end
@test convert(PyAny, PyObject(3)^4) == 3^4 # literal integer powers
@test convert(PyAny, PyObject(3)^0) == 1 # literal integer powers
@test convert(PyAny, PyObject(2)^-1) == 0.5 # literal integer powers
# unary operators
for op in (+, -, ~, abs)
let x = op(PyObject(-3))
@test isa(x, PyObject)
@test convert(PyAny, x) == op(-3)
end
end
# comparisons
for x in (3,4,5), y in (3.0,4.0,5.0)
for op in (<, <=, ==, !=, >, >=, isless, isequal)
@test op(PyObject(x), PyObject(y)) == op(x, y)
if op != isequal
@test op(PyObject(x), y) == op(x, y)
end
end
end
# updating operators .+= etcetera
let o = PyObject(Any[1,2]), c = o
broadcast!(+, o, o, Any[3,4]) # o .+= x doesn't work yet in 0.7
@test collect(o) == [1,2,3,4]
@test o.o == c.o # updated in-place
end
# more flexible bool conversions, matching Python "truth value testing"
@test convert(Bool, PyObject(nothing)) === false
@test convert(Bool, PyObject(0.0)) === false
@test convert(Bool, PyObject(Any[])) === false
@test convert(Bool, PyObject(17.3)) === true
@test convert(Bool, PyObject(Any[0])) === true
@test convert(Bool, PyVector{PyObject}(PyObject([false]))[1]) === false
# serialization
let py_sum_obj = pybuiltin("sum")
b = IOBuffer()
serialize(b, py_sum_obj)
@test py_sum_obj == deserialize(seekstart(b))
b = IOBuffer()
serialize(b, PyNULL())
@test PyNULL() == deserialize(seekstart(b))
end
# @pycall macro expands correctly
_pycall = GlobalRef(PyCall,:pycall)
@test macroexpand(@__MODULE__, :(@pycall foo(bar)::T)) == :($(_pycall)(foo, T, bar))
@test macroexpand(@__MODULE__, :(@pycall foo(bar, args...)::T)) == :($(_pycall)(foo, T, bar, args...))
@test macroexpand(@__MODULE__, :(@pycall foo(bar; kwargs...)::T)) == :($(_pycall)(foo, T, bar; kwargs...))
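    # For reference (comments only, not additional tests): the macro is plain
    # sugar over pycall, e.g. `@pycall pybuiltin("abs")(-3)::Int` expands to
    # `pycall(pybuiltin("abs"), Int, -3)`, matching the expansions checked above.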
# basic @pywith functionality
fname = tempname()
try
@test begin
@pywith pybuiltin("open")(fname,"w") as f begin
f[:write]("test")
end
open(io->read(io, String), fname)=="test"
end
finally
rm(fname,force=true)
end
@test occursin("integer", Base.Docs.doc(PyObject(1)).content)
@test occursin("no docstring", Base.Docs.doc(PyObject(py"lambda x: x+1")).content)
let b = rand(UInt8, 1000)
@test(convert(Vector{UInt8}, pybytes(b)) == b
== convert(Vector{UInt8}, pybytes(String(copy(b))))
== convert(Vector{UInt8}, pybytes(codeunits(String(copy(b))))))
end
let t = convert(Tuple, PyObject((3,34)))
@test isa(t, Tuple{PyObject,PyObject})
@test t == (PyObject(3), PyObject(34))
end
for T in (Tuple{Vararg{PyAny}}, NTuple{2,PyInt}, Tuple{PyInt,PyInt}, Tuple{Vararg{PyInt}}, Tuple{PyInt,Vararg{PyInt}})
let t = convert(T, PyObject((3,34)))
@test isa(t, Tuple{PyInt,PyInt})
@test t == (3,34)
end
end
@test_throws BoundsError convert(NTuple{3,Int}, PyObject((3,34)))
let p = PyCall.pickle(), buf = IOBuffer()
p[:dump]("hello world", buf)
p[:dump](314159, buf)
p[:dump](Any[1,1,2,3,5,8], buf)
@test p[:load](seekstart(buf)) == "hello world"
@test p[:load](buf) == 314159
@test p[:load](buf) == [1,1,2,3,5,8]
end
# Test that we can call constructors on the python side
@test pycall(PyObject(TestConstruct), PyAny, 1).x == 1
# Test getattr fallback
@test PyObject(TestConstruct(1))[:x] == 1
@test_throws KeyError PyObject(TestConstruct(1))[:y]
# iterating over Julia objects in Python:
@test py"[x**2 for x in $(PyCall.pyjlwrap_new(1:4))]" ==
py"[x**2 for x in $(x for x in 1:4)]" ==
py"[x**2 for x in $(PyCall.jlwrap_iterator(1:4))]" ==
[1,4,9,16]
let o = PyObject("foo")
@test pystr(o) == "foo"
@test pyrepr(o) == "'foo'"
end
# pyfunction
@test pyfunction(factorial, Int)(3) === PyInt(6)
@test pyfunction(sin, Complex{Int})(3) === sin(3+0im)
@test pyfunctionret(factorial, Float64, Int)(3) === 6.0
@test pyfunctionret(factorial, nothing, Int)(3) === nothing
@test PyCall.is_pyjlwrap(pycall(pyfunctionret(factorial, Any, Int), PyObject, 3))
@test pyfunctionret(max, Int, Vararg{Int})(3,4,5) === PyInt(5)
# broadcasting scalars
let o = PyObject(3) .+ [1,4]
@test o isa Vector{PyObject}
@test o == [4,7]
end
end
######################################################################
#@pydef tests: type declarations need to happen at top level
# issue #389
@pydef mutable struct EmptyClass
end
# @pywith errors correctly handled
@pydef mutable struct IgnoreError
function __init__(self, ignore)
self[:ignore] = ignore
end
__enter__(self) = ()
__exit__(self, typ, value, tb) = self[:ignore]
end
# @pydef example from README
@pydef mutable struct Doubler <: PyCall.builtin[:AssertionError]
__init__(self, x=10) = (self[:x] = x)
function my_method(self, arg1::Number)
return arg1 + 20
end
type_str(self, obj::T) where T = string(T)
x2.get(self) = self[:x] * 2
function x2.set!(self, new_val)
self[:x] = new_val / 2
end
end
@testset "pydef" begin
d = Doubler(5)
@test d[:x] == 5
d[:x2] = 30
@test d[:x] == 15
@test d[:type_str](10) == string(PyInt)
@test PyCall.builtin[:isinstance](d, PyCall.builtin[:AssertionError])
@test_throws ErrorException @pywith IgnoreError(false) error()
@test (@pywith IgnoreError(true) error(); true)
end
include("test_pyfncall.jl")
|
{"hexsha": "282c23f1de07814af45385bbe08cd507412b88c5", "size": 20645, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "schmrlng/PyCall.jl", "max_stars_repo_head_hexsha": "2673bfe7559ff9d7bd9056e58f08e6ed160cb737", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "schmrlng/PyCall.jl", "max_issues_repo_head_hexsha": "2673bfe7559ff9d7bd9056e58f08e6ed160cb737", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "schmrlng/PyCall.jl", "max_forks_repo_head_hexsha": "2673bfe7559ff9d7bd9056e58f08e6ed160cb737", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9668989547, "max_line_length": 197, "alphanum_fraction": 0.5699200775, "num_tokens": 6820}
|
""""
Python Class with dedicated utilities/methods to analyse Gaia DR3 samples
Héctor Cánovas Oct 2019 - now
"""
import glob, warnings, getpass
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table, MaskedColumn
from astropy.utils.exceptions import AstropyWarning
class Utils():
"""
Initialize the class.
"""
def __init__(self, color = 'magenta', label = 'Sample', data_labs = True):
self.user_name = getpass.getuser()
if data_labs:
self.out_path = f'/home/{self.user_name}/'
else:
            self.out_path = ''
self.color = color
self.label = label
self.bcols = ['ra', 'dec', 'pmra', 'pmdec', 'pmra_error', 'pmdec_error', 'parallax','phot_g_mean_mag',
'phot_bp_mean_mag', 'phot_rp_mean_mag', 'phot_g_mean_flux_over_error', 'phot_bp_mean_flux_over_error',
'phot_rp_mean_flux_over_error'] # Basic Gaia cols
def __repr__(self):
        return 'This class contains tools to create and manipulate a Gaia catalogue'
def make_cat(self, inp_tb):
"""
Creates Catalogue (Astropy Table instance)
"""
if isinstance(inp_tb, str):
self.cat = Table.read(inp_tb, format = 'votable')
if isinstance(inp_tb, Table):
self.cat = inp_tb
def display_N_rows(self, N_rows = 5, verbose = True):
"""
Display first N rows of the dataset.
"""
if verbose:
print()
print(f'Showing {self.label} first {N_rows} rows:')
display(self.cat[0:N_rows])
def read_catalogue(self, inp_tb = None, verbose = True, save_sample = False, print_vrad = False,
sample_dir = '../samples_control/'):
"""
Read Gaia Sample.
"""
warnings.filterwarnings('ignore', category=AstropyWarning, append=True)
if inp_tb:
self.make_cat(inp_tb)
else:
inp_cats = glob.glob(sample_dir + '*vot')
if len(inp_cats) == 0:
print(f'No catalogues found in {sample_dir}.')
else:
inp_cats = [inp_cat[inp_cat.rfind('/')+1:] for inp_cat in inp_cats]
print(f'Sample Catalogues in {sample_dir}: ' + '='*46)
print()
for inp_cat in inp_cats:
print('* ' + inp_cat)
print()
while True:
samp_con = input('Choose Control Sample: ')
samp_con_i = glob.glob(sample_dir + samp_con)
if len(samp_con_i) == 1:
self.cat = Table.read(samp_con_i[0])
break
else:
print('Table not found; try again')
if verbose:
print(f'{self.label} loaded. N_elements (rows) = {len(self.cat)}')
# Add columns/check catalogue ==============
print()
self.sanity_checker(verbose = verbose)
self.to_cone_search(verbose = False)
if print_vrad:
self.get_vrad_stats()
if save_sample:
self.save_sample()
def sanity_checker(self, verbose = True):
"""
Make sure that input table is a Gaia catalogue and add new columns
"""
for col in self.bcols:
if col not in self.cat.colnames:
print(f'Warning: {col} is missing in catalogue Table')
raise Exception('Missing Column')
if verbose:
print('Checking catalogue columns (ra, dec, parallax, pmra, phot_g_mean_mag, etc) - OK')
self.add_extra_cols(verbose = verbose)
def add_extra_cols(self, verbose = True):
"""
        Add extra columns to a Gaia DR3 sample.
"""
if 'distance' not in self.cat.colnames:
self.add_distance(verbose = verbose)
if 'phot_g_mean_mag_abs' not in self.cat.colnames:
self.add_absmag(verbose = verbose)
if 'phot_g_mean_mag_err' not in self.cat.colnames:
self.add_mag_errs(verbose = verbose)
if 'l' not in self.cat.colnames:
self.add_galactic(verbose = verbose)
if 'X_gal' not in self.cat.colnames:
self.add_3D_galactic(verbose = verbose)
if 'pm_mod' not in self.cat.colnames:
self.add_pm_mod(verbose = verbose)
def add_distance(self, verbose = True):
"""
Add "distance" column to the catalogue, where distance = 1000./parallaxes.
"""
ncol = MaskedColumn(data = 1./self.cat['parallax'] * 1000, name = 'distance', unit = u.parsec, format = '4.1F')
self.cat.add_column(ncol)
if verbose:
print('Adding new column to Gaia DR3 dataset: Distance')
def add_absmag(self, verbose = True):
"""
Add "absolute_magnitudes" columns to the catalogue.
"""
for col in ['phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag']:
self.cat[col + '_abs'] = self.cat[col] + (5. * np.log10(self.cat['parallax']*0.001) + 5) # Gaia Parallax in mas.
self.cat[col + '_abs'].format = self.cat[col].format
self.cat[col + '_abs'].unit = self.cat[col].unit
if verbose:
print('Adding new columns: Absolute Magnitudes')
def add_mag_errs(self, verbose = True):
"""
Compute photometric errors in magnitudes: mag_err ~ sigma_flux/flux #VALID ONLY FOR SMALL ERRORS
# http://slittlefair.staff.shef.ac.uk/teaching/phy217/lectures/stats/L18/index.html
# https://www.eso.org/~ohainaut/ccd/sn.html
"""
gbands = ['g', 'bp', 'rp']
for band in gbands:
self.cat['phot_' + band + '_mean_mag_err'] = 1./self.cat['phot_' + band + '_mean_flux_over_error']
self.cat['phot_' + band + '_mean_mag_err'].unit = self.cat['phot_' + band + '_mean_mag'].unit
self.cat['phot_' + band + '_mean_mag_err'].format = self.cat['phot_' + band + '_mean_mag'].format
if verbose:
print('Adding new columns: Magnitude Errors')
def add_3D_galactic(self, verbose = True):
"""
Computes 3D cartesian coordinates in the Galactic frame
"""
coords = SkyCoord(self.cat['l'], self.cat['b'], frame='galactic', distance=self.cat['distance'])
self.cat['X_gal'] = coords.cartesian.x
self.cat['Y_gal'] = coords.cartesian.y
self.cat['Z_gal'] = coords.cartesian.z
if verbose:
print('Adding new columns to Gaia DR3 dataset: Galactic Spatial Coordinates (X, Y, Z)_Gal')
def add_pm_mod(self, verbose = True):
"""
Compute the proper motion modulus
"""
self.cat['pm_mod'] = np.sqrt(self.cat['pmra']**2 + self.cat['pmdec']**2)
if verbose:
print('Adding new columns: Proper Motion Modulus')
def get_vrad_stats(self):
"""
Print radial velocity information.
"""
vrads = self.cat['radial_velocity'][self.cat['radial_velocity'].mask == False]
print()
print(f'vrad measurements for {len(vrads)} sources ({len(vrads)/len(self.cat) * 100:4.1F}% of the sample)')
print(f'vrad = {vrads.mean():17.1F} +/- {vrads.std():3.1F} [{vrads.unit}]')
def to_cone_search(self, verbose = False):
"""
Computes average R.A./Dec coords, parallax range, and projected-sky size
"""
ra = self.cat['ra'].mean() * self.cat['ra'].unit
dec = self.cat['dec'].mean() * self.cat['dec'].unit
delta_ra = np.abs(self.cat['ra'].max() - self.cat['ra'].min())
delta_dec = np.abs(self.cat['dec'].max() - self.cat['dec'].min())
radii = np.max([delta_ra, delta_dec]) * 0.5 * self.cat['dec'].unit
para_m = np.floor(self.cat['parallax'].min()*100)/100. * self.cat['parallax'].unit
para_M = np.ceil(self.cat['parallax'].max()*100)/100. * self.cat['parallax'].unit
self.ADQL = {'ra':ra, 'dec':dec, 'radii':radii, 'para_m':para_m, 'para_M':para_M}
if verbose:
print('sample properties saved for ADQL Cone-Search')
def print_cone_properties(self):
"""
Prints Sky-Properties of the Sample to prepare a Gaia Cone Search
"""
if hasattr(self, 'ADQL'):
formatter = iter(['13.2F', '13.2F', '12.2F', '13.2F', '13.2F'])
text = iter(['Average R.A.', 'Average Dec.', 'Radius on-sky', 'Parallax min', 'Parallax max'])
print(f'{self.label} on-Sky sample properties:')
for key in self.ADQL:
print(f'* {next(text)} {self.ADQL[key].value : {next(formatter)}}, {self.ADQL[key].unit}')
else:
print('Sample has no ADQL parameters. Please run .to_cone_search()')
def save_sample(self):
"""
Save the catalogue as an Astropy Table
"""
print()
fname = f"{self.out_path}{self.label.replace(' ', '_')}.vot"
text = f'Saving {self.label} as: {fname}'
print('=' * len(text))
print(text)
print('=' * len(text))
self.cat.write(fname, format = 'votable', overwrite = True)
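# Minimal usage sketch (illustrative only; the file name 'my_sample.vot' and
# the option values below are hypothetical, not part of this module):
#
#   sample = Utils(label='My Sample', data_labs=False)
#   sample.read_catalogue(inp_tb='my_sample.vot', verbose=True)
#   sample.print_cone_properties()
#   sample.save_sample()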
|
{"hexsha": "22284b0c261b76e91224b9df4d95d3a7002d73e7", "size": 9479, "ext": "py", "lang": "Python", "max_stars_repo_path": "pangaia/utils.py", "max_stars_repo_name": "hectorcanovas/PanGaia", "max_stars_repo_head_hexsha": "cb5aa46efdf3056d22a38dd581f5522118fc99d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pangaia/utils.py", "max_issues_repo_name": "hectorcanovas/PanGaia", "max_issues_repo_head_hexsha": "cb5aa46efdf3056d22a38dd581f5522118fc99d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pangaia/utils.py", "max_forks_repo_name": "hectorcanovas/PanGaia", "max_forks_repo_head_hexsha": "cb5aa46efdf3056d22a38dd581f5522118fc99d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-10T12:49:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-10T12:49:19.000Z", "avg_line_length": 39.3319502075, "max_line_length": 131, "alphanum_fraction": 0.5635615571, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2347}
|
function spiral_matrix(n)
end
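# One possible solution sketch for the stub above (hypothetical; the stub is
# intentionally empty in the exercise, and this is not the track's reference
# solution): fill 1:n^2 clockwise, shrinking the boundary ring by ring.
function spiral_matrix_sketch(n)
    m = zeros(Int, n, n)
    top, bottom, left, right = 1, n, 1, n
    k = 1
    while k <= n^2
        for j in left:right; m[top, j] = k; k += 1; end       # top row, left to right
        top += 1
        for i in top:bottom; m[i, right] = k; k += 1; end     # right column, downwards
        right -= 1
        for j in right:-1:left; m[bottom, j] = k; k += 1; end # bottom row, right to left
        bottom -= 1
        for i in bottom:-1:top; m[i, left] = k; k += 1; end   # left column, upwards
        left += 1
    end
    return m
end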
|
{"hexsha": "15461952a35ad750187adeea091af71bcbdae2c5", "size": 31, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "exercises/spiral-matrix/spiral-matrix.jl", "max_stars_repo_name": "tomerarnon/julia-1", "max_stars_repo_head_hexsha": "6313e702d82f4fee10efdf29e943df50857cd7b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2017-06-27T01:08:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:34:10.000Z", "max_issues_repo_path": "exercises/spiral-matrix/spiral-matrix.jl", "max_issues_repo_name": "tomerarnon/julia-1", "max_issues_repo_head_hexsha": "6313e702d82f4fee10efdf29e943df50857cd7b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 269, "max_issues_repo_issues_event_min_datetime": "2017-06-19T13:56:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T22:15:18.000Z", "max_forks_repo_path": "exercises/spiral-matrix/spiral-matrix.jl", "max_forks_repo_name": "tomerarnon/julia-1", "max_forks_repo_head_hexsha": "6313e702d82f4fee10efdf29e943df50857cd7b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 69, "max_forks_repo_forks_event_min_datetime": "2017-06-20T18:47:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T22:15:52.000Z", "avg_line_length": 7.75, "max_line_length": 25, "alphanum_fraction": 0.8064516129, "num_tokens": 7}
|
#!/usr/bin/python
"""Loads a single video and returns action predictions"""
import numpy as np
import tensorflow as tf
from video_utils import *
import i3d
_IMAGE_SIZE = 224
_NUM_CLASSES = 400
_SAMPLE_VIDEO_FRAMES = 79
_SAMPLE_PATHS = {
'rgb': 'data/v_CricketShot_g04_c01_rgb.npy',
'flow': 'data/v_CricketShot_g04_c01_flow.npy',
}
_CHECKPOINT_PATHS = {
'rgb': 'data/checkpoints/rgb_scratch/model.ckpt',
'flow': 'data/checkpoints/flow_scratch/model.ckpt',
'rgb_imagenet': 'data/checkpoints/rgb_imagenet/model.ckpt',
'flow_imagenet': 'data/checkpoints/flow_imagenet/model.ckpt',
}
_LABEL_MAP_PATH = 'data/label_map.txt'
CLASSES_KIN = [x.strip() for x in open(_LABEL_MAP_PATH)]
def get_preds_tensor(input_mode='rgb',n_frames=79):
"""Function to get the predictions tensor, input placeholder and saver object
:param input_mode: One of 'rgb','flow','two_stream'"""
if input_mode == 'rgb':
rgb_variable_map = {}
input_fr_rgb = tf.placeholder(tf.float32,
shape=[1, n_frames,
_IMAGE_SIZE, _IMAGE_SIZE,
3],
name="Input_Video_Placeholder")
with tf.variable_scope('RGB'):
#Building I3D for RGB-only input
rgb_model = i3d.InceptionI3d(_NUM_CLASSES,
spatial_squeeze=True,
final_endpoint='Logits')
rgb_logits,_ = rgb_model(input_fr_rgb,
is_training=False,
dropout_keep_prob=1.0)
for variable in tf.global_variables():
if variable.name.split('/')[0] == 'RGB':
rgb_variable_map[variable.name.replace(':0','')] = variable
rgb_saver = tf.train.Saver(var_list = rgb_variable_map,
reshape=True)
model_predictions = tf.nn.softmax(rgb_logits)
return model_predictions, input_fr_rgb, rgb_saver
else:
print "#TODO: Implement other input modes"
def predict_single_video(video_fn, n_frames):
"""Function to predict actions for the video given by video_fn
video_fn: Filename of the video to predict for
n_frames: Number of frames to use to represent the video"""
video_frames_rgb, _shape = load_video_with_path_cv2(video_fn, n_frames)
video_frames_rgb = np.expand_dims(video_frames_rgb,0)
preds, input_video_ph, saver = get_preds_tensor(n_frames=n_frames)
input_mode = 'rgb'
with tf.Session() as sess:
if input_mode == 'rgb':
saver.restore(sess, _CHECKPOINT_PATHS['rgb'])
predictions = sess.run([preds], feed_dict = {input_video_ph: video_frames_rgb})
top_class = np.argmax(predictions)
#TODO: Implement other input modes
return CLASSES_KIN[top_class], predictions
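# Hypothetical helper (not part of the original script): reports the k most
# probable Kinetics classes from the softmax output of predict_single_video.
def top_k_classes(predictions, k=5):
    probs = np.squeeze(predictions)      # collapse to shape (num_classes,)
    top = np.argsort(probs)[::-1][:k]    # indices of the k largest probabilities
    return [(CLASSES_KIN[i], float(probs[i])) for i in top]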
def main():
video_fn = '/media/data_cifs/cluster_projects/action_recognition/ActivityNet/Crawler/Kinetics/val/fixing hair/TkMVNYg1Nyc_000107_000117.mp4'
n_frames = 79
top_class,preds = predict_single_video(video_fn,n_frames)
    print(top_class)
if __name__=="__main__":
main()
|
{"hexsha": "787827100da764f7b109a6c85fadb5c2812755d0", "size": 3290, "ext": "py", "lang": "Python", "max_stars_repo_path": "depreciated/test_single_video.py", "max_stars_repo_name": "vijayvee/behavior-recognition", "max_stars_repo_head_hexsha": "76eeeb27c2e64f34d0b17884a183fcb346f5634b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-05T08:15:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-05T08:15:21.000Z", "max_issues_repo_path": "depreciated/test_single_video.py", "max_issues_repo_name": "vijayvee/behavior-recognition", "max_issues_repo_head_hexsha": "76eeeb27c2e64f34d0b17884a183fcb346f5634b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "depreciated/test_single_video.py", "max_forks_repo_name": "vijayvee/behavior-recognition", "max_forks_repo_head_hexsha": "76eeeb27c2e64f34d0b17884a183fcb346f5634b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-09-30T07:51:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T12:03:27.000Z", "avg_line_length": 40.1219512195, "max_line_length": 144, "alphanum_fraction": 0.6258358663, "include": true, "reason": "import numpy", "num_tokens": 743}
|
# Based on notebooks (Compute Covariance.ipnyb and Covariance Analysis.ipnyb) and utilities.py
# from: https://github.com/LukasMosser/PorousMediaGan/tree/master/code/notebooks/covariance
# Compute covariance and perform analysis
import numpy as np
import tifffile
from utils import two_point_correlation
import pandas as pd
from tqdm import trange
import argparse
from scipy.optimize import curve_fit
import glob
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument('--original', help='Path to original image file ')
parser.add_argument('--synthetic', help='Path to folder with synthetic images + common name e.g. ../imgs/img3d_')
parser.add_argument('--ending', type=str, default='.tif', help='Image format for synthetic samples')
parser.add_argument('--output', help='Path to save output files')
parser.add_argument('--seed_min', type=int, default=0, help='Starting image #')
parser.add_argument('--seed_max', type=int, help='Number of samples to process')
def main(args):
# Covariance analysis functions
def radial_average(cov):
avg = np.mean(cov, axis=0)
return avg
def straight_line_at_origin(porosity):
def func(x, a):
return a * x + porosity
return func
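    # The fits below rest on standard stereology relations (stated here for
    # reference, not derived in this script): for an isotropic two-phase medium
    # with two-point covariance S2(r), the specific surface area follows from
    # the slope at the origin, S_V = -4 * S2'(0), and the mean chord length of
    # a phase is l_c = -S2(0) / S2'(0).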
orig_img = tifffile.imread(args.original)
# Confirm pore and grain phase values
pore_phase = orig_img.max()
grain_phase = orig_img.min()
print("Pore Phase Value: ", pore_phase)
print("Grain Phase Value: ", grain_phase)
print("Image size is: ", orig_img.shape)
# Calculate covariance for pore phase of original sample
print("Saving covariance data for original sample...")
two_point_covariance_pore_phase_orig = {}
for i, direc in enumerate(["x", "y", "z"]):
two_point_direc = two_point_correlation(orig_img, i, var=pore_phase)
two_point_covariance_pore_phase_orig[direc] = two_point_direc
direc_covariances_pore_phase_orig = {}
for direc in ["x", "y", "z"]:
direc_covariances_pore_phase_orig[direc] = np.mean(np.mean(two_point_covariance_pore_phase_orig[direc], axis=0), axis=0)
#print('Shape of x dir is: ', direc_covariances_pore_phase_orig["x"].shape)
orig_cov_pph = pd.DataFrame(direc_covariances_pore_phase_orig)
orig_cov_pph.to_csv(os.path.join(args.output, "orig_pph.csv"), sep=",", index=False)
# Check first few cov results
#orig_cov_pph_backload = pd.read_csv(os.path.join(args.output, "orig_pph.csv"))
#orig_cov_pph_backload.head()
del two_point_covariance_pore_phase_orig
del two_point_direc
# Calculate covariance for grain phase of original sample
two_point_covariance_grain_phase_orig = {}
for i, direc in enumerate(["x", "y", "z"]):
two_point_direc = two_point_correlation(orig_img, i, var=grain_phase)
two_point_covariance_grain_phase_orig[direc] = two_point_direc
direc_covariances_grain_phase_orig = {}
for direc in ["x", "y", "z"]:
direc_covariances_grain_phase_orig[direc] = np.mean(np.mean(two_point_covariance_grain_phase_orig[direc], axis=0), axis=0)
#print('Shape of x dir is: ', direc_covariances_grain_phase_orig["x"].shape)
orig_cov_gph = pd.DataFrame(direc_covariances_grain_phase_orig)
orig_cov_gph.to_csv(os.path.join(args.output, "orig_gph.csv"), sep=",", index=False)
# Check first few cov results
#covariances_orig_df_backload = pd.read_csv(os.path.join(args.output, "orig_pph.csv"))
#covariances_orig_df_backload.head()
del two_point_covariance_grain_phase_orig
del two_point_direc
# Compute slope of covariance at origin to get specific surface area, and chord length for each phase
# Compute radial average
original_average_pph = radial_average(orig_cov_pph.values.T)
original_average_gph = radial_average(orig_cov_gph.values.T)
# Compute slope at origin of radially-averaged covariance, fit straight line at origin to get SSA
N = 5
slope_pph, slope_pph_cov = curve_fit(straight_line_at_origin(original_average_pph[0]), range(0, N),
original_average_pph[0:N])
slope_gph, slope_gph_cov = curve_fit(straight_line_at_origin(original_average_gph[0]), range(0, N),
original_average_gph[0:N])
#print("Slope for pore phase is: ", slope_pph)
#print("Slope for grain phase is: ", slope_gph)
specific_surface_orig = -4 * slope_pph
#print("Original SSA is: ", specific_surface_orig)
# Compute chord length
chord_length_pph = -original_average_pph[0] / slope_pph
chord_length_gph = -original_average_gph[0] / slope_gph
#print("Chord length of pore phase is: ", chord_length_pph)
#print("Chord length of grain phase: ", chord_length_gph)
orig_data = {
"slope_gph": float(slope_gph), "slope_pph": float(slope_pph),
"specific_surface": float(specific_surface_orig),
"chord_length_pph": float(chord_length_pph), "chord_length_gph": float(chord_length_gph)}
# Store orig covariance values
covariance_values = {}
covariance_values["orig"] = orig_data
# Repeat process for generated samples (pore phase only)
print("Saving covariance data for synthetic samples...")
for seed in trange(args.seed_min, (args.seed_min + args.seed_max)):
        # args.synthetic is a path prefix (e.g. ../imgs/img3d_), so build the
        # glob pattern by concatenation; os.path.join would insert a path
        # separator after the prefix and break the pattern
        im_in = tifffile.imread(glob.glob(args.synthetic + "*" + str(seed).zfill(3) + args.ending)[0])
image = im_in.astype(np.int8)
# determine phase values
pore_phase = image.min()
grain_phase = image.max()
for phase, phase_label in zip([pore_phase, grain_phase], ["pph", "gph"]):
# phase computation
two_point_covariance = {}
for i, direc in enumerate(["x", "y", "z"]):
two_point_direc = two_point_correlation(image, i, var=phase)
two_point_covariance[direc] = two_point_direc
# phase averaging
direc_covariances = {}
for direc in ["x", "y", "z"]:
direc_covariances[direc] = np.mean(np.mean(two_point_covariance[direc], axis=0), axis=0)
# covariance storage
covariance_df = pd.DataFrame(direc_covariances)
covariance_df.to_csv(os.path.join(args.output, "S_" + str(seed).zfill(3) + "_" + phase_label + ".csv"), sep=",", index=False)
del im_in
del image
del two_point_covariance
del direc_covariances
del covariance_df
    # Compute slope of covariance at origin, specific surface area, and chord length for each phase
    ssa_values = []  # collect the SSA of each synthetic sample to report a mean at the end
    for i in range(args.seed_min, (args.seed_min + args.seed_max)):
cov_pph = pd.read_csv(os.path.join(args.output, "S_" + str(i).zfill(3) + "_pph.csv"))
cov_gph = pd.read_csv(os.path.join(args.output, "S_" + str(i).zfill(3) + "_gph.csv"))
average_pph = radial_average(cov_pph.values.T)
average_gph = radial_average(cov_gph.values.T)
slope_pph, slope_pph_cov = curve_fit(straight_line_at_origin(average_pph[0]), range(0, N), average_pph[0:N])
slope_gph, slope_gph_cov = curve_fit(straight_line_at_origin(average_gph[0]), range(0, N), average_gph[0:N])
specific_surface = -4 * slope_pph
chord_length_pph = -average_pph[0] / slope_pph
chord_length_gph = -average_gph[0] / slope_gph
data = {
"slope_gph": float(slope_gph), "slope_pph": float(slope_pph),
"specific_surface": float(specific_surface),
"chord_length_pph": float(chord_length_pph), "chord_length_gph": float(chord_length_gph)}
        covariance_values["S_" + str(i).zfill(3)] = data
        ssa_values.append(float(specific_surface))
    print("Mean synthetic SSA is: ", np.mean(ssa_values))
# Store synthetic covariance values
with open(os.path.join(args.output, "covariance_data.json"), "w") as f:
json.dump(covariance_values, f)
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
{"hexsha": "d25deae11a22086d8d7a0f5dbe5ca28b4c2114d3", "size": 7902, "ext": "py", "lang": "Python", "max_stars_repo_path": "covariance.py", "max_stars_repo_name": "supri-a/RockFlow", "max_stars_repo_head_hexsha": "bb325dbd8cfcfe6a431fe669a33fd0796683c307", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-01T21:31:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T06:07:28.000Z", "max_issues_repo_path": "covariance.py", "max_issues_repo_name": "supri-a/RockFlow", "max_issues_repo_head_hexsha": "bb325dbd8cfcfe6a431fe669a33fd0796683c307", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "covariance.py", "max_forks_repo_name": "supri-a/RockFlow", "max_forks_repo_head_hexsha": "bb325dbd8cfcfe6a431fe669a33fd0796683c307", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-29T08:28:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-16T10:11:25.000Z", "avg_line_length": 42.9456521739, "max_line_length": 137, "alphanum_fraction": 0.6918501645, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2063}
|
library(ggplot2)
library(tidyr)
library(dplyr)
library(cowplot)
library(readr)
library(ggbeeswarm)
theme_set(theme_bw())
options(stringsAsFactors=F)
library(argparser)
p <- arg_parser("ddOWL mutatation allele phasing and plotting tools, v0.1 - Nils Koelling")
p <- add_argument(p, "FAMILIES", help="families file")
p <- add_argument(p, "SNPS", help="snps mask")
p <- add_argument(p, "--prefix", help="output filename prefix")
main = function(argv) {
if(missing(argv)) {
argv = commandArgs(trailingOnly = TRUE)
}
args <- parse_args(p, argv)
if(is.na(args$prefix)) {
args$prefix = args$FAMILIES
}
families = read_csv(args$FAMILIES) %>%
rename(sample = BC) %>%
mutate(rel_sam = sprintf('%s (%s)', relationship, sample))
snps = families %>%
distinct(FamilyID) %>%
rowwise %>%
do({
fam = .$FamilyID
read_tsv(sprintf(args$SNPS, fam), col_types = cols(
POS = col_integer(),
ID = col_character(),
REF = col_character(),
ALT = col_character()
)) %>%
rename(CHROM = `#CHROM`) %>%
mutate(
snp_name = sprintf('%s_%d', CHROM, POS),
snp_name_or_id = ifelse(!is.na(ID), ID, name),
FamilyID = fam
)
}) %>%
distinct(FamilyID, snp_name, .keep_all=TRUE)
#QC PLOTS
rs = read_csv(sprintf('%s.read_stats.csv', args$prefix)) %>%
inner_join(families, by=c('FamilyID', 'sample')) %>%
mutate(
fam_rel_sam = paste(FamilyID, rel_sam),
mismatch_rate = mismatches / length
)
#rs %>% head %>% print
fn = sprintf('%s.read_stats.pdf', args$prefix)
pdf(fn)
print(
ggplot(rs, aes(fam_rel_sam, fill=FamilyID))
+ geom_bar(stat='count', position='dodge')
+ coord_flip()
+ xlab('Family relationship (sample)')
)
print(
ggplot(rs, aes(fam_rel_sam, length, colour=FamilyID))
+ geom_boxplot()
#+ geom_violin(aes(fill=FamilyID))
+ xlab('Family relationship (sample)')
+ coord_flip()
)
print(
ggplot(rs, aes(fam_rel_sam, mismatch_rate, colour=FamilyID))
+ geom_boxplot()
#+ geom_violin(aes(fill=FamilyID))
+ xlab('Family relationship (sample)')
+ coord_flip()
)
print(
ggplot(rs, aes(fam_rel_sam, mapping_quality, colour=FamilyID))
+ geom_boxplot()
#+ geom_violin(aes(fill=FamilyID))
+ xlab('Family relationship (sample)')
+ coord_flip()
)
print(
ggplot(rs, aes(fam_rel_sam, mean_baseq, colour=FamilyID))
+ geom_boxplot()
#+ geom_violin(aes(fill=FamilyID))
+ xlab('Family relationship (sample)')
+ coord_flip()
)
dev.off()
message(fn)
#background
bc = read_csv(sprintf('%s.read_background_calls.csv', args$prefix)) %>%
inner_join(families, by=c('FamilyID', 'sample'))
var_ranges = bc %>%
distinct(CHROM, POS)
var_ranges = GenomicRanges::GRanges(var_ranges$CHROM, IRanges::IRanges(var_ranges$POS, var_ranges$POS))
GenomicRanges::elementMetadata(var_ranges)$ref = as.character(BSgenome::getSeq(BSgenome.Hsapiens.UCSC.hg38::BSgenome.Hsapiens.UCSC.hg38, var_ranges))
bcs = bc %>% left_join(
var_ranges %>% data.frame %>% mutate(CHROM = as.character(seqnames)) %>% select(CHROM, POS = start, ref),
by = c('CHROM', 'POS')
) %>% mutate(
match = snp_call == ref
)
cat('REF match percentage:\n')
print(table(bcs$match) / nrow(bcs))
bcsp = bcs %>%
replace_na(list(snp_call_quality = 0)) %>%
mutate(
qual_bin = cut(snp_call_quality, 20)
) %>%
group_by(FamilyID, sample, qual_bin) %>%
summarise(p_error_empirical = 1 - sum(match) / n()) %>%
mutate(
qual_bin_clean = gsub('\\(|\\]', '', qual_bin)
) %>%
tidyr::separate(qual_bin_clean, sep = ',', into = c('start', 'end')) %>%
mutate(
start = as.numeric(start),
end = as.numeric(end),
mid = start + (end - start) / 2,
p_error_theoretical = 10^(-mid/10),
start = ifelse(start == min(start), -Inf, start),
end = ifelse(end == max(end), Inf, end)
)
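  # p_error_theoretical above follows the Phred convention: a call with
  # quality Q has error probability 10^(-Q/10), e.g. Q = 20 corresponds to 1%.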
fn = sprintf('%s.quality_scores.pdf', args$prefix)
pdf(fn)
print(
ggplot(bcs, aes(sample, fill = match))
+ geom_bar(stat = 'count', position = 'fill')
)
print(
ggplot(bcs %>% group_by(snp_name) %>% filter(n() > 10) %>% ungroup, aes(snp_name, fill = match))
+ geom_bar(stat = 'count', position = 'fill')
+ coord_flip()
+ facet_grid(~sample)
)
print(
ggplot(bcs %>% group_by(snp_name) %>% filter(n() > 10) %>% ungroup, aes(snp_name, fill = match))
+ geom_bar(stat = 'count', position = 'stack')
+ coord_flip()
+ facet_grid(~sample)
)
print(
ggplot(bcs %>% replace_na(list(snp_call_quality = 0)), aes(snp_call_quality, fill = match))
+ geom_histogram()
+ facet_grid(ref~sample)
)
print(
ggplot(bcs %>% replace_na(list(snp_call_quality = 0)), aes(match, snp_call_quality))
+ geom_boxplot()
+ facet_grid(ref~sample)
)
print(
bcs %>%
replace_na(list(snp_call_quality = 0)) %>%
mutate(
qual_bin = cut(snp_call_quality, 20)
) %>%
ggplot
+ aes(qual_bin, fill = match)
+ geom_bar(stat = 'count', position = 'fill')
+ coord_flip()
+ facet_grid(ref~sample)
)
print(
ggplot(bcsp, aes(p_error_theoretical, p_error_empirical, colour=FamilyID)) + geom_point() + geom_abline() + facet_wrap(~sample)
)
print(
ggplot(bcsp, aes(p_error_theoretical, p_error_empirical, colour=FamilyID)) + geom_point() + scale_y_log10() + scale_x_log10() + geom_abline() + facet_wrap(~sample)
)
dev.off()
message(fn)
#allele counts
ac = read_csv(sprintf('%s.allele_counts.csv', args$prefix)) %>%
inner_join(families, by=c('FamilyID', 'sample')) %>%
left_join(snps, by=c('FamilyID', 'snp_name'))
#ac %>% head %>% print
fn = sprintf('%s.allele_counts.pdf', args$prefix)
pdf(fn)
for(family in unique(ac$FamilyID)) {
fam_ac = ac %>% filter(FamilyID == family)
allele_totals = fam_ac %>%
# filter(!snp_allele %in% c('_FLAG', '_DEL')) %>%
group_by(snp_name_or_id, snp_allele) %>%
summarise(total = sum(count)) %>%
mutate(
rank = rank(-total, ties.method = 'first')
)
summarised_counts = fam_ac %>%
group_by(rel_sam, relationship, snp_name_or_id) %>%
inner_join(allele_totals, by=c('snp_name_or_id', 'snp_allele')) %>%
mutate(
allele_simple = ifelse(rank <= 4, snp_allele, '_OTHER'),
fraction = count / sum(count)
) %>%
group_by(sample, relationship, snp_name_or_id, allele_simple) %>%
summarise(
count = sum(count),
fraction = sum(fraction)
)
plots = summarised_counts %>%
group_by(snp_name_or_id) %>%
do(
plot_fractions = (
ggplot(., aes(sample, fraction, fill=allele_simple))
+ geom_bar(stat='identity', position='stack')
+ scale_fill_brewer(type = 'qual', palette = 'Dark2')
+ ggtitle(family, sprintf('%s fraction', .$snp_name_or_id))
+ scale_y_continuous(labels=scales::percent)
+ xlab('Sample')
+ ylab('Percent of reads')
+ coord_flip()
+ theme(legend.position = 'none')
),
plot_counts = (
ggplot(., aes(sample, count, fill=allele_simple))
+ geom_bar(stat='identity', position='stack')
+ scale_fill_brewer(type = 'qual', palette = 'Dark2', name='Call')
+ ggtitle(family, sprintf('%s count', .$snp_name_or_id))
+ xlab('Sample')
+ ylab('Reads')
+ coord_flip()
+ theme(legend.position = 'bottom')
)
) %>%
do(
grid = plot_grid(.$plot_fractions, .$plot_counts, nrow=2)
)
lapply(plots$grid, print)
}
dev.off()
message(fn)
#Actual phasing
pe = read_csv(sprintf('%s.phase_evidence.csv', args$prefix)) %>%
inner_join(families, by=c('FamilyID', 'sample')) %>%
left_join(snps, by=c('FamilyID', 'snp_name'))
#pe %>% head %>% print
fn = sprintf('%s.phase_evidence.pdf', args$prefix)
pdf(fn, width=10, height=5)
for(family in unique(pe$FamilyID)) {
r_full_df = pe %>%
filter(FamilyID == family) %>%
filter(snp_allele != '_NONE')
for(snp in unique(r_full_df$snp_name_or_id)) {
snp_data = r_full_df %>%
filter(snp_name_or_id == snp)
#use top3 for snp allele type if we have no maternal/paternal info
if(all(is.na(snp_data$snp_allele_type))) {
#FIXME: we do not actually need this, because we simplify to REF/ALT/_OTHER anyway!
top_alleles = snp_data %>%
group_by(sample, relationship) %>%
mutate(rank = rank(-count, ties.method = 'first')) %>%
group_by(snp_allele) %>%
summarise(meanrank = mean(rank)) %>%
top_n(4, wt = meanrank)
#print(top_alleles)
#now filter down to top alleles and set allele type to raw allele
snp_data = snp_data %>%
inner_join(top_alleles, by = 'snp_allele') %>%
mutate(snp_allele_type = snp_allele)
}
snp_data = snp_data %>%
filter(!is.na(snp_allele_type) & !is.na(mutation_allele_type)) %>%
mutate(
snp_allele_type = factor(snp_allele_type),
mutation_allele_type = factor(mutation_allele_type, levels = c('mutation', 'wild-type'))
)
#print(snp_data)
snp_data = snp_data %>%
complete(
nesting(sample, relationship),
snp_allele_type, mutation_allele_type,
fill = list(count = 0)
)
#print(snp_data)
if(nrow(snp_data) > 0) {
print(
do.call(plot_grid,
c(
snp_data %>%
group_by(sample, relationship) %>%
do(plot = {
(
ggplot(., aes(mutation_allele_type, snp_allele_type, fill = count, label = count))
+ geom_tile()
+ geom_text(aes(colour = count > mean(count)), size = 3)
+ scale_y_discrete(drop=FALSE)
+ scale_x_discrete(drop=FALSE)
+ coord_fixed()
+ scale_colour_manual(guide = FALSE, values = c(`TRUE` = "black", `FALSE` = "white"))
#+ viridis::scale_fill_viridis(name = 'Number of reads')
+ scale_fill_continuous(low = 'darkblue', high = 'yellow')
+ ggtitle(sprintf('%s %s', family, unique(.$relationship)), sprintf('%s / %s', snp, unique(.$sample)))
+ theme(
axis.title = element_blank(),
legend.position = 'bottom',
legend.text = element_text(size = 6)
)
)
}) %>%
.[['plot']],
nrow = 1
)
)
)
}
}
}
dev.off()
message(fn)
}
if(!interactive()) {
main()
}
|
{"hexsha": "3501ab6129d386f08689f2b3b1b3bba438118095", "size": 12737, "ext": "r", "lang": "R", "max_stars_repo_path": "phaser.r", "max_stars_repo_name": "koelling/ddowl", "max_stars_repo_head_hexsha": "ff9a0fa40768c7efd8e0218da12c63ead1743c26", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-29T10:46:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-29T10:46:56.000Z", "max_issues_repo_path": "phaser.r", "max_issues_repo_name": "koelling/ddowl", "max_issues_repo_head_hexsha": "ff9a0fa40768c7efd8e0218da12c63ead1743c26", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phaser.r", "max_forks_repo_name": "koelling/ddowl", "max_forks_repo_head_hexsha": "ff9a0fa40768c7efd8e0218da12c63ead1743c26", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-29T10:46:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-29T10:46:58.000Z", "avg_line_length": 35.7780898876, "max_line_length": 171, "alphanum_fraction": 0.4983905158, "num_tokens": 3051}
|
import numpy as np
import tfunet
from tfunet.image.generator import GrayScaleDataProvider
from tfunet.train import Trainer
np.random.seed(2018)
generator = GrayScaleDataProvider(nx=572, ny=572, cnt=20, rectangles=False)
print(f"n_channels: {generator.channels}")
print(f"n_classes: {generator.n_class}")
net = tfunet.TFUnet(n_channels=generator.channels,
n_classes=generator.n_class,
n_layers=3,
n_filters=16)
trainer = Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
path = trainer.train(generator, "./unet_trained",
training_iters=32,
epochs=5,
dropout=0.75, # probability to keep units
display_step=2)
|
{"hexsha": "df6fae1761aade251f77e9308718931745947e92", "size": 788, "ext": "py", "lang": "Python", "max_stars_repo_path": "tfunet/scripts/demo.py", "max_stars_repo_name": "aidinhass/tgs-salt-challenge", "max_stars_repo_head_hexsha": "707a64dd33e8d09b483cf44132bb156c27151da4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tfunet/scripts/demo.py", "max_issues_repo_name": "aidinhass/tgs-salt-challenge", "max_issues_repo_head_hexsha": "707a64dd33e8d09b483cf44132bb156c27151da4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tfunet/scripts/demo.py", "max_forks_repo_name": "aidinhass/tgs-salt-challenge", "max_forks_repo_head_hexsha": "707a64dd33e8d09b483cf44132bb156c27151da4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.625, "max_line_length": 82, "alphanum_fraction": 0.6370558376, "include": true, "reason": "import numpy", "num_tokens": 180}
|
Require Import Coq.Setoids.Setoid.
Require Import List.
Require Import JamesTactics.
Require Import Misc.
Require Import ListEx.
Require Import EqDec.
Require Import Enumerable.
Import ListNotations.
Class SpaceSearch := {
Space : Type -> Type;
empty : forall {A}, Space A;
single : forall {A}, A -> Space A;
union : forall {A}, Space A -> Space A -> Space A;
bind : forall {A B}, Space A -> (A -> Space B) -> Space B;
search : forall {A}, Space A -> option A;
contains : forall {A}, A -> Space A -> Prop;
emptyOk : forall {A} {a:A}, ~contains a empty;
singleOk : forall {A} {a a':A}, a = a' <-> contains a' (single a);
unionOk : forall {A S T} {a:A}, (contains a S \/ contains a T) <-> contains a (union S T);
bindOk : forall {A B S f} {b:B}, (exists a:A, contains a S /\ contains b (f a)) <-> contains b (bind S f);
searchOk : forall {A S} {a:A}, search S = Some a -> contains a S;
searchOk' : forall {A S} {a:A}, search S = None -> ~contains a S
}.
Section SpaceSearch.
Context `{SpaceSearch}.
Class Free A := {
free : Space A;
freeOk : forall (a:A), contains a free
}.
Arguments free _ [_].
Global Instance freeBool : Free bool.
refine {|
free := union (single true) (single false)
|}.
Proof.
intro b.
rewrite <- unionOk.
destruct b.
- left. apply singleOk. reflexivity.
- right. apply singleOk. reflexivity.
Defined.
Global Instance freeSigT {A B} `{Free A}
`{forall a:A, Free (B a)} :
Free (sigT B).
refine {|
free := bind (free A) (fun a =>
bind (free (B a)) (fun b =>
single [a & b]))
|}.
Proof.
intros [a b].
rewrite <- bindOk; eexists.
constructor; [apply freeOk|].
rewrite <- bindOk; eexists.
constructor; [apply freeOk|].
apply singleOk.
reflexivity.
Defined.
Global Instance freeProd {A B} `{Free A} `{Free B} : Free (A * B).
refine {|
free := bind (free A) (fun a =>
bind (free B) (fun b =>
single (a, b)))
|}.
Proof.
intros [a b].
rewrite <- bindOk; eexists.
constructor; [apply freeOk|].
rewrite <- bindOk; eexists.
constructor; [apply freeOk|].
apply singleOk.
reflexivity.
Defined.
Global Instance freeEmpty : Free Empty_set.
refine {| free := empty |}.
Proof.
intros [].
Defined.
Global Instance freeUnit : Free unit.
refine {| free := single tt |}.
Proof.
intros [].
apply singleOk.
reflexivity.
Defined.
Global Instance freeListIn {A} l : Free {a:A | In a l}.
refine {| free := _ |}.
refine (list_rect (fun l => Space {a:A | In a l}) empty (fun a l' S => _) l).
refine (union (single (exist _ a _))
(bind S (fun p =>
(single (exist (fun a' => In a' (a::l')) (proj1_sig p) _))))).
Proof.
- cbn.
left.
reflexivity.
- cbn.
right.
exact (proj2_sig p).
- induction l as [|a l IHl].
* intros [? []].
* intros [a' l'].
cbn in *.
rewrite <- unionOk.
destruct l'.
+ subst.
left.
apply singleOk.
reflexivity.
+ right.
specialize (IHl (exist _ a' i)).
rewrite <- bindOk.
eexists.
split; [apply IHl|].
apply singleOk.
reflexivity.
Defined.
End SpaceSearch.
Arguments free [_] _ [_].
Instance listSpaceSearch : SpaceSearch.
refine {|
Space := list;
empty A := [];
single A a := [a];
union A l l' := l ++ l';
bind A B S f := concat (f <$> S);
search A l := match l with [] => None | a::_ => Some a end;
contains := In
|}.
Proof.
- compute.
trivial.
- compute.
intros.
constructor.
* left.
trivial.
* intro h.
destruct h; intuition.
- symmetry.
apply in_app_iff.
- intros A B l f b.
constructor.
* intro h.
destruct h as [a [al bfa]].
induction l as [|a'].
+ compute in *.
intuition.
+ cbn in *.
rewrite in_app_iff.
destruct al. {
left.
subst_max.
intuition.
} {
right.
intuition.
}
* intro h.
induction l.
+ compute in h.
intuition.
+ cbn in h.
rewrite in_app_iff in *.
destruct h. {
exists a.
cbn.
intuition.
} {
specialize (IHl H).
destruct IHl as [a' []].
exists a'.
intuition.
}
- intros.
break_match.
* intuition.
inversion H.
* intuition.
inversion H.
subst_max.
cbn.
left.
reflexivity.
- intros.
break_match.
* cbn.
intuition.
* inversion H.
Defined.
Global Instance enumerableFree {A} `{@Free listSpaceSearch A} : enumerable A.
refine {| enumerate := free A |}.
Proof.
exact freeOk.
Defined.
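(* Hypothetical sanity check (an illustrative addition, not in the original
   file): with the list-based instance, searching the free space of booleans
   succeeds and returns the first listed element. *)
Example search_free_bool : search (free bool) = Some true.
Proof. reflexivity. Qed.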
|
{"author": "konne88", "repo": "CoqStdlib", "sha": "ffac367394a6c9ed9a84e403682c09de90806e4b", "save_path": "github-repos/coq/konne88-CoqStdlib", "path": "github-repos/coq/konne88-CoqStdlib/CoqStdlib-ffac367394a6c9ed9a84e403682c09de90806e4b/SpaceSearch.v"}
|
\subsection{Cosmic ray signal removal}
\label{subsec:spike_removal}
Raman scattering is a weak phenomenon, and its measurement therefore requires
very sensitive detectors.
Hand in hand with sensitivity, however, comes susceptibility to artifacts
caused by signals originating from sources other than the sample under
investigation.
Among the major ones is the signal caused by cosmic rays, which is
characterized by sharp lines that usually affect only a few pixels of the CCD
detector.
The spectroscopic software WinSpec, together with the Princeton Instruments
CCD detector, made it possible to clear all the charge accumulated from cosmic
rays before each measurement and to set up spike detection, based on differences
between measured frames, with automatic removal of sharp lines from the spectra.
However, these corrections were not sufficient; in particular, the first
measured frame was often corrupted by sharp lines caused by the interaction of
cosmic ray particles with the CCD detector
(see \figref{cosmic_spikes:spectrum}).
\begin{figure}
\centering
\input{results_and_discussion/assets/spike_removal/spikes}
\vspace{3mm}
\caption[%
UVRR spectrum containing cosmic ray signal.
]{%
\captiontitle{%
UVRR spectrum containing cosmic ray signal.
}
The spectrum was acquired as the first frame, with a 60\,s accumulation time,
using 5\,mW of the 244\,nm excitation laser at the sample of 500\,\g{m}M
(in phosphates) poly(dAdT) at 20\,\textdegree{}C.
Two sharp lines originating from cosmic ray interaction with the CCD
detector at 1178 and 1679\,\icm{} are clearly visible.
}
\label{\figlabel{cosmic_spikes:spectrum}}
\end{figure}
Two programs were used for additional correction of the cosmic ray signal, both
developed in the MATLAB programming environment
\parencite{Matlab}.
The first one, \emph{Spikie}
\parencite{Spikie2011},
was developed within the scope of
\textcite{Klener2011}
and is based on the detection of sharp lines inside the measured spectrum.
This program was used mainly for more complicated scenarios where the cosmic
ray lines interfered strongly with Raman bands of the measured samples.
The second program, \emph{Spycor}
\parencite{Spycor2018},
was created as part of this thesis and is based on a comparison of consecutive
spectra, relying on the fact that during a macroscopic Raman measurement the
spectra change only slowly with time, so consecutive spectra are similar.
It calculates the standard deviation of a specified number of spectra preceding
and following the analyzed spectrum, multiplies it by a user-defined constant,
and uses it as a threshold for spike detection.
It then replaces the spike by an average of the spectra used for the standard
deviation calculation. The spectrum from
\figref{cosmic_spikes:spectrum}
opened in the Spycor program can be seen in
\figref{cosmic_spikes:spycor}.
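Schematically, if $\bar{I}(\tilde{\nu})$ and $\sigma(\tilde{\nu})$ denote the
mean and the standard deviation of the neighboring spectra at the wavenumber
$\tilde{\nu}$, the procedure described above amounts to flagging a point
$I(\tilde{\nu})$ of the analyzed spectrum as a spike whenever
$|I(\tilde{\nu}) - \bar{I}(\tilde{\nu})| > k\,\sigma(\tilde{\nu})$,
where $k$ is the user-defined constant (written here with a two-sided
criterion for definiteness), and replacing it by $\bar{I}(\tilde{\nu})$.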
\begin{figure}
\centering
\ig{1}{results_and_discussion/assets/spike_removal/spycor}
\caption[%
Spycor -- program for spike removal.
]{%
\captiontitle{%
Spycor -- program for spike removal.
}
The UVRR spectrum from \figref{cosmic_spikes:spectrum} opened in the
Spycor program
\parencite{Spycor2018}.
The two spikes were identified and replaced by the average spectrum
from the consecutive frames.
}
\label{\figlabel{cosmic_spikes:spycor}}
\end{figure}
|
{"hexsha": "2520d5e6ba1e0fdb82ae852f82429d6ea7206711", "size": 3348, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/results_and_discussion/spike_removal.tex", "max_stars_repo_name": "lumik/phd_thesis", "max_stars_repo_head_hexsha": "3b29f24732d49b64c627aeb8f6585f042cd59c4e", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/results_and_discussion/spike_removal.tex", "max_issues_repo_name": "lumik/phd_thesis", "max_issues_repo_head_hexsha": "3b29f24732d49b64c627aeb8f6585f042cd59c4e", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-08-13T12:27:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-07T03:00:58.000Z", "max_forks_repo_path": "src/results_and_discussion/spike_removal.tex", "max_forks_repo_name": "lumik/phd_thesis", "max_forks_repo_head_hexsha": "3b29f24732d49b64c627aeb8f6585f042cd59c4e", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8292682927, "max_line_length": 79, "alphanum_fraction": 0.7986857826, "num_tokens": 829}
|
# Author: Lukasz Bratos
# Function f computing the value for a given x
function f(x :: Float64)
return sqrt(x^2 + one(Float64)) - one(Float64)
end
# Function g computing the value for a given x
function g(x :: Float64)
return x^2 / (sqrt(x^2 + one(Float64)) + one(Float64))
end
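# Remark (added note): f and g are algebraically identical, but for small x the
# subtraction in f cancels nearly all significant digits, since sqrt(x^2 + 1) is
# close to one, whereas g avoids the subtraction and stays accurate as x -> 0.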
# Print the values formatted as LaTeX table rows
for i in 1:10
println(i, " & ", f(8.0 ^ (-i)), " & ", g(8.0 ^ (-i)), " \\\\\n\\hline")
end
for i in 1:10
println(20 * i, " & ", f(8.0 ^ (-20 * i)), " & ", g(8.0 ^ (-20 * i)), " \\\\\n\\hline")
end
|
{"hexsha": "e506ac49c78d033f493926d235de5909cac9098c", "size": 535, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "list1/task6.jl", "max_stars_repo_name": "luk9400/on", "max_stars_repo_head_hexsha": "0f35fb60d020c065c96c54893161a3c41ab77acb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "list1/task6.jl", "max_issues_repo_name": "luk9400/on", "max_issues_repo_head_hexsha": "0f35fb60d020c065c96c54893161a3c41ab77acb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "list1/task6.jl", "max_forks_repo_name": "luk9400/on", "max_forks_repo_head_hexsha": "0f35fb60d020c065c96c54893161a3c41ab77acb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.75, "max_line_length": 91, "alphanum_fraction": 0.5738317757, "num_tokens": 210}
|
SUBROUTINE MCFIT(LOUT)
LOGICAL IERR
! KERR = KERR.OR.IERR
IF (IERR) WRITE (LOUT,8000)
8000 FORMAT (10X, 'ERROR IN CKXNUM READING FROM TRANSPORT DATA BASE')
END
|
{"hexsha": "d2a732e325f73bba7105162e1fefd3202368a317", "size": 189, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/Fortran_tests/test2007_223.f", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/test2007_223.f", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/test2007_223.f", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 27.0, "max_line_length": 70, "alphanum_fraction": 0.6349206349, "num_tokens": 67}
|
import init
import pandas as pd
import constants as cn
from support import coordinate
from dateutil import parser
import datetime as dt
import support.seamo_exceptions as se
import numpy as np
"""
Trip base class.
A trip is one of the base inputs for the Mobility Index. This class is created to facilitate
the use of trips as units of analysis within the individual Index Calculators (Mode choice, affordability, reliability).
The Trip Object is used to represent an individual Trip and store and keep track of its core attributes and
defining variables.
Attributes:
origin: Block Group ID (string) of place where the trip originated.
destination: Coordinates of destination latitude and longitude (dest_lat, dest_lon)
mode: mode of the trip (WALKING,TRANSIT, DRIVING, CYCLING)
departure_time: string indicating the time when the trip started
distance: float indicating distance travelled between origin and destination.
duration: float indicating the time elapsed between origin and destination.
basket_category: string indicating the type of destination (i.e. school, hospital, pharmacy, post office, citywide destination, urban village, etc.)
citywide_type: string that stores categories for citywide destinations
value_of_time_rate: float, rate used as base for cost, representing opportunity cost of travel time
cost = float indicating the full cost of the trip
viable: value between 0 and 1 indicating the level of viability of the trip, as determined by the mode choice calculator.
TODO:
Revise attributes
persona = None * is a persona an attribute of a trip? Need to define
time_of_day (do we need this?)
type_of_day = None (do we need this?)
"""
class Trip(object):
def __init__(self, mode, origin, dest_lat, dest_lon, distance, duration,
basket_category, departure_time, citywide_type=None, value_of_time_rate=cn.VOT_RATE, place_name=None):
"""
Input:
origin: string (a block group ID)
dest_lat: float
dest_lon: float
"""
self.origin = origin
self.destination = coordinate.Coordinate(dest_lat, dest_lon)
self.mode = mode
self.departure_time = departure_time
self.distance = distance
self.duration = duration
self.basket_category = basket_category
self.citywide_type = citywide_type
        self.value_of_time_rate = value_of_time_rate  # honor the passed-in rate (defaults to cn.VOT_RATE)
self.place_name = place_name
self.cost = None
self.direct_cost = None
self.persona = None
self.time_of_day = None
self.type_of_day = None
self.viable = None
self.dest_blockgroup = None
self.neighborhood_long = None
self.neighborhood_short = None
self.council_district = None
self.urban_village = None
self.zipcode = None
def set_geocoded_attributes(self, dest_blockgroup, neighborhood_long, neighborhood_short,
council_district, urban_village, zipcode):
self.dest_blockgroup = dest_blockgroup
self.neighborhood_long = neighborhood_long
self.neighborhood_short = neighborhood_short
self.council_district = council_district
self.urban_village = urban_village
self.zipcode = zipcode
self.destination.set_geocoded_attributes(dest_blockgroup, neighborhood_long,
neighborhood_short, council_district, urban_village, zipcode)
return self
def set_cost(self):
"""
Sets the cost of the trip based on the base rate.
Only includes cost value of time.
"""
self.cost = self._calculate_base_cost(self.duration)
self.direct_cost = 0
return self
def set_viability(self, viability):
"""
Sets the viability of the trip.
The value of viability can be determined using the mode choice calculator.
Input:
viability (float between 0 and 1)
"""
self.viable = viability
def get_origin_coordinate(self):
"""
This function returns a coordinate object that allows you to access the centroid lat/lon
of the origin blockgroup and store the geocoded information of this object.
"""
seattle_block_groups = pd.read_csv(cn.SEATTLE_BLOCK_GROUPS_FP)
        df = seattle_block_groups[seattle_block_groups[cn.KEY] == self.origin]
return coordinate.Coordinate(df.lat, df.lon)
def set_persona(self, persona):
"""
TODO: define how personas will relate to trip object. Currently unclear.
"""
self.persona = persona
def _calculate_base_cost(self, duration):
"""
Estimates trip cost from base rate. Includes only costs from time spent on trip.
"""
return duration * self.value_of_time_rate / cn.MIN_TO_HR
def print_destination(self, *args):
"""
        Prints the geocoded destination and, optionally, selected geocode attributes.
Arguments are optional and can include any or all of the geocode attributes of a destination
such as blockgroup, neighborhood, zip code, council district, urban village, etc.
TODO: Make sure all the correct possible args are listed.
"""
        print(self.destination)
        for attribute in args:
            print(self.destination.get_attribute(attribute))
class CarTrip(Trip):
"""
Child class of Trip for trips made by car.
The distingusihing features are that car trips duration is based on time spent in traffic
and cost methods are specific to those incurred when driving (for example gas and parking).
TODO: refactor self.destination in child constructor.
"""
def __init__(self, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time,
duration_in_traffic=0, mile_rate=cn.AAA_RATE):
super().__init__(cn.DRIVING_MODE, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time)
self.mile_rate = mile_rate
self.cost_to_park = None
self.parking_category = None
self.duration = self._calculate_car_duration(duration_in_traffic)
def set_cost(self):
"""
sets cost of a car trip.
"""
self.cost = super()._calculate_base_cost(self.duration)
self.direct_cost = self._calculate_cost()
self.cost += self.direct_cost
return self
def _calculate_car_duration(self, duration_in_traffic):
#TODO: do I want to save the original duration for car trips?
#TODO: make a specific exception for no min
return duration_in_traffic + cn.PARKING_TIME_OFFSET
def _calculate_cost(self):
"""
Cost methods to estimate costs during car trip (for example gas and parking)
"""
self.destination.set_parking_cost()
self.cost_to_park = self.destination.parking_cost
return self.distance * self.mile_rate + self.cost_to_park
class TransitTrip(Trip):
def __init__(self, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time, fare_value):
super().__init__(cn.TRANSIT_MODE, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time)
self.fare_value = self.get_fare_value(fare_value)
def get_fare_value(self, fare_value):
"""
TODO: check for zero/empty/NaN fare value. Set this to zero or standard fare value.
"""
if np.isnan(fare_value):
fare_value = 0
return fare_value
def set_cost(self):
self.cost = super()._calculate_base_cost(self.duration)
self.direct_cost = self._calculate_cost()
self.cost += self.direct_cost
return self
def _calculate_cost(self):
return self.fare_value
class BikeTrip(Trip):
def __init__(self, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time, bike_rate=cn.BIKE_RATE):
super().__init__(cn.BIKING_MODE, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time)
self.bike_rate = bike_rate
def set_cost(self):
self.cost = super()._calculate_base_cost(self.duration)
self.direct_cost = self._calculate_cost()
self.cost += self.direct_cost
return self
def _calculate_cost(self):
return self.distance * self.bike_rate
class WalkTrip(Trip):
def __init__(self, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time):
super().__init__(cn.WALKING_MODE, origin, dest_lat, dest_lon, distance, duration, basket_category, departure_time)
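# Hypothetical usage sketch (not part of the original module; the literal values
# below are illustrative only). A CarTrip is built from an origin block group and
# destination coordinates, then its full cost (value of time plus direct costs
# such as gas and parking) is computed via set_cost().
if __name__ == '__main__':
    trip = CarTrip(origin='530330001001', dest_lat=47.61, dest_lon=-122.33,
                   distance=5.2, duration=18.0, basket_category='pharmacy',
                   departure_time='08:00 AM', duration_in_traffic=22.0)
    trip.set_cost()  # requires parking-cost data to be available for the destination
    print(trip.cost, trip.direct_cost)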
|
{"hexsha": "12e68d7b52f45de8a947122cb5dc4290b858fb4f", "size": 8780, "ext": "py", "lang": "Python", "max_stars_repo_path": "seamo/support/trip.py", "max_stars_repo_name": "amandalynne/Seattle-Mobility-Index", "max_stars_repo_head_hexsha": "f21d2fa6913ce9474aedc298e9e4a6e7c9390e64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-08-20T18:34:03.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-02T23:41:52.000Z", "max_issues_repo_path": "seamo/support/trip.py", "max_issues_repo_name": "amandalynne/Seattle-Mobility-Index", "max_issues_repo_head_hexsha": "f21d2fa6913ce9474aedc298e9e4a6e7c9390e64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "seamo/support/trip.py", "max_forks_repo_name": "amandalynne/Seattle-Mobility-Index", "max_forks_repo_head_hexsha": "f21d2fa6913ce9474aedc298e9e4a6e7c9390e64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-02T23:42:24.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-02T23:42:24.000Z", "avg_line_length": 39.5495495495, "max_line_length": 156, "alphanum_fraction": 0.6802961276, "include": true, "reason": "import numpy", "num_tokens": 1860}
|
/*
MIT License
Copyright (c) 2022 Lou Amadio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <memory>
#include <chrono>
#include "rclcpp/rclcpp.hpp"
#include <boost/asio.hpp>
#include <geometry_msgs/msg/twist.hpp>
#ifdef _WIN32
#pragma optimize( "", off )
#else
#include "i2c/i2c.h"
#pragma GCC optimize ("O0")
#endif
using namespace std::chrono_literals;
using std::placeholders::_1;
uint8_t kMotorNeutral = 128;
typedef enum : uint8_t
{
MotorCommand_Id = 0x01,
MotorCommand_Status = 0x77,
MotorCommand_Enable = 0x70,
MotorCommand_Drive0 = 0x20,
//..
//MotorCommand_DriveX = 0x41,
} QwiicMotorCommand;
typedef enum : uint8_t
{
MotorStatusBit_Enum = 0x01,
MotorStatusBit_Busy = 0x02,
MotorStatusBit_Read = 0x04,
MotorStatusBit_Write = 0x08,
MotorStatusBit_Enable = 0x10
} QwiicMotorStatusBit;
class MotorSubscriber : public rclcpp::Node
{
public:
MotorSubscriber()
: Node("ros_qwiic_motor")
, _wheelSeparation(0.1)
, _wheelRadius(0.03)
, _powerScale(.05) // power -> RPM
, _leftInvert(false)
, _rightInvert(false)
, _id(0x5D)
{
}
void start()
{
#ifndef _WIN32
get_parameter_or<uint8_t>("id", _id, 0x5D);
if ((_i2cFileDescriptor = i2c_open("/dev/i2c-1")) == -1)
{
return;
}
i2c_init_device(&_i2cDevice);
_i2cDevice.bus = _i2cFileDescriptor;
_i2cDevice.addr = _id;
#endif
rclcpp::Rate loop_rate(1);
uint8_t id = getId();
RCLCPP_INFO(rclcpp::get_logger("motor"), "Communicating with motor id: [%d]", id);
while (!ready())
{
RCLCPP_INFO(rclcpp::get_logger("motor"), "Waiting for Motor Controller to be ready");
loop_rate.sleep();
}
while (busy())
{
      RCLCPP_INFO(rclcpp::get_logger("motor"), "Waiting for Motor Controller to not be busy...");
loop_rate.sleep();
}
enable();
get_parameter_or<float>("wheelSeparation", _wheelSeparation, .10);
get_parameter_or<float>("wheelRadius", _wheelRadius, 0.03);
get_parameter_or<float>("powerScale", _powerScale, 0.05);
get_parameter_or<bool>("leftInverted", _leftInvert, false);
get_parameter_or<bool>("rightInverted", _rightInvert, false);
_subscription = this->create_subscription<geometry_msgs::msg::Twist>(
"cmd_vel", 10, std::bind(&MotorSubscriber::cmdVelCallback, this, _1));
}
private:
uint8_t getId()
{
uint8_t id = 0;
#ifndef _WIN32
    int ret = i2c_read(&_i2cDevice, MotorCommand_Id, &id, 1);
    if (ret == -1 || (size_t)ret != 1)
    {
      RCLCPP_INFO(rclcpp::get_logger("motor"), "failed to read motor id: [%d]", ret);
    }
#endif
return id;
}
// interesting that ready != busy
bool ready()
{
uint8_t status = 0;
#ifndef _WIN32
    int ret = i2c_read(&_i2cDevice, MotorCommand_Status, &status, 1);
    if (ret == -1 || (size_t)ret != 1)
    {
      RCLCPP_INFO(rclcpp::get_logger("motor"), "failed to read motor status: [%d]", ret);
    }
#endif
if (status != 0xFF &&
(status & MotorStatusBit_Enum) == MotorStatusBit_Enum)
{
return true;
}
else
{
return false;
}
}
bool busy()
{
uint8_t status = 0;
#ifndef _WIN32
    int ret = i2c_read(&_i2cDevice, MotorCommand_Status, &status, 1);
    if (ret == -1 || (size_t)ret != 1)
    {
      RCLCPP_INFO(rclcpp::get_logger("motor"), "failed to read motor status: [%d]", ret);
    }
#endif
if (status & (MotorStatusBit_Busy | MotorStatusBit_Read | MotorStatusBit_Write))
{
return true;
}
else
{
return false;
}
}
void enable()
{
command(MotorCommand_Enable, 0x01);
}
void disable()
{
command(MotorCommand_Enable, 0x00);
}
void cmdVelCallback(const geometry_msgs::msg::Twist::SharedPtr msg)
{
//enable();
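    // Simplified differential-drive mixing (explanatory note, not from the
    // original source): add/subtract the turn rate to the forward velocity,
    // then convert wheel surface speed to motor power via _wheelRadius and
    // _powerScale below. _wheelSeparation is not folded into this mixing.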
double speedRight = msg->linear.x + msg->angular.z;
double speedLeft = msg->linear.x - msg->angular.z;
motor(0, (_rightInvert? -1.0 : 1.0) * (speedRight / _wheelRadius) * _powerScale);
motor(1, (_leftInvert? -1.0 : 1.0) * (speedLeft / _wheelRadius) * _powerScale);
}
void motor(uint8_t channel, double power)
{
double powerAdjustment = std::abs(power);
if (powerAdjustment > 1.0)
{
powerAdjustment = 1.0;
}
    // Motor controller takes values 0..255, with 128 as the neutral midpoint
uint8_t powerLevel = (uint8_t)(powerAdjustment * 127.0);
if (power < 0.0)
{
command(MotorCommand_Drive0 + channel, kMotorNeutral - powerLevel);
}
else
{
command(MotorCommand_Drive0 + channel, kMotorNeutral + powerLevel);
}
}
void command(uint8_t command, uint8_t value)
{
#ifndef _WIN32
int ret = i2c_ioctl_write(&_i2cDevice, command, &value, 1);
if (ret == -1 || (size_t)ret != 1)
{
RCLCPP_INFO(rclcpp::get_logger("motor"), "failed to write to motor controller: [%d]", ret);
}
#endif
}
rclcpp::Subscription<geometry_msgs::msg::Twist>::SharedPtr _subscription;
#ifndef _WIN32
int _i2cFileDescriptor;
I2CDevice _i2cDevice;
#endif
float _wheelSeparation;
float _wheelRadius;
float _powerScale;
bool _leftInvert;
bool _rightInvert;
uint8_t _id;
};
int main(int argc, char * argv[])
{
rclcpp::init(argc, argv);
auto node = std::make_shared<MotorSubscriber>();
node->declare_parameter("wheelSeparation");
node->declare_parameter("wheelRadius");
node->start();
rclcpp::spin(node);
rclcpp::shutdown();
return 0;
}
|
{"hexsha": "7d3ef71253f074854a6de38c9bfbafd43a1a4981", "size": 6948, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "polyhobbyist/ros_qwiic_motor", "max_stars_repo_head_hexsha": "fcbeefe94fab2e37300f0daecf551f2ef807b02c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main.cpp", "max_issues_repo_name": "polyhobbyist/ros_qwiic_motor", "max_issues_repo_head_hexsha": "fcbeefe94fab2e37300f0daecf551f2ef807b02c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.cpp", "max_forks_repo_name": "polyhobbyist/ros_qwiic_motor", "max_forks_repo_head_hexsha": "fcbeefe94fab2e37300f0daecf551f2ef807b02c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5190839695, "max_line_length": 106, "alphanum_fraction": 0.6095279217, "num_tokens": 1872}
|
import numpy as np
from scipy.integrate import ode
from .common import validate_tol, validate_first_step, warn_extraneous
from .base import OdeSolver, DenseOutput
class LSODA(OdeSolver):
"""Adams/BDF method with automatic stiffness detection and switching.
This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
automatically between the nonstiff Adams method and the stiff BDF method.
The method was originally detailed in [2]_.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
min_step : float, optional
Minimum allowed step size. Default is 0.0, i.e. the step size is not
bounded and determined solely by the solver.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : None or callable, optional
Jacobian matrix of the right-hand side of the system with respect to
``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
equal to ``d f_i / d y_j``. The function will be called as
``jac(t, y)``. If None (default), the Jacobian will be
approximated by finite differences. It is generally recommended to
provide the Jacobian rather than relying on a finite-difference
approximation.
lband, uband : int or None
Parameters defining the bandwidth of the Jacobian,
i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
these requires your jac routine to return the Jacobian in the packed format:
the returned array must have ``n`` columns and ``uband + lband + 1``
rows in which Jacobian diagonals are written. Specifically
``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
in `scipy.linalg.solve_banded` (check for an illustration).
These parameters can be also used with ``jac=None`` to reduce the
number of Jacobian elements estimated by finite differences.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. A vectorized
implementation offers no advantages for this solver. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
References
----------
.. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [2] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
"""
def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
uband=None, vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)
if first_step is None:
first_step = 0 # LSODA value for automatic selection.
else:
first_step = validate_first_step(first_step, t0, t_bound)
first_step *= self.direction
if max_step == np.inf:
max_step = 0 # LSODA value for infinity.
elif max_step <= 0:
raise ValueError("`max_step` must be positive.")
if min_step < 0:
raise ValueError("`min_step` must be nonnegative.")
rtol, atol = validate_tol(rtol, atol, self.n)
if jac is None: # No lambda as PEP8 insists.
def jac():
return None
solver = ode(self.fun, jac)
solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
min_step=min_step, first_step=first_step,
lband=lband, uband=uband)
solver.set_initial_value(y0, t0)
# Inject t_bound into rwork array as needed for itask=5.
solver._integrator.rwork[0] = self.t_bound
solver._integrator.call_args[4] = solver._integrator.rwork
self._lsoda_solver = solver
def _step_impl(self):
solver = self._lsoda_solver
integrator = solver._integrator
# From lsoda.step and lsoda.integrate itask=5 means take a single
# step and do not go past t_bound.
itask = integrator.call_args[2]
integrator.call_args[2] = 5
solver._y, solver.t = integrator.run(
solver.f, solver.jac, solver._y, solver.t,
self.t_bound, solver.f_params, solver.jac_params)
integrator.call_args[2] = itask
if solver.successful():
self.t = solver.t
self.y = solver._y
# From LSODA Fortran source njev is equal to nlu.
self.njev = integrator.iwork[12]
self.nlu = integrator.iwork[12]
return True, None
else:
return False, 'Unexpected istate in LSODA.'
def _dense_output_impl(self):
iwork = self._lsoda_solver._integrator.iwork
rwork = self._lsoda_solver._integrator.rwork
order = iwork[14]
h = rwork[11]
yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
(self.n, order + 1), order='F').copy()
return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
class LsodaDenseOutput(DenseOutput):
def __init__(self, t_old, t, h, order, yh):
super(LsodaDenseOutput, self).__init__(t_old, t)
self.h = h
self.yh = yh
self.p = np.arange(order + 1)
def _call_impl(self, t):
if t.ndim == 0:
x = ((t - self.t) / self.h) ** self.p
else:
x = ((t - self.t) / self.h) ** self.p[:, None]
return np.dot(self.yh, x)
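if __name__ == '__main__':  # pragma: no cover
    # Hypothetical usage sketch (not part of the original module): in practice
    # this solver is selected through scipy.integrate.solve_ivp rather than
    # instantiated directly.
    from scipy.integrate import solve_ivp
    sol = solve_ivp(lambda t, y: -0.5 * y, (0.0, 10.0), [1.0], method='LSODA')
    print(sol.t[-1], sol.y[0, -1])  # approximately (10.0, exp(-5))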
|
{"hexsha": "ab37af3980fd0f544de1e2ecbe7229276187d8cb", "size": 8108, "ext": "py", "lang": "Python", "max_stars_repo_path": "ServidorPython/python32_web/Lib/site-packages/scipy/integrate/_ivp/lsoda.py", "max_stars_repo_name": "mak213k/Servidor_automatizado_python", "max_stars_repo_head_hexsha": "4403ef8027a2f814220baacc95856cf5fbf01d21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 366, "max_stars_repo_stars_event_min_datetime": "2019-04-07T20:34:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:35:38.000Z", "max_issues_repo_path": "venv/Lib/site-packages/scipy/integrate/_ivp/lsoda.py", "max_issues_repo_name": "uncledragon/CalibrationLibrary", "max_issues_repo_head_hexsha": "952abcf471b819b6b6dfa23b6d5dd248155f9dbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-05-19T04:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-22T11:28:57.000Z", "max_forks_repo_path": "venv/Lib/site-packages/scipy/integrate/_ivp/lsoda.py", "max_forks_repo_name": "uncledragon/CalibrationLibrary", "max_forks_repo_head_hexsha": "952abcf471b819b6b6dfa23b6d5dd248155f9dbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2019-04-08T00:58:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T23:04:28.000Z", "avg_line_length": 42.0103626943, "max_line_length": 84, "alphanum_fraction": 0.6274050321, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2049}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.quantity` module.
"""
import unittest
import numpy
import os
from rmgpy.cantherm import CanTherm
import rmgpy.constants as constants
################################################################################
class CommonTest(unittest.TestCase):
"""
Contains unit tests of the Cantherm common functions.
"""
def test_checkConformerEnergy(self):
"""
        Test the checkConformerEnergy function with a list of energies.
"""
Vlist = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654, -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643, -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743, -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]
Vlist = numpy.array(Vlist, numpy.float64)
Vdiff = (Vlist[0] - numpy.min(Vlist)) * constants.E_h * constants.Na / 1000
self.assertAlmostEqual(Vdiff / 2.7805169838282797, 1, 5)
class testCanthermJob(unittest.TestCase):
"""
Contains unit tests of the Cantherm module and its interactions with other RMG modules.
"""
def setUp(self):
cantherm = CanTherm()
jobList = cantherm.loadInputFile(os.path.join(os.path.dirname(os.path.abspath(__file__)),r'files/methoxy.py'))
pdepjob = jobList[-1]
self.kineticsjob = jobList[0]
pdepjob.activeJRotor = True
network = pdepjob.network
self.Nisom = len(network.isomers)
self.Nreac = len(network.reactants)
self.Nprod = len(network.products)
self.Npath = len(network.pathReactions)
self.PathReaction2 = network.pathReactions[2]
self.TminValue = pdepjob.Tmin.value
self.Tmaxvalue = pdepjob.Tmax.value
self.TmaxUnits = pdepjob.Tmax.units
self.TlistValue = pdepjob.Tlist.value
self.PminValue = pdepjob.Pmin.value
self.Pcount = pdepjob.Pcount
self.Tcount = pdepjob.Tcount
self.GenTlist = pdepjob.generateTemperatureList()
self.PlistValue = pdepjob.Plist.value
self.maximumGrainSizeValue = pdepjob.maximumGrainSize.value
self.method = pdepjob.method
self.rmgmode = pdepjob.rmgmode
# test Cantherm's interactions with the network module
def testNisom(self):
"""
Test the number of isomers identified.
"""
self.assertEqual(self.Nisom, 2, msg=None)
def testNreac(self):
"""
Test the number of reactants identified.
"""
self.assertEqual(self.Nreac, 1, msg=None)
def testNprod(self):
"""
Test the number of products identified.
"""
self.assertEqual(self.Nprod, 1, msg=None)
def testNpathReactions(self):
"""
Test the whether or not RMG mode is turned on.
"""
self.assertEqual(self.Npath, 3, msg=None)
def testPathReactions(self):
"""
Test a path reaction label
"""
self.assertEqual(str(self.PathReaction2), 'CH2OH <=> methoxy', msg=None)
# test Cantherm's interactions with the pdep module
def testTemperaturesUnits(self):
"""
Test the Temperature Units.
"""
self.assertEqual(str(self.TmaxUnits), 'K', msg=None)
def testTemperaturesValue(self):
"""
Test the temperature value.
"""
self.assertEqual(self.TminValue, 450.0, msg=None)
def testTemperaturesList(self):
"""
Test the temperature list.
"""
self.assertEqual(numpy.array_equal(self.TlistValue, numpy.array([450, 500, 678, 700])), True, msg=None)
def testPminValue(self):
"""
Test the minimum pressure value.
"""
self.assertEqual("%0.7f" % self.PminValue, str(0.0101325), msg=None)
def testPcount(self):
"""
        Test the number of pressures specified.
"""
self.assertEqual(self.Pcount, 7, msg=None)
def testTcount(self):
"""
        Test the number of temperatures specified.
"""
self.assertEqual(self.Tcount, 4, msg=None)
def testPressureList(self):
"""
Test the pressure list.
"""
self.assertEqual(numpy.array_equal(self.PlistValue, numpy.array([0.01, 0.1, 1, 3, 10, 100, 1000])), True, msg=None)
def testGenerateTemperatureList(self):
"""
Test the generated temperature list.
"""
self.assertEqual(list(self.GenTlist), [450.0, 500.0, 678.0, 700.0], msg=None)
def testmaximumGrainSizeValue(self):
"""
Test the max grain size value.
"""
self.assertEqual(self.maximumGrainSizeValue, 0.5, msg=None)
def testMethod(self):
"""
Test the master equation solution method chosen.
"""
self.assertEqual(self.method, 'modified strong collision', msg=None)
def testRmgmode(self):
"""
Test the whether or not RMG mode is turned on.
"""
self.assertEqual(self.rmgmode, False, msg=None)
    # Test Cantherm's interactions with the kinetics module
def testCalculateTSTRateCoefficient(self):
"""
        Test the calculation of the high-pressure-limit rate coefficient for one of the kinetics jobs at Tmin and Tmax.
"""
self.assertEqual("%0.7f" % self.kineticsjob.reaction.calculateTSTRateCoefficient(self.TminValue), str(46608.5904933), msg=None)
self.assertEqual("%0.5f" % self.kineticsjob.reaction.calculateTSTRateCoefficient(self.Tmaxvalue), str(498796.64535), msg=None)
def testTunneling(self):
"""
        Test whether or not tunneling has been included in a specific kinetics job.
"""
self.assertEqual(self.kineticsjob.reaction.transitionState.tunneling, None, msg=None)
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
{"hexsha": "9ce3c2bc359edc6d5d4eee4e11e9c8982dddf669", "size": 7475, "ext": "py", "lang": "Python", "max_stars_repo_path": "rmgpy/cantherm/commonTest.py", "max_stars_repo_name": "nyee/RMG-Py", "max_stars_repo_head_hexsha": "1c8816af340c106967bc877bee0ff9fe71607d7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-18T18:43:22.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-18T18:43:22.000Z", "max_issues_repo_path": "rmgpy/cantherm/commonTest.py", "max_issues_repo_name": "nyee/RMG-Py", "max_issues_repo_head_hexsha": "1c8816af340c106967bc877bee0ff9fe71607d7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rmgpy/cantherm/commonTest.py", "max_forks_repo_name": "nyee/RMG-Py", "max_forks_repo_head_hexsha": "1c8816af340c106967bc877bee0ff9fe71607d7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-19T08:05:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-19T08:05:21.000Z", "avg_line_length": 37.1890547264, "max_line_length": 404, "alphanum_fraction": 0.6375919732, "include": true, "reason": "import numpy", "num_tokens": 1828}
|
import jax.numpy as jnp
import haiku as hk
class RelationNetwork(hk.nets.MLP):
    def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
        num_inputs = inputs.shape[-2]
        # Build all ordered pairs of input objects by broadcasting: along the two
        # new pair axes, left repeats each object row-wise and right column-wise.
        left = jnp.expand_dims(inputs, axis=-2).repeat(num_inputs, axis=-2)
        right = jnp.expand_dims(inputs, axis=-3).repeat(num_inputs, axis=-3)
        # Score every (left, right) pair with the underlying MLP, then aggregate
        # the pairwise relation vectors by averaging over one pair axis.
        concatenated = jnp.concatenate([left, right], axis=-1)
        outputs = super().__call__(concatenated)
        return jnp.mean(outputs, axis=-2)
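# Hypothetical usage sketch (not part of the original module): wrap the module
# in hk.transform and apply it to a batch of per-object embeddings. The shapes
# and layer sizes below are illustrative only.
if __name__ == '__main__':
    import jax

    def forward(x):
        return RelationNetwork([64, 32])(x)

    net = hk.transform(forward)
    x = jnp.ones((8, 5, 16))  # (batch, num_objects, feature_dim)
    params = net.init(jax.random.PRNGKey(0), x)
    y = net.apply(params, jax.random.PRNGKey(0), x)
    print(y.shape)  # (8, 5, 32): one aggregated relation vector per object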
|
{"hexsha": "a721d714829670c803d6877c501e91697c07b628", "size": 489, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax_meta/modules/relation_network.py", "max_stars_repo_name": "tristandeleu/jax-meta-learning", "max_stars_repo_head_hexsha": "3e83cc1be77dd99ad7539cbcb47536097e896d3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-01T00:47:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T01:48:17.000Z", "max_issues_repo_path": "jax_meta/modules/relation_network.py", "max_issues_repo_name": "tristandeleu/jax-meta-learning", "max_issues_repo_head_hexsha": "3e83cc1be77dd99ad7539cbcb47536097e896d3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jax_meta/modules/relation_network.py", "max_forks_repo_name": "tristandeleu/jax-meta-learning", "max_forks_repo_head_hexsha": "3e83cc1be77dd99ad7539cbcb47536097e896d3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-06T16:03:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T16:03:19.000Z", "avg_line_length": 32.6, "max_line_length": 76, "alphanum_fraction": 0.6625766871, "include": true, "reason": "import jax", "num_tokens": 131}
|
Alice J. Gonzales is a Rocklin resident who has held several positions within the state government. She was appointed Director of the California Department of Aging by Governor Deukmejian in 1983. From 1990 until 1998, she was Director of the state's Employment Development Department, and she also served on the UC Board of Regents. She had a particular interest in using her posts to help the disadvantaged and impoverished. As a regent, she also focused greatly on improving the university's medical programs and hospitals. In recognition of her job dedication and generous nature, she was awarded the UC Davis Medal on May 20, 2002.
|
{"hexsha": "8c3127241d1dd1c96bf4f70807019a97f90fb08a", "size": 640, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Alice_Gonzales.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Alice_Gonzales.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Alice_Gonzales.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 320.0, "max_line_length": 639, "alphanum_fraction": 0.8109375, "num_tokens": 137}
|
export objective, smooth_objective
# NOTE: RobustLoss are not always everywhere smooth but "smooth-enough".
const SmoothLoss = Union{L2Loss, LogisticLoss, MultinomialLoss, RobustLoss}
"""
$SIGNATURES
Return the objective function (sum of loss + penalty) of a Generalized Linear Model.
"""
objective(glr::GLR, n) = glr.loss + glr.penalty * ifelse(glr.scale_penalty_with_samples, n, 1.)
"""
$SIGNATURES
Return a function computing the objective at a given point `θ`.
Note that the [`apply_X`](@ref) takes care of a potential intercept.
"""
objective(glr::GLR, X, y; c::Int=0) =
θ -> objective(glr, size(X, 1))(y, apply_X(X, θ, c), view_θ(glr, θ))
"""
$SIGNATURES
Return a function computing the smooth part of the objective at a given
evaluation point `θ`.
"""
smooth_objective(glr::GLR, X, y; c::Int=0) =
θ -> smooth_objective(glr, size(X, 1))(y, apply_X(X, θ, c), view_θ(glr, θ))
"""
$SIGNATURES
Return the smooth part of the objective function of a GLR.
"""
smooth_objective(glr::GLR{<:SmoothLoss,<:ENR}, n) = glr.loss + get_l2(glr.penalty) * ifelse(glr.scale_penalty_with_samples, n, 1.)
smooth_objective(::GLR) = @error "Case not implemented yet."
"""
$SIGNATURES
Return a model corresponding to the smooth part of the objective.
"""
get_smooth(glr::GLR) =
GLR(glr.loss, get_l2(glr.penalty),
glr.fit_intercept, glr.penalize_intercept, glr.scale_penalty_with_samples)
"""
$SIGNATURES
Helper function to compute the residuals.
"""
function get_residuals!(r, X, θ, y)
apply_X!(r, X, θ)
r .-= y
end
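# Hypothetical usage sketch (illustrative comment only, not from the original
# source): with a preallocated residual vector,
#
#     r = similar(y)
#     get_residuals!(r, X, θ, y)    # in place: r ≈ apply_X(X, θ) .- y
#
# so repeated objective/gradient evaluations avoid allocating fresh vectors.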
|
{"hexsha": "026f0ffd96b036268de23b974e4660b718b4caf4", "size": 1544, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/glr/utils.jl", "max_stars_repo_name": "jbrea/MLJLinearModels.jl", "max_stars_repo_head_hexsha": "d4c7a7f302e72072ddf0af553b1ad1ddd1b1569e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-08-02T23:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T14:16:55.000Z", "max_issues_repo_path": "src/glr/utils.jl", "max_issues_repo_name": "jbrea/MLJLinearModels.jl", "max_issues_repo_head_hexsha": "d4c7a7f302e72072ddf0af553b1ad1ddd1b1569e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2021-07-06T00:01:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T23:51:51.000Z", "max_forks_repo_path": "src/glr/utils.jl", "max_forks_repo_name": "jbrea/MLJLinearModels.jl", "max_forks_repo_head_hexsha": "d4c7a7f302e72072ddf0af553b1ad1ddd1b1569e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-06T00:09:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T23:45:08.000Z", "avg_line_length": 25.3114754098, "max_line_length": 130, "alphanum_fraction": 0.704015544, "num_tokens": 473}
|
import os
import argparse
import pprint
import torch
import json
import cv2
import numpy as np
import EOC.spring.linklink as link
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from easydict import EasyDict
from torch.autograd import Variable
from EOC.prototype.solver.cls_solver import ClsSolver
from EOC.prototype.utils.dist import link_dist
from EOC.prototype.utils.misc import makedir, create_logger, get_logger, modify_state
from EOC.prototype.data import build_imagenet_test_dataloader
from EOC.prototype.data import build_custom_dataloader
class Inference(ClsSolver):
def __init__(self, config):
self.image_dir = config["image_dir"]
self.meta_file = config.get("meta_file", "")
self.output = config.get("output", "inference_results")
self.recover = config.get("recover", "")
self.cam = config.get("cam", False)
self.visualize = config.get("visualize", False)
self.sample = config.get("sample", -1)
self.feature_name = config.get("name", "module.layer4")
if "module" not in self.feature_name:
self.feature_name = "module." + self.feature_name
self.feature = None
self.gradient = None
super(Inference, self).__init__(config["config"])
def setup_env(self):
# dist
self.dist = EasyDict()
self.dist.rank, self.dist.world_size = link.get_rank(), link.get_world_size()
self.prototype_info.world_size = self.dist.world_size
# directories
self.path = EasyDict()
self.path.root_path = os.path.dirname(self.config_file)
self.path.result_path = os.path.abspath(self.output)
makedir(self.path.result_path)
# logger
create_logger(os.path.join(self.path.root_path, 'log.txt'))
self.logger = get_logger(__name__)
self.logger.info(f'config: {pprint.pformat(self.config)}')
if 'SLURM_NODELIST' in os.environ:
self.logger.info(f"hostnames: {os.environ['SLURM_NODELIST']}")
# load pretrain checkpoint
if self.recover != "":
self.state = torch.load(self.recover, 'cpu')
self.logger.info(f"Recovering from {self.recover}, keys={list(self.state.keys())}")
elif hasattr(self.config.saver, 'pretrain'):
self.state = torch.load(self.config.saver.pretrain.path, 'cpu')
self.logger.info(f"Recovering from {self.config.saver.pretrain.path}, keys={list(self.state.keys())}")
if hasattr(self.config.saver.pretrain, 'ignore'):
self.state = modify_state(self.state, self.config.saver.pretrain.ignore)
else:
self.state = {}
self.state['last_iter'] = 0
# others
torch.backends.cudnn.benchmark = True
def build_data(self):
self.config.data.max_iter = self.config.lr_scheduler.kwargs.max_iter
self.config.data.last_iter = self.state['last_iter']
root_dir, input_file = self.generate_custom_data()
self.config.data.test.root_dir = root_dir
self.config.data.test.meta_file = input_file
if self.config.data.get('type', 'imagenet') == 'imagenet':
self.val_data = build_imagenet_test_dataloader(self.config.data)
else:
self.val_data = build_custom_dataloader('test', self.config.data)
def generate_custom_data(self):
if self.meta_file != "" and os.path.exists(self.meta_file):
return self.image_dir, self.meta_file
input_file = os.path.join(self.output, "tmp_meta.json")
image_dir = self.image_dir
if os.path.isfile(self.image_dir):
image_dir = os.path.abspath(os.path.dirname(self.image_dir))
if self.dist.rank == 0:
with open(input_file, "w") as output:
output.write(json.dumps({"filename": os.path.basename(self.image_dir)},
ensure_ascii=False) + '\n')
else:
if self.dist.rank == 0:
with open(input_file, "w") as output:
meta_list = []
for root, dirs, files in os.walk(self.image_dir, topdown=False):
for name in files:
abs_path = os.path.join(root, name)
meta_list.append(abs_path[len(self.image_dir):].lstrip("/"))
sample_num = len(meta_list)
if 0 < self.sample < 1:
sample_num = int(self.sample * sample_num)
elif self.sample > 1:
                        sample_num = min(sample_num, int(self.sample))
for idx in range(sample_num):
output.write(json.dumps({"filename": meta_list[idx], "label_name": "abs", "label": 1},
ensure_ascii=False) + '\n')
link.barrier()
return image_dir, input_file
def paint(self, filename, pred, label, outdir):
num = len(filename)
for idx in range(num):
ax, fig, h, w = self.get_axis(filename[idx])
self.paint_one_image(pred[idx], label[idx], ax, h, w)
out_name = os.path.join(outdir, os.path.basename(filename[idx]))
fig.savefig(out_name, dpi=200)
plt.close('all')
@staticmethod
def paint_one_image(pred, label, ax, h, w):
font_sz = max(min(np.log(h) / np.log(100), np.log(w) / np.log(100)), 1)
x1 = w // 8
y1 = h // 8
ax.text(
x1, y1,
f"cls {int(label)}, score:{pred[int(label)]:.3f}",
fontsize=font_sz + 10,
family='serif',
color="r"
)
@staticmethod
def get_axis(img_path):
assert os.path.exists(img_path), f"check img file path, {img_path}"
img = cv2.imread(img_path)[:, :, (2, 1, 0)]
fig = plt.figure(frameon=False)
fig.set_size_inches(img.shape[1] / 200, img.shape[0] / 200)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
ax.imshow(img)
return ax, fig, img.shape[0], img.shape[1]
def inference(self):
self.model.eval()
res_file = os.path.join(self.output, f'results.txt.rank{self.dist.rank}')
writer = open(res_file, 'w')
for batch_idx, batch in enumerate(self.val_data['loader']):
input = batch['image']
input = input.cuda()
# compute output
logits = self.model(input)
scores = F.softmax(logits, dim=1)
# compute prediction
_, preds = logits.data.topk(k=1, dim=1)
preds = preds.view(-1)
# update batch information
batch.update({'prediction': preds.detach()})
batch.update({'score': scores.detach()})
# save prediction information
if self.cam:
heatmap = self.gradCam(input)
for idx in range(len(heatmap)):
basename = os.path.basename(batch["filename"][idx])
ext = basename.split(".")[-1]
basename = basename.replace("." + ext, "_cam" + "." + ext)
heatmap[idx].save(os.path.join(self.output, basename))
if self.visualize:
self.paint(batch["filename"], scores, preds, self.output)
self.val_data['loader'].dataset.dump(writer, batch)
writer.close()
link.barrier()
return
def save_feature(self, module, input, output):
self.feature = output
def save_gradient(self, module, grad_in, grad_out):
self.gradient = grad_out[0].detach()
def gradCam(self, x):
model = self.model.eval()
image_size = (x.size(-1), x.size(-2))
datas = Variable(x, requires_grad=True)
heat_maps = []
for i in range(datas.size(0)):
feature = datas[i].unsqueeze(0)
img = datas[i].data.cpu().numpy()
img = img - np.min(img)
if np.max(img) != 0:
img = img / np.max(img)
for name, module in self.model.named_modules():
if name == self.feature_name:
module.register_forward_hook(self.save_feature)
module.register_backward_hook(self.save_gradient)
feature = model(feature)
classes = F.softmax(feature, dim=1)
one_hot, _ = classes.max(dim=-1)
one_hot.backward()
weight = self.gradient.mean(dim=-1, keepdim=True).mean(dim=-2, keepdim=True)
mask = F.relu((weight * self.feature).sum(dim=1)).squeeze(0)
mask = cv2.resize(mask.data.cpu().numpy().astype(np.float32), image_size)
mask = mask - np.min(mask)
if np.max(mask) != 0:
mask = mask / np.max(mask)
heat_map = np.float32(cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET))
cam = heat_map + np.float32((np.uint8(img.transpose((1, 2, 0)) * 255)))
cam = cam - np.min(cam)
if np.max(cam) != 0:
cam = cam / np.max(cam)
heat_maps.append(transforms.ToPILImage()(transforms.ToTensor()(
cv2.cvtColor(np.uint8(255 * cam), cv2.COLOR_BGR2RGB))))
return heat_maps
@link_dist
def main():
parser = argparse.ArgumentParser(description='Inference Solver')
parser.add_argument('--config', required=True, type=str, help="Prototype task yaml")
    parser.add_argument('--recover', default="", help="Path of the model checkpoint to recover for visualization.")
    parser.add_argument('-i', '--image_dir', required=True, dest="image_dir", type=str,
                        help="The image directory that you want to visualize.")
    parser.add_argument('-m', '--meta_file', required=False, dest="meta_file", type=str,
                        help="The prototype custom meta file that you want to visualize. "
                             "If this argument is not provided, we will visualize the images in {image_dir}.")
parser.add_argument('-o', '--output', default="./inference_resuts", dest="output",
help="the folder where results file or images will be saved.")
    parser.add_argument('--visualize', default=True, help="Whether to paint the class and score on images for visualization.")
    parser.add_argument('--sample', default=-1, type=float,
                        help="If the given number is -1, keep all results; "
                             "if 0 < given number <= 1, sample {given number * len(image_dir)} images; "
                             "if given number > 1, sample that many images.")
    parser.add_argument('--cam', default=False,
                        help="Whether to save Grad-CAM results. See https://arxiv.org/abs/1610.02391 for details.")
    parser.add_argument('--name', default="module.layer4",
                        help="The name of the last feature-extractor layer for which to visualize "
                             "Grad-CAM results, e.g. 'layer4' in the ResNet series.")
args = parser.parse_args()
# build solver
inference_helper = Inference(args.__dict__)
inference_helper.inference()
if __name__ == '__main__':
main()
|
{"hexsha": "91afeb5db8968eca23ef368aaec2460aff658ff2", "size": 11325, "ext": "py", "lang": "Python", "max_stars_repo_path": "EOC/prototype/tools/inference.py", "max_stars_repo_name": "double-fire-0/SystemNoise", "max_stars_repo_head_hexsha": "ab042dd54371482a18117eb13f816a7472e51590", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EOC/prototype/tools/inference.py", "max_issues_repo_name": "double-fire-0/SystemNoise", "max_issues_repo_head_hexsha": "ab042dd54371482a18117eb13f816a7472e51590", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EOC/prototype/tools/inference.py", "max_forks_repo_name": "double-fire-0/SystemNoise", "max_forks_repo_head_hexsha": "ab042dd54371482a18117eb13f816a7472e51590", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4464285714, "max_line_length": 114, "alphanum_fraction": 0.5813686534, "include": true, "reason": "import numpy", "num_tokens": 2513}
|
! This test code tests the correct handling of labels on the if-stmt.
integer i,m,n
do 20 m=1,n
i = m
20 if (.true.) i = 0
end
|
{"hexsha": "dafb063fc1f2aada918e6a60cee0a214fc90b833", "size": 140, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/Fortran_tests/test2010_133.f90", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/test2010_133.f90", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/test2010_133.f90", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 15.5555555556, "max_line_length": 69, "alphanum_fraction": 0.6142857143, "num_tokens": 49}
|
program facbench
use fmzm
implicit none
integer :: i
type(im) :: res
character(10000) :: out
res = 0
do i = 1, 3000
res = res + fac(i)
end do
call im_form('i10000', res, out)
print '(a)', trim(adjustl(out))
contains
type(im) function fac(n)
integer, intent(in) :: n
integer :: i
fac = 1
do i = 1, n
fac = fac * i
end do
end function fac
end program facbench
|
{"hexsha": "af79a86d327b7654d28f9a09448f18a4d3515394", "size": 421, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "fortran/fac-bench.f90", "max_stars_repo_name": "robindaumann/fac-bench", "max_stars_repo_head_hexsha": "57d040514bdd541308c44b831c631fc16e20f026", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-08-19T17:33:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-31T16:36:08.000Z", "max_issues_repo_path": "fortran/fac-bench.f90", "max_issues_repo_name": "robindaumann/fac-bench", "max_issues_repo_head_hexsha": "57d040514bdd541308c44b831c631fc16e20f026", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-31T21:01:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-24T09:43:11.000Z", "max_forks_repo_path": "fortran/fac-bench.f90", "max_forks_repo_name": "robindaumann/fac-bench", "max_forks_repo_head_hexsha": "57d040514bdd541308c44b831c631fc16e20f026", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.0333333333, "max_line_length": 34, "alphanum_fraction": 0.5724465558, "num_tokens": 147}
|
from functools import partial
from typing import Optional
import numpy as np
from trackpy import bandpass
from starfish.imagestack.imagestack import ImageStack
from starfish.types import Number
from ._base import FilterAlgorithmBase
class Bandpass(FilterAlgorithmBase):
def __init__(
self, lshort: Number, llong: int, threshold: Number, truncate: Number=4,
is_volume: bool=False, **kwargs) -> None:
"""
Parameters
----------
lshort : float
filter frequencies below this value
llong : int
filter frequencies above this odd integer value
threshold : float
zero any pixels below this intensity value
truncate : float
truncate the gaussian kernel, used by the gaussian filter, at this many standard
deviations
is_volume : bool
If True, 3d (z, y, x) volumes will be filtered. By default, filter 2-d (y, x) planes
kwargs
"""
self.lshort = lshort
self.llong = llong
self.threshold = threshold
self.truncate = truncate
self.is_volume = is_volume
_DEFAULT_TESTING_PARAMETERS = {"lshort": 1, "llong": 3, "threshold": 0.01}
@classmethod
def _add_arguments(cls, group_parser) -> None:
        group_parser.add_argument(
            "--lshort", type=float, help="filter frequencies below this value")
        group_parser.add_argument(
            "--llong", type=int, help="filter frequencies above this odd integer value")
        group_parser.add_argument(
            "--threshold", type=float, help="zero any pixels below this intensity value")
group_parser.add_argument(
"--truncate", default=4, type=float,
help="truncate the filter at this many standard deviations")
@staticmethod
def _bandpass(
image: np.ndarray, lshort: Number, llong: int, threshold: Number, truncate: Number
) -> np.ndarray:
"""Apply a bandpass filter to remove noise and background variation
Parameters
----------
image : np.ndarray
lshort : float
filter frequencies below this value
llong : int
filter frequencies above this odd integer value
threshold : float
zero any pixels below this intensity value
truncate : float
truncate the gaussian kernel, used by the gaussian filter, at this many standard
deviations
Returns
-------
np.ndarray :
bandpassed image
"""
bandpassed: np.ndarray = bandpass(
image, lshort=lshort, llong=llong, threshold=threshold,
truncate=truncate
)
return bandpassed
def run(
self, stack: ImageStack, in_place: bool=False, verbose: bool=False,
n_processes: Optional[int]=None
) -> ImageStack:
"""Perform filtering of an image stack
Parameters
----------
stack : ImageStack
Stack to be filtered.
in_place : bool
if True, process ImageStack in-place, otherwise return a new stack
verbose : bool
if True, report the filtering progress across the tiles or volumes of the ImageStack
n_processes : Optional[int]
Number of parallel processes to devote to calculating the filter
Returns
-------
ImageStack :
If in-place is False, return the results of filter as a new stack. Otherwise return the
original stack.
"""
bandpass_ = partial(
self._bandpass,
lshort=self.lshort, llong=self.llong, threshold=self.threshold, truncate=self.truncate
)
result = stack.apply(
bandpass_,
verbose=verbose, in_place=in_place, is_volume=self.is_volume, n_processes=n_processes
)
return result
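if __name__ == "__main__":
    # Usage sketch (not part of the original module): apply the filter to a
    # small synthetic stack. ImageStack.synthetic_stack() is assumed to exist
    # in this starfish version; the parameter values are the defaults from
    # _DEFAULT_TESTING_PARAMETERS above.
    demo_stack = ImageStack.synthetic_stack()
    demo_filter = Bandpass(lshort=1, llong=3, threshold=0.01)
    filtered = demo_filter.run(demo_stack, in_place=False)
    print(filtered)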
|
{"hexsha": "e56268d59f87bbc11d430e08f16c44d074e6e0da", "size": 3945, "ext": "py", "lang": "Python", "max_stars_repo_path": "starfish/image/_filter/bandpass.py", "max_stars_repo_name": "vipulsinghal02/starfish", "max_stars_repo_head_hexsha": "c3d347954ad40a7a4be9a50d89974f5fbbc2919d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "starfish/image/_filter/bandpass.py", "max_issues_repo_name": "vipulsinghal02/starfish", "max_issues_repo_head_hexsha": "c3d347954ad40a7a4be9a50d89974f5fbbc2919d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "starfish/image/_filter/bandpass.py", "max_forks_repo_name": "vipulsinghal02/starfish", "max_forks_repo_head_hexsha": "c3d347954ad40a7a4be9a50d89974f5fbbc2919d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4322033898, "max_line_length": 100, "alphanum_fraction": 0.6073510773, "include": true, "reason": "import numpy", "num_tokens": 819}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.fftpack import fftshift, ifftshift, fft2, ifft2
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from scipy.signal import correlate2d as correlate
from scipy.signal import general_gaussian
from astropy.io import fits
from scipy import ndimage
from functools import partial
import time
import imreg_dft
import pyfits
## function to compute the FT of the focused and defocused images
def FT(im0, imk):
d0 = fft2(im0)
dk = fft2(imk)
return d0, dk
## function to define the Q matrix (Eq. 6 of L\"ofdahl & Scharmer, 1994)
def Q_matrix(t0,tk,reg,gamma):
tmp = np.abs(t0)**2 + gamma*np.abs(tk)**2 +reg
q = 1./(np.sqrt(tmp))
q2 = q*q
return q, q2
#
## function to compute the optimized object (Eq. 5 of L\"ofdahl & Scharmer, 1994)
def F_M(q2,d0,dk,t0,tk,filter,gamma):
F_M = filter*q2*(d0*np.conj(t0) + gamma*dk*np.conj(tk))
return F_M
## function to define the error metric to be minimized (Eq. 9 of L\"ofdahl & Scharmer, 1994)
def Error_metric(t0,tk,d0,dk,q,filter):
ef = filter*(dk*t0 - d0*tk)
ef = q*ef
EF = fft2((ef))
EF = EF.real
EF = EF-EF.mean()
return EF
##
def L_M(EF,size):
L_m = np.sum(np.abs(EF)**2)/(size**2)
return L_m
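## usage sketch (not from the original file): evaluate the error metric on
## two synthetic images. The reg, gamma and filter values are illustrative
## assumptions, and the flat OTFs t0/tk are placeholders.
if __name__ == '__main__':
    size = 64
    rng = np.random.default_rng(0)
    im0 = rng.random((size, size))
    imk = rng.random((size, size))
    d0, dk = FT(im0, imk)
    t0 = np.ones((size, size), dtype=complex)  # placeholder OTF, focused
    tk = np.ones((size, size), dtype=complex)  # placeholder OTF, defocused
    q, q2 = Q_matrix(t0, tk, 1e-10, 1.0)
    EF = Error_metric(t0, tk, d0, dk, q, 1.0)
    print(L_M(EF, size))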
|
{"hexsha": "89582ba10b7f4e3167f129665448a8c4b065de4f", "size": 1306, "ext": "py", "lang": "Python", "max_stars_repo_path": "PD.py", "max_stars_repo_name": "fakahil/PyPD", "max_stars_repo_head_hexsha": "eff5a1cd88abb7839177f2b73a9cbc0e9dfb9365", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-18T14:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T03:46:09.000Z", "max_issues_repo_path": "PD.py", "max_issues_repo_name": "fakahil/PyPD", "max_issues_repo_head_hexsha": "eff5a1cd88abb7839177f2b73a9cbc0e9dfb9365", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PD.py", "max_forks_repo_name": "fakahil/PyPD", "max_forks_repo_head_hexsha": "eff5a1cd88abb7839177f2b73a9cbc0e9dfb9365", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4098360656, "max_line_length": 92, "alphanum_fraction": 0.6776416539, "include": true, "reason": "import numpy,import scipy,from scipy,from astropy", "num_tokens": 426}
|
[STATEMENT]
lemma is_ta_empty_trim_reg:
"is_ta_eps_free (ta A) \<Longrightarrow> eps (ta (trim_reg A)) = {||}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_ta_eps_free (ta A) \<Longrightarrow> eps (ta (trim_reg A)) = {||}
[PROOF STEP]
by (auto simp: is_ta_eps_free_def trim_reg_def trim_ta_def ta_restrict_def)
|
{"llama_tokens": 139, "file": "FO_Theory_Rewriting_FOR_Check_Impl", "length": 1}
|
import numpy as np
from holoviews.core import NdOverlay
from holoviews.element import Polygons, Contours
from .testplot import TestMPLPlot, mpl_renderer
class TestPolygonPlot(TestMPLPlot):
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = mpl_renderer.get_plot(polygons)
for j, splot in enumerate(plot.subplots.values()):
artist = splot.handles['artist']
self.assertEqual(artist.get_array(), np.array([j]))
self.assertEqual(artist.get_clim(), (0, 4))
def test_polygon_with_hole_plot(self):
xs = [1, 2, 3]
ys = [2, 0, 7]
holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
paths = artist.get_paths()
self.assertEqual(len(paths), 1)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
def test_multi_polygon_hole_plot(self):
xs = [1, 2, 3, np.nan, 6, 7, 3]
ys = [2, 0, 7, np.nan, 7, 5, 2]
holes = [
[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],
[]
]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value'])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([1, 1]))
paths = artist.get_paths()
self.assertEqual(len(paths), 2)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
path2 = paths[1]
self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)]))
self.assertEqual(path2.codes, np.array([1, 2, 2]))
class TestContoursPlot(TestMPLPlot):
def test_contours_categorical_color(self):
path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat}
for cat in ('B', 'A', 'B')],
vdims='z').opts(plot=dict(color_index='z'))
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([1, 0, 1]))
|
{"hexsha": "3a8610ebd8cf96f32c91d9a0da64561659d95d51", "size": 2726, "ext": "py", "lang": "Python", "max_stars_repo_path": "holoviews/tests/plotting/matplotlib/testpathplot.py", "max_stars_repo_name": "jewfro-cuban/holoviews", "max_stars_repo_head_hexsha": "c59f847c3d05b6eea1b05d3e8162d9ea80428587", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "holoviews/tests/plotting/matplotlib/testpathplot.py", "max_issues_repo_name": "jewfro-cuban/holoviews", "max_issues_repo_head_hexsha": "c59f847c3d05b6eea1b05d3e8162d9ea80428587", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "holoviews/tests/plotting/matplotlib/testpathplot.py", "max_forks_repo_name": "jewfro-cuban/holoviews", "max_forks_repo_head_hexsha": "c59f847c3d05b6eea1b05d3e8162d9ea80428587", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-31T05:26:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-31T05:26:08.000Z", "avg_line_length": 39.5072463768, "max_line_length": 90, "alphanum_fraction": 0.5172413793, "include": true, "reason": "import numpy", "num_tokens": 916}
|
import sys
import multiprocessing
try:
from multiprocessing import shared_memory
except ImportError:
## check MP version
version = sys.version_info[:2]
version = float("%d.%d"%version)
if version < 3.8:
print("Upgrade to Python 3.8 to use multiprocessing with shared memory.")
import numpy as np
import os
def copyNumpyArrayToMPSharedMemory(
input_arr,
finally_flag=False,
loud=False):
""" must clean up anything that contains a reference to a shared
memory object. globals() must be purged before the shm_buffers
are unlinked or python will crash."""
## check MP version
version = sys.version_info[:2]
version = float("%d.%d"%version)
if version < 3.8:
raise OSError("Upgrade to Python 3.8 to use multiprocessing with shared memory.")
if not finally_flag:
raise BufferError(
"Set finally_flag=True to confirm "+
"that you understand the risks associated with shared memory "+
"and are prepared to unlink the returned buffer when you're done. "+
"In a try except finally clause! With great power comes great responsibility.")
shm = shared_memory.SharedMemory(create=True, size=input_arr.nbytes)
# Now create a NumPy array backed by shared memory
shm_arr = np.ndarray(input_arr.shape, dtype=input_arr.dtype, buffer=shm.buf)
shm_arr[:] = input_arr[:] # Copy the original data into shared memory
if loud:
print('Copied to the buffer.')
del input_arr
return shm,shm_arr
def copySnapshotNamesToMPSharedMemory(
arr_names,
snapdict,
**kwargs):
this_snapdict = {}
shm_buffers = []
for arr_name in arr_names:
shm_buffer,shm_arr = copyNumpyArrayToMPSharedMemory(snapdict[arr_name],**kwargs)
## track these shared memory buffers so they can be cleaned
## up later.
shm_buffers.append(shm_buffer)
this_snapdict[arr_name] = shm_arr
del snapdict,arr_names
return this_snapdict,shm_buffers
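if __name__ == '__main__':
    ## usage sketch (not part of the original module): copy an array into
    ## shared memory and unlink it in a finally clause, as required by the
    ## finally_flag contract above.
    shm, shm_arr = copyNumpyArrayToMPSharedMemory(
        np.arange(10), finally_flag=True)
    try:
        print(shm_arr.sum())
    finally:
        del shm_arr  ## drop all references before unlinking
        shm.close()
        shm.unlink()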
|
{"hexsha": "9f9ad5fa5410ee5d4947681a366c6eb5ef1dec26", "size": 2050, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/abg_python/parallel/multiproc_utils.py", "max_stars_repo_name": "agurvich/abg_python", "max_stars_repo_head_hexsha": "f76425481781e6e8e28caf9e8290c0b5b920ab91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-10T16:36:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-10T16:36:49.000Z", "max_issues_repo_path": "src/abg_python/parallel/multiproc_utils.py", "max_issues_repo_name": "agurvich/abg_python", "max_issues_repo_head_hexsha": "f76425481781e6e8e28caf9e8290c0b5b920ab91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/abg_python/parallel/multiproc_utils.py", "max_forks_repo_name": "agurvich/abg_python", "max_forks_repo_head_hexsha": "f76425481781e6e8e28caf9e8290c0b5b920ab91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-09-19T01:14:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T22:32:08.000Z", "avg_line_length": 31.5384615385, "max_line_length": 91, "alphanum_fraction": 0.68, "include": true, "reason": "import numpy", "num_tokens": 470}
|
# -*- coding: utf-8 -*-
# Author: Simone Marsili <simomarsili@gmail.com>
# License: BSD 3 clause
"""Classes for entropy estimators."""
import logging
from abc import ABC, abstractmethod # python >= 3.4
from functools import wraps
from inspect import isclass
import numpy
from numpy import PZERO, euler_gamma # pylint: disable=no-name-in-module
import ndd.fnsb
from ndd.base import BaseEstimator
from ndd.counts import CountsDistribution, check_k
from ndd.exceptions import AlphaError, NddError
from ndd.utils import as_class_name, register_class
logger = logging.getLogger(__name__)
__all__ = [
'EntropyEstimator',
'Plugin',
'MillerMadow',
'WolpertWolf',
'Nsb',
'Grassberger',
'AsymptoticNsb',
'AutoEstimator',
]
estimators = {}
def as_estimator(estimator):
"""Return an entropy estimator object from class/class name.
Parameters
----------
estimator : str or estimator class or estimator object
Returns
-------
estimator object
"""
if isinstance(estimator, str): # estimator name or label
name = as_class_name(estimator)
if name not in ndd.entropy_estimators:
raise NddError('%s is not a valid entropy estimator' % name)
return ndd.entropy_estimators[name]()
if isclass(estimator):
return estimator()
return estimator
def fit_function(fit): # pylint: disable=no-self-argument, missing-docstring
fit.__doc__ = EntropyEstimator.fit.__doc__
@wraps(fit)
def wrapper(obj, nk, k=None, zk=None):
nk = numpy.asarray(nk)
if zk is not None:
zk = numpy.asarray(zk)
k = check_k(k)
return fit(obj, nk, k=k, zk=zk)
return wrapper
def guess_alphabet_size(nk, zk=None, eps=1.e-3):
"""Guess a reasonable value for the cardinality."""
nsb = Nsb()
asym = AsymptoticNsb()
multiplier = 10
dk = numpy.log(multiplier)
if zk is not None:
k1 = numpy.sum(zk)
else:
k1 = numpy.sum([1 for n in nk if n > 0])
# k1 = k1 // 2
if not k1:
k1 = 1
h0 = nsb(nk=nk, k=k1, zk=zk)
try:
hasym = asym(nk=nk, zk=zk)
except NddError:
hasym = None # no coincidences
for _ in range(40):
k1 = round(k1 * multiplier)
h1 = nsb(nk, k=k1, zk=zk)
dh = (h1 - h0) / dk
if dh < eps:
break
if hasym and h1 >= hasym: # should return hasym
raise NddError
h0 = h1
return round(k1 / numpy.sqrt(multiplier)) # midpoint value
class EntropyEstimatorType(type(ABC), type(BaseEstimator)):
"""Metaclass for entropy estimators."""
def __new__(cls, name, bases, namespace, **kwargs):
estimator_class = type.__new__(cls, name, bases, namespace, **kwargs)
register_class(estimator_class, estimators)
return estimator_class
class EntropyEstimator(BaseEstimator, ABC, metaclass=EntropyEstimatorType):
"""
Base class for entropy estimators.
Attributes
----------
estimate_ : float
Entropy estimate
err_ : float or None
A measure of uncertainty in the estimate. None if not available.
"""
def __init__(self):
self.estimate_ = None
self.err_ = None
self.input_data_ndim = 1
def __call__(self, nk, k=None, zk=None):
"""Fit and return the estimated value."""
return self.fit(nk, k=k, zk=zk).estimate_
@property
def algorithm(self):
"""Estimator function name."""
return self.__class__.__name__
@staticmethod
def check_alpha(a):
"""Check concentration parameter/#pseudocount.
TODO: return None if alpha is None or alpha is 0
Parameters
----------
a : positive number
Concentration parameter or num. pseudocounts.
Returns
-------
a : float64
Raises
------
AlphaError
If a is not numeric or <=0.
"""
error_msg = 'alpha must be a positive number (got %r).' % a
if a is None:
raise AlphaError(error_msg)
try:
a = numpy.float64(a)
except ValueError:
raise AlphaError(error_msg)
if a <= 0:
raise AlphaError(error_msg)
return a
@abstractmethod
def fit(self, nk, k=None, zk=None):
"""
Compute an entropy estimate from nk.
Parameters
----------
nk : array_like, shape (n_bins,)
The number of occurrences of a set of bins.
k : int, optional
Number of bins. k >= len(nk).
Float values are valid input for whole numbers (e.g. k=1.e3).
Defaults to sum(nk > 0).
zk : array_like, optional
Counts distribution or "multiplicities". If passed, nk contains
the observed counts values.
Returns
-------
self : object
Returns the instance itself.
"""
class Plugin(EntropyEstimator):
"""Plugin (maximum likelihood) entropy estimator.
Insert the maximum likelihood estimate of the PMF from empirical
frequencies over bins into the entropy definition.
For alpha > 0, the estimate depends on k (the alphabet size).
Parameters
----------
alpha : float
        Add alpha pseudocounts to each frequency count. alpha >= 0.
Defaults to zero pseudocounts (plugin estimator).
Returns
-------
float
Entropy estimate.
"""
def __init__(self, alpha=None):
super(Plugin, self).__init__()
if alpha:
self.alpha = self.check_alpha(alpha)
else:
self.alpha = None
@fit_function
def fit(self, nk, k=None, zk=None):
self.err_ = numpy.inf
if zk is not None:
if k is None:
k = numpy.sum(zk[nk > 0])
if k == 1:
self.estimate_, self.err_ = PZERO, PZERO
return self
if self.alpha:
self.estimate_ = ndd.fnsb.pseudo_from_multiplicities(
nk, zk, k, self.alpha)
else:
self.estimate_ = ndd.fnsb.plugin_from_multiplicities(nk, zk)
else:
if k is None:
k = numpy.sum(nk > 0)
if k == 1:
self.estimate_, self.err_ = PZERO, PZERO
return self
if self.alpha:
self.estimate_ = ndd.fnsb.pseudo(nk, k, self.alpha)
else:
self.estimate_ = ndd.fnsb.plugin(nk)
return self
class PmfPlugin(EntropyEstimator):
"""Entropy from probability mass function array."""
@fit_function
def fit(self, nk, k=None, zk=None):
"""
Parameters
----------
nk : array-like
Probabilities over a set of bins.
Returns
-------
self : object
Returns the instance itself.
"""
self.estimate_ = ndd.fnsb.pmf_plugin(nk)
self.err_ = 0
return self
class MillerMadow(EntropyEstimator):
"""Miller-Madow entropy estimator.
Notes
-----
@article{miller1955note,
title={Note on the bias of information estimates},
author={Miller, George},
journal={Information theory in psychology: Problems and methods},
year={1955},
publisher={Free Press}
}
"""
@fit_function
def fit(self, nk, k=None, zk=None):
"""
Parameters
----------
nk : array-like
The number of occurrences of a set of bins.
Returns
-------
self : object
Returns the instance itself.
"""
plugin = Plugin()
if zk is not None:
k = numpy.sum(zk[nk > 0])
n = numpy.sum(nk * zk)
self.estimate_ = plugin(nk, k=k, zk=zk) + 0.5 * (k - 1) / n
else:
k = numpy.sum(nk > 0)
n = numpy.sum(nk)
self.estimate_ = plugin(nk) + 0.5 * (k - 1) / n
return self
class WolpertWolf(EntropyEstimator):
"""
Wolpert-Wolf entropy estimator.
Single Dirichlet prior with concentration parameter `alpha`.
Parameters
----------
alpha : float
        Concentration parameter, alpha > 0.0. The estimator uses a single
        Dirichlet prior with this concentration parameter.
Notes
-----
@article{wolpert1995estimating,
title={Estimating functions of probability distributions from a finite set of samples},
author={Wolpert, David H and Wolf, David R},
journal={Physical Review E},
volume={52},
number={6},
pages={6841},
year={1995},
publisher={APS}
}
"""
def __init__(self, alpha):
super().__init__()
self.alpha = self.check_alpha(alpha)
@fit_function
def fit(self, nk, k=None, zk=None):
if k is None:
            raise NddError('Wolpert-Wolf estimator needs k')
if k == 1:
self.estimate_, self.err_ = PZERO, PZERO
return self
if zk is not None:
self.estimate_, self.err_ = ndd.fnsb.ww_from_multiplicities(
nk, zk, k, self.alpha)
else:
self.estimate_, self.err_ = ndd.fnsb.ww(nk, k, self.alpha)
return self
class Nsb(EntropyEstimator):
"""
Nemenman-Shafee-Bialek (NSB) entropy estimator.
The estimate depends on k (the alphabet size).
Parameters
----------
alpha : float, optional
Concentration parameter. alpha > 0.0.
If alpha is passed, use a single Dirichlet prior
(Wolpert-Wolf estimator).
Default: use a mixture-of-Dirichlets prior (NSB estimator).
Notes
-----
@inproceedings{nemenman2002entropy,
title={Entropy and inference, revisited},
author={Nemenman, Ilya and Shafee, Fariel and Bialek, William},
booktitle={Advances in neural information processing systems},
pages={471--478},
year={2002}
}
@article{nemenman2004entropy,
title={Entropy and information in neural spike trains: Progress on the sampling problem},
author={Nemenman, Ilya and Bialek, William and Van Steveninck, Rob De Ruyter},
journal={Physical Review E},
volume={69},
number={5},
pages={056111},
year={2004},
publisher={APS}
}
"""
def __init__(self, alpha=None):
super(Nsb, self).__init__()
if alpha:
self.alpha = self.check_alpha(alpha)
else:
self.alpha = None
@fit_function
def fit(self, nk, k=None, zk=None):
if k is None:
raise NddError('NSB estimator needs k')
if k == 1:
self.estimate_, self.err_ = PZERO, PZERO
return self
if self.alpha is None:
if zk is not None:
self.estimate_, self.err_ = ndd.fnsb.nsb_from_multiplicities(
nk, zk, k)
else:
self.estimate_, self.err_ = ndd.fnsb.nsb(nk, k)
else: # wolpert-wolf estimator
estimator = WolpertWolf(self.alpha).fit(nk=nk, k=k, zk=zk)
self.estimate_ = estimator.estimate_
self.err_ = estimator.err_
return self
class AsymptoticNsb(EntropyEstimator):
"""
    Asymptotic NSB estimator for countably infinite distributions (or
    distributions of unknown cardinality).
    Intended specifically for the strongly under-sampled regime (k/N close
    to 1, where k is the number of distinct symbols in the samples and N
    the number of samples).
Notes
-----
@article{nemenman2004entropy,
title={Entropy and information in neural spike trains: Progress on the sampling problem},
author={Nemenman, Ilya and Bialek, William and Van Steveninck, Rob De Ruyter},
journal={Physical Review E},
volume={69},
number={5},
pages={056111},
year={2004},
publisher={APS}
}
@article{nemenman2011coincidences,
title={Coincidences and estimation of entropies of random variables with large cardinalities},
author={Nemenman, Ilya},
journal={Entropy},
volume={13},
number={12},
pages={2013--2023},
year={2011},
publisher={Molecular Diversity Preservation International}
}
"""
@fit_function
def fit(self, nk, k=None, zk=None):
if zk is None:
counts = CountsDistribution().fit(nk)
else:
counts = CountsDistribution(nk=nk, zk=zk)
if not counts.coincidences:
raise NddError('AsymptoticNSB estimator: no coincidences '
'in the data.')
if counts.sampling_ratio > 0.1:
logger.info('The AsymptoticNSB estimator should only be used '
'in the under-sampled regime.')
if k == 1:
self.estimate_, self.err_ = PZERO, PZERO
return self
self.estimate_ = (euler_gamma - numpy.log(2) +
2.0 * numpy.log(counts.n) -
ndd.fnsb.gamma0(counts.coincidences))
self.err_ = numpy.sqrt(ndd.fnsb.gamma1(counts.coincidences))
return self
class Grassberger(EntropyEstimator):
"""Grassberger's aymptotic bias coorection estimator.
see equation 35 in:
https://arxiv.org/pdf/physics/0307138.pdf
Notes
-----
@article{grassberger2003entropy,
title={Entropy estimates from insufficient samplings},
author={Grassberger, Peter},
journal={arXiv preprint physics/0307138},
year={2003}
}
"""
@staticmethod
def g_series():
"""Higher-order function storing terms of the series."""
GG = {}
gamma0 = ndd.fnsb.gamma0
log_two = numpy.log(2.0)
def gterm(n):
"""Sequence of reals for the Grassberger estimator."""
if n in GG:
return GG[n]
if n <= 2:
if n < 1:
value = 0.0
elif n == 1:
value = -euler_gamma - log_two
elif n == 2:
value = 2.0 + gterm(1)
else:
if n % 2 == 0:
value = gamma0((n + 1) / 2) + log_two
else:
value = gterm(n - 1)
GG[n] = value
return value
return gterm
@fit_function
def fit(self, nk, k=None, zk=None): # pylint: disable=unused-argument
gg = self.g_series() # init the G series
estimate = 0
if zk is not None:
n = numpy.sum(nk * zk)
for j, x in enumerate(nk):
if x:
estimate -= zk[j] * x * gg(x)
else:
n = numpy.sum(nk)
for x in nk:
if x:
estimate -= x * gg(x)
estimate = numpy.log(n) - estimate / n
self.estimate_ = estimate
return self
class AutoEstimator(EntropyEstimator):
"""Select the best estimator for the input data."""
def __init__(self):
super().__init__()
self.estimator = None
self.k = None
def guess(self, nk, k=None, zk=None):
"""Select the best estimator given arguments.
Returns
-------
k, estimator
"""
if k is not None: # has k?
self.k = k
self.estimator = Nsb()
return
if zk is None:
counts = CountsDistribution().fit(nk)
else:
counts = CountsDistribution(nk=nk, zk=zk)
if not counts.coincidences: # has coincidences?
logging.warning(
'Insufficient data (no coincidences found in counts). '
'Return plugin estimate.')
self.k = None
self.estimator = Plugin() # else Plugin estimator
return
if counts.sampling_ratio < 0.1: # is strongly under-sampled?
self.k = None
self.estimator = AsymptoticNsb()
return
self.k = guess_alphabet_size(nk=nk,
zk=zk) # guess a reasonable value for k
self.estimator = Nsb()
@fit_function
def fit(self, nk, k=None, zk=None):
self.guess(nk=nk, k=k, zk=zk)
self.estimator.fit(nk=nk, k=self.k, zk=zk)
self.estimate_ = self.estimator.estimate_
self.err_ = self.estimator.err_
return self
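if __name__ == '__main__':
    # Usage sketch (not part of the original module): compare a few
    # estimators on an illustrative counts vector; the numbers are arbitrary.
    example_counts = numpy.array([4, 12, 4, 5, 3, 1, 5, 1, 2, 2, 2, 2, 11, 3])
    print('plugin       :', Plugin()(example_counts))
    print('Miller-Madow :', MillerMadow()(example_counts))
    print('NSB, k=100   :', Nsb()(example_counts, k=100))
    print('auto         :', AutoEstimator()(example_counts))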
|
{"hexsha": "c95935280a60ae344492aaf6513301a6d008b450", "size": 16536, "ext": "py", "lang": "Python", "max_stars_repo_path": "ndd/estimators.py", "max_stars_repo_name": "simomarsili/ndd", "max_stars_repo_head_hexsha": "3a8f8f80116ddaf8666dd13b246a04c9806447a7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-01-25T21:42:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T02:12:11.000Z", "max_issues_repo_path": "ndd/estimators.py", "max_issues_repo_name": "simomarsili/ndd", "max_issues_repo_head_hexsha": "3a8f8f80116ddaf8666dd13b246a04c9806447a7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-22T19:15:33.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-06T12:37:24.000Z", "max_forks_repo_path": "ndd/estimators.py", "max_forks_repo_name": "simomarsili/ndd", "max_forks_repo_head_hexsha": "3a8f8f80116ddaf8666dd13b246a04c9806447a7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-31T07:53:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-31T07:53:22.000Z", "avg_line_length": 27.652173913, "max_line_length": 100, "alphanum_fraction": 0.5605950653, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4082}
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.losses.image_dissimilarity import LocalNormalizedCrossCorrelationLoss
TEST_CASES = [
[
{"in_channels": 3, "ndim": 3, "kernel_size": 3, "kernel_type": "rectangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 2, "kernel_size": 3, "kernel_type": "rectangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 2, "kernel_size": 3, "kernel_type": "triangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 2, "kernel_size": 3, "kernel_type": "gaussian"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 1, "kernel_size": 3, "kernel_type": "rectangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 1, "kernel_size": 3, "kernel_type": "triangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 1, "kernel_size": 3, "kernel_type": "gaussian"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3),
},
-1.0,
],
[
{"in_channels": 3, "ndim": 1, "kernel_size": 3, "kernel_type": "gaussian", "reduction": "sum"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(2, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(2, 3, 3),
},
-6.0,
],
[
{"in_channels": 3, "ndim": 3, "kernel_size": 3, "kernel_type": "rectangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3) ** 2,
},
-0.06062524,
],
[
{"in_channels": 3, "ndim": 3, "kernel_size": 5, "kernel_type": "triangular"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3) ** 2,
},
-0.923356,
],
[
{"in_channels": 3, "ndim": 3, "kernel_size": 3, "kernel_type": "gaussian"},
{
"input": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3),
"target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3) ** 2,
},
-1.306177,
],
]
class TestLocalNormalizedCrossCorrelationLoss(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_shape(self, input_param, input_data, expected_val):
result = LocalNormalizedCrossCorrelationLoss(**input_param).forward(**input_data)
np.testing.assert_allclose(result.detach().cpu().numpy(), expected_val, rtol=1e-4)
def test_ill_shape(self):
loss = LocalNormalizedCrossCorrelationLoss(in_channels=3, ndim=3)
# in_channel unmatch
with self.assertRaisesRegex(AssertionError, ""):
loss.forward(torch.ones((1, 2, 3, 3, 3), dtype=torch.float), torch.ones((1, 2, 3, 3, 3), dtype=torch.float))
# ndim unmatch
with self.assertRaisesRegex(AssertionError, ""):
loss.forward(torch.ones((1, 3, 3, 3), dtype=torch.float), torch.ones((1, 3, 3, 3), dtype=torch.float))
# input, target shape unmatch
with self.assertRaisesRegex(AssertionError, ""):
loss.forward(torch.ones((1, 3, 3, 3, 3), dtype=torch.float), torch.ones((1, 3, 4, 4, 4), dtype=torch.float))
def test_ill_opts(self):
input = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
with self.assertRaisesRegex(ValueError, ""):
LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_type="unknown")(input, target)
with self.assertRaisesRegex(ValueError, ""):
LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_type=None)(input, target)
with self.assertRaisesRegex(ValueError, ""):
LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_size=4)(input, target)
with self.assertRaisesRegex(ValueError, ""):
LocalNormalizedCrossCorrelationLoss(in_channels=3, reduction="unknown")(input, target)
with self.assertRaisesRegex(ValueError, ""):
LocalNormalizedCrossCorrelationLoss(in_channels=3, reduction=None)(input, target)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "cb2f446dfcfeeb341d52189dea6f96c459f24fa1", "size": 6526, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_local_normalized_cross_correlation_loss.py", "max_stars_repo_name": "JoHof/MONAI", "max_stars_repo_head_hexsha": "70483b648fba92f0a8346e53dc14d686e56120a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_local_normalized_cross_correlation_loss.py", "max_issues_repo_name": "JoHof/MONAI", "max_issues_repo_head_hexsha": "70483b648fba92f0a8346e53dc14d686e56120a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_local_normalized_cross_correlation_loss.py", "max_forks_repo_name": "JoHof/MONAI", "max_forks_repo_head_hexsha": "70483b648fba92f0a8346e53dc14d686e56120a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3945578231, "max_line_length": 120, "alphanum_fraction": 0.5830524058, "include": true, "reason": "import numpy", "num_tokens": 1976}
|
**Exercise set 7**
==============
> The goal of this exercise is to run PCA and PLSR on a real
data set in order to show how these methods can be used in practice.
We are considering data that are given by
[Platikanov et al.](https://doi.org/10.1016/j.watres.2012.10.040) and we are aiming to
reproduce some of the results found in their work. You may find it useful to
inspect this article when interpreting your results.
**Exercise 7.1**
In this exercise, we will consider two data sets: one set contains data for bottled mineral water
(we will call this "data set 1"),
and the other set contains data for tap water and bottled water (we will call this "data set 2").
The different water samples have been blind-tasted
by trained panelists, who have rated the different water samples
according to their liking. Also, the chemical compositions of the
samples have been determined. All measured quantities are given
in table 1.
Our end goal is to predict the ratings (the "mean liking" in
table 1) of the water samples, given
measurements of the chemical composition. We will define this rating as the
$y$-variable we are going to predict.
|Variable| Unit|
|:-------|----:|
|Conductivity | ($\mu$S/cm) |
|TDS (total dissolved solids) | (mg/L) |
|Cl$^{-}$ | (mg/L) |
|SO$_4^{2-}$ | (mg/L) |
|NO$_3^-$ | (mg/L) |
|HCO$_3^-$ | (mg/L) |
|Ca$^{2+}$ | (mg/L) |
|Mg$^{2+}$ | (mg/L) |
|Na$^{+}$ | (mg/L) |
|K$^{+}$ | (mg/L) |
|pH | |
|Si | (mg/L) |
|Cl$_2$ | (mg/L) |
|Mean liking | |
| **Table 1:** *Data columns present in the data sets: [Data/table1.csv](Data/table1.csv) and [Data/table2.csv](Data/table2.csv)* |
**(a)** Begin by exploring the raw data. In the [original article](https://doi.org/10.1016/j.watres.2012.10.040),
you can find correlation heat maps (see Fig. $1$ in the aforementioned article).
Create such correlation maps yourself (for data set 1 & 2)
and compare them with Fig. $1$ in the article. Does the mean liking
seem to be correlated with some of the variables?
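One possible starting point (a sketch, assuming the CSV files load into purely numeric columns; adjust the file path for data set 2):
```python
import pandas as pd
import matplotlib.pyplot as plt

data1 = pd.read_csv('Data/table1.csv')
corr = data1.corr()
plt.imshow(corr, cmap='coolwarm', vmin=-1, vmax=1)
plt.colorbar(label='correlation')
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.tight_layout()
plt.show()
```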
```python
# Your code here
```
**Your answer to question 7.1(a):** *Double click here*
**(b)** We will further explore the raw data with PCA. Perform a principal
component analysis for data set 1 and for data set 2. How much of the variance is
explained by the first two principal components? Also, plot the scores
and the loadings for principal component 1 and 2, and for principal component 1 and 3.
Does any of the variables seem to be correlated?
Which variables seem to influence the mean liking most
for the two data sets?
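As a starting point, a minimal PCA sketch (assuming `X` holds the scaled measurement columns, excluding the mean liking):
```python
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA

X_scaled = scale(X)
pca = PCA(n_components=3)
scores = pca.fit_transform(X_scaled)     # scores for plotting
loadings = pca.components_.T             # loadings for plotting
print('variance explained by PC1+PC2:',
      pca.explained_variance_ratio_[:2].sum())
```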
```python
# Your code here
```
**Your answer to question 7.1(b):** *Double click here*
**(c)** Let us start the modeling by creating a linear least-squares model:
* (i) Create linear least-squares models for the two data sets in which you use all available data in the fitting. Calculate $R^2$ and the root mean squared error (RMSE) of your models. Comment on the values you have obtained.
* (ii) Evaluate the root mean squared error of cross-validation (RMSECV) for your two models. Compare the RMSECV values with the previously obtained RMSE values (a sketch of one way to compute RMSECV is given after this list).
* (iii) Using the two models you have created, what variables seem to be most important for predicting a high mean liking? Here, you can inspect the regression coefficients (assuming that you have scaled the variables). If you were to create a new brand of bottled water, what chemical components would you focus on to maximize the mean liking?
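A sketch of one way to compute RMSECV (assuming `X` and `y` are the scaled data arrays; the choice of 5 folds is an assumption, not prescribed by the exercise):
```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

model = LinearRegression()
# sklearn negates the MSE so that larger scores are better:
neg_mse = cross_val_score(model, X, y, scoring='neg_mean_squared_error', cv=5)
rmsecv = np.sqrt(-neg_mse.mean())
print(f'RMSECV: {rmsecv:.3f}')
```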
```python
# Your code here
```
**Your answer to question 7.1(c):** *Double click here*
**(d)** We will now consider partial least-squares regression (PLSR) models.
Before we do the actual modeling, let us repeat the fundamental
equations for PLSR. The model itself is based on the following two
equations:
\begin{equation}
\begin{split}
\mathbf{X} &= \mathbf{T} \mathbf{P}^\top, \\
\mathbf{Y} &= \mathbf{U} \mathbf{Q}^\top, \\
\end{split}
\tag{1}
\end{equation}
where $\mathbf{T}$ is the $x$-scores, $\mathbf{P}$ is the $x$-loadings,
$\mathbf{U}$ is the $y$-scores, and $\mathbf{Q}$ is the $y$-loadings.
The linear relation between $\mathbf{X}$ and $\mathbf{Y}$ is in this
case given by,
\begin{equation}
\mathbf{Y} = \mathbf{T} \mathbf{Q}^\top.
\label{eq:plsrreg} \tag{2}
\end{equation}
When we wish to *predict* new $\mathbf{Y}$-values from new
$\mathbf{X}$-values, we need to calculate new $x$-scores. This is
done by introducing an additional matrix, $\mathbf{R}$, so that
the following is satisfied:
\begin{equation}
\mathbf{T} = \mathbf{X} \mathbf{R}.
\label{eq:plsrscores} \tag{3}
\end{equation}
Here, we can think of the $\mathbf{R}$ as a matrix we can use to invert
the relation $\mathbf{X} = \mathbf{T} \mathbf{P}^\top$. We can then predict
new $\mathbf{Y}$-values, by combining Eq. \eqref{eq:plsrreg}
and Eq. \eqref{eq:plsrscores}:
\begin{equation}
\mathbf{Y} = \mathbf{T} \mathbf{Q}^\top =
\mathbf{X} \mathbf{R} \mathbf{Q}^\top =
\mathbf{X} \mathbf{B}_\text{PLS},
\tag{4}
\end{equation}
where the regression coefficients $\mathbf{B}_\text{PLS}$ are given by:
\begin{equation}
\mathbf{B}_\text{PLS} = \mathbf{R} \mathbf{Q}^\top. \tag{5}
\end{equation}
In `sklearn` we can run PLSR by using `PLSRegression`
which is found in the module `sklearn.cross_decomposition`.
After running the regression, we can access the matrices given above with the python code below.
```python
from sklearn.preprocessing import scale
from sklearn.cross_decomposition import PLSRegression
X = scale(X)
y = scale(y)
plsr = PLSRegression(n_components=2)
plsr.fit(X, y)
R = plsr.x_rotations_
B = plsr.coef_
Q = plsr.y_loadings_
```
* (i) Create PLSR models for the two data sets in which you use all available data in the fitting. Use
only two components when you create the model, that is, set `PLSRegression(n_components=2)` when
you set up the models. Calculate $R^2$ and the root mean squared error (RMSE) of your models.
Comment on the values you have obtained, and compare them with the corresponding values from your
linear least-squares models.
* (ii) Calculate RMSECV for your two PLSR models. Compare the RMSECV values with the previously obtained
RMSE values. Would you say that your PLSR models perform better or worse than the least-squares
models?
* (iii) Plot the $x$- and $y$-loadings for the two components. These loadings are available as
`Q = plsr.y_loadings_`. What variables seem to be most important for predicting the mean liking?
* (iv) Inspect the weights (the $\mathbf{R}$ matrix) for PLS component 1 and PLS component 2.
Compare your results to the results given by
[Platikanov et al.](https://doi.org/10.1016/j.watres.2012.10.040) in Fig. $6$.
* (v) Based on the PLSR results: If you were to create
a new brand of bottled water, what chemical components
would you focus on to maximize the mean liking?
```python
# Your code here
```
**Your answer to question 7.1(d):** *Double click here*
|
{"hexsha": "81aafa6de83d00056bbebcbb8bbdcc5acfc7d7b0", "size": 10754, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "exercises_2020/07_Exercise_Set_7.ipynb", "max_stars_repo_name": "sroet/chemometrics", "max_stars_repo_head_hexsha": "c797505d07e366319ba1544e8a602be94b88fbb6", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-02-04T12:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-06T12:28:24.000Z", "max_issues_repo_path": "exercises_2020/07_Exercise_Set_7.ipynb", "max_issues_repo_name": "sroet/chemometrics", "max_issues_repo_head_hexsha": "c797505d07e366319ba1544e8a602be94b88fbb6", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 72, "max_issues_repo_issues_event_min_datetime": "2020-01-06T10:24:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T10:37:46.000Z", "max_forks_repo_path": "exercises_2020/07_Exercise_Set_7.ipynb", "max_forks_repo_name": "sroet/chemometrics", "max_forks_repo_head_hexsha": "c797505d07e366319ba1544e8a602be94b88fbb6", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-09T12:04:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-19T10:06:14.000Z", "avg_line_length": 39.3919413919, "max_line_length": 350, "alphanum_fraction": 0.5427747815, "converted": true, "num_tokens": 2096}
|
import numpy as np
path = 'training_data/1546786435.npz'
f = np.load(path)
x_train, y_train = f['train'], f['train_labels']
print(x_train.shape)
print(y_train.shape)
print(x_train)
print(y_train)
#x_test, y_test = f['x_test'], f['y_test']
f.close()
|
{"hexsha": "e2397bb1d120f619740ebd3578c735d59a69fc2e", "size": 255, "ext": "py", "lang": "Python", "max_stars_repo_path": "esp8266/esp8266car/computer/load_npz.py", "max_stars_repo_name": "OZhang/AutoCar", "max_stars_repo_head_hexsha": "47f033601941cd30e3725999ddeb1a67143e3c18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "esp8266/esp8266car/computer/load_npz.py", "max_issues_repo_name": "OZhang/AutoCar", "max_issues_repo_head_hexsha": "47f033601941cd30e3725999ddeb1a67143e3c18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "esp8266/esp8266car/computer/load_npz.py", "max_forks_repo_name": "OZhang/AutoCar", "max_forks_repo_head_hexsha": "47f033601941cd30e3725999ddeb1a67143e3c18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.25, "max_line_length": 48, "alphanum_fraction": 0.7019607843, "include": true, "reason": "import numpy", "num_tokens": 79}
|
\documentclass{amsart}
\title{LocalGraph Abstract Data Type}
\author{Todd D. Vance}
\date{\today}
\begin{document}
\maketitle{}
\section{Local Graph}
A local graph (modeling a directed graph, loops and multiple edges allowed, from which only a node and its immediate neighborhood are visible at any one time) is actually a system of ADTs. It consists of a single Cursor, and a finite number of Nodes and outgoing Edges indexed by Directions from a node. In addition, there is a concept of Items. An Item belongs to something, either a Node, another Item, the Cursor, or it belongs to nothing (the Nil object). The motivation for Items is Zork-like text adventure games, in which a Node is a room, the Cursor is the player, and Directions are connections to other rooms. Items can then be in a room, on the player, in or on another item, or nowhere (for a period of time).
The axioms are as follows:
\begin{enumerate}
\item There exists a unique Cursor object $c$.
\item There exists a unique Nil object $\epsilon$.
\item There exists a unique Node object $n$ such that $c\in{n}$.
\item For each Direction $d$ and Node object $n$, either $n.d=\epsilon$ or there exists a unique Node object $m$ such that $n.d=m$.
\item For each Item object $i$, exactly one of the following hold:
\begin{enumerate}
\item $i\in\epsilon$.
\item $i\in{c}$.
\item There exists a unique Node object $n$ such that $i\in{n}$
\item There exists a unique Item object $j$ such that $i\in{j}$
\end{enumerate}
\item There are no circular inclusions among items; that is, for no sequence of Item objects $i_1, i_2, \dots, i_n$ does it happen that $i_1\in i_2 \in \cdots\in i_n\in i_1$.
\end{enumerate}
For practical purposes, some query operations are permitted:
\begin{enumerate}
\item If $n$ is a Node object, then $\mathrm{dir}(n)$ is a set containing all directions $d$ such that $n.d\ne\epsilon$.
\item $\mathrm{loc}(c)$ is the unique Node object $n$ such that $c\in{n}$.
\item $\mathrm{loc}(i)$ is the object $o$ determined by exactly one of the following:
\begin{enumerate}
\item $o=\epsilon$ if $i\in\epsilon$.
\item $o=n$ if $n$ is a Node object and $i\in{n}$.
\item $o=j$ if $j$ is an Item object and $i\in{j}$.
\item $o=c$ if $i\in{c}$.
\end{enumerate}
\end{enumerate}
Optional operations provide more functionality:
\begin{enumerate}
\item If $n$ is a Node object, then $itm(n)$ is the set of all Item objects $i$ such that $i\in{n}$.
\item If $i$ is an Item object, then $itm(i)$ is the set of all Item objects $j$ such that $j\in{i}$
\item $itm(c)$ is the set of Item objects $i$ such that $i\in{c}$.
\item $itm(\epsilon)$ is the set of Item objects $i$ such that $i\in\epsilon$.
\item $itm()$ is the set of all Item objects.
\item $nod()$ is the set of all Node objects.
\end{enumerate}
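As an illustration (this example is not part of the original specification), consider two Node objects $n_1, n_2$ with $n_1.\mathrm{east}=n_2$ and $n_2.\mathrm{west}=n_1$, all other directions mapping to $\epsilon$; the Cursor satisfying $c\in{n_1}$; and a single Item $i$ with $i\in{n_2}$. Then $\mathrm{dir}(n_1)=\{\mathrm{east}\}$, $\mathrm{loc}(c)=n_1$, and $\mathrm{loc}(i)=n_2$, while the no-circular-inclusion axiom holds vacuously.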
\end{document}
|
{"hexsha": "4b2d7395b4f60cb7f731f81d1c1053c6dd8b50b0", "size": 2809, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/local_graph_adt.tex", "max_stars_repo_name": "tdvance/LocalGraph", "max_stars_repo_head_hexsha": "c927947391c04e9e6870e0edcfef6e2ffe2a4f7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/local_graph_adt.tex", "max_issues_repo_name": "tdvance/LocalGraph", "max_issues_repo_head_hexsha": "c927947391c04e9e6870e0edcfef6e2ffe2a4f7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/local_graph_adt.tex", "max_forks_repo_name": "tdvance/LocalGraph", "max_forks_repo_head_hexsha": "c927947391c04e9e6870e0edcfef6e2ffe2a4f7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.0727272727, "max_line_length": 724, "alphanum_fraction": 0.7173371307, "num_tokens": 830}
|
using TextGrid
using Test
@testset "TextGrid.jl" begin
# Write your tests here.
end
|
{"hexsha": "fd9c5f32b2c646e95c257bd3ed7483fabda1eff9", "size": 89, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "Hasanfcb/TextGrid.jl", "max_stars_repo_head_hexsha": "9ae5ebd1c1791ee0217b56ad788d81257b413afe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "Hasanfcb/TextGrid.jl", "max_issues_repo_head_hexsha": "9ae5ebd1c1791ee0217b56ad788d81257b413afe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "Hasanfcb/TextGrid.jl", "max_forks_repo_head_hexsha": "9ae5ebd1c1791ee0217b56ad788d81257b413afe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.7142857143, "max_line_length": 28, "alphanum_fraction": 0.7303370787, "num_tokens": 26}
|
'''
synbiochem (c) University of Manchester 2016
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=no-member
import uuid
import matplotlib.pyplot as plt
import numpy as np
def do_plot(data):
'''Plots data.'''
for datum, colour in data:
plt.hist(datum, 50, facecolor=colour, alpha=0.75)
plt.xlabel('Activity')
plt.ylabel('Count')
plt.title('Activity histogram')
plt.axis([0, 8, 0, 10000])
plt.grid(True)
plt.savefig(str(uuid.uuid4()) + '.png')
def main():
'''main method.'''
poor = (1 + 0.2 * np.random.randn(100000), 'red')
good = (2 + 0.4 * np.random.randn(100000), 'orange')
great = (5 + 1.0 * np.random.randn(100000), 'green')
do_plot([poor])
do_plot([poor, good])
do_plot([poor, good, great])
if __name__ == '__main__':
main()
|
{"hexsha": "92df4ec17370e045104c86eb575c8d9136a78764", "size": 935, "ext": "py", "lang": "Python", "max_stars_repo_path": "synbiochemdev/learning/hist.py", "max_stars_repo_name": "neilswainston/development-py", "max_stars_repo_head_hexsha": "47041c8059cf4d617b9ca26c16b4a691ce68aa2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synbiochemdev/learning/hist.py", "max_issues_repo_name": "neilswainston/development-py", "max_issues_repo_head_hexsha": "47041c8059cf4d617b9ca26c16b4a691ce68aa2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synbiochemdev/learning/hist.py", "max_forks_repo_name": "neilswainston/development-py", "max_forks_repo_head_hexsha": "47041c8059cf4d617b9ca26c16b4a691ce68aa2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.25, "max_line_length": 76, "alphanum_fraction": 0.6299465241, "include": true, "reason": "import numpy", "num_tokens": 272}
|
# pylint: disable=unused-argument
"""Debug runtime functions."""
import os
import json
import numpy as np
from tvm import ndarray as nd
from tvm.tools.debug.wrappers import local_cli_wrapper as tvmdbg
class DebugGraphModule(object):
"""Wrapper debug runtime module.
This is a thin wrapper of the debug for TVM runtime.
Parameters
----------
nodes_list : list
The list of all the graph nodes.
cli_obj : Object
The context of CLI object
"""
def __init__(self, nodes_list, cli_obj, dbg_out_buffer_list):
self.nodes_list = nodes_list
self.cli_obj = cli_obj
self.dbg_out_buffer_list = dbg_out_buffer_list
def get_run_command(self):
return self.cli_obj.get_run_command()
def run_end(self, run_cli_session, retvals):
self.cli_obj.run_end(run_cli_session, retvals)
def get_debug_buffer_count(self):
return len(self.dbg_out_buffer_list)
def get_debug_buffer(self, eid):
return self.dbg_out_buffer_list[eid]
def set_input(self, key=None, value=None, **params):
"""Set inputs to the module via kwargs
Parameters
----------
key : int or str
The input key
value : the input value.
The input key
params : dict of str to NDArray
            Additional arguments
"""
if key:
self.cli_obj.set_input(key.replace("/", "_"), value)
def dump_output(self):
"""Dump the outputs to a temporary folder
Parameters
----------
cli_obj: obj
The CLI object
"""
eid = 0
for node in self.nodes_list:
num_outputs = 1 if node['op'] == 'param' else int(node['attrs']['num_outputs'])
for j in range(num_outputs):
ndbuffer = self.dbg_out_buffer_list[eid]
eid += 1
key = node['name'] + "_" + str(j) + "__000000" + str(ndbuffer.time_stamp) + ".npy"
key = key.replace("/", "_")
file_name = str(self.cli_obj._dump_root + self.cli_obj.dump_folder() + key)
np.save(file_name, ndbuffer.asnumpy())
os.rename(file_name, file_name.rpartition('.')[0])
def _ensure_dir(self, file_path):
"""Create a directory if not exists
Parameters
----------
file_path: str
File path to create
"""
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def _dump_graph_json(self, ctx, new_graph):
# save to file
graph_dump_file_name = '_tvmdbg_graph_dump.json'
folder_name = "/_tvmdbg_device_,job_localhost,replica_0,task_0,device_"
folder_name = folder_name + ctx.replace(":", "_") + "/"
self.cli_obj.dump_folder(folder_name)
path = self.cli_obj._dump_root + folder_name
self._ensure_dir(path)
with open((path + graph_dump_file_name), 'w') as outfile:
json.dump(new_graph, outfile, indent=2, sort_keys=False)
def _dump_output_nodes(self, nodes_list, heads_list):
"""Dump the heads to a list
Parameters
----------
cli_obj: obj
The CLI object
heads_list : List
The list of outputs from the json node
"""
for output in heads_list:
self.cli_obj.set_ouputs(nodes_list[output[0]]['name'])
def _make_debug_buffer_list(shapes_list, dltype_list):
dbg_out_buffer_list = []
for i in range(len(shapes_list[1])):
dbg_out_buffer_list.append(nd.empty(shapes_list[1][i], dltype_list[1][i]))
return dbg_out_buffer_list
def _get_graph_json(nodes_list, dltype_list, shapes_list):
"""Dump the nodes in json format to file
Parameters
----------
ctx: Str
context in string
cli_obj: obj
CLI object where common information is stored
nodes_list: List
List of nodes in the graph
dltype_list: List
List of datatypes of each node
shapes_list: List
List of shape of each node
"""
new_graph = {}
new_graph['nodes'] = []
nodes_len = len(nodes_list)
for i in range(nodes_len):
node = nodes_list[i]
input_list = []
for input_node in node['inputs']:
input_list.append(nodes_list[input_node[0]]['name'])
#del node['inputs']
node['inputs'] = input_list
dltype = str("type: " + dltype_list[1][i])
if 'attrs' not in node:
node['attrs'] = {}
node['op'] = "param"
else:
node['op'] = node['attrs']['func_name']
node['name'] = node['name'].replace("/", "_")
node['attrs'].update({"T": dltype})
node['shape'] = shapes_list[1][i]
new_graph['nodes'].append(node)
return new_graph
def create(obj, graph, ctx):
"""Create a debug runtime environment and start the CLI
Parameters
----------
obj: Object
The object being used to store the graph runtime.
    graph: str
        NNVM graph in json format
    ctx: Object
        The device context (e.g. cpu(0)) the graph runs on
    Returns
    -------
    m: DebugGraphModule
        The wrapped debug runtime module
    """
json_obj = json.loads(graph)
nodes_list = json_obj['nodes']
dltype_list = json_obj['attrs']['dltype']
shapes_list = json_obj['attrs']['shape']
heads_list = json_obj['heads']
new_graph = _get_graph_json(nodes_list, dltype_list, shapes_list)
ctx = str(ctx).upper().replace("(", ":").replace(")", "")
# make the cli object
cli_obj = tvmdbg.LocalCLIDebugWrapperModule(obj, new_graph, ctx=ctx)
# prepare the debug out buffer list
dbg_buff_list = _make_debug_buffer_list(shapes_list, dltype_list)
m = DebugGraphModule(nodes_list, cli_obj, dbg_buff_list)
# dump the json information
m._dump_graph_json(ctx, new_graph)
m._dump_output_nodes(nodes_list, heads_list)
return m
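# A minimal usage sketch (hypothetical names: `mod` is assumed to be a graph
# runtime module and `graph_json` the NNVM graph string it was built from):
#     m = create(mod, graph_json, tvm.cpu(0))
#     m.set_input("data", input_array)     # keys are sanitized ("/" -> "_")
#     n_bufs = m.get_debug_buffer_count()  # one buffer per node output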
|
{"hexsha": "78dec36f0c40e502d4d19870341aada1c2f69e3e", "size": 5888, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tvm/tools/debug/runtime/debugruntime.py", "max_stars_repo_name": "dayanandasiet/tvmdbg", "max_stars_repo_head_hexsha": "5e3266a65422990d385c43424d51a4e5e8dfe6ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/tvm/tools/debug/runtime/debugruntime.py", "max_issues_repo_name": "dayanandasiet/tvmdbg", "max_issues_repo_head_hexsha": "5e3266a65422990d385c43424d51a4e5e8dfe6ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tvm/tools/debug/runtime/debugruntime.py", "max_forks_repo_name": "dayanandasiet/tvmdbg", "max_forks_repo_head_hexsha": "5e3266a65422990d385c43424d51a4e5e8dfe6ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7219512195, "max_line_length": 98, "alphanum_fraction": 0.6058084239, "include": true, "reason": "import numpy", "num_tokens": 1399}
|
import pyclesperanto_prototype as cle
import numpy as np
def test_standard_deviation_z_projection():
test1 = cle.push(np.asarray([
[
[1, 0, 0, 0, 9],
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[1, 0, 0, 0, 9],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[5, 0, 6, 0, 10]
], [
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[0, 2, 0, 8, 0],
[5, 0, 6, 0, 10]
]
]))
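    # The reference below is the per-pixel *sample* standard deviation (ddof=1)
    # along z: e.g. pixel (0, 0) stacks to [1, 0, 0, 0, 1] across the five
    # slices, and np.std([1, 0, 0, 0, 1], ddof=1) ~= 0.55 = reference[0][0].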
reference = cle.push(np.asarray([
[0.55, 1.10, 0, 4.38, 4.93],
[1.22, 1.79, 0.45, 4.12, 5.13],
[1.64, 2.19, 0.55, 3.83, 5.48],
[1.30, 2, 0.45, 4.03, 5.22],
[0, 0, 0, 0, 0]
]))
result = cle.create(reference)
cle.standard_deviation_z_projection(test1, result)
a = cle.pull(result)
b = cle.pull(reference)
print(a)
assert (np.allclose(a, b, 0.01))
|
{"hexsha": "4220ed87462f7c948f193cc72ca5d11664c65eb4", "size": 1368, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_standard_deviation_z_projection.py", "max_stars_repo_name": "elsandal/pyclesperanto_prototype", "max_stars_repo_head_hexsha": "7bda828813b86b44b63d73d5e8f466d9769cded1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-01T06:20:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-01T09:36:48.000Z", "max_issues_repo_path": "tests/test_standard_deviation_z_projection.py", "max_issues_repo_name": "elsandal/pyclesperanto_prototype", "max_issues_repo_head_hexsha": "7bda828813b86b44b63d73d5e8f466d9769cded1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_standard_deviation_z_projection.py", "max_forks_repo_name": "elsandal/pyclesperanto_prototype", "max_forks_repo_head_hexsha": "7bda828813b86b44b63d73d5e8f466d9769cded1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-29T18:40:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-29T18:40:54.000Z", "avg_line_length": 24.4285714286, "max_line_length": 54, "alphanum_fraction": 0.3340643275, "include": true, "reason": "import numpy", "num_tokens": 630}
|
from nltk.tree import Tree
import copy
import itertools
from numpy import insert
from collections import Counter
"""
Class to manage the transformation of a constituent tree into a sequence of labels
and vice versa. It extends the Tree class from the NLTK framework to address constituent parsing as a
sequential labeling problem.
"""
class SeqTree(Tree):
EMPTY_LABEL = "EMPTY-LABEL"
def __init__(self, label, children):
self.encoding = None
super(SeqTree, self).__init__(label, children)
# TODO: At the moment only the RelativeLevelTreeEncoder is supported
def set_encoding(self, encoding):
self.encoding = encoding
"""
Transforms a constituent tree with N leaves into a sequence of N labels.
@param is_binary: True if binary trees are being encoded and want to use an optimized
encoding [Not tested at the moment]
    @param root_label: Set to true to include a special label for words directly attached to the root
@param encode_unary_leaf: Set to true to encode leaf unary chains as a part of the label
"""
def to_maxincommon_sequence(self, is_binary=False, root_label=False, encode_unary_leaf=False):
if self.encoding is None: raise ValueError("encoding attribute is None")
leaves_paths = []
self.path_to_leaves([self.label()], leaves_paths)
leaves = self.leaves()
unary_sequence = [s.label() for s in self.subtrees(lambda t: t.height() == 2)] # .split("+")
return self.encoding.to_maxincommon_sequence(leaves, leaves_paths, unary_sequence, binarized=is_binary,
root_label=root_label,
encode_unary_leaf=encode_unary_leaf)
"""
Transforms a predicted sequence into a constituent tree
    @param sequence: A list of the predictions
    @param sentence: A list of (word,postag) representing the sentence (the postags must also encode the leaf unary chains)
@precondition: The postag of the tuple (word,postag) must have been already preprocessed to encode leaf unary chains,
concatenated by the '+' symbol (e.g. UNARY[0]+UNARY[1]+postag)
"""
@classmethod
def maxincommon_to_tree(cls, sequence, sentence, encoding):
if encoding is None: raise ValueError("encoding parameter is None")
return encoding.maxincommon_to_tree(sequence, sentence)
"""
Gets the path from the root to each leaf node
Returns: A list of lists with the sequence of non-terminals to reach each
terminal node
"""
def path_to_leaves(self, current_path, paths):
for i, child in enumerate(self):
pathi = []
if isinstance(child, Tree):
common_path = copy.deepcopy(current_path)
common_path.append(child.label() + "-" + str(i))
child.path_to_leaves(common_path, paths)
else:
for element in current_path:
pathi.append(element)
pathi.append(child)
paths.append(pathi)
return paths
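    # Example (illustrative): for t = SeqTree("S", [Tree("NP", ["w1"]), Tree("VP", ["w2"])]),
    # t.path_to_leaves([t.label()], []) returns
    # [["S", "NP-0", "w1"], ["S", "VP-1", "w2"]] -- note the "-i" child-index
    # suffix appended to every non-terminal below the root.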
"""
Encoder/Decoder class to transform a constituent tree into a sequence of labels by representing
how many levels in the tree there are in common between the word_i and word_(i+1) (in a relative scale)
and the label (constituent) at that lowest ancestor.
"""
class RelativeLevelTreeEncoder(object):
ROOT_LABEL = "ROOT"
NONE_LABEL = "NONE"
    # TODO: The binarized option has not been tested/evaluated
"""
Transforms a tree into a sequence encoding the "deepest-in-common" phrase between words t and t+1
@param leaves: A list of words representing each leaf node
@param leaves_paths: A list of lists that encodes the path in the tree to reach each leaf node
@param unary_sequence: A list of the unary sequences (if any) for every leaf node
@param binarized: If True, when predicting an "ascending" level we map the tag to -1, as it is possible to determine in which
level the word t needs to be located
@param root_label: Set to true to include a special label ROOT to the words that are directly attached to the root of the sentence
@param encode_unary_leaf: Set to true to encode leaf unary chains as a part of the label
"""
def to_maxincommon_sequence(self, leaves, leaves_paths, unary_sequence,
binarized, root_label, encode_unary_leaf=False):
sequence = []
previous_ni = 0
ni = 0
relative_ni = 0
for j, leaf in enumerate(leaves):
# It is the last real word of the sentence
if j == len(leaves) - 1:
if encode_unary_leaf and "+" in unary_sequence[j]:
encoded_unary_leaf = "_" + "+".join(
                        unary_sequence[j].split("+")[:-1])  # The PoS tag is not encoded
else:
encoded_unary_leaf = ""
# #This corresponds to the implementation without the computation trick
# sequence.append((self.NONE_LABEL+encoded_unary_leaf))
# break
# TODO: This is a computation trick that seemed to work better in the dev set
                # Sentences of length one are annotated with ROOT_UNARYCHAIN instead of NONE_UNARYCHAIN
if (root_label and len(leaves) == 1):
sequence.append(self.ROOT_LABEL + encoded_unary_leaf)
else:
sequence.append((self.NONE_LABEL + encoded_unary_leaf))
break
explore_up_to = min(len(leaves_paths[j]), len(leaves_paths[j + 1])) + 1
ni = 0
for i in range(explore_up_to):
if leaves_paths[j][i] == leaves_paths[j + 1][i]:
ni += 1
else:
relative_ni = ni - previous_ni
if binarized:
relative_ni = relative_ni if relative_ni >= 0 else -1
if encode_unary_leaf and "+" in unary_sequence[j]:
encoded_unary_leaf = "_" + "+".join(
                            unary_sequence[j].split("+")[:-1])  # The PoS tag is not encoded
else:
encoded_unary_leaf = ""
if root_label and ni == 1:
sequence.append(self.ROOT_LABEL + "_" + leaves_paths[j][ni - 1] + encoded_unary_leaf)
else:
sequence.append(self._tag(relative_ni, leaves_paths[j][ni - 1]) + encoded_unary_leaf)
previous_ni = ni
break
return sequence
# TODO: It should be possible to remove this precondition
"""
Uncollapses the INTERMEDIATE unary chains and also removes empty nodes that might be created when
transforming a predicted sequence into a tree.
    @precondition: Uncollapsing/removing-empty from the root must have been done prior to calling
    this function
"""
def uncollapse(self, tree):
uncollapsed = []
for child in tree:
if type(child) == type(u'') or type(child) == type(""):
uncollapsed.append(child)
else:
# It also removes EMPTY nodes
while child.label() == SeqTree.EMPTY_LABEL and len(child) != 0:
child = child[-1]
label = child.label()
if '+' in label:
label_split = label.split('+')
swap = Tree(label_split[0], [])
last_swap_level = swap
for unary in label_split[1:]:
last_swap_level.append(Tree(unary, []))
last_swap_level = last_swap_level[-1]
last_swap_level.extend(child)
uncollapsed.append(self.uncollapse(swap))
# We are uncollapsing the child node
else:
uncollapsed.append(self.uncollapse(child))
tree = Tree(tree.label(), uncollapsed)
return tree
"""
Gets a list of the PoS tags from the tree
@return A list containing the PoS tags
"""
def get_postag_trees(self, tree):
postags = []
for nchild, child in enumerate(tree):
if len(child) == 1 and type(child[-1]) == type(""):
postags.append(child)
else:
postags.extend(self.get_postag_trees(child))
return postags
# TODO: The unary chain is not needed here.
"""
Transforms a prediction of the form LEVEL_LABEL_[UNARY_CHAIN] into a tuple
of the form (level,label):
level is an integer or None (if the label is NONE or NONE_leafunarychain).
label is the constituent at that level
@return (level, label)
"""
def preprocess_tags(self, pred):
try:
label = pred.split("_")
level, label = label[0], label[1]
try:
return (int(level), label)
except ValueError:
# It is a NONE label with a leaf unary chain
if level == self.NONE_LABEL: # or level == self.ROOT:
return (None, pred.rsplit("_", 1)[1])
return (level, label)
except IndexError:
# It is a NONE label (without any leaf unary chains)
return (None, pred)
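    # Examples of the mapping (derived from the branches above):
    #   "2_NP"   -> (2, "NP")        relative level 2, constituent NP
    #   "ROOT_S" -> ("ROOT", "S")    level kept as the ROOT marker string
    #   "NONE"   -> (None, "NONE")   last word, no leaf unary chain
    #   "NONE_S" -> (None, "S")      last word with leaf unary chain S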
"""
Transforms a predicted sequence into a constituent tree
    @param sequence: A list of the predictions
    @param sentence: A list of (word,postag) representing the sentence (the postags must also encode the leaf unary chains)
@precondition: The postag of the tuple (word,postag) must have been already preprocessed to encode leaf unary chains,
concatenated by the '+' symbol (e.g. UNARY[0]+UNARY[1]+postag)
"""
def maxincommon_to_tree(self, sequence, sentence):
tree = SeqTree(SeqTree.EMPTY_LABEL, [])
current_level = tree
previous_at = None
first = True
        sequence = list(map(self.preprocess_tags, sequence))  # list() so it is indexable below (also Python 3 safe)
sequence = self._to_absolute_encoding(sequence)
for j, (level, label) in enumerate(sequence):
if level is None:
prev_level, _ = sequence[j - 1]
previous_at = tree
while prev_level > 1:
previous_at = previous_at[-1]
prev_level -= 1
# TODO: Trying optimitization
# It is a NONE label
if self.NONE_LABEL == label: # or self.ROOT_LABEL:
# if "NONE" == label:
previous_at.append(Tree(sentence[j][1], [sentence[j][0]]))
# It is a leaf unary chain
else:
previous_at.append(Tree(label + "+" + sentence[j][1], [sentence[j][0]]))
return tree
continue
i = 0
            for i in range(level - 1):
if len(current_level) == 0 or i >= sequence[j - 1][0] - 1:
child_tree = Tree(SeqTree.EMPTY_LABEL, [])
current_level.append(child_tree)
current_level = child_tree
else:
current_level = current_level[-1]
if current_level.label() == SeqTree.EMPTY_LABEL:
current_level.set_label(label)
if first:
previous_at = current_level
previous_at.append(Tree(sentence[j][1], [sentence[j][0]]))
first = False
else:
# If we are at the same or deeper level than in the previous step
if i >= sequence[j - 1][0] - 1:
current_level.append(Tree(sentence[j][1], [sentence[j][0]]))
else:
previous_at.append(Tree(sentence[j][1], [sentence[j][0]]))
previous_at = current_level
current_level = tree
return tree
"""
Transforms an encoding of a tree in a relative scale into an
encoding of the tree in an absolute scale.
"""
def _to_absolute_encoding(self, relative_sequence):
absolute_sequence = [0] * len(relative_sequence)
current_level = 0
for j, (level, phrase) in enumerate(relative_sequence):
if level is None:
absolute_sequence[j] = (level, phrase)
elif level == self.ROOT_LABEL:
absolute_sequence[j] = (1, phrase)
current_level += 1
else:
current_level += level
absolute_sequence[j] = (current_level, phrase)
return absolute_sequence
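    # Example trace: [(1, "S"), (1, "NP"), (-1, "VP"), (None, "NONE")] accumulates
    # current_level as 1, 2, 1 and yields [(1, "S"), (2, "NP"), (1, "VP"), (None, "NONE")].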
def _tag(self, level, tag):
return str(level) + "_" + tag.rsplit("-", 1)[0]
|
{"hexsha": "484125dd969f4b9c877431fbdee3ec5d7dd7ac2d", "size": 12885, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/tree.py", "max_stars_repo_name": "mstrise/seq2label-crossrep", "max_stars_repo_head_hexsha": "db55c42ece8ab02af9c170eaba1d503b494032cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-07-02T22:27:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-20T10:39:20.000Z", "max_issues_repo_path": "utils/tree.py", "max_issues_repo_name": "mstrise/seq2label-crossrep", "max_issues_repo_head_hexsha": "db55c42ece8ab02af9c170eaba1d503b494032cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/tree.py", "max_forks_repo_name": "mstrise/seq2label-crossrep", "max_forks_repo_head_hexsha": "db55c42ece8ab02af9c170eaba1d503b494032cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-03T12:36:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-03T12:36:53.000Z", "avg_line_length": 37.8970588235, "max_line_length": 134, "alphanum_fraction": 0.5814513, "include": true, "reason": "from numpy", "num_tokens": 2735}
|
// Copyright (C) 2010 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/optimization.h>
#include "optimization_test_functions.h"
#include <sstream>
#include <string>
#include <cstdlib>
#include <ctime>
#include <vector>
#include "../rand.h"
#include "tester.h"
namespace
{
using namespace test;
using namespace dlib;
using namespace std;
using namespace dlib::test_functions;
logger dlog("test.trust_region");
// ----------------------------------------------------------------------------------------
template <typename T>
struct neg_rosen_model
{
typedef matrix<T,0,1> column_vector;
typedef matrix<T,0,0> general_matrix;
T operator() ( column_vector x) const
{
return -static_cast<T>(rosen<T>(x));
}
void get_derivative_and_hessian (
const column_vector& x,
column_vector& d,
general_matrix& h
) const
{
d = -matrix_cast<T>(rosen_derivative<T>(x));
h = -matrix_cast<T>(rosen_hessian<T>(x));
}
};
// ----------------------------------------------------------------------------------------
dlib::rand rnd;
template <typename T>
void test_with_rosen()
{
print_spinner();
matrix<T,2,1> ans;
ans = 1,1;
matrix<T,2,1> p = 100*matrix_cast<T>(randm(2,1,rnd)) - 50;
T obj = find_min_trust_region(objective_delta_stop_strategy(1e-12, 100), rosen_function_model<T>(), p);
DLIB_TEST_MSG(std::abs(obj) < 1e-10, "obj: " << obj);
DLIB_TEST_MSG(length(p-ans) < 1e-5, "length(p): " << length(p-ans));
matrix<T,0,1> p2 = 100*matrix_cast<T>(randm(2,1,rnd)) - 50;
obj = find_max_trust_region(objective_delta_stop_strategy(1e-12, 100), neg_rosen_model<T>(), p2);
DLIB_TEST_MSG(std::abs(obj) < 1e-10, "obj: " << obj);
        DLIB_TEST_MSG(length(p2-ans) < 1e-5, "length(p2): " << length(p2-ans));
}
// ----------------------------------------------------------------------------------------
void test_trust_region_sub_problem()
{
dlog << LINFO << "subproblem test 1";
{
matrix<double,2,2> B;
B = 1, 0,
0, 1;
matrix<double,2,1> g, p, ans;
g = 0;
ans = 0;
solve_trust_region_subproblem(B,g,1,p, 0.001, 10);
DLIB_TEST(length(p-ans) < 1e-10);
solve_trust_region_subproblem(B,g,1,p, 0.001, 1);
DLIB_TEST(length(p-ans) < 1e-10);
}
dlog << LINFO << "subproblem test 2";
{
matrix<double,2,2> B;
B = 1, 0,
0, 1;
B *= 0.1;
matrix<double,2,1> g, p, ans;
g = 1;
ans = -g / length(g);
solve_trust_region_subproblem(B,g,1,p, 1e-6, 20);
DLIB_TEST(length(p-ans) < 1e-4);
}
dlog << LINFO << "subproblem test 3";
{
matrix<double,2,2> B;
B = 0, 0,
0, 0;
matrix<double,2,1> g, p, ans;
g = 1;
ans = -g / length(g);
solve_trust_region_subproblem(B,g,1,p, 1e-6, 20);
dlog << LINFO << "ans: " << trans(ans);
dlog << LINFO << "p: " << trans(p);
DLIB_TEST(length(p-ans) < 1e-4);
}
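        // NOTE: the early return below makes subproblem tests 4-6 unreachable.
        // The source gives no reason; presumably they were disabled deliberately
        // (for the indefinite B cases below the boundary minimizer is unique only
        // up to sign, so the hard-coded expected vectors are ambiguous).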
return;
dlog << LINFO << "subproblem test 4";
{
matrix<double,2,2> B;
B = 2, 0,
0, -1;
matrix<double,2,1> g, p, ans;
g = 0;
ans = 0, -1;
solve_trust_region_subproblem(B,g,1,p, 1e-6, 20);
DLIB_TEST(length(p-ans) < 1e-4);
}
dlog << LINFO << "subproblem test 5";
{
matrix<double,2,2> B;
B = 2, 0,
0, -1;
matrix<double,2,1> g, p, ans;
g = 0, 1;
ans = 0, -1;
solve_trust_region_subproblem(B,g,1,p, 1e-6, 20);
DLIB_TEST(length(p-ans) < 1e-4);
}
dlog << LINFO << "subproblem test 6";
for (int i = 0; i < 10; ++i)
{
matrix<double,10,10> B;
B = randm(10,10, rnd);
B = 0.01*B*trans(B);
matrix<double,10,1> g, p, ans;
g = 1;
solve_trust_region_subproblem(B,g,1,p, 1e-6, 20);
DLIB_TEST(std::abs(length(p) - 1) < 1e-4);
}
}
// ----------------------------------------------------------------------------------------
void test_problems()
{
print_spinner();
{
matrix<double,4,1> ch;
ch = brown_start();
find_min_trust_region(objective_delta_stop_strategy(1e-7, 80),
brown_function_model(),
ch);
dlog << LINFO << "brown obj: " << brown(ch);
dlog << LINFO << "brown der: " << length(brown_derivative(ch));
dlog << LINFO << "brown error: " << length(ch - brown_solution());
DLIB_TEST(length(ch - brown_solution()) < 1e-5);
}
print_spinner();
{
matrix<double,2,1> ch;
ch = rosen_start<double>();
find_min_trust_region(objective_delta_stop_strategy(1e-7, 80),
rosen_function_model<double>(),
ch);
dlog << LINFO << "rosen obj: " << rosen(ch);
dlog << LINFO << "rosen der: " << length(rosen_derivative(ch));
dlog << LINFO << "rosen error: " << length(ch - rosen_solution<double>());
DLIB_TEST(length(ch - rosen_solution<double>()) < 1e-5);
}
print_spinner();
{
matrix<double,0,1> ch;
ch = chebyquad_start(2);
find_min_trust_region(objective_delta_stop_strategy(1e-7, 80),
chebyquad_function_model(),
ch);
dlog << LINFO << "chebyquad 2 obj: " << chebyquad(ch);
dlog << LINFO << "chebyquad 2 der: " << length(chebyquad_derivative(ch));
dlog << LINFO << "chebyquad 2 error: " << length(ch - chebyquad_solution(2));
DLIB_TEST(length(ch - chebyquad_solution(2)) < 1e-5);
}
print_spinner();
{
matrix<double,0,1> ch;
ch = chebyquad_start(4);
find_min_trust_region(objective_delta_stop_strategy(1e-7, 80),
chebyquad_function_model(),
ch);
dlog << LINFO << "chebyquad 4 obj: " << chebyquad(ch);
dlog << LINFO << "chebyquad 4 der: " << length(chebyquad_derivative(ch));
dlog << LINFO << "chebyquad 4 error: " << length(ch - chebyquad_solution(4));
DLIB_TEST(length(ch - chebyquad_solution(4)) < 1e-5);
}
print_spinner();
{
matrix<double,0,1> ch;
ch = chebyquad_start(6);
find_min_trust_region(objective_delta_stop_strategy(1e-12, 80),
chebyquad_function_model(),
ch);
dlog << LINFO << "chebyquad 6 obj: " << chebyquad(ch);
dlog << LINFO << "chebyquad 6 der: " << length(chebyquad_derivative(ch));
dlog << LINFO << "chebyquad 6 error: " << length(ch - chebyquad_solution(6));
DLIB_TEST(length(ch - chebyquad_solution(6)) < 1e-5);
}
print_spinner();
{
matrix<double,0,1> ch;
ch = chebyquad_start(8);
find_min_trust_region(objective_delta_stop_strategy(1e-10, 80),
chebyquad_function_model(),
ch);
dlog << LINFO << "chebyquad 8 obj: " << chebyquad(ch);
dlog << LINFO << "chebyquad 8 der: " << length(chebyquad_derivative(ch));
dlog << LINFO << "chebyquad 8 error: " << length(ch - chebyquad_solution(8));
DLIB_TEST(length(ch - chebyquad_solution(8)) < 1e-5);
}
}
class optimization_tester : public tester
{
public:
optimization_tester (
) :
tester ("test_trust_region",
"Runs tests on the trust region optimization component.")
{}
void perform_test (
)
{
dlog << LINFO << "test with rosen<float>";
for (int i = 0; i < 50; ++i)
test_with_rosen<float>();
dlog << LINFO << "test with rosen<double>";
for (int i = 0; i < 50; ++i)
test_with_rosen<double>();
test_trust_region_sub_problem();
test_problems();
}
} a;
}
|
{"hexsha": "aa2775b9c950da21a570390abeca930cff5964b9", "size": 8958, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "dlib/test/trust_region.cpp", "max_stars_repo_name": "yatonon/dlib-face", "max_stars_repo_head_hexsha": "0230c1034ee65d0846d007e6145bfe73ca0d6321", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 11719.0, "max_stars_repo_stars_event_min_datetime": "2015-01-03T22:38:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:45:04.000Z", "max_issues_repo_path": "dlib/test/trust_region.cpp", "max_issues_repo_name": "KiLJ4EdeN/dlib", "max_issues_repo_head_hexsha": "eb1f08ce6ab3ca6f9d10425d899103de3c0df56c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2518.0, "max_issues_repo_issues_event_min_datetime": "2015-01-04T04:38:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:55:43.000Z", "max_forks_repo_path": "dlib/test/trust_region.cpp", "max_forks_repo_name": "KiLJ4EdeN/dlib", "max_forks_repo_head_hexsha": "eb1f08ce6ab3ca6f9d10425d899103de3c0df56c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3308.0, "max_forks_repo_forks_event_min_datetime": "2015-01-01T14:34:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:20:07.000Z", "avg_line_length": 27.1454545455, "max_line_length": 111, "alphanum_fraction": 0.4642777406, "num_tokens": 2347}
|
"""
Module for processing oxygen from CTD and bottle samples.
"""
import csv
import logging
import xml.etree.cElementTree as ET
from collections import OrderedDict
from pathlib import Path
import gsw
import numpy as np
import pandas as pd
import scipy
from . import ctd_plots as ctd_plots
from . import flagging as flagging
from . import get_ctdcal_config
from . import process_ctd as process_ctd
from . import sbe_reader as sbe_rd
cfg = get_ctdcal_config()
log = logging.getLogger(__name__)
def load_winkler_oxy(oxy_file):
"""
Load Winkler oxygen titration data file.
Parameters
----------
oxy_file : str or Path
Path to oxygen file
Returns
-------
df : DataFrame
Oxygen data
params : list of str
List of oxygen parameters used in titration
"""
with open(oxy_file, newline="") as f:
oxyF = csv.reader(
            f, delimiter=" ", quoting=csv.QUOTE_NONE, skipinitialspace=True
)
oxy_array = []
for row in oxyF:
if len(row) > 9:
row = row[:9]
oxy_array.append(row)
# TODO turn params into a dict with useful labels
params = oxy_array.pop(0) # save file header info for later (Winkler values)
cols = OrderedDict(
[
("STNNO_OXY", int),
("CASTNO_OXY", int),
("BOTTLENO_OXY", int),
("FLASKNO", int),
("TITR_VOL", float),
("TITR_TEMP", float),
("DRAW_TEMP", float),
("TITR_TIME", int),
("END_VOLTS", float),
]
)
df = pd.DataFrame(oxy_array, columns=cols.keys()).astype(cols)
df = df[df["BOTTLENO_OXY"] != 99] # remove "Dummy Data"
df = df[df["TITR_VOL"] > 0] # remove "ABORTED DATA"
df = df.sort_values("BOTTLENO_OXY").reset_index(drop=True)
df["FLASKNO"] = df["FLASKNO"].astype(str)
return df, params
def load_flasks(flask_file=cfg.dirs["oxygen"] + "o2flasks.vol", comment="#"):
"""
Load oxygen flask information from .vol file.
Parameters
----------
flask_file : str or Path, optional
Path to flask file
comment : str, optional
Identifier signifying line is a comment and should be skipped
Returns
-------
flasks : DataFrame
Flask numbers and volumes
"""
with open(flask_file, "r") as f:
flasks = []
for line in f:
is_comment = line.strip().startswith(comment)
if ("Volume" in line) or is_comment:
continue
num, vol = line.strip().split()[:2] # only need first two cols (#, volume)
flasks.append([str(num), float(vol)])
flasks = pd.DataFrame(flasks, columns=["FLASKNO", "FLASK_VOL"])
return flasks
def correct_flask_vol(flask_vol, t=20.0, glass="borosilicate"):
"""
Correct flask volume for changes from thermal expansion of glass.
Parameters
----------
flask_vol : array-like
Flask volumes at standard temperature (20C)
t : float, optional
New temperature to calculate volume
glass : str, optional
        Type of glass ("borosilicate" or "soft")
Returns
-------
corrected_vol : array-like
        Flask volumes at the new temperature
Notes
-----
Flask volume equation from 2007 Best Practices for Ocean CO2 Measurements,
SOP 13 - Gravimetric calibration of volume contained using water
"""
alpha = { # thermal expansion coefficient
"borosilicate": 1.0e-5,
"soft": 2.5e-3,
}
if glass not in alpha.keys():
raise KeyError(f"Glass type not found, must be one of {list(alpha.keys())}")
standard_t = 20.0
corrected_vol = flask_vol * (1.0 + alpha[glass] * (t - standard_t))
return corrected_vol
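# A worked example of the linear-expansion correction above (values computed
# directly from the equation in this function, not from an external table):
#   correct_flask_vol(100.0, t=25.0)                # 100 * (1 + 1e-5 * 5)   -> 100.005 mL
#   correct_flask_vol(100.0, t=25.0, glass="soft")  # 100 * (1 + 2.5e-3 * 5) -> 101.25 mL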
def gather_oxy_params(oxy_file):
"""
Collect Winkler oxygen measurement parameters from LabVIEW data file headers.
Parameters
----------
oxy_file : str or Path
Path to oxygen file
Returns
-------
df : DataFrame
Oxygen measurement parameters
"""
with open(oxy_file, newline="") as f:
header = f.readline()
param_list = header.split()[:6]
params = pd.DataFrame(param_list, dtype=float).transpose()
params.columns = ["V_std", "V_blank", "N_KIO3", "V_KIO3", "T_KIO3", "T_thio"]
return params
def calculate_bottle_oxygen(ssscc_list, ssscc_col, titr_vol, titr_temp, flask_nums):
"""
Wrapper function for collecting parameters and calculating oxygen values from
Winkler titrations.
Parameters
----------
ssscc_list : list of str
List of stations to process
ssscc_col : array-like
Station/cast for each sample taken
titr_vol : array-like
Titration volume [mL]
titr_temp : array-like
Temperature of titration [degC]
flask_nums : array-like
Oxygen flask used for each sample
Returns
-------
oxy_mL_L : array-like
Oxygen concentration [mL/L]
Notes
-----
Titration equation comes from WHP Operations and Methods, Culberson (1991):
https://cchdo.github.io/hdo-assets/documentation/manuals/pdf/91_1/culber2.pdf
"""
params = pd.DataFrame()
for ssscc in ssscc_list:
df = gather_oxy_params(cfg.dirs["oxygen"] + ssscc)
df["SSSCC"] = ssscc
params = pd.concat([params, df])
# get flask volumes and merge with titration parameters
flask_df = load_flasks() # TODO: volume correction from thermal expansion?
volumes = pd.merge(flask_nums, flask_df, how="left")["FLASK_VOL"].values
params = pd.merge(ssscc_col, params, how="left")
# find 20degC equivalents
rho_20C = gsw.rho_t_exact(0, 20, 0)
rho_T_KIO3 = gsw.rho_t_exact(0, params["T_KIO3"], 0)
N_KIO3_20C = params["N_KIO3"] * (rho_T_KIO3 / rho_20C)
# TODO: does KIO3 volume get corrected? what is the recorded value?
# V_KIO3_20C = correct_flask_vol(params["V_KIO3"], t=params["T_KIO3"])
# calculate O2 concentration (in mL/L)
E = 5598 # stoichiometric relationship between thio_n and DO
DO_reg = 0.0017 # correction for oxygen added by reagents
V_reg = 2.0 # volume of reagents (mL)
oxy_mL_L = (
(
((titr_vol.values - params["V_blank"]) * params["V_KIO3"] * N_KIO3_20C * E)
/ (params["V_std"] - params["V_blank"])
- 1000 * DO_reg
)
) / (volumes - V_reg)
return oxy_mL_L.values
def hysteresis_correction(oxygen, pressure, H1=-0.033, H2=5000, H3=1450, freq=24):
"""
Remove hysteresis effects from oxygen concentration values.
Oxygen hysteresis can be corrected before conversion from volts to oxygen
concentration, see equations_sbe.sbe43_hysteresis_voltage()
# TODO: should this just be a wrapper that calls sbe43_hysteresis_voltage()?
Parameters
----------
oxygen : array-like
Oxygen concentration values
pressure : array-like
CTD pressure values (dbar)
H1 : scalar, optional
Amplitude of hysteresis correction function (range: -0.02 to -0.05)
H2 : scalar, optional
Function constant or curvature function for hysteresis
H3 : scalar, optional
Time constant for hysteresis (seconds) (range: 1200 to 2000)
freq : scalar, optional
CTD sampling frequency (Hz)
Returns
-------
oxy_corrected : array-like
Hysteresis-corrected oxygen concentration values (with same units as input)
Notes
-----
See Application Note 64-3 for more information.
"""
# TODO: vectorize (if possible), will probably require matrix inversion
dt = 1 / freq
D = 1 + H1 * (np.exp(pressure / H2) - 1)
C = np.exp(-1 * dt / H3)
oxy_corrected = np.zeros(oxygen.shape)
oxy_corrected[0] = oxygen[0]
for i in np.arange(1, len(oxygen)):
oxy_corrected[i] = (
oxygen[i] + (oxy_corrected[i - 1] * C * D[i]) - (oxygen[i - 1] * C)
) / D[i]
return oxy_corrected
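# Restating the loop above as a recurrence for readability (matches the code):
#   D[i] = 1 + H1 * (exp(P[i] / H2) - 1),   C = exp(-dt / H3)
#   O_corr[i] = (O[i] + O_corr[i-1] * C * D[i] - O[i-1] * C) / D[i]
# applied per sample at the CTD sampling frequency `freq`.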
def oxy_ml_to_umolkg(oxy_mL_L, sigma0):
"""Convert dissolved oxygen from units of mL/L to micromol/kg.
Parameters
----------
oxy_mL_L : array-like
Dissolved oxygen in units of [mL/L]
sigma0 : array-like
Potential density anomaly (i.e. sigma - 1000) referenced to 0 dbar [kg/m^3]
Returns
-------
oxy_umol_kg : array-like
Dissolved oxygen in units of [umol/kg]
Notes
-----
Conversion value 44660 is exact for oxygen gas and derived from the ideal gas law.
(c.f. Sea-Bird Application Note 64, pg. 6)
"""
oxy_umol_kg = oxy_mL_L * 44660 / (sigma0 + 1000)
return oxy_umol_kg
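# Quick numeric check of the conversion (illustrative values only):
#   oxy_ml_to_umolkg(5.0, 27.0)  # 5 * 44660 / (27 + 1000) ~= 217.43 umol/kg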
def oxy_umolkg_to_ml(oxy_umol_kg, sigma0):
"""Convert dissolved oxygen from units of micromol/kg to mL/L.
Parameters
----------
oxy_umol_kg : array-like
Dissolved oxygen in units of [umol/kg]
sigma0 : array-like
Potential density anomaly (i.e. sigma - 1000) referenced to 0 dbar [kg/m^3]
Returns
-------
oxy_mL_L : array-like
Dissolved oxygen in units of [mL/L]
Notes
-----
Conversion value 44660 is exact for oxygen gas and derived from the ideal gas law.
(c.f. Sea-Bird Application Note 64, pg. 6)
"""
oxy_mL_L = oxy_umol_kg * (sigma0 + 1000) / 44660
return oxy_mL_L
def calculate_dV_dt(oxy_volts, time, nan_replace=True):
"""
Calculate the time derivative of oxygen voltage.
Parameters
----------
oxy_volts : array-like
Oxygen sensor voltage output
time : array-like
Time from oxygen sensor (must be same length as oxy_volts)
nan_replace : bool, optional
Replace nans in time derivative with the mean value
Returns
-------
dV_dt : array-like
Time derivative of oxygen voltage
"""
# TODO: experiment with dt, filtering
# Uchida (2008): dV/dt "estimated by linear fits over 2 second intervals"
# should dt just be 1 / freq? i.e. 1/24 Hz
    dV = np.diff(oxy_volts)  # forward differences shorten vectors by 1
dt = np.diff(time)
dt[dt == 0] = np.median(dt[dt > 0]) # replace with median to avoid dividing by zero
dV_dt = dV / dt
dV_dt = np.insert(dV_dt, 0, 0) # add zero in front to match original length
dV_dt[np.isinf(dV_dt)] = np.nan # this check is probably unnecessary
if nan_replace:
dV_dt = np.nan_to_num(dV_dt, nan=np.nanmean(dV_dt))
# TODO: should we do some kind of filtering? e.g.:
# (PMEL does this calculation on binned data already so filtering is not the same)
# a = 1
# windowsize = 5
# b = (1 / windowsize) * np.ones(windowsize)
# filtered_dvdt = scipy.signal.filtfilt(b, a, dv_dt)
return dV_dt # filtered_dvdt
def _get_sbe_coef(idx=0):
"""
Get SBE oxygen coefficients from raw .xmlcon files.
Defaults to using first station in ssscc.csv file.
Returns the following tuple of coefficients: Soc, offset, Tau20, Tcor, E
"""
# TODO: does scipy's minimize function needs a tuple? can this be improved further?
station = process_ctd.get_ssscc_list()[idx]
xmlfile = cfg.dirs["raw"] + station + ".XMLCON"
tree = ET.parse(xmlfile)
root_eq0 = tree.find(".//CalibrationCoefficients[@equation='0']") # Owens-Millard
root_eq1 = tree.find(".//CalibrationCoefficients[@equation='1']") # SBE equation
coefs = {c.tag: float(c.text) for c in root_eq1}
coefs["Tcor"] = float(root_eq0.find("Tcor").text) # only coef needed from eq0
keep_keys = ["Soc", "offset", "Tau20", "Tcor", "E"]
return tuple(coefs[key] for key in keep_keys)
def calculate_weights(pressure):
"""
Calculate weights (as a function of pressure) for weighted least squares fitting.
Deep measurements are weighted higher than shallow.
Parameters
----------
    pressure : array-like
Pressure values of oxygen measurements [dbar]
Returns
-------
weights : array-like
Weight factor for each pressure value
"""
# TODO: automatic weight calculation rather than hardcoded (machine learning?)
epsilon = 1e-5 # small offset to avoid interpolation issues
# define piecewise weight function dependent on pressure
p_bins = [
0,
100,
100 + epsilon,
300,
300 + epsilon,
500,
500 + epsilon,
1200,
1200 + epsilon,
2000,
2000 + epsilon,
7000,
]
w_bins = [20, 20, 25, 25, 50, 50, 100, 100, 200, 200, 500, 500]
wgt = scipy.interpolate.interp1d(p_bins, w_bins)
weights = wgt(pressure) # get weights from piecewise function
return weights
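# Because consecutive breakpoints differ only by epsilon, the interpolant acts
# as a step function; reading off p_bins/w_bins above, e.g.:
#   calculate_weights([50, 400, 3000])  # -> approximately [20., 50., 500.]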
"""code_pruning: should this be here or in equations_sbe? somewhere else?"""
def _PMEL_oxy_eq(coefs, inputs, cc=[1.92634e-4, -4.64803e-2]):
"""
Modified oxygen equation for SBE 43 used by NOAA/PMEL
coef[0] = Soc
coef[1] = Voffset
coef[2] = Tau20
coef[3] = Tcorr
coef[4] = E
"""
Soc, Voff, Tau20, Tcorr, E = coefs
oxyvolts, pressure, temp, dvdt, os = inputs
o2 = (
Soc
* (
oxyvolts
+ Voff
+ Tau20 * np.exp(cc[0] * pressure + cc[1] * (temp - 20)) * dvdt
)
* os
* np.exp(Tcorr * temp)
* np.exp((E * pressure) / (temp + 273.15))
)
return o2
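# Written out, the modified SBE43 equation computed above is:
#   O2 = Soc * (V + Voff + Tau20 * exp(cc[0]*P + cc[1]*(T - 20)) * dV/dt)
#        * OS * exp(Tcorr * T) * exp(E * P / (T + 273.15))
# with pressure P in dbar, temperature T in degC and OS the oxygen solubility.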
def PMEL_oxy_weighted_residual(coefs, weights, inputs, refoxy, L_norm=2):
# TODO: optionally include other residual types
# (abstracted from PMEL code oxygen_cal_ml.m)
# unweighted L2: sum((ref - oxy)^2) # if weighted fails
# unweighted L4: sum((ref - oxy)^4) # unsure of use case
# unweighted L1: sum(abs(ref - oxy)) # very far from ideal
# anything else? genericize with integer "norm" function input?
residuals = np.sum(
(weights * (refoxy - _PMEL_oxy_eq(coefs, inputs)) ** 2)
) / np.sum(weights ** 2)
return residuals
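# Note on the approach below (our reading of the code, not an authoritative
# statement): match_sigmas pairs each bottle sample with the continuous CTD
# trace by interpolating on potential density anomaly ("sigma") referenced to
# several pressure levels (0-6000 dbar) rather than on pressure directly; the
# tiny 1e-8 noise added to each sigma keeps the interpolation abscissae
# distinct when densities are (nearly) identical.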
def match_sigmas(
btl_prs,
btl_oxy,
btl_tmp,
btl_SA,
ctd_os,
ctd_prs,
ctd_tmp,
ctd_SA,
ctd_oxyvolts,
ctd_time,
):
# Construct Dataframe from bottle and ctd values for merging
btl_data = pd.DataFrame(
data={"CTDPRS": btl_prs, "REFOXY": btl_oxy, "CTDTMP": btl_tmp, "SA": btl_SA}
)
time_data = pd.DataFrame(
data={
"CTDPRS": ctd_prs,
"OS": ctd_os,
"CTDTMP": ctd_tmp,
"SA": ctd_SA,
"CTDOXYVOLTS": ctd_oxyvolts,
"CTDTIME": ctd_time,
}
)
time_data["dv_dt"] = calculate_dV_dt(time_data["CTDOXYVOLTS"], time_data["CTDTIME"])
# Merge DF
merged_df = pd.DataFrame(
columns=["CTDPRS", "CTDOXYVOLTS", "CTDTMP", "dv_dt", "OS"], dtype=float
)
merged_df["REFOXY"] = btl_data["REFOXY"].copy()
# calculate sigma referenced to multiple depths
for idx, p_ref in enumerate([0, 1000, 2000, 3000, 4000, 5000, 6000]):
# pandas 1.2.1 ufunc issue workaround
btl_inputs = np.broadcast_arrays(
btl_data["SA"], btl_data["CTDTMP"], btl_data["CTDPRS"], p_ref
)
time_inputs = np.broadcast_arrays(
time_data["SA"], time_data["CTDTMP"], time_data["CTDPRS"], p_ref
)
btl_data[f"sigma{idx}"] = (
gsw.pot_rho_t_exact(*btl_inputs)
- 1000 # subtract 1000 to get potential density *anomaly*
) + 1e-8 * np.random.standard_normal(btl_data["SA"].size)
time_data[f"sigma{idx}"] = (
gsw.pot_rho_t_exact(*time_inputs)
- 1000 # subtract 1000 to get potential density *anomaly*
) + 1e-8 * np.random.standard_normal(time_data["SA"].size)
rows = (btl_data["CTDPRS"] > (p_ref - 500)) & (
btl_data["CTDPRS"] < (p_ref + 500)
)
time_sigma_sorted = time_data[f"sigma{idx}"].sort_values().to_numpy()
sigma_min = np.min(
[np.min(btl_data.loc[rows, f"sigma{idx}"]), np.min(time_sigma_sorted)]
)
sigma_max = np.max(
[np.max(btl_data.loc[rows, f"sigma{idx}"]), np.max(time_sigma_sorted)]
)
time_sigma_sorted = np.insert(time_sigma_sorted, 0, sigma_min - 1e-4)
time_sigma_sorted = np.append(time_sigma_sorted, sigma_max + 1e-4)
# TODO: can this be vectorized?
cols = ["CTDPRS", "CTDOXYVOLTS", "CTDTMP", "dv_dt", "OS"]
inds = np.concatenate(([0], np.arange(0, len(time_data)), [len(time_data) - 1]))
for col in cols:
merged_df.loc[rows, col] = np.interp(
btl_data.loc[rows, f"sigma{idx}"],
time_sigma_sorted,
time_data[col].iloc[inds],
)
# Apply coef and calculate CTDOXY
sbe_coef0 = _get_sbe_coef() # initial coefficient guess
merged_df["CTDOXY"] = _PMEL_oxy_eq(
sbe_coef0,
(
merged_df["CTDOXYVOLTS"],
merged_df["CTDPRS"],
merged_df["CTDTMP"],
merged_df["dv_dt"],
merged_df["OS"],
),
)
return merged_df
def sbe43_oxy_fit(merged_df, sbe_coef0=None, f_suffix=None):
# Plot data to be fit together
f_out = f"{cfg.fig_dirs['ox']}sbe43_residual{f_suffix}_prefit.pdf"
ctd_plots._intermediate_residual_plot(
merged_df["REFOXY"] - merged_df["CTDOXY"],
merged_df["CTDPRS"],
merged_df["SSSCC"],
xlabel="CTDOXY Residual (umol/kg)",
f_out=f_out,
xlim=(-10, 10),
)
bad_df = pd.DataFrame() # initialize DF for questionable values
if sbe_coef0 is None:
sbe_coef0 = _get_sbe_coef() # load initial coefficient guess
# Curve fit (weighted)
weights = calculate_weights(merged_df["CTDPRS"])
fit_vars = ["CTDOXYVOLTS", "CTDPRS", "CTDTMP", "dv_dt", "OS"]
fit_data = tuple(merged_df[v] for v in fit_vars)
res = scipy.optimize.minimize(
PMEL_oxy_weighted_residual,
x0=sbe_coef0,
args=(weights, fit_data, merged_df["REFOXY"]),
bounds=[(None, None), (None, None), (0, None), (None, None), (None, None)],
)
cfw_coefs = res.x
merged_df["CTDOXY"] = _PMEL_oxy_eq(cfw_coefs, fit_data)
merged_df["residual"] = merged_df["REFOXY"] - merged_df["CTDOXY"]
cutoff = 2.8 * np.std(merged_df["residual"])
thrown_values = merged_df[np.abs(merged_df["residual"]) > cutoff]
bad_df = pd.concat([bad_df, thrown_values])
merged_df = merged_df[np.abs(merged_df["residual"]) <= cutoff].copy()
while not thrown_values.empty: # runs as long as there are thrown_values
p0 = tuple(cfw_coefs) # initialize coefficients with previous results
weights = calculate_weights(merged_df["CTDPRS"])
fit_data = tuple(merged_df[v] for v in fit_vars) # merged_df changes each loop
res = scipy.optimize.minimize(
PMEL_oxy_weighted_residual,
x0=p0,
args=(weights, fit_data, merged_df["REFOXY"]),
bounds=[(None, None), (None, None), (0, None), (None, None), (None, None)],
)
cfw_coefs = res.x
merged_df["CTDOXY"] = _PMEL_oxy_eq(cfw_coefs, fit_data)
merged_df["residual"] = merged_df["REFOXY"] - merged_df["CTDOXY"]
cutoff = 2.8 * np.std(merged_df["residual"])
thrown_values = merged_df[np.abs(merged_df["residual"]) > cutoff]
# TODO: get some kind of logging in here in case things go awry
# e.g. count of thrown values, start/final stdev, failing to converge, etc.
bad_df = pd.concat([bad_df, thrown_values])
merged_df = merged_df[np.abs(merged_df["residual"]) <= cutoff].copy()
# intermediate plots to diagnose data chunks goodness
# TODO: implement into bokeh/flask dashboard
if f_suffix is not None:
f_out = f"{cfg.fig_dirs['ox']}sbe43_residual{f_suffix}.pdf"
ctd_plots._intermediate_residual_plot(
merged_df["residual"],
merged_df["CTDPRS"],
merged_df["SSSCC"],
xlabel="CTDOXY Residual (umol/kg)",
f_out=f_out,
xlim=(-10, 10),
)
merged_df["CTDOXY_FLAG_W"] = 2
bad_df["CTDOXY_FLAG_W"] = 3
df = pd.concat([merged_df, bad_df])
return cfw_coefs, df
def prepare_oxy(btl_df, time_df, ssscc_list):
"""
Calculate oxygen-related variables needed for calibration:
sigma, oxygen solubility (OS), and bottle oxygen
Parameters
----------
btl_df : DataFrame
CTD data at bottle stops
time_df : DataFrame
Continuous CTD data
ssscc_list : list of str
List of stations to process
    Returns
    -------
    bool
        True on completion (the DataFrames are modified in place)
    """
# Calculate SA and CT
btl_df["SA"] = gsw.SA_from_SP(
btl_df[cfg.column["sal"]],
btl_df[cfg.column["p"]],
btl_df[cfg.column["lon"]],
btl_df[cfg.column["lat"]],
)
btl_df["CT"] = gsw.CT_from_t(
btl_df["SA"],
btl_df[cfg.column["t1"]], # oxygen sensor is on primary line (ie t1)
btl_df[cfg.column["p"]],
)
time_df["SA"] = gsw.SA_from_SP(
time_df[cfg.column["sal"]],
time_df[cfg.column["p"]],
time_df[cfg.column["lon"]],
time_df[cfg.column["lat"]],
)
time_df["CT"] = gsw.CT_from_t(
time_df["SA"],
time_df[cfg.column["t1"]], # oxygen sensor is on primary line (ie t1)
time_df[cfg.column["p"]],
)
# calculate sigma
btl_df["sigma_btl"] = gsw.sigma0(btl_df["SA"], btl_df["CT"])
time_df["sigma_btl"] = gsw.sigma0(time_df["SA"], time_df["CT"])
# Calculate oxygen solubility in µmol/kg
btl_df["OS"] = gsw.O2sol(
btl_df["SA"],
btl_df["CT"],
btl_df[cfg.column["p"]],
btl_df[cfg.column["lon"]],
btl_df[cfg.column["lat"]],
)
time_df["OS"] = gsw.O2sol(
time_df["SA"],
time_df["CT"],
time_df[cfg.column["p"]],
time_df[cfg.column["lon"]],
time_df[cfg.column["lat"]],
)
# Convert CTDOXY units
btl_df["CTDOXY"] = oxy_ml_to_umolkg(btl_df["CTDOXY1"], btl_df["sigma_btl"])
# Calculate bottle oxygen
btl_df[cfg.column["refO"]] = calculate_bottle_oxygen(
ssscc_list,
btl_df["SSSCC"],
btl_df["TITR_VOL"],
btl_df["TITR_TEMP"],
btl_df["FLASKNO"],
)
btl_df[cfg.column["refO"]] = oxy_ml_to_umolkg(
btl_df[cfg.column["refO"]], btl_df["sigma_btl"]
)
btl_df["OXYGEN_FLAG_W"] = flagging.nan_values(btl_df[cfg.column["refO"]])
# Load manual OXYGEN flags
if Path("data/oxygen/manual_oxy_flags.csv").exists():
manual_flags = pd.read_csv(
"data/oxygen/manual_oxy_flags.csv", dtype={"SSSCC": str}
)
for _, flags in manual_flags.iterrows():
df_row = (btl_df["SSSCC"] == flags["SSSCC"]) & (
btl_df["btl_fire_num"] == flags["SAMPNO"]
)
btl_df.loc[df_row, "OXYGEN_FLAG_W"] = flags["Flag"]
return True
def calibrate_oxy(btl_df, time_df, ssscc_list):
"""
Non-linear least squares fit chemical sensor oxygen against bottle oxygen.
Parameters
----------
btl_df : DataFrame
CTD data at bottle stops
time_df : DataFrame
Continuous CTD data
ssscc_list : list of str
List of stations to process
    Returns
    -------
    bool
        True on completion (the DataFrames are modified in place)
    """
log.info("Calibrating oxygen (SBE43)")
# Plot all pre fit data
f_out = f"{cfg.fig_dirs['ox']}sbe43_residual_all_prefit.pdf"
ctd_plots._intermediate_residual_plot(
btl_df["OXYGEN"] - btl_df["CTDOXY"],
btl_df["CTDPRS"],
btl_df["SSSCC"],
xlabel="CTDOXY Residual (umol/kg)",
f_out=f_out,
xlim=(-10, 10),
)
# Prep vars, dfs, etc.
all_sbe43_merged = pd.DataFrame()
sbe43_dict = {}
all_sbe43_fit = pd.DataFrame()
btl_df["dv_dt"] = np.nan # initialize column
# Density match time/btl oxy dataframes
for ssscc in ssscc_list:
time_data = time_df[time_df["SSSCC"] == ssscc].copy()
btl_data = btl_df[btl_df["SSSCC"] == ssscc].copy()
# can't calibrate without bottle oxygen ("OXYGEN")
if (btl_data["OXYGEN_FLAG_W"] == 9).all():
sbe43_dict[ssscc] = np.full(5, np.nan)
log.warning(ssscc + " skipped, all oxy data is NaN")
continue
sbe43_merged = match_sigmas(
btl_data[cfg.column["p"]],
btl_data[cfg.column["refO"]],
btl_data["CTDTMP1"],
btl_data["SA"],
time_data["OS"],
time_data[cfg.column["p"]],
time_data[cfg.column["t1"]],
time_data["SA"],
time_data[cfg.column["oxyvolts"]],
time_data["scan_datetime"],
)
sbe43_merged = sbe43_merged.reindex(btl_data.index) # add nan rows back in
btl_df.loc[
btl_df["SSSCC"] == ssscc, ["CTDOXYVOLTS", "dv_dt", "OS"]
] = sbe43_merged[["CTDOXYVOLTS", "dv_dt", "OS"]]
sbe43_merged["SSSCC"] = ssscc
all_sbe43_merged = pd.concat([all_sbe43_merged, sbe43_merged])
log.info(ssscc + " density matching done")
# Only fit using OXYGEN flagged good (2)
all_sbe43_merged = all_sbe43_merged[btl_df["OXYGEN_FLAG_W"] == 2].copy()
# Fit ALL oxygen stations together to get initial coefficient guess
(sbe_coef0, _) = sbe43_oxy_fit(all_sbe43_merged, f_suffix="_ox0")
sbe43_dict["ox0"] = sbe_coef0
# Fit each cast individually
for ssscc in ssscc_list:
sbe_coef, sbe_df = sbe43_oxy_fit(
all_sbe43_merged.loc[all_sbe43_merged["SSSCC"] == ssscc].copy(),
sbe_coef0=sbe_coef0,
f_suffix=f"_{ssscc}",
)
# build coef dictionary
if ssscc not in sbe43_dict.keys(): # don't overwrite NaN'd stations
sbe43_dict[ssscc] = sbe_coef
# all non-NaN oxygen data with flags
all_sbe43_fit = pd.concat([all_sbe43_fit, sbe_df])
# TODO: save outlier data from fits?
# TODO: secondary oxygen flagging step (instead of just taking outliers from fit routine)
# apply coefs
time_df["CTDOXY"] = np.nan
for ssscc in ssscc_list:
if np.isnan(sbe43_dict[ssscc]).all():
log.warning(
f"{ssscc} missing oxy data, leaving nan values and flagging as 9"
)
time_df.loc[time_df["SSSCC"] == ssscc, "CTDOXY_FLAG_W"] = 9
continue
btl_rows = (btl_df["SSSCC"] == ssscc).values
time_rows = (time_df["SSSCC"] == ssscc).values
btl_df.loc[btl_rows, "CTDOXY"] = _PMEL_oxy_eq(
sbe43_dict[ssscc],
(
btl_df.loc[btl_rows, cfg.column["oxyvolts"]],
btl_df.loc[btl_rows, cfg.column["p"]],
btl_df.loc[btl_rows, cfg.column["t1"]],
btl_df.loc[btl_rows, "dv_dt"],
btl_df.loc[btl_rows, "OS"],
),
)
log.info(ssscc + " btl data fitting done")
time_df.loc[time_rows, "CTDOXY"] = _PMEL_oxy_eq(
sbe43_dict[ssscc],
(
time_df.loc[time_rows, cfg.column["oxyvolts"]],
time_df.loc[time_rows, cfg.column["p"]],
time_df.loc[time_rows, cfg.column["t1"]],
time_df.loc[time_rows, "dv_dt"],
time_df.loc[time_rows, "OS"],
),
)
log.info(ssscc + " time data fitting done")
# flag CTDOXY with more than 1% difference
time_df["CTDOXY_FLAG_W"] = 2 # TODO: actual flagging of some kind?
btl_df["CTDOXY_FLAG_W"] = flagging.by_percent_diff(
btl_df["CTDOXY"], btl_df["OXYGEN"], percent_thresh=1
)
# Plot all post fit data
f_out = f"{cfg.fig_dirs['ox']}sbe43_residual_all_postfit.pdf"
ctd_plots._intermediate_residual_plot(
btl_df["OXYGEN"] - btl_df["CTDOXY"],
btl_df["CTDPRS"],
btl_df["SSSCC"],
xlabel="CTDOXY Residual (umol/kg)",
f_out=f_out,
xlim=(-10, 10),
)
f_out = f"{cfg.fig_dirs['ox']}sbe43_residual_all_postfit_flag2.pdf"
flag2 = btl_df["CTDOXY_FLAG_W"] == 2
ctd_plots._intermediate_residual_plot(
btl_df.loc[flag2, "OXYGEN"] - btl_df.loc[flag2, "CTDOXY"],
btl_df.loc[flag2, "CTDPRS"],
btl_df.loc[flag2, "SSSCC"],
xlabel="CTDOXY Residual (umol/kg)",
f_out=f_out,
xlim=(-10, 10),
)
# export fitting coefs
sbe43_coefs = pd.DataFrame.from_dict(
sbe43_dict, orient="index", columns=["Soc", "Voffset", "Tau20", "Tcorr", "E"]
).applymap(lambda x: np.format_float_scientific(x, precision=4, exp_digits=1))
sbe43_coefs.to_csv(cfg.dirs["logs"] + "sbe43_coefs.csv")
return True
|
{"hexsha": "b82a08f74b927be5d9eefecc61deaa56b3f69427", "size": 28631, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctdcal/oxy_fitting.py", "max_stars_repo_name": "lmerchant/ctdcal", "max_stars_repo_head_hexsha": "0b8d3312ca5720d6b934f7d7f87b765e549d8dba", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-10T17:18:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:32:53.000Z", "max_issues_repo_path": "ctdcal/oxy_fitting.py", "max_issues_repo_name": "lmerchant/ctdcal", "max_issues_repo_head_hexsha": "0b8d3312ca5720d6b934f7d7f87b765e549d8dba", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2019-10-03T23:16:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T21:30:21.000Z", "max_forks_repo_path": "ctdcal/oxy_fitting.py", "max_forks_repo_name": "lmerchant/ctdcal", "max_forks_repo_head_hexsha": "0b8d3312ca5720d6b934f7d7f87b765e549d8dba", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-23T23:09:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-15T05:17:55.000Z", "avg_line_length": 31.8830734967, "max_line_length": 93, "alphanum_fraction": 0.6051482659, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8218}
|
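# Context note (added; not part of the original test file): for order=1 these
# matrices match the classical Fourier spectral differentiation matrices on n
# equispaced periodic points, D[i,j] = (-1)^(i-j)/2 * csc((i-j)*pi/n) for odd n
# and (-1)^(i-j)/2 * cot((i-j)*pi/n) for even n (cf. Trefethen, "Spectral
# Methods in MATLAB"); e.g. for n = 5, csc(pi/5)/2 ~= 0.8506508083520398.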
@testset "fourier_diff" begin
@test fourier_diff(5, order=1) ≈ [0.0 0.8506508083520398 -0.5257311121191336 0.5257311121191336 -0.8506508083520399; -0.8506508083520399 0.0 0.8506508083520398 -0.5257311121191336 0.5257311121191336; 0.5257311121191336 -0.8506508083520399 0.0 0.8506508083520398 -0.5257311121191336; -0.5257311121191336 0.5257311121191336 -0.8506508083520399 0.0 0.8506508083520398; 0.8506508083520398 -0.5257311121191336 0.5257311121191336 -0.8506508083520399 0.0]
@test fourier_diff(6, order=1) ≈ [0.0 0.8660254037844387 -0.2886751345948127 -3.061616997868383e-17 0.288675134594813 -0.8660254037844387; -0.8660254037844387 0.0 0.8660254037844387 -0.2886751345948127 -3.061616997868383e-17 0.288675134594813; 0.288675134594813 -0.8660254037844387 0.0 0.8660254037844387 -0.2886751345948127 -3.061616997868383e-17; -3.061616997868383e-17 0.288675134594813 -0.8660254037844387 0.0 0.8660254037844387 -0.2886751345948127; -0.2886751345948127 -3.061616997868383e-17 0.288675134594813 -0.8660254037844387 0.0 0.8660254037844387; 0.8660254037844387 -0.2886751345948127 -3.061616997868383e-17 0.288675134594813 -0.8660254037844387 0.0]
@test fourier_diff(BigFloat, 5, order=1) ≈ [BigFloat("0.0") BigFloat("0.850650808352039932181540497063011072240401403764816881836740242377884047363955") BigFloat("-0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("-0.8506508083520399321815404970630110722404014037648168818367402423778840473639636"); BigFloat("-0.8506508083520399321815404970630110722404014037648168818367402423778840473639636") BigFloat("0.0") BigFloat("0.850650808352039932181540497063011072240401403764816881836740242377884047363955") BigFloat("-0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("0.5257311121191336060256690848478766072854979322433417815289355232412111464032185"); BigFloat("0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("-0.8506508083520399321815404970630110722404014037648168818367402423778840473639636") BigFloat("0.0") BigFloat("0.850650808352039932181540497063011072240401403764816881836740242377884047363955") BigFloat("-0.5257311121191336060256690848478766072854979322433417815289355232412111464032185"); BigFloat("-0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("-0.8506508083520399321815404970630110722404014037648168818367402423778840473639636") BigFloat("0.0") BigFloat("0.850650808352039932181540497063011072240401403764816881836740242377884047363955"); BigFloat("0.850650808352039932181540497063011072240401403764816881836740242377884047363955") BigFloat("-0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("0.5257311121191336060256690848478766072854979322433417815289355232412111464032185") BigFloat("-0.8506508083520399321815404970630110722404014037648168818367402423778840473639636") BigFloat("0.0")]
@test fourier_diff(BigFloat, 6, order=1) ≈ [BigFloat("0.0") BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383") BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749") BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78") BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619") BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902"); BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902") BigFloat("0.0") BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383") BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749") BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78") BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619"); BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619") BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902") BigFloat("0.0") BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383") BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749") BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78"); BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78") BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619") BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902") BigFloat("0.0") BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383") BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749"); BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749") BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78") BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619") BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902") BigFloat("0.0") BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383"); BigFloat("0.8660254037844386467637231707529361834714026269051903140279034897259665084543383") BigFloat("-0.2886751345948128822545743902509787278238008756350634380093011632419888361514749") BigFloat("-2.742293602448380191855326565989245052626895591271719877947514292480356721283385e-78") BigFloat("0.2886751345948128822545743902509787278238008756350634380093011632419888361514619") BigFloat("-0.8660254037844386467637231707529361834714026269051903140279034897259665084543902") BigFloat("0.0")]
@test fourier_diff(5, order=2) ≈ [-2.0 1.1708203932499366 -0.1708203932499369 -0.17082039324993695 1.170820393249937; 1.170820393249937 -2.0 1.1708203932499366 -0.1708203932499369 -0.17082039324993695; -0.17082039324993695 1.170820393249937 -2.0 1.1708203932499366 -0.1708203932499369; -0.1708203932499369 -0.17082039324993695 1.170820393249937 -2.0 1.1708203932499366; 1.1708203932499366 -0.1708203932499369 -0.17082039324993695 1.170820393249937 -2.0]
@test fourier_diff(6, order=2) ≈ [-3.1666666666666665 2.000000000000001 -0.6666666666666666 0.5 -0.6666666666666669 2.000000000000001; 2.000000000000001 -3.1666666666666665 2.000000000000001 -0.6666666666666666 0.5 -0.6666666666666669; -0.6666666666666669 2.000000000000001 -3.1666666666666665 2.000000000000001 -0.6666666666666666 0.5; 0.5 -0.6666666666666669 2.000000000000001 -3.1666666666666665 2.000000000000001 -0.6666666666666666; -0.6666666666666666 0.5 -0.6666666666666669 2.000000000000001 -3.1666666666666665 2.000000000000001; 2.000000000000001 -0.6666666666666666 0.5 -0.6666666666666669 2.000000000000001 -3.1666666666666665]
@test fourier_diff(BigFloat, 5, order=2) ≈ [BigFloat("-1.999999999999999999999999999999999999999999999999999999999999999999999999999983") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691329") BigFloat("-0.170820393249936908922752100619382870632185507883457717281269173623156277691348") BigFloat("-0.1708203932499369089227521006193828706321855078834577172812691736231562776913437") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691346"); BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691346") BigFloat("-1.999999999999999999999999999999999999999999999999999999999999999999999999999983") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691329") BigFloat("-0.170820393249936908922752100619382870632185507883457717281269173623156277691348") BigFloat("-0.1708203932499369089227521006193828706321855078834577172812691736231562776913437"); BigFloat("-0.1708203932499369089227521006193828706321855078834577172812691736231562776913437") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691346") BigFloat("-1.999999999999999999999999999999999999999999999999999999999999999999999999999983") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691329") BigFloat("-0.170820393249936908922752100619382870632185507883457717281269173623156277691348"); BigFloat("-0.170820393249936908922752100619382870632185507883457717281269173623156277691348") BigFloat("-0.1708203932499369089227521006193828706321855078834577172812691736231562776913437") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691346") BigFloat("-1.999999999999999999999999999999999999999999999999999999999999999999999999999983") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691329"); BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691329") BigFloat("-0.170820393249936908922752100619382870632185507883457717281269173623156277691348") BigFloat("-0.1708203932499369089227521006193828706321855078834577172812691736231562776913437") BigFloat("1.170820393249936908922752100619382870632185507883457717281269173623156277691346") BigFloat("-1.999999999999999999999999999999999999999999999999999999999999999999999999999983")]
@test fourier_diff(BigFloat, 6, order=2) ≈ [BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868") BigFloat("0.50") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965"); BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965") BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868") BigFloat("0.50") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609"); BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965") BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868") BigFloat("0.50"); BigFloat("0.50") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965") BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868"); BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868") BigFloat("0.50") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965") BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793"); BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999793") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666868") BigFloat("0.50") BigFloat("-0.6666666666666666666666666666666666666666666666666666666666666666666666666666609") BigFloat("1.999999999999999999999999999999999999999999999999999999999999999999999999999965") BigFloat("-3.166666666666666666666666666666666666666666666666666666666666666666666666666678")]
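# When `order` is omitted it should default to 1.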
@test fourier_diff(5) == fourier_diff(5, order=1)
end
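
# Illustrative cross-check (a sketch, not part of the reference data above):
# spectral differentiation matrices are exact on low-wavenumber trigonometric
# functions, so applying them to sin on the equispaced grid x_j = 2πj/N should
# recover cos (order 1) and -sin (order 2) up to rounding. This assumes the
# [0, 2π) grid convention implied by the entries above (e.g. the √3/2
# off-diagonal for N = 6) and that `Test` is in scope, as elsewhere in this file.
let N = 6
    x = 2π .* (0:N-1) ./ N
    D1 = fourier_diff(N)            # first-derivative matrix
    D2 = fourier_diff(N, order=2)   # second-derivative matrix
    @test D1 * sin.(x) ≈ cos.(x)    # d/dx sin = cos
    @test D2 * sin.(x) ≈ -sin.(x)   # d²/dx² sin = -sin
end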