def analysis():
    import numpy as np
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt
    from scipy import stats

    file = input('Enter the file name ')
    df = pd.read_csv(file, index_col=0)

    print('\n')
    print('\033[1m' + 'Summary Statistics' + '\033[0m')
    print('\n')
    print('There are a total of {0} rows and {1} columns.'.format(df.shape[0], df.shape[1]))
    print('\n')
    print('Columns in data: {}'.format(list(df.columns)))
    print('\n')

    # Split columns by dtype
    Numeric, Categorical, datetime = [], [], []
    for i in df.columns:
        if df[i].dtype == 'int64':
            Numeric.append(i)
        elif df[i].dtype == 'float64':
            Numeric.append(i)
        elif df[i].dtype == 'object':
            Categorical.append(i)
        else:
            datetime.append(i)
    print('There are {0} Numeric, {1} Categorical, and {2} Datetime features in the dataset.'.format(len(Numeric), len(Categorical), len(datetime)))
    print('\n')
    print(df.head(5))
    print('\n')
    print(df.tail(5))
    print('\n')

    # Missing-value report
    missing_value = pd.DataFrame({
        'Missing Value': df.isnull().sum(),
        'Percentage': (df.isnull().sum() / len(df)) * 100})
    print(missing_value.sort_values(by='Percentage', ascending=False))
    print('\n')
    print(df.describe())
    print('\n')
    try:
        print(df.describe(include=object))
    except ValueError as e:
        print(e)
    print('\n')

    # Outlier detection on numeric columns
    df_num_features = df.select_dtypes(include=np.number)
    print('Outliers using Z-score \n')
    for i in df_num_features.columns:
        threshold = 3
        mean = df[i].mean()
        std = df[i].std()
        outliers = []
        for value in df[i]:
            zscore = (value - mean) / std
            if abs(zscore) > threshold:
                outliers.append(value)
        print('The count of outliers in the column {0} is {1}'.format(i, len(outliers)))

    print('\n Outliers using IQR \n')
    Q1 = df_num_features.quantile(0.25)
    Q3 = df_num_features.quantile(0.75)
    IQR = Q3 - Q1
    outlier = pd.DataFrame((df_num_features < (Q1 - 1.5 * IQR)) | (df_num_features > (Q3 + 1.5 * IQR)))
    for i in outlier.columns:
        print('Total number of outliers in column {} is {}'.format(i, len(outlier[outlier[i] == True][i])))

analysis()
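The per-value Z-score loop above can also be written in vectorized form; a minimal sketch, assuming `df_num_features` and the threshold of 3 from the function above:

# Hedged alternative: vectorized Z-score outlier count, one pass per column.
z = (df_num_features - df_num_features.mean()) / df_num_features.std()
print((z.abs() > 3).sum())  # Series of per-column outlier counts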
|
{"hexsha": "6bcca5b1ec4e9d40957ddcfb60033a7489020361", "size": 2099, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/analytica/__init__.py", "max_stars_repo_name": "d0r1h/Analytica", "max_stars_repo_head_hexsha": "36afee1e2574bd1d3451ebe539b0c7283c3a27cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build/lib/analytica/__init__.py", "max_issues_repo_name": "d0r1h/Analytica", "max_issues_repo_head_hexsha": "36afee1e2574bd1d3451ebe539b0c7283c3a27cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/analytica/__init__.py", "max_forks_repo_name": "d0r1h/Analytica", "max_forks_repo_head_hexsha": "36afee1e2574bd1d3451ebe539b0c7283c3a27cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5696202532, "max_line_length": 142, "alphanum_fraction": 0.634587899, "include": true, "reason": "import numpy,from scipy", "num_tokens": 590}
|
from kernel_exp_family.examples.tools import pdf_grid, visualise_array
from kernel_hmc.tools.mcmc_convergence import autocorr
import matplotlib.pyplot as plt
import numpy as np

def visualise_trajectory(Qs, acc_probs, log_pdf_q, D, log_pdf=None):
    assert Qs.ndim == 2

    plot_density = log_pdf is not None and D == 2

    plt.figure(figsize=(10, 12))
    plt.subplot(411)

    # plot density if given and dimension is 2
    if plot_density:
        Xs = np.linspace(-30, 30, 75)
        Ys = np.linspace(-10, 20, len(Xs))
        D, G = pdf_grid(Xs, Ys, log_pdf)
        visualise_array(Xs, Ys, D)
    plt.plot(Qs[:, 0], Qs[:, 1])
    plt.plot(Qs[0, 0], Qs[0, 1], 'r*', markersize=15)
    plt.title("Log-pdf surrogate")

    plt.subplot(412)
    if plot_density:
        visualise_array(Xs, Ys, G)
    plt.plot(Qs[:, 0], Qs[:, 1])
    plt.plot(Qs[0, 0], Qs[0, 1], 'r*', markersize=15)
    plt.title("Gradient norm surrogate")

    plt.subplot(413)
    plt.title("Acceptance probability")
    plt.xlabel("Leap frog iteration")
    plt.plot(acc_probs)
    plt.plot([0, len(acc_probs)], [np.mean(acc_probs) for _ in range(2)], 'r--')
    plt.xlim([0, len(acc_probs)])

    plt.subplot(414)
    plt.title("Target log-pdf")
    plt.xlabel("Leap frog iteration")
    plt.plot(log_pdf_q)
    plt.xlim([0, len(log_pdf_q)])

def visualise_trace(samples, log_pdf_trajectory, accepted, step_sizes=None, log_pdf_density=None, idx0=0, idx1=1):
    assert samples.ndim == 2
    D = samples.shape[1]

    plt.figure(figsize=(15, 12))

    plt.subplot(421)
    plt.plot(samples[:, idx0])
    plt.title("Trace $x_%d$" % (idx0 + 1))
    plt.xlabel("MCMC iteration")
    plt.grid(True)

    plt.subplot(422)
    plt.plot(samples[:, idx1])
    plt.title("Trace $x_%d$" % (idx1 + 1))
    plt.xlabel("MCMC iteration")
    plt.grid(True)

    plt.subplot(423)
    if log_pdf_density is not None and D == 2:
        Xs = np.linspace(-28, 28, 50)
        Ys = np.linspace(-6, 16, len(Xs))
        D, _ = pdf_grid(Xs, Ys, log_pdf_density)
        visualise_array(Xs, Ys, D)
    plt.plot(samples[:, idx0], samples[:, idx1])
    plt.title("Trace $(x_%d, x_%d)$" % (idx0 + 1, idx1 + 1))
    plt.grid(True)
    plt.xlabel("$x_%d$" % (idx0 + 1))
    plt.ylabel("$x_%d$" % (idx1 + 1))

    plt.subplot(424)
    plt.plot(log_pdf_trajectory)
    plt.title("Log-pdf along trajectory")
    plt.xlabel("MCMC iteration")
    plt.grid(True)

    plt.subplot(425)
    plt.plot(autocorr(samples[:, idx0]))
    plt.title("Autocorrelation $x_%d$" % (idx0 + 1))
    plt.xlabel("Lag")
    plt.grid(True)

    plt.subplot(426)
    plt.plot(autocorr(samples[:, idx1]))
    plt.title("Autocorrelation $x_%d$" % (idx1 + 1))
    plt.xlabel("Lag")
    plt.grid(True)

    plt.subplot(427)
    plt.plot(np.cumsum(accepted) / np.arange(1, len(accepted) + 1))
    plt.title("Average acceptance rate")
    plt.xlabel("MCMC iterations")
    plt.grid(True)

    if step_sizes is not None:
        plt.subplot(428)
        if step_sizes.ndim > 1:
            for i in range(step_sizes.shape[1]):
                plt.plot(step_sizes[:, i])
            plt.title("Step sizes")
        else:
            plt.plot(step_sizes)
            plt.title("Step size")
        plt.xlabel("MCMC iterations")
        plt.grid(True)
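A minimal usage sketch for `visualise_trace`, assuming the imports at the top of this file are available; the chain, log-pdf trajectory, and acceptance indicators below are synthetic stand-ins, not output of a real sampler:

samples = np.random.randn(1000, 2)              # stand-in for an MCMC chain
log_pdfs = -0.5 * np.sum(samples ** 2, axis=1)  # stand-in log-pdf trajectory
accepted = np.random.rand(1000) < 0.7           # stand-in acceptance indicators
visualise_trace(samples, log_pdfs, accepted)
plt.show()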
|
{"hexsha": "971ce94b3676385f3eac9cd769b4cfdba5470a92", "size": 3334, "ext": "py", "lang": "Python", "max_stars_repo_path": "kernel_hmc/examples/plotting.py", "max_stars_repo_name": "karlnapf/kernel_hmc", "max_stars_repo_head_hexsha": "8ab93ae0470cc5916d5349b40bae7f91075bc385", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2015-11-23T21:28:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T20:29:14.000Z", "max_issues_repo_path": "kernel_hmc/examples/plotting.py", "max_issues_repo_name": "karlnapf/kernel_hmc", "max_issues_repo_head_hexsha": "8ab93ae0470cc5916d5349b40bae7f91075bc385", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kernel_hmc/examples/plotting.py", "max_forks_repo_name": "karlnapf/kernel_hmc", "max_forks_repo_head_hexsha": "8ab93ae0470cc5916d5349b40bae7f91075bc385", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-12-08T14:29:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T04:38:35.000Z", "avg_line_length": 29.5044247788, "max_line_length": 114, "alphanum_fraction": 0.5947810438, "include": true, "reason": "import numpy", "num_tokens": 982}
|
\newcommand{\symb}[2]{\makebox[6em][l]{#1} #2}% used to generate the list of symbols
\chapter{List of Symbols}
\symb{$i$}{Unit imaginary number; or, an index of numbers}\\
\symb{$j$, $ j' $}{Fine-structure angular momentum quantum number of individual atoms in a ground state or excited state (with prime), respectively; or, indices of numbers}\\
\symb{$f$, $ f' $}{Hyperfine-structure or hyperfine angular momentum quantum number of individual atoms in a ground state or excited state (with prime), respectively}\\
\symb{$J$, $ J' $}{The collective fine-structure angular momentum quantum number of an atomic ensemble in a ground state or excited state, respectively}\\
\symb{$F$, $ F' $}{The collective hyperfine-structure angular momentum quantum number of an atomic ensemble in a ground state or excited state, respectively}\\
\symb{$ \hat{\mathbf{S}}$, $\hat{S}_i$}{Stokes operator and its $i$-th component $ (i=0,1,2,3) $}\\
\symb{$\mathbf{e}_i$}{The unit vector along the $ i $ direction}\\
\symb{$\mathbf{u}\mathbf{v}$}{The outer product of vector $ \mathbf{u} $ and vector $ \mathbf{v} $ to form a tensor}\\
\symb{$\delta_{jk}$}{Kronecker delta function}\\
\symb{$\delta(x)$}{Dirac delta function}\\
\symb{$\dt A$}{Time derivative of $A$}\\
\symb{$A^\dagger$}{Hermitian conjugate of $A$}\\
\symb{$A^*$}{Complex conjugate of $A$}\\
\symb{$A^\transp$}{Transpose of $A$}\\
\symb{$\hat{A}^{(n)}$}{Operator $ \hat{A} $ on the $ n $th atom}\\
\symb{$\hat{\rho}^{\otimes N}$}{Tensor product of $ N $ copies of the operator $ \hat{\rho} $}\\
\symb{$\det(A)$}{Determinant of $A$}\\
\symb{$\tr(A)$, $ \tr\left[ A\right] $}{Trace of $A$}\\
%\symb{$\av{A}$}{Expectation value of $A$}\\
\symb{$[A,B]$}{Commutator of $A$ and $B$}\\
\symb{$\{A,B\}$}{Anticommutator of $A$ and $B$}\\
\symb{$\re(z)$}{Real part of $z$}\\
\symb{$\im(z)$}{Imaginary part of $z$}\\
\symb{$\mathrm{abs}[z]$, $ |z| $}{Absolute value of $z$}\\
\symb{$\identity$, $\nullmatrix$}{The identity matrix and null matrix}\\
\symb{$ \hat{\mathbbm{1}} $}{Identity operator}\\
\symb{$\unittensor$}{The identity tensor}\\
\symb{$ \expect{\hat{A}} $}{Expectation value of operator $ \hat{A} $}\\
\symb{$ \Delta A^2 $, $ \expect{\Delta A^2} $}{Variance of operator $ \hat{A} $: $ \expect{\hat{A}^2}-\expect{\hat{A}}^2 $}\\
\symb{$ \Delta A $, $ \expect{\Delta A} $}{The square root of $ \Delta A^2 $}\\
\symb{$ \expect{\Delta A \Delta B} $}{Covariance of operators $ \hat{A} $ and $ \hat{B} $}\\
\symb{$\Delta$}{Detuning}\\
\symb{$\Omega$}{Rabi frequency}\\
\symb{$\xi^2$}{Metrologically relevant squeezing parameter}\\
\symb{$\chi$}{Coupling strength between atoms and light}\\
\symb{$\kappa$}{Measurement strength}\\
\symb{$\gamma_s$}{Characteristic photon scattering rate}\\
\symb{$\gamma_{op}$}{Optical pumping rate}\\
\symb{$I_{\rm in}$}{Intensity of input light}\\
\symb{$\br$,\,$\br'$}{Full spatial position vectors of a detector and the source}\\
\symb{$\br_\perp$,\,$\br'_\perp$}{The transverse spatial position vectors of a detector and the source}\\
\symb{$n_1$}{Index of refraction of the core of a waveguide}\\
\symb{$n_2$}{Index of refraction of the cladding of a waveguide}\\
\symb{$\tensor{\mathbf{G}}$}{Tensors of spatial degrees of freedom}\\
\symb{$\tensor{\mathbf{G}}(\br,\br')$}{The dyadic Green's function or Green's function tensor, evaluated at position $ \br $ in response to a source at $ \br' $}\\
\symb{$\hat{\tensor{\boldsymbol{\alpha}}}$}{Polarizability tensor operator}\\
\symb{$\Gamma_0$}{The decay rate or spontaneous emission rate of an atom in free-space}\\
\symb{$\Gamma_{\rm 1D}$}{The decay rate or spontaneous emission rate of an atom coupled to guided modes}\\
\symb{$\hat{T}^{(K)}$}{Rank-$K$ tensor operator}\\
\symb{$C^{(K)}_{j'ff'}$, $C^{(K)}_{j'f}$}{Rank-$K$ hyperfine atomic transition coefficients defined by Eqs.~\eqref{eq:Cjpffp} and~\eqref{eq:Cjpf} }\\
\symb{$C^{f_1,m_1;f_2,m_2}_{f',m'}$}{Clebsch-Gordan coefficients $ \bra{f_1,m_1;f_2,m_2}f',m'\rangle $}\\
\symb{$o^{j'f'}_{jf}$}{Relative oscillator strengths defined by Eq.~\eqref{Eq::OscStrength}}\\
\symb{$ \sigma_0 $}{On-resonance cross section of an atom}\\
\symb{$ N_A $}{Total number of atoms}\\
\symb{$ \hat{N}_C $, $ N_C $}{Number operator in the \emph{clock space} and its expectation value}\\
\symb{$ C_1 $}{Cooperativity per atom}\\
\symb{OD, OD$/ N_A $}{Optical depth, and optical depth per atom}\\
\symb{$\hat{a}$, $\hat{a}^\dagger$ }{Annihilation and creation operators for bosonic modes}\\
\symb{$ \beta $, $\beta_0$}{Propagation constant of a guided mode}\\
\symb{$ k $, $k_0$}{The total wavenumber of a propagating light wave}\\
\symb{$ \omega $, $\omega_0$}{Angular frequency of light or an electromagnetic wave}\\
\symb{$\lambda$}{Wavelength of light or an electromagnetic wave}\\
\symb{$\ket{\uparrow}$}{Fiducial state}\\
\symb{$\ket{\downarrow}$}{Coupled state}\\
\symb{$\ket{T}$}{Transfer state}\\
\symb{$\mathrm{c.c.}$}{Complex conjugate}\\
\symb{$\mathrm{H.c.}$}{Hermitian conjugate}
%\symb{$\braket{\psi}{\uppsi}$}{Annihilation operator of the state $\psi(\xbf)$, i.e., $\int\uppsi(\xbf)\,\psi^*(\xbf)\,\dif \xbf$}\\
%\symb{$\braket{\uppsi}{\psi}$}{Creation operator of the state $\psi(\xbf)$, i.e., $\int\uppsi^\dagger(\xbf)\,\psi(\xbf)\,\dif \xbf$}\\
%\symb{$\ket{\varPsi}$}{Slanted capital Greek letters for many-body quantum states}\\
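% The covariance entry above is commonly taken in its symmetrized form; the rendering
% below is an assumption consistent with the variance definition in this list, not a
% definition stated elsewhere in the document:
% \expect{\Delta A \Delta B} = \tfrac{1}{2}\expect{\hat{A}\hat{B}+\hat{B}\hat{A}} - \expect{\hat{A}}\expect{\hat{B}}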
|
{"hexsha": "dc453eed5c728b9c809977a8f02812b8a484845a", "size": 5311, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "inputs/listofsymbols.tex", "max_stars_repo_name": "i2000s/PhD_Thesis", "max_stars_repo_head_hexsha": "a9bc6bc4213896c70c90cbb3d9b533782d428761", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-26T01:58:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T19:11:43.000Z", "max_issues_repo_path": "inputs/listofsymbols.tex", "max_issues_repo_name": "i2000s/PhD_Thesis", "max_issues_repo_head_hexsha": "a9bc6bc4213896c70c90cbb3d9b533782d428761", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-07-18T01:47:21.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-18T01:47:21.000Z", "max_forks_repo_path": "inputs/listofsymbols.tex", "max_forks_repo_name": "i2000s/PhD_Thesis", "max_forks_repo_head_hexsha": "a9bc6bc4213896c70c90cbb3d9b533782d428761", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-17T21:55:09.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-17T21:55:09.000Z", "avg_line_length": 68.0897435897, "max_line_length": 174, "alphanum_fraction": 0.6605159104, "num_tokens": 1805}
|
import js
from RobotRaconteur.Client import *
import importlib_resources
import traceback
import numpy as np
import base64

from pyri.webui_browser import util

class NewCameraIntrinsicCalibrationDialog:
    def __init__(self, new_name, core, device_manager):
        self.vue = None
        self.core = core
        self.device_manager = device_manager
        self.new_name = new_name

    def init_vue(self, vue):
        self.vue = vue

    def handle_create(self, *args):
        try:
            camera_local_device_name = self.vue["$data"].camera_selected
            image_sequence_global_name = self.vue["$data"].image_sequence_selected
            calib_target = self.vue["$data"].calibration_target_selected
            self.core.create_task(do_calibration(camera_local_device_name, image_sequence_global_name, calib_target, self.new_name, self.core))
        except Exception:
            traceback.print_exc()

    def handle_hidden(self, *args):
        try:
            l = self.vue["$el"]
            l.parentElement.removeChild(l)
        except Exception:
            traceback.print_exc()

async def do_show_new_camera_calibration_intrinsic_dialog(new_name: str, variable_type: str, variable_tags: str, core: "PyriWebUIBrowser"):
    try:
        core.device_manager.connect_device("vision_camera_calibration")
        dialog_html = importlib_resources.read_text(__package__, "new_calibrate_intrinsic_dialog.html")

        dialog_obj = NewCameraIntrinsicCalibrationDialog(new_name, core, core.device_manager)

        el = js.document.createElement('div')
        el.id = "new_calibrate_intrinsic_dialog_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        dialog = js.Vue.new(js.python_to_js({
            "el": "#new_calibrate_intrinsic_dialog_wrapper",
            "template": dialog_html,
            "data":
            {
                "camera_selected": "",
                "camera_select_options": [],
                "image_sequence_selected": "",
                "image_sequence_select_options": [],
                "calibration_target_selected": "",
                "calibration_target_select_options": []
            },
            "methods":
            {
                "handle_create": dialog_obj.handle_create,
                "handle_hidden": dialog_obj.handle_hidden
            }
        }))

        dialog_obj.init_vue(dialog)

        cameras = []
        camera_names = util.get_devices_with_type(core, "com.robotraconteur.imaging.Camera")
        cameras = util.device_names_to_dropdown_options(camera_names)

        dialog["$data"].camera_select_options = js.python_to_js(cameras)
        if len(cameras) > 0:
            dialog["$data"].camera_selected = cameras[0]["value"]

        db = core.device_manager.get_device_subscription("variable_storage").GetDefaultClient()
        seq_var_names = await db.async_filter_variables("globals", "", ["image_sequence"], None)
        seq_vars = []
        for v in seq_var_names:
            seq_vars.append({"value": v, "text": v})

        dialog["$data"].image_sequence_select_options = js.python_to_js(seq_vars)
        if len(seq_vars) > 0:
            dialog["$data"].image_sequence_selected = seq_vars[0]["value"]

        cal = [{"value": "chessboard", "text": "chessboard"}]
        dialog["$data"].calibration_target_select_options = js.python_to_js(cal)
        dialog["$data"].calibration_target_selected = "chessboard"

        dialog["$bvModal"].show("new_vision_camera_calibrate_intrinsic")
    except Exception:
        js.alert(f"Calibration failed:\n\n{traceback.format_exc()}")

def show_new_camera_calibration_intrinsic_dialog(new_name: str, variable_type: str, variable_tags: str, core: "PyriWebUIBrowser"):
    core.create_task(do_show_new_camera_calibration_intrinsic_dialog(new_name, variable_type, variable_tags, core))

async def do_calibration(camera_local_device_name, image_sequence_global_name, calib_target, new_name, core):
    try:
        camera_calib = core.device_manager.get_device_subscription("vision_camera_calibration").GetDefaultClient()
        calib_res = await camera_calib.async_calibrate_camera_intrinsic(camera_local_device_name, image_sequence_global_name, calib_target, new_name, None)
    except Exception:
        js.alert(f"Calibration failed:\n\n{traceback.format_exc()}")
        return
    try:
        do_show_new_camera_calibration_intrinsic_dialog2(new_name, calib_res.calibration, calib_res.display_images, core)
    except Exception:
        traceback.print_exc()

def do_show_new_camera_calibration_intrinsic_dialog2(new_name: str, calibration_res, display_images, core: "PyriWebUIBrowser"):
    try:
        dialog2_html = importlib_resources.read_text(__package__, "new_calibrate_intrinsic_dialog2.html")

        el = js.document.createElement('div')
        el.id = "new_calibrate_intrinsic_dialog2_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        def handle_hidden(*args):
            try:
                el.parentElement.removeChild(el)
            except Exception:
                traceback.print_exc()

        K = np.array_str(calibration_res.K, precision=4, suppress_small=True)
        dist = calibration_res.distortion_info.data
        k1 = f"{dist.k1:.4e}"
        k2 = f"{dist.k2:.4e}"
        p1 = f"{dist.p1:.4e}"
        p2 = f"{dist.p2:.4e}"
        k3 = f"{dist.k3:.4e}"

        imgs = []
        i = 0
        for d in display_images:
            d_encoded = base64.b64encode(d.data).decode('ascii')  # bytes -> base64 text
            d2 = {
                "id": i,
                "caption": f"Calibration image result {i+1}",
                "img": "data:image/jpeg;base64," + d_encoded
            }
            del d_encoded
            imgs.append(d2)
            i += 1
            # TODO: check for png?

        dialog = js.Vue.new(js.python_to_js({
            "el": "#new_calibrate_intrinsic_dialog2_wrapper",
            "template": dialog2_html,
            "data":
            {
                "K": K,
                "k1": k1,
                "k2": k2,
                "p1": p1,
                "p2": p2,
                "k3": k3,
                "display_images": imgs
            },
            "methods":
            {
                "handle_hidden": handle_hidden
            }
        }))

        dialog["$bvModal"].show("new_vision_camera_calibrate_intrinsic2")
    except Exception:
        traceback.print_exc()
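The image-embedding step above boils down to a small reusable pattern; a minimal sketch with a hypothetical helper name, assuming `raw` holds JPEG bytes:

import base64

def to_data_uri(raw: bytes, mime: str = "image/jpeg") -> str:
    # Encode raw image bytes as a data URI usable in an <img> src attribute.
    return f"data:{mime};base64," + base64.b64encode(raw).decode('ascii')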
|
{"hexsha": "b6b312416ef61bbe7db92b1e7a13a1a89f615c01", "size": 6386, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyri/vision_browser/dialogs/new_calibrate_intrinsic_dialog.py", "max_stars_repo_name": "pyri-project/pyri-vision-browser", "max_stars_repo_head_hexsha": "7cd501e4ec0633be16f5f6c62146a6e006163d49", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pyri/vision_browser/dialogs/new_calibrate_intrinsic_dialog.py", "max_issues_repo_name": "pyri-project/pyri-vision-browser", "max_issues_repo_head_hexsha": "7cd501e4ec0633be16f5f6c62146a6e006163d49", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pyri/vision_browser/dialogs/new_calibrate_intrinsic_dialog.py", "max_forks_repo_name": "pyri-project/pyri-vision-browser", "max_forks_repo_head_hexsha": "7cd501e4ec0633be16f5f6c62146a6e006163d49", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4914285714, "max_line_length": 155, "alphanum_fraction": 0.6262135922, "include": true, "reason": "import numpy", "num_tokens": 1388}
|
# ---------------------------------------------------------------------------- #
#
#   hdgSolveCDR.jl
#
#   Solve convection-diffusion equations (n-dimensional)
#
#   λυτέος
#   Fall 2017
#
#   Max Opgenoord
#
# ---------------------------------------------------------------------------- #

"""
    hdgSolve( master::Master, mesh::Mesh, problem::CDR )

Solves convection-diffusion equations for n-dimensional problems.
"""
function hdgSolve( master::Master, mesh::Mesh, problem::CDR; method=:LU)

  dim    = mesh.dim
  nelem  = size( mesh.nodes, 3 ) # Number of elements in mesh
  nnodes = size( mesh.nodes, 1 ) # Number of nodes in one element
  nodfac = master.nodfac         # Number of nodes on a face

  unkUhat = nodfac               # Total of uhat unknowns per face
  nfaces  = dim + 1              # Number of faces per element

  # Stability parameter
  τ = 5

  ### Initialize quantities
  # Local (element-wise) system blocks
  A = fill(0.0, nnodes*dim,    nnodes*dim)    # (v_i, q^h_i)_{T^h}
  B = fill(0.0, nnodes*dim,    nnodes)        # volume coupling of q^h to u^h
  C = fill(0.0, nnodes*dim,    nodfac*nfaces) # face coupling of q^h to \hat{u}^h
  N = fill(0.0, nnodes,        nnodes*dim)    # volume coupling of u^h to q^h
  D = fill(0.0, nnodes,        nnodes)        # local u^h block
  E = fill(0.0, nnodes,        nodfac*nfaces) # face coupling of u^h to \hat{u}^h
  K = fill(0.0, nodfac*nfaces, nnodes*dim)    # transmission coupling to q^h
  L = fill(0.0, nodfac*nfaces, nnodes)        # transmission coupling to u^h
  M = fill(0.0, nodfac*nfaces, nodfac*nfaces) # transmission coupling to \hat{u}^h
  G = fill(0.0, nodfac*nfaces, 1)             # boundary-condition RHS

  # Matrix with unknowns for uhath
  H = fill(0.0, nodfac*nfaces, nodfac*nfaces)
  R = fill(0.0, nodfac*nfaces, 1)             # RHS of the local transmission equations

  zm = fill( 0.0, nnodes*dim, 1)

  # Matrices that need to be saved in order to recover u and qh
  F    = fill(0.0, nnodes, 1, nelem)          # source term RHS per element
  ABND = fill(0.0, nnodes*(dim+1), nnodes*(dim+1), nelem)
  CE   = fill(0.0, nnodes*(dim+1), nodfac*nfaces, nelem)

  # Set up column and row indices for sparse matrix (integer indices, as `sparse` requires)
  indRow = fill( 0, (dim+1)^2 * nelem * unkUhat^2 )
  indCol = fill( 0, (dim+1)^2 * nelem * unkUhat^2 )
  indUnk = fill( 0.0, (dim+1)^2 * nelem * unkUhat^2 )

  Rfull = fill( 0.0, size(mesh.f,1)*unkUhat ) # RHS vector

  rr = 1 # iterator for unknowns in sparsity matrix

  ∇ϕc = fill(0.0, size(master.∇ϕ))

  κ = problem.κ
  c = problem.c

  # preallocate
  jcwd = fill( 0.0, size(master.∇ϕ,2), size(master.∇ϕ,2) )
  ∂ξ∂x = fill( 0.0, size(master.∇ϕ,2), dim^2 )
  ∂x∂ξ = fill( 0.0, size(master.∇ϕ,2), dim^2 )
  ϕjϕn = [Array{Float64}(nodfac,nodfac) for idx in 1:dim]

  for pp in 1:nelem # Loop over all elements

    # zero all matrices
    A .*= 0.0
    B .*= 0.0
    C .*= 0.0
    N .*= 0.0
    D .*= 0.0
    E .*= 0.0
    K .*= 0.0
    L .*= 0.0
    M .*= 0.0
    G .*= 0.0
    H .*= 0.0
    R .*= 0.0

    # Compute Jacobians
    compJacob!( master, mesh.nodes[:,:,pp], ∂ξ∂x, jcwd, ∂x∂ξ )
    pLoc = master.ϕ' * mesh.nodes[:,:,pp]
    ∇ϕc  = getderbfel( master, ∂ξ∂x )

    # mass matrices
    ϕj  = master.ϕ * jcwd
    ϕjϕ = master.ϕ * jcwd * master.ϕ'

    # ------------------------- Volume integrals ------------------------------- #
    ## A
    # (v,qₕ)_{Tₕ}
    for ii in 1:dim
      A[1+(ii-1)*nnodes:ii*nnodes, 1+(ii-1)*nnodes:ii*nnodes] = ϕjϕ
    end

    ## B
    # (w_{ij} , ϵ^h_{ij})_{T^h}
    for ii in 1:dim
      B[1+(ii-1)*nnodes:ii*nnodes, :] = ∇ϕc[:,:,ii] * ϕj'
    end

    ## N
    N += -κ * B'

    ## D (second part)
    # (w_{ij,j}, u^h_i)_{T^h} + (w_{ij,i}, u^h_j)_{T^h}
    for ii in 1:dim
      D -= c[ii] * ∇ϕc[:,:,ii] * ϕj'
    end

    ## F
    # (z_{ij} , σ^h_{ij})_{T^h}
    src = problem.source(pLoc)
    F[:,:,pp] = ϕj * src
    # -------------------------------------------------------------------------- #

    # ------------------------- Boundary integrals ----------------------------- #
    for qq in 1:nfaces # loop over number of faces

      (ϕdm, pdm, nod, normal, jcwdm) = compJacobFace( mesh, master, pp, qq )
      jcwddm = diagm( jcwdm )

      faceInd = (qq-1) * nodfac + ( 1:nodfac )

      cn = fill(0.0, size(normal,1))
      for ii in 1:dim
        cn += c[ii] * normal[:,ii]
      end

      # mass matrices
      ϕjf  = ϕdm * jcwddm
      ϕjϕf = ϕjf * ϕdm'
      for ii in 1:dim
        ϕjϕn[ii] = ϕjf * ( ϕdm * diagm(normal[:,ii]) )'
      end

      ## C
      # -<v_i, σ^h_{ij} n_j>_{∂T^h}
      for ii in 1:dim
        C[(ii-1)*nnodes + nod, faceInd] -= ϕjϕn[ii]
      end

      ## D
      # <v_i, τ_{ijkl} u^h_k * n_l * n_j >_{∂T^h}
      D[nod,nod] += τ * ϕjϕf

      ## E
      # <v_i, τ_{ijkl} \hat{u}^h_k * n_l * n_j >_{∂T^h}
      E[nod,faceInd] = - τ * ϕjϕf + ϕdm * ( diagm(cn) * jcwddm ) * ϕdm'

      indF = abs( mesh.t2f[pp,qq] )

      if ( mesh.f[ indF, end ] < 0 )
        # At boundary
        bNo = - mesh.f[ indF, end ]
        if problem.bctype[bNo] == 1
          # Dirichlet boundary condition
          ## M
          # (μ_{i} , \hat{u}^h_{i})_{∂Ω_D}
          M[faceInd,faceInd] = ϕjϕf
          # BC RHS
          ## G
          # (μ_{i} , \bar{u}_i)_{∂Ω_D}
          bcout = problem.bcfunc[bNo](pdm)
          G[faceInd, 1] = ϕjf * bcout
        elseif problem.bctype[bNo] == 2
          # Neumann boundary condition
          ## K
          # <μ_{i} , q_{ij}*n_j>_{∂Ω_N}
          for ii in 1:dim
            K[faceInd, (ii-1)*nnodes + nod] = ϕjϕn[ii]
          end
          # BC RHS
          ## G
          # (μ_{i} , \bar{∂u∂x})_{∂Ω_N}
          bcout = problem.bcfunc[bNo](pdm)
          G[faceInd, 1] = ϕjf * bcout
        else
          error("hdgSolve:: BC type not recognized. 1 = Dirichlet, 2 = Neumann")
        end
      else
        # In interior
        ## K
        # <μ_{i} , σ^h_{ij}*n_j>_{∂T^h\∂Ω_D}
        for ii in 1:dim
          K[faceInd, (ii-1)*nnodes + nod] = - ϕjϕn[ii]
        end
        ## L
        # <μ_{i} , -τ_{ijkl} u^h_k n_l n_j ) >_{∂T^h\∂Ω_D}
        L[faceInd,nod] = τ * ϕjϕf
        ## M
        # <μ_{i} , τ_{ijkl} \hat{u}^h_k n_l n_j ) >_{∂T^h\∂Ω_D}
        M[faceInd,faceInd] = - τ * ϕjϕf + ϕdm * ( diagm(cn) * jcwddm ) * ϕdm'
      end # boundary if-statement

    end # end loop over element faces
    # -------------------------------------------------------------------------- #

    # ------------------------- Form elemental quantities ---------------------- #
    ABND[:,:,pp] = [ A B;
                     N D ]
    CE[:,:,pp]   = [ C; E ]

    H += M - [K L] * ( ABND[:,:,pp] \ CE[:,:,pp] )
    R += G - [K L] * ( ABND[:,:,pp] \ [ zm; F[:,:,pp] ] )
    # -------------------------------------------------------------------------- #

    # ------------------------- Fill up complete H and R matrices -------------- #
    for qq = 1:nfaces

      indJ = [i for i=1:nfaces]
      deleteat!(indJ,qq)

      indFall = abs.(mesh.t2f[pp,1:nfaces])
      deleteat!(indFall,qq)

      indF = abs(mesh.t2f[pp,qq])

      Rfull[1+(indF-1)*unkUhat:indF*unkUhat] += R[1+(qq-1)*unkUhat:qq*unkUhat,1]

      indRow[rr:rr-1+unkUhat^2] = repmat( 1+(indF-1)*unkUhat:indF*unkUhat, unkUhat )
      indCol[rr:rr-1+unkUhat^2] = repmat( 1+(indF-1)*unkUhat:indF*unkUhat, 1, unkUhat )'[:]
      indUnk[rr:rr-1+unkUhat^2] = H[ 1+(qq-1)*unkUhat:qq*unkUhat, 1+(qq-1)*unkUhat:qq*unkUhat ][:]

      rr = rr + unkUhat^2

      for tt = 1:(nfaces-1)
        indRow[rr:rr-1+unkUhat^2] = repmat( 1+(indF-1)*unkUhat:indF*unkUhat, unkUhat )
        indCol[rr:rr-1+unkUhat^2] = repmat( 1+(indFall[tt]-1)*unkUhat:indFall[tt]*unkUhat, 1, unkUhat )'[:]
        indUnk[rr:rr-1+unkUhat^2] = H[ 1+(qq-1)*unkUhat:qq*unkUhat, 1+(indJ[tt]-1)*unkUhat:indJ[tt]*unkUhat ][:]

        rr = rr + unkUhat^2
      end

    end
    # -------------------------------------------------------------------------- #

  end # end element loop

  ### Compute approximate trace
  Hfull = sparse( indRow, indCol, indUnk, size(mesh.f,1) * unkUhat, size(mesh.f,1) * unkUhat )

  uhath = [0.]
  if method == :LU
    uhath = Hfull \ Rfull
  elseif method == :GMRES
    # Construct the ILU preconditioner used by the GMRES path
    fact = ILU.crout_ilu( Hfull, τ = 0.1 )
    uhath = IterativeSolvers.gmres( Hfull, Rfull, Pl=fact )
  end
  # ---------------------------------------------------------------------------- #

  ## Compute approximate scalar value and flux
  uhathTri = fill( 0.0, unkUhat*nfaces, 1 )

  uh = fill( 0.0, nnodes, 1,   nelem )
  qh = fill( 0.0, nnodes, dim, nelem )

  for pp in 1:nelem

    # ----------- Find uhath corresponding to this element ------------------- #
    indF = abs.( mesh.t2f[pp,1:nfaces] )
    for qq in 1:nfaces
      uhathTri[ 1+(qq-1)*unkUhat:qq*unkUhat, 1 ] = uhath[ 1+(indF[qq]-1)*unkUhat:indF[qq]*unkUhat ]
    end
    # ------------------------------------------------------------------------ #

    # ----------- Recover the scalar value and flux for this element --------- #
    rhsTemp = [ zm; F[:,:,pp] ] - CE[:,:,pp] * uhathTri
    uTemp   = ABND[:,:,pp] \ rhsTemp

    for ii in 1:dim
      qh[:,ii,pp] = uTemp[ 1+(ii-1)*nnodes:ii*nnodes ]
    end
    uh[:,1,pp] = uTemp[ 1+dim*nnodes:(dim+1)*nnodes ]

  end

  return ( uhath, uh, qh )

end # end hdgSolve
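The `indRow`/`indCol`/`indUnk` bookkeeping above is the standard triplet (COO) assembly pattern for the global sparse system; a minimal self-contained sketch of the same idea, using the Julia 1.x SparseArrays API and made-up 2x2 element blocks:

using SparseArrays

# Two overlapping 2x2 element blocks assembled into a 3x3 global matrix.
rows = Int[]; cols = Int[]; vals = Float64[]
for (offset, block) in ((0, [2.0 -1.0; -1.0 2.0]), (1, [2.0 -1.0; -1.0 2.0]))
  for j in 1:2, i in 1:2
    push!(rows, offset + i)
    push!(cols, offset + j)
    push!(vals, block[i, j])  # duplicate entries at shared nodes are summed
  end
end
H = sparse(rows, cols, vals, 3, 3)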
|
{"hexsha": "517340716ad46db21d74112a0755711e4667029c", "size": 8975, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/solve/hdgSolveCDR.jl", "max_stars_repo_name": "mopg/luteos.jl", "max_stars_repo_head_hexsha": "05b72e7ab6c905f55a768ae943ac3173dcf980ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-25T16:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-25T16:11:33.000Z", "max_issues_repo_path": "src/solve/hdgSolveCDR.jl", "max_issues_repo_name": "mopg/luteos.jl", "max_issues_repo_head_hexsha": "05b72e7ab6c905f55a768ae943ac3173dcf980ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/solve/hdgSolveCDR.jl", "max_forks_repo_name": "mopg/luteos.jl", "max_forks_repo_head_hexsha": "05b72e7ab6c905f55a768ae943ac3173dcf980ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6741214058, "max_line_length": 110, "alphanum_fraction": 0.484902507, "num_tokens": 3594}
|
import numpy as np
import pandas as pd
from pathlib import Path
import os
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error

os.makedirs('../output/ensemble')
pref = '10'
versions = ['051', '052', '074', '076', '078', '079', '080', '081', '082']

for i, version in enumerate(versions):
    oof_name = [x for x in os.listdir(f'../output/') if f'oof_{version}' in x]
    print(oof_name)
    if i == 0:
        oof = pd.read_csv(f'../output/{oof_name[0]}').sort_values('object_id')
        oof[f'pred_{i}'] = oof.pred.values
        oof[['conf_0', 'conf_1', 'conf_2', 'conf_3']] = 0
        sub = pd.read_csv(f'../output/sub_{version}.csv')
        sub[f'pred_{i}'] = sub.target.values
        sub[['conf_0', 'conf_1', 'conf_2', 'conf_3']] = 0
    else:
        _tmp = pd.read_csv(f'../output/{oof_name[0]}').sort_values('object_id')
        oof[f'pred_{i}'] = _tmp.pred.values
        if version in ['081', '082']:
            oof[['conf_0', 'conf_1', 'conf_2', 'conf_3']] += 0.5*_tmp[['conf_0', 'conf_1', 'conf_2', 'conf_3']].values
        _tmp = pd.read_csv(f'../output/sub_{version}.csv')
        sub[f'pred_{i}'] = _tmp.target.values
        if version in ['081', '082']:
            sub[['conf_0', 'conf_1', 'conf_2', 'conf_3']] += 0.5*_tmp[['conf_0', 'conf_1', 'conf_2', 'conf_3']].values

oof = oof.reset_index(drop=True)
print(oof.head())
print(sub.head())

params = {
    'objective': 'rmse',
    'metrics': 'rmse',
    'n_estimators': 10000,
    'boosting_type': 'gbdt',
    'num_leaves': 32,
    'max_depth': 2,
    'learning_rate': 0.01,
    'feature_fraction': 0.8,
    'bagging_fraction': 0.3,
    'bagging_freq': 5,
    'reg_alpha': 0.5,
    'reg_lambda': 0,
}

stacking_oof = np.zeros(len(oof))
stacking_sub = np.zeros(len(sub))

features = [f'pred_{i}' for i in range(len(versions))]
features.extend(['conf_0', 'conf_1', 'conf_2', 'conf_3'])
print(features)

n_fold = 5
for fold in range(n_fold):
    print(f'{fold=}')
    trn = oof[oof.fold != fold]
    val = oof[oof.fold == fold]
    val_idx = val.index
    trn_x = trn[features]
    trn_y = trn['target']
    val_x = val[features]
    val_y = val['target']
    tst_x = sub[features]
    model = LGBMRegressor(**params)
    model.fit(trn_x, trn_y, eval_metric='rmse',
              eval_set=[(val_x, val_y)],
              verbose=100, early_stopping_rounds=200)
    val_pred = model.predict(val_x)
    stacking_sub += model.predict(tst_x)
    stacking_oof[val_idx] = val_pred

oof.pred = stacking_oof
oof.pred = np.clip(oof.pred.values, 0.0, 3.0)
print(oof.pred)
score = np.sqrt(mean_squared_error(oof.target.values, oof.pred.values))
print(f'{score:.6f}')

stacking_sub = stacking_sub / float(n_fold)
stacking_sub = np.clip(stacking_sub, 0.0, 3.0)
sub_name = '_'.join(versions)
pd.DataFrame(stacking_sub, columns=['target']).to_csv(f'../output/ensemble/{pref}_stacking_{sub_name}.csv', index=False)
oof.to_csv(f'../output/ensemble/{pref}_oof_stacking_{sub_name}.csv', index=False)
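A simple equal-weight blend of the same out-of-fold columns is a useful baseline against the LightGBM meta-model; a minimal sketch, assuming `oof` and `versions` as built above:

# Hedged baseline: equal-weight average of the base-model predictions.
blend = oof[[f'pred_{i}' for i in range(len(versions))]].mean(axis=1)
blend = np.clip(blend.values, 0.0, 3.0)
print(np.sqrt(mean_squared_error(oof.target.values, blend)))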
|
{"hexsha": "3c6e9957df40e6716bdbe64d7ffaa2dd12ac2cb8", "size": 3002, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lgbm_stacking.py", "max_stars_repo_name": "ishikei14k/atma11_1st_solution", "max_stars_repo_head_hexsha": "91d29eb83f3e5470f82470f0434ad0fc75a90c61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-07-28T02:52:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:03:42.000Z", "max_issues_repo_path": "src/lgbm_stacking.py", "max_issues_repo_name": "ishikei14k/atma11_1st_solution", "max_issues_repo_head_hexsha": "91d29eb83f3e5470f82470f0434ad0fc75a90c61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lgbm_stacking.py", "max_forks_repo_name": "ishikei14k/atma11_1st_solution", "max_forks_repo_head_hexsha": "91d29eb83f3e5470f82470f0434ad0fc75a90c61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.320754717, "max_line_length": 120, "alphanum_fraction": 0.6219187209, "include": true, "reason": "import numpy", "num_tokens": 977}
|
"""Training a face recognizer with TensorFlow based on the FaceNet paper
FaceNet: A Unified Embedding for Face Recognition and Clustering: http://arxiv.org/abs/1503.03832
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import tensorflow.contrib.slim as slim
def main(args):
network = importlib.import_module(args.model_def, 'inference')
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
train_set = facenet.get_dataset(args.data_dir)
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Get a list of image paths and their labels
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
# Read data and apply label preserving distortions
image_batch, label_batch = facenet.read_and_augument_data(image_list, label_list, args.image_size,
args.batch_size, args.max_nrof_epochs, args.random_crop, args.random_flip, args.random_rotate,
args.nrof_preprocess_threads)
print('Total number of classes: %d' % nrof_classes)
print('Total number of examples: %d' % len(image_list))
print('Building training graph')
# Placeholder for the learning rate
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
# Build the inference graph
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=True, weight_decay=args.weight_decay)
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
# Add DeCov regularization loss
if args.decov_loss_factor>0.0:
logits_decov_loss = facenet.decov_loss(logits) * args.decov_loss_factor
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, logits_decov_loss)
# Add center loss
if args.center_loss_factor>0.0:
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Calculate the average cross entropy loss across the batch
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, label_batch, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# Calculate the total losses
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
# Build a Graph that trains the model with one batch of examples and updates the model parameters
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
# Evaluation
if args.lfw_dir:
print('Building evaluation graph')
lfw_label_list = range(0,len(lfw_paths))
assert (len(lfw_paths) % args.lfw_batch_size == 0), "The number of images in the LFW test set need to be divisible by the lfw_batch_size"
eval_image_batch, eval_label_batch = facenet.read_and_augument_data(lfw_paths, lfw_label_list, args.image_size,
args.lfw_batch_size, None, False, False, False, args.nrof_preprocess_threads, shuffle=False)
# Node for input images
eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
eval_image_batch = tf.identity(eval_image_batch, name='input')
eval_prelogits, _ = network.inference(eval_image_batch, 1.0,
phase_train=False, weight_decay=0.0, reuse=True)
eval_embeddings = tf.nn.l2_normalize(eval_prelogits, 1, 1e-10, name='embeddings')
# Create a saver
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
tf.train.start_queue_runners(sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
saver.restore(sess, pretrained_model)
# Training and validation loop
print('Running training')
epoch = 0
while epoch < args.max_nrof_epochs:
step = sess.run(global_step, feed_dict=None)
epoch = step // args.epoch_size
# Train for one epoch
train(args, sess, epoch, learning_rate_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)
# Save variables and the metagraph if it doesn't exist already
save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)
# Evaluate on LFW
if args.lfw_dir:
evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed,
args.lfw_nrof_folds, log_dir, step, summary_writer)
return model_dir
def train(args, sess, epoch, learning_rate_placeholder, global_step,
loss, train_op, summary_op, summary_writer, regularization_losses, learning_rate_schedule_file):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
# Training loop
while batch_number < args.epoch_size:
train_time = 0
i = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr}
err, _, step, reg_loss = sess.run([loss, train_op, global_step, regularization_losses], feed_dict=feed_dict)
if (batch_number % 100 == 0):
summary_str, step = sess.run([summary_op, global_step], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tRegLoss %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, err, np.sum(reg_loss)))
batch_number += 1
i += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, step)
return step
def evaluate(sess, embeddings, labels, actual_issame, batch_size,
seed, nrof_folds, log_dir, step, summary_writer):
start_time = time.time()
# Run forward pass to calculate embeddings
print('Runnning forward pass on LFW images')
embedding_size = embeddings.get_shape()[1]
nrof_images = len(actual_issame)*2
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
t = time.time()
emb, lab = sess.run([embeddings, labels])
emb_array[lab] = emb
print('Batch %d in %.3f seconds' % (i, time.time()-t))
_, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, seed, actual_issame, nrof_folds=nrof_folds)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
lfw_time = time.time() - start_time
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate', simple_value=val)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',
default='~/datasets/facescrub/fs_aligned:~/datasets/casia/casia-webface-aligned')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.nn4')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=96)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=1000)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--decov_loss_factor', type=float,
help='DeCov loss factor.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augumentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='../data/learning_rate_schedule.txt')
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='../data/pairs.txt')
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='png', choices=['jpg', 'png'])
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
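The learning-rate schedule above uses staircase exponential decay; a minimal numpy sketch of the same formula, with made-up constants rather than the script's defaults:

import numpy as np

def decayed_lr(base_lr, step, decay_steps, decay_factor, staircase=True):
    # lr = base_lr * decay_factor ** (step / decay_steps); exponent floored when staircase.
    p = step / decay_steps
    if staircase:
        p = np.floor(p)
    return base_lr * decay_factor ** p

print(decayed_lr(0.1, 2500, 1000, 0.9))  # 0.1 * 0.9**2 = 0.081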
|
{"hexsha": "02a33d354ac1d6d168840d21af415e88d486e414", "size": 18072, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/facenet_train_classifier.py", "max_stars_repo_name": "KittenCN/pyFaceNet", "max_stars_repo_head_hexsha": "0804d06a3533a83ff865a3c4343cfca2a5cbe063", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-27T22:52:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-27T22:52:14.000Z", "max_issues_repo_path": "src/facenet_train_classifier.py", "max_issues_repo_name": "KittenCN/pyFaceNet", "max_issues_repo_head_hexsha": "0804d06a3533a83ff865a3c4343cfca2a5cbe063", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/facenet_train_classifier.py", "max_forks_repo_name": "KittenCN/pyFaceNet", "max_forks_repo_head_hexsha": "0804d06a3533a83ff865a3c4343cfca2a5cbe063", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.9310344828, "max_line_length": 153, "alphanum_fraction": 0.6943891102, "include": true, "reason": "import numpy", "num_tokens": 4092}
|
[STATEMENT]
lemma monad_fail_alt_writerT [locale_witness]:
assumes "monad_fail_alt return bind fail alt"
shows "monad_fail_alt return_writer bind_writer fail_writer alt_writer"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monad_fail_alt local.return_writer local.bind_writer local.fail_writer local.alt_writer
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. monad_fail_alt local.return_writer local.bind_writer local.fail_writer local.alt_writer
[PROOF STEP]
interpret monad_fail_alt return bind fail alt
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monad_fail_alt return bind fail alt
[PROOF STEP]
by fact
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. monad_fail_alt local.return_writer local.bind_writer local.fail_writer local.alt_writer
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monad_fail_alt local.return_writer local.bind_writer local.fail_writer local.alt_writer
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>m. local.alt_writer local.fail_writer m = m
2. \<And>m. local.alt_writer m local.fail_writer = m
[PROOF STEP]
show "alt_writer fail_writer m = m" for m
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.alt_writer local.fail_writer m = m
[PROOF STEP]
by(rule writerT.expand)(simp add: alt_fail1)
[PROOF STATE]
proof (state)
this:
local.alt_writer local.fail_writer ?m4 = ?m4
goal (1 subgoal):
1. \<And>m. local.alt_writer m local.fail_writer = m
[PROOF STEP]
show "alt_writer m fail_writer = m" for m
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.alt_writer m local.fail_writer = m
[PROOF STEP]
by(rule writerT.expand)(simp add: alt_fail2)
[PROOF STATE]
proof (state)
this:
local.alt_writer ?m4 local.fail_writer = ?m4
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
monad_fail_alt local.return_writer local.bind_writer local.fail_writer local.alt_writer
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 771, "file": "Monomorphic_Monad_Monomorphic_Monad", "length": 11}
|
##########################################
# Some uncertain datasets
##########################################
UV = UncertainValueDataset(example_uvals)
##########################################
# Apply functions to datasets `n` times
##########################################
n = 3
@test resample(median, UV, n) isa Vector{T} where T <: Real
@test resample(cor, UV, UV, n) isa Vector{T} where T <: Real
|
{"hexsha": "d7b0af7064262780cf70e8628828913cb3172929", "size": 411, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset_apply_funcs.jl", "max_stars_repo_name": "JuliaTagBot/UncertainData.jl", "max_stars_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-01-04T10:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-03T01:11:13.000Z", "max_issues_repo_path": "test/resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset_apply_funcs.jl", "max_issues_repo_name": "JuliaTagBot/UncertainData.jl", "max_issues_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2018-12-17T20:12:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T20:46:04.000Z", "max_forks_repo_path": "test/resampling/uncertain_datasets/test_resampling_datasets_uncertainvaluedataset_apply_funcs.jl", "max_forks_repo_name": "JuliaTagBot/UncertainData.jl", "max_forks_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-22T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T12:21:02.000Z", "avg_line_length": 27.4, "max_line_length": 60, "alphanum_fraction": 0.4136253041, "num_tokens": 78}
|
struct CountingFunction{F} <: AbstractFunction
    counter::Base.RefValue{Int}
    f::F
end
getdim(f::CountingFunction) = getdim(f.f)
CountingFunction(f::Function) = CountingFunction(Ref(0), f)
function (f::CountingFunction)(x)
    f.counter[] += 1
    return f.f(x)
end
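A brief usage sketch of the call-counting wrapper, assuming the surrounding package definitions (`AbstractFunction`, `getdim`) are loaded; the quadratic objective is made up for illustration:

g = CountingFunction(x -> sum(abs2, x))  # wrap a made-up objective
g([1.0, 2.0])                            # first evaluation
g([3.0, 4.0])                            # second evaluation
g.counter[]                              # == 2, the number of evaluations so far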
|
{"hexsha": "e0dba427bf5999c5acc322fb674b12e8fb27c3ea", "size": 271, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions/counting_function.jl", "max_stars_repo_name": "tmigot/Nonconvex.jl", "max_stars_repo_head_hexsha": "688f699ada98844427b9b701422638d533aed313", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/functions/counting_function.jl", "max_issues_repo_name": "tmigot/Nonconvex.jl", "max_issues_repo_head_hexsha": "688f699ada98844427b9b701422638d533aed313", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/functions/counting_function.jl", "max_forks_repo_name": "tmigot/Nonconvex.jl", "max_forks_repo_head_hexsha": "688f699ada98844427b9b701422638d533aed313", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-18T07:34:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-18T07:34:56.000Z", "avg_line_length": 24.6363636364, "max_line_length": 59, "alphanum_fraction": 0.7011070111, "num_tokens": 85}
|
import numpy as np
import shapely #may need submodules
from shapely.geometry import Point, Polygon
class Item:
"""
Parent class for all items
"""
def __init__(self, polygon):
self.polygon = polygon #May need only dimensions
self.type = None
self.subtype = None
self.pos = self.get_position()
def get_position(self):
"""
Returns the centroid of self.polygon
"""
p = self.polygon.centroid
return p.x, p.y
class Attractor(Item):
"""
Item type used for surround/perimeter behaviour
"""
def __init__(self, polygon):
super().__init__(polygon)
self.type = 'attractor'
class Clutter(Item):
"""
Movable items
"""
def __init__(self, polygon, weight):
super().__init__(polygon)
self.type = 'clutter'
self.weight = weight
self.min_bots_needed = weight/4
"""
    Motion is possible when num bots > min_bots_needed
Max speed is achieved when num bots >= weight
"""
def move(self, translation, rotation):
"""
*** NOT IMPLEMENTED ***
        *** MAY CONFLICT WITH move() FROM OTHER CHILD CLASSES ***
Translation: 2d vector
Rotation: angle
"""
#Use poly = shapely.affinity.<rot/translate>(poly)
pass
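        # Illustrative sketch only (not the original implementation): with
        # shapely.affinity the hinted update could look like the following,
        # assuming `translation` is a (dx, dy) tuple and `rotation` is an
        # angle in degrees:
        #
        #   from shapely import affinity
        #   poly = affinity.translate(self.polygon, xoff=translation[0], yoff=translation[1])
        #   self.polygon = affinity.rotate(poly, rotation, origin='centroid')
        #   self.pos = self.get_position()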
class Nest(Attractor):
"""
Area for storing gathered resources in foraging
"""
def __init__(self, position, radius):
"""
Args:
position: tuple (x,y)
radius
"""
super().__init__(Point(*position).buffer(radius))
self.subtype = 'nest'
self.pos = position
self.radius = radius
class Contamination(Attractor):
"""
Used in contamination removal scenarios
Contamination begins at a hotspot and grows in all directions
Robots at the perimeter can slow down/reduce the spread
"""
def __init__(self, position, radius):
"""
Args:
position: tuple (x,y)
radius
"""
super().__init__(Point(*position).buffer(radius))
self.radius = radius
self.pos = Point(*position)
self.subtype = 'contamination'
def update(self, increment=1, rate = 0.005):
"""
        Increment can be positive or negative
        Radius change is proportional to the increment and inversely
        proportional to the square of the current radius
        """
self.radius += increment*rate/(self.radius**2)
self.polygon = self.pos.buffer(self.radius)
#print(self.radius)
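        # Worked example (illustrative): with the default rate of 0.005,
        # update(increment=1) adds 0.005/1 = 0.005 to a radius of 1 but only
        # 0.005/4 = 0.00125 to a radius of 2, so larger contaminations
        # spread more slowly per unit increment.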
class Resource(Attractor):
"""
Movable circular items
Used in resource gathering & foraging
Weight of the item depends on its area
Difficulty of moving the item depends on its weight
"""
def __init__(self, position, radius=0.5, density=1):
"""
        Movable resource, for foraging/gathering
Args:
position: (x,y) tuple
radius
"""
super().__init__(Point(*position).buffer(radius))
self.radius = radius
self.pos = position
self.subtype = 'resource'
self.weight = density*radius*radius
self.min_bots_needed = self.weight/4
def move_to(self, new_pos):
#new_pos: (x,y) tuple
self.pos = Point(*new_pos)
self.polygon = self.pos.buffer(self.radius)
def move(self, dir_, speed):
#Similar to bot.move
#Can be used with cmd exec
speed/=40 #Tuned
x_,y_ = np.array(self.pos)
self.move_to((x_+ speed*np.cos(dir_), y_+ speed*np.sin(dir_)))
return None
def deplete(self, decrement=1, rate = 0.1):
#'Consume' the item
#Not in use yet
#Similar to contam update() method
self.radius -= decrement*rate/(self.radius**2)
self.polygon = self.pos.buffer(self.radius)
class Obstacle(Item):
#Enhancement
pass
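# Usage sketch (illustrative only, not part of the module): build a nest and
# a resource, then nudge the resource toward the nest. Coordinates and sizes
# here are arbitrary.
#
#   import numpy as np
#   nest = Nest((0.0, 0.0), radius=2.0)
#   res = Resource((5.0, 5.0), radius=0.5, density=1)
#   heading = np.arctan2(nest.pos[1] - res.pos[1], nest.pos[0] - res.pos[0])
#   res.move(heading, speed=1.0)  # moves the resource a small step toward the nest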
|
{"hexsha": "bb03a9903edd9f0a0e03bcb26931c9b042d56938", "size": 3318, "ext": "py", "lang": "Python", "max_stars_repo_path": "swarm_tasks/utils/item.py", "max_stars_repo_name": "rmvanarse/swarm_tasks", "max_stars_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-13T12:54:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T12:12:28.000Z", "max_issues_repo_path": "swarm_tasks/utils/item.py", "max_issues_repo_name": "rmvanarse/swarm_tasks", "max_issues_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swarm_tasks/utils/item.py", "max_forks_repo_name": "rmvanarse/swarm_tasks", "max_forks_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-06T15:02:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T12:11:30.000Z", "avg_line_length": 21.9735099338, "max_line_length": 64, "alphanum_fraction": 0.6919831224, "include": true, "reason": "import numpy", "num_tokens": 938}
|
\section{201403-4}
\input{problem/1/201403-4-p.tex}
|
{"hexsha": "2da76ac9a25ebc32d45cc653aa8b234a13adf0a4", "size": 52, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "problem/1/201403-4.tex", "max_stars_repo_name": "xqy2003/CSP-Project", "max_stars_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T01:47:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T01:47:19.000Z", "max_issues_repo_path": "problem/1/201403-4.tex", "max_issues_repo_name": "xqy2003/CSP-Project", "max_issues_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problem/1/201403-4.tex", "max_forks_repo_name": "xqy2003/CSP-Project", "max_forks_repo_head_hexsha": "26ef348463c1f948c7c7fb565edf900f7c041560", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.3333333333, "max_line_length": 32, "alphanum_fraction": 0.7307692308, "num_tokens": 22}
|
from stock.marketdata import *
import logging
import logging.config
from stock.globalvar import *
import numpy as np
logging.config.fileConfig(LOGCONF)
logger = logging.getLogger(__name__)
class CoVar:
def __init__(self, marketdata):
self.marketdata = marketdata
def check(self, exsymbols):
basket = []
        num = 50
        for i in range(len(exsymbols)):
ex1 = exsymbols[i]
bars1 = [0] * num
if isinstance(self.marketdata, realtimedata.RealTimeData):
bars1[0] = self.marketdata.get_data(ex1)
history = self.marketdata.get_history_by_date(ex1)
if len(history) < 100:
continue
bars1[1:num] = history[0:num-1]
elif isinstance(self.marketdata, backtestdata.BackTestData):
history = self.marketdata.get_history_by_date(ex1)
if len(history) < 100:
continue
bars1[0:num] = history[0:num]
for j in range(i+1, len(exsymbols)):
ex2 = exsymbols[j]
bars2 = [0] * num
try:
if isinstance(self.marketdata, realtimedata.RealTimeData):
bar_today = self.marketdata.get_data(ex2)
history = self.marketdata.get_history_by_date(ex2)
if len(history) < 100:
continue
bars2[0:num] = history[0:num]
elif isinstance(self.marketdata, backtestdata.BackTestData):
history = self.marketdata.get_history_by_date(ex2)
bar_today = history[0]
if len(history) < 100:
continue
bars2[0:num] = history[1:num+1]
                    chg = bar_today.close / bars2[0].close
                    bars1_close = [x.close for x in bars1]
                    bars2_close = [x.close for x in bars2]
                    ts1 = np.array(bars1_close)
                    ts2 = np.array(bars2_close)
                    cc = np.corrcoef([ts1, ts2])
                    if cc[0, 1] > 0.8 and chg > 1.05:
                        print("%s <=> %s: %f" % (ex1, ex2, cc[0, 1]))
                except IOError as e:
                    logger.error("cannot open: %s" % (e.filename))
                except Exception as e:
                    logger.error("%s: %s" % (type(e), e))
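# Illustrative check of the screening rule above (not part of the class):
# np.corrcoef returns the 2x2 correlation matrix of the two series, and the
# off-diagonal entry cc[0, 1] is the Pearson correlation used in check().
#
#   import numpy as np
#   ts1 = np.array([1.0, 2.0, 3.0, 4.0])
#   ts2 = np.array([2.0, 4.1, 5.9, 8.2])
#   cc = np.corrcoef([ts1, ts2])
#   print(cc[0, 1])  # close to 1.0 for strongly co-moving series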
|
{"hexsha": "1325cbb68b662e431180f8c15ad093feffc7f0cd", "size": 2530, "ext": "py", "lang": "Python", "max_stars_repo_path": "stock/quant/covar.py", "max_stars_repo_name": "shenzhongqiang/cnstock_py", "max_stars_repo_head_hexsha": "2bb557657a646acb9d20d3ce78e15cf68390f8ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-10-31T04:05:11.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-17T08:46:53.000Z", "max_issues_repo_path": "stock/quant/covar.py", "max_issues_repo_name": "shenzhongqiang/cnstock_py", "max_issues_repo_head_hexsha": "2bb557657a646acb9d20d3ce78e15cf68390f8ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stock/quant/covar.py", "max_forks_repo_name": "shenzhongqiang/cnstock_py", "max_forks_repo_head_hexsha": "2bb557657a646acb9d20d3ce78e15cf68390f8ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8064516129, "max_line_length": 80, "alphanum_fraction": 0.4944664032, "include": true, "reason": "import numpy", "num_tokens": 562}
|
import argparse
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
import os
import time
import chainer
import chainercv
import chainer.functions as F
import cv2
import numpy as np
from predict import prepare_setting, restore_args
from food101_dataset import get_food101_dataset
def video(args):
args_trained = restore_args(args.trained)
inH, inW = args_trained["height"], args_trained["width"]
dataset = get_food101_dataset(args.dataset, mode="test")
idx2name = dataset.base.idx2name
model, xp, _ = prepare_setting(args)
cap = cv2.VideoCapture(args.camera)
    if not cap.isOpened():
raise Exception("Error opening video stream or file")
fps_time = 0
with chainer.using_config("train", False), chainer.function.no_backprop_mode():
while cap.isOpened():
ret_val, img = cap.read()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.transpose(2, 0, 1)
_, capH, capW = img.shape
crop_size = min(capH, capW)
img = chainercv.transforms.center_crop(img, (crop_size, crop_size))
img = chainercv.transforms.resize(img, (inH, inW))
vis_img = img.copy().transpose(1, 2, 0)
# RGB->BGR
vis_img = vis_img[:, :, ::-1]
start = time.time()
h = model.predictor(xp.expand_dims(xp.array(img, dtype=xp.float32), axis=0))
prediction = F.softmax(h)
if args.device >= 0:
prediction = xp.asnumpy(prediction[0].data)
else:
prediction = prediction[0].data
top_ten = np.argsort(-prediction)[:10]
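            # argsort of the negated scores orders indices by descending
            # probability; the first ten are the top-10 class predictions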
end = time.time()
logger.info("Elapsed {}".format(end - start))
blank = np.zeros((inH, 2 * inW, 3)).astype(img.dtype)
for rank, label_idx in enumerate(top_ten):
score = prediction[label_idx]
name = idx2name[label_idx]
logger.info("{:>3d} {:>6.2f}% {}".format(
rank + 1, score * 100, name))
cv2.putText(blank, "{:>3d} {:>6.2f}% {}".format(
rank + 1, prediction[label_idx] * 100, idx2name[label_idx]),
(10, 20 * (rank + 2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(blank, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
title = "Food-101"
cv2.imshow(title, cv2.hconcat([vis_img, blank]))
fps_time = time.time()
"""Hit esc key"""
if cv2.waitKey(1) == 27:
break
def parse_argument():
parser = argparse.ArgumentParser()
parser.add_argument(
"trained", type=str, help="path/to/trained")
parser.add_argument("--dataset", type=str, help="path/to/food-101",
default=os.path.expanduser("~/dataset/food-101"))
parser.add_argument("--device", type=int, default=-1,
help="specify GPU_ID. If negative, use CPU")
parser.add_argument("--camera", type=int, default=0, help="specify camera id")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_argument()
video(args)
|
{"hexsha": "b6be3a3db79dfcdfe6c40f00846b92427bf5ffeb", "size": 3366, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo.py", "max_stars_repo_name": "terasakisatoshi/chainer-food-101-revised", "max_stars_repo_head_hexsha": "3d84f596f22b6a95e33fe196ed5ebba3d3c573a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-08T15:18:26.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-08T15:18:26.000Z", "max_issues_repo_path": "demo.py", "max_issues_repo_name": "terasakisatoshi/chainer-food-101-revised", "max_issues_repo_head_hexsha": "3d84f596f22b6a95e33fe196ed5ebba3d3c573a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo.py", "max_forks_repo_name": "terasakisatoshi/chainer-food-101-revised", "max_forks_repo_head_hexsha": "3d84f596f22b6a95e33fe196ed5ebba3d3c573a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.25, "max_line_length": 97, "alphanum_fraction": 0.5808080808, "include": true, "reason": "import numpy", "num_tokens": 841}
|
from EQTransformer.core.EqT_utils import f1, SeqSelfAttention, FeedForward, LayerNormalization
from EQTransformer.core.mseed_predictor import (
mseed_predictor,
_mseed2nparry,
PreLoadGeneratorTest,
_picker,
_get_snr,
_output_writter_prediction,
_plotter_prediction,
_resampling,
)
import keras
from keras.models import load_model
from keras.optimizers import Adam
from keras.engine.training_utils import iter_sequence_infinite
import keras2onnx
import platform
from os import listdir
from os.path import join
import pprint as pp
import numpy as np
import onnxruntime
import sys
import os
import csv
import shutil
import time
import pandas as pd
import json
import obspy
from obspy import read
"""
params_pred = {'batch_size': 500,
'norm_mode': 'std'}
args = {'input_dir': 'downloads_mseeds',
'input_model': 'EqT_model.h5',
'stations_json': 'station_list.json',
'output_dir': 'detections2',
'loss_weights': [0.02, 0.40, 0.58],
'detection_threshold': 0.3,
'P_threshold': 0.1,
'S_threshold': 0.1,
'number_of_plots': 10,
'plot_mode': 'time_frequency',
'normalization_mode': 'std',
'batch_size': 500,
'overlap': 0.3,
'gpuid': None,
'gpu_limit': None}
overwrite = False
"""
params_pred = {'batch_size': 1,
'norm_mode': 'std'}
args = {'input_dir': 'test_dataset',
'input_model': 'EqT_model.h5',
'stations_json': 'test_dataset.json',
'output_dir': 'test_detections',
'loss_weights': [0.02, 0.40, 0.58],
'detection_threshold': 0.3,
'P_threshold': 0.1,
'S_threshold': 0.1,
'number_of_plots': 10,
'plot_mode': 'time_frequency',
'normalization_mode': 'std',
'batch_size': 1,
'overlap': 0.3,
'gpuid': None,
'gpu_limit': None}
overwrite = False
# ONNX re-implementation of Keras' predict_generator (monkey-patch style)
def onnx_predict_generator(pred_generator, sess):
all_outs = []
out_pred_generator = iter_sequence_infinite(pred_generator)
steps_done = 0
steps = len(pred_generator)
while steps_done < steps:
generator_output = next(out_pred_generator)
x = generator_output
x_test = list(x.values())[0].astype(np.float32)
outs = sess.run(None, input_feed={'input': x_test})
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
results = [np.concatenate(out) for out in all_outs]
#print(f'{len(results[0])},{len(results[1])},{len(results[2])}')
return results[0], results[1], results[2]
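# Usage sketch (illustrative; mirrors the commented-out ONNX block at the
# bottom of this file, assuming a PreLoadGeneratorTest instance
# `pred_generator` as built in __main__):
#
#   sess = onnxruntime.InferenceSession('eqt_model.onnx')
#   predD, predP, predS = onnx_predict_generator(pred_generator, sess)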
def mseed2nparry_one_minute(args, matching, time_slots, comp_types, st_name):
    'Read miniseed files from a list of file names and return meta data, time-slot info, component types, and a dictionary of numpy arrays'
json_file = open(args['stations_json'])
stations_ = json.load(json_file)
st = obspy.core.Stream()
tsw = False
for m in matching:
temp_st = read(os.path.join(str(args['input_dir']), m),debug_headers=True)
if tsw == False and temp_st:
tsw = True
for tr in temp_st:
time_slots.append((tr.stats.starttime, tr.stats.endtime))
try:
temp_st.merge(fill_value=0)
except Exception:
temp_st =_resampling(temp_st)
temp_st.merge(fill_value=0)
temp_st.detrend('demean')
st += temp_st
st.filter(type='bandpass', freqmin = 1.0, freqmax = 45, corners=2, zerophase=True)
st.taper(max_percentage=0.001, type='cosine', max_length=2)
if len([tr for tr in st if tr.stats.sampling_rate != 100.0]) != 0:
try:
st.interpolate(100, method="linear")
except Exception:
st=_resampling(st)
st.trim(min([tr.stats.starttime for tr in st]), max([tr.stats.endtime for tr in st]), pad=True, fill_value=0)
start_time = st[0].stats.starttime
end_time = st[0].stats.endtime
meta = {"start_time":start_time,
"end_time": end_time,
"trace_name":m
}
chanL = [tr.stats.channel[-1] for tr in st]
comp_types.append(len(chanL))
tim_shift = int(60-(args['overlap']*60))
next_slice = start_time+60
data_set={}
sl = 0; st_times = []
#while next_slice <= end_time:
npz_data = np.zeros([6000, 3])
st_times.append(str(start_time).replace('T', ' ').replace('Z', ''))
w = st.slice(start_time, next_slice)
if 'Z' in chanL:
npz_data[:,2] = w[chanL.index('Z')].data[:6000]
if ('E' in chanL) or ('1' in chanL):
try:
npz_data[:,0] = w[chanL.index('E')].data[:6000]
except Exception:
npz_data[:,0] = w[chanL.index('1')].data[:6000]
if ('N' in chanL) or ('2' in chanL):
try:
npz_data[:,1] = w[chanL.index('N')].data[:6000]
except Exception:
npz_data[:,1] = w[chanL.index('2')].data[:6000]
data_set.update( {str(start_time).replace('T', ' ').replace('Z', '') : npz_data})
start_time = start_time+tim_shift
next_slice = next_slice+tim_shift
sl += 1
meta["trace_start_time"] = st_times
try:
meta["receiver_code"]=st[0].stats.station
meta["instrument_type"]=st[0].stats.channel[:2]
meta["network_code"]=stations_[st[0].stats.station]['network']
meta["receiver_latitude"]=stations_[st[0].stats.station]['coords'][0]
meta["receiver_longitude"]=stations_[st[0].stats.station]['coords'][1]
meta["receiver_elevation_m"]=stations_[st[0].stats.station]['coords'][2]
except Exception:
meta["receiver_code"]=st_name
meta["instrument_type"]=stations_[st_name]['channels'][0][:2]
meta["network_code"]=stations_[st_name]['network']
meta["receiver_latitude"]=stations_[st_name]['coords'][0]
meta["receiver_longitude"]=stations_[st_name]['coords'][1]
meta["receiver_elevation_m"]=stations_[st_name]['coords'][2]
return meta, time_slots, comp_types, data_set
if __name__ == '__main__':
# original
# detection.ipynb
print('Keras:')
model = load_model('EqT_model.h5',
custom_objects={
'SeqSelfAttention': SeqSelfAttention,
'FeedForward': FeedForward,
'LayerNormalization': LayerNormalization,
'f1': f1})
model.compile(loss = ['binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'],
loss_weights = [0.02, 0.40, 0.58],
optimizer = Adam(lr = 0.001),
metrics = [f1])
out_dir = os.path.join(os.getcwd(), str(args['output_dir']))
if os.path.isdir(out_dir):
# print('============================================================================')
# print(f' *** {out_dir} already exists!')
print(f"*** {out_dir} already exists!")
if overwrite == True:
inp = "y"
print("Overwriting your previous results")
else:
inp = input(" --> Type (Yes or y) to create a new empty directory! This will erase your previous results so make a copy if you want them.")
if inp.lower() == "yes" or inp.lower() == "y":
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
print("Okay.")
sys.exit(1)
if platform.system() == 'Windows':
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("\\")[-1] != ".DS_Store"]
else:
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("/")[-1] != ".DS_Store"]
station_list = sorted(set(station_list))
for ct, st in enumerate(station_list):
# create output directories
save_dir = os.path.join(out_dir, str(st)+'_outputs')
save_figs = os.path.join(save_dir, 'figures')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if args['number_of_plots']:
os.makedirs(save_figs)
plt_n = 0
csvPr_gen = open(os.path.join(save_dir,'X_prediction_results.csv'), 'w')
predict_writer = csv.writer(csvPr_gen, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predict_writer.writerow(['file_name',
'network',
'station',
'instrument_type',
'station_lat',
'station_lon',
'station_elv',
'event_start_time',
'event_end_time',
'detection_probability',
'detection_uncertainty',
'p_arrival_time',
'p_probability',
'p_uncertainty',
'p_snr',
's_arrival_time',
's_probability',
's_uncertainty',
's_snr'
])
csvPr_gen.flush()
print(f"Started working on {st}, {ct+1} out of {len(station_list)} ...", flush=True)
start_Predicting = time.time()
if platform.system() == 'Windows':
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"\\"+st) if ev.split("\\")[-1].split(".")[-1].lower() == "mseed"]
else:
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"/"+st) if ev.split("/")[-1].split(".")[-1].lower() == "mseed"]
mon = [ev.split('__')[1]+'__'+ev.split('__')[2] for ev in file_list]
uni_list = list(set(mon))
uni_list.sort()
time_slots, comp_types = [], []
for _, month in enumerate(uni_list):
matching = [s for s in file_list if month in s]
print(f'{month}')
#meta, time_slots, comp_types, data_set = _mseed2nparry(args, matching, time_slots, comp_types, st)
meta, time_slots, comp_types, data_set = mseed2nparry_one_minute(args, matching, time_slots, comp_types, st)
pred_generator = PreLoadGeneratorTest(meta["trace_start_time"], data_set, **params_pred)
predD, predP, predS = model.predict_generator(pred_generator)
detection_memory = []
for ix in range(len(predD)):
matches, pick_errors, yh3 = _picker(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])
if (len(matches) >= 1) and ((matches[list(matches)[0]][3] or matches[list(matches)[0]][6])):
snr = [_get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][3], window = 100), _get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][6], window = 100)]
pre_write = len(detection_memory)
detection_memory=_output_writter_prediction(meta, predict_writer, csvPr_gen, matches, snr, detection_memory, ix)
post_write = len(detection_memory)
if plt_n < args['number_of_plots'] and post_write > pre_write:
_plotter_prediction(data_set[meta["trace_start_time"][ix]], args, save_figs, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta["trace_start_time"][ix], matches)
plt_n += 1
end_Predicting = time.time()
delta = (end_Predicting - start_Predicting)
hour = int(delta / 3600)
delta -= hour * 3600
minute = int(delta / 60)
delta -= minute * 60
seconds = delta
dd = pd.read_csv(os.path.join(save_dir,'X_prediction_results.csv'))
print(f"Finished the prediction in: {hour} hours and {minute} minutes and {round(seconds, 2)} seconds.", flush=True)
        print(f"*** Detected: {len(dd)} events.", flush=True)
print(' *** Wrote the results into --> " ' + str(save_dir)+' "', flush=True)
"""
# ONNX port
print('ONNX:')
sess_options = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession('eqt_model.onnx', sess_options)
#sess = onnxruntime.InferenceSession('eqt_optimized.onnx', sess_options)
#args['output_dir'] = 'detections_onnx_optimized'
out_dir = os.path.join(os.getcwd(), str(args['output_dir']))
if os.path.isdir(out_dir):
# print('============================================================================')
# print(f' *** {out_dir} already exists!')
print(f"*** {out_dir} already exists!")
if overwrite == True:
inp = "y"
print("Overwriting your previous results")
else:
inp = input(" --> Type (Yes or y) to create a new empty directory! This will erase your previous results so make a copy if you want them.")
if inp.lower() == "yes" or inp.lower() == "y":
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
print("Okay.")
sys.exit(1)
if platform.system() == 'Windows':
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("\\")[-1] != ".DS_Store"]
else:
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("/")[-1] != ".DS_Store"]
station_list = sorted(set(station_list))
for ct, st in enumerate(station_list):
# create output directories
save_dir = os.path.join(out_dir, str(st)+'_outputs')
save_figs = os.path.join(save_dir, 'figures')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if args['number_of_plots']:
os.makedirs(save_figs)
plt_n = 0
csvPr_gen = open(os.path.join(save_dir,'X_prediction_results.csv'), 'w')
predict_writer = csv.writer(csvPr_gen, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predict_writer.writerow(['file_name',
'network',
'station',
'instrument_type',
'station_lat',
'station_lon',
'station_elv',
'event_start_time',
'event_end_time',
'detection_probability',
'detection_uncertainty',
'p_arrival_time',
'p_probability',
'p_uncertainty',
'p_snr',
's_arrival_time',
's_probability',
's_uncertainty',
's_snr'
])
csvPr_gen.flush()
print(f"Started working on {st}, {ct+1} out of {len(station_list)} ...", flush=True)
start_Predicting = time.time()
if platform.system() == 'Windows':
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"\\"+st) if ev.split("\\")[-1].split(".")[-1].lower() == "mseed"]
else:
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"/"+st) if ev.split("/")[-1].split(".")[-1].lower() == "mseed"]
mon = [ev.split('__')[1]+'__'+ev.split('__')[2] for ev in file_list]
uni_list = list(set(mon))
uni_list.sort()
time_slots, comp_types = [], []
for _, month in enumerate(uni_list):
matching = [s for s in file_list if month in s]
print(f'{month}', flush=True)
meta, time_slots, comp_types, data_set = mseed2nparry_one_minute(args, matching, time_slots, comp_types, st)
pred_generator = PreLoadGeneratorTest(meta["trace_start_time"], data_set, **params_pred)
            predD, predP, predS = onnx_predict_generator(pred_generator, sess)
detection_memory = []
for ix in range(len(predD)):
matches, pick_errors, yh3 = _picker(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])
if (len(matches) >= 1) and ((matches[list(matches)[0]][3] or matches[list(matches)[0]][6])):
snr = [_get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][3], window = 100), _get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][6], window = 100)]
pre_write = len(detection_memory)
detection_memory=_output_writter_prediction(meta, predict_writer, csvPr_gen, matches, snr, detection_memory, ix)
post_write = len(detection_memory)
if plt_n < args['number_of_plots'] and post_write > pre_write:
_plotter_prediction(data_set[meta["trace_start_time"][ix]], args, save_figs, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta["trace_start_time"][ix], matches)
plt_n += 1
end_Predicting = time.time()
delta = (end_Predicting - start_Predicting)
hour = int(delta / 3600)
delta -= hour * 3600
minute = int(delta / 60)
delta -= minute * 60
seconds = delta
dd = pd.read_csv(os.path.join(save_dir,'X_prediction_results.csv'))
print(f"Finished the prediction in: {hour} hours and {minute} minutes and {round(seconds, 2)} seconds.", flush=True)
        print(f"*** Detected: {len(dd)} events.", flush=True)
print(' *** Wrote the results into --> " ' + str(save_dir)+' "', flush=True)
"""
|
{"hexsha": "552b73840d960daa9168491530323b78f5ca37da", "size": 18226, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/compare.py", "max_stars_repo_name": "Cuda-Chen/EQTransformer-onnx-convertor", "max_stars_repo_head_hexsha": "fe5a72c785da69f8282325c96c01ea79a89f508a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/compare.py", "max_issues_repo_name": "Cuda-Chen/EQTransformer-onnx-convertor", "max_issues_repo_head_hexsha": "fe5a72c785da69f8282325c96c01ea79a89f508a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/compare.py", "max_forks_repo_name": "Cuda-Chen/EQTransformer-onnx-convertor", "max_forks_repo_head_hexsha": "fe5a72c785da69f8282325c96c01ea79a89f508a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8847058824, "max_line_length": 214, "alphanum_fraction": 0.5469110063, "include": true, "reason": "import numpy", "num_tokens": 4352}
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 23:47:20 2019
@author: YQ
"""
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from rnn import HyperLSTMCell
from rnn import LayerNormLSTMCell as LSTMCell
ohc = tfp.distributions.OneHotCategorical
seq2seq = tf.contrib.seq2seq
w_init = tf.contrib.layers.xavier_initializer()
class MusicVAE:
def __init__(self, x_depth=[89, 33, 33],
enc_rnn_dim=512, enc_hyper_unit=256, enc_dropout=0.1,
dec_rnn_dim=1024, dec_hyper_unit=256, dec_dropout=0.2,
enc_rnn_layer=1, dec_rnn_layer=1,
enc_rnn="hyperlstm", dec_rnn="hyperlstm",
attention=0,
cont_dim=256, cat_dim=2, mu_force=2.0,
gumbel=0.05, style_embed_dim=256,
training=True,
beta_anneal_steps=1000, kl_reg=1.0
):
self.features = ["pitch", "dt", "duration", "velocity"]
self.x_depth = x_depth
self.x_dim = np.sum(x_depth)
self.enc_rnn_dim = enc_rnn_dim
self.enc_hyper_unit = enc_hyper_unit
self.enc_dropout = 1 - enc_dropout
self.enc_rnn = enc_rnn
self.enc_rnn_layer = enc_rnn_layer
self.dec_rnn_dim = dec_rnn_dim
self.dec_hyper_unit = dec_hyper_unit
self.dec_dropout = 1 - dec_dropout
self.dec_rnn = dec_rnn
self.dec_rnn_layer = dec_rnn_layer
self.attention = attention
self.cont_dim = cont_dim
self.cat_dim = cat_dim
self.mu_force = mu_force
self.style_embed_dim = style_embed_dim
self.gumbel = gumbel
self.training = training
self.beta_anneal_steps = beta_anneal_steps
self.kl_reg = kl_reg
self.summaries = []
# https://datascience.stackexchange.com/questions/29851/one-hot-encoding-vs-word-embeding-when-to-choose-one-or-another
self.pitch_embedding = tf.Variable(tf.random_uniform([89, 32], -1.0, 1.0),
name="pitch_embedding")
self.style_embedding = tf.Variable(
tf.random_uniform([self.cat_dim, self.style_embed_dim], -1.0, 1.0),
name="style_embedding")
def kl_cost(self, enc_out, mode="cont"):
if mode == "cont":
mu, log_z_var = enc_out
loss = -0.5 * tf.reduce_sum(1 + log_z_var - tf.square(mu) - tf.exp(log_z_var), axis=-1)
elif mode == "cat":
alpha = tf.nn.softmax(enc_out)
log_dim = tf.math.log(tf.cast(tf.shape(enc_out)[-1], tf.float32))
neg_entropy = tf.reduce_sum(alpha * tf.math.log(alpha + 1e-10), axis=-1)
loss = log_dim + neg_entropy
return loss
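    # For reference: the "cont" branch above is the closed-form KL divergence
    # between the diagonal Gaussian q(z|x) = N(mu, sigma^2) and a standard
    # normal prior, KL = -1/2 * sum(1 + log sigma^2 - mu^2 - sigma^2); the
    # "cat" branch is KL(alpha || uniform) = log(K) - H(alpha) for a K-way
    # categorical with probabilities alpha.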
def rnn_cell(self, rnn, n_units, hyper_unit, dropout, n_layers=1, input_dropout=False, device=None):
if rnn == "hyperlstm":
rnn = tf.nn.rnn_cell.MultiRNNCell([
HyperLSTMCell(n_units,
hyper_num_units=hyper_unit,
dropout_keep_prob=dropout if self.training else 1.0,
use_recurrent_dropout=self.training)
for _ in range(n_layers)])
elif rnn == "lstm":
rnn = tf.nn.rnn_cell.MultiRNNCell([
LSTMCell(n_units,
dropout_keep_prob=dropout if self.training else 1.0,
use_recurrent_dropout=self.training)
for _ in range(n_layers)])
if input_dropout:
keep_prob = dropout if self.training else 1.0
rnn = tf.nn.rnn_cell.DropoutWrapper(rnn, input_keep_prob=keep_prob)
if device is not None:
rnn = tf.nn.rnn_cell.DeviceWrapper(rnn, device)
return rnn
def get_initial_rnn_state(self, z, rnn, rnn_dim, rnn_layer):
init_state = []
if rnn == "hyperlstm":
init_state = []
for i in range(rnn_layer):
tmp = tf.layers.dense(z, 2*(self.dec_rnn_dim+self.dec_hyper_unit), activation=tf.nn.elu,
name="dec_init_state_{}".format(i), kernel_initializer=w_init)
init_state.append(tmp)
elif rnn == "lstm":
for i in range(rnn_layer):
tmp = tf.layers.dense(z, rnn_dim*2, name="rnn_state_{}".format(i), activation=tf.nn.tanh,
kernel_initializer=w_init)
init_state.append(tmp)
init_state = tuple(init_state)
return init_state
def encoder(self, x, seq_len):
with tf.variable_scope("encoder"):
cell_fw = self.rnn_cell(self.enc_rnn, self.enc_rnn_dim,
self.enc_hyper_unit, self.enc_dropout,
self.enc_rnn_layer)
cell_bw = self.rnn_cell(self.enc_rnn, self.enc_rnn_dim,
self.enc_hyper_unit, self.enc_dropout,
self.enc_rnn_layer)
            # sequence_length excludes the <end> token
outputs, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, x,
dtype=tf.float32,
sequence_length=seq_len-1)
states = tf.concat([state_fw[-1], state_bw[-1]], axis=-1)
with tf.variable_scope("enc_heads"):
cont_head = tf.layers.dense(states, 512, name="cont_head",
kernel_initializer=w_init, activation=tf.nn.relu)
z_mean = tf.layers.dense(cont_head, self.cont_dim, name="z_mean", kernel_initializer=w_init)
log_z_var = tf.layers.dense(cont_head, self.cont_dim, name="z_variance",
kernel_initializer=w_init)
cat_head = tf.layers.dense(states, 512, name="cat_head",
kernel_initializer=w_init, activation=tf.nn.relu)
z_cat_logit = tf.layers.dense(cat_head, self.cat_dim, name="z_cat_logit",
kernel_initializer=w_init)
return z_mean, log_z_var, z_cat_logit
def decoder(self, z, x, seq_len):
start_token = tf.zeros((tf.shape(z)[0], 1, self.x_dim), dtype=tf.float32)
start_token = self.embedding_lookup(start_token, self.pitch_embedding)
mask = tf.stop_gradient(tf.sequence_mask(seq_len, dtype=tf.bool))
x = tf.concat([start_token, x[:, :-1, :]], axis=1)
if self.attention > 0:
key = tf.keras.layers.Dense(self.attention)
value = tf.keras.layers.Dense(self.attention)
attn = tf.keras.layers.Attention(use_scale=True, causal=True)
keys = key(x)
values = value(x)
if self.training:
keys = tf.keras.layers.Dropout(0.2)(keys)
values = tf.keras.layers.Dropout(0.2)(values)
x = tf.concat([x, attn([keys, values], [mask, mask])], axis=-1)
start_token = tf.concat([start_token,
tf.zeros((tf.shape(z)[0], 1, self.attention),
dtype=tf.float32)], axis=-1)
initialize_fn = lambda: (tf.zeros((tf.shape(z)[0],), tf.bool), tf.squeeze(start_token, 1))
def sample_fn(time, outputs, state):
logits = dense(outputs)
logits = tf.split(logits, self.x_depth, axis=-1)
samples = []
for (logit, depth) in zip(logits, self.x_depth):
if depth == 1:
tmp = logit
else:
tmp = ohc(logits=logit/self.temperature, dtype=tf.float32).sample()
samples.append(tmp)
samples = tf.concat(samples, axis=-1)
return samples
def next_input_fn(time, outputs, state, sample_ids):
outputs = sample_fn(None, outputs, None)
finished, _, _ = tf.split(outputs, self.x_depth, axis=-1)
finished = tf.argmax(finished, -1, output_type=tf.int32)
finished = tf.math.equal(finished, 88)
outputs = self.embedding_lookup(outputs, self.pitch_embedding)
if self.attention > 0:
k = key(outputs)
v = value(outputs)
outputs = tf.concat([outputs, attn([k, v])], axis=-1)
return finished, outputs, state
with tf.variable_scope("decoder"):
dense = tf.layers.Dense(self.x_dim, name="logit_dense", kernel_initializer=w_init)
init_state = self.get_initial_rnn_state(z, self.dec_rnn, self.dec_rnn_dim, self.dec_rnn_layer)
cell = self.rnn_cell(self.dec_rnn, self.dec_rnn_dim, self.dec_hyper_unit, self.dec_dropout,
self.dec_rnn_layer, input_dropout=False)
train_out, _ = tf.nn.dynamic_rnn(cell, x, sequence_length=seq_len, initial_state=init_state)
logits = dense(train_out)
logits_split = tf.split(logits, self.x_depth, axis=-1)
train_out = [tf.argmax(x, axis=-1, output_type=tf.int32) for x in logits_split]
train_out = tf.stack(train_out, axis=-1)
helper = seq2seq.CustomHelper(initialize_fn, sample_fn, next_input_fn,
sample_ids_shape=(self.x_dim), sample_ids_dtype=tf.float32)
decoder = seq2seq.BasicDecoder(cell, helper, init_state)
outputs, final_state, final_seq_len = seq2seq.dynamic_decode(decoder, maximum_iterations=512)
sample_outputs = outputs.sample_id
sample_outputs = tf.split(sample_outputs, self.x_depth, axis=-1)
final_outputs = [tf.argmax(x, axis=-1, output_type=tf.int32) for x in sample_outputs]
final_outputs = tf.stack(final_outputs, axis=-1)
final_state = final_state[-1]
return logits, final_outputs, final_seq_len, final_state
def reconstruction_loss(self, logits, X, X_len):
logits = tf.split(logits, self.x_depth, axis=-1)
X = tf.split(X, self.x_depth, axis=-1)
mask = tf.stop_gradient(tf.sequence_mask(X_len, dtype=tf.float32))
loss = []
for (logit, x, depth, feature) in zip(logits, X, self.x_depth, self.features):
if depth == 1:
tmp_loss = tf.losses.mean_squared_error(tf.squeeze(x, axis=-1),
tf.squeeze(logit, axis=-1), weights=mask)
else:
tmp_loss = tf.nn.softmax_cross_entropy_with_logits(labels=x, logits=logit)
tmp_loss = tf.losses.compute_weighted_loss(tmp_loss, weights=mask)
if feature == "pitch":
tmp_loss = tmp_loss * 3
loss.append(tmp_loss)
self.summaries.append(tf.summary.scalar(feature+"_loss", tmp_loss))
return tf.reduce_sum(loss), loss
def reconstruction_accuracy(self, logits, X, X_len):
logits = tf.split(logits, self.x_depth, axis=-1)
X = tf.split(X, self.x_depth, axis=-1)
mask = tf.sequence_mask(X_len, dtype=tf.float32)
accuracy = []
for (logit, x, feature, depth) in zip(logits, X, self.features, self.x_depth):
if depth == 1:
continue
tmp_x = tf.argmax(x, axis=-1, output_type=tf.int32)
tmp_logit = tf.argmax(logit, axis=-1, output_type=tf.int32)
tmp_acc = tf.contrib.metrics.accuracy(tmp_x, tmp_logit,
mask, feature+"_accuracy")
accuracy.append(tmp_acc)
self.summaries.append(tf.summary.scalar(feature+"_acc", tmp_acc))
return accuracy
def sample(self, enc_out, mode="cont"):
if mode == "cont":
z_mean, log_z_var = enc_out
z_sigma = tf.sqrt(tf.exp(log_z_var))
eps = tf.random_normal(tf.shape(z_mean), 0.0, 1.0, tf.float32)
z = z_mean + tf.multiply(z_sigma, eps)
elif mode == "cat":
if self.training:
unif = tf.random.uniform(shape=tf.shape(enc_out))
gumbel_noise = -tf.math.log(-tf.math.log(unif + 1e-10) + 1e-10)
logit = (enc_out + gumbel_noise) / self.gumbel
z = tf.nn.softmax(logit)
else:
z = ohc(logits=enc_out, dtype=tf.float32).sample()
return z
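    # The "cat" training branch above is the Gumbel-softmax trick: with
    # u ~ Uniform(0, 1), g = -log(-log(u)) is standard Gumbel noise, and
    # softmax((logits + g) / tau) is a differentiable approximation to a
    # one-hot categorical sample that sharpens as the temperature tau
    # (self.gumbel) approaches 0.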
def embedding_lookup(self, X, lookup_table):
X_split = tf.split(X, self.x_depth, -1)
p = tf.nn.embedding_lookup(lookup_table, tf.argmax(X_split[0], axis=-1))
x = tf.concat([p] + X_split[1:], -1)
return x
def build(self, X=None, S=None, labels=None, gpu="/gpu:0"):
"""
X: Onehot encoded MIDI representation of notes.
S: sequence length
labels: genre/style labels
"""
with tf.device(gpu):
if X is None:
self.X = tf.placeholder(tf.float32, (None, None, self.x_dim))
self.S = tf.placeholder(tf.int32, (None,))
self.labels = tf.placeholder(tf.int32, (None,))
else:
self.X = X
self.S = S
self.labels = labels
labels = tf.one_hot(self.labels, self.cat_dim)
model_input = tf.split(self.X, self.x_depth, axis=-1)
model_input = [tf.argmax(x, axis=-1, output_type=tf.int32) for x in model_input]
self.model_input = tf.stack(model_input, axis=-1)
self.temperature = tf.placeholder_with_default(1.0, shape=[], name="temperature")
self.step = tf.train.create_global_step()
beta = 1.0 - self.beta_anneal_steps / (self.beta_anneal_steps + tf.exp(self.step / self.beta_anneal_steps))
beta = tf.cast(beta, tf.float32)
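            # beta ramps smoothly from roughly 1/(N+1) toward 1.0 as the
            # global step grows (N = beta_anneal_steps), warming up the KL
            # term over the course of training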
# embed features
X_embed = self.embedding_lookup(self.X, self.pitch_embedding)
z_mean, log_z_var, z_cat_logit = self.encoder(X_embed, self.S)
self.z_cont = self.sample([z_mean, log_z_var], mode="cont")
self.z_cat = self.sample(z_cat_logit, mode="cat")
z_cat = tf.matmul(self.z_cat, self.style_embedding)
if self.style_embed_dim == self.cont_dim:
z = self.z_cont + z_cat
else:
z = tf.concat([self.z_cont, z_cat], axis=-1)
logits, self.output, self.len, dec_state = self.decoder(z, X_embed, self.S)
with tf.variable_scope("losses"):
self.recon_loss, self.feature_loss = self.reconstruction_loss(logits, self.X, self.S)
z_mean_mu = tf.reduce_mean(z_mean, 0)
z_mean_mu = tf.tile(tf.expand_dims(z_mean_mu, 0), [tf.shape(self.z_cont)[0], 1])
mu_loss = tf.nn.relu(self.mu_force - tf.losses.mean_squared_error(z_mean_mu, z_mean))
cont_kl_cost = tf.reduce_mean(self.kl_cost([z_mean, log_z_var], mode="cont")/self.cont_dim)
# categorical z
cat_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=z_cat_logit)
cat_loss = tf.reduce_mean(cat_loss)
self.kl_loss = cont_kl_cost
self.loss = 0
self.loss += self.recon_loss + mu_loss
self.loss += cont_kl_cost * beta * self.kl_reg
self.loss += cat_loss
self.summaries.append(tf.summary.scalar("total_loss", self.loss))
self.summaries.append(tf.summary.scalar("cat_loss", cat_loss))
self.summaries.append(tf.summary.scalar("cont_kl_cost", cont_kl_cost))
self.summaries.append(tf.summary.scalar("mu_loss", mu_loss))
with tf.variable_scope("accuracies"):
self.accuracies = self.reconstruction_accuracy(logits, self.X, self.S)
if self.training:
# optimizer... training...
self.learning_rate = tf.maximum(5e-4 * 0.95 ** ((self.step - 10000) / 5000), 1e-4)
opt = tf.train.AdamOptimizer(self.learning_rate)
g, v = zip(*opt.compute_gradients(self.loss, tf.trainable_variables()))
g, _ = tf.clip_by_global_norm(g, 1.0)
gvs = zip(g, v)
self.op = opt.apply_gradients(gvs, global_step=self.step)
self.init = tf.global_variables_initializer()
with tf.variable_scope("misc"):
self.summaries.append(tf.summary.scalar("beta", beta))
self.summaries.append(tf.summary.scalar("learning_rate", self.learning_rate))
self.summ_op = tf.summary.merge(self.summaries)
self.saver = tf.train.Saver(max_to_keep=20)
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print("Total learnable parameters: {}".format(total_parameters))
if __name__ == "__main__":
tf.reset_default_graph()
m = MusicVAE()
m.build()
|
{"hexsha": "cf365419b44616ad0741670856698e8ff0a797c9", "size": 18366, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "y33-j3T/DeepMusicvStyle", "max_stars_repo_head_hexsha": "f1a6b149d8412ad480952e6820708b2b6eaf4b96", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model.py", "max_issues_repo_name": "y33-j3T/DeepMusicvStyle", "max_issues_repo_head_hexsha": "f1a6b149d8412ad480952e6820708b2b6eaf4b96", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "y33-j3T/DeepMusicvStyle", "max_forks_repo_head_hexsha": "f1a6b149d8412ad480952e6820708b2b6eaf4b96", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.915, "max_line_length": 127, "alphanum_fraction": 0.5411085702, "include": true, "reason": "import numpy", "num_tokens": 3992}
|
!
! CalculiX - A 3-dimensional finite element program
! Copyright (C) 1998-2019 Guido Dhondt
!
! This program is free software; you can redistribute it and/or
! modify it under the terms of the GNU General Public License as
! published by the Free Software Foundation(version 2);
!
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program; if not, write to the Free Software
! Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
!
subroutine hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
!
! determine the critical depth
!
implicit none
!
real*8 xflow,rho,b,dg,sqrts0,hk,theta,tth,c1,xflow2,
& A,dBBdh,dAdh,BB,dhk
!
hk=((xflow/(rho*b))**2/(dg*sqrts0))**(1.d0/3.d0)
!
if(dabs(theta).lt.1.d-10) return
!
! critical depth for trapezoid, non-rectangular cross section
!
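!     the loop below applies Newton's method to
!     f(h) = Q**2*B(h) - rho**2*g*sqrt(s0)*A(h)**3 = 0,
!     where A(h) is the trapezoidal cross-sectional area and
!     B(h) = dA/dh is the surface width, iterating h = h - f(h)/f'(h)
!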
tth=dtan(theta)
c1=rho*rho*dg*sqrts0
xflow2=xflow*xflow
!
do
A=hk*(b+hk*tth)
dBBdh=2.d0*tth
dAdh=b+hk*dBBdh
BB=dAdh
dhk=(xflow2*BB-c1*A**3)/(xflow2*dBBdh-3.d0*c1*A*A*dAdh)
         if(dabs(dhk)/hk.lt.1.d-3) exit
hk=hk-dhk
enddo
!
return
end
|
{"hexsha": "25e9665c4a206f07157d9ec6500c3cb1a28baec4", "size": 1516, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ccx_prool/CalculiX/ccx_2.16/src/hcrit.f", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/CalculiX/ccx_2.16/src/hcrit.f", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ccx_prool/CalculiX/ccx_2.16/src/hcrit.f", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1538461538, "max_line_length": 71, "alphanum_fraction": 0.6220316623, "num_tokens": 470}
|
subroutine onepath(phpad, index, nleg, deg, iorder,
& cxc, rs, vint, xmu, edge, xkf, rnrmav, gamach,
& versn, ipot, rat, iz,
& ipol, evec, elpty, xivec,
& innnn, ijson, ivrbse, ri, beta, eta,
& ne1,col1,col2,col3,col4,col5,col6,col7)
implicit double precision (a-h, o-z)
c+---------------------------------------------------------------------
c "Based on or developed using Distribution: FEFF8.5L
c Copyright (c) [2013] University of Washington"
c
C See ../HEADERS/license.h for full license information
c+---------------------------------------------------------------------
c compute a single path, generating the F matrix then returning the
c information contained in a feffNNNN.dat file
c
c INPUT:
c phpad: path to phase.pad file character*256
c index: path index integer
c nleg: number of legs in path integer
c deg: path degeneracy double
c iorder: order of approximation in genfmt integer
c ipot: array of unique potentials integer(legtot)
c rat: cartesian coordinates of scatterers double(3,0:legtot+1)
c ipol: flag to do polarization integer
c evec: polarization vector double(3)
c elpty: ellipticity double
c xivec: direction of travel double(3)
c innnn: flag to write feffNNNN.dat file integer
c ijson: flag to write feffNNNN.json file integer
c ivrbse: flag to write screen messages integer
c
c also requires a phase.pad file from an earlier run of xsph
c
c OUTPUT
c ri: leg lengths double(legtot)
c beta: beta angles double(legtot+1)
c eta: eta angles double(legtot+2)
c ne: number of k-grid points integer
c col1: k-grid double(nex)
c col2: central atom phase shifts double(nex)
c col3: magnitude of F_eff double(nex)
c col4: phase of F_eff double(nex)
c col5: reduction factor double(nex)
c col6: mean free path double(nex)
c col7: real partof complex momentum double(nex)
c
c Potential information:
c cxc: description of the potential model character*8
c rs: approximate interstitial radius double
c vint: interstitial potential double
c xmu: Fermi energy double
c edge: threshold relative to atomic value double
c xkf: k value at Fermi energy double
c rnrmav: average Norman radius double
c versn: Feff versioning character*__
c+---------------------------------------------------------------------
include '../HEADERS/const.h'
include '../HEADERS/dim.h'
include '../HEADERS/vers.h'
character*256 phpad
c+---------------------------------------------------------------------
c parameters related to the call to regenf
c
c Input flags:
c iorder, order of approx in f-matrix expansion (see setlam)
c (normal use, 2. Do ss exactly regardless of iorder)
c+---------------------------------------------------------------------
double precision evec(3), xivec(3), spvec(3)
complex*16 ptz(-1:1, -1:1)
integer iorder
c integer mfeff, ipr5
logical wnstar
double precision angks, elpty
c double precision critcw
logical nnnn, json, verbse
integer innnn, ijson, ivrbse
c+----------------------------------------------------------------------
c removing local common blocks, replacing them with explicit passing
c of the various data structures
c+----------------------------------------------------------------------
c include 'clmz.h'
complex*16 clmi(ltot+1,mtot+ntot+1,legtot)
c include 'fmatrx.h'
complex*16 fmati(lamtot,lamtot,legtot)
c include 'lambda.h'
c . mlam(lamtot), !mu for each lambda
c . nlam(lamtot), !nu for each lambda
c . lamx, !max lambda in problem
c . laml0x, !max lambda for vectors involving absorbing atom
c . mmaxp1, nmax !max mu in problem + 1, max nu in problem
integer mlam(lamtot), nlam(lamtot), lamx, laml0x, mmaxp1, nmax
c include 'nlm.h'
dimension xnlm(ltot+1,mtot+1)
c include 'rotmat.h'
dimension dri(ltot+1,2*mtot+1,2*mtot+1,legtot+1)
c include 'pdata.h'
c character*80 text(5)
character*6 potlbl(0:nphx)
complex*16 ph(nex,-ltot:ltot,0:nphx), eref(nex), em(nex)
complex caps(nex)
double precision rat(3,0:legtot+1), rathea(3,legtot)
double precision ri(legtot), beta(legtot+1), eta(0:legtot+1)
double precision deg, rnrmav, xmu, edge, rs, vint
integer lmax(nex,0:nphx), ipot(0:legtot), ipthea(legtot),
& iz(0:nphx)
c+----------------------------------------------------------------------
c parameters used for calling sthead
c+----------------------------------------------------------------------
double precision xion(0:nphx), rmt(0:nphx), rnrm(0:nphx)
logical lreal
c integer ltext(5), ntext
integer nsc, nleg, npot, ne, ik0, ihole, ixc
integer kinit, linit, ilinit, lmaxp1
c common /pdata/ ph(nex,-ltot:ltot,0:nphx), !complex phase shifts ipot=0
c . eref(nex), !complex energy reference
c . rat(3,0:legtot+1), !position of each atom, code units(bohr)
c . em(nex), !energy mesh
c . ri(legtot), beta(legtot+1), eta(0:legtot+1), !r, beta, eta for each leg
c . deg, rnrmav, xmu, edge, !(output only)
c . lmax(nex,0:nphx), !max l with non-zero phase for each energy
c . ipot(0:legtot), !potential for each atom in path
c . iz(0:nphx), !atomic number (output only)
c . ltext (5), !length of each string
c . nsc, nleg, !nscatters, nlegs (nleg = nsc+1)
c . npot, ne, !number of potentials, energy points
c . ik0, !index of energy grid corresponding to k=0 (edge)
c . ipath, ihole, !index of current path and hole (output only)
c . kinit, linit, ilinit, ! initial state kappa and ang. mom.
c . lmaxp1, !largest lmax in problem + 1
c . ntext !number of text lines
c+----------------------------------------------------------------------
c parameters used in the code taken from GENFMT/genfmt.f
c+----------------------------------------------------------------------
complex*16 rho(legtot), pmati(lamtot,lamtot,2)
complex*16 pllp, ptrac, srho, prho, cfac
complex*16 cchi(nex), rkk(nex,8), rkk2(nex,8,nspx)
complex*16 eref2(nex,nspx), ph4(nex,-ltot:ltot, nspx, 0:nphx)
complex*16 bmati(-mtot:mtot, 8, -mtot:mtot, 8)
complex*16 ck(nex), lind(8)
dimension xk(nex), ckmag(nex)
c ckp and ffmag are used to compute importance factor
c complex*16 ckp
c dimension ffmag(nex)
dimension eps1(3), eps2(3), vec1(3), vec2(3)
character*512 slog
integer ntit
character*80 titles(nheadx), lines(2*nheadx)
c+----------------------------------------------------------------------
c parameters related to using padlib
c+----------------------------------------------------------------------
real phff(nex), amff(nex)
double precision xkr(nex)
integer mpadx
parameter (mpadx = 8)
c+----------------------------------------------------------------------
c parameters related to using fdtarr.f and fdthea.f
c+----------------------------------------------------------------------
character*12 fname
character*13 fjson
dimension col1(nex), col2(nex), col3(nex), col4(nex), col5(nex)
dimension col6(nex), col7(nex)
real sxk(nex)
complex sck(nex)
double precision gamach
c used for divide-by-zero and trig tests
parameter (eps = 1.0e-16)
external xstar
dimension atarr(3,natx)
character*30 versn
character*8 cxc, sout(0:7)
data sout /'H-L exch', 'D-H exch', 'Gd state', 'DH - HL ',
1 'DH + HL ', 'val=s+d ', 'sigmd(r)', 'sigmd=c '/
do 5 i=1,natx
atarr(1, i) = 0
atarr(2, i) = 0
atarr(3, i) = 0
5 continue
c atarr is a dummy array used to call mkptz
c CAUTION: atom coordinates may have been changed by Feff for some
c funny polarization or ellipticity. need a test case of funny
c pol/ell
wnstar = .false.
c+----------------------------------------------------------------------
c read genfmt.json and global.json
c keep at input: iorder, ipol, evec, elpty, xivec
c+----------------------------------------------------------------------
c call regenf(mfeff, ipr5, critcw, iorder, wnstar,
c & ipol, ispin, le2, angks, elpty, evec, xivec, ptz)
c+----------------------------------------------------------------------
c initialize everything needed for the genfmt calculation
c+----------------------------------------------------------------------
do 10 i=0,nphx
iz(i) = 0
10 continue
call genfmt_prep(phpad, ispin,
c arguments for rdxsph
& ne, ne1, ne3, npot, ihole, rnrmav,
& xmu, edge, ik0, ixc, rs, vint,
& em, eref2, iz, potlbl, ph4, rkk2, lmax, lmaxp1,
c arguments for setkap
& kinit, linit, ilinit,
c argument for snlm (an output)
& xnlm,
c things set in genfmt_prep
& eref, ph, xk, ck, ckmag, xkr,
& nsp, ll, npath, ntotal, nused, xportx)
c print *, "ik0 mu kf edge rnrmav"
c print *, ik0, real(em(ik0))*hart, real(ck(ik0))/bohr, edge*hart,
c & rnrmav
xkf = real(ck(ik0))
cxc = sout(ixc)
write(versn,12) vfeff//vf85e
call setgam(iz(0), ihole, gamach)
c print *, "iz(0), ihole, gamach", iz(0), ihole, gamach
12 format( a30)
c+----------------------------------------------------------------------
c pull out the central atom phase shifts
c+----------------------------------------------------------------------
do 100 ie=1,ne
caps(ie) = cmplx(ph(ie, ll, 0))
100 continue
c+----------------------------------------------------------------------
c read the input JSON file for this program: onepath.json
c return ri, beta, eta, rat (like ri, but with 0th and (n++1)th atom)
c+----------------------------------------------------------------------
le2 = 0
ispin = 0
spvec(1) = 0
spvec(2) = 0
spvec(3) = 0
c call json_read_onepath(index, iorder, ipol,
c & nleg, deg, rat, ipot, elpty, evec, xivec, nnnn, json)
call pathgeom(nleg, nsc, ipol, rat, ipot, ri, beta, eta)
call mkptz(ipol, elpty, evec, xivec, ispin, spvec, natx, atarr,
& angks, le2, ptz)
nnnn = .false.
if (innnn .gt. 0) nnnn=.true.
json = .false.
if (ijson .gt. 0) json=.true.
verbse = .false.
if (ivrbse .gt. 0) verbse=.true.
c+----------------------------------------------------------------------
c fetch the standard output header lines from xsect.json
c+----------------------------------------------------------------------
ntit = 0
c call read_titles(ntit, titles)
c+----------------------------------------------------------------------
c this section is cut-n-pasted from genfmt
c this is the loop over paragraphs in the paths.dat file
c the call to rdpath is replaced by the reading of the onepath.json file (for now)
c+----------------------------------------------------------------------
npath = npath + 1
ntotal = ntotal + 1
if (wnstar) then
c should be ipol=1
do 1150 ic =1,3
vec1(ic) = rat(ic,1) - rat(ic,0)
vec2(ic) = rat(ic,nleg-1) - rat(ic,0)
eps1(ic) = evec(ic)
1150 continue
if (elpty.ne.0.0) then
eps2(1) = xivec(2)*evec(3)-xivec(3)*evec(2)
eps2(2) = xivec(3)*evec(1)-xivec(1)*evec(3)
eps2(3) = xivec(1)*evec(2)-xivec(2)*evec(1)
endif
ndeg = nint (deg)
xxstar = xstar(eps1, eps2, vec1, vec2, ndeg, elpty,
& ilinit)
c write(4,'(1x,i6,f10.3)') npath, xxstar
endif
c Need reff in code units
reff = 0
do 1200 i = 1, nleg
reff = reff + ri(i)
1200 continue
reff = reff/2
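c     reff is half the total leg length, i.e. the effective path
c     distance R that enters chi(k) through the exp(2*i*k*R) factor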
c Set lambda for low k
call setlam(iorder, 1, beta, nsc, nleg, ilinit,
& mlam, nlam, lamx, laml0x, mmaxp1, nmax)
c Calculate and store rotation matrix elements
do 1300 isc = 1, nleg
call rot3i (lmaxp1, mmaxp1, isc, beta, dri)
1300 continue
if (ipol.gt.0) then
c one more rotation in polarization case
c NEED MORE rot3j FOR CENTRAL ATOM ( l \pm 1 )
call rot3i (ilinit+1, ilinit+1, nleg+1, beta, dri)
endif
c Start cycle over spin
do ie = 1, ne
cchi(ie) = 0
enddo
do 6000 is = 1, nsp
if (nsp.eq.1) then
call mmtr(bmati, ipol, ispin, le2, angks, ptz, lind,
& dri, eta, nsc, nleg, kinit, ilinit)
else
call mmtr(bmati, ipol, is, le2, angks, ptz, lind,
& dri, eta, nsc, nleg, kinit, ilinit)
endif
do 510 ie = 1, ne
eref(ie) = eref2(ie,is)
510 continue
do 520 iph = 0, npot
do 522 ie = 1, ne
do 524 il = -lmax(ie, iph), lmax(ie, iph)
ph(ie,il, iph) = ph4(ie, il, is, iph)
524 continue
522 continue
520 continue
do 530 ie = 1, ne
do 532 kdif = 1, 8
rkk(ie,kdif) = rkk2(ie,kdif,is)
532 continue
530 continue
do 540 ie = 1, ne
ck(ie) = sqrt (2* (em(ie) - eref(ie)))
540 continue
c Big energy loop
do 5000 ie = 1, ne
c complex rho
do 2010 ileg = 1, nleg
rho(ileg) = ck(ie) * ri(ileg)
2010 continue
c if ck is zero, xafs is undefined. Make it zero and jump
c to end of calc part of loop.
if (abs(ck(ie)) .le. eps) then
cchi(ie) = cchi(ie) + 0
write(slog,2055) ie, ck(ie)
2055 format (' genfmt: ck=0. ie, ck(ie)',i5,1p,2e14.5)
call wlog(slog)
goto 4990
endif
c Calculate and store spherical wave factors c_l^(m)z^m/m!
c in a matrix clmi(il,im,ileg), ileg=1...nleg.
c Result is that common /clmz/ is updated for use by fmtrxi.
c
c zero clmi arrays
do 2100 ileg = 1, legtot
do 2102 im = 1, mtot+ntot+1
do 2104 il = 1, ltot+1
clmi(il,im,ileg) = 0
2104 continue
2102 continue
2100 continue
mnmxp1 = mmaxp1 + nmax
do 2150 ileg = 1, nleg
isc0 = ileg-1
if (isc0.eq.0) isc0=nleg
isc1 = ileg
lxp1 = max (lmax(ie,ipot(isc0))+1,
& lmax(ie,ipot(isc1))+1)
mnp1 = min (lxp1, mnmxp1)
call sclmz (rho, lxp1, mnp1, ileg, clmi)
2150 continue
c Calculate and store scattering matrices fmati.
c First matrix
call fmtrxi(lamx, laml0x, ie, 2, 1,
& clmi, mlam, nlam, xnlm, dri,
& ph, eta, lmax, ipot, fmati)
c Last matrix if needed
if (nleg .gt. 2) then
call fmtrxi(laml0x, lamx, ie, nleg, nleg-1,
& clmi, mlam, nlam, xnlm, dri,
& ph, eta, lmax, ipot, fmati)
endif
c Intermediate scattering matrices
do 2200 ilegp = 2, nsc-1
ileg = ilegp + 1
call fmtrxi(lamx, lamx, ie, ileg, ilegp,
& clmi, mlam, nlam, xnlm, dri,
& ph, eta, lmax, ipot, fmati)
2200 continue
c Big matrix multiplication loops.
c Calculates trace of matrix product
c M(1,N) * f(N,N-1) * ... * f(3,2) * f(2,1), as in reference.
c We will calculate the trace over lambda_N, working from
c right to left.
c Use only 2 pmati arrays, alternating indp (index p)
c 1 and 2.
c to start f(2,1) -> pmat(1)
indp = 1
do 2250 lmp = 1, laml0x
do 2252 lm = 1, lamx
pmati(lm,lmp,indp)= fmati(lm,lmp,1)
2252 continue
2250 continue
c f(N,N-1) * ... * f(3,2) * [f(2,1)]
c Term in [] is pmat(1)
do 2900 isc = 2, nleg-1
c indp is current p matrix, indp0 is previous p matrix
indp = 2 - mod(isc,2)
indp0 = 1 + mod(indp,2)
do 2850 lmp = 1, laml0x
do 2852 lm = 1, lamx
pllp=0
do 2800 lmi = 1, lamx
pllp = pllp +
1 fmati(lm,lmi,isc)*pmati(lmi,lmp,indp0)
2800 continue
pmati(lm,lmp,indp) = pllp
2852 continue
2850 continue
2900 continue
c srho=sum pr(i), prho = prod pr(i)
srho=0
prho=1
do 3200 ileg = 1, nleg
srho = srho + rho(ileg)
prho = prho * rho(ileg)
3200 continue
c Termination matrix, fmati(...,nleg)
c Polarization enters only this matrix
c this will fill fmati(...,nleg) (NO LONGER in common /fmtrxi/)
call mmtrxi(rkk, laml0x, bmati, ie, 1, nleg,lind,
& clmi, mlam, nlam, xnlm, eta, fmati)
c Final trace over matrix
ptrac=0
do 4400 lm = 1, laml0x
do 4402 lmp = 1, laml0x
ptrac = ptrac + fmati(lm,lmp,nleg) *
& pmati(lmp,lm,indp)
4402 continue
4400 continue
c Calculate xafs
c Complex chi (without 2kr term)
c ipot(nleg) is central atom
c cdel1 = exp(2*coni*ph(ie,ilinit+1,0))
c       central atom phase shifts are included in the normalized
c       reduced matrix elements rkk(....)
cfac = exp(coni*(srho-2*xk(ie)*reff)) / prho
c now factor 1/(2*l0+1) is inside termination matrix
c cchi(ie) = ptrac * cfac/(2*l0+1)
if (nsp.eq.2 .and. is.eq.1) cfac = -cfac
cchi(ie) = cchi(ie) + ptrac * cfac
c write(7,5) xk(ie), -12*dimag(cchi(ie)*exp(coni*2*xk(ie)*reff))
c 5 format (3f13.5)
c When ck(ie)=0, xafs is set to zero. Calc above undefined.
c Jump to here from ck(ie)=0 test above.
4990 continue
5000 continue
c end of energy loop
6000 continue
c end of loop over spins
c+----------------------------------------------------------------------
c compute the importance factor of this path
c+----------------------------------------------------------------------
c call import(ne1, nsp, ik0, deg, ckmag, em, eref2,
c & cchi, xportx, crit)
c+----------------------------------------------------------------------
c compute mag and phase arrays for F_eff, set single precision
c arrays for xk and ck
c+----------------------------------------------------------------------
phffo = 0
do 15 ie = 1, ne1
phff(ie) = 0
if (abs(cchi(ie)) .ge. eps) then
phff(ie) = real(atan2 (dimag(cchi(ie)), dble(cchi(ie))))
end if
c remove 2 pi jumps in phase
if (ie.gt.1) call pijump (dble(phff(ie)), phffo)
phffo = dble(phff(ie))
amff(ie) = real(abs(cchi(ie)))
sxk(ie) = real(xk(ie))
sck(ie) = cmplx(ck(ie))
15 continue
c+----------------------------------------------------------------------
c the following get stored in feff.pad for each path:
c ipath, nleg, deg, reff (*bohr), crit, ipot(1, nleg)
c rat beta eta ri amff phff
c
c instead, we'll skip straight to the chore performed in feffdt where
c the stuff from feff.pad has been read and is written out to the form
c of a feffNNNN.dat file
c+----------------------------------------------------------------------
c+----------------------------------------------------------------------
c compute the columns of feffNNNN.dat
c+----------------------------------------------------------------------
call fdtarr(ne1, real(reff), ilinit, amff, phff, caps, sxk, sck,
& col1, col2, col3, col4, col5, col6, col7)
if (nnnn) then
c        Prepare output file f3ffnnnn.dat
write(fname,20) index
20 format ('f3ff', i4.4, '.dat')
write(slog,30) index, fname
30 format (i8, 5x, a)
if (verbse) print *, slog(1:40)
c call wlog(slog)
c Write feff.dat's
open (unit=3, file=fname, status='unknown', iostat=ios)
call chopen (ios, fname, 'onepath')
c+----------------------------------------------------------------------
c write out the feffNNNN.dat header
c+----------------------------------------------------------------------
do 36 il=1,legtot
ipthea(il) = ipot(il)
do 33 ix=1,3
rathea(ix,il) = rat(ix,il)
33 continue
36 continue
do 38 ip = 0, nphx
xion(ip) = 0.
rmt(ip) = 0.
rnrm(ip) = 0.
38 continue
lreal = .false.
rgrd = 0.05
vr0 = 0.
vi0 = 0.
gamach = gamach/hart
call sthead (ntit, titles, npot, iz, rmt, rnrm,
1 xion, ihole, ixc,
2 vr0, vi0, gamach, xmu, xkf, vint, rs,
2 lreal, rgrd)
gamach = gamach*hart
call fdthea(ntit, titles, index, iorder, nleg, real(deg),
& real(reff), real(rnrmav), real(edge), rathea, ipthea,
& iz, potlbl, nlines, lines)
do 40 i=1, nlines
write(3, 50)lines(i)
40 continue
50 format(a)
c+----------------------------------------------------------------------
c write out the feffNNNN.dat columns
c+----------------------------------------------------------------------
do 60 ie = 1, ne1
write(3,70) col1(ie), col2(ie), col3(ie), col4(ie),
& col5(ie), col6(ie), col7(ie)
60 continue
70 format (1x, f6.3, 1x, 3(1pe11.4,1x),1pe10.3,1x,
1 2(1pe11.4,1x))
c Done with feff.dat
close (unit=3)
end if
c end of conditional for writing feffNNNN.dat
c+----------------------------------------------------------------------
c write out a JSON file with the same information as feffNNNN.dat
c+----------------------------------------------------------------------
fjson = ''
c$$$ if (json) then
c$$$ write(fjson,80) index
c$$$ 80 format ('feff', i4.4, '.json')
c$$$ write(slog,90) index, fjson
c$$$ 90 format (i8, 5x, a)
c$$$ if (verbse) print *, slog(1:40)
c$$$c call wlog(slog)
c$$$
c$$$ call json_nnnn(fjson, ntit, titles, rat, ipot, ri, beta, eta,
c$$$ & index, iorder, nleg, deg, reff, rnrmav, edge,
c$$$ & ne1, col1, col2, col3, col4, col5, col6, col7)
c$$$
c$$$ end if
c end of conditional for writing feffNNNN.json
end
|
{"hexsha": "3f412b0dcfd4bdb8758c98e746770c6ffdc497f8", "size": 24222, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/GENFMT/onepath.f", "max_stars_repo_name": "bruceravel/feff85exafs", "max_stars_repo_head_hexsha": "9698ce3703a73def4c1a965f276708d689ea5acb", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-28T15:52:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-28T15:52:14.000Z", "max_issues_repo_path": "src/GENFMT/onepath.f", "max_issues_repo_name": "bruceravel/feff85exafs", "max_issues_repo_head_hexsha": "9698ce3703a73def4c1a965f276708d689ea5acb", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GENFMT/onepath.f", "max_forks_repo_name": "bruceravel/feff85exafs", "max_forks_repo_head_hexsha": "9698ce3703a73def4c1a965f276708d689ea5acb", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8796147673, "max_line_length": 97, "alphanum_fraction": 0.467550161, "num_tokens": 7123}
|
# Function to pull python functions
# included in spotify.ipynb
import time
import os
import dotenv
import requests
import pandas as pd
import numpy as np
def pull_albums(artist_id):
    # `url` (the Spotify API base URL) and `headers` (the auth headers) are
    # assumed to be defined at module level by the including notebook
    start = time.process_time()
    album_names_dates = {}
    # bookkeeping for duplicate album titles
    albs_added = []
    to_remove = []
    track_info = []
albums = requests.get(url + 'artists/' + artist_id + '/albums',
headers=headers,
params={'include_groups': 'album', 'limit': 50}).json()
for album in albums['items']:
album_names_dates[album['name']] = album['release_date']
artist_name = requests.get(url + 'artists/' + artist_id, headers=headers).json()['name']
print(f'Successfully accessed {artist_name}')
for i in range(len(albums['items'])):
alb_name = albums['items'][i]['name']
if alb_name in albs_added:
to_remove.append(i)
albs_added.append(alb_name)
    idx = 0
    for i in albums['items']:
        if idx not in to_remove:
r = requests.get(url + 'albums/' + i['id'] + '/tracks',
headers=headers)
tracks = r.json()['items']
for track in tracks:
detailsr = requests.get(url + 'audio-features/' + track['id'], headers=headers).json()
# combine with album info
detailsr.update({
'track_name': track['name'],
'album_name': i['name'],
'album_id': i['id'],
'release_date': i['release_date']
})
track_info.append(detailsr)
print('{} added...'.format(i['name']))
        idx += 1
    print('Execution time: ', time.process_time() - start, sep='')
    return track_info
|
{"hexsha": "706a92bd3f865d0a0df49c7087496d194f33fc17", "size": 1700, "ext": "py", "lang": "Python", "max_stars_repo_path": "spotify_proj/pull_albums.py", "max_stars_repo_name": "DrakeWagner/projects", "max_stars_repo_head_hexsha": "998ef5ef0320db5167fb1bfcf46085b3a18abc42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-02T01:10:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-02T01:10:52.000Z", "max_issues_repo_path": "spotify_proj/pull_albums.py", "max_issues_repo_name": "DrakeWagner/projects", "max_issues_repo_head_hexsha": "998ef5ef0320db5167fb1bfcf46085b3a18abc42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spotify_proj/pull_albums.py", "max_forks_repo_name": "DrakeWagner/projects", "max_forks_repo_head_hexsha": "998ef5ef0320db5167fb1bfcf46085b3a18abc42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7575757576, "max_line_length": 99, "alphanum_fraction": 0.5811764706, "include": true, "reason": "import numpy", "num_tokens": 394}
|
#include <boost/callable_traits.hpp>
#include <functional>
#include <iostream>
#include <type_traits>
template < //
typename Derived, //
bool IsConst, //
bool IsNoexcept, //
typename Return, //
typename... Args //
>
class function_ref_impl
{
private:
using erased_fn_type = Return (*)(void*, Args...);
using final_erased_fn_type = std::conditional_t< //
IsNoexcept, //
boost::callable_traits::add_noexcept_t<erased_fn_type>, //
erased_fn_type //
>;
void* _ptr;
final_erased_fn_type _erased_fn;
template <typename T>
using propagate_const = std::conditional_t< //
IsConst, //
std::add_const_t<T>, //
T //
>;
template <typename... Xs>
using invocable_check = std::conditional_t< //
IsNoexcept, //
std::is_nothrow_invocable_r<Xs...>, //
std::is_invocable_r<Xs...> //
>;
template <typename T>
using is_compatibly_invokable = invocable_check< //
Return, //
std::add_lvalue_reference_t<propagate_const<T>>, //
Args...>;
template <typename T>
using enable_if_not_self = std::enable_if_t< //
is_compatibly_invokable<T>::value //
&& !std::is_same_v<std::decay_t<T>, Derived> //
>;
template <typename T>
using enable_if_compatible_const = std::enable_if_t< //
boost::callable_traits::is_const_member_v<T> //
|| !IsConst //
|| std::is_function_v<std::remove_pointer_t<T>> //
>;
template <typename T>
using enable_if_compatible_noexcept = std::enable_if_t< //
boost::callable_traits::is_noexcept_v<T> //
|| !IsNoexcept //
>;
template <typename T>
using enable_if_valid = std::conjunction< //
enable_if_not_self<T>, //
enable_if_compatible_const<T>, //
enable_if_compatible_noexcept<T> //
>;
template <typename T>
auto make_erased_fn() noexcept
{
return [](void* ptr, Args... xs) noexcept(IsNoexcept)->Return
{
return std::invoke(
*reinterpret_cast<std::add_pointer_t<propagate_const<T>>>(ptr),
std::forward<Args>(xs)...);
};
}
protected:
Return call(Args... xs) const
{
return _erased_fn(_ptr, std::forward<Args>(xs)...);
}
public:
template <typename T, typename = enable_if_valid<T>>
constexpr function_ref_impl(T&& x) noexcept
: _ptr{(void*)std::addressof(x)}, _erased_fn{make_erased_fn<T>()}
{
}
constexpr function_ref_impl(
const function_ref_impl& rhs) noexcept = default;
template <typename T, typename = enable_if_valid<T>>
constexpr function_ref_impl& operator=(T&& x) noexcept
{
_ptr = (void*)std::addressof(x);
_erased_fn = make_erased_fn<T>();
return *this;
}
constexpr function_ref_impl& operator=(
const function_ref_impl& rhs) noexcept = default;
constexpr void swap(function_ref_impl& rhs) noexcept
{
std::swap(_ptr, rhs._ptr);
std::swap(_erased_fn, rhs._erased_fn);
}
};
template <typename Signature>
class function_ref;
#define DEFINE_FUNCTION_REF_SPECIALIZATION(is_const, is_noexcept, ...) \
template <typename Return, typename... Args> \
class function_ref<Return(Args...) __VA_ARGS__> \
: public function_ref_impl<function_ref<Return(Args...)>, is_const, \
is_noexcept, Return, Args...> \
{ \
private: \
using base_type = function_ref_impl<function_ref<Return(Args...)>, \
is_const, is_noexcept, Return, Args...>; \
\
public: \
using base_type::base_type; \
\
Return operator()(Args... xs) __VA_ARGS__ \
{ \
return this->call(std::forward<Args>(xs)...); \
} \
};
DEFINE_FUNCTION_REF_SPECIALIZATION(false, false, )
DEFINE_FUNCTION_REF_SPECIALIZATION(true, false, const)
DEFINE_FUNCTION_REF_SPECIALIZATION(false, true, noexcept)
DEFINE_FUNCTION_REF_SPECIALIZATION(true, true, const noexcept)
#undef DEFINE_FUNCTION_REF_SPECIALIZATION
template <typename Sig>
struct remove_first_arg;
template <typename R, typename A, typename... As>
struct remove_first_arg<R(A, As...)>
{
using type = R(As...);
};
template <typename Sig>
using remove_first_arg_t = typename remove_first_arg<Sig>::type;
template <typename R, typename... Args>
function_ref(R (*)(Args...))->function_ref<R(Args...)>;
template <typename R, typename... Args>
function_ref(R (*)(Args...) noexcept)->function_ref<R(Args...) noexcept>;
// TODO: noexcept and const qualifier
template <typename F, typename S = decltype(&std::decay_t<F>::operator())>
function_ref(F &&)
->function_ref<
remove_first_arg_t<boost::callable_traits::function_type_t<S>>>;
template <typename Signature>
constexpr void swap(
function_ref<Signature>& lhs, function_ref<Signature>& rhs) noexcept
{
lhs.swap(rhs);
}
void foo(function_ref<void()> f)
{
f();
}
void fp_nonnoexcept()
{
}
void fp_noexcept() noexcept
{
}
struct c
{
void ff()
{
}
void tf() const
{
}
void ft() noexcept
{
}
void tt() const noexcept
{
}
};
void test_nonconst_nonnoexcept()
{
using T = function_ref<void()>;
using M = function_ref<void(c)>;
static_assert(!boost::callable_traits::is_const_member_v<T>);
static_assert(!boost::callable_traits::is_const_member_v<M>);
static_assert(!boost::callable_traits::is_noexcept_v<T>);
static_assert(!boost::callable_traits::is_noexcept_v<M>);
// FPtrs:
T{&fp_nonnoexcept}; // OK
T{&fp_noexcept}; // OK
// MPtrs:
M{&c::ff}; // OK
M{&c::tf}; // OK
M{&c::ft}; // OK
M{&c::tt}; // OK
// Lambdas:
T{[] {}}; // OK
T{[]() mutable {}}; // OK
T{[]() noexcept {}}; // OK
T{[]() mutable noexcept {}}; // OK
}
void test_const_nonnoexcept()
{
using T = function_ref<void() const>;
using M = function_ref<void(c) const>;
static_assert(boost::callable_traits::is_const_member_v<T>);
static_assert(boost::callable_traits::is_const_member_v<M>);
static_assert(!boost::callable_traits::is_noexcept_v<T>);
static_assert(!boost::callable_traits::is_noexcept_v<M>);
// FPtrs:
T{&fp_nonnoexcept}; // OK, const ignored for non-members
T{&fp_noexcept}; // OK, const ignored for non-members
// MPtrs:
// M{&c::ff}; // OK - does not compile as intended
M{&c::tf}; // OK
// M{&c::ft}; // OK - does not compile as intended
M{&c::tt}; // OK
// Lambdas:
// T{[]() mutable {}}; // OK - does not compile as intended
T{[] {}}; // OK
// T{[]() mutable noexcept {}}; // OK - does not compile as intended
T{[]() noexcept {}}; // OK
}
void test_nonconst_noexcept()
{
using T = function_ref<void() noexcept>;
using M = function_ref<void(c) noexcept>;
static_assert(!boost::callable_traits::is_const_member_v<T>);
static_assert(!boost::callable_traits::is_const_member_v<M>);
static_assert(boost::callable_traits::is_noexcept_v<T>);
static_assert(boost::callable_traits::is_noexcept_v<M>);
// FPtrs:
// T{&fp_nonnoexcept}; // OK - does not compile as intended
T{&fp_noexcept}; // OK
// MPtrs:
// M{&c::ff}; // OK - does not compile as intended
// M{&c::tf}; // OK - does not compile as intended
M{&c::ft}; // OK
M{&c::tt}; // OK
// Lambdas:
// T{[]() mutable {}}; // OK - does not compile as intended
// T{[] {}}; // OK - does not compile as intended
T{[]() mutable noexcept {}}; // OK
T{[]() noexcept {}}; // OK
}
void test_const_noexcept()
{
using T = function_ref<void() const noexcept>;
using M = function_ref<void(c) const noexcept>;
static_assert(boost::callable_traits::is_const_member_v<T>);
static_assert(boost::callable_traits::is_const_member_v<M>);
static_assert(boost::callable_traits::is_noexcept_v<T>);
static_assert(boost::callable_traits::is_noexcept_v<M>);
// FPtrs:
// T{&fp_nonnoexcept}; // OK - does not compile as intended
T{&fp_noexcept}; // OK
// MPtrs:
// M{&c::ff}; // OK - does not compile as intended
// M{&c::tf}; // OK - does not compile as intended
// M{&c::ft}; // OK - does not compile as intended
M{&c::tt}; // OK
// Lambdas:
// T{[]() mutable {}}; // OK - does not compile as intended
// T{[] {}}; // OK - does not compile as intended
// T{[]() mutable noexcept {}}; // OK - does not compile as intended
T{[]() noexcept {}}; // OK
}
struct F
{
int operator()() const noexcept
{
return 0;
}
};
struct anything
{
template <typename T>
anything(T const&)
{
}
};
int main()
{
auto l = [i = 0]() mutable { std::cout << i++ << "\n"; };
foo(l);
foo(l);
foo(l);
std::function<void()> avbxf{function_ref<void()>{[]{}}};
// Should not compile:
// function_ref<anything() noexcept> fun = F{};
// function_ref f1{[] {}};
}
#include <cassert>
// `function_ref`: non-owning wrapper over generic `Callable`
// with a particular signature
int xfoo(function_ref<int()> f)
{
return f();
}
int xbar()
{
auto l = [i = 0]() mutable { return i++; };
assert(xfoo(l) == 0); // <== reference semantics
assert(xfoo(l) == 1);
assert(xfoo(l) == 2);
auto g = [](function_ref<int() const> k){ k(); };
/* g(l); */ // <== does not compile ^~~~~
g([]{ return 0; }); // <== works with temporaries
return 0;
}
|
{"hexsha": "62a714acb4aa6888253bfff24e957763e5468529", "size": 10744, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "function_ref.cpp", "max_stars_repo_name": "SuperV1234/Experiments", "max_stars_repo_head_hexsha": "572c94d1afb367c241645b479019f6cb3883f98f", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2015-10-10T16:27:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-28T17:48:32.000Z", "max_issues_repo_path": "function_ref.cpp", "max_issues_repo_name": "vittorioromeo/Experiments", "max_issues_repo_head_hexsha": "572c94d1afb367c241645b479019f6cb3883f98f", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2016-11-20T21:17:19.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-25T16:52:32.000Z", "max_forks_repo_path": "function_ref.cpp", "max_forks_repo_name": "vittorioromeo/Experiments", "max_forks_repo_head_hexsha": "572c94d1afb367c241645b479019f6cb3883f98f", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2016-11-20T21:02:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-09T18:11:29.000Z", "avg_line_length": 28.8042895442, "max_line_length": 79, "alphanum_fraction": 0.5385331348, "num_tokens": 2613}
|
import tensorflow as tf
import numpy as np
from RelationNetwork import RN
from prepare import SClevrDataset,ClevrDataset
from utils import Config, Config_SClevr
import argparse
import sys
def str2bool(s):
if s == 'true':
return True
else:
return False
class Trainer(object):
def __init__(self, config):
"""
Trainer to train the model.
        Based on the configuration, the model is built either for the Clevr task or the Sort-of-Clevr task.
"""
self.restore = str2bool(config.restore)
self.mode = config.model
if self.mode == 'clevr':
self.config = Config()
            self.dataset = ClevrDataset(self.config, dataset_name='train', load_vocab=False)
self.model = RN(self.config, is_train = True, restore = self.restore, mode = 'clevr')
else:
self.config = Config_SClevr()
self.dataset = SClevrDataset(self.config)
self.model = RN(self.config, is_train = True, restore = self.restore, mode = 'sclevr')
def train(self):
"""
Train the model until the max_iter is reached.
"""
loss_history = []
acc_history = []
loss = [0]
acc = [0]
num_epoch = 0
for i in range(self.config.max_iter):
if self.dataset.counter < self.config.batch_size:
sys.stdout.write('\n')
sys.stdout.flush()
if num_epoch > 0:
loss_history.append(np.mean(loss))
acc_history.append(np.mean(acc))
num_epoch += 1
loss = []
acc = []
l, p, a = self.model.run_batch(self.dataset.next_batch(self.config.batch_size))
loss.append(l)
acc.append(a)
sys.stdout.write('\rEpoch: {}, Progress: {} / {}, Loss: {}, Acc: {}'.format(num_epoch, self.dataset.counter, len(self.dataset.questions), str(np.mean(loss)), str(np.mean(acc))))
sys.stdout.flush()
if(i % self.config.save == 0):
self.model.save()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='sclevr', choices=['clevr', 'sclevr'])
parser.add_argument('--restore', type=str, default='false', choices=['true', 'false'])
config = parser.parse_args()
trainer = Trainer(config)
trainer.train()
if __name__ == '__main__':
main()
|
{"hexsha": "b86fed276e3f02d8212cc16bc2fe889a6fbab68c", "size": 2093, "ext": "py", "lang": "Python", "max_stars_repo_path": "Trainer.py", "max_stars_repo_name": "obitto/relation-network", "max_stars_repo_head_hexsha": "2cbea587c9d43d6e02dba8ddd79e9ae18eca5356", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Trainer.py", "max_issues_repo_name": "obitto/relation-network", "max_issues_repo_head_hexsha": "2cbea587c9d43d6e02dba8ddd79e9ae18eca5356", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trainer.py", "max_forks_repo_name": "obitto/relation-network", "max_forks_repo_head_hexsha": "2cbea587c9d43d6e02dba8ddd79e9ae18eca5356", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3333333333, "max_line_length": 180, "alphanum_fraction": 0.6770186335, "include": true, "reason": "import numpy", "num_tokens": 557}
|
import os
import numpy as np
from astropy.table import Table
# sky_subd_sciences[ap] = [waves,diff,bool_mask]
from zestipy.data_structures import waveform
from zestipy.plotting_tools import summary_plot
from zestipy.sncalc import sncalc
from zestipy.z_est import z_est
def fit_redshifts_wrapper(input_dict):
return fit_redshifts(**input_dict)
def fit_redshifts(sky_subd_sciences,mask_name,run_auto=True,prior = None,savetemplate_func=None):
# 3.0e-5
if run_auto:
outnames = ['FIBNAME','redshift_est', 'cor', 'template', 'SNavg', 'SNHKmin', 'HSN', 'KSN', 'GSN']
types = ['S4',float,float,'S3',float,float,float,float,float]
else:
outnames = ['FIBNAME','redshift_est', 'quality_val', 'cor', 'template', 'SNavg', 'SNHKmin', 'HSN', 'KSN', 'GSN']
types = ['S4',float,int,float,'S3',float,float,float,float,float]
outtable = Table(names=outnames,dtype=types)
science_fiber_names = list(sky_subd_sciences.keys())
if len(science_fiber_names)>0:
first_ap = science_fiber_names[0]
else:
return outtable
first_waves, flux, boolmask = sky_subd_sciences[first_ap]
R = z_est(lower_w=first_waves.min()/(1+0.52), upper_w=first_waves.max()/(1+0.1), lower_z=0.10, upper_z=0.5, \
z_res=1.0e-5, prior_width=0.02, use_zprior=False, \
skip_initial_priors=True, \
auto_pilot=True)
del first_waves, flux, boolmask
template_names = ['spDR2-023.fit', 'spDR2-024.fit']#, 'spDR2-028.fit']
# ['spDR2-0'+str(x)+'.fit' for x in np.arange(23,31)]
template_dir = '../sdss_templates' # hack
path_to_temps = os.path.abspath(os.path.join(os.curdir, template_dir)) # hack
# Import template spectrum (SDSS early type) and continuum subtract the flux
R.add_sdsstemplates_fromfile(path_to_temps, template_names)
if not run_auto:
quality_val = {}
for ap in sky_subd_sciences.keys():
waves, flux, boolmask = sky_subd_sciences[ap]
        # mask = boolmask.copy()
        # dilate the bad-pixel mask so that each masked pixel also masks its
        # neighbors within a window of nmaskbins pixels
        nmaskbins = 5  ## must be odd
        start = (nmaskbins - 1)
        half = start // 2
        mask = boolmask[start:].copy()
        for ii in range(1, start + 1):
            mask = (mask | boolmask[(start - ii):-ii])
        # pad the ends back to the original length, flagged as masked
        mask = np.append(np.append([True] * half, mask), [True] * half)
test_waveform = waveform(waves, flux, ap, mask)
# test_waveform = waveform(waves, flux, ap, boolmask)
redshift_outputs = R.redshift_estimate(test_waveform)
redshift_est = redshift_outputs.best_zest
cor = redshift_outputs.max_cor
ztest = redshift_outputs.ztest_vals
corr_val = redshift_outputs.corr_vals
template = redshift_outputs.template.name
if not run_auto:
            quality_val = redshift_outputs.qualityval
try:
HSN, KSN, GSN = sncalc(redshift_est, test_waveform.masked_wave,
test_waveform.continuum_subtracted_flux)
except ValueError:
HSN, KSN, GSN = 0.0, 0.0, 0.0
print("\n\n {}:".format(ap))
        names = ['Z Best','Max Cor','Templt','H S/N', 'K S/N', 'G S/N']
vals = [redshift_est, cor, template, HSN, KSN, GSN]
for name,val in zip(names,vals):
if type(val) in [int,str]:
print('---> {}:\t{}'.format(name, val))
else:
print('---> {}:\t{:06f}'.format(name,val))
SNavg = np.average(np.array([HSN, KSN, GSN]))
SNHKmin = np.min(np.array([HSN, KSN]))
# Create a summary plot of the best z-fit
comment = 'redEst_{}_Tmplt{}'.format(test_waveform.name, redshift_outputs.template.name)
plt_name = savetemplate_func(cam='',ap=ap,imtype='science',step='zfit',comment=comment)
summary_plot(test_waveform.masked_wave, test_waveform.masked_flux, redshift_outputs.template.wave, \
redshift_outputs.template.flux, redshift_outputs.best_zest, redshift_outputs.ztest_vals, \
redshift_outputs.corr_vals, plt_name, test_waveform.name, None)
if run_auto:
outtable.add_row([ap,redshift_est, cor, template, SNavg, SNHKmin, HSN, KSN, GSN])
else:
outtable.add_row([ap, redshift_est, quality_val, cor, template, SNavg, SNHKmin, HSN, KSN, GSN])
return outtable
|
{"hexsha": "ce434e76b2f8a52f0ea13bc56228a83cd1bd8e09", "size": 4361, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyM2FS/fit_redshifts.py", "max_stars_repo_name": "akremin/M2FSreduce", "max_stars_repo_head_hexsha": "42092f18aa1e5d7ad6f6528a395ee93e89165b30", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyM2FS/fit_redshifts.py", "max_issues_repo_name": "akremin/M2FSreduce", "max_issues_repo_head_hexsha": "42092f18aa1e5d7ad6f6528a395ee93e89165b30", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyM2FS/fit_redshifts.py", "max_forks_repo_name": "akremin/M2FSreduce", "max_forks_repo_head_hexsha": "42092f18aa1e5d7ad6f6528a395ee93e89165b30", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7549019608, "max_line_length": 120, "alphanum_fraction": 0.6315065352, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1238}
|
#!/usr/bin/env python
import os
import csv
import linecache
import numpy as np
from CP2K_kit.tools import log_info
from CP2K_kit.tools import traj_info
from CP2K_kit.tools import data_op
from CP2K_kit.analyze import check_analyze
from CP2K_kit.lib import rmsd_mod
from CP2K_kit.lib import statistic_mod
def rmsd(atoms_num, pre_base_block, end_base_block, pre_base, each, atom_id, start_frame_id, ref_frame, comp_frame_list, traj_coord_file):
#Reference literature: J. Comput. Chem. 2004, 25, 1849-1857.
'''
  rmsd: get rmsd of chosen atoms in the md trajectory.
Args:
atoms_num: int
atoms_num is the number of atoms in the system.
pre_base_block: int
pre_base_block is the number of lines before structure in a structure block.
end_base_block: int
end_base_block is the number of lines after structure in a structure block.
pre_base: int
pre_base is the number of lines before block of the trajectory.
each: int
each is printing frequency of md.
atom_id: int list
atom_id is the id of atoms.
Example: [1,2,3,7,8]
start_frame_id: int
start_frame_id is the starting frame id in trajectory file.
ref_frame: int
ref_frame is the reference frame.
comp_frame_list: 1-d int list
      comp_frame_list is the list of frames to compare against the reference.
traj_coord_file: string
      traj_coord_file is the name of the coordinate trajectory file.
Returns:
rmsd_value_list: 1-d float list
rmsd_value_list is the list of rmsd value.
'''
coord_ref = np.asfortranarray(np.zeros((len(atom_id),3)),dtype='float32')
coord_comp = np.asfortranarray(np.zeros((len(atom_id),3)),dtype='float32')
for i in range(len(atom_id)):
line_i_num = int((ref_frame-start_frame_id)/each)*(pre_base_block+atoms_num+end_base_block)+atom_id[i]+pre_base_block+pre_base
line_i = linecache.getline(traj_coord_file, line_i_num)
line_i_split = data_op.split_str(line_i, ' ', '\n')
coord_ref[i,0] = float(line_i_split[1])
coord_ref[i,1] = float(line_i_split[2])
coord_ref[i,2] = float(line_i_split[3])
coord_ref_center = np.asfortranarray(np.zeros(3),dtype='float32')
for i in range(3):
value_avg, sigma = statistic_mod.statistic.numerical_average(coord_ref[:,i],len(atom_id))
coord_ref_center[i] = value_avg
rmsd_value_list = []
for m in range(len(comp_frame_list)):
for i in range(len(atom_id)):
line_mi_num = int((comp_frame_list[m]-start_frame_id)/each)*(pre_base_block+atoms_num+end_base_block)+atom_id[i]+pre_base_block+pre_base
line_mi = linecache.getline(traj_coord_file, line_mi_num)
line_mi_split = data_op.split_str(line_mi, ' ', '\n')
coord_comp[i,0] = float(line_mi_split[1])
coord_comp[i,1] = float(line_mi_split[2])
coord_comp[i,2] = float(line_mi_split[3])
coord_comp_center = np.asfortranarray(np.zeros(3),dtype='float32')
for i in range(3):
value_avg, sigma = statistic_mod.statistic.numerical_average(coord_comp[:,i],len(atom_id))
coord_comp_center[i] = value_avg
cov_matrix = rmsd_mod.rmsd.get_cov_matrix(coord_comp,coord_ref,coord_comp_center,coord_ref_center)
quart_matrix = rmsd_mod.rmsd.quarternion_rotate(cov_matrix)
eigen = np.linalg.eig(quart_matrix)
eigen_value = eigen[0]
eigen_vector = eigen[1]
eigen_max = max(eigen_value)
max_index = list(eigen_value).index(max(eigen_value))
quart_vec = eigen_vector[max_index]
    #If you need the rotation matrix, you could print it or return it
    rotate_mat = rmsd_mod.rmsd.quart_to_rot(quart_vec)
rmsd_value = rmsd_mod.rmsd.get_rmsd(coord_comp,coord_ref,coord_comp_center,coord_ref_center,eigen_max)
rmsd_value_list.append(rmsd_value)
linecache.clearcache()
return rmsd_value_list
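# The quaternion approach above (cf. the J. Comput. Chem. 2004 reference)
# should agree with the classical Kabsch algorithm. The following numpy-only
# sketch is not part of the original module; the function name and arguments
# are illustrative, and it can serve as a cross-check on two (n,3)
# coordinate arrays.
def kabsch_rmsd_check(coord_comp, coord_ref):
  # center both coordinate sets on their centroids
  p = coord_comp - np.mean(coord_comp, axis=0)
  q = coord_ref - np.mean(coord_ref, axis=0)
  # covariance matrix and its singular value decomposition
  u, _, vt = np.linalg.svd(np.dot(p.T, q))
  # correct for an improper rotation (reflection), if present
  d = np.sign(np.linalg.det(np.dot(u, vt)))
  u[:, -1] = u[:, -1]*d
  # optimal rotation matrix and the resulting minimal rmsd
  rot = np.dot(u, vt)
  diff = np.dot(p, rot) - q
  return np.sqrt(np.sum(diff*diff)/p.shape[0])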
def rmsd_run(rmsd_param, work_dir):
'''
rmsd_run: the kernel function to run rmsd function.
Args:
rmsd_param: dictionary
rmsd_param contains keywords used in rmsd functions.
work_dir: string
work_dir is the working directory of CP2K_kit.
Returns:
none
'''
rmsd_param = check_analyze.check_rmsd_inp(rmsd_param)
traj_coord_file = rmsd_param['traj_coord_file']
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
traj_info.get_traj_info(traj_coord_file, 'coord_xyz')
log_info.log_traj_info(atoms_num, frames_num, each, start_frame_id, end_frame_id, time_step)
atom_id = rmsd_param['atom_id']
ref_frame = rmsd_param['ref_frame']
compare_frame = rmsd_param['compare_frame']
print ('RMSD'.center(80, '*'), flush=True)
print ('Calculate root mean square deviation based on reference frame %d' %(ref_frame), flush=True)
rmsd_value = rmsd(atoms_num, pre_base_block, end_base_block, pre_base, each, atom_id, start_frame_id, ref_frame, compare_frame, traj_coord_file)
rmsd_file = ''.join((work_dir, '/rmsd.csv'))
with open(rmsd_file, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['time', 'rmsd'])
for i in range(len(rmsd_value)):
writer.writerow([i*time_step*each, rmsd_value[i]])
str_print = 'The rmsd vs time is written in %s' %(rmsd_file)
print (data_op.str_wrap(str_print, 80), flush=True)
|
{"hexsha": "5feaf4e2377da098ac39b3f0dcb206932ae6eaa2", "size": 5210, "ext": "py", "lang": "Python", "max_stars_repo_path": "analyze/rmsd.py", "max_stars_repo_name": "JunboLu/CP2K_kit", "max_stars_repo_head_hexsha": "0950f37f253c3f90d6a0539c57f1be1045e7317d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-04-19T03:40:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T12:53:33.000Z", "max_issues_repo_path": "analyze/rmsd.py", "max_issues_repo_name": "JunboLu/CP2K_kit", "max_issues_repo_head_hexsha": "0950f37f253c3f90d6a0539c57f1be1045e7317d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analyze/rmsd.py", "max_forks_repo_name": "JunboLu/CP2K_kit", "max_forks_repo_head_hexsha": "0950f37f253c3f90d6a0539c57f1be1045e7317d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-28T02:55:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T12:54:52.000Z", "avg_line_length": 36.6901408451, "max_line_length": 146, "alphanum_fraction": 0.7355086372, "include": true, "reason": "import numpy", "num_tokens": 1443}
|
import numpy as np
from OSIM.Modeling.AbstractComponents.SingleComponent import SingleComponent
from OSIM.Modeling.CircuitSystemEquations import CircuitSystemEquations
class Impedance(SingleComponent):
def __init__(self, nodes, name, value, superComponent, **kwargs):
if complex(value) == 0:
print(name + " Resistor invalid value, will be is set to Rmin = 0.001")
super(Impedance, self).__init__(nodes, name, 0.0000001, superComponent,**kwargs)
else:
super(Impedance, self).__init__(nodes, name, value, superComponent, **kwargs)
def doStep(self, freq_or_tau):
if self.sys.atype == CircuitSystemEquations.ATYPE_AC:
return
self.insertAdmittanceintoSystem(freq_or_tau)
def getAdmittance(self, nodesFromTo, freq_or_tstep):
return np.complex128(1 / self.value)
|
{"hexsha": "2879adb759859814e05f2c32651e178746b73d21", "size": 862, "ext": "py", "lang": "Python", "max_stars_repo_path": "OSIM/Modeling/Components/Impedance.py", "max_stars_repo_name": "tmaiwald/OSIM", "max_stars_repo_head_hexsha": "11127aaee61d93bb6f26ca5147a300af05db14ec", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OSIM/Modeling/Components/Impedance.py", "max_issues_repo_name": "tmaiwald/OSIM", "max_issues_repo_head_hexsha": "11127aaee61d93bb6f26ca5147a300af05db14ec", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OSIM/Modeling/Components/Impedance.py", "max_forks_repo_name": "tmaiwald/OSIM", "max_forks_repo_head_hexsha": "11127aaee61d93bb6f26ca5147a300af05db14ec", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4782608696, "max_line_length": 92, "alphanum_fraction": 0.7053364269, "include": true, "reason": "import numpy", "num_tokens": 215}
|
import json
import numpy as np
"""
Format of ecosystem is:
{
'last_generation': int,
'times': [float,...],
'improvements': [float,...],
'average_total_improve': [float,...],
'runtime_running_avg': float,
'total_runtime': float,
'need_drift': [False,...],
'drifted_last_generation': [False,...],
'best_mae': [float,...],
'std': [float,...],
'avg_mae_survivors': [{
'generation': int,
'values': [None] * n_territories,
}],
'territories': [
[{hp:{}, mae=None},...],
.
.
.
]
}
"""
ecosystem = None
with open('ecosystem.json') as f:
ecosystem = json.load(f)
best_ter = ecosystem['territories'][0]
for ter in ecosystem['territories']:
if ter[0]['mae'] < best_ter[0]['mae']:
best_ter = ter
params = {}
for ind in best_ter:
for key, val in ind['hp'].items():
        # flag suspiciously large n_estimators values (debugging aid)
        if key == 'n_estimators' and val > 1500:
            print(val)
if key in params:
params[key].append(val)
else:
params[key] = [val]
for key, val in params.items():
print("*" * 10, key, "*" * 10)
mean = np.mean(val)
std = np.std(val)
minimum = np.min(val)
maximum = np.max(val)
print("Mean:", mean)
print("STD:", std)
print("Range:", mean - std, mean + std, mean + std - (mean - std))
print("Minimum:", minimum, "Maximum:", maximum)
print("Best:", best_ter[0]['hp'][key])
|
{"hexsha": "12cca18c9be9ffdb1a103d42395d879d645d949d", "size": 1335, "ext": "py", "lang": "Python", "max_stars_repo_path": "stats.py", "max_stars_repo_name": "ZackYovel/using_genetic_algorithm_for_hyper_parameter_tuning", "max_stars_repo_head_hexsha": "10530c5c3802d65f8a5cb651a5e0245d049d2702", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-29T22:36:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-29T22:36:41.000Z", "max_issues_repo_path": "stats.py", "max_issues_repo_name": "ZackYovel/using_genetic_algorithm_for_hyper_parameter_tuning", "max_issues_repo_head_hexsha": "10530c5c3802d65f8a5cb651a5e0245d049d2702", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stats.py", "max_forks_repo_name": "ZackYovel/using_genetic_algorithm_for_hyper_parameter_tuning", "max_forks_repo_head_hexsha": "10530c5c3802d65f8a5cb651a5e0245d049d2702", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.25, "max_line_length": 68, "alphanum_fraction": 0.5760299625, "include": true, "reason": "import numpy", "num_tokens": 407}
|
\section{Fit}%
\label{period.detailed}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Fit folder}%
\label{period.folder}
\begin{figure}[h]
$$\image{0cm;0cm}{PFolder.eps}$$%
\caption{The frequency folder}%
\label{period.folder.dialog}
\end{figure}
This folder shows almost all the information regarding the current fit.
(In some rare circumstances the
\helpref{log folder}{log.detailed}
may provide more detailed information.)
{\bf Active frequencies}:
Gives the total number of active frequencies.
These are the frequencies for which in the frequency table,
the check box is selected near the frequency number.
{\bf Use weights}:
if selected, the time string is used in weighted form for the
least square fit calculation.
{\bf Zero point}:
Shows the calculated zero point for the last calculated fit.
{\bf Residuals}:
Shows the residuals for the last calculated fit.
{\bf Calculations based on:} dialog item indicates which data set
will be used for the next calculation. Either the {\bf original}
or the {\bf adjusted} data.
With the four buttons:
{\bf Prev X},
{\bf Prev},
{\bf Next},
{\bf Next X}
scrolling in the frequency list is possible.
(Prev is short for previous.)
(X depends on the size of the screen,
but can also be specified with a
command line switch of {\tt -r$ <$rows$> $} when invoking the
program.)
The next few lines give information on some of the frequencies themselves.
The first column gives the {\bf number of the frequency}
with the possibility to activate or deactivate it by
selecting the check box.
The next column gives the value of the {\bf frequency} itself.
In case of a {\bf harmonic} or {\bf frequency combination}
the contents will be preceded by {\tt``=''} and may be
of one of the following formats:
frequency combination:
\myitemize{
\item =fx+fy
\item =nfx+mfy or =n*fx+m*fy
}
harmonic:
\myitemize{
\item =hfx or =h*fx
}
x,y may be values from 1 to 255, but may not be the number
of the frequency itself or a reference to another frequency
which is itself a combination. If one of the composing
frequencies is not active, the combination is not active either!
h may be an integer greater than 1, while m,n can be any integer number.
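For instance (with purely illustrative values): if f1 = 3.10 and
f2 = 4.25, then entering {\tt =2f1-1f2} defines a combination frequency
with the current numerical value
$$2 \cdot 3.10 - 4.25 = 1.95,$$
while {\tt =3f1} defines the third harmonic of f1, i.e. 9.30.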
When entering or editing a frequency value, another feature may make life easier:
if an arbitrary number of ``+'' and ``-'' signs trail the number, then
the value of the
\helpref{alias gap value}{period.aliasgap} is added to or subtracted from
the value, as often as ``+'' or ``-'' are given.
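For instance, with the default alias gap of 1/365, entering
{\tt 5.125++} stores
$$5.125 + 2 \cdot \frac{1}{365} \approx 5.13048,$$
i.e. the frequency shifted upwards by two alias spacings.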
The following two columns give the value of the {\bf amplitude}
and {\bf phase}.
The last column, which is by default not visible, tells
the current numerical value of a composite frequency or harmonic.
The next 3 buttons start a calculation:
\myitemize{
\item \button{Calculate}
takes the current settings and improves
the amplitudes and phases of all active frequencies.
\item \button{Improve all}
takes the current settings and improves
not only amplitudes and phases, but also the frequencies.
\item \button{Improve special}
gives full power to the user for calculations, as it allows to
decide for every active frequency, amplitude or phase whether it
should be a fixed or free parameter.
This option also allows the calculation of
\helpref{amplitude and/or phase variations}{period.ampvar}.
}
{\bf Import}
reads in a set of predefined frequencies.
{\bf Export}
writes out the whole table of frequencies.
{\bf Phase plot}
allows to display the selected time string as a phase plot to
a selectable frequency.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Alias gap}%
\label{period.aliasgap}
As already mentioned in the
\helpref{Fit folder}{period.folder},
a frequency may be shifted up or down by some arbitrary value by placing
``+'' or ``-'' after the frequency value itself.
This value is closely connected to the
spectral window function of the currently selected time string.
For astronomical reasons, the default is set to 1/365.
This value is a good estimate for a time string
with large gaps (of approximately a year) between
densely packed data.
The \helpfigref{alias gap dialog}{period.aliasgap.dialog},
which can be reached from the \menu{Fit} by selecting the
\menuentry{Alias gap}, allows to change this value.
\begin{figure}[h]
$$\image{0cm;0cm}{PAliasgap.eps}$$%
\caption{The ``alias gap'' dialog}%
\label{period.aliasgap.dialog}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Improve Special}%
\label{period.special}
As already mentioned in the description of the
\helpref{Fit folder}{period.folder}, {\bf Improve special} gives
full control over the calculation to make.
\note{This option should only be used with great care
and only after a stable solution has already been reached with the
\button{Calculate} or \button{Improve}.}
\note{Improve automatically
does a calculate before trying to improve the frequencies as well!
This safeguard has not been implemented for {\bf Improve Special},
as this might effect the desired result in unwanted ways.}
The \helpfigref{improve special dialog}{period.special.dialog}
basically contains 3 list boxes.
These list the frequency, amplitude, and phase for every active frequency.
For ease of recognition, in front of each value the number of the
frequency itself is written.
As for the frequency list, frequencies that are combinations or
harmonics are not listed.
\begin{figure}[h]
$$\image{0cm;0cm}{PSpecial.eps}$$%
\caption{The ``improve special'' dialog}%
\label{period.special.dialog}
\end{figure}
As an example, a list of predefined frequencies (f1, f2 and f3) is already
known, and should stay fixed. But other additional frequencies (f4 to f10)
should be improved.
Then in the list box with the frequencies all frequencies
but the first three should be selected, as well as all the amplitudes
and phases.
The three buttons at the bottom allow different calculation modes.
\myitemize{
\item \button{Calculate} gives the requested calculation
\item \button{Calculate amplitude/phase variations} opens up another dialog,
giving access to an even more specialised calculation mode.
See \helpref{Amplitude/phase variations}{period.ampvar}
for details.
\item \button{Cancel} stops the calculation.
}
\note{Interpretation of the log output of the {\bf Fit} module:\\
It will output a before/after scenario with all the active Frequencies listed.
Values that have leading and trailing asterisks are free parameters of the fit.
The others are fixed. }
\note{And a final note: the {\bf zero point} is {\it always} a free parameter!}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Calculate amplitude/phase variations}%
\label{period.ampvar}
The last and most sophisticated calculation mode provided by the
{\bf fit module} is the {\bf Amplitude/Phase variation mode}.
This allows to analyze data where
amplitudes and/or phases have different values, for different subsets
of the currently selected time string.
An example for this is the observation of daily
high and low temperature averages.
Or a star that changes intrinsically its amplitude for some frequency.
Also measuring an object in two filters generally gives different amplitudes
and phases for each filter.
Why is this calculation mode necessary?
Fitting the different subsets separately is possible
and should, in principle, give the same results.
In practice this is not fully true, as precision of the final result
gets lost: not only because of the limited number of points, but also
because the frequency for each of these subsets will yield a slightly
different result when using {\bf improve} for the calculation.
Thus the results are not fully comparable, especially when large datasets
with huge gaps (say maybe 10 years) are used.
But will {\bf calculate} or {\bf improve special} not do the magic to
some extent? True again, but again a certain frequency has to be assumed
from the start for all subsets, which will still not represent the
best fit.
But a blind shot with this new tool will not necessarily give the
wanted results either: when the degrees of freedom of the calculation
are increased (and that is what amplitude/phase variation does!),
the numerical stability may be compromised.
So this tool should only be used with great caution and the
number of frequencies,
which are tested for amplitude and/or phase variations, should be kept
to a minimum. Other techniques should be used first to find such variations
and only {\it then} this tool should be used.
The other techniques are: calculating values for different subsets
and the more optical approach of using the
\helpref{phase plot}{period.phase} feature of the {\bf fit module}.
After the \button{Calculate amplitude/phase variations} has been pressed
in the \helpref{Improve special dialog}{period.special}, the
\helpfigref{amplitude/phase variation dialog}{period.ampvar.dialog} shows up.
\begin{figure}[h]
$$\image{0cm;0cm}{PAmpvar.eps}$$%
\caption{The ``amplitude/phase variation'' dialog}%
\label{period.ampvar.dialog}
\end{figure}
First an {\bf attribute} has to be chosen, whose selected labels
define the subsets of the time string.
Then the {\bf calculation mode} has to be selected.
Possibilities are:
\myitemize{
\item amplitude variation
\item phase variation
\item amplitude and phase variation
}
And finally the {\bf frequencies} for which amplitude/phase
variation should be done, have to be selected. The ones not selected
are assumed to have the same amplitude and phase for all points.
The \button{Calculate} starts the calculation.
As the \helpref{fit folder}{period.folder} does not offer
the possibility to show the complex results in its window,
a new dialog shows up, which tabulates the result.
The \helpref{fit folder}{period.folder} will only reflect
changes to the values, for which separate amplitude and phase variation
has not been done.
The residuals are calculated correctly and individual residuals for
the subsets can be examined with the
\helpref{Adjust selection}{timestring.adjust} tool in the
\helpref{Time string module}{timestring.folder}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Calculate message}%
\label{period.calculate}
When calculating a fit the main window will change its appearance
to the following:
\begin{figure}[h]
$$\image{0cm;0cm}{PCalculate.eps}$$%
\caption{The ``calculate status'' window}%
\label{period.calculate.message}
\end{figure}
It informs about the current state of calculations,
where the first number is the number of iterations already done.
The second number tells at what number of iterations the program will stop
iterating and ask for advice.
With the \button{Cancel} the calculation can be stopped prematurely.
In this case, the result may not be a stable solution.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Predict signal}%
\label{period.predict}
With the \menuentry{Predict} in the \menu{Fit} it is possible to
predict an amplitude at a certain time from the current fit.
The \helpfigref{predict amplitude dialog}{period.predict.dialog} only
allows to enter a {\bf time} and the \button{Calculate}
will update the display accordingly.
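The predicted value presumably follows from evaluating the current fit
at the given time,
$$m(t) = Z + \sum_{i} A_i \sin\bigl(2\pi\,(f_i\,t + \phi_i)\bigr),$$
where $Z$ is the zero point and $A_i$, $f_i$ and $\phi_i$ are the fitted
amplitudes, frequencies and phases.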
\begin{figure}[h]
$$\image{0cm;0cm}{PPredict.eps}$$%
\caption{The ``predict amplitude'' dialog}%
\label{period.predict.dialog}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Create artificial data}%
\label{period.artificial}
With the
\helpfigref{create artificial data dialog}{period.artificial.dialog},
which can be activated by selecting the
\menuentry {Create artificial data} from the \menu{Fit},
it is possible to create an equally spaced artificial time string.
\begin{figure}[h]
$$\image{0cm;0cm}{PArtificial.eps}$$%
\caption{The ``create artificial data'' dialog}%
\label{period.artificial.dialog}
\end{figure}
To do so, \period has to know the {\bf start} and {\bf end time}
of the selected time span,
as well as the {\bf step} in between.
The defaults for these values are: the first and last time of the
currently selected time string, and for the step 1/(20*Fmax), to give a good
sampling of the highest frequency as well.
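For instance (illustrative numbers only), with Fmax = 10 the default step
becomes
$$\Delta t = \frac{1}{20 \cdot 10} = 0.005,$$
i.e. twenty points per cycle of the highest frequency.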
{\bf Leading/trailing} allows to extend the time span defined by start
and end in both directions further by a common value.
After pressing the \button{Append to file} or the \button{Create new file}
a file needs to be selected. For {\it append} the data will be appended
to the file, and with {\it create} a new file will be created, and
the old file destroyed.
To create artificial data with times from the currently selected time string,
the ``wished'' frequencies should be entered in the
\helpref{Fit folder}{period.folder} and then the
\menuentry{Recalculate residuals} in the \menu{Fit} should be used to
\helpref{recalculate the residuals}{period.recalculate}.
Then the calculated values can be exported with
\helpref{Time string export}{timestring.export}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Recalculate residuals}%
\label{period.recalculate}
With the \menuentry{Recalculate residuals} in the \menu{Fit}, it is
possible to update the residuals using the values of the current fit.
In addition, the zero point will be asked in the
\helpfigref{zero point dialog}{period.residuals.dialog}
before the calculations are done.
\begin{figure}[h]
$$\image{0cm;0cm}{PZeropoint.eps}$$%
\caption{The ``zero point'' dialog}%
\label{period.residuals.dialog}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Epochs}%
\label{period.epochs}
The time of maximum light closest to a certain time (epoch) according
to the current fit can be calculated by selecting the
\menuentry{Epoch} in the \menu{Fit}.
Then the \helpfigref{epoch dialog}{period.epochs.dialog} will show up,
which basically lists the table of frequencies along with the epoch.
{\bf Time of epoch} gives the time for which the epochs should be calculated.
And {\bf Data is in intensity} allows for correct interpretation of
maximum light. By default magnitudes are assumed!
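Assuming a single sinusoidal term $A \sin\bigl(2\pi\,(f t + \phi)\bigr)$
in intensity, its maxima presumably occur at
$$t_{\max} = \frac{1/4 - \phi + k}{f}, \qquad k = 0, \pm 1, \pm 2, \ldots,$$
with $k$ chosen such that $t_{\max}$ lies closest to the requested
time of epoch. (For data in magnitudes, maximum light corresponds to a
minimum of the fitted curve instead.)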
\begin{figure}[h]
$$\image{0cm;0cm}{PEpochs.eps}$$%
\caption{The ``epoch'' dialog}%
\label{period.epochs.dialog}
\end{figure}
The \button{Calculate} will update these results.
The \button{Print} will print the results and
the \button{Save} will save the results.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Import frequencies}%
\label{period.import}
To import a table of frequencies, all that needs to be done is
to press the \button{Import} in the \helpref{Fit folder}{period.folder}%
. Then a file selector dialog will show up and a file to
read in may be selected.
The table of frequencies will be erased and the information will be filled
in.
The file format is the following for {\it each} line:
\myitemize{
\item Frequency identifier: in the format: F$<$num$>$ or f$<$num$>$
\item if a bracket is the next character, then the frequency is
assumed to be inactive
\item frequency
\item amplitude (optional)
\item phase (optional)
\item additional data ignored
}
Lines starting with ``/'', ``;'', ``\#'', ``\%''
are assumed to be comment lines.
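Putting this together, a minimal import file (with purely hypothetical
values for frequency, amplitude and phase) might look like:
\begin{verbatim}
% hypothetical example
F1  5.1234  0.0123  0.345
F2( 7.8901  0.0045  0.890
F3  11.2345
\end{verbatim}
Here {\tt F2} would be read in as inactive because of the trailing
bracket, and {\tt F3} carries only a frequency value.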
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Export frequencies}%
\label{period.export}
To export the table of frequencies, all that needs to be done is
to press the \button{Export} in the \helpref{Fit folder}{period.folder}.
Then a file selector dialog will show up and a filename to save
may be selected.
For the format of the output file please see
\helpref{Import frequencies}{period.import}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Clean all frequencies}%
\label{period.cleanfrequencies}
To clear the table of frequencies, select the
\menuentry{Clean all frequencies} in the \menu{Fit}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Phase plot}%
\label{period.phase}
As a last tool, the {\bf Fit module} offers the possibility of phase plotting
the selected time string. This tool has been in use for a long time
and has had its prime time in the early days of asteroseismology.
Nowadays this tool is scarcely used, even though
it can prove to be a powerful visual diagnostic tool.
It allows one to view the shape of a light curve not in numbers
but visually. This can help to uncover properties in the data
that might otherwise be overlooked.
The plot basically uses the remainder of the time of a point
multiplied by a certain frequency, which can be edited by the user,
as abscissa, and most frequently the residuals
(of any kind - Original or Adjusted) as Y-axis.
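In other words, each point is presumably placed at the phase
$$\phi_i = (f\,t_i) \bmod 1,$$
so that one full cycle of the chosen frequency $f$ maps onto the
interval $[0,1)$.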
By default, \period tries to find a frequency in the list of frequencies
that has not been activated and chooses the first one found.
This frequency is displayed in the top part of the graph.
If none is found, then a frequency of 1.0 is chosen.
The point itself is plotted as a filled circle with a color that
corresponds to the color of one of its attribute labels.
To change the attribute used to select the color,
the \menu{Color} is used.
(See \helpref{Edit name properties}{timestring.edit} in the
\helpref{Time string folder}{timestring.folder} for how to
change the color for a label.)
With the \menu{Data} the data to be displayed on the Y-axis can be selected.
For explanation of the {\bf Color, Data and Zoom menus} please see
the documentation on \helpref{Time string graph}{timestring.graph}.
Some of the possible uses of this graph are:
\myitemize{
\item finding systematic changes of amplitudes
in different subsets of the time string.
\item finding systematic changes of phases
in different subsets of the time string.
\item detecting ``bad'' data, which do not follow the average
``light curve shape'' for whatever reason.
\item revealing the ``true'' light curve shape, as the light curve
does not have to be sinusoidal, merely periodic.
}
\note{Note: with {\bf harmonic frequencies}, which can be entered
in the frequency table, these light curve shapes may be ``approximated''
quite accurately and thus be removed from the residuals.}
So, how does it work? Bringing up the
\helpfigref{Phase plot}{period.phase.window}
is as easy as pressing the \button{Phase plot} in the
\helpref{Fit folder}{period.folder}.
\begin{figure}[h]
$$\image{0cm;0cm}{PPhase.eps}$$%
\caption{The ``phase plot'' window}%
\label{period.phase.window}
\end{figure}
The \menu{Frequency} gives all the necessary possibilities that have to do
with the graph, such as changing the frequency used for calculating
the phase of a point,
or activating the {\bf binning feature}, which allows one to extract the
light curve shape ``corresponding'' to this frequency.
For closer detail please see \helpref{Binning}{period.phase.binning}.
The values from which the graph has been created can be written out in a
manner similar to the normal
\helpref{time string export}{timestring.export}%
. The only difference is that instead of time, phase is written out.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Binning}%
\label{period.phase.binning}
As already mentioned, the ``Phase plot'' also offers the possibility of
binning, which is averaging data for discrete phase ranges
and displaying these values along with error bars.
Additionally, a spline-like curve is fitted to the data resulting from
this procedure.
The size of the bin box can be changed by selecting the
\menuentry{binning spacing} in the \menu{Frequency}.
Then the \helpfigref{Binning spacing dialog}{period.phase.binning.dialog}
will open up and ask for a new value. This value must lie strictly
between {\bf 0} and {\bf 1}!
\begin{figure}[h]
$$\image{0cm;0cm}{PPhaseBinning.eps}$$%
\caption{The ``binning'' dialog}%
\label{period.phase.binning.dialog}%
\end{figure}
The binned values can also be exported to a file by selecting the
\menuentry{Export binned} in the \menu{File}. This file contains
3 columns: phase, mean amplitude and sigma of mean.
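Assuming the standard estimators, a bin $k$ containing $N_k$ points
with values $a_i$ is exported as
$$\bar{a}_k = \frac{1}{N_k}\sum_{i=1}^{N_k} a_i , \qquad
\sigma_{\bar{a}_k} = \frac{1}{\sqrt{N_k}}\,
\sqrt{\frac{1}{N_k-1}\sum_{i=1}^{N_k}\left(a_i-\bar{a}_k\right)^2} ,$$
together with the phase of the bin.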
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Frequency choice}%
\label{period.phase.frequency}
If the frequency used in the graph needs to be changed, then the
\menuentry{Change Frequency} in the \menu{Frequency} should be selected.
This opens up the
\helpfigref{frequency choice dialog}{period.phase.frequency.dialog}.
\begin{figure}[h]
$$\image{0cm;0cm}{PPhaseFrequency.eps}$$%
\caption{The ``frequency choice'' dialog}%
\label{period.phase.frequency.dialog}
\end{figure}
This dialog contains a list box that
lists all deactivated frequencies, as \period assumes
that activated frequencies have already been prewhitened.
Selecting one of these and pressing the \button{OK} selects
this frequency and updates the graph and binning accordingly.
If a totally different frequency needs to be selected, there is also
a ``Choose different value'' entry in the list box.
When this has been selected the
\helpfigref{custom frequency dialog}{period.phase.frequency.other.dialog}
will show up, and a new frequency can be entered.
\begin{figure}[h]
$$\image{0cm;0cm}{PPhaseFrequencyother.eps}$$%
\caption{The ``custom frequency'' dialog}%
\label{period.phase.frequency.other.dialog}
\end{figure}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "period98"
%%% End:
|
{"hexsha": "6eefc4ae05b7ba54f166f2bc8d98ff35a2b86518", "size": 21801, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/src/d_period.tex", "max_stars_repo_name": "msperl/Period", "max_stars_repo_head_hexsha": "da4b4364e8228852cc2b82639470dab0b3579055", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-10T20:13:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-10T20:13:11.000Z", "max_issues_repo_path": "doc/src/d_period.tex", "max_issues_repo_name": "msperl/Period", "max_issues_repo_head_hexsha": "da4b4364e8228852cc2b82639470dab0b3579055", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/src/d_period.tex", "max_forks_repo_name": "msperl/Period", "max_forks_repo_head_hexsha": "da4b4364e8228852cc2b82639470dab0b3579055", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7229129663, "max_line_length": 79, "alphanum_fraction": 0.7266639145, "num_tokens": 5021}
|
from sigpipes.sigcontainer import SigContainer
from sigpipes.sigoperator import Print
from sigpipes.plotting import Plot, GraphOpts
from glob import glob
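# Each matched file is loaded into a SigContainer and pushed through a
# pipeline: the "|" operator chains signal operators, here printing a
# summary of the container and then plotting it with the S/H annotation
# marks taken from the "1hp.a" annotator.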
for filename in glob("/mnt/windows/Dokumenty/26_02_2020_11_38/1hp.txt"):
print(filename)
(SigContainer.from_csv(filename, header=False, fs=250,
annotation="/home/fiser/IKON/ICON/anotace/1hp.a.csv")
| Print()
| Plot(annot_specs=[("1hp.a/S", "1hp.a/H")], graph_opts=GraphOpts(title=[filename]))).show()
|
{"hexsha": "5d14161a08aec04e536077ed12bb20707c2283a9", "size": 655, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ikon.py", "max_stars_repo_name": "Poselsky/signal-pipes", "max_stars_repo_head_hexsha": "ded180cfce4f25931554e0099330b962c2af4550", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-18T01:13:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T19:13:00.000Z", "max_issues_repo_path": "tests/ikon.py", "max_issues_repo_name": "Poselsky/signal-pipes", "max_issues_repo_head_hexsha": "ded180cfce4f25931554e0099330b962c2af4550", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-17T16:56:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T16:56:12.000Z", "max_forks_repo_path": "tests/ikon.py", "max_forks_repo_name": "Poselsky/signal-pipes", "max_forks_repo_head_hexsha": "ded180cfce4f25931554e0099330b962c2af4550", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-12T22:34:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T14:20:39.000Z", "avg_line_length": 36.3888888889, "max_line_length": 97, "alphanum_fraction": 0.7297709924, "include": true, "reason": "import numpy", "num_tokens": 171}
|
# coding=utf-8
import logging
from typing import List
import numpy as np
import openeye.oechem as oechem
import openeye.oeomega as oeomega
import openeye.oeshape as oeshape
import utils
from .slurmmanager import slurmmanager
class rocs_similarity_base(object):
def __init__(self, ligand: utils.FilePath, max_tanimoto=0.6, shape_weight=0.5, color_weight=0.5):
self.ligand = ligand
self.k = max_tanimoto
self.shape_weight = shape_weight
self.color_weight = color_weight
reffs = oechem.oemolistream(self.ligand)
refmol = oechem.OEMol()
oechem.OEReadMolecule(reffs, refmol)
self.best = oeshape.OEBestOverlay()
self.best.SetRefMol(refmol)
self.best.SetColorForceField(oeshape.OEColorFFType_ImplicitMillsDean)
self.best.SetColorOptimize(True)
self.best.SetInitialOrientation(oeshape.OEBOOrientation_Inertial)
omegaOpts = oeomega.OEOmegaOptions()
omegaOpts.SetStrictStereo(False)
self.omega = oeomega.OEOmega(omegaOpts)
self.keepsize = 1
oechem.OEThrow.SetLevel(10000)
def __call__(self, smile):
imol = oechem.OEMol()
if not oechem.OESmilesToMol(imol, smile):
return 0
best_Tanimoto = 0.0
if self.omega(imol):
scoreiter = oeshape.OEBestOverlayScoreIter()
oeshape.OESortOverlayScores(scoreiter, self.best.Overlay(imol),
oeshape.OEHighestTanimotoCombo())
for score in scoreiter:
outmol = oechem.OEGraphMol(imol.GetConf(oechem.OEHasConfIdx(score.fitconfidx)))
score.Transform(outmol)
best_Tanimoto = (self.shape_weight * score.GetTanimoto()) + (
self.color_weight * score.GetColorTanimoto())
best_Tanimoto = np.minimum(best_Tanimoto, self.k)
break
else:
logging.debug("Omega failed")
return best_Tanimoto
def get_conformation(self, smile):
imol = oechem.OEMol()
if not oechem.OESmilesToMol(imol, smile):
return None
if self.omega(imol):
scoreiter = oeshape.OEBestOverlayScoreIter()
oeshape.OESortOverlayScores(scoreiter, self.best.Overlay(imol),
oeshape.OEHighestTanimotoCombo())
for score in scoreiter:
outmol = oechem.OEGraphMol(imol.GetConf(oechem.OEHasConfIdx(score.fitconfidx)))
score.Transform(outmol)
ofs = oechem.oemolostream()
ofs.openstring()
ofs.SetFormat(oechem.OEFormat_MOL2)
oechem.OEWriteMolecule(ofs, outmol)
result = ofs.GetString().decode()
return result
class rocs_similarity(rocs_similarity_base):
"""Scores based on ROCS shape and color similarity. Runs on a single CPU core."""
def __call__(self, smiles: List[str]) -> dict:
score = np.full(len(smiles), 0, dtype=np.float32)
for idx, smi in enumerate(smiles):
score[idx] = super().__call__(smi)
return {"total_score": np.array(score, dtype=np.float32)}
def __reduce__(self):
"""
:return: A tuple with the constructor and its arguments. Used to reinitialize the object for pickling
"""
return rocs_similarity, (self.ligand, self.k, self.shape_weight, self.color_weight)
class rocs_similarity_slurm(slurmmanager):
"""Scores based on ROCS shape and color similarity. Distributes the calculation using SLURM."""
def __init__(self, ligand: utils.FilePath, port=31992, nb_local=8, nb_slurm=6, cpu_per_job=8, max_tanimoto=0.6,
shape_weight=0.5, color_weight=0.5):
super().__init__(port=port, nb_local=nb_local, nb_slurm=nb_slurm, cpu_per_job=cpu_per_job, ligand=ligand,
max_tanimoto=max_tanimoto, shape_weight=shape_weight, color_weight=color_weight,
scoring_function="rocs_similarity")
def __call__(self, smiles: List[str]) -> dict:
return {"total_score": super().__call__(smiles)}
|
{"hexsha": "abe5240f070653944afaac2399e62d9c049b3ff3", "size": 4169, "ext": "py", "lang": "Python", "max_stars_repo_path": "scoring/rocs_similarity.py", "max_stars_repo_name": "MauriceKarrenbrock/reinvent-memory", "max_stars_repo_head_hexsha": "57860dabb6534daf14fe2ab81d57589a90760442", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scoring/rocs_similarity.py", "max_issues_repo_name": "MauriceKarrenbrock/reinvent-memory", "max_issues_repo_head_hexsha": "57860dabb6534daf14fe2ab81d57589a90760442", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scoring/rocs_similarity.py", "max_forks_repo_name": "MauriceKarrenbrock/reinvent-memory", "max_forks_repo_head_hexsha": "57860dabb6534daf14fe2ab81d57589a90760442", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8725490196, "max_line_length": 115, "alphanum_fraction": 0.6414008155, "include": true, "reason": "import numpy", "num_tokens": 1048}
|
"""
Module for specifying output variables as part of the data file.
"""
import numpy as np
from andes.core.model import ModelData, Model
from andes.core.param import DataParam
class OutputData(ModelData):
"""
Data for outputs.
"""
def __init__(self):
ModelData.__init__(self, three_params=False)
self.model = DataParam(info='Name of the model', mandatory=True)
self.varname = DataParam(info='Variable name', )
self.dev = DataParam(info='Device name', )
class Output(OutputData, Model):
"""
Model for specifying output models and/or variables.
"""
def __init__(self, system, config):
OutputData.__init__(self)
Model.__init__(self, system, config)
self.group = 'OutputSelect'
self.xidx = []
self.yidx = []
def in1d(self, addr, v_code):
"""
Helper function for finding boolean flags to indicate intersections.
Parameters
----------
addr : array-like
DAE-based variable addresses to find
v_code : str
variable code in 'x' and 'y'
"""
if v_code == 'x':
return np.in1d(self.xidx, addr)
if v_code == 'y':
return np.in1d(self.yidx, addr)
raise NotImplementedError("v_code <%s> not recognized" % v_code)
def to_output_addr(self, addr, v_code):
"""
Convert DAE-based variable address to relative output addresses.
"""
bool_intersect = self.in1d(addr, v_code)
return np.where(bool_intersect)
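# Minimal standalone illustration of the np.in1d/np.where mapping used
# above (hypothetical addresses):
#   xidx = np.array([3, 5, 9])      # x-variables selected for output
#   addr = np.array([5, 9])         # DAE addresses to locate
#   np.where(np.in1d(xidx, addr))   # -> (array([1, 2]),)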
|
{"hexsha": "23082220a72e9c3c5f7cdccece64b5ec3ee1a4db", "size": 1555, "ext": "py", "lang": "Python", "max_stars_repo_path": "andes/models/misc/output.py", "max_stars_repo_name": "cuihantao/Andes", "max_stars_repo_head_hexsha": "6cdc057986c4a8382194ef440b6e92b8dfb77e25", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-06-16T14:21:04.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-18T08:52:27.000Z", "max_issues_repo_path": "andes/models/misc/output.py", "max_issues_repo_name": "cuihantao/Andes", "max_issues_repo_head_hexsha": "6cdc057986c4a8382194ef440b6e92b8dfb77e25", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-12T07:51:16.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-12T07:51:16.000Z", "max_forks_repo_path": "andes/models/misc/output.py", "max_forks_repo_name": "cuihantao/Andes", "max_forks_repo_head_hexsha": "6cdc057986c4a8382194ef440b6e92b8dfb77e25", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-12-10T07:32:36.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-19T16:38:30.000Z", "avg_line_length": 25.0806451613, "max_line_length": 76, "alphanum_fraction": 0.6006430868, "include": true, "reason": "import numpy", "num_tokens": 349}
|
import pickle
import re
import string
import pkg_resources
from gensim.models import KeyedVectors
import numpy as np
class Preprocessor(object):
char_search = re.compile(r"[^\u0020\u0027\u002b-\u002e\u0030-\u0039\u0041-\u005a\u0061-\u007a]")
strip_multi_ws = re.compile(r"( {2,})")
word_re = re.compile(r"([\w|-]+)")
punc = set(string.punctuation)
def __init__(self):
self.kp = self._load_kp()
self.gram_counts = self._load_gram_counts()
def __call__(self, x):
x = self.char_search.sub(" ", x)
x = self.strip_multi_ws.sub(" ", x)
x = self.word_re.findall(x)
x = [w.lower() for w in x if len(w) > 1 and w not in self.punc]
# trimmed_x = self._trim_title(" ".join(x))
return " ".join(x)
def _load_kp(self):
data_path = pkg_resources.resource_filename('title_graph', 'data/kp.pkl')
with open(data_path, "rb") as pfile:
return pickle.load(pfile)
def _load_gram_counts(self):
data_path = pkg_resources.resource_filename('title_graph', 'data/gram_counts.pkl')
with open(data_path, "rb") as pfile:
return pickle.load(pfile)
def _trim_title(self, x):
matches = self.kp.extract_keywords(x)
if not matches:
return x
if len(matches) > 1:
return max([(kw, self.gram_counts.get(kw, 0)) for kw in matches], key=lambda pair: pair[1])[0]
else:
return matches[0]
class TitleGraph(object):
def __init__(self, preprocessor=Preprocessor):
self.graph = self._load_graph()
self.model = self._load_model()
self.preprocessor = preprocessor()
def _load_graph(self):
data_path = pkg_resources.resource_filename('title_graph', 'data/graph.pkl')
with open(data_path, "rb") as pfile:
return pickle.load(pfile)
def _load_model(self):
data_path = pkg_resources.resource_filename('title_graph', 'data/title_model.kv')
return KeyedVectors.load(data_path, mmap='r')
def query_forward(self, title, min_weight=2, topn=25):
"""
Given a Job Title, find the most likely Job Title to occur next
:param title: str, a Job Title
:param min_weight: int, the minimum weight to consider from the graph. Setting this higher will reduce the number of matches returned
:return: results if title in self.graph else None
"""
x = self.preprocessor(title)
if x not in self.graph:
return None
results = [(x, y) for x, y in self.graph.succ.get(x).items()]
result_vecs = []
for title, data in results:
if data['weight'] < min_weight:
continue
td = {'title': title, 'weight': data['weight'], 'vec': self.model.wv.get_vector(title) * data['weight']}
result_vecs.append(td)
if not result_vecs:
return []
resulting_vec = np.mean([x['vec'] for x in result_vecs], axis=0)
return self.model.wv.similar_by_vector(resulting_vec, topn=topn)
def query_backwards(self, title, min_weight=2, topn=25):
"""
Given a Job Title, find the most likely previous Job Title
:param title: str, a Job Title
:param min_weight:
:param topn: int, The number of results to return
:return: results if title in self.graph else None
"""
x = self.preprocessor(title)
if x not in self.graph:
return None
results = [(x, y) for x, y in self.graph.pred.get(x).items()]
result_vecs = []
for title, data in results:
if data['weight'] < min_weight:
continue
td = {'title': title, 'weight': data['weight'], 'vec': self.model.wv.get_vector(title) * data['weight']}
result_vecs.append(td)
if not result_vecs:
return []
resulting_vec = np.mean([x['vec'] for x in result_vecs], axis=0)
return self.model.wv.similar_by_vector(resulting_vec, topn=topn)
def query_similar_semantic(self, title, topn=25, as_tokens=False):
"""
Given a Job Title, use FastText via Gensim and return topn similar titles
:param title: str, a Job Title
:param topn: int, The number of results to return
:param as_tokens: bool, Whether to split the string. This should only effect Job Title queries with 2+ words.
If the order of the words is important, leave as False.
:return: results
"""
x = self.preprocessor(title)
if as_tokens:
x = x.split()
return self.model.most_similar(x, topn=topn)
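# Hypothetical usage sketch (assumes the packaged data files under
# title_graph/data/ are installed; the query strings are made up):
# tg = TitleGraph()
# print(tg.query_forward("software engineer", topn=5))
# print(tg.query_similar_semantic("data scientist", topn=5))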
|
{"hexsha": "75f7ab4bbf5d8cf47ec3fbf3a2c2a5049a17141c", "size": 4703, "ext": "py", "lang": "Python", "max_stars_repo_path": "title_graph/title_graph.py", "max_stars_repo_name": "estasney/TitleGraph", "max_stars_repo_head_hexsha": "ad44215849dae7069cad7729c30249a6b87a7dc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-11T08:27:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-11T08:27:08.000Z", "max_issues_repo_path": "title_graph/title_graph.py", "max_issues_repo_name": "estasney/TitleGraph", "max_issues_repo_head_hexsha": "ad44215849dae7069cad7729c30249a6b87a7dc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "title_graph/title_graph.py", "max_forks_repo_name": "estasney/TitleGraph", "max_forks_repo_head_hexsha": "ad44215849dae7069cad7729c30249a6b87a7dc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2123287671, "max_line_length": 141, "alphanum_fraction": 0.6085477355, "include": true, "reason": "import numpy", "num_tokens": 1168}
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter
from emukit.core.loop.user_function import MultiSourceFunctionWrapper
def multi_fidelity_forrester_function(high_fidelity_noise_std_deviation=0, low_fidelity_noise_std_deviation=0):
"""
Two-level multi-fidelity forrester function where the high fidelity is given by:
.. math::
f(x) = (6x - 2)^2 \sin(12x - 4)
and the low fidelity approximation given by:
.. math::
f_{low}(x) = 0.5 f_{high}(x) + 10 (x - 0.5) + 5
:param high_fidelity_noise_std_deviation: Standard deviation of observation noise on high fidelity observations.
Defaults to zero.
:param low_fidelity_noise_std_deviation: Standard deviation of observation noise on low fidelity observations.
Defaults to zero.
:return: Tuple of user function object and parameter space object
"""
parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1), InformationSourceParameter(2)])
user_function = MultiSourceFunctionWrapper([
lambda x: forrester_low(x, low_fidelity_noise_std_deviation),
lambda x: forrester(x, high_fidelity_noise_std_deviation)])
return user_function, parameter_space
def forrester_function(noise_standard_deviation=0):
"""
Forrester function
.. math::
f(x) = (6x - 2)^2 \sin(12x - 4)
:param noise_standard_deviation: Standard deviation of normally distributed observation noise
:return: Tuple of function and parameter space object
"""
def forrester_fcn(x):
return forrester(x, sd=noise_standard_deviation)
return forrester_fcn, ParameterSpace([ContinuousParameter('x', 0, 1)])
def forrester(x, sd=0):
"""
Forrester function
:param x: input vector to be evaluated
:param sd: standard deviation of noise parameter
:return: outputs of the function
"""
x = x.reshape((len(x), 1))
n = x.shape[0]
fval = ((6 * x - 2) ** 2) * np.sin(12 * x - 4)
if sd == 0:
noise = np.zeros(n).reshape(n, 1)
else:
noise = np.random.normal(0, sd, n).reshape(n, 1)
return fval.reshape(n, 1) + noise
def forrester_low(x, sd=0):
"""
Low fidelity forrester function approximation:
:param x: input vector to be evaluated
:param sd: standard deviation of observation noise at low fidelity
:return: outputs of the function
"""
high_fidelity = forrester(x, 0)
return 0.5 * high_fidelity + 10 * (x[:, [0]] - 0.5) + 5 + np.random.randn(x.shape[0], 1) * sd
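# A minimal usage sketch (not part of the original module): evaluate the
# noiseless high- and low-fidelity Forrester functions on a small grid.
if __name__ == '__main__':
    x_grid = np.linspace(0, 1, 5).reshape(-1, 1)
    print(forrester(x_grid))      # high fidelity, shape (5, 1)
    print(forrester_low(x_grid))  # low-fidelity approximation, shape (5, 1)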
|
{"hexsha": "5813147889717c28661ecc036ff12c226c567f44", "size": 2753, "ext": "py", "lang": "Python", "max_stars_repo_path": "emukit/test_functions/forrester.py", "max_stars_repo_name": "ndalchau/emukit", "max_stars_repo_head_hexsha": "eb6754ea016a7cd82b275bb4075676b5ed662634", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 152, "max_stars_repo_stars_event_min_datetime": "2020-10-24T13:12:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T11:35:41.000Z", "max_issues_repo_path": "emukit/test_functions/forrester.py", "max_issues_repo_name": "Tony-Chiong/emukit", "max_issues_repo_head_hexsha": "a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 87, "max_issues_repo_issues_event_min_datetime": "2020-10-26T10:29:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T11:17:59.000Z", "max_forks_repo_path": "emukit/test_functions/forrester.py", "max_forks_repo_name": "Tony-Chiong/emukit", "max_forks_repo_head_hexsha": "a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2020-10-24T11:59:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T17:08:30.000Z", "avg_line_length": 34.8481012658, "max_line_length": 116, "alphanum_fraction": 0.6709044679, "include": true, "reason": "import numpy", "num_tokens": 719}
|
"""
Responsible for providing detiled views about a single stock and closely related views
"""
from collections import defaultdict
from datetime import datetime
import pandas as pd
import numpy as np
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from app.models import (validate_stock, validate_user, stock_info, cached_all_stocks_cip,
Timeframe, company_prices, rsi_data, user_watchlist, selected_cached_stocks_cip,
validate_date, user_purchases, all_available_dates)
from app.analysis import analyse_sector_performance, default_point_score_rules, rank_cumulative_change, calculate_trends
from app.messages import warning
from app.plots import plot_point_scores, plot_fundamentals, make_rsi_plot, plot_trend, plot_company_rank, plot_portfolio
@login_required
def show_stock_sector(request, stock):
validate_stock(stock)
validate_user(request.user)
_, company_details = stock_info(stock, lambda msg: warning(request, msg))
sector = company_details.sector_name if company_details else None
all_stocks_cip = cached_all_stocks_cip(Timeframe(past_n_days=180))
# invoke separate function to cache the calls when we can
c_vs_s_plot, sector_momentum_plot, sector_companies = analyse_sector_performance(stock, sector, all_stocks_cip)
point_score_plot = net_rule_contributors_plot = None
if sector_companies is not None:
point_score_plot, net_rule_contributors_plot = \
plot_point_scores(stock,
sector_companies,
all_stocks_cip,
default_point_score_rules())
context = {
"is_sector": True,
"asx_code": stock,
"sector_momentum_plot": sector_momentum_plot,
"sector_momentum_title": "{} sector stocks".format(sector),
"company_versus_sector_plot": c_vs_s_plot,
"company_versus_sector_title": "{} vs. {} performance".format(stock, sector),
"point_score_plot": point_score_plot,
"point_score_plot_title": "Points score due to price movements",
"net_contributors_plot": net_rule_contributors_plot,
"net_contributors_plot_title": "Contributions to point score by rule",
}
return render(request, "stock_sector.html", context)
@login_required
def show_fundamentals(request, stock=None, n_days=2 * 365):
validate_user(request.user)
validate_stock(stock)
timeframe = Timeframe(past_n_days=n_days)
df = company_prices(
[stock],
timeframe,
fields=("eps", "volume", "last_price", "annual_dividend_yield", \
"pe", "change_in_percent", "change_price", "market_cap", \
"number_of_shares"),
missing_cb=None
)
#print(df)
df['change_in_percent_cumulative'] = df['change_in_percent'].cumsum() # nicer to display cumulative
df = df.drop('change_in_percent', axis=1)
fundamentals_plot = plot_fundamentals(df, stock)
context = {
"asx_code": stock,
"is_fundamentals": True,
"fundamentals_plot": fundamentals_plot
}
return render(request, "stock_fundamentals.html", context)
@login_required
def show_stock(request, stock=None, n_days=2 * 365):
"""
Displays a view of a single stock via the stock_view.html template and associated state
"""
validate_stock(stock)
validate_user(request.user)
timeframe = Timeframe(past_n_days=n_days+200) # add 200 days so MA 200 can initialise itself before the plotting starts...
stock_df = rsi_data(stock, timeframe) # may raise 404 if too little data available
securities, company_details = stock_info(stock, lambda msg: warning(request, msg))
momentum_plot = make_rsi_plot(stock, stock_df)
# plot the price over timeframe in monthly blocks
prices = stock_df[['last_price']].transpose() # use list of columns to ensure pd.DataFrame not pd.Series
#print(prices)
monthly_maximum_plot = plot_trend(prices, sample_period='M')
# populate template and render HTML page with context
context = {
"asx_code": stock,
"securities": securities,
"cd": company_details,
"rsi_plot": momentum_plot,
"is_momentum": True,
"monthly_highest_price_plot_title": "Maximum price each month trend",
"monthly_highest_price_plot": monthly_maximum_plot,
"timeframe": f"{n_days} days",
"watched": user_watchlist(request.user),
}
return render(request, "stock_view.html", context=context)
@login_required
def show_trends(request):
validate_user(request.user)
watchlist_stocks = user_watchlist(request.user)
timeframe = Timeframe(past_n_days=300)
cip = selected_cached_stocks_cip(watchlist_stocks, timeframe)
trends = calculate_trends(cip, watchlist_stocks)
#print(trends)
# for now we only plot trending companies... too slow and unreadable to load the page otherwise!
cip = rank_cumulative_change(
cip.filter(trends.keys(), axis="index"), timeframe
)
#print(cip)
trending_companies_plot = plot_company_rank(cip)
context = {
"watchlist_trends": trends,
"trending_companies_plot": trending_companies_plot,
"trending_companies_plot_title": "Trending watchlist companies by rank: {}".format(timeframe.description),
}
return render(request, "trends.html", context=context)
def sum_portfolio(df: pd.DataFrame, date_str: str, stock_items):
validate_date(date_str)
portfolio_worth = sum(map(lambda t: df.at[t[0], date_str] * t[1], stock_items))
return portfolio_worth
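# e.g. for sum_portfolio above (hypothetical values): if
# df.at['ABC', '2021-01-04'] == 2.50 and stock_items == [('ABC', 100)],
# then sum_portfolio(df, '2021-01-04', [('ABC', 100)]) == 250.0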
@login_required
def show_purchase_performance(request):
purchase_buy_dates = []
purchases = []
stocks = []
for stock, purchases_for_stock in user_purchases(request.user).items():
stocks.append(stock)
for purchase in purchases_for_stock:
purchase_buy_dates.append(purchase.buy_date)
purchases.append(purchase)
purchase_buy_dates = sorted(purchase_buy_dates)
# print("earliest {} latest {}".format(purchase_buy_dates[0], purchase_buy_dates[-1]))
timeframe = Timeframe(from_date=str(purchase_buy_dates[0]), to_date=all_available_dates()[-1])
df = company_prices(stocks, timeframe, transpose=True)
rows = []
stock_count = defaultdict(int)
stock_cost = defaultdict(float)
portfolio_cost = 0.0
for d in [datetime.strptime(x, "%Y-%m-%d").date() for x in timeframe.all_dates()]:
d_str = str(d)
if d_str not in df.columns: # not a trading day?
continue
purchases_to_date = filter(lambda vp, d=d: vp.buy_date <= d, purchases)
for purchase in purchases_to_date:
if purchase.buy_date == d:
portfolio_cost += purchase.amount
stock_count[purchase.asx_code] += purchase.n
stock_cost[purchase.asx_code] += purchase.amount
portfolio_worth = sum_portfolio(df, d_str, stock_count.items())
#print(df)
# emit rows for each stock and aggregate portfolio
for asx_code in stocks:
cur_price = df.at[asx_code, d_str]
if np.isnan(cur_price): # price missing? ok, skip record
continue
assert cur_price is not None and cur_price >= 0.0
stock_worth = cur_price * stock_count[asx_code]
rows.append(
{
"portfolio_cost": portfolio_cost,
"portfolio_worth": portfolio_worth,
"portfolio_profit": portfolio_worth - portfolio_cost,
"stock_cost": stock_cost[asx_code],
"stock_worth": stock_worth,
"stock_profit": stock_worth - stock_cost[asx_code],
"date": d_str,
"stock": asx_code,
}
)
t = plot_portfolio(pd.DataFrame.from_records(rows))
portfolio_performance_figure, stock_performance_figure, profit_contributors_figure = t
context = {
"title": "Portfolio performance",
"portfolio_title": "Overall",
"portfolio_figure": portfolio_performance_figure,
"stock_title": "Stock",
"stock_figure": stock_performance_figure,
"profit_contributors": profit_contributors_figure,
}
return render(request, "portfolio_trends.html", context=context)
|
{"hexsha": "c41b941f5c5201336e6973a35a378411b096bc50", "size": 8479, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/viewer/app/views/stock.py", "max_stars_repo_name": "mappin/asxtrade", "max_stars_repo_head_hexsha": "2b97ffcdefae642a49ce5bfcc131db17796f1691", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/viewer/app/views/stock.py", "max_issues_repo_name": "mappin/asxtrade", "max_issues_repo_head_hexsha": "2b97ffcdefae642a49ce5bfcc131db17796f1691", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-13T05:00:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-13T05:00:40.000Z", "max_forks_repo_path": "src/viewer/app/views/stock.py", "max_forks_repo_name": "mappin/asxtrade", "max_forks_repo_head_hexsha": "2b97ffcdefae642a49ce5bfcc131db17796f1691", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9752475248, "max_line_length": 126, "alphanum_fraction": 0.6779101309, "include": true, "reason": "import numpy", "num_tokens": 1861}
|
import os
import shutil
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from urllib.request import urlopen
def clean_run(model_dir='', source_data=''):
"""Remove model and data files for a clean run"""
if model_dir:
if os.path.exists(model_dir):
print("Deleting resource: Model directory [%s]." % model_dir)
shutil.rmtree(model_dir)
print("Removed resource: Model directory [%s]." % model_dir)
for resource in [source_data, 'training_set.csv', 'test_set.csv']:
if resource:
if os.path.exists(resource):
print("Deleting resource: Data [%s]." % resource)
os.remove(resource)
print("Removed resource: Data [%s]." % resource)
def download_data(data_file, url):
"""Download data if not present on local FileSystem"""
download_url = url + data_file
if not os.path.exists(data_file):
print(
"%s not found on local filesystem. File will be downloaded from %s."
% (data_file, download_url))
raw = urlopen(download_url).read()
with open(data_file, 'wb') as f:
f.write(raw)
print("%s written to local filesystem." % data_file)
def process_source(local_data, col_names, missing_vals='', drop_cols=[]):
"""Clean data of missing vals and irrelevant cols."""
dataframe = pd.read_csv(local_data, names=col_names)
dataframe.replace(missing_vals, np.nan, inplace=True)
dataframe.dropna(inplace=True)
dataframe.drop(drop_cols, axis=1, inplace=True)
return dataframe
def replace_classification_labels(dataframe, result_col='', values_to_replace=[]):
"""Replace default classifications by numerical/binary result."""
target_labels = [x for x in range(0, len(values_to_replace))]
dataframe[result_col].replace(values_to_replace, target_labels, inplace=True)
return dataframe
def split_sets(dataframe_all):
"""Split dataset 80:20 into training and test datasets."""
train_set, test_set = train_test_split(dataframe_all, test_size=0.2, random_state=0)
train_set.to_csv("training_set.csv", index=False, header=None)
test_set.to_csv("test_set.csv", index=False, header=None)
return load_tensor_data("training_set.csv"), load_tensor_data("test_set.csv")
def load_tensor_data(dataset):
"""Load dataset into tensorflow contrib.learn.dataset"""
return tf.contrib.learn.datasets.base.load_csv_without_header(
filename=dataset,
target_dtype=np.int,
features_dtype=np.float32,
target_column=-1)
def get_inputs(data_set):
"""Define inputs for tensor input_fn"""
data = tf.constant(data_set.data)
target = tf.constant(data_set.target)
return data, target
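# Note: tf.contrib.learn estimators invoke input_fn lazily inside their own
# graph, so fit_model/evaluate_model below wrap get_inputs in a lambda to
# defer creation of the constant tensors until then.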
def construct_net(num_features, model_dir):
"""Constructs a 3 layer Deep Neural Net with 10, 20, 10 units"""
feature_cols = [tf.contrib.layers.real_valued_column("", dimension=num_features)]
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_cols,
hidden_units=[10, 20, 10],
n_classes=2,
model_dir=model_dir)
return classifier
def fit_model(model, train_data, steps):
"""Fit model with custom input function"""
model.fit(input_fn=lambda: get_inputs(train_data), steps=steps)
print("\nModel trained after %s steps." % steps)
def evaluate_model(model, test_data, steps):
"""Evaluate model with custom input function"""
accuracy_score = model.evaluate(input_fn=lambda: get_inputs(test_data), steps=steps)["accuracy"]
print("\nModel Accuracy: {0:f}\n".format(accuracy_score))
def new_samples(feature_names):
"""Input new samples for classification"""
request_input = 0
while int(request_input) not in [1, 2]:
request_input = input(
"Predict classification: Enter own data (1) or simulate fake data (2)?\n Enter 1 or 2: ")
if int(request_input) == 1:
sample = np.array([[int(input("Enter value 0-10 for %s: " % x)) for x in feature_names]], dtype=np.float32)
else:
sample = np.array([np.random.randint(11, size=len(feature_names))], dtype=np.float32)
print("Data generated:")
for i, x in enumerate(feature_names):
print("%s: %s" % (x, i))
return sample
def predict_class(model, binary_mappings, feature_names):
"""Predict classification for new data"""
predict_loop = 'Y'
while predict_loop.upper() == 'Y':
binary_prediction = list(model.predict(input_fn=lambda: new_samples(feature_names)))
print("\nClass Prediction: %s\n" % binary_mappings[binary_prediction[0]])
predict_loop = input("Would you like to try another prediction? Enter Y/N: ")
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.ERROR)
model_dir = 'nn_classifier'
cancer_data = 'breast-cancer-wisconsin.data'
data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/'
clean_run(model_dir=model_dir)
feature_names = ['clump_thickness', 'unif_cell_size', 'unif_cell_shape', 'marg_adhesion',
'single_epith_cell_size', 'bare_nuclei', 'bland_chrom', 'norm_nucleoli', 'mitoses']
column_names = ['id'] + feature_names + ['class']
download_data(cancer_data, data_url)
cancer_df = process_source(cancer_data, column_names, missing_vals='?', drop_cols=['id'])
replace_classification_labels(cancer_df, result_col='class', values_to_replace=[2, 4])
train_set, test_set = split_sets(cancer_df)
dnn_model = construct_net(num_features=9, model_dir=model_dir)
fit_model(dnn_model, train_set, steps=2000)
evaluate_model(dnn_model, test_set, steps=1)
predict_class(dnn_model, {0: 'benign', 1: 'malignant'}, feature_names)
|
{"hexsha": "75a275e62bbf72e499a766dfa022865f8681b359", "size": 5998, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project 8 -- Deep Learning for Cancer Classification/dnn_data_classifier/main.py", "max_stars_repo_name": "Vauke/Deep-Neural-Networks-HealthCare", "max_stars_repo_head_hexsha": "a6e0cc9d44e06ab3b3f3a947c512ca25f3e17a14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-15T11:05:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-16T16:31:49.000Z", "max_issues_repo_path": "Project 8 -- Deep Learning for Cancer Classification/dnn_data_classifier/main.py", "max_issues_repo_name": "Vauke/Deep-Neural-Networks-HealthCare", "max_issues_repo_head_hexsha": "a6e0cc9d44e06ab3b3f3a947c512ca25f3e17a14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:27:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T03:14:02.000Z", "max_forks_repo_path": "Project 8 -- Deep Learning for Cancer Classification/dnn_data_classifier/main.py", "max_forks_repo_name": "Vauke/Deep-Neural-Networks-HealthCare", "max_forks_repo_head_hexsha": "a6e0cc9d44e06ab3b3f3a947c512ca25f3e17a14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-20T09:58:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T18:29:49.000Z", "avg_line_length": 33.3222222222, "max_line_length": 115, "alphanum_fraction": 0.6682227409, "include": true, "reason": "import numpy", "num_tokens": 1345}
|
# Energy spectrum of oscillations at a fixed point.
using FFTW, JLD2, CurveFit, PyPlot
using Vlasiator: RE
file = "satellites_uniform_sampled.jld2"
data = JLD2.load(file)
nSatellite = length(data["t"])
nI, nJ = size(data["rho"])[2:3]
t = data["t"]
# Select spatial point
i, j = 5, 5
var = data["rho"][:,i,j]
dt = t[2] - t[1] # uniform sample interval [s]
Fs = 1 / dt # sample frequency, [Hz]
Fn = Fs / 2 # Nyquist frequency, [Hz]
## Frequency calculation
nPoints = length(var)
nFFT = nPoints
df = Fs / nFFT
freq_fullrange = -Fn:df:Fn
freq = freq_fullrange[(nPoints ÷ 2 + 1):end-1]
var_freq = fft(var)
var_power = abs.(fftshift(var_freq))[(nPoints ÷ 2 + 1):end]
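# Keep only the non-negative half of the shifted spectrum;
# the power-law fit below runs over these frequencies.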
# power_fit returns (a, k) for the model a * f^k, i.e. k is the power-law exponent
a, k = @views power_fit(freq[10:end], var_power[10:end])
figure(figsize=(6,8))
loglog(freq, var_power)
axis("scaled")
min_power, max_power = extrema(@view var_power[10:end])
xlim(freq[8], Fs)
ylim(min_power * 0.75, max_power * 2.0)
xlabel("Frequency [Hz]"; fontsize=14)
ylabel("Power Density "; fontsize=14)
title(string(round.(data["locations"][i,j]./RE, digits=1))*"Re"; fontsize=14)
loglog(freq[10:end], a.*freq[10:end].^k, label="k = $(round(k, digits=1))")
legend()
|
{"hexsha": "e505ca97dacc490c4e67fcc99e1a965fc83ac58f", "size": 1194, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/demo_energy_spectrum.jl", "max_stars_repo_name": "alhom/Vlasiator.jl", "max_stars_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/demo_energy_spectrum.jl", "max_issues_repo_name": "alhom/Vlasiator.jl", "max_issues_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/demo_energy_spectrum.jl", "max_forks_repo_name": "alhom/Vlasiator.jl", "max_forks_repo_head_hexsha": "615333705b5346522479ab72398f059cb94ab026", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9615384615, "max_line_length": 77, "alphanum_fraction": 0.6658291457, "num_tokens": 401}
|
%% EVENT OBJECT (event.m) %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This class is designed to define a generic agent and import this variables
% into the simulation space for the purpose of multi-vehicle control simulation.
% Author: James A. Douthwaite
classdef eventDefinition
%%% EVENT BASE CLASS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This class contains the basic properties of a generic event.
properties
% EVENT PARAMETERS
eventID;
name;
type;
time;
info;
end
%% CLASS METHODS
methods
% CONSTRUCTION METHOD
function obj = eventDefinition(time,name,summaryInfo)
% This function is designed to generate an event with an
% automatically defined ID number and basic properties
% INPUTS:
% time
% name
% summaryInfo - Assigned description string
% OUTPUTS:
% obj - The generated object
% INPUT HANDLING
% CONFIRM EVENT TIME
if ~isnumeric(time) % Default time setting
% if ~exist('time','var') || ~isnumeric(time) % Default time setting
obj.time = 0;
%warning('An event time must be defined.');
return
else
obj.time = time;
end
% CONFIRM NAME STRING
if isnumeric(name) || isempty(name) % Default name setting
obj.name = 'EVENT';
else
obj.name = name;
end
% CONFIRM DEFAULT SUMMARY INFORMATION
% if ~exist('summaryInfo','var') || isempty(summaryInfo)
if isempty(summaryInfo)
obj.info = 'None.'; % Default information setting
else
obj.info = summaryInfo;
end
% ALLOCATE AUTOINCREMENTING ID NUMBER
persistent eventcount;
if isempty(eventcount)
eventcount = 1;
else
eventcount = eventcount + 1;
end
obj.eventID = uint16(eventcount); % Assign the number as an ID tag
% ASSIGN THE DEFAULT TYPE
obj.type = eventType.event; % Assign default enumeration of 'event'
end
end
% PRIVATE METHODS (CLASS SPECIFIC TOOLS)
methods (Access = private)
% PRINT THE OBJECT SUMMARY
function displaySummary(obj)
fprintf('[%s]\tevent created (ID:%d @%.2fs):\t%s\n',obj.name,obj.eventID,obj.time,obj.info);
end
end
end
|
{"author": "douthwja01", "repo": "OpenMAS", "sha": "962f321f82167db78066b2c88c783423ecc3b73a", "save_path": "github-repos/MATLAB/douthwja01-OpenMAS", "path": "github-repos/MATLAB/douthwja01-OpenMAS/OpenMAS-962f321f82167db78066b2c88c783423ecc3b73a/environment/events/eventDefinition.m"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Evaluate submissions on kbpo server.
"""
import sys
import csv
import logging
from kbpo import evaluation_api
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO)
def fn(n):
return "{:0.04f}".format(n)
# Actually call the code.
def do_evaluate(args):
writer = csv.writer(args.output, delimiter="\t")
writer.writerow(['run_id',
'p', 'r', 'f1',
'err-p-left', 'err-r-left', 'err-f1-left',
'err-p-right', 'err-r-right', 'err-f1-right',
])
for system, score in evaluation_api.get_updated_scores(args.corpus_tag, mode=args.mode, num_epochs=args.num_epochs):
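# Re-express the left/right values returned by the API as signed offsets
# from the point estimates (p, r, f1), matching the "err-*-left" /
# "err-*-right" columns written above.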
score = score._replace(
p_left=score.p - score.p_left,
r_left=score.r - score.r_left,
f1_left=score.f1 - score.f1_left,
p_right=-score.p + score.p_right,
r_right=-score.r + score.r_right,
f1_right=-score.f1 + score.f1_right,)
writer.writerow([system,] + [fn(v) for v in score])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Evaluate submissions from the KBPO database')
parser.add_argument('-m', '--mode', choices=['simple', 'joint'], default='simple', help='Mode to evaluate experiments with')
parser.add_argument('-t', '--corpus-tag', choices=['kbp2016'], default='kbp2016', help='Evaluation corpus to get scores for')
parser.add_argument('-n', '--num-epochs', type=int, default=1000, help="Number of epochs to average over")
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help="Outputs a list of results for every system (true, predicted, stdev)")
parser.set_defaults(func=do_evaluate)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
|
{"hexsha": "0fa164c3cae6c5f5a4884d9be20a0ccb1cd7de90", "size": 2089, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluate.py", "max_stars_repo_name": "arunchaganty/kbp-online", "max_stars_repo_head_hexsha": "9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-08-09T14:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-25T01:34:23.000Z", "max_issues_repo_path": "src/evaluate.py", "max_issues_repo_name": "arunchaganty/kbp-online", "max_issues_repo_head_hexsha": "9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2017-01-19T23:18:18.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-23T18:57:54.000Z", "max_forks_repo_path": "src/evaluate.py", "max_forks_repo_name": "arunchaganty/kbp-online", "max_forks_repo_head_hexsha": "9f8763d8f4bfb1fb8a01f1f4f506f56625dd38d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-08T09:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-09T09:12:43.000Z", "avg_line_length": 36.0172413793, "max_line_length": 166, "alphanum_fraction": 0.6462422212, "include": true, "reason": "import numpy", "num_tokens": 499}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from PyQt5 import QtCore, QtWidgets
import gr
from qtgr import GRWidget
import csv
from util.logger import Logger
import sys
from statistics.pdf import PDF, Kernels
import numpy as np
import os
logger = Logger("gui.pdf_widget")
logger.setstream("default", sys.stdout, Logger.DEBUG)
class GrPlotWidget(GRWidget):
def __init__(self, *args, **kwargs):
super(GrPlotWidget, self).__init__(*args, **kwargs)
self.xvalues = None
self.yvalues = None
self.title = None
self.datapoints = None
def quit(self):
gr.emergencyclosegks()
self.close()
def setdata(self, xvalues, yvalues, title, datapoints=None):
self.xvalues = xvalues
self.yvalues = yvalues
self.title = title
self.datapoints = datapoints
def draw(self, wsviewport=None):
if self.xvalues is not None:
rangex = (self.xvalues.min(), self.xvalues.max())
else:
rangex = (0, 10)
if self.yvalues is not None:
rangey = gr.adjustrange(self.yvalues.min(), self.yvalues.max())
else:
rangey = (0, 4)
if wsviewport is None:
gr.setwsviewport(0, self.mwidth, 0, self.mheight)
else:
gr.setwsviewport(*wsviewport)
gr.setwswindow(0, self.sizex, 0, self.sizey)
gr.setviewport(0.075 * self.sizex, 0.95 * self.sizex,
0.075 * self.sizey, 0.95 * self.sizey)
gr.setwindow(rangex[0], rangex[1], rangey[0], rangey[1])
gr.setcharheight(0.012)
gr.setfillintstyle(1)
gr.setfillcolorind(0)
gr.fillrect(rangex[0], rangex[1], rangey[0], rangey[1])
if self.xvalues is not None and self.yvalues is not None:
gr.setlinecolorind(2)
gr.polyline(self.xvalues, self.yvalues)
else:
gr.text(0.4 * self.sizex, 0.5 * self.sizey, "no elements selected")
gr.setlinecolorind(1)
gr.axes(0.2, 0.2, rangex[0], rangey[0], 5, 5, 0.0075)
gr.axes(0.2, 0.2, rangex[1], rangey[1], -5, -5, -0.0075)
if self.title is not None:
gr.text(0.8 * self.sizex, 0.9 * self.sizey, self.title)
class PDFWidget(QtWidgets.QWidget):
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
self.control = parent.control
self.results = None
self.pdf = None
self.init_gui()
def init_gui(self):
vbox = QtWidgets.QVBoxLayout()
grid = QtWidgets.QGridLayout()
self.gr_widget = GrPlotWidget()
self.datasetlabel = QtWidgets.QLabel("No data loaded.", self)
self.datasetlabel.setAlignment(QtCore.Qt.AlignHCenter)
elembox = QtWidgets.QHBoxLayout()
elembox.addWidget(QtWidgets.QLabel("Elements:", self), 0)
self.elem1 = QtWidgets.QComboBox(self)
self.elem1.setMinimumWidth(170)
elembox.addWidget(self.elem1, 1)
self.elem2 = QtWidgets.QComboBox(self)
self.elem2.setMinimumWidth(170)
elembox.addWidget(self.elem2, 1)
grid.addLayout(elembox, 0, 0)
rangebox = QtWidgets.QHBoxLayout()
rangebox.addWidget(QtWidgets.QLabel("Plot range:", self))
self.range1 = QtWidgets.QLineEdit("0", self)
self.range1.setMinimumWidth(30)
rangebox.addWidget(self.range1)
rangebox.addWidget(QtWidgets.QLabel("-", self))
self.range2 = QtWidgets.QLineEdit("8", self)
self.range2.setMinimumWidth(30)
rangebox.addWidget(self.range2)
grid.addLayout(rangebox, 1, 0)
cutoffbox = QtWidgets.QHBoxLayout()
cutoffbox.addWidget(QtWidgets.QLabel("Kernel:", self))
self.kernels = {"Gaussian": Kernels.gauss, "Epanechnikov": Kernels.epanechnikov, "Compact": Kernels.compact, "Triangular": Kernels.triang, "Box": Kernels.quad, "Right Box": Kernels.posquad, "Left Box": Kernels.negquad}
self.kernel = QtWidgets.QComboBox(self)
self.kernel.setMinimumWidth(130)
self.kernel.addItems(["Gaussian", "Epanechnikov", "Compact", "Triangular", "Box", "Right Box", "Left Box"])
cutoffbox.addWidget(self.kernel)
cutoffbox.addWidget(QtWidgets.QLabel("Cutoff:", self))
self.cutoff = QtWidgets.QLineEdit("12", self)
self.cutoff.setMinimumWidth(30)
cutoffbox.addWidget(self.cutoff)
cutoffbox.addWidget(QtWidgets.QLabel("Bandwidth:", self))
self.bandwidth = QtWidgets.QLineEdit("", self)
self.bandwidth.setMinimumWidth(30)
cutoffbox.addWidget(self.bandwidth)
grid.addLayout(cutoffbox, 2, 0)
buttonbox = QtWidgets.QHBoxLayout()
self.plotbutton = QtWidgets.QPushButton("Plot", self)
buttonbox.addWidget(self.plotbutton)
self.plotbutton.clicked.connect(self.draw)
self.export_image_button = QtWidgets.QPushButton("Save Image", self)
buttonbox.addWidget(self.export_image_button)
self.export_image_button.clicked.connect(self.export_image)
self.export_data_button = QtWidgets.QPushButton("Export Data", self)
buttonbox.addWidget(self.export_data_button)
self.export_data_button.clicked.connect(self.export_data)
grid.addLayout(buttonbox, 3, 0)
vbox.addWidget(self.gr_widget, stretch=1)
vbox.addWidget(self.datasetlabel, stretch=0)
vbox.addLayout(grid)
self.setLayout(vbox)
self.show()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Return \
or event.key() == QtCore.Qt.Key_Enter:
self.update()
def draw(self, now=False, wsviewport=None):
xvalues = None
yvalues = None
title = None
datapoints = None
if self.pdf is None:
self.refresh()
if self.pdf is not None:
elem1 = str(self.elem1.currentText())
if elem1 == "cavity domain centers":
elem1 = "cav"
elem2 = str(self.elem2.currentText())
if elem2 == "cavity domain centers":
elem2 = "cav"
range1 = float(str(self.range1.text()))
range2 = float(str(self.range2.text()))
cutoff = str(self.cutoff.text())
if len(cutoff) > 0 and float(cutoff) > 0:
cutoff = float(cutoff)
else:
cutoff = None
try:
bandwidth = float(str(self.bandwidth.text()))
if bandwidth < 0:
bandwidth = 0
self.bandwidth.setText('0')
except ValueError:
bandwidth = None
self.bandwidth.setText('')
kernel = self.kernels.get(self.kernel.currentText(), None)
f = self.pdf.pdf(elem1, elem2, cutoff=cutoff, h=bandwidth, kernel=kernel)
if f is not None:
if callable(f):
xvalues = np.linspace(range1, range2, 400)
yvalues = f(xvalues)
title = "{} - {}".format(elem1, elem2)
datapoints = f.f.x
else:
peaks = f[np.logical_and(range1 < f, f < range2)]
if len(peaks) > 2:
xvalues = np.zeros(len(peaks) * 3)
xvalues[0::3] = peaks
xvalues[1::3] = peaks
xvalues[2::3] = peaks
yvalues = np.zeros(len(peaks) * 3)
yvalues[1::3] = 1
datapoints = peaks
self.gr_widget.setdata(xvalues, yvalues, title, datapoints)
self.gr_widget.update()
if now:
self.gr_widget.draw(wsviewport=wsviewport)
def export_image(self):
extensions = (".pdf", ".png", ".bmp", ".jpg", ".jpeg", ".png",
".tiff", ".fig", ".svg", ".wmf", ".eps", ".ps")
qtext = "*" + " *".join(extensions)
filepath = QtWidgets.QFileDialog.getSaveFileName(self, "Save Image",
".", "Image Files ({})".format(qtext))[0]
if len(filepath) == 0:
return
if filepath.endswith('.eps') or filepath.endswith('.ps'):
gr.beginprintext(filepath, 'Color', 'A4', 'Landscape')
self.draw(now=True, wsviewport=(0, 0.297*0.9, 0, 0.21*0.95))
else:
gr.beginprint(filepath)
self.draw(now=True)
gr.endprint()
def export_data(self):
qtext = " *.csv"
filepath = QtWidgets.QFileDialog.getSaveFileName(self, "Save Data",
".", "CSV Files ({})".format(qtext))[0]
if len(filepath) == 0:
return
self.draw()  # refresh the plotted values before exporting
xvalues = self.gr_widget.xvalues
yvalues = self.gr_widget.yvalues
if xvalues is None or yvalues is None:
return
with open(filepath, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
for x, y in zip(xvalues, yvalues):
csvwriter.writerow([x, y])
def refresh(self):
results = self.control.results
if results is not None:
results = results[-1][-1]
if self.results != results or self.pdf is None:
self.results = results
self.pdf = PDF(results)
e = np.unique(results.atoms.elements).tolist()
if results.domains is not None \
and len(results.domains.centers) > 0 \
and "cav" not in e:
e.append("cavity domain centers")
self.elem1.clear()
self.elem1.addItems(e)
self.elem2.clear()
self.elem2.addItems(e)
self.gr_widget.setdata(None, None, None)
self.gr_widget.update()
self.datasetlabel.setText(str(results))
else:
self.datasetlabel.setText("")
self.elem1.clear()
self.elem2.clear()
def activate(self):
self.refresh()
def updatestatus(self):
self.refresh()
|
{"hexsha": "b51b1ae0a53562e3645ef1d8837b1a551501df46", "size": 10235, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gui/pdf_widget.py", "max_stars_repo_name": "sciapp/pyMolDyn", "max_stars_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-10-25T09:48:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-30T18:59:50.000Z", "max_issues_repo_path": "src/gui/pdf_widget.py", "max_issues_repo_name": "sciapp/pyMolDyn", "max_issues_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-09-19T06:03:36.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-28T11:29:23.000Z", "max_forks_repo_path": "src/gui/pdf_widget.py", "max_forks_repo_name": "sciapp/pyMolDyn", "max_forks_repo_head_hexsha": "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0833333333, "max_line_length": 226, "alphanum_fraction": 0.5655105032, "include": true, "reason": "import numpy", "num_tokens": 2399}
|
import torch
import numpy as np
EPS = 1e-8
class TrajStorage(object):
def __init__(self, rollouts, aug_fn=None):
trajs = []
num_processes = rollouts.obs.shape[1]
for env_index in range(num_processes):
env_masks = rollouts.masks[:, env_index]
env_obs = rollouts.obs[:, env_index]
env_actions = rollouts.pi[:, env_index]
env_recurrent_states = rollouts.recurrent_hidden_states[:, env_index]
masks_indices = 1 - env_masks[:, 0]
indices = masks_indices.nonzero()[:, 0].tolist()
if len(indices) < 2:
continue
prev_index = indices[0]
for index in indices[1:]:
obs = env_obs[prev_index: index]
actions = env_actions[prev_index: index]
traj_masks = env_masks[prev_index: index]
recurrent_states = env_recurrent_states[prev_index: index]
prev_index = index
trajs.append(((obs, recurrent_states, traj_masks), actions))
self.trajs = trajs
self.num_trajs = len(trajs)
self.aug_fn = aug_fn
def sample_traj_pair(self):
idx1, idx2 = np.random.randint(0, self.num_trajs, 2)
traj1, traj2 = self.trajs[idx1], self.trajs[idx2]
if self.aug_fn is not None:
obs_aug1 = self.aug_fn.do_augmentation(traj1[0][0])
obs_aug2 = self.aug_fn.do_augmentation(traj2[0][0])
traj_inputs1 = (obs_aug1, traj1[0][1], traj1[0][2])
traj_inputs2 = (obs_aug2, traj2[0][1], traj2[0][2])
traj1, traj2 = (traj_inputs1, traj1[1]), (traj_inputs2, traj2[1])
return traj1, traj2
def metric_fixed_point(cost_matrix, gamma=0.99, eps=1e-7):
"""DP for calculating PSM (approximately).
Args:
      cost_matrix: DIST matrix whose entry at index (i, j) is DIST(x_i, y_j).
gamma: Metric discount factor.
eps: Threshold for stopping the fixed point iteration.
"""
d = torch.zeros_like(cost_matrix)
def operator(d_cur):
        d_new = 1 * cost_matrix  # multiplying by 1 makes a copy, so cost_matrix is never mutated
discounted_d_cur = gamma * d_cur
d_new[:-1, :-1] += discounted_d_cur[1:, 1:]
d_new[:-1, -1] += discounted_d_cur[1:, -1]
d_new[-1, :-1] += discounted_d_cur[-1, 1:]
return d_new
while True:
d_new = operator(d)
if torch.sum(torch.abs(d - d_new)) < eps:
break
else:
d = d_new[:]
return d
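# Worked illustration (comments only, not in the original file): with
# cost_matrix = [[0, 1], [1, 1]] and gamma = 0.5, the iteration converges to
# d = [[0.5, 1.5], [1.5, 1.0]]: each entry is cost(i, j) plus the discounted
# value of the diagonally-next pair, with the last row/column absorbing at the boundary.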
def _calculate_action_cost_matrix(actions_1, actions_2):
diff = torch.unsqueeze(actions_1, dim=1) - torch.unsqueeze(actions_2, dim=0)
tv_distance = 0.5 * torch.sum(torch.abs(diff), dim=2)
return tv_distance
def contrastive_loss(similarity_matrix,
metric_values,
temperature=1.0,
beta=1.0):
"""Contrative Loss with embedding similarity ."""
metric_shape = metric_values.shape
## z_\theta(X): embedding_1 = nn_model.representation(X)
## z_\theta(Y): embedding_2 = nn_model.representation(Y)
## similarity_matrix = cosine_similarity(embedding_1, embedding_2)
## metric_values = PSM(X, Y)
soft_similarity_matrix = similarity_matrix / temperature
col_indices = torch.argmin(metric_values, dim=1)
pos_indices1 = (torch.arange(start=0, end=metric_shape[0],
dtype=torch.int64), col_indices)
metric_values /= beta
similarity_measure = torch.exp(-metric_values)
pos_weights1 = -metric_values[pos_indices1]
pos_logits1 = soft_similarity_matrix[pos_indices1] + pos_weights1
negative_weights = torch.log((1.0 - similarity_measure) + 1e-8)
negative_weights[pos_indices1] += pos_weights1
neg_logits1 = soft_similarity_matrix + negative_weights
neg_logits1 = torch.logsumexp(neg_logits1, dim=1)
return torch.mean(neg_logits1 - pos_logits1) # Equation 4
def representation_alignment_loss(nn_model,
traj_tuple,
coupling_temperature=0.1,
gamma = 0.1,
temperature=1.0):
(inputs1, ac1), (inputs2, ac2) = traj_tuple
metric_vals = compute_metric(ac1, ac2, gamma=gamma)
representation_1 = nn_model.representation(*inputs1)
representation_2 = nn_model.representation(*inputs2)
similarity_matrix = _cosine_similarity(representation_1, representation_2)
alignment_loss = contrastive_loss(
similarity_matrix,
metric_vals,
temperature=temperature,
beta=coupling_temperature)
return alignment_loss
def compute_metric(actions1, actions2, gamma):
action_cost = _calculate_action_cost_matrix(actions1, actions2)
return metric_fixed_point(action_cost, gamma=gamma)
def _cosine_similarity(x, y):
"""Computes cosine similarity between all pairs of vectors in x and y."""
x_expanded, y_expanded = torch.unsqueeze(x, dim=1), torch.unsqueeze(y, dim=0)
similarity_matrix = torch.sum(x_expanded * y_expanded, dim=-1)
similarity_matrix /= (
torch.norm(x_expanded, dim=-1) * torch.norm(y_expanded, dim=-1) + EPS)
return similarity_matrix
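# --- Usage sketch (illustrative, not part of the original file) ---
# A dummy model and dummy trajectories; `representation(obs, rnn_states, masks)`
# is the only interface representation_alignment_loss relies on.
if __name__ == '__main__':
    T, OBS_DIM, ACT_DIM = 5, 8, 4
    def _dummy_traj():
        obs = torch.randn(T, OBS_DIM)
        rnn_states = torch.zeros(T, 1)
        masks = torch.ones(T, 1)
        actions = torch.softmax(torch.randn(T, ACT_DIM), dim=-1)  # per-step action distributions
        return (obs, rnn_states, masks), actions
    class _DummyModel:
        def representation(self, obs, rnn_states, masks):
            return obs  # identity embedding stands in for a policy encoder
    loss = representation_alignment_loss(_DummyModel(), (_dummy_traj(), _dummy_traj()))
    print('alignment loss:', loss.item())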
|
{"hexsha": "a26c79e74dd7d77408a9f371fe4f0ea119d2073a", "size": 4952, "ext": "py", "lang": "Python", "max_stars_repo_path": "ucb_rl2_meta/algo/contrastive_helpers.py", "max_stars_repo_name": "agarwl/auto-drac", "max_stars_repo_head_hexsha": "d86c480b51929e6e4ec0ae1adba84d9f78e91705", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ucb_rl2_meta/algo/contrastive_helpers.py", "max_issues_repo_name": "agarwl/auto-drac", "max_issues_repo_head_hexsha": "d86c480b51929e6e4ec0ae1adba84d9f78e91705", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ucb_rl2_meta/algo/contrastive_helpers.py", "max_forks_repo_name": "agarwl/auto-drac", "max_forks_repo_head_hexsha": "d86c480b51929e6e4ec0ae1adba84d9f78e91705", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9552238806, "max_line_length": 79, "alphanum_fraction": 0.6653877221, "include": true, "reason": "import numpy", "num_tokens": 1288}
|
// Boost string_algo library std_containers_traits.hpp header file ---------------------------//
// Copyright Pavol Droba 2002-2003.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/ for updates, documentation, and revision history.
#ifndef BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
#define BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
/*!\file
    This file includes sequence traits for STL containers.
*/
#include <boost/config.hpp>
#include <boost/algorithm/string/std/string_traits.hpp>
#include <boost/algorithm/string/std/list_traits.hpp>
// MONGODB MODIFICATION: The computed include in slist_traits breaks
// icecream with clang due to -frewrite-includes. We don't use <slist>
// anywhere, so we don't need these traits.
#if 0 // def BOOST_HAS_SLIST
# include <boost/algorithm/string/std/slist_traits.hpp>
#endif
#endif // BOOST_STRING_STD_CONTAINERS_TRAITS_HPP
|
{"hexsha": "5053380e7652502ad8272193b0eb7e1a53ad1d86", "size": 1027, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/third_party/boost/boost/algorithm/string/std_containers_traits.hpp", "max_stars_repo_name": "benety/mongo", "max_stars_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/third_party/boost/boost/algorithm/string/std_containers_traits.hpp", "max_issues_repo_name": "benety/mongo", "max_issues_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/third_party/boost/boost/algorithm/string/std_containers_traits.hpp", "max_forks_repo_name": "benety/mongo", "max_forks_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2333333333, "max_line_length": 98, "alphanum_fraction": 0.7448880234, "num_tokens": 247}
|
"""
Helper infrastructure to compile and sample models using `cmdstan`.
[`StanModel`](@ref) wraps a model definition (source code), while [`stan_sample`](@ref) can
be used to sample from it.
[`stan_compile`](@ref) can be used to pre-compile a model without sampling. A
[`StanModelError`](@ref) is thrown if this fails, which contains the error messages from
`stanc`.
"""
module StanSample
using Reexport
@reexport using StanBase
using DocStringExtensions: FIELDS, SIGNATURES, TYPEDEF
import StanRun: stan_sample, stan_cmd_and_paths, default_output_base
import StanSamples: read_samples
import StanBase: cmdline
include("stanmodel/sample_types.jl")
include("stanmodel/SampleModel.jl")
include("stanrun/cmdline.jl")
include("stanrun/stan_generate_quantities.jl")
include("stansamples/read_samples.jl")
export
SampleModel,
stan_generate_quantities
end # module
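# --- Usage sketch (illustrative, not part of the original file) ---
# Assumes a working cmdstan installation; the model string and data are made up.
#
#   bernoulli = "data { int N; int y[N]; } parameters { real<lower=0,upper=1> theta; }
#                model { theta ~ beta(1, 1); y ~ bernoulli(theta); }"
#   sm = SampleModel("bernoulli", bernoulli)        # compiles the model via cmdstan
#   rc = stan_sample(sm; data = Dict("N" => 4, "y" => [0, 1, 0, 1]))
#   samples = read_samples(sm)                      # draws, as re-exported above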
|
{"hexsha": "a166adea21177c19bdac1632c07f0b89ae5d146c", "size": 871, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/StanSample.jl", "max_stars_repo_name": "UnofficialJuliaMirror/StanSample.jl-c1514b29-d3a0-5178-b312-660c88baa699", "max_stars_repo_head_hexsha": "768894f98284a1840f01fd9c6c51c5247bae2ad5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/StanSample.jl", "max_issues_repo_name": "UnofficialJuliaMirror/StanSample.jl-c1514b29-d3a0-5178-b312-660c88baa699", "max_issues_repo_head_hexsha": "768894f98284a1840f01fd9c6c51c5247bae2ad5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/StanSample.jl", "max_forks_repo_name": "UnofficialJuliaMirror/StanSample.jl-c1514b29-d3a0-5178-b312-660c88baa699", "max_forks_repo_head_hexsha": "768894f98284a1840f01fd9c6c51c5247bae2ad5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6176470588, "max_line_length": 91, "alphanum_fraction": 0.7864523536, "num_tokens": 216}
|
# Experiment 2
# Shift in fire regime from small-frequent to large-infrequent
# Shift occurs at different times in the past, and between widely divergent regimes and closer regimes
## Launch model
library(doParallel)
library(foreach)
source("cat_face_mortality_pfire_split.r")
registerDoParallel(cores=16)
######################################################
######################################################
# Experiment 2: Split regime, mortality on
############################################
FIELD_SIZE=200 # Number of rows/columns
SIM_LENGTH=700 # number of years to run
FIRE_FREQ_1=100 # percentage of years regime 1
FIRE_PROB_1=5 # Area regime 1
FIRE_FREQ_2=5 # Percentage of years regime 2
FIRE_PROB_2=100 # Area regime 2
MORTALITY_ON=TRUE # Non-fire Mortality turned on?
F_MORTALITY_ON=TRUE # Fire-mortality turned on?
MORT_b1 = 20 # First mortality age break
MORT_b2 = 400 # Second mortality age break
MORT_p1 = 0.02
MORT_p2 = 0.005
MORT_p3 = 0.02
MORT_F_b1 = 10 # First fire mortality age break
MORT_F_b2 = 30 # Second fire mortality age break
MORT_F_b3 = 400 # Third fire mortality age break
MORT_F_p1 = 0.7
MORT_F_p2 = 0.3
MORT_F_p3 = 0.05
MORT_F_p4 = 0.4
NUMBER_SIMS=400
SPLIT_YEAR=rep(c(150,200,250,300,350,400,450,500,550,600),each=40) # Sequence of split years
output_dir="e2_range_wide"
dir.create(output_dir)
#########################################
# Create dataframe to define this sim
# This can vary between runs
#
#########################################
sim_def_frame=data.frame(SIM_ID=1:NUMBER_SIMS)
sim_def_frame$FIELD_SIZE=FIELD_SIZE
sim_def_frame$SIM_LENGTH=SIM_LENGTH
sim_def_frame$FIRE_FREQ_1=FIRE_FREQ_1
sim_def_frame$FIRE_FREQ_2=FIRE_FREQ_2
sim_def_frame$FIRE_PROB_1=FIRE_PROB_1
sim_def_frame$FIRE_PROB_2=FIRE_PROB_2
sim_def_frame$MORTALITY_ON=MORTALITY_ON
sim_def_frame$F_MORTALITY_ON=F_MORTALITY_ON
sim_def_frame$MORT_b1 = MORT_b1
sim_def_frame$MORT_b2 = MORT_b2
sim_def_frame$MORT_p1 = MORT_p1
sim_def_frame$MORT_p2 = MORT_p2
sim_def_frame$MORT_p3 = MORT_p3
sim_def_frame$MORT_F_b1 = MORT_F_b1
sim_def_frame$MORT_F_b2 = MORT_F_b2
sim_def_frame$MORT_F_b3 = MORT_F_b3
sim_def_frame$MORT_F_p1 = MORT_F_p1
sim_def_frame$MORT_F_p2 = MORT_F_p2
sim_def_frame$MORT_F_p3 = MORT_F_p3
sim_def_frame$MORT_F_p4 = MORT_F_p4
sim_def_frame$SPLIT_YEAR=SPLIT_YEAR
sim_def_frame$output_dir = output_dir
write.csv(sim_def_frame,paste0(output_dir,"/sim_list.csv"))
foreach(i=1:NUMBER_SIMS) %dopar% launch_sim_split(i)
######################################################
######################################################
# Experiment 2: Split regime, mortality on, narrow regime range
############################################
FIELD_SIZE=200 # Number of rows/columns
SIM_LENGTH=700 # number of years to run
FIRE_FREQ_1=20 # percentage of years
FIRE_PROB_1=25
FIRE_FREQ_2=10
FIRE_PROB_2=50
MORTALITY_ON=TRUE # Non-fire Mortality turned on?
F_MORTALITY_ON=TRUE # Fire-mortality turned on?
MORT_b1 = 20 # First mortality age break
MORT_b2 = 400 # Second mortality age break
MORT_p1 = 0.02
MORT_p2 = 0.005
MORT_p3 = 0.02
MORT_F_b1 = 10 # First fire mortality age break
MORT_F_b2 = 30 # Second fire mortality age break
MORT_F_b3 = 400 # Third fire mortality age break
MORT_F_p1 = 0.7
MORT_F_p2 = 0.3
MORT_F_p3 = 0.05
MORT_F_p4 = 0.4
NUMBER_SIMS=400
SPLIT_YEAR=rep(c(150,200,250,300,350,400,450,500,550,600),each=40)
output_dir="e2_range_narrow"
dir.create(output_dir)
#########################################
# Create dataframe to define this sim
# This can vary between runs
#
#########################################
sim_def_frame=data.frame(SIM_ID=1:NUMBER_SIMS)
sim_def_frame$FIELD_SIZE=FIELD_SIZE
sim_def_frame$SIM_LENGTH=SIM_LENGTH
sim_def_frame$FIRE_FREQ_1=FIRE_FREQ_1
sim_def_frame$FIRE_FREQ_2=FIRE_FREQ_2
sim_def_frame$FIRE_PROB_1=FIRE_PROB_1
sim_def_frame$FIRE_PROB_2=FIRE_PROB_2
sim_def_frame$MORTALITY_ON=MORTALITY_ON
sim_def_frame$F_MORTALITY_ON=F_MORTALITY_ON
sim_def_frame$MORT_b1 = MORT_b1
sim_def_frame$MORT_b2 = MORT_b2
sim_def_frame$MORT_p1 = MORT_p1
sim_def_frame$MORT_p2 = MORT_p2
sim_def_frame$MORT_p3 = MORT_p3
sim_def_frame$MORT_F_b1 = MORT_F_b1
sim_def_frame$MORT_F_b2 = MORT_F_b2
sim_def_frame$MORT_F_b3 = MORT_F_b3
sim_def_frame$MORT_F_p1 = MORT_F_p1
sim_def_frame$MORT_F_p2 = MORT_F_p2
sim_def_frame$MORT_F_p3 = MORT_F_p3
sim_def_frame$MORT_F_p4 = MORT_F_p4
sim_def_frame$SPLIT_YEAR=SPLIT_YEAR
sim_def_frame$output_dir = output_dir
write.csv(sim_def_frame,paste0(output_dir,"/sim_list.csv"))
foreach(i=1:NUMBER_SIMS) %dopar% launch_sim_split(i)
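######################################################
# Possible refactor (sketch only, not used above): both experiments fill in the
# same sim_def_frame columns by hand; a helper removes the duplication. Assumes
# launch_sim_split() keeps reading its parameters from <output_dir>/sim_list.csv.
make_sim_frame <- function(n_sims, split_year, output_dir, ...) {
  params <- list(...)                       # named scalar parameters, recycled per row
  frame <- data.frame(SIM_ID = 1:n_sims)
  for (name in names(params)) frame[[name]] <- params[[name]]
  frame$SPLIT_YEAR <- split_year
  frame$output_dir <- output_dir
  dir.create(output_dir, showWarnings = FALSE)
  write.csv(frame, paste0(output_dir, "/sim_list.csv"))
  frame
}
# e.g. make_sim_frame(400, SPLIT_YEAR, "e2_range_wide",
#                     FIELD_SIZE = 200, SIM_LENGTH = 700, FIRE_FREQ_1 = 100)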
|
{"hexsha": "27be05c146badeb378b5155648ed2e628e060be7", "size": 4973, "ext": "r", "lang": "R", "max_stars_repo_path": "experiment_2.r", "max_stars_repo_name": "ozjimbob/FireScar", "max_stars_repo_head_hexsha": "da4b1a8c5ef13427e01c057e80c7c09cb3d6882f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-03T05:25:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-03T05:47:55.000Z", "max_issues_repo_path": "experiment_2.r", "max_issues_repo_name": "ozjimbob/FireScar", "max_issues_repo_head_hexsha": "da4b1a8c5ef13427e01c057e80c7c09cb3d6882f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment_2.r", "max_forks_repo_name": "ozjimbob/FireScar", "max_forks_repo_head_hexsha": "da4b1a8c5ef13427e01c057e80c7c09cb3d6882f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6975308642, "max_line_length": 102, "alphanum_fraction": 0.6633822642, "num_tokens": 1501}
|
import numpy as np
import pytest
from brainio.assemblies import BehavioralAssembly
from brainscore.benchmarks.objectnet import Objectnet
from brainscore.model_interface import BrainModel
@pytest.mark.private_access
class TestObjectnet:
def test_groundtruth(self):
benchmark = Objectnet()
source = benchmark._stimulus_set
class GroundTruth(BrainModel):
def start_task(self, task, fitting_stimuli):
assert task == BrainModel.Task.label
assert fitting_stimuli == 'imagenet' # shortcut
def look_at(self, stimuli, number_of_trials=1):
source_image_ids = source['image_id'].values
stimuli_image_ids = stimuli['image_id'].values
                # align source rows to the presented stimulus order via sorted lookup
                sorted_x = source_image_ids[np.argsort(source_image_ids)]
sorted_index = np.searchsorted(sorted_x, stimuli_image_ids)
aligned_source = source.loc[sorted_index]
labels = aligned_source['synset'].values
return BehavioralAssembly([labels], coords={
**{column: ('presentation', aligned_source[column].values) for column in aligned_source.columns},
**{'choice': ('choice', ['dummy'])}}, dims=['choice', 'presentation'])
candidate = GroundTruth()
score = benchmark(candidate)
assert score.sel(aggregation='center') == pytest.approx(1)
|
{"hexsha": "a1bce2802a86168bc10e21a52cecbc127ae9bfea", "size": 1417, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_benchmarks/test_objectnet.py", "max_stars_repo_name": "BonnerLab/brain-score", "max_stars_repo_head_hexsha": "8edbbfcdb8efc5112768bfa2b57746f250f3abd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_benchmarks/test_objectnet.py", "max_issues_repo_name": "BonnerLab/brain-score", "max_issues_repo_head_hexsha": "8edbbfcdb8efc5112768bfa2b57746f250f3abd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_benchmarks/test_objectnet.py", "max_forks_repo_name": "BonnerLab/brain-score", "max_forks_repo_head_hexsha": "8edbbfcdb8efc5112768bfa2b57746f250f3abd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9393939394, "max_line_length": 117, "alphanum_fraction": 0.6499647142, "include": true, "reason": "import numpy", "num_tokens": 278}
|
struct LocalVar
is_mutable :: Ref{Bool} # mutability
is_shared :: Ref{Bool} # shared between different physical scopes/actual functions.
sym :: Symbol
end
const GlobalVar = Symbol
readable_var(sym::Symbol) = LocalVar(Ref(false), Ref(false), sym)
global_var(sym::Symbol) = sym
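# Usage sketch (illustrative, not part of the original file): `readable_var(:x)`
# returns a LocalVar with both flags off; later passes can flip them in place,
# e.g. `v.is_mutable[] = true`, because the fields are Refs.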
|
{"hexsha": "c7f9fe443b7688e6a3b1c12348048d8d3aa1c571", "size": 295, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Variable.jl", "max_stars_repo_name": "devmotion/NameResolution.jl", "max_stars_repo_head_hexsha": "df4997900ea492dfb5bac52278dae4e5e53968ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-08-30T14:30:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-18T16:17:19.000Z", "max_issues_repo_path": "src/Variable.jl", "max_issues_repo_name": "JuliaStaging/NameResolution.jl", "max_issues_repo_head_hexsha": "463ad09584ce823514a58bbc1391c6db51035a0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-09-28T03:21:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-20T16:29:06.000Z", "max_forks_repo_path": "src/Variable.jl", "max_forks_repo_name": "JuliaStaging/NameResolution.jl", "max_forks_repo_head_hexsha": "463ad09584ce823514a58bbc1391c6db51035a0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:29:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:29:08.000Z", "avg_line_length": 26.8181818182, "max_line_length": 89, "alphanum_fraction": 0.6949152542, "num_tokens": 78}
|
(* Title: HOL/Imperative_HOL/ex/Linked_Lists.thy
Author: Lukas Bulwahn, TU Muenchen
*)
section {* Linked Lists by ML references *}
theory Linked_Lists
imports "../Imperative_HOL" "~~/src/HOL/Library/Code_Target_Int"
begin
section {* Definition of Linked Lists *}
setup {* Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a\<Colon>type ref"}) *}
datatype 'a node = Empty | Node 'a "'a node ref"
primrec
node_encode :: "'a\<Colon>countable node \<Rightarrow> nat"
where
"node_encode Empty = 0"
| "node_encode (Node x r) = Suc (to_nat (x, r))"
instance node :: (countable) countable
proof (rule countable_classI [of "node_encode"])
fix x y :: "'a\<Colon>countable node"
show "node_encode x = node_encode y \<Longrightarrow> x = y"
by (induct x, auto, induct y, auto, induct y, auto)
qed
instance node :: (heap) heap ..
primrec make_llist :: "'a\<Colon>heap list \<Rightarrow> 'a node Heap"
where
[simp del]: "make_llist [] = return Empty"
| "make_llist (x#xs) = do { tl \<leftarrow> make_llist xs;
next \<leftarrow> ref tl;
return (Node x next)
}"
partial_function (heap) traverse :: "'a\<Colon>heap node \<Rightarrow> 'a list Heap"
where
[code del]: "traverse l =
(case l of Empty \<Rightarrow> return []
| Node x r \<Rightarrow> do { tl \<leftarrow> Ref.lookup r;
xs \<leftarrow> traverse tl;
return (x#xs)
})"
lemma traverse_simps[code, simp]:
"traverse Empty = return []"
"traverse (Node x r) = do { tl \<leftarrow> Ref.lookup r;
xs \<leftarrow> traverse tl;
return (x#xs)
}"
by (simp_all add: traverse.simps[of "Empty"] traverse.simps[of "Node x r"])
section {* Proving correctness with relational abstraction *}
subsection {* Definition of list_of, list_of', refs_of and refs_of' *}
primrec list_of :: "heap \<Rightarrow> ('a::heap) node \<Rightarrow> 'a list \<Rightarrow> bool"
where
"list_of h r [] = (r = Empty)"
| "list_of h r (a#as) = (case r of Empty \<Rightarrow> False | Node b bs \<Rightarrow> (a = b \<and> list_of h (Ref.get h bs) as))"
definition list_of' :: "heap \<Rightarrow> ('a::heap) node ref \<Rightarrow> 'a list \<Rightarrow> bool"
where
"list_of' h r xs = list_of h (Ref.get h r) xs"
primrec refs_of :: "heap \<Rightarrow> ('a::heap) node \<Rightarrow> 'a node ref list \<Rightarrow> bool"
where
"refs_of h r [] = (r = Empty)"
| "refs_of h r (x#xs) = (case r of Empty \<Rightarrow> False | Node b bs \<Rightarrow> (x = bs) \<and> refs_of h (Ref.get h bs) xs)"
primrec refs_of' :: "heap \<Rightarrow> ('a::heap) node ref \<Rightarrow> 'a node ref list \<Rightarrow> bool"
where
"refs_of' h r [] = False"
| "refs_of' h r (x#xs) = ((x = r) \<and> refs_of h (Ref.get h x) xs)"
subsection {* Properties of these definitions *}
lemma list_of_Empty[simp]: "list_of h Empty xs = (xs = [])"
by (cases xs, auto)
lemma list_of_Node[simp]: "list_of h (Node x ps) xs = (\<exists>xs'. (xs = x # xs') \<and> list_of h (Ref.get h ps) xs')"
by (cases xs, auto)
lemma list_of'_Empty[simp]: "Ref.get h q = Empty \<Longrightarrow> list_of' h q xs = (xs = [])"
unfolding list_of'_def by simp
lemma list_of'_Node[simp]: "Ref.get h q = Node x ps \<Longrightarrow> list_of' h q xs = (\<exists>xs'. (xs = x # xs') \<and> list_of' h ps xs')"
unfolding list_of'_def by simp
lemma list_of'_Nil: "list_of' h q [] \<Longrightarrow> Ref.get h q = Empty"
unfolding list_of'_def by simp
lemma list_of'_Cons:
assumes "list_of' h q (x#xs)"
obtains n where "Ref.get h q = Node x n" and "list_of' h n xs"
using assms unfolding list_of'_def by (auto split: node.split_asm)
lemma refs_of_Empty[simp] : "refs_of h Empty xs = (xs = [])"
by (cases xs, auto)
lemma refs_of_Node[simp]: "refs_of h (Node x ps) xs = (\<exists>prs. xs = ps # prs \<and> refs_of h (Ref.get h ps) prs)"
by (cases xs, auto)
lemma refs_of'_def': "refs_of' h p ps = (\<exists>prs. (ps = (p # prs)) \<and> refs_of h (Ref.get h p) prs)"
by (cases ps, auto)
lemma refs_of'_Node:
assumes "refs_of' h p xs"
assumes "Ref.get h p = Node x pn"
obtains pnrs
where "xs = p # pnrs" and "refs_of' h pn pnrs"
using assms
unfolding refs_of'_def' by auto
lemma list_of_is_fun: "\<lbrakk> list_of h n xs; list_of h n ys\<rbrakk> \<Longrightarrow> xs = ys"
proof (induct xs arbitrary: ys n)
case Nil thus ?case by auto
next
case (Cons x xs')
thus ?case
by (cases ys, auto split: node.split_asm)
qed
lemma refs_of_is_fun: "\<lbrakk> refs_of h n xs; refs_of h n ys\<rbrakk> \<Longrightarrow> xs = ys"
proof (induct xs arbitrary: ys n)
case Nil thus ?case by auto
next
case (Cons x xs')
thus ?case
by (cases ys, auto split: node.split_asm)
qed
lemma refs_of'_is_fun: "\<lbrakk> refs_of' h p as; refs_of' h p bs \<rbrakk> \<Longrightarrow> as = bs"
unfolding refs_of'_def' by (auto dest: refs_of_is_fun)
lemma list_of_refs_of_HOL:
assumes "list_of h r xs"
shows "\<exists>rs. refs_of h r rs"
using assms
proof (induct xs arbitrary: r)
case Nil thus ?case by auto
next
case (Cons x xs')
thus ?case
by (cases r, auto)
qed
lemma list_of_refs_of:
assumes "list_of h r xs"
obtains rs where "refs_of h r rs"
using list_of_refs_of_HOL[OF assms]
by auto
lemma list_of'_refs_of'_HOL:
assumes "list_of' h r xs"
shows "\<exists>rs. refs_of' h r rs"
proof -
from assms obtain rs' where "refs_of h (Ref.get h r) rs'"
unfolding list_of'_def by (rule list_of_refs_of)
thus ?thesis unfolding refs_of'_def' by auto
qed
lemma list_of'_refs_of':
assumes "list_of' h r xs"
obtains rs where "refs_of' h r rs"
using list_of'_refs_of'_HOL[OF assms]
by auto
lemma refs_of_list_of_HOL:
assumes "refs_of h r rs"
shows "\<exists>xs. list_of h r xs"
using assms
proof (induct rs arbitrary: r)
case Nil thus ?case by auto
next
case (Cons r rs')
thus ?case
by (cases r, auto)
qed
lemma refs_of_list_of:
assumes "refs_of h r rs"
obtains xs where "list_of h r xs"
using refs_of_list_of_HOL[OF assms]
by auto
lemma refs_of'_list_of'_HOL:
assumes "refs_of' h r rs"
shows "\<exists>xs. list_of' h r xs"
using assms
unfolding list_of'_def refs_of'_def'
by (auto intro: refs_of_list_of)
lemma refs_of'_list_of':
assumes "refs_of' h r rs"
obtains xs where "list_of' h r xs"
using refs_of'_list_of'_HOL[OF assms]
by auto
lemma refs_of'E: "refs_of' h q rs \<Longrightarrow> q \<in> set rs"
unfolding refs_of'_def' by auto
lemma list_of'_refs_of'2:
assumes "list_of' h r xs"
shows "\<exists>rs'. refs_of' h r (r#rs')"
proof -
from assms obtain rs where "refs_of' h r rs" by (rule list_of'_refs_of')
thus ?thesis by (auto simp add: refs_of'_def')
qed
subsection {* More complicated properties of these predicates *}
lemma list_of_append:
"list_of h n (as @ bs) \<Longrightarrow> \<exists>m. list_of h m bs"
apply (induct as arbitrary: n)
apply auto
apply (case_tac n)
apply auto
done
lemma refs_of_append: "refs_of h n (as @ bs) \<Longrightarrow> \<exists>m. refs_of h m bs"
apply (induct as arbitrary: n)
apply auto
apply (case_tac n)
apply auto
done
lemma refs_of_next:
assumes "refs_of h (Ref.get h p) rs"
shows "p \<notin> set rs"
proof (rule ccontr)
assume a: "\<not> (p \<notin> set rs)"
from this obtain as bs where split:"rs = as @ p # bs" by (fastforce dest: split_list)
with assms obtain q where "refs_of h q (p # bs)" by (fast dest: refs_of_append)
with assms split show "False"
by (cases q,auto dest: refs_of_is_fun)
qed
lemma refs_of_distinct: "refs_of h p rs \<Longrightarrow> distinct rs"
proof (induct rs arbitrary: p)
case Nil thus ?case by simp
next
case (Cons r rs')
thus ?case
by (cases p, auto simp add: refs_of_next)
qed
lemma refs_of'_distinct: "refs_of' h p rs \<Longrightarrow> distinct rs"
unfolding refs_of'_def'
by (fastforce simp add: refs_of_distinct refs_of_next)
subsection {* Interaction of these predicates with our heap transitions *}
lemma list_of_set_ref: "refs_of h q rs \<Longrightarrow> p \<notin> set rs \<Longrightarrow> list_of (Ref.set p v h) q as = list_of h q as"
proof (induct as arbitrary: q rs)
case Nil thus ?case by simp
next
case (Cons x xs)
thus ?case
proof (cases q)
case Empty thus ?thesis by auto
next
case (Node a ref)
from Cons(2) Node obtain rs' where 1: "refs_of h (Ref.get h ref) rs'" and rs_rs': "rs = ref # rs'" by auto
from Cons(3) rs_rs' have "ref \<noteq> p" by fastforce
hence ref_eq: "Ref.get (Ref.set p v h) ref = (Ref.get h ref)" by (auto simp add: Ref.get_set_neq)
from rs_rs' Cons(3) have 2: "p \<notin> set rs'" by simp
from Cons.hyps[OF 1 2] Node ref_eq show ?thesis by simp
qed
qed
lemma refs_of_set_ref: "refs_of h q rs \<Longrightarrow> p \<notin> set rs \<Longrightarrow> refs_of (Ref.set p v h) q as = refs_of h q as"
proof (induct as arbitrary: q rs)
case Nil thus ?case by simp
next
case (Cons x xs)
thus ?case
proof (cases q)
case Empty thus ?thesis by auto
next
case (Node a ref)
from Cons(2) Node obtain rs' where 1: "refs_of h (Ref.get h ref) rs'" and rs_rs': "rs = ref # rs'" by auto
from Cons(3) rs_rs' have "ref \<noteq> p" by fastforce
hence ref_eq: "Ref.get (Ref.set p v h) ref = (Ref.get h ref)" by (auto simp add: Ref.get_set_neq)
from rs_rs' Cons(3) have 2: "p \<notin> set rs'" by simp
from Cons.hyps[OF 1 2] Node ref_eq show ?thesis by auto
qed
qed
lemma refs_of_set_ref2: "refs_of (Ref.set p v h) q rs \<Longrightarrow> p \<notin> set rs \<Longrightarrow> refs_of (Ref.set p v h) q rs = refs_of h q rs"
proof (induct rs arbitrary: q)
case Nil thus ?case by simp
next
case (Cons x xs)
thus ?case
proof (cases q)
case Empty thus ?thesis by auto
next
case (Node a ref)
from Cons(2) Node have 1:"refs_of (Ref.set p v h) (Ref.get (Ref.set p v h) ref) xs" and x_ref: "x = ref" by auto
from Cons(3) this have "ref \<noteq> p" by fastforce
hence ref_eq: "Ref.get (Ref.set p v h) ref = (Ref.get h ref)" by (auto simp add: Ref.get_set_neq)
from Cons(3) have 2: "p \<notin> set xs" by simp
with Cons.hyps 1 2 Node ref_eq show ?thesis
by simp
qed
qed
lemma list_of'_set_ref:
assumes "refs_of' h q rs"
assumes "p \<notin> set rs"
shows "list_of' (Ref.set p v h) q as = list_of' h q as"
proof -
from assms have "q \<noteq> p" by (auto simp only: dest!: refs_of'E)
with assms show ?thesis
unfolding list_of'_def refs_of'_def'
by (auto simp add: list_of_set_ref)
qed
lemma list_of'_set_next_ref_Node[simp]:
assumes "list_of' h r xs"
assumes "Ref.get h p = Node x r'"
assumes "refs_of' h r rs"
assumes "p \<notin> set rs"
shows "list_of' (Ref.set p (Node x r) h) p (x#xs) = list_of' h r xs"
using assms
unfolding list_of'_def refs_of'_def'
by (auto simp add: list_of_set_ref Ref.noteq_sym)
lemma refs_of'_set_ref:
assumes "refs_of' h q rs"
assumes "p \<notin> set rs"
shows "refs_of' (Ref.set p v h) q as = refs_of' h q as"
using assms
proof -
from assms have "q \<noteq> p" by (auto simp only: dest!: refs_of'E)
with assms show ?thesis
unfolding refs_of'_def'
by (auto simp add: refs_of_set_ref)
qed
lemma refs_of'_set_ref2:
assumes "refs_of' (Ref.set p v h) q rs"
assumes "p \<notin> set rs"
shows "refs_of' (Ref.set p v h) q as = refs_of' h q as"
using assms
proof -
from assms have "q \<noteq> p" by (auto simp only: dest!: refs_of'E)
with assms show ?thesis
unfolding refs_of'_def'
apply auto
apply (subgoal_tac "prs = prsa")
apply (insert refs_of_set_ref2[of p v h "Ref.get h q"])
apply (erule_tac x="prs" in meta_allE)
apply auto
apply (auto dest: refs_of_is_fun)
done
qed
lemma refs_of'_set_next_ref:
assumes "Ref.get h1 p = Node x pn"
assumes "refs_of' (Ref.set p (Node x r1) h1) p rs"
obtains r1s where "rs = (p#r1s)" and "refs_of' h1 r1 r1s"
proof -
from assms refs_of'_distinct[OF assms(2)] have "\<exists> r1s. rs = (p # r1s) \<and> refs_of' h1 r1 r1s"
apply -
unfolding refs_of'_def'[of _ p]
apply (auto, frule refs_of_set_ref2) by (auto dest: Ref.noteq_sym)
with assms that show thesis by auto
qed
section {* Proving make_llist and traverse correct *}
lemma refs_of_invariant:
assumes "refs_of h (r::('a::heap) node) xs"
assumes "\<forall>refs. refs_of h r refs \<longrightarrow> (\<forall>ref \<in> set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
shows "refs_of h' r xs"
using assms
proof (induct xs arbitrary: r)
case Nil thus ?case by simp
next
case (Cons x xs')
from Cons(2) obtain v where Node: "r = Node v x" by (cases r, auto)
from Cons(2) Node have refs_of_next: "refs_of h (Ref.get h x) xs'" by simp
from Cons(2-3) Node have ref_eq: "Ref.get h x = Ref.get h' x" by auto
from ref_eq refs_of_next have 1: "refs_of h (Ref.get h' x) xs'" by simp
from Cons(2) Cons(3) have "\<forall>ref \<in> set xs'. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref"
by fastforce
with Cons(3) 1 have 2: "\<forall>refs. refs_of h (Ref.get h' x) refs \<longrightarrow> (\<forall>ref \<in> set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
by (fastforce dest: refs_of_is_fun)
from Cons.hyps[OF 1 2] have "refs_of h' (Ref.get h' x) xs'" .
with Node show ?case by simp
qed
lemma refs_of'_invariant:
assumes "refs_of' h r xs"
assumes "\<forall>refs. refs_of' h r refs \<longrightarrow> (\<forall>ref \<in> set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
shows "refs_of' h' r xs"
using assms
proof -
from assms obtain prs where refs:"refs_of h (Ref.get h r) prs" and xs_def: "xs = r # prs"
unfolding refs_of'_def' by auto
from xs_def assms have x_eq: "Ref.get h r = Ref.get h' r" by fastforce
from refs assms xs_def have 2: "\<forall>refs. refs_of h (Ref.get h r) refs \<longrightarrow>
(\<forall>ref\<in>set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
by (fastforce dest: refs_of_is_fun)
from refs_of_invariant [OF refs 2] xs_def x_eq show ?thesis
unfolding refs_of'_def' by auto
qed
lemma list_of_invariant:
assumes "list_of h (r::('a::heap) node) xs"
assumes "\<forall>refs. refs_of h r refs \<longrightarrow> (\<forall>ref \<in> set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
shows "list_of h' r xs"
using assms
proof (induct xs arbitrary: r)
case Nil thus ?case by simp
next
case (Cons x xs')
from Cons(2) obtain ref where Node: "r = Node x ref"
by (cases r, auto)
from Cons(2) obtain rs where rs_def: "refs_of h r rs" by (rule list_of_refs_of)
from Node rs_def obtain rss where refs_of: "refs_of h r (ref#rss)" and rss_def: "rs = ref#rss" by auto
from Cons(3) Node refs_of have ref_eq: "Ref.get h ref = Ref.get h' ref"
by auto
from Cons(2) ref_eq Node have 1: "list_of h (Ref.get h' ref) xs'" by simp
from refs_of Node ref_eq have refs_of_ref: "refs_of h (Ref.get h' ref) rss" by simp
from Cons(3) rs_def have rs_heap_eq: "\<forall>ref\<in>set rs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref" by simp
from refs_of_ref rs_heap_eq rss_def have 2: "\<forall>refs. refs_of h (Ref.get h' ref) refs \<longrightarrow>
(\<forall>ref\<in>set refs. Ref.present h ref \<and> Ref.present h' ref \<and> Ref.get h ref = Ref.get h' ref)"
by (auto dest: refs_of_is_fun)
from Cons(1)[OF 1 2]
have "list_of h' (Ref.get h' ref) xs'" .
with Node show ?case
unfolding list_of'_def
by simp
qed
lemma effect_ref:
assumes "effect (ref v) h h' x"
obtains "Ref.get h' x = v"
and "\<not> Ref.present h x"
and "Ref.present h' x"
and "\<forall>y. Ref.present h y \<longrightarrow> Ref.get h y = Ref.get h' y"
(* and "lim h' = Suc (lim h)" *)
and "\<forall>y. Ref.present h y \<longrightarrow> Ref.present h' y"
using assms
unfolding Ref.ref_def
apply (elim effect_heapE)
unfolding Ref.alloc_def
apply (simp add: Let_def)
unfolding Ref.present_def
apply auto
unfolding Ref.get_def Ref.set_def
apply auto
done
lemma make_llist:
assumes "effect (make_llist xs) h h' r"
shows "list_of h' r xs \<and> (\<forall>rs. refs_of h' r rs \<longrightarrow> (\<forall>ref \<in> (set rs). Ref.present h' ref))"
using assms
proof (induct xs arbitrary: h h' r)
case Nil thus ?case by (auto elim: effect_returnE simp add: make_llist.simps)
next
case (Cons x xs')
from Cons.prems obtain h1 r1 r' where make_llist: "effect (make_llist xs') h h1 r1"
and effect_refnew:"effect (ref r1) h1 h' r'" and Node: "r = Node x r'"
unfolding make_llist.simps
by (auto elim!: effect_bindE effect_returnE)
from Cons.hyps[OF make_llist] have list_of_h1: "list_of h1 r1 xs'" ..
from Cons.hyps[OF make_llist] obtain rs' where rs'_def: "refs_of h1 r1 rs'" by (auto intro: list_of_refs_of)
from Cons.hyps[OF make_llist] rs'_def have refs_present: "\<forall>ref\<in>set rs'. Ref.present h1 ref" by simp
from effect_refnew rs'_def refs_present have refs_unchanged: "\<forall>refs. refs_of h1 r1 refs \<longrightarrow>
(\<forall>ref\<in>set refs. Ref.present h1 ref \<and> Ref.present h' ref \<and> Ref.get h1 ref = Ref.get h' ref)"
by (auto elim!: effect_ref dest: refs_of_is_fun)
with list_of_invariant[OF list_of_h1 refs_unchanged] Node effect_refnew have fstgoal: "list_of h' r (x # xs')"
unfolding list_of.simps
by (auto elim!: effect_refE)
from refs_unchanged rs'_def have refs_still_present: "\<forall>ref\<in>set rs'. Ref.present h' ref" by auto
from refs_of_invariant[OF rs'_def refs_unchanged] refs_unchanged Node effect_refnew refs_still_present
have sndgoal: "\<forall>rs. refs_of h' r rs \<longrightarrow> (\<forall>ref\<in>set rs. Ref.present h' ref)"
by (fastforce elim!: effect_refE dest: refs_of_is_fun)
from fstgoal sndgoal show ?case ..
qed
lemma traverse: "list_of h n r \<Longrightarrow> effect (traverse n) h h r"
proof (induct r arbitrary: n)
case Nil
thus ?case
by (auto intro: effect_returnI)
next
case (Cons x xs)
thus ?case
apply (cases n, auto)
by (auto intro!: effect_bindI effect_returnI effect_lookupI)
qed
lemma traverse_make_llist':
assumes effect: "effect (make_llist xs \<guillemotright>= traverse) h h' r"
shows "r = xs"
proof -
from effect obtain h1 r1
where makell: "effect (make_llist xs) h h1 r1"
and trav: "effect (traverse r1) h1 h' r"
by (auto elim!: effect_bindE)
from make_llist[OF makell] have "list_of h1 r1 xs" ..
from traverse [OF this] trav show ?thesis
using effect_deterministic by fastforce
qed
section {* Proving correctness of in-place reversal *}
subsection {* Definition of in-place reversal *}
partial_function (heap) rev' :: "('a::heap) node ref \<Rightarrow> 'a node ref \<Rightarrow> 'a node ref Heap"
where
[code]: "rev' q p =
do {
v \<leftarrow> !p;
(case v of
Empty \<Rightarrow> return q
| Node x next \<Rightarrow>
do {
p := Node x q;
rev' p next
})
}"
primrec rev :: "('a:: heap) node \<Rightarrow> 'a node Heap"
where
"rev Empty = return Empty"
| "rev (Node x n) = do { q \<leftarrow> ref Empty; p \<leftarrow> ref (Node x n); v \<leftarrow> rev' q p; !v }"
subsection {* Correctness Proof *}
lemma rev'_invariant:
assumes "effect (rev' q p) h h' v"
assumes "list_of' h q qs"
assumes "list_of' h p ps"
assumes "\<forall>qrs prs. refs_of' h q qrs \<and> refs_of' h p prs \<longrightarrow> set prs \<inter> set qrs = {}"
shows "\<exists>vs. list_of' h' v vs \<and> vs = (List.rev ps) @ qs"
using assms
proof (induct ps arbitrary: qs p q h)
case Nil
thus ?case
unfolding rev'.simps[of q p] list_of'_def
by (auto elim!: effect_bindE effect_lookupE effect_returnE)
next
case (Cons x xs)
(*"LinkedList.list_of h' (get_ref v h') (List.rev xs @ x # qsa)"*)
from Cons(4) obtain ref where
p_is_Node: "Ref.get h p = Node x ref"
(*and "ref_present ref h"*)
and list_of'_ref: "list_of' h ref xs"
unfolding list_of'_def by (cases "Ref.get h p", auto)
from p_is_Node Cons(2) have effect_rev': "effect (rev' p ref) (Ref.set p (Node x q) h) h' v"
by (auto simp add: rev'.simps [of q p] elim!: effect_bindE effect_lookupE effect_updateE)
from Cons(3) obtain qrs where qrs_def: "refs_of' h q qrs" by (elim list_of'_refs_of')
from Cons(4) obtain prs where prs_def: "refs_of' h p prs" by (elim list_of'_refs_of')
from qrs_def prs_def Cons(5) have distinct_pointers: "set qrs \<inter> set prs = {}" by fastforce
from qrs_def prs_def distinct_pointers refs_of'E have p_notin_qrs: "p \<notin> set qrs" by fastforce
from Cons(3) qrs_def this have 1: "list_of' (Ref.set p (Node x q) h) p (x#qs)"
unfolding list_of'_def
apply (simp)
unfolding list_of'_def[symmetric]
by (simp add: list_of'_set_ref)
from list_of'_refs_of'2[OF Cons(4)] p_is_Node prs_def obtain refs where refs_def: "refs_of' h ref refs" and prs_refs: "prs = p # refs"
unfolding refs_of'_def' by auto
from prs_refs prs_def have p_not_in_refs: "p \<notin> set refs"
by (fastforce dest!: refs_of'_distinct)
with refs_def p_is_Node list_of'_ref have 2: "list_of' (Ref.set p (Node x q) h) ref xs"
by (auto simp add: list_of'_set_ref)
from p_notin_qrs qrs_def have refs_of1: "refs_of' (Ref.set p (Node x q) h) p (p#qrs)"
unfolding refs_of'_def'
apply (simp)
unfolding refs_of'_def'[symmetric]
by (simp add: refs_of'_set_ref)
from p_not_in_refs p_is_Node refs_def have refs_of2: "refs_of' (Ref.set p (Node x q) h) ref refs"
by (simp add: refs_of'_set_ref)
from p_not_in_refs refs_of1 refs_of2 distinct_pointers prs_refs have 3: "\<forall>qrs prs. refs_of' (Ref.set p (Node x q) h) p qrs \<and> refs_of' (Ref.set p (Node x q) h) ref prs \<longrightarrow> set prs \<inter> set qrs = {}"
apply - apply (rule allI)+ apply (rule impI) apply (erule conjE)
apply (drule refs_of'_is_fun) back back apply assumption
apply (drule refs_of'_is_fun) back back apply assumption
apply auto done
from Cons.hyps [OF effect_rev' 1 2 3] show ?case by simp
qed
lemma rev_correctness:
assumes list_of_h: "list_of h r xs"
assumes validHeap: "\<forall>refs. refs_of h r refs \<longrightarrow> (\<forall>r \<in> set refs. Ref.present h r)"
assumes effect_rev: "effect (rev r) h h' r'"
shows "list_of h' r' (List.rev xs)"
using assms
proof (cases r)
case Empty
with list_of_h effect_rev show ?thesis
by (auto simp add: list_of_Empty elim!: effect_returnE)
next
case (Node x ps)
with effect_rev obtain p q h1 h2 h3 v where
init: "effect (ref Empty) h h1 q"
"effect (ref (Node x ps)) h1 h2 p"
and effect_rev':"effect (rev' q p) h2 h3 v"
and lookup: "effect (!v) h3 h' r'"
using rev.simps
by (auto elim!: effect_bindE)
from init have a1:"list_of' h2 q []"
unfolding list_of'_def
by (auto elim!: effect_ref)
from list_of_h obtain refs where refs_def: "refs_of h r refs" by (rule list_of_refs_of)
from validHeap init refs_def have heap_eq: "\<forall>refs. refs_of h r refs \<longrightarrow> (\<forall>ref\<in>set refs. Ref.present h ref \<and> Ref.present h2 ref \<and> Ref.get h ref = Ref.get h2 ref)"
by (fastforce elim!: effect_ref dest: refs_of_is_fun)
from list_of_invariant[OF list_of_h heap_eq] have "list_of h2 r xs" .
from init this Node have a2: "list_of' h2 p xs"
apply -
unfolding list_of'_def
apply (auto elim!: effect_refE)
done
from init have refs_of_q: "refs_of' h2 q [q]"
by (auto elim!: effect_ref)
from refs_def Node have refs_of'_ps: "refs_of' h ps refs"
by (auto simp add: refs_of'_def'[symmetric])
from validHeap refs_def have all_ref_present: "\<forall>r\<in>set refs. Ref.present h r" by simp
from init refs_of'_ps this
have heap_eq: "\<forall>refs. refs_of' h ps refs \<longrightarrow> (\<forall>ref\<in>set refs. Ref.present h ref \<and> Ref.present h2 ref \<and> Ref.get h ref = Ref.get h2 ref)"
by (auto elim!: effect_ref [where ?'a="'a node", where ?'b="'a node", where ?'c="'a node"] dest: refs_of'_is_fun)
from refs_of'_invariant[OF refs_of'_ps this] have "refs_of' h2 ps refs" .
with init have refs_of_p: "refs_of' h2 p (p#refs)"
by (auto elim!: effect_refE simp add: refs_of'_def')
with init all_ref_present have q_is_new: "q \<notin> set (p#refs)"
by (auto elim!: effect_refE intro!: Ref.noteq_I)
from refs_of_p refs_of_q q_is_new have a3: "\<forall>qrs prs. refs_of' h2 q qrs \<and> refs_of' h2 p prs \<longrightarrow> set prs \<inter> set qrs = {}"
by (fastforce simp only: list.set dest: refs_of'_is_fun)
from rev'_invariant [OF effect_rev' a1 a2 a3] have "list_of h3 (Ref.get h3 v) (List.rev xs)"
unfolding list_of'_def by auto
with lookup show ?thesis
by (auto elim: effect_lookupE)
qed
section {* The merge function on Linked Lists *}
text {* We also prove merge correct. *}
text {* First, we define merge on lists in a natural way. *}
fun Lmerge :: "('a::ord) list \<Rightarrow> 'a list \<Rightarrow> 'a list"
where
"Lmerge (x#xs) (y#ys) =
(if x \<le> y then x # Lmerge xs (y#ys) else y # Lmerge (x#xs) ys)"
| "Lmerge [] ys = ys"
| "Lmerge xs [] = xs"
subsection {* Definition of merge function *}
partial_function (heap) merge :: "('a::{heap, ord}) node ref \<Rightarrow> 'a node ref \<Rightarrow> 'a node ref Heap"
where
[code]: "merge p q = (do { v \<leftarrow> !p; w \<leftarrow> !q;
(case v of Empty \<Rightarrow> return q
| Node valp np \<Rightarrow>
(case w of Empty \<Rightarrow> return p
| Node valq nq \<Rightarrow>
if (valp \<le> valq) then do {
npq \<leftarrow> merge np q;
p := Node valp npq;
return p }
else do {
pnq \<leftarrow> merge p nq;
q := Node valq pnq;
return q }))})"
lemma if_return: "(if P then return x else return y) = return (if P then x else y)"
by auto
lemma if_distrib_App: "(if P then f else g) x = (if P then f x else g x)"
by auto
lemma redundant_if: "(if P then (if P then x else z) else y) = (if P then x else y)"
"(if P then x else (if P then z else y)) = (if P then x else y)"
by auto
lemma sum_distrib: "case_sum fl fr (case x of Empty \<Rightarrow> y | Node v n \<Rightarrow> (z v n)) = (case x of Empty \<Rightarrow> case_sum fl fr y | Node v n \<Rightarrow> case_sum fl fr (z v n))"
by (cases x) auto
subsection {* Induction refinement by applying the abstraction function to our induct rule *}
text {* From our original induction rule Lmerge.induct, we derive a new rule with our list_of' predicate. *}
lemma merge_induct2:
assumes "list_of' h (p::'a::{heap, ord} node ref) xs"
assumes "list_of' h q ys"
assumes "\<And> ys p q. \<lbrakk> list_of' h p []; list_of' h q ys; Ref.get h p = Empty \<rbrakk> \<Longrightarrow> P p q [] ys"
assumes "\<And> x xs' p q pn. \<lbrakk> list_of' h p (x#xs'); list_of' h q []; Ref.get h p = Node x pn; Ref.get h q = Empty \<rbrakk> \<Longrightarrow> P p q (x#xs') []"
assumes "\<And> x xs' y ys' p q pn qn.
\<lbrakk> list_of' h p (x#xs'); list_of' h q (y#ys'); Ref.get h p = Node x pn; Ref.get h q = Node y qn;
x \<le> y; P pn q xs' (y#ys') \<rbrakk>
\<Longrightarrow> P p q (x#xs') (y#ys')"
assumes "\<And> x xs' y ys' p q pn qn.
\<lbrakk> list_of' h p (x#xs'); list_of' h q (y#ys'); Ref.get h p = Node x pn; Ref.get h q = Node y qn;
\<not> x \<le> y; P p qn (x#xs') ys'\<rbrakk>
\<Longrightarrow> P p q (x#xs') (y#ys')"
shows "P p q xs ys"
using assms(1-2)
proof (induct xs ys arbitrary: p q rule: Lmerge.induct)
case (2 ys)
from 2(1) have "Ref.get h p = Empty" unfolding list_of'_def by simp
with 2(1-2) assms(3) show ?case by blast
next
case (3 x xs')
from 3(1) obtain pn where Node: "Ref.get h p = Node x pn" by (rule list_of'_Cons)
from 3(2) have "Ref.get h q = Empty" unfolding list_of'_def by simp
with Node 3(1-2) assms(4) show ?case by blast
next
case (1 x xs' y ys')
from 1(3) obtain pn where pNode:"Ref.get h p = Node x pn"
and list_of'_pn: "list_of' h pn xs'" by (rule list_of'_Cons)
from 1(4) obtain qn where qNode:"Ref.get h q = Node y qn"
and list_of'_qn: "list_of' h qn ys'" by (rule list_of'_Cons)
show ?case
proof (cases "x \<le> y")
case True
from 1(1)[OF True list_of'_pn 1(4)] assms(5) 1(3-4) pNode qNode True
show ?thesis by blast
next
case False
from 1(2)[OF False 1(3) list_of'_qn] assms(6) 1(3-4) pNode qNode False
show ?thesis by blast
qed
qed
text {* Secondly, we add the effect statement to the premises, and derive the effect statements for the individual cases, which we then eliminate with our effect elim rules. *}
lemma merge_induct3:
assumes "list_of' h p xs"
assumes "list_of' h q ys"
assumes "effect (merge p q) h h' r"
assumes "\<And> ys p q. \<lbrakk> list_of' h p []; list_of' h q ys; Ref.get h p = Empty \<rbrakk> \<Longrightarrow> P p q h h q [] ys"
assumes "\<And> x xs' p q pn. \<lbrakk> list_of' h p (x#xs'); list_of' h q []; Ref.get h p = Node x pn; Ref.get h q = Empty \<rbrakk> \<Longrightarrow> P p q h h p (x#xs') []"
assumes "\<And> x xs' y ys' p q pn qn h1 r1 h'.
\<lbrakk> list_of' h p (x#xs'); list_of' h q (y#ys');Ref.get h p = Node x pn; Ref.get h q = Node y qn;
x \<le> y; effect (merge pn q) h h1 r1 ; P pn q h h1 r1 xs' (y#ys'); h' = Ref.set p (Node x r1) h1 \<rbrakk>
\<Longrightarrow> P p q h h' p (x#xs') (y#ys')"
assumes "\<And> x xs' y ys' p q pn qn h1 r1 h'.
\<lbrakk> list_of' h p (x#xs'); list_of' h q (y#ys'); Ref.get h p = Node x pn; Ref.get h q = Node y qn;
\<not> x \<le> y; effect (merge p qn) h h1 r1; P p qn h h1 r1 (x#xs') ys'; h' = Ref.set q (Node y r1) h1 \<rbrakk>
\<Longrightarrow> P p q h h' q (x#xs') (y#ys')"
shows "P p q h h' r xs ys"
using assms(3)
proof (induct arbitrary: h' r rule: merge_induct2[OF assms(1) assms(2)])
case (1 ys p q)
from 1(3-4) have "h = h' \<and> r = q"
unfolding merge.simps[of p q]
by (auto elim!: effect_lookupE effect_bindE effect_returnE)
with assms(4)[OF 1(1) 1(2) 1(3)] show ?case by simp
next
case (2 x xs' p q pn)
from 2(3-5) have "h = h' \<and> r = p"
unfolding merge.simps[of p q]
by (auto elim!: effect_lookupE effect_bindE effect_returnE)
with assms(5)[OF 2(1-4)] show ?case by simp
next
case (3 x xs' y ys' p q pn qn)
from 3(3-5) 3(7) obtain h1 r1 where
1: "effect (merge pn q) h h1 r1"
and 2: "h' = Ref.set p (Node x r1) h1 \<and> r = p"
unfolding merge.simps[of p q]
by (auto elim!: effect_lookupE effect_bindE effect_returnE effect_ifE effect_updateE)
from 3(6)[OF 1] assms(6) [OF 3(1-5)] 1 2 show ?case by simp
next
case (4 x xs' y ys' p q pn qn)
from 4(3-5) 4(7) obtain h1 r1 where
1: "effect (merge p qn) h h1 r1"
and 2: "h' = Ref.set q (Node y r1) h1 \<and> r = q"
unfolding merge.simps[of p q]
by (auto elim!: effect_lookupE effect_bindE effect_returnE effect_ifE effect_updateE)
from 4(6)[OF 1] assms(7) [OF 4(1-5)] 1 2 show ?case by simp
qed
subsection {* Proving merge correct *}
text {* As many parts of the following three proofs are identical, we could actually move the
same reasoning into an extended induction rule. *}
lemma merge_unchanged:
assumes "refs_of' h p xs"
assumes "refs_of' h q ys"
assumes "effect (merge p q) h h' r'"
assumes "set xs \<inter> set ys = {}"
assumes "r \<notin> set xs \<union> set ys"
shows "Ref.get h r = Ref.get h' r"
proof -
from assms(1) obtain ps where ps_def: "list_of' h p ps" by (rule refs_of'_list_of')
from assms(2) obtain qs where qs_def: "list_of' h q qs" by (rule refs_of'_list_of')
show ?thesis using assms(1) assms(2) assms(4) assms(5)
proof (induct arbitrary: xs ys r rule: merge_induct3[OF ps_def qs_def assms(3)])
case 1 thus ?case by simp
next
case 2 thus ?case by simp
next
case (3 x xs' y ys' p q pn qn h1 r1 h' xs ys r)
from 3(9) 3(3) obtain pnrs
where pnrs_def: "xs = p#pnrs"
and refs_of'_pn: "refs_of' h pn pnrs"
by (rule refs_of'_Node)
with 3(12) have r_in: "r \<notin> set pnrs \<union> set ys" by auto
from pnrs_def 3(12) have "r \<noteq> p" by auto
with 3(11) 3(12) pnrs_def refs_of'_distinct[OF 3(9)] have p_in: "p \<notin> set pnrs \<union> set ys" by auto
from 3(11) pnrs_def have no_inter: "set pnrs \<inter> set ys = {}" by auto
from 3(7)[OF refs_of'_pn 3(10) this p_in] 3(3) have p_is_Node: "Ref.get h1 p = Node x pn"
by simp
from 3(7)[OF refs_of'_pn 3(10) no_inter r_in] 3(8) `r \<noteq> p` show ?case
by simp
next
case (4 x xs' y ys' p q pn qn h1 r1 h' xs ys r)
from 4(10) 4(4) obtain qnrs
where qnrs_def: "ys = q#qnrs"
and refs_of'_qn: "refs_of' h qn qnrs"
by (rule refs_of'_Node)
with 4(12) have r_in: "r \<notin> set xs \<union> set qnrs" by auto
from qnrs_def 4(12) have "r \<noteq> q" by auto
with 4(11) 4(12) qnrs_def refs_of'_distinct[OF 4(10)] have q_in: "q \<notin> set xs \<union> set qnrs" by auto
from 4(11) qnrs_def have no_inter: "set xs \<inter> set qnrs = {}" by auto
from 4(7)[OF 4(9) refs_of'_qn this q_in] 4(4) have q_is_Node: "Ref.get h1 q = Node y qn" by simp
from 4(7)[OF 4(9) refs_of'_qn no_inter r_in] 4(8) `r \<noteq> q` show ?case
by simp
qed
qed
lemma refs_of'_merge:
assumes "refs_of' h p xs"
assumes "refs_of' h q ys"
assumes "effect (merge p q) h h' r"
assumes "set xs \<inter> set ys = {}"
assumes "refs_of' h' r rs"
shows "set rs \<subseteq> set xs \<union> set ys"
proof -
from assms(1) obtain ps where ps_def: "list_of' h p ps" by (rule refs_of'_list_of')
from assms(2) obtain qs where qs_def: "list_of' h q qs" by (rule refs_of'_list_of')
show ?thesis using assms(1) assms(2) assms(4) assms(5)
proof (induct arbitrary: xs ys rs rule: merge_induct3[OF ps_def qs_def assms(3)])
case 1
from 1(5) 1(7) have "rs = ys" by (fastforce simp add: refs_of'_is_fun)
thus ?case by auto
next
case 2
from 2(5) 2(8) have "rs = xs" by (auto simp add: refs_of'_is_fun)
thus ?case by auto
next
case (3 x xs' y ys' p q pn qn h1 r1 h' xs ys rs)
from 3(9) 3(3) obtain pnrs
where pnrs_def: "xs = p#pnrs"
and refs_of'_pn: "refs_of' h pn pnrs"
by (rule refs_of'_Node)
from 3(10) 3(9) 3(11) pnrs_def refs_of'_distinct[OF 3(9)] have p_in: "p \<notin> set pnrs \<union> set ys" by auto
from 3(11) pnrs_def have no_inter: "set pnrs \<inter> set ys = {}" by auto
from merge_unchanged[OF refs_of'_pn 3(10) 3(6) no_inter p_in] have p_stays: "Ref.get h1 p = Ref.get h p" ..
from 3 p_stays obtain r1s
where rs_def: "rs = p#r1s" and refs_of'_r1:"refs_of' h1 r1 r1s"
by (auto elim: refs_of'_set_next_ref)
from 3(7)[OF refs_of'_pn 3(10) no_inter refs_of'_r1] rs_def pnrs_def show ?case by auto
next
case (4 x xs' y ys' p q pn qn h1 r1 h' xs ys rs)
from 4(10) 4(4) obtain qnrs
where qnrs_def: "ys = q#qnrs"
and refs_of'_qn: "refs_of' h qn qnrs"
by (rule refs_of'_Node)
from 4(10) 4(9) 4(11) qnrs_def refs_of'_distinct[OF 4(10)] have q_in: "q \<notin> set xs \<union> set qnrs" by auto
from 4(11) qnrs_def have no_inter: "set xs \<inter> set qnrs = {}" by auto
from merge_unchanged[OF 4(9) refs_of'_qn 4(6) no_inter q_in] have q_stays: "Ref.get h1 q = Ref.get h q" ..
from 4 q_stays obtain r1s
where rs_def: "rs = q#r1s" and refs_of'_r1:"refs_of' h1 r1 r1s"
by (auto elim: refs_of'_set_next_ref)
from 4(7)[OF 4(9) refs_of'_qn no_inter refs_of'_r1] rs_def qnrs_def show ?case by auto
qed
qed
lemma
assumes "list_of' h p xs"
assumes "list_of' h q ys"
assumes "effect (merge p q) h h' r"
assumes "\<forall>qrs prs. refs_of' h q qrs \<and> refs_of' h p prs \<longrightarrow> set prs \<inter> set qrs = {}"
shows "list_of' h' r (Lmerge xs ys)"
using assms(4)
proof (induct rule: merge_induct3[OF assms(1-3)])
case 1
thus ?case by simp
next
case 2
thus ?case by simp
next
case (3 x xs' y ys' p q pn qn h1 r1 h')
from 3(1) obtain prs where prs_def: "refs_of' h p prs" by (rule list_of'_refs_of')
from 3(2) obtain qrs where qrs_def: "refs_of' h q qrs" by (rule list_of'_refs_of')
from prs_def 3(3) obtain pnrs
where pnrs_def: "prs = p#pnrs"
and refs_of'_pn: "refs_of' h pn pnrs"
by (rule refs_of'_Node)
from prs_def qrs_def 3(9) pnrs_def refs_of'_distinct[OF prs_def] have p_in: "p \<notin> set pnrs \<union> set qrs" by fastforce
from prs_def qrs_def 3(9) pnrs_def have no_inter: "set pnrs \<inter> set qrs = {}" by fastforce
from no_inter refs_of'_pn qrs_def have no_inter2: "\<forall>qrs prs. refs_of' h q qrs \<and> refs_of' h pn prs \<longrightarrow> set prs \<inter> set qrs = {}"
by (fastforce dest: refs_of'_is_fun)
from merge_unchanged[OF refs_of'_pn qrs_def 3(6) no_inter p_in] have p_stays: "Ref.get h1 p = Ref.get h p" ..
from 3(7)[OF no_inter2] obtain rs where rs_def: "refs_of' h1 r1 rs" by (rule list_of'_refs_of')
from refs_of'_merge[OF refs_of'_pn qrs_def 3(6) no_inter this] p_in have p_rs: "p \<notin> set rs" by auto
with 3(7)[OF no_inter2] 3(1-5) 3(8) p_rs rs_def p_stays
show ?case by (auto simp: list_of'_set_ref)
next
case (4 x xs' y ys' p q pn qn h1 r1 h')
from 4(1) obtain prs where prs_def: "refs_of' h p prs" by (rule list_of'_refs_of')
from 4(2) obtain qrs where qrs_def: "refs_of' h q qrs" by (rule list_of'_refs_of')
from qrs_def 4(4) obtain qnrs
where qnrs_def: "qrs = q#qnrs"
and refs_of'_qn: "refs_of' h qn qnrs"
by (rule refs_of'_Node)
from prs_def qrs_def 4(9) qnrs_def refs_of'_distinct[OF qrs_def] have q_in: "q \<notin> set prs \<union> set qnrs" by fastforce
from prs_def qrs_def 4(9) qnrs_def have no_inter: "set prs \<inter> set qnrs = {}" by fastforce
from no_inter refs_of'_qn prs_def have no_inter2: "\<forall>qrs prs. refs_of' h qn qrs \<and> refs_of' h p prs \<longrightarrow> set prs \<inter> set qrs = {}"
by (fastforce dest: refs_of'_is_fun)
from merge_unchanged[OF prs_def refs_of'_qn 4(6) no_inter q_in] have q_stays: "Ref.get h1 q = Ref.get h q" ..
from 4(7)[OF no_inter2] obtain rs where rs_def: "refs_of' h1 r1 rs" by (rule list_of'_refs_of')
from refs_of'_merge[OF prs_def refs_of'_qn 4(6) no_inter this] q_in have q_rs: "q \<notin> set rs" by auto
with 4(7)[OF no_inter2] 4(1-5) 4(8) q_rs rs_def q_stays
show ?case by (auto simp: list_of'_set_ref)
qed
section {* Code generation *}
text {* A simple example program *}
definition test_1 where "test_1 = (do { ll_xs <- make_llist [1..(15::int)]; xs <- traverse ll_xs; return xs })"
definition test_2 where "test_2 = (do { ll_xs <- make_llist [1..(15::int)]; ll_ys <- rev ll_xs; ys <- traverse ll_ys; return ys })"
definition test_3 where "test_3 =
(do {
ll_xs \<leftarrow> make_llist (filter (%n. n mod 2 = 0) [2..8]);
ll_ys \<leftarrow> make_llist (filter (%n. n mod 2 = 1) [5..11]);
r \<leftarrow> ref ll_xs;
q \<leftarrow> ref ll_ys;
p \<leftarrow> merge r q;
ll_zs \<leftarrow> !p;
zs \<leftarrow> traverse ll_zs;
return zs
})"
code_reserved SML upto
ML_val {* @{code test_1} () *}
ML_val {* @{code test_2} () *}
ML_val {* @{code test_3} () *}
export_code test_1 test_2 test_3 checking SML SML_imp OCaml? OCaml_imp? Haskell? Scala Scala_imp
end
|
{"author": "Josh-Tilles", "repo": "isabelle", "sha": "990accf749b8a6e037d25012258ecae20d59ca62", "save_path": "github-repos/isabelle/Josh-Tilles-isabelle", "path": "github-repos/isabelle/Josh-Tilles-isabelle/isabelle-990accf749b8a6e037d25012258ecae20d59ca62/src/HOL/Imperative_HOL/ex/Linked_Lists.thy"}
|
__author__ = "Laurence Elliott - 16600748"
from capstone import *
import pefile, os
import numpy as np
from matplotlib import pyplot as plt
benignPaths = ["../bin-utf8-vec/benignSamples/" + sample for sample in os.listdir("../bin-utf8-vec/benignSamples")]
malwarePaths = ["../bin-utf8-vec/malwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/malwareSamples")]
ransomPaths = ["../bin-utf8-vec/ransomwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/ransomwareSamples")]
nSamples = len(benignPaths) + len(malwarePaths) + len(ransomPaths)
benignOpCodeSet = set()
benignOpCodeDicts = []
benignOpCodeFreqs = {}
count = 1
for sample in benignPaths:
try:
        # parse the PE header and disassemble from the entry point (32-bit x86 assumed)
        pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
benignOpCodeSet = set(list(benignOpCodeSet) + opcodes)
        for opcode in benignOpCodeSet:
            # count occurrences in this sample; opcodes absent from the
            # sample are recorded with frequency 0 rather than seeded at 1
            freq = opcodes.count(opcode)
            try:
                benignOpCodeFreqs[opcode] += freq
            except KeyError:
                benignOpCodeFreqs[opcode] = freq
            opcodeDict[opcode] = round((freq / total) * 100, 2)
benignOpCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
malwareOpCodeSet = set()
malwareOpCodeDicts = []
malwareOpCodeFreqs = {}
count = len(benignPaths)  # progress counter continues after the benign samples
for sample in malwarePaths:
try:
pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
malwareOpCodeSet = set(list(malwareOpCodeSet) + opcodes)
        for opcode in malwareOpCodeSet:
            # count occurrences in this sample; opcodes absent from the
            # sample are recorded with frequency 0 rather than seeded at 1
            freq = opcodes.count(opcode)
            try:
                malwareOpCodeFreqs[opcode] += freq
            except KeyError:
                malwareOpCodeFreqs[opcode] = freq
            opcodeDict[opcode] = round((freq / total) * 100, 2)
malwareOpCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
ransomOpCodeSet = set()
ransomOpCodeDicts = []
ransomOpCodeFreqs = {}
count = len(benignPaths) + len(malwarePaths)
for sample in ransomPaths:
try:
pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
ransomOpCodeSet = set(list(ransomOpCodeSet) + opcodes)
        for opcode in ransomOpCodeSet:
            # count occurrences in this sample; opcodes absent from the
            # sample are recorded with frequency 0 rather than seeded at 1
            freq = opcodes.count(opcode)
            try:
                ransomOpCodeFreqs[opcode] += freq
            except KeyError:
                ransomOpCodeFreqs[opcode] = freq
            opcodeDict[opcode] = round((freq / total) * 100, 2)
ransomOpCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
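# The three loops above repeat one disassembly pattern. A minimal reusable
# sketch of that pattern (hypothetical helper, not called elsewhere):
def disasm_opcode_mnemonics(path):
    """Return the opcode mnemonics of a PE file, disassembled from its entry point."""
    pe = pefile.PE(path, fast_load=True)
    entry_point = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    data = pe.get_memory_mapped_image()[entry_point:]
    cs = Cs(CS_ARCH_X86, CS_MODE_32)
    return [insn.mnemonic for insn in cs.disasm(data, 0x1000)]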
opCodeFreqsSorted = np.genfromtxt("top50opcodes.csv", delimiter=",", dtype="str")[1:, 0]
count = 0
for opDict in benignOpCodeDicts:
opFreqVec = []
for opcode in opCodeFreqsSorted[:50]:
        try:
            opFreqVec.append(opDict[opcode])
        except KeyError:
            opFreqVec.append(0.0)
np.save("benignHistVecs/" + str(count)+".npy", opFreqVec)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
count = len(benignPaths)
for opDict in malwareOpCodeDicts:
opFreqVec = []
for opcode in opCodeFreqsSorted[:50]:
        try:
            opFreqVec.append(opDict[opcode])
        except KeyError:
            opFreqVec.append(0.0)
np.save("malwareHistVecs/" + str(count)+".npy", opFreqVec)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
count = len(benignPaths) + len(malwarePaths)
for opDict in ransomOpCodeDicts:
opFreqVec = []
for opcode in opCodeFreqsSorted[:50]:
        try:
            opFreqVec.append(opDict[opcode])
        except KeyError:
            opFreqVec.append(0.0)
np.save("ransomHistVecs/" + str(count)+".npy", opFreqVec)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
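# A saved vector can be reloaded for inspection (minimal sketch):
#   vec = np.load("benignHistVecs/0.npy")  # 50 opcode-frequency percentages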
# benignVecPaths = ["benignHistVecs/" + vecPath for vecPath in os.listdir("benignHistVecs")]
# for vecPath in benignVecPaths:
# opFreqVec = np.load(vecPath)
# print(opFreqVec)
# plt.figure(count)
# plt.bar(np.arange(len(opFreqVec)), opFreqVec)
# plt.show()
|
{"hexsha": "e87545d3326a550777fb0f0d104b93477628966e", "size": 5618, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin-opcodes-vec/bin-opcodes-vec.py", "max_stars_repo_name": "laurencejbelliott/Ensemble_DL_Ransomware_Detector", "max_stars_repo_head_hexsha": "0cae02c2425e787a810513537a47897f3a42e5b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-04-10T21:16:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T00:22:14.000Z", "max_issues_repo_path": "bin-opcodes-vec/bin-opcodes-vec.py", "max_issues_repo_name": "laurencejbelliott/Ensemble_DL_Ransomware_Detector", "max_issues_repo_head_hexsha": "0cae02c2425e787a810513537a47897f3a42e5b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin-opcodes-vec/bin-opcodes-vec.py", "max_forks_repo_name": "laurencejbelliott/Ensemble_DL_Ransomware_Detector", "max_forks_repo_head_hexsha": "0cae02c2425e787a810513537a47897f3a42e5b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-06-29T18:09:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-10T22:15:13.000Z", "avg_line_length": 28.6632653061, "max_line_length": 123, "alphanum_fraction": 0.5879316483, "include": true, "reason": "import numpy", "num_tokens": 1530}
|
-- Local Variables:
-- idris-load-packages: ("prelude" "effects" "contrib" "base")
-- End:
import Data.Vect
import Data.Fin
-- https://en.wikipedia.org/wiki/Netpbm_format
-- A little file for writing PPM format. Why? Because I want to do some little image things
-- in Idris like the ones in the class I helped with using that Haskell library
preamble : Nat -> Nat -> String
preamble x y = unlines ["P3",show x ++ " " ++ show y]
PPM : Nat -> Nat -> Type
PPM x y = Vect y (Vect x (Int,Int,Int))
auxFun : (Int,Int,Int) -> String
auxFun (r,g,b) = (show r) ++ " " ++ (show g) ++ " " ++ (show b)
unwordsV : Vect x String -> String
unwordsV [] = ""
unwordsV (x :: xs) = x ++ " " ++ (unwordsV xs)
unlinesV : Vect x String -> String
unlinesV [] = ""
unlinesV (x :: xs) = x ++ "\n" ++ (unlinesV xs)
ppmToString : PPM x y -> String
ppmToString {x} {y} p =
  -- emit the P3 header via the (previously unused) preamble; "255" is the
  -- assumed maximum colour value
  preamble x y ++ "255\n" ++ (unlinesV $ map (unwordsV . map auxFun) p)
ppmToFile : PPM x y -> String -> IO (Either FileError ())
ppmToFile p file = writeFile file (ppmToString p)
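-- Minimal usage sketch (hypothetical 2x2 image, so x = 2 and y = 2):
--   main : IO ()
--   main = do _ <- ppmToFile [[(255,0,0),(0,255,0)],[(0,0,255),(0,0,0)]] "tiny.ppm"
--             pure ()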
-- so we'll do the same thing as the original Haskell code I saw and have a function from doubles
-- to colors and then, being given dimensions to render, create an actual PPM type
coords : Double -> Double -> Double -> Double -> (xsample : Nat) -> (ysample : Nat) -> Vect ysample (Vect xsample (Double, Double))
coords x y delx dely xsample Z = []
-- one possible completion of the original hole ?createRow: build a row of
-- sample points at the current y, then step y by dely for the next row
coords x y delx dely xsample (S k) = row x xsample :: coords x (y + dely) delx dely xsample k
  where row : Double -> (n : Nat) -> Vect n (Double, Double)
        row x' Z = []
        row x' (S j) = (x', y) :: row (x' + delx) j
funToVector : (Double -> Double -> (Int,Int,Int)) -> Double -> Double -> Double -> Double -> (xsample : Nat) -> (ysample : Nat) -> PPM xsample ysample
funToVector f startx starty delx dely xsample ysample = map (map (uncurry f)) (coords startx starty delx dely xsample ysample)
|
{"hexsha": "6fb974b053b095710c0bcecde3782c74e30ea558", "size": 1682, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "PPM.idr", "max_stars_repo_name": "clarissalittler/idris-practice", "max_stars_repo_head_hexsha": "e307a93fa4ab7bce9f6cf7fef9973c398b3d65ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PPM.idr", "max_issues_repo_name": "clarissalittler/idris-practice", "max_issues_repo_head_hexsha": "e307a93fa4ab7bce9f6cf7fef9973c398b3d65ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PPM.idr", "max_forks_repo_name": "clarissalittler/idris-practice", "max_forks_repo_head_hexsha": "e307a93fa4ab7bce9f6cf7fef9973c398b3d65ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1162790698, "max_line_length": 150, "alphanum_fraction": 0.6605231867, "num_tokens": 519}
|
from pathlib import Path
from typing import List
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import os
import zipfile
from src_homework.config import COMMON_COLUMN
# data pre processing
base_data_folder_path = 'data/HMOG'
file_name_to_colume_names = {
'Accelerometer.csv': ['Systime', 'EventTime', 'ActivityID', 'X', 'Y', 'Z', 'Phone_orientation'],
'Activity.csv': ['ID', 'SubjectID', 'Start_time', 'End_time', 'Relative_Start_time', 'Relative_End_time',
'Gesture_scenario', 'TaskID', 'ContentID'],
'Gyroscope.csv': ['Systime', 'EventTime', 'ActivityID', 'X', 'Y', 'Z', 'Phone_orientation'],
}
print('sa')
def unzip_file(parent_file):
import zipfile, fnmatch, os
pattern = '*.zip'
for root, dirs, files in os.walk(parent_file):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(os.path.join(root, os.path.splitext(filename)[0]))
os.remove(os.path.join(root, filename))
def get_user_ids(path):
"""
Get all user ids based on name of folders under "public_dataset/"
:return: a list of user ids
"""
file_name = os.listdir(path)
user_id = [i for i in file_name if '.pdf' not in i and '.DS_Store' not in i]
return user_id
def get_user_session_ids(user_id, parent_path):
"""
Get all session ids for a specific user based on folder structure
e.g. "public_dataset/100669/100669_session_13" has user_id=100669, session_id=13
:param user_id: user id
:return: list of user session ids
"""
file_set = os.listdir(os.path.join(parent_path, user_id, user_id))
session_id_set = [i.split('_')[-1] for i in file_set if i != '.DS_Store']
return session_id_set
def read_file(parent_folder, user_id, user_session_id, file_name, colume_names):
"""
Read one of the csv files for a user
:param user_id: user id
:param user_session_id: user session id
:param file_name: csv file name (key of file_name_to_colume_names)
:param colume_names: a list of column names of the csv file (value of file_name_to_colume_names)
:return: content of the csv file as pandas DataFrame
"""
import pandas as pd
data = pd.read_csv(os.path.join(parent_folder, user_id, user_id,
user_id + '_session_' + user_session_id,
file_name), names = colume_names)
return data
def get_user_session_data(parent_folder, user_id, user_session_id):
"""
Combine accelerometer, gyroscope, and activity labels for a specific session of a user
Note: Timestamps are ignored when joining accelerometer and gyroscope data.
:param user_id: user id
:param user_session_id: user session id
:return: combined DataFrame for a session
"""
colume_names_1 = ['Systime', 'EventTime', 'ActivityID', 'X_a', 'Y_a', 'Z_a', 'Phone_orientation']
colume_names_2 = ['Systime', 'EventTime', 'ActivityID', 'X_g', 'Y_g', 'Z_g', 'Phone_orientation']
sub_data1 = read_file(parent_folder, user_id, user_session_id, 'Accelerometer'+'.csv', colume_names_1)
sub_data2 = read_file(parent_folder, user_id, user_session_id, 'Gyroscope'+'.csv', colume_names_2)
sub_data1.drop(['Systime', 'Phone_orientation'], axis=1, inplace = True)
sub_data2.drop(['Systime', 'Phone_orientation'], axis=1, inplace = True)
data = sub_data1.merge(sub_data2, on = ['EventTime', 'ActivityID'], how = 'outer')
return data
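# Minimal usage sketch (hypothetical ids):
#   session_df = get_user_session_data('data/HMOG', '100669', '13')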
# pick the user as well as activities and extract 3 out of 6 features
user_ids = get_user_ids(base_data_folder_path)
user_id = user_ids[0]
user_session_id = '10'
data = get_user_session_data(base_data_folder_path, user_id, user_session_id)
data = data.sort_values(by = 'EventTime')
data = data.interpolate()
data = data.dropna()
import random
feature = random.sample(['X_a', 'Y_a', 'Z_a', 'X_g', 'Y_g', 'Z_g'], k=3)
data_feature = data[['EventTime', 'ActivityID'] + feature].copy()  # explicit copy avoids SettingWithCopyWarning when EventTime is rescaled later
activity = list(set(data_feature['ActivityID']))
# visualize of the features you pick
from src_homework.utilis import time_series_plot
save_path = 'result_hw/event_time_series'
for fea in feature:
sub_data = data_feature[['EventTime', 'ActivityID', fea]].copy()
time_series_plot(sub_data, fea, save_path)
print(fea, 'finished')
def multiV_curvature(sub_data, t):
"""
Calculate multi V curvature
:param nbddata: neighborhood of time t_i containing (t, x(t), y(t), z(t)),
where x(t), y(t), z(t) are the 3 out of the 6 features.
:return: multi V curvature
"""
from numpy.linalg import det, norm
from numpy import cross, dot, polyfit, zeros
import numpy as np
nbddata = sub_data.copy()
t0 = nbddata['EventTime'][0]
nbddata['t'] = nbddata['EventTime'] - t0
nbddata = nbddata.drop(['EventTime'], axis=1)
not_common_col = [i for i in nbddata.columns if i not in COMMON_COLUMN]
for col in not_common_col:
v = polyfit(nbddata['t'], nbddata[col], 3)
if col == not_common_col[0]:
v_matrix = np.array(v)
else:
temp = np.array(v)
v_matrix = np.vstack((v_matrix, temp))
    # np.polyfit returns coefficients highest order first ([c3, c2, c1, c0]);
    # reorder to [c1, c2, c3] for the derivative expressions below
    v_matrix = v_matrix[:, 2::-1]
a1 = v_matrix[:, 0] + 2*v_matrix[:, 1]*t + 3*v_matrix[:, 2]*t**2
a2 = 2*v_matrix[:, 1] + 6*v_matrix[:, 2]*t
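    # curvature of a space curve: kappa(t) = |r'(t) x r''(t)| / |r'(t)|^3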
curvature = norm(cross(a1, a2), 2) / norm(a1,2)**3
return curvature
def multiV_torsion(sub_data, t):
"""
Calculate multi V torsion
:param nbddata: neighborhood of time t_i containing (t, x(t), y(t), z(t)),
where x(t), y(t), z(t) are the 3 out of the 6 features.
:return: multi V torsion
"""
from numpy.linalg import det, norm
from numpy import cross, dot, polyfit, zeros
import numpy as np
nbddata = sub_data.copy()
t0 = nbddata['EventTime'][0]
nbddata['t'] = nbddata['EventTime'] - t0
nbddata = nbddata.drop(['EventTime'], axis=1)
not_common_col = [i for i in nbddata.columns if i not in COMMON_COLUMN]
for col in not_common_col:
v = polyfit(nbddata['t'], nbddata[col], 3)
if col == not_common_col[0]:
v_matrix = np.array(v)
else:
temp = np.array(v)
v_matrix = np.vstack((v_matrix, temp))
    # np.polyfit returns coefficients highest order first ([c3, c2, c1, c0]);
    # reorder to [c1, c2, c3] for the derivative expressions below
    v_matrix = v_matrix[:, 2::-1]
a1 = v_matrix[:, 0] + 2*v_matrix[:, 1]*t + 3*v_matrix[:, 2]*t**2
a2 = 2*v_matrix[:, 1] + 6*v_matrix[:, 2]*t
a3 = 6*v_matrix[:, 2]
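    # torsion of a space curve: tau(t) = ((r'(t) x r''(t)) . r'''(t)) / |r'(t) x r''(t)|^2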
torsion = dot(cross(a1, a2), a3) / norm(cross(a1, a2), 2)**2
return torsion
def get_neigbor_data(data, t, size):
"""
get the neigbor data of t
:param data:
:param t:
:param size:
:return: nbdata
"""
t_index = data.index[data['EventTime'] == t].tolist()[0]
data_point_index = range(t_index - int(size / 2), t_index + int(size / 2)+1)
nbddata = data.iloc[data_point_index, :]
nbddata.reset_index(drop = True, inplace = True)
return nbddata
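# e.g. get_neigbor_data(act_data, t, 201) returns the 201 rows centred on time t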
# Calucate and plot curvature and torsion of the features you pick
# reduce the size of EventTime
os.makedirs(os.path.join('result_hw', 'curvature_torsion'), exist_ok=True)
data_feature['EventTime'] = data_feature['EventTime']*10**(-10)
size = 201
start_point = int(size/2)
for act in activity:
act_data = data_feature[data_feature['ActivityID'] == act].copy()
end_point = len(act_data) - start_point
act_data.drop(['ActivityID'], axis = 1, inplace = True)
act_data = act_data.reset_index(drop = True)
result_t = pd.DataFrame(columns = ['time', 'curvature', 'torsion'])
for i in range(start_point, end_point):
t = act_data['EventTime'][i]
sub_data = get_neigbor_data(act_data, t, size)
curvature = multiV_curvature(sub_data, t)
torsion = multiV_torsion(sub_data, t)
result_t.loc[-1] = [t, curvature, torsion]
result_t.index = result_t.index + 1
print(t, curvature, torsion)
result_t.reset_index(drop=True, inplace=True)
plt.subplot(211)
plt.plot(result_t['time'], result_t['curvature'])
plt.title('curvature')
plt.subplot(212)
plt.plot(result_t['time'], result_t['torsion'])
plt.title('torsion')
plt.savefig(os.path.join('result_hw', 'curvature_torsion', '{}.png'.format(act)))
plt.show()
|
{"hexsha": "e8803d10eb2ac453b4b92d1fabaa284905e2a33d", "size": 8330, "ext": "py", "lang": "Python", "max_stars_repo_path": "src_homework/HW2_starter_files.py", "max_stars_repo_name": "jjkindergarten/Nonlinear-Data-Analysis", "max_stars_repo_head_hexsha": "4ee31f0e9ef231fb0087307b1235558c27586a5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src_homework/HW2_starter_files.py", "max_issues_repo_name": "jjkindergarten/Nonlinear-Data-Analysis", "max_issues_repo_head_hexsha": "4ee31f0e9ef231fb0087307b1235558c27586a5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src_homework/HW2_starter_files.py", "max_forks_repo_name": "jjkindergarten/Nonlinear-Data-Analysis", "max_forks_repo_head_hexsha": "4ee31f0e9ef231fb0087307b1235558c27586a5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0, "max_line_length": 119, "alphanum_fraction": 0.6579831933, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2335}
|
"""
Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs
https://github.com/mys007/ecc
https://arxiv.org/abs/1704.02901
2017 Martin Simonovsky
"""
from __future__ import division
from __future__ import print_function
from builtins import range
import unittest
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable, gradcheck
from .GraphConvModule import *
from .GraphConvInfo import GraphConvInfo
class TestGraphConvModule(unittest.TestCase):
def test_gradcheck(self):
torch.set_default_tensor_type('torch.DoubleTensor') #necessary for proper numerical gradient
for cuda in range(0,2):
# without idxe
n,e,in_channels, out_channels = 20,50,10, 15
input = torch.randn(n,in_channels)
weights = torch.randn(e,in_channels,out_channels)
idxn = torch.from_numpy(np.random.randint(n,size=e))
idxe = None
degs = torch.LongTensor([5, 0, 15, 20, 10]) #strided conv
degs_gpu = degs
edge_mem_limit = 30 # some nodes will be combined, some not
if cuda:
input = input.cuda(); weights = weights.cuda(); idxn = idxn.cuda(); degs_gpu = degs_gpu.cuda()
func = GraphConvFunction(in_channels, out_channels, idxn, idxe, degs, degs_gpu, edge_mem_limit=edge_mem_limit)
data = (Variable(input, requires_grad=True), Variable(weights, requires_grad=True))
ok = gradcheck(func, data)
self.assertTrue(ok)
# with idxe
weights = torch.randn(30,in_channels,out_channels)
idxe = torch.from_numpy(np.random.randint(30,size=e))
if cuda:
weights = weights.cuda(); idxe = idxe.cuda()
func = GraphConvFunction(in_channels, out_channels, idxn, idxe, degs, degs_gpu, edge_mem_limit=edge_mem_limit)
ok = gradcheck(func, data)
self.assertTrue(ok)
torch.set_default_tensor_type('torch.FloatTensor')
def test_batch_splitting(self):
n,e,in_channels, out_channels = 20,50,10, 15
input = torch.randn(n,in_channels)
weights = torch.randn(e,in_channels,out_channels)
idxn = torch.from_numpy(np.random.randint(n,size=e))
idxe = None
degs = torch.LongTensor([5, 0, 15, 20, 10]) #strided conv
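        # the same convolution evaluated with and without edge batching must agree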
func = GraphConvFunction(in_channels, out_channels, idxn, idxe, degs, degs, edge_mem_limit=1e10)
data = (Variable(input, requires_grad=True), Variable(weights, requires_grad=True))
output1 = func(*data)
func = GraphConvFunction(in_channels, out_channels, idxn, idxe, degs, degs, edge_mem_limit=1)
output2 = func(*data)
self.assertLess((output1-output2).norm().data[0], 1e-6)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "a7307d2f4ba6fe8dd921013ed1a47942d97da45f", "size": 3023, "ext": "py", "lang": "Python", "max_stars_repo_path": "learning/ecc/test_GraphConvModule.py", "max_stars_repo_name": "davijo/superpoint_graph", "max_stars_repo_head_hexsha": "0d60fb364bfa37fb70570784899ce46c0296ee22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 655, "max_stars_repo_stars_event_min_datetime": "2018-01-18T03:15:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:14:20.000Z", "max_issues_repo_path": "learning/ecc/test_GraphConvModule.py", "max_issues_repo_name": "davijo/superpoint_graph", "max_issues_repo_head_hexsha": "0d60fb364bfa37fb70570784899ce46c0296ee22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 259, "max_issues_repo_issues_event_min_datetime": "2018-01-23T04:59:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T10:12:02.000Z", "max_forks_repo_path": "learning/ecc/test_GraphConvModule.py", "max_forks_repo_name": "davijo/superpoint_graph", "max_forks_repo_head_hexsha": "0d60fb364bfa37fb70570784899ce46c0296ee22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 217, "max_forks_repo_forks_event_min_datetime": "2018-01-18T14:23:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T16:36:44.000Z", "avg_line_length": 37.7875, "max_line_length": 123, "alphanum_fraction": 0.6232219649, "include": true, "reason": "import numpy", "num_tokens": 713}
|
import numpy as np
import pandas as pd
import keras.backend as K
from keras.layers import multiply
from keras.layers.core import Dense, Reshape, Lambda, RepeatVector, Permute, Flatten
from keras.layers.recurrent import LSTM
from keras.models import Model, Input
# plot part.
import matplotlib.pyplot as plt
# ## Helper functions
def get_activations(model, inputs, print_shape_only=False, layer_name=None, verbose=False):
"""
Get activations from a model
Args:
model: a keras model
inputs: the inputs for the model
print_shape_only: whether to print the shape of the layer or the whole activation layer
layer_name: name of specific layer to return
verbose: whether to show all outputs
Returns:
activations: list, list of activations
"""
activations = []
inp = model.input
if layer_name is None:
outputs = [layer.output for layer in model.layers]
else:
outputs = [layer.output for layer in model.layers if layer.name == layer_name] # all layer outputs
funcs = [K.function([inp] + [K.learning_phase()], [out]) for out in outputs] # evaluation functions
layer_outputs = [func([inputs, 1.])[0] for func in funcs]
for layer_activations in layer_outputs:
activations.append(layer_activations)
if verbose:
print('----- activations -----')
if print_shape_only:
print(layer_activations.shape)
else:
print(layer_activations)
return activations
def get_data_recurrent(n, time_steps, input_dim, attention_column=10):
"""
Data generation. x is random except that first value equals the target y.
network should learn that the target = x[attention_column].
Therefore, most of its attention should be focused on the value addressed by attention_column.
Args:
n: the number of samples to retrieve.
time_steps: the number of time steps of your series.
input_dim: the number of dimensions of each element in the series.
attention_column: the column linked to the target. Everything else is purely random.
Returns:
x: model inputs
y: model targets
"""
x = np.random.standard_normal(size=(n, time_steps, input_dim))
y = np.random.randint(low=0, high=2, size=(n, 1))
x[:, attention_column, :] = np.tile(y[:], (1, input_dim))
return x, y
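# Minimal sketch: x, y = get_data_recurrent(4, 20, 2) gives x.shape == (4, 20, 2),
# y.shape == (4, 1), and x[:, 10, :] equal to y broadcast across the input dim.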
def attention_3d_block(inputs, TIME_STEPS):
"""
inputs.shape = (batch_size, time_steps, input_dim)
"""
input_dim = int(inputs.shape[2])
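    # permute so time is the last axis, learn one softmax weight per time step
    # with the Dense layer, then permute back to align with the inputs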
a = Permute((2, 1))(inputs)
a = Reshape((input_dim, TIME_STEPS))(a)
a = Dense(TIME_STEPS, activation='softmax')(a)
a_probs = Permute((2, 1), name='attention_vec')(a)
#output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')
output_attention_mul = multiply([inputs, a_probs])
return output_attention_mul
def attention_3d_block_time_features(inputs, TIME_STEPS):
"""
inputs.shape = (batch_size, time_steps, input_dim)
"""
input_dim = int(inputs.shape[2])
a = Flatten()(inputs)
a = Dense(TIME_STEPS*input_dim, activation='softmax')(a)
a = Reshape((input_dim, TIME_STEPS))(a)
a_probs = Permute((2, 1), name='attention_vec')(a)
output_attention_mul = multiply([inputs, a_probs])
return output_attention_mul
def attention_spatial_block(inputs, TIME_STEPS):
    """
    inputs.shape = (batch_size, time_steps, input_dim)
    """
    input_dim = int(inputs.shape[2])
    a = Reshape((TIME_STEPS, input_dim))(inputs)
    a_probs = Dense(input_dim, activation='softmax', name='attention_vec')(a)
    # the old merge(..., mode='mul') API was used here; multiply() is the Keras 2 equivalent
    output_attention_mul = multiply([inputs, a_probs])
    return output_attention_mul
# ## Hyperparameters and builder methods
# TIME_STEPS and INPUT_DIM are not defined elsewhere in this file; the values
# below are assumed examples, chosen only so the builder is runnable.
TIME_STEPS = 20
INPUT_DIM = 2
def model_attention_applied_before_lstm():
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    attention_mul = attention_3d_block(inputs, TIME_STEPS)
    #attention_mul = attention_spatial_block(inputs, TIME_STEPS)
    lstm_units = 32
    attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    model = Model(inputs=[inputs], outputs=output)
    return model
|
{"hexsha": "f21675789278a6dcaa0b38f16e225d0a458b1718", "size": 4225, "ext": "py", "lang": "Python", "max_stars_repo_path": "attention_function.py", "max_stars_repo_name": "deepak-kaji/mimic-lstm", "max_stars_repo_head_hexsha": "4900bb6fa3b4828000a18e35c534bb1b3f23dd05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2018-10-29T12:05:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T10:37:12.000Z", "max_issues_repo_path": "attention_function.py", "max_issues_repo_name": "deepak-kaji/mimic-lstm", "max_issues_repo_head_hexsha": "4900bb6fa3b4828000a18e35c534bb1b3f23dd05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-10-05T15:10:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-13T00:44:00.000Z", "max_forks_repo_path": "attention_function.py", "max_forks_repo_name": "deepak-kaji/mimic-lstm", "max_forks_repo_head_hexsha": "4900bb6fa3b4828000a18e35c534bb1b3f23dd05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2018-11-05T17:39:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T09:12:26.000Z", "avg_line_length": 35.2083333333, "max_line_length": 107, "alphanum_fraction": 0.685443787, "include": true, "reason": "import numpy", "num_tokens": 999}
|
// Copyright Carl Philipp Reh 2009 - 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <fcppt/make_int_range_count.hpp>
#include <fcppt/tag_type.hpp>
#include <fcppt/use.hpp>
#include <fcppt/algorithm/loop.hpp>
#include <fcppt/algorithm/loop_break_mpl.hpp>
#include <fcppt/preprocessor/disable_gcc_warning.hpp>
#include <fcppt/preprocessor/pop_warning.hpp>
#include <fcppt/preprocessor/push_warning.hpp>
#include <fcppt/config/external_begin.hpp>
#include <boost/mpl/range_c.hpp>
#include <boost/test/unit_test.hpp>
#include <fcppt/config/external_end.hpp>
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)
BOOST_AUTO_TEST_CASE(
algorithm_loop_mpl
)
{
FCPPT_PP_POP_WARNING
int value{
0
};
fcppt::algorithm::loop(
boost::mpl::range_c<
int,
0,
5
>{},
[
&value
](
auto const _index
)
{
FCPPT_USE(
_index
);
typedef
fcppt::tag_type<
decltype(
_index
)
>
index;
static_assert(
index::value
<
5,
""
);
value +=
index::value;
}
);
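	// value is now 0 + 1 + 2 + 3 + 4 = 10, each index known at compile time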
BOOST_CHECK_EQUAL(
value,
10
);
}
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)
BOOST_AUTO_TEST_CASE(
algorithm_loop_range
)
{
FCPPT_PP_POP_WARNING
int value{
0
};
fcppt::algorithm::loop(
fcppt::make_int_range_count(
5
),
[
&value
](
int const _value
)
{
value +=
_value;
}
);
BOOST_CHECK_EQUAL(
value,
10
);
}
|
{"hexsha": "58c95c677fea2c14535fce1b62b52caaa2de8e91", "size": 1568, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/algorithm/loop.cpp", "max_stars_repo_name": "vinzenz/fcppt", "max_stars_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/algorithm/loop.cpp", "max_issues_repo_name": "vinzenz/fcppt", "max_issues_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/algorithm/loop.cpp", "max_forks_repo_name": "vinzenz/fcppt", "max_forks_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.3853211009, "max_line_length": 61, "alphanum_fraction": 0.6709183673, "num_tokens": 505}
|
C @(#)getyeq.f 20.3 2/13/96
subroutine getyeq(k1, k2, id, ksect, yeq, y1, yxy, y2)
C compute the following 2-port Y-matrices:
C YEQ - Equivalent parallel 2-port
C Y1 - 2-port left of section KSECT
C YXY - 2-port for section KSECT
C Y2 - 2-port right of section KSECT
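C Each cascade is accumulated section by section with EQVFIRD/EQVNEXD
C and reduced to a single two-port with EQVFIN.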
include 'ipfinc/parametr.inc'
include 'ipfinc/blank.inc'
c Global variables used:
c None
include 'ipfinc/branch.inc'
c Global variables used:
c brtype, ky, brsect, brnch_nxt, brid
include 'ipfinc/bus.inc'
c Global variables used:
c None
include 'ipfinc/lfiles.inc'
c Global variables used:
c None
include 'ipfinc/prt.inc'
c Global variables used:
c None
character id*1, nxid*1
integer count1, count2, first1, first2, first, ptr, find_br, sect
complex yeq(2, 2), y1(2, 2), y2(2, 2), yxy(2, 2)
double complex yscr(3, 3), y(2,2), yxy_temp(2,2)
logical found, finished
count1 = 0
count2 = 0
found = .false.
first1 = 0
last1 = 0
last2 = 0
first2 = 0
match = 0
ptr = find_br(k1, k2, id, sect, 0)
if (ptr .gt. 0 .and.
& (brtype(ptr) .eq. 4 .or. brtype(ptr) .eq. 9))
& ptr = brnch_nxt(ptr)
do while (ptr .gt. 0 .and.
& (ky(ptr) .eq. k2 .and. brid(ptr) .eq. id))
if (brsect(ptr) .eq. 0) then
c
c PIEQIV is expecting a double complex. Use a temporary
c variable so we don't have to change all the code in
c routines that call this routine.
c
if (ksect .eq. brsect(ptr)) then
call pieqiv(ptr, yxy_temp, ierr)
do 70 i = 1, 2
do 60 j = 1, 2
yxy(i,j) = cmplx(yxy_temp(i,j))
60 continue
70 continue
endif
if (brtype(ptr) .eq. 1) goto 100
endif
if (ksect .eq. brsect(ptr)) then
match = ptr
c
c PIEQIV is expecting a double complex. Use a temporary
c variable so we don't have to change all the code in
c routines that call this routine.
c
call pieqiv(ptr, yxy_temp, ierr)
do 90 i = 1, 2
do 80 j = 1, 2
yxy(i,j) = cmplx(yxy_temp(i,j))
80 continue
90 continue
found = .true.
elseif (found) then
count2 = count2 + 1
if (first2 .eq. 0) first2 = ptr
last2 = ptr
else
count1 = count1 + 1
if (first1 .eq. 0) first1 = ptr
last1 = ptr
endif
100 continue
ptr = brnch_nxt(ptr)
enddo
C The equivalent pi admittance for a branch with sections is not
C available; also, it cannot be computed using conventional calls
C to FIRSEC, NEXSEC, and FINSEC since that would jeopardize the
C data being stored for the same branch!
C EQVFIR, EQVNEX, and EQVFIN are entry points in EQVSEC. It
C is a shareable image. All data including the equivalent Y-matrix
C is stored in the calling program.
nsect = 0
first = first1
if (first .eq. 0) first = match
last = last2
if (last .eq. 0) last = match
if (last .eq. 0) last = last1
ptr = first
finished = .false.
      do while (ptr .gt. 0 .and. .not. finished)
         if (brtype(ptr) .ne. 9) then
            call pieqiv(ptr, y, ierr)
            nsect = nsect + 1
            if (nsect .eq. 1) then
               call eqvfird(y, yscr)
            else
               call eqvnexd(y, yscr)
            endif
         endif
C        process up to and including the section at LAST
         if (ptr .eq. last) then
            finished = .true.
         else
            ptr = brnch_nxt(ptr)
         endif
      enddo
call eqvfin(yeq, yscr)
C Step 1. YEQ is now completed. Get sections to left of KSECT.
nsect = 0
if (count1 .gt. 0) then
finished = .false.
ptr = first1
         do while (ptr .gt. 0 .and. .not. finished)
            if (brtype(ptr) .ne. 9) then
               call pieqiv(ptr, y, ierr)
               nsect = nsect + 1
               if (nsect .eq. 1) then
                  call eqvfird(y, yscr)
               else
                  call eqvnexd(y, yscr)
               endif
            endif
C           process up to and including the section at LAST1
            if (ptr .eq. last1) then
               finished = .true.
            else
               ptr = brnch_nxt(ptr)
            endif
         enddo
call eqvfin(y1, yscr)
else
do l = 1, 2
do k = 1, 2
y1(k, l) = cmplx(0.0, 0.0)
enddo
enddo
endif
C Step 2. Y1 is now completed. Get sections to right of KSECT
nsect = 0
if (count2 .gt. 0) then
finished = .false.
ptr = first2
         do while (ptr .gt. 0 .and. .not. finished)
            if (brtype(ptr) .ne. 9) then
               call pieqiv(ptr, y, ierr)
               nsect = nsect + 1
               if (nsect .eq. 1) then
                  call eqvfird(y, yscr)
               else
                  call eqvnexd(y, yscr)
               endif
            endif
C           process up to and including the section at LAST2
            if (ptr .eq. last2) then
               finished = .true.
            else
               ptr = brnch_nxt(ptr)
            endif
         enddo
call eqvfin(y2, yscr)
else
do l = 1, 2
do k = 1, 2
y2(k, l) = cmplx(0.0, 0.0)
C Y2 is now completed.
enddo
enddo
endif
return
end
|
{"hexsha": "5d45a9ed0f150ba5374462137df56eb17960721c", "size": 5246, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/getyeq.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/getyeq.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/getyeq.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 26.6294416244, "max_line_length": 71, "alphanum_fraction": 0.5259245139, "num_tokens": 1732}
|
[STATEMENT]
lemma redT_updLns_iff [simp]:
"\<And>ln. redT_updLns ls t ln las $ l = upd_threadRs (ln $ l) (ls $ l) t (las $ l)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ln. redT_updLns ls t ln las $ l = upd_threadRs (ln $ l) (ls $ l) t (las $ l)
[PROOF STEP]
by(simp add: redT_updLns_def)
|
{"llama_tokens": 150, "file": "JinjaThreads_Framework_FWLockingThread", "length": 1}
|
# !conda install -c anaconda seaborn -y
# !conda install -c anaconda nltk
import re
import string
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
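# NLTK resources must be installed once per environment, e.g.:
#   import nltk; nltk.download('punkt'); nltk.download('stopwords')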
# importing data set into dataframes with two columns: Text and Class
testData = pd.read_csv("/home/jovyan/binder/test.csv", names=["Review", "Class"], delimiter=",", header=None)
trainData = pd.read_csv("/home/jovyan/binder/train.csv", names=["Review", "Class"], delimiter=",", header=None)
valData = pd.read_csv("/home/jovyan/binder/val.csv", names=["Review", "Class"], delimiter=",", header=None)
# Count of samples in each data set
print(testData.head())
print("")
print(trainData.head())
print("")
print(valData.head())
print("")
print("Test Samples per class: {}".format(np.bincount(testData.Class)))
print("Train Samples per class: {}".format(np.bincount(trainData.Class)))
print("Val Samples per class: {}".format(np.bincount(valData.Class)))
# function used for text cleaning of input data
def clean(df):
corpus = list() # define empty list for corpus
    lines = df["Review"].values.tolist() # extract the review texts from the "Review" column as a list
for text in lines:
text = text.lower()
text = re.sub(r"[,.\"!$%^&*(){}?/;`~:<>+=-]", "", text) # regexp used to remove all special characters
tokens = word_tokenize(text) # splitting text
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]
        stop_words = set(stopwords.words("english"))
        stop_words.discard("not")  # keep "not" because it carries sentiment
        words = [word for word in words if word not in stop_words]  # drop stopwords
        words = ' '.join(words) # joining tokenized words together
        corpus.append(words) # append cleaned text to corpus
return corpus
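# Minimal sketch: clean(pd.DataFrame({"Review": ["Great movie, NOT bad!"]}))
# should yield something like ['great movie not bad'].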
# applying clean function to data sets
clTest = clean(testData)
clTrain = clean(trainData)
clVal = clean(valData)
# loading TF-IDF class for feature extraction
from sklearn.feature_extraction.text import TfidfVectorizer
TF = TfidfVectorizer(min_df=15, ngram_range=(1,2))
xTrain = TF.fit_transform(clTrain).toarray()
yTrain = trainData[['Class']].values
xTest = TF.transform(clTest).toarray()
yTest = testData[['Class']].values
xVal = TF.transform(clVal).toarray()
yVal = valData[['Class']].values
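# The vectorizer is fitted on the training corpus only; test and validation sets
# are transformed with that fitted vocabulary to avoid information leakage.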
# loading Multinomial Naive Bayes model for text classification
from sklearn.naive_bayes import MultinomialNB
mNB = MultinomialNB()
mNB.fit(xTrain, np.ravel(yTrain))
y_pred_ts = mNB.predict(xTest)
y_pred_tr = mNB.predict(xTrain)
y_pred_va = mNB.predict(xVal)
# sklearn metrics used to evaluate performance (accuracy) of the ML model on test and val datasets and plot confusion matrices
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
tsaccuracy = accuracy_score(yTest, y_pred_ts)
tCM = confusion_matrix(yTest, y_pred_ts)
tClasses = np.unique(yTest)
print("Test Set Accuracy:", round(tsaccuracy,2))
print("")
print("Test Set Metrics:\n{}".format(classification_report(yTest, y_pred_ts)))
print("")
# Plot confusion matrix
fig, ax = plt.subplots()
sns.heatmap(tCM, annot=True, fmt='d', ax=ax, cmap=plt.cm.Blues,
cbar=False)
ax.set(xlabel="Pred", ylabel="True", xticklabels=tClasses,
yticklabels=tClasses, title="Confusion matrix")
plt.yticks(rotation=0)
vlaccuracy = accuracy_score(yVal, y_pred_va)
vCM = confusion_matrix(yVal, y_pred_va)
vClasses = np.unique(yVal)
print("Val Set Accuracy:", round(vlaccuracy,2))
print("")
print("Val Set Metrics:\n{}".format(classification_report(yVal, y_pred_va)))
print("")
# Plot confusion matrix
fig, ax = plt.subplots()
sns.heatmap(vCM, annot=True, fmt='d', ax=ax, cmap=plt.cm.Blues,
cbar=False)
ax.set(xlabel="Pred", ylabel="True", xticklabels=vClasses,
yticklabels=vClasses, title="Confusion matrix")
plt.yticks(rotation=0)
|
{"hexsha": "ccb4c96312e9d701743669f23a16682c6f2e7454", "size": 4030, "ext": "py", "lang": "Python", "max_stars_repo_path": "NLP Model Code.py", "max_stars_repo_name": "keenanbernard/Data", "max_stars_repo_head_hexsha": "ae3460f02ac913e5482c7b3e63d8760d5d41dbfc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NLP Model Code.py", "max_issues_repo_name": "keenanbernard/Data", "max_issues_repo_head_hexsha": "ae3460f02ac913e5482c7b3e63d8760d5d41dbfc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NLP Model Code.py", "max_forks_repo_name": "keenanbernard/Data", "max_forks_repo_head_hexsha": "ae3460f02ac913e5482c7b3e63d8760d5d41dbfc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9821428571, "max_line_length": 120, "alphanum_fraction": 0.6970223325, "include": true, "reason": "import numpy", "num_tokens": 1000}
|
from amfe.io import AmfeMeshConverter, GidJsonMeshReader
from amfe.tools import amfe_dir
from amfe.material import KirchhoffMaterial
from amfe.component import StructuralComponent
from amfe.neumann import FixedDirectionNeumann
import logging
import numpy as np
from amfe.mesh import Mesh
# Units:
# Length: mm
# Mass: g
# Time: s
#
# Derived Units:
# Force: g mm s-2 = µN
# Stiffness: g s-2 mm-1 = Pa
# velocity: mm/s
# acceleration: mm/s^2
# density: g/mm3
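# (check: g mm^-1 s^-2 = 1e-3 kg / 1e-3 m / s^2 = kg m^-1 s^-2 = Pa)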
E_alu = 70e6
nu_alu = 0.34
rho_alu = 2.7e-3
logging.basicConfig(level=logging.DEBUG)
input_file = amfe_dir('meshes/amfe_json/simple_beam/simple_beam.json')
my_mesh = GidJsonMeshReader(input_file, AmfeMeshConverter()).parse()
my_material = KirchhoffMaterial(E_alu, nu_alu, rho_alu, thickness=10)
my_component = StructuralComponent(my_mesh)
my_component.assign_material(my_material, 'Quad8', 'S', 'shape')
my_neumann = FixedDirectionNeumann(np.array([0, 1]), time_func = lambda t: 2)
my_component.assign_neumann('Neumann0', my_neumann, ['right_boundary'], '_groups')
my_constraint = my_component.constraints.create_dirichlet_constraint()
fixed_nodeids = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=int)
my_component.assign_constraint('Dirichlet0', my_constraint, fixed_nodeids, '_nodeids', 'elim')
print('END')
|
{"hexsha": "71bdf5847ae2da927b7801f8a81f4d9cce6f30bb", "size": 1305, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/simple_beam/simple_beam.py", "max_stars_repo_name": "ma-kast/AMfe", "max_stars_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/simple_beam/simple_beam.py", "max_issues_repo_name": "ma-kast/AMfe", "max_issues_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/simple_beam/simple_beam.py", "max_forks_repo_name": "ma-kast/AMfe", "max_forks_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1875, "max_line_length": 94, "alphanum_fraction": 0.7540229885, "include": true, "reason": "import numpy", "num_tokens": 397}
|
import time
import numpy as np
import pandas as pd
import pickle
import more_itertools as mit
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import pandas as pd
from itertools import combinations
from scipy import stats
# from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import statsmodels.api as sm
import scipy.stats
import os
import sys
from skimage import io
import utils.general_utils as general_utils
import utils.plot_utils as plot_utils
import utils.list_twoP_exp as list_twoP_exp
import utils.list_innervation_MCFO as list_innervation_MCFO
import utils.sync_utils as sync_utils
import utils.math_utils as math_utils
# x=[0,1,0,0,0,0,0,2,0]
# y=[0,2,0,0,0,0,0,1,0]
# slope_ss, intercept_ss, Corr_coef_ss, p_value_ss, _ = math_utils.linear_regress(x,y)
# print('slope_ss, intercept_ss, Corr_coef_ss, p_value_ss', slope_ss, intercept_ss, Corr_coef_ss, p_value_ss)
# sys.exit(0)
assign_ROI_for_comparison={}
# assign_ROI_for_comparison.update({'SS29579':[0,1,2,3,4,5]})
assign_ROI_for_comparison.update({'SS29579':[0,1,2,3]})
assign_ROI_for_comparison.update({'SS49172':[0,1,2]})
assign_ROI_for_comparison.update({'SS31480':[0,2]})
assign_ROI_for_comparison.update({'SS34574':[0,1]})
assign_ROI_for_comparison.update({'SS29893':[2,3]})
assign_ROI_for_comparison.update({'SS51046':[0,1]})
assign_ROI_for_comparison.update({'SS42740':[0,1]})
assign_ROI_for_comparison.update({'R70H06':[0,1]})
assign_ROI_for_comparison.update({'SS25469':[0,1]})
# assign_ROI_for_comparison.update({'SS27485':[0,1,2,3]})
assign_ROI_for_comparison.update({'SS27485':[0,2]})
assign_ROI_for_comparison.update({'SS36131':[0,1]})
assign_ROI_for_comparison.update({'SS38624':[0,1,2,3]})
assign_ROI_for_comparison.update({'SS38592':[0,1,2,5]})
assign_ROI_for_comparison.update({'SS41822':[0,1]})
assign_ROI_for_comparison.update({'SS43652':[0,2,3,4]})
assign_ROI_for_comparison.update({'SS31232':[0,1]})
assign_ROI_for_comparison.update({'SS30303':[0,1]})
assign_ROI_for_comparison.update({'SS25451':[2,4]})
assign_ROI_for_comparison.update({'SS42749':[0,1]})
assign_ROI_for_comparison.update({'SS44270':[0,1]})
assign_ROI_for_comparison.update({'SS41605':[0,1]})
assign_ROI_for_comparison.update({'SS36112':[0,1]})
assign_ROI_for_comparison.update({'SS41806':[0,1]})
assign_ROI_for_comparison.update({'SS51029':[0,1,2,3]})
assign_ROI_for_comparison.update({'SS31456':[0,1]})
assign_ROI_for_comparison.update({'SS38631':[0,1]})
assign_ROI_for_comparison.update({'SS51017':[0,1]})
assign_ROI_for_comparison.update({'SS51021':[0,1]})
assign_ROI_for_comparison.update({'SS29633':[0,1,2]})
assign_ROI_for_comparison.update({'SS41815':[0,1]})
assign_ROI_for_comparison.update({'SS40134':[0,1]})
manual_ROI_order=[
'SS25451',
'SS30303',
'SS31232',
'R70H06',
'SS42740',
'SS38624',
'SS41605',
'SS41822',
'SS41806',
'SS29579',
'SS43652',
'SS44270',
'SS38592',
'SS38631',
'SS40134',
'SS41815',
'SS51017',
'SS51029',
'SS49172',
'SS51046',
'SS29893',
'SS34574',
'SS31456',
'SS29633',
'SS36112',
'SS36131',
'SS25469',
'SS27485',
'SS31480',
'SS42749',
'SS51021',
]
experiments=list_twoP_exp.TwoP_recordings_sparseLine_list
VNC_neurites_filelist=list_innervation_MCFO.VNC_neurites_filelist
mcfo_Gal4name_list=[]
for traced_file_dir in VNC_neurites_filelist:
if len(traced_file_dir)>9:
Gal4_name = traced_file_dir.split('/')[7][4:]
else:
Gal4_name=traced_file_dir
mcfo_Gal4name_list.append(Gal4_name)
print('mcfo_Gal4name_list', mcfo_Gal4name_list)
def make_new_pair_ID_list(old_id_list):
# print('old_id_list', old_id_list)
new_id_list=[]
for i, roi_pair_id in enumerate(old_id_list):
pair_roi=roi_pair_id.split(' ')[0]+' '+roi_pair_id.split(' ')[1]+'\n'+'-'+'\n'+roi_pair_id.split(' ')[2]
# print('pair_roi', pair_roi)
new_id_list.append(pair_roi)
return new_id_list
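# e.g. 'SS29579 0 3' becomes 'SS29579 0\n-\n3' for compact two-line axis labels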
def sorting_roiID_correspondingMat_based_on_an_order(roi_id_list, corrspd_list, base_of_order_list, rename_ID_into_ROI=True):
print('roi_id_list', roi_id_list)
if rename_ID_into_ROI==True:
ressembled_name_for_baseOrder_list=[]
for i, id_name in enumerate(base_of_order_list):
ressembled_name=id_name.split(' ')[0]+'-ROI#'+id_name.split(' ')[1]
ressembled_name_for_baseOrder_list.append(ressembled_name)
else:
ressembled_name_for_baseOrder_list=base_of_order_list
numbers_for_old_order=[]
for i, id_name in enumerate(roi_id_list):
id_name_Gal4=id_name.split(' ')[0]
print(id_name_Gal4)
if id_name_Gal4 in ressembled_name_for_baseOrder_list:
numbers_for_old_order.append(ressembled_name_for_baseOrder_list.index(id_name_Gal4))
else:
print(id_name_Gal4, 'not in ressembled_name_for_baseOrder_list')
print('Please recheck if base order list has all the ID of roi_id_list!!')
sys.exit(0)
# print('numbers_for_old_order', numbers_for_old_order)
# print('len roi_id_list', len(roi_id_list))
# print('len numbers_for_old_order', len(numbers_for_old_order))
zipped_id_lists = zip(numbers_for_old_order, roi_id_list)
# print('zipped_id_lists', zipped_id_lists)
sorted_zipped_id_lists = sorted(zipped_id_lists)
sorted_new_roi_id_list = [element for _, element in sorted_zipped_id_lists]
# print('sorted_zipped_id_lists', sorted_zipped_id_lists)
zipped_corrspd_lists = zip(numbers_for_old_order, corrspd_list)
sorted_zipped_corrspd_lists = sorted(zipped_corrspd_lists)
sorted_new_corrspd_list = [element for _, element in sorted_zipped_corrspd_lists]
# print('sorted_new_roi_id_list', sorted_new_roi_id_list)
return sorted_new_roi_id_list, sorted_new_corrspd_list
def activity_symmetricity_check_betwn_ROIpair(trace_set, ROI_pair):
    # normalise each trace to its maximum before regressing one against the other
    trace1 = math_utils.norm_to_max(trace_set[ROI_pair[0]], percentile_th_to_norm=100)
    trace2 = math_utils.norm_to_max(trace_set[ROI_pair[1]], percentile_th_to_norm=100)
slope_ss, intercept_ss, Corr_coef_ss, p_value_ss, _ = math_utils.linear_regress(trace1,trace2)
Corr_coef_ss_pear=scipy.stats.pearsonr(trace1, trace2)
model = sm.OLS(trace2, trace1).fit()
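    # no intercept column is added here, so params[0] is the slope of a fit through the origin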
model_summary = model.summary()
r2_sm=model.rsquared
p_value_sm=model.pvalues[0]
slope_sm=model.params[0]
predictions = model.predict(trace1)
r2_sk=r2_score(trace2, predictions)
r2=r2_sm
p_value=p_value_sm
slope=slope_sm
Corr_coef=Corr_coef_ss
print('r2_sm', r2_sm)
print('r2_sk', r2_sk)
print('Corr_coef_ss_pear', Corr_coef_ss_pear)
print('Corr_coef_ss', Corr_coef_ss)
print('slope', slope)
print('intercept_ss', intercept_ss)
print('Corr_coef', Corr_coef)
print('p_value', p_value)
return r2, slope, Corr_coef, p_value
def plot_scatter(data1, data2, x_y_name=['x', 'y'], datapoint_labels=[], savedir=None, filename=None):
slope_ss, intercept_ss, Corr_coef_ss, p_value_ss, _ = math_utils.linear_regress(data1,data2)
data1_intcp = sm.add_constant(data1)
model = sm.OLS(data2, data1_intcp).fit()
model_summary = model.summary()
    r2_sm = model.rsquared
    # sm.add_constant prepends the intercept column, so index 1 holds the slope
    slope_sm = model.params[1]
    p_value_sm = model.pvalues[1]
predictions = model.predict(data1_intcp)
r2_sk=r2_score(data2, predictions)
print('model_summary', model_summary)
print('model.params', model.params)
print('model.pvalues', model.pvalues)
print('p_value_ss', p_value_ss)
print('slope_ss', slope_ss)
    r2 = r2_sm
    slope = slope_sm
    p_value = p_value_sm
    intercept = intercept_ss
    Corr_coef = Corr_coef_ss
print('r2_sm', r2_sm)
print('r2_sk', r2_sk)
print('slope', slope)
print('intercept', intercept)
print('Corr_coef', Corr_coef)
print('p_value', p_value)
interested_ROI_to_label=[
'SS29579',
'SS34574',
'SS51046',
'SS25469',
'SS42740',
'SS27485',
'SS31232',
'SS36112',
# 'SS51021',
# 'SS42749',
# 'SS41822',
# # 'SS38592',
# 'SS36131',
# 'SS29633',
]
fig = plt.figure(facecolor='white', figsize=(5,5), dpi=300)
fig.suptitle(filename, color='k')
fig.subplots_adjust(wspace = 0.01, hspace=0.01, left=0.17, right = 0.83, bottom = 0.17 , top = 0.83)
plt.scatter(data1, data2, s=7, facecolors='none', edgecolors='gray')
plt.plot(data1, predictions, color='k')
    plt.text(np.nanmin(data1), 0.80 * np.nanmax(data2), 'Slope = ' + str(round(slope, 2)) + '\nR^2 = ' + str(round(r2, 2)) + '\nP value = ' + str(round(p_value, 2)))
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel(x_y_name[0])
plt.ylabel(x_y_name[1])
for i, txt in enumerate(datapoint_labels):
if txt[:-4] in interested_ROI_to_label:
plt.annotate(txt, (data1[i], data2[i]))
plt.scatter(data1[i], data2[i], s=7, facecolors='r', edgecolors='r')
plt.savefig(savedir+filename+'.pdf')
plt.savefig(savedir+filename+'.png')
plt.clf()
plt.close(fig)
return
##main##
NAS_Dir=general_utils.NAS_Dir
NAS_AN_Proj_Dir=general_utils.NAS_AN_Proj_Dir
JRC_MCFO_dir=NAS_AN_Proj_Dir + '04_mcfo_traced_singleAN_exp/'
VNC_dir= JRC_MCFO_dir+'VNC/'
T1_R_vnc_name='T1_R_VNC.nrrd'
T2_R_vnc_name='T2_R_VNC.nrrd'
T3_R_vnc_name='T3_R_VNC.nrrd'
T1_L_vnc_name='T1_L_VNC.nrrd'
T2_L_vnc_name='T2_L_VNC.nrrd'
T3_L_vnc_name='T3_L_VNC.nrrd'
T1_R_vnc_mask = general_utils.read_nrrd(VNC_dir, T1_R_vnc_name)
T2_R_vnc_mask = general_utils.read_nrrd(VNC_dir, T2_R_vnc_name)
T3_R_vnc_mask = general_utils.read_nrrd(VNC_dir, T3_R_vnc_name)
T1_L_vnc_mask = general_utils.read_nrrd(VNC_dir, T1_L_vnc_name)
T2_L_vnc_mask = general_utils.read_nrrd(VNC_dir, T2_L_vnc_name)
T3_L_vnc_mask = general_utils.read_nrrd(VNC_dir, T3_L_vnc_name)
T1_R_vnc_mask=np.transpose(T1_R_vnc_mask, (2,1,0))
T2_R_vnc_mask=np.transpose(T2_R_vnc_mask, (2,1,0))
T3_R_vnc_mask=np.transpose(T3_R_vnc_mask, (2,1,0))
T1_L_vnc_mask=np.transpose(T1_L_vnc_mask, (2,1,0))
T2_L_vnc_mask=np.transpose(T2_L_vnc_mask, (2,1,0))
T3_L_vnc_mask=np.transpose(T3_L_vnc_mask, (2,1,0))
nrrd_T1T2T3_R_mask_stacks=[
T1_R_vnc_mask,
T2_R_vnc_mask,
T3_R_vnc_mask
]
nrrd_T1T2T3_L_mask_stacks=[
T1_L_vnc_mask,
T2_L_vnc_mask,
T3_L_vnc_mask
]
Visual_check_line_T1T2T3={}
Visual_check_line_T1T2T3.update({'SS46233':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'R85A11':[]})
Visual_check_line_T1T2T3.update({'SS42008':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'SS40619':[]})
# Visual_check_line_T1T2T3.update({'SS29893':['catch as SS34574']})
Visual_check_line_T1T2T3.update({'SS29621':[]})
Visual_check_line_T1T2T3.update({'SS28596':['T1']})
Visual_check_line_T1T2T3.update({'SS51038':[]})
# Visual_check_line_T1T2T3.update({'R70H06':['catch as SS42740']})
# Visual_check_line_T1T2T3.update({'SS41605':['catch as SS44270']})
Visual_check_line_T1T2T3.update({'SS31219':['T1', 'T2', 'T3']}) #each neuron per leg neuromere
Visual_check_line_T1T2T3.update({'MAN':['T2', 'T3']})
Visual_check_line_T1T2T3.update({'SS36118':[]})
Visual_check_line_T1T2T3.update({'SS40489':['T1', 'T2', 'T3']}) #each neuron per leg neuromere
Visual_check_line_T1T2T3.update({'SS45363':[]})
Visual_check_line_T1T2T3.update({'SS45605':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'SS52147':[]})
Visual_check_line_T1T2T3.update({'R36G04':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'R30A08':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'R39G01':['T1', 'T2', 'T3']})
Visual_check_line_T1T2T3.update({'R69H10':[]})
Visual_check_line_T1T2T3.update({'R87H02':['T3']})
Visual_check_T1T2T3_list=[]
for i, v in Visual_check_line_T1T2T3.items():
Visual_check_T1T2T3_list.append(i)
ROI_id_list=[]
lateralization_value_allROI=[]
r2_symm_list_allROI=[]
slope_symm_list_allROI=[]
Corr_coef_symm_list_allROI=[]
p_value_symm_list_allROI=[]
unilateralized_ratio_list_allROI=[]
bilateralized_ratio_list_allROI=[]
experiments_group_per_fly=general_utils.group_expList_per_fly(experiments)
# print('experiments_group_per_fly', experiments_group_per_fly)
for exp_lists_per_fly in experiments_group_per_fly:
# print('exp_lists_per_fly', exp_lists_per_fly)
gapFree_GC_fly=[]
Gal4=exp_lists_per_fly[0][1].split('-')[0]
print('Gal4', Gal4)
if not Gal4 in Visual_check_T1T2T3_list:
for date, genotype, fly, recrd_num in exp_lists_per_fly:
fly_beh=fly[0].upper()+fly[1:]
flyDir = NAS_AN_Proj_Dir +'03_general_2P_exp/'+ Gal4 +'/2P/'+ date+'/'+genotype+'-'+fly+'/'
outDir_AN_recrd=flyDir+genotype+'-'+fly+'-'+recrd_num+'/output'
outDirGC6_axoid = outDir_AN_recrd + '/GC6_auto/final/'
GC_set_temp = general_utils.readGCfile(outDirGC6_axoid)
if np.isnan(GC_set_temp).any():
                print('Replacing NaN with interpolation')
GC_set=sync_utils.replace_nan_with_interp(GC_set_temp)
else:
                print('No NaN detected. Not replacing NaN')
GC_set=GC_set_temp
GC_raw_datafreq=len(GC_set[0])/248 #s
for i, GC_trace in enumerate(GC_set):
if len(gapFree_GC_fly)!=len(GC_set):
gapFree_GC_fly.append([])
GC_trace=math_utils.smooth_data(GC_trace, windowlen=int(GC_raw_datafreq*0.9))
# GC_trace=math_utils.norm_to_max(GC_trace, percentile_th_to_norm=100)
gapFree_GC_fly[i].extend(GC_trace)
target_ROIs=assign_ROI_for_comparison[Gal4]
combinations_ROIs = list(combinations(target_ROIs, 2))
if Gal4=='SS38592':
combinations_ROIs=[(0,5),(1,5),(2,5)]
if Gal4=='SS38624':
combinations_ROIs=[(0,2),(0,3),(1,2),(1,3)]
if Gal4=='SS43652':
combinations_ROIs=[(0,3),(0,4),(2,3),(2,4)]
if Gal4=='SS51029':
combinations_ROIs=[(0,2),(0,3),(1,2),(1,3)]
if Gal4=='SS49172':
combinations_ROIs=[(0,1),(0,2)]
if Gal4=='SS29579':
combinations_ROIs=[(0,2),(0,3),(1,2),(1,3)]
# combinations_ROIs=[(0,3)]
print('combinations_ROIs', combinations_ROIs)
for i, roi_pair in enumerate(combinations_ROIs):
            # the fitted values also depend on the proportion of baseline datapoints;
            # ideally balance the amount of active vs. baseline data
r2_symm, slope_symm, Corr_coef_symm, p_value_symm=activity_symmetricity_check_betwn_ROIpair(gapFree_GC_fly, roi_pair)
ROI_id_list.append(Gal4+' '+str(roi_pair[0])+' '+str(roi_pair[1]))
r2_symm_list_allROI.append(r2_symm)
slope_symm_list_allROI.append(slope_symm)
Corr_coef_symm_list_allROI.append(Corr_coef_symm)
p_value_symm_list_allROI.append(p_value_symm)
        ## Analyze the lateralization of the VNC innervation
R_intsct_px_list=[]
L_intsct_px_list=[]
        ## Substitute driver lines without their own MCFO image with an equivalent line that has one
if Gal4=='SS41605':
Gal4='SS44270'
if Gal4=='SS29893':
Gal4='SS34574'
if Gal4=='R70H06':
Gal4='SS42740'
Gal4_idx_in_mcfp_list=mcfo_Gal4name_list.index(Gal4)
traced_file_dir=VNC_neurites_filelist[Gal4_idx_in_mcfp_list]
traced_neurites_stack = np.asarray(io.imread(traced_file_dir))
traced_neurites_stack[traced_neurites_stack>0]=1
traced_neurite_px_count=np.count_nonzero(traced_neurites_stack>0)
for i, vnc_r_mask in enumerate(nrrd_T1T2T3_R_mask_stacks):
r_intrsct_stacks = traced_neurites_stack*vnc_r_mask
r_intrsct_px_count = np.count_nonzero(r_intrsct_stacks>0)
R_intsct_px_list.append(r_intrsct_px_count)
for i, vnc_l_mask in enumerate(nrrd_T1T2T3_L_mask_stacks):
l_intrsct_stacks = traced_neurites_stack*vnc_l_mask
l_intrsct_px_count = np.count_nonzero(l_intrsct_stacks>0)
L_intsct_px_list.append(l_intrsct_px_count)
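        # lateralization index: sum of |right - left| pixel counts over T1-T3,
        # normalised by the total traced-neurite pixel count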
T1_diff_px_RL=abs(R_intsct_px_list[0]-L_intsct_px_list[0])
T2_diff_px_RL=abs(R_intsct_px_list[1]-L_intsct_px_list[1])
T3_diff_px_RL=abs(R_intsct_px_list[2]-L_intsct_px_list[2])
unilateralized_ratio=(T1_diff_px_RL+T2_diff_px_RL+T3_diff_px_RL)/traced_neurite_px_count
bilateralized_ratio=1-unilateralized_ratio
else:
continue
    # repeat the values so the list length matches the number of ROI pairs in the dFF data
for i in range(0, len(combinations_ROIs)):
unilateralized_ratio_list_allROI.append(unilateralized_ratio)
bilateralized_ratio_list_allROI.append(bilateralized_ratio)
print('ROI_id_list', ROI_id_list)
new_ROI_id_list=make_new_pair_ID_list(ROI_id_list)
print('new_ROI_id_list', new_ROI_id_list)
GCsymm_morphoLterl_summary = NAS_AN_Proj_Dir + 'output/FigS8-morphoSymmetry-activitySymmetry/plots/'
if not os.path.exists(GCsymm_morphoLterl_summary):
os.makedirs(GCsymm_morphoLterl_summary)
reordered_ROI_ID_list, reordered_Corr_coef_symm_list=sorting_roiID_correspondingMat_based_on_an_order(new_ROI_id_list, Corr_coef_symm_list_allROI, manual_ROI_order, rename_ID_into_ROI=False)
reordered_ROI_ID_list, reordered_bilateralized_ratio_list=sorting_roiID_correspondingMat_based_on_an_order(new_ROI_id_list, bilateralized_ratio_list_allROI, manual_ROI_order, rename_ID_into_ROI=False)
# plot_utils.plot_matrix(new_ROI_id_list, ['r-squared'], r2_symm_list_allROI, second_x_list=p_value_symm_list_allROI, roi_seperation_marker=' ', savedir=GCsymm_morphoLterl_summary, title='dFF_r2_betwn_ROIpair', PlotMethod='other', unit=' ', cmap='BuPu')
# plot_utils.plot_matrix(new_ROI_id_list, ['slope'], slope_symm_list_allROI, second_x_list=p_value_symm_list_allROI, roi_seperation_marker=' ', savedir=GCsymm_morphoLterl_summary, title='dFF_slope_betwn_ROIpair', PlotMethod='other', unit=' ', cmap='BuPu')
plot_utils.plot_matrix(reordered_ROI_ID_list, ['Corr. coef.'], reordered_Corr_coef_symm_list, second_x_list=p_value_symm_list_allROI, roi_seperation_marker=' ', savedir=GCsymm_morphoLterl_summary, title='dFF_corr_coef_betwn_ROIpair', Gal4_x_list_reformat=True, PlotMethod='other', unit=' ', cmap='BuPu')
# plot_utils.plot_matrix(new_ROI_id_list, ['Unilateralized_ratio'], unilateralized_ratio_list_allROI, roi_seperation_marker=' ',savedir=GCsymm_morphoLterl_summary, title='UnilateralizationIdx_all_Gal4', PlotMethod='other', unit=' ', cmap='BuPu')
plot_utils.plot_matrix(reordered_ROI_ID_list, ['bilateralization_ratio'], reordered_bilateralized_ratio_list, roi_seperation_marker=' ',savedir=GCsymm_morphoLterl_summary, title='BilateralizationIdx_all_Gal4', PlotMethod='other', Gal4_x_list_reformat=True, unit=' ', cmap='BuPu')
# plot_scatter(unilateralized_ratio_list_allROI, Corr_coef_symm_list_allROI, x_y_name=['Unilateralization ratio', 'Corr. coef.'], datapoint_labels=ROI_id_list, savedir=GCsymm_morphoLterl_summary, filename='morphoUnilatrl-GCsymmCorr')
plot_scatter(bilateralized_ratio_list_allROI, Corr_coef_symm_list_allROI, x_y_name=['bilateralization ratio', 'Corr. coef.'], datapoint_labels=ROI_id_list, savedir=GCsymm_morphoLterl_summary, filename='morphoBilatrl-GCsymmCorr' )
# plot_scatter(bilateralized_ratio_list_allROI, slope_symm_list_allROI, x_y_name=['bilateralization ratio', 'slope'], datapoint_labels=ROI_id_list, savedir=GCsymm_morphoLterl_summary, filename='morphoBilatrl-GCsymmSlope' )
# plot_scatter(bilateralized_ratio_list_allROI, r2_symm_list_allROI, x_y_name=['bilateralization ratio', 'r-squared'], datapoint_labels=ROI_id_list, savedir=GCsymm_morphoLterl_summary, filename='morphoBilatrl-GCsymmRsqured' )
|
{"hexsha": "98c723b074c1c0a3c08bc10e1b39b09d2ef5564d", "size": 18669, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts_for_public/FigS8-morphoLateralization-activitySymmetry.py", "max_stars_repo_name": "NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline", "max_stars_repo_head_hexsha": "438b9db15765bf26581ecd4b8a1f93e8a844ebbd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts_for_public/FigS8-morphoLateralization-activitySymmetry.py", "max_issues_repo_name": "NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline", "max_issues_repo_head_hexsha": "438b9db15765bf26581ecd4b8a1f93e8a844ebbd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts_for_public/FigS8-morphoLateralization-activitySymmetry.py", "max_forks_repo_name": "NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline", "max_forks_repo_head_hexsha": "438b9db15765bf26581ecd4b8a1f93e8a844ebbd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4055374593, "max_line_length": 303, "alphanum_fraction": 0.7745460389, "include": true, "reason": "import numpy,import scipy,from scipy,import statsmodels", "num_tokens": 6044}
|
import numpy as np
def np_sma(data):
    """Simple moving average: the arithmetic mean of the values in `data`."""
    return np.sum(data) / len(data)
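# Minimal usage sketch (illustrative; not part of the original module): a
# 3-point simple moving average over a toy price series.
if __name__ == '__main__':
    prices = [1.0, 2.0, 3.0, 4.0, 5.0]
    window = 3
    sma = [np_sma(prices[i - window:i]) for i in range(window, len(prices) + 1)]
    print(sma)  # [2.0, 3.0, 4.0]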
|
{"hexsha": "ed8d69fe4ea110682181005edf32caa06186b1bf", "size": 74, "ext": "py", "lang": "Python", "max_stars_repo_path": "indicators/sma.py", "max_stars_repo_name": "Tiqur/live-crypto-alerts", "max_stars_repo_head_hexsha": "860fe73c9f2a960b398d132c2eb2c3ee333aa968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "indicators/sma.py", "max_issues_repo_name": "Tiqur/live-crypto-alerts", "max_issues_repo_head_hexsha": "860fe73c9f2a960b398d132c2eb2c3ee333aa968", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indicators/sma.py", "max_forks_repo_name": "Tiqur/live-crypto-alerts", "max_forks_repo_head_hexsha": "860fe73c9f2a960b398d132c2eb2c3ee333aa968", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-11T09:17:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T09:17:25.000Z", "avg_line_length": 14.8, "max_line_length": 35, "alphanum_fraction": 0.6756756757, "include": true, "reason": "import numpy", "num_tokens": 21}
|
module MLib.Prelude.RelProps where
open import MLib.Prelude.FromStdlib
import Relation.Binary.Indexed as I
open FE using (cong)
import Data.Product.Relation.SigmaPropositional as OverΣ
Σ-bij : ∀ {a b c} {A : Set a} {B : A → Set b} {C : A → Set c} → (∀ x → B x ↔ C x) → Σ A B ↔ Σ A C
Σ-bij pw = record
{ to = ≡.→-to-⟶ (uncurry λ x y → x , Inverse.to (pw x) ⟨$⟩ y)
; from = ≡.→-to-⟶ (uncurry λ x y → x , Inverse.from (pw x) ⟨$⟩ y)
; inverse-of = record
{ left-inverse-of = uncurry λ x y → OverΣ.to-≡ (≡.refl , Inverse.left-inverse-of (pw x) y)
; right-inverse-of = uncurry λ x y → OverΣ.to-≡ (≡.refl , Inverse.right-inverse-of (pw x) y)
}
}
Σ-↞ : ∀ {a b c} {A : Set a} {B : A → Set b} {C : A → Set c} → (∀ x → B x ↞ C x) → Σ A B ↞ Σ A C
Σ-↞ f = record
{ to = ≡.→-to-⟶ (uncurry λ x y → x , LeftInverse.to (f x) ⟨$⟩ y)
; from = ≡.→-to-⟶ (uncurry λ x y → x , LeftInverse.from (f x) ⟨$⟩ y)
; left-inverse-of = uncurry λ x y → OverΣ.to-≡ (≡.refl , LeftInverse.left-inverse-of (f x) y)
}
Σ-↞′ :
∀ {a a′ b β} {A : Set a} {A′ : Set a′} {B-setoid : A → Setoid b β} (f : A ↞ A′)
→ LeftInverse (OverΣ.setoid B-setoid) (OverΣ.setoid (B-setoid ∘ (LeftInverse.from f ⟨$⟩_)))
Σ-↞′ {A = A} {A′} {B-setoid} f = record
{ to = record
{ _⟨$⟩_ = uncurry λ x y → LeftInverse.to f ⟨$⟩ x , ≡.subst B (≡.sym (LeftInverse.left-inverse-of f _)) y
; cong = uncurry λ {≡.refl y → ≡.refl , subst≈ _ _ (≡.sym (LeftInverse.left-inverse-of f _)) y}
}
; from = record
{ _⟨$⟩_ = uncurry λ x y → LeftInverse.from f ⟨$⟩ x , y
; cong = λ { (≡.refl , q) → ≡.refl , q }
}
; left-inverse-of = uncurry λ x y → OverΣ.symmetric sym (OverΣ.subst (≡.sym (LeftInverse.left-inverse-of f _)) refl)
}
where
module B x = Setoid (B-setoid x)
module B′ {x} = Setoid (B-setoid x)
open B using () renaming (Carrier to B)
open B′
subst≈ : ∀ {i j} (x y : B i) (p : i ≡ j) → x ≈ y → ≡.subst B p x ≈ ≡.subst B p y
subst≈ x y ≡.refl q = q
|
{"hexsha": "6209fecb1c83bcdc3ebcf0eeac237ade0ae7417b", "size": 1975, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/MLib/Prelude/RelProps.agda", "max_stars_repo_name": "bch29/agda-matrices", "max_stars_repo_head_hexsha": "e26ae2e0aa7721cb89865aae78625a2f3fd2b574", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MLib/Prelude/RelProps.agda", "max_issues_repo_name": "bch29/agda-matrices", "max_issues_repo_head_hexsha": "e26ae2e0aa7721cb89865aae78625a2f3fd2b574", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MLib/Prelude/RelProps.agda", "max_forks_repo_name": "bch29/agda-matrices", "max_forks_repo_head_hexsha": "e26ae2e0aa7721cb89865aae78625a2f3fd2b574", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5, "max_line_length": 118, "alphanum_fraction": 0.5432911392, "num_tokens": 862}
|
@testset "Born-Mayer Unit Tests" begin
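# construct a Born-Mayer potential for an Ar-H pair and exercise the
# parameter / hyperparameter (de)serialization API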
A = 1.0u"eV"
ρ = 0.25u"bohr"
σ = 0.25u"bohr"
C = 1.0u"eV*Å"
D = 1.0u"eV*Å"
rcutoff = 2.0u"Å"
species = [:Ar, :H]
p = BornMayer(A, ρ, σ, C, D, rcutoff, species)
@test p isa EmpiricalPotential{NamedTuple{(:A, :ρ, :σ, :C, :D)},NamedTuple{(:rcutoff,)}}
@test get_rcutoff(p) == austrip(rcutoff)
@test get_species(p) == (:Ar, :H)
@test get_parameters(p) == (; A=austrip(A), ρ=austrip(ρ), σ = austrip(σ), C = austrip(C), D = austrip(D))
@test set_parameters(p, (; A=2.0, ρ=3.0, σ = 4.0, C = 5.0, D = 6.0)) == BornMayer(2.0, 3.0, 4.0, 5.0, 6.0, austrip(rcutoff), species)
@test serialize_parameters(p) == [austrip(A), austrip(ρ), austrip(σ), austrip(C), austrip(D)]
@test deserialize_parameters(p, [2.0, 3.0, 4.0, 5.0, 6.0]) == BornMayer(2.0, 3.0, 4.0, 5.0, 6.0, austrip(rcutoff), species)
@test get_hyperparameters(p) == (; rcutoff=austrip(rcutoff))
@test set_hyperparameters(p, (; rcutoff=1.0)) == BornMayer(austrip(A), austrip(ρ), austrip(σ), austrip(C), austrip(D), 1.0, species)
@test serialize_hyperparameters(p) == [austrip(rcutoff)]
@test deserialize_hyperparameters(p, [1.0]) == BornMayer(austrip(A), austrip(ρ), austrip(σ), austrip(C), austrip(D), 1.0, species)
r = @SVector[1.0, 1.0, 1.0]
R = norm(r)
@test potential_energy(R, p) isa Float64
@test force(R, r, p) isa SVector{3,Float64}
end
|
{"hexsha": "4e02d78d8d8dea68f69132c8523a329253069af0", "size": 1430, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/unit/empirical/bm.jl", "max_stars_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_stars_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-10-04T09:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T11:15:24.000Z", "max_issues_repo_path": "test/unit/empirical/bm.jl", "max_issues_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_issues_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2022-01-19T01:22:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T19:09:50.000Z", "max_forks_repo_path": "test/unit/empirical/bm.jl", "max_forks_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_forks_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-11T00:39:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T20:55:29.000Z", "avg_line_length": 44.6875, "max_line_length": 137, "alphanum_fraction": 0.6027972028, "num_tokens": 598}
|
\documentclass[modern]{aastex63}
\usepackage{amsmath}
\newcommand{\dd}{\ensuremath{\mathrm{d}}}
\newcommand{\diff}[2]{\frac{\dd #1}{\dd #2}}
% Affiliations
\newcommand{\flatironCCA}{Center for Computational Astrophysics, Flatiron Institute, 162 5th Ave, New York NY 10010, United States}
\newcommand{\stonybrook}{Department of Physics and Astronomy, Stony Brook University, Stony Brook NY 11794, United States}
\begin{document}
\title{Re-Weighting Existing Samples to a Population Analysis}
\author{Will M. Farr}
\email{will.farr@stonybrook.edu}
\email{wfarr@flatironinstitute.org}
\affiliation{\stonybrook}
\affiliation{\flatironCCA}
\author{Thomas A. Callister}
\email{thomas.callister@flatironinstitute.org}
\affiliation{\flatironCCA}
\begin{abstract} We show how to re-weight pre-existing parameter samples to
obtain samples distributed according to a hierarchical population analysis. We
discuss the behavior of the marginalized distribution for single-event
parameters in a hierarchical population analysis. \end{abstract}
\section*{ }
For an alternative presentation of essentially identical material, see
\citet{Hogg2010}.
It is a common problem in astronomy to have a collection of observations of some
objects from a population in need of a simultaneous analysis of the population
properties and object properties. Such analyses are called ``hierarchical''
because they naturally separate into several distinct ``levels.'' To be a bit
more mathematically precise: we are presented with a set of data,
%
\begin{equation}
D \equiv \left\{ d_i \mid i = 1, \ldots, N \right\},
\end{equation}
%
consisting of $N$ distinct data sets, $d_i$, each representing some measurement
of an object. Each object may have parameters, $\theta_i$, that are of
interest. We think that the set of parameters,
%
\begin{equation}
\Theta \equiv \left\{ \theta_i \mid i = 1, \ldots, N \right\},
\end{equation}
%
comes to us as fair draws\footnote{If the draws are not fair, then the sample is
said to suffer from \emph{selection effects}; in this case see
\citet{Loredo2004,Messenger2013,Mandel2019}. \citet{Loredo2004,Mandel2019} also
describe how to fit a \emph{rate} of objects from such observations.} from a
population distribution, which may in turn depend on some parameters,
$\lambda$:
%
\begin{equation}
\theta_i \sim p\left( \theta \mid \lambda \right).
\end{equation}
%
We assume that we know enough about the data generating, or measurement, process
that we can write down a probabilistic description of the data conditioned on
the parameters $\theta$; this function is commonly called the ``likelihood:''
%
\begin{equation}
d_i \sim p\left( d \mid \theta_i \right).
\end{equation}
%
(Note that $\theta_i$ can contain both parameters that are \emph{intrinsic} to
the object, and also parameters that describe the measurement process for that
object---detector noise levels, or calibration parameters, for example---and
that the population distribution can be both an \emph{intrinsic} population and
also a model of the \emph{distribution} of such measurement parameters.) A
graphical description of our hierarchical model appears in Figure \ref{fig:pgm}.
\begin{figure}
\begin{center}
\includegraphics[height=0.5\columnwidth]{pgm}
\end{center}
%
\caption{\label{fig:pgm} A graphical description of our hierarchical model.
Each node in the graph is a variable. Shaded nodes are \emph{observed}
variables whose values are conditioned on in the analysis. An arrow
connecting two nodes represents a distribution of the target node conditioned
on the source node's value.}
%
\end{figure}
If all these distributions are available to us, then it is straightforward to
sample over the joint distribution of $\Theta$ and $\lambda$ given the data $D$.
Impose a prior on $\lambda$, $p\left( \lambda \right)$; then\footnote{We are
here implicitly assuming that the data generating process is such that
\emph{once conditioned on parameters $\theta$} successive observations are
independent of each other. This is almost certainly false, but may be ``true
enough'' for our purposes, particularly if there are parameters describing
systematic or ``calibration'' effects in our instrument in each $\theta_i$.}
%
\begin{equation}
\label{eq:joint-posterior}
p\left( \Theta, \lambda \mid D \right) \propto \left[ \prod_{i=1}^N p\left( d_i \mid \theta_i \right) p\left( \theta_i \mid \lambda \right) \right] p\left( \lambda \right),
\end{equation}
%
and your favorite stochastic sampling method\texttrademark{} can be used to draw
samples in the (possibly high-dimensional) space of $\Theta$ and $\lambda$.
However, the more common situation is that we are provided instead with a
\emph{catalog} of objects and parameters already inferred from them according
to some prior. Thus, we have a set of $M$ samples,
%
\begin{equation}
\left\{ \theta_i^{(j)} \mid j = 1, \ldots, M \right\},
\end{equation}
%
where each sample is drawn from a posterior with a prior $p_0$:
%
\begin{equation}
\theta_i^{(j)} \sim p\left( d_i \mid \theta \right) p_0\left(\theta \right).
\end{equation}
A common trick in this situation \citep{Hogg2010} is to give up on sampling in
$\Theta$, and integrate the $\theta_i$ out of Eq. \eqref{eq:joint-posterior}:
%
\begin{equation}
p\left( \lambda \mid D \right) \propto \left[ \prod_{i=1}^N \int \dd \theta_i p\left( d_i \mid \theta_i \right) p\left( \theta_i \mid \lambda \right) \right] p\left( \lambda \right) \equiv p\left( D \mid \lambda \right) p\left( \lambda \right).
\end{equation}
%
The integrals inside the product can be approximated (up to ignorable constants)
using importance sampling with the samples from our catalog, $\theta_i^{(j)}$:
%
\begin{equation}
\label{eq:marginal-population}
p\left( \lambda \mid D \right) \propto \left[ \prod_{i=1}^N \left\langle \frac{p\left(\theta \mid \lambda \right)}{p_0\left( \theta \right)} \right\rangle_{\theta_i^{(j)}} \right] p\left( \lambda \right),
\end{equation}
%
where the average is taken over the samples $\theta_i^{(j)}$. Here the ratio of
the population distribution to the catalog prior is the \emph{importance weight}
for each of the $\theta_i^{(j)}$. Depending on the number of samples associated
to each object in the catalog and the relative widths of the likelihood, catalog
prior, and population, this procedure can go awry; but often it is good enough,
and its simplicity combined with the (often dramatic) reduction in
dimensionality (the components of $\Theta$ often dominate the number of degrees
of freedom in $\lambda$) argues for using it when possible.
Sometimes we are only interested in the population parameters, $\lambda$; in
this case the individual-object parameters $\Theta$ are a nuisance anyway, and
we need not worry about integrating them out. However, we are often interested
in \emph{both} the population and individual-level parameters; and sometimes we
are only using the population to improve estimates of individual-level
parameters by (partially) pooling information across observations
\citep{Lieu2017}. In this case we must recover samples of $\Theta$ after we
have samples from the marginal distribution over population parameters.
Recall that it is a theorem of probability that
%
\begin{equation}
\label{eq:joint-conditional-relation}
p\left( \Theta, \lambda \mid D \right) = p\left( \Theta \mid \lambda, D \right) p\left( \lambda \mid D \right).
\end{equation}
If we are given samples, $\lambda^{(k)}$, drawn from $p\left( \lambda \mid D \right)$,
%
\begin{equation}
\lambda^{(k)} \sim p\left( \lambda \mid D \right),
\end{equation}
%
then augmenting each $\lambda^{(k)}$ with a set $\Theta^{(k)}$, where each $\theta_i^{(k)}$
is drawn from
%
\begin{equation}
\label{eq:conditional-theta}
\theta_i^{(k)} \sim p\left( d_i \mid \theta \right) p\left( \theta \mid \lambda^{(k)} \right),
\end{equation}
%
will produce a draw from the joint distribution over $\Theta$ and $\lambda$.
This works because
%
\begin{equation}
p\left( d_i \mid \theta \right) p\left( \theta \mid \lambda^{(k)} \right) \propto p\left( \theta \mid \lambda, D \right),
\end{equation}
%
with the constant of proportionality \emph{independent} of $\Theta$ at fixed $D$
and $\lambda$. A draw from Eq.\ \eqref{eq:conditional-theta} can be
accomplished by choosing randomly one of the existing catalog samples
$\theta_i^{(j)}$ with weight, $w_i^{(j)}$, proportional to \citep{Hogg2010}
%
\begin{equation}
\label{eq:population-weights}
w_i^{(j)} \propto \frac{p\left( \theta_i^{(j)} \mid \lambda^{(k)} \right)}{p_0 \left( \theta_i^{(j)} \right)}.
\end{equation}
So, to summarize, here is the algorithm for sampling from the joint distribution
of $\Theta$ and $\lambda$ given $D$ and a catalog of samples, $\theta_i^{(j)}$
drawn from a catalog posterior with prior $p_0\left(\theta\right)$.
\begin{enumerate}
%
\item Use a stochastic sampler to draw samples $\lambda^{(k)}$ from Eq.\ \eqref{eq:marginal-population}.
%
\item For each sample, $\lambda^{(k)}$, and each object $i$, draw a random catalog
  sample, $\theta_i^{(k)}$, from the $\theta_i^{(j)}$ with weights given by Eq.\
  \eqref{eq:population-weights}.
%
\end{enumerate}
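For concreteness, a minimal \texttt{numpy} sketch of step 2 (the function and
variable names here are purely illustrative) is:
%
\begin{verbatim}
import numpy as np

def draw_theta(thetas, p_pop, p0):
    # thetas: catalog samples theta_i^(j) for one object, shape (M, dim)
    # p_pop:  p(theta_i^(j) | lambda^(k)) evaluated at each sample, shape (M,)
    # p0:     catalog prior density evaluated at each sample, shape (M,)
    w = p_pop / p0          # importance weights of Eq. (population-weights)
    w = w / np.sum(w)
    return thetas[np.random.choice(len(thetas), p=w)]
\end{verbatim}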
The pairs of $\lambda^{(k)}$, and the associated set of catalog draws,
$\Theta^{(k)}$, constitute a sample from the joint distribution on $\Theta$ and
$\lambda$ defined in Eq.\ \eqref{eq:joint-posterior}, and can be used to
estimate population properties, individual-event properties informed by a
population, correlations between population properties and individual-event
properties, etc.
In the case where we are not interested in samples $\lambda$ at all, we can also
integrate out $\lambda$ in Eq.\ \eqref{eq:joint-conditional-relation}. We will
need the properly-normalized version of Eq.\ \eqref{eq:conditional-theta} since
the normalization depends on $\lambda$:
%
\begin{equation}
p\left( \theta_i \mid \lambda, D \right) = \frac{p\left( d_i \mid \theta_i \right) p\left( \theta_i \mid \lambda\right)}{p\left( d_i \mid \lambda \right)} = \frac{p\left( d_i \mid \theta_i \right) p\left( \theta_i \mid \lambda\right)}{\int \dd \theta \, p\left( d_i \mid \theta\right) p\left( \theta \mid \lambda \right)}.
\end{equation}
%
Then the marginal distribution for $\theta_i$ is
%
\begin{equation}
\label{eq:theta-marginal}
p\left( \theta_i \mid D \right) \propto p\left( d_i \mid \theta_i \right) \int \dd \lambda \, \frac{1}{p\left( d_i \mid \lambda \right)} p\left( \theta_i \mid \lambda \right) p\left( \lambda \mid D \right).
\end{equation}
%
Note that the evidence for object $i$ modifies what otherwise would be the
posterior expectation over $\lambda$ of the population distribution for
$\theta_i$. Assuming, again, that we have posterior samples $\theta_i^{(k)}$
drawn from a catalog with prior $p_0\left(\theta \right)$ and posterior samples
for $\lambda$ drawn from the marginal posterior, $\lambda^{(l)} \sim p\left(
\lambda \mid D \right)$, this can be approximated as
%
\begin{equation}
\label{eq:theta-marginal-samples}
p\left( \theta_i \mid D \right) \propto p\left( d_i \mid \theta_i \right) \left\langle p\left( \theta_i \mid \lambda^{(l)} \right) \left[\left\langle \frac{p\left( \theta_i^{(k)} \mid \lambda^{(l)} \right)}{p_0 \left( \theta_i^{(k)} \right)} \right\rangle_{\theta_i^{(k)}} \right]^{-1} \right\rangle_{\lambda^{(l)}},
\end{equation}
%
or, expressed as importance weights for resampling the $\theta^{(k)}_i$
%
\begin{equation}
\label{eq:theta-marginal-weights}
w_i^{(k)} \propto \frac{1}{p_0\left( \theta_i^{(k)} \right)} \left\langle p\left( \theta_i^{(k)} \mid \lambda^{(l)} \right) \left[\left\langle \frac{p\left( \theta_i^{(k')} \mid \lambda^{(l)} \right)}{p_0 \left( \theta_i^{(k')} \right)} \right\rangle_{\theta_i^{(k')}} \right]^{-1} \right\rangle_{\lambda^{(l)}},
\end{equation}
%
where we have introduced the index $k'$ to the inner expectation value to
emphasize that it should be taken independently of the outer expectation over
samples of $\theta_i$. This expression is equivalent to Eq.\ (6) of
\citet{Callister2019}.
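Assuming \texttt{numpy} arrays, Eq.\ \eqref{eq:theta-marginal-weights} can be
transcribed directly (again with illustrative variable names):
%
\begin{verbatim}
# p_pop[k, l] = p(theta_i^(k) | lambda^(l)); p0[k] = p_0(theta_i^(k))
ratio = p_pop / p0[:, None]                      # shape (K, L)
evidence = ratio.mean(axis=0)                    # inner average over k'
w = (ratio / evidence[None, :]).mean(axis=1)     # outer average over l
w = w / w.sum()
\end{verbatim}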
Eq.\ \eqref{eq:theta-marginal} and its implementation in Eqs.\
\eqref{eq:theta-marginal-samples} and \eqref{eq:theta-marginal-weights} are
equivalent to imposing a prior on $\theta_i$ that comes from the population
distribution weighted by the ``leave one out'' posterior on
$\lambda$:\footnote{It seems like this must be a well-known fact---probably it
appears in \citet{Gelman2013}---but I am not familiar with it.}
%
\begin{equation}
p\left( \theta_i \mid D \right) \propto p\left( d_i \mid \theta_i \right) \int \dd \lambda \, p\left( \theta_i \mid \lambda \right) p\left( \lambda \mid D \backslash d_i \right).
\end{equation}
%
\clearpage
\bibliography{reweighting}
\end{document}
|
{"hexsha": "af9e584338d8bee27dc064f481317475e0cb11d3", "size": 12760, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "note/reweighting.tex", "max_stars_repo_name": "farr/Reweighting", "max_stars_repo_head_hexsha": "3762a0849c98799bb97d0748d803d05406645518", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-19T19:01:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-19T19:01:08.000Z", "max_issues_repo_path": "note/reweighting.tex", "max_issues_repo_name": "farr/Reweighting", "max_issues_repo_head_hexsha": "3762a0849c98799bb97d0748d803d05406645518", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "note/reweighting.tex", "max_forks_repo_name": "farr/Reweighting", "max_forks_repo_head_hexsha": "3762a0849c98799bb97d0748d803d05406645518", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.4349442379, "max_line_length": 324, "alphanum_fraction": 0.732523511, "num_tokens": 3760}
|
mutable struct PiecewiseYieldCurve{B <: BootstrapHelper, DC <: DayCount, P <: Interpolation, T <: BootstrapTrait, BT <: Bootstrap} <: InterpolatedCurve{P, T}
lazyMixin::LazyMixin
settlementDays::Int
referenceDate::Date
instruments::Vector{B}
dc::DC
interp::P
trait::T
accuracy::Float64
boot::BT
times::Vector{Float64}
dates::Vector{Date}
data::Vector{Float64}
errors::Vector{Function}
validCurve::Bool
end
function PiecewiseYieldCurve(referenceDate::Date, instruments::Vector{B}, dc::DC, interp::P, trait::T,
                             accuracy::Float64, boot::BT = IterativeBootstrap()) where {B <: BootstrapHelper, DC <: DayCount, P <: Interpolation, T <: BootstrapTrait, BT <: Bootstrap}
# get the initial length of instruments
n = length(instruments)
# create an initial state of the curve
pyc = PiecewiseYieldCurve(LazyMixin(),
0,
referenceDate,
instruments,
dc,
interp,
trait,
accuracy,
boot,
Vector{Float64}(undef, n + 1),
Vector{Date}(undef, n + 1),
Vector{Float64}(undef, n + 1),
Vector{Function}(undef, n + 1),
false)
# initialize the bootstrapping
initialize(pyc.boot, pyc)
return pyc
end
|
{"hexsha": "52e9ab11071fb20327bc14c0aaf7438e0108434a", "size": 1469, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/termstructures/yield/piecewise_yield_curve.jl", "max_stars_repo_name": "JuliaQuant/QuantLib.jl", "max_stars_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2016-03-07T07:29:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T09:43:02.000Z", "max_issues_repo_path": "src/termstructures/yield/piecewise_yield_curve.jl", "max_issues_repo_name": "JuliaQuant/QuantLib.jl", "max_issues_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/termstructures/yield/piecewise_yield_curve.jl", "max_forks_repo_name": "JuliaQuant/QuantLib.jl", "max_forks_repo_head_hexsha": "b1a806daa3b15b1f3705e36f716e66cc24c1dd5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2016-03-09T08:33:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T18:21:38.000Z", "avg_line_length": 34.1627906977, "max_line_length": 198, "alphanum_fraction": 0.5234853642, "num_tokens": 333}
|
"""
Implements Twin Delayed DDPG (TD3) from "Addressing Function Approximation Error in Actor-Critic Methods", Fujimoto et al., 2018.
The key differences from DDPG are:
1. Add noise to the target policy, serving as regularization that prevents overfitting to the current best policy.
2. Use a clipped double-Q function to avoid overestimating the Q value.
3. Add Gaussian noise to actions for exploration at training time.
"""
import copy
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchlib import deep_rl
from torchlib.common import convert_to_tensor, enable_cuda, FloatTensor
from torchlib.deep_rl.utils.replay.replay import TransitionReplayBuffer
from torchlib.deep_rl.utils.replay.sampler import StepSampler
from torchlib.utils.logx import EpochLogger
from torchlib.utils.timer import Timer
from torchlib.utils.weight import soft_update, hard_update
from tqdm.auto import tqdm
class Agent(deep_rl.BaseAgent):
def __init__(self, nets, learning_rate, **kwargs):
self.actor_module = nets['actor']
self.actor_optimizer = optim.Adam(self.actor_module.parameters(), lr=learning_rate)
self.critic_module = nets['critic']
self.critic_optimizer = optim.Adam(self.critic_module.parameters(), lr=learning_rate)
self.target_actor_module = copy.deepcopy(self.actor_module)
self.target_critic_module = copy.deepcopy(self.critic_module)
hard_update(self.target_actor_module, self.actor_module)
hard_update(self.target_critic_module, self.critic_module)
if enable_cuda:
self.actor_module.cuda()
self.critic_module.cuda()
self.target_actor_module.cuda()
self.target_critic_module.cuda()
@torch.no_grad()
def predict_batch(self, state):
state = convert_to_tensor(state.astype(np.float32))
return self.actor_module.forward(state).cpu().numpy()
def state_dict(self):
return {
'actor': self.actor_module.state_dict(),
'critic': self.critic_module.state_dict()
}
def load_state_dict(self, states):
self.actor_module.load_state_dict(states['actor'])
self.critic_module.load_state_dict(states['critic'])
def save_checkpoint(self, checkpoint_path):
torch.save(self.state_dict(), checkpoint_path)
def load_checkpoint(self, checkpoint_path):
state_dict = torch.load(checkpoint_path)
self.load_state_dict(state_dict)
def update(self, replay_buffer, num_updates, action_limit, policy_freq=2, batch_size=128, target_noise=0.2,
clip_noise=0.5, tau=5e-3, gamma=0.99):
for i in range(num_updates):
transition = replay_buffer.sample(batch_size)
s_batch, a_batch, s2_batch, r_batch, t_batch = convert_to_tensor(transition, location='gpu')
r_batch = r_batch.type(FloatTensor)
t_batch = t_batch.type(FloatTensor)
# get ground truth q value
with torch.no_grad():
target_action_noise = torch.clamp(torch.randn_like(a_batch) * target_noise, min=-clip_noise,
max=clip_noise)
target_action = torch.clamp(self.target_actor_module.forward(s2_batch) + target_action_noise,
min=-action_limit, max=action_limit)
target_q = self.target_critic_module.forward(state=s2_batch, action=target_action, minimum=True)
q_target = r_batch + gamma * target_q * (1 - t_batch)
# critic loss
q_values, q_values2 = self.critic_module.forward(s_batch, a_batch, minimum=False)
q_values_loss = F.mse_loss(q_values, q_target) + F.mse_loss(q_values2, q_target)
self.critic_optimizer.zero_grad()
q_values_loss.backward()
self.critic_optimizer.step()
if i % policy_freq == 0:
action = self.actor_module.forward(s_batch)
q_values = self.critic_module.forward(s_batch, action, minimum=False)[0]
loss = -torch.mean(q_values)
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
soft_update(self.target_critic_module, self.critic_module, tau)
soft_update(self.target_actor_module, self.actor_module, tau)
def train(self, env, exp_name, actor_noise=None,
prefill_steps=10000, num_epochs=1000, epoch_length=1000,
replay_pool_size=1000000, replay_buffer=None,
num_updates=10, policy_freq=2, batch_size=128,
target_noise=0.2, clip_noise=0.5, tau=5e-3, gamma=0.99,
log_dir=None, checkpoint_path=None, **kwargs):
logger = EpochLogger(output_dir=log_dir, exp_name=exp_name)
if checkpoint_path is None:
dummy_env = env.env_fns[0]()
checkpoint_path = os.path.join(logger.get_output_dir(), dummy_env.spec.id)
del dummy_env
best_mean_episode_reward = -np.inf
timer = Timer()
timer.reset()
# create action noise for exploration
if actor_noise is None:
actor_noise = lambda: np.random.randn(env.num_envs, *env.single_action_space.shape).astype(np.float32) * 0.1
# create replay buffer
if replay_buffer is None:
replay_buffer = TransitionReplayBuffer(capacity=replay_pool_size,
obs_shape=env.single_observation_space.shape,
obs_dtype=env.single_observation_space.dtype,
ac_shape=env.single_action_space.shape,
ac_dtype=env.single_action_space.dtype)
assert np.all(env.single_action_space.high[0] == env.single_action_space.high) and \
np.all(env.single_action_space.low[0] == env.single_action_space.low)
action_limit = env.single_action_space.high[0]
exploration_agent = deep_rl.RandomAgent(action_space=env.action_space)
sampler = StepSampler(prefill_steps=prefill_steps, logger=logger)
sampler.initialize(env, exploration_agent, replay_buffer)
total_timesteps = prefill_steps // env.num_envs * env.num_envs  # env steps consumed during prefill
exploration_agent.predict_batch = lambda state: np.clip(self.predict_batch(state) + actor_noise(),
-action_limit, action_limit)
for epoch in range(num_epochs):
for _ in tqdm(range(epoch_length), desc='Epoch {}/{}'.format(epoch + 1, num_epochs)):
sampler.sample(policy=exploration_agent)
self.update(replay_buffer, num_updates, action_limit, policy_freq, batch_size, target_noise,
clip_noise, tau, gamma)
total_timesteps += epoch_length * env.num_envs
# save best model
avg_return = logger.get_stats('EpReward')[0]
if avg_return > best_mean_episode_reward:
best_mean_episode_reward = avg_return
if checkpoint_path:
self.save_checkpoint(checkpoint_path)
# logging
logger.log_tabular('Time Elapsed', timer.get_time_elapsed())
logger.log_tabular('EpReward', with_min_and_max=True)
logger.log_tabular('EpLength', average_only=True, with_min_and_max=True)
logger.log_tabular('TotalSteps', total_timesteps)
logger.log_tabular('TotalEpisodes', sampler.get_total_episode())
logger.log_tabular('BestAvgReward', best_mean_episode_reward)
logger.log_tabular('Replay Size', len(replay_buffer))
logger.dump_tabular()
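# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the clipped double-Q target computed in
# Agent.update above, written in plain PyTorch. The toy actor/critics and
# tensor shapes below are illustrative stand-ins, not part of this module's
# API.
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, obs_dim, ac_dim, action_limit = 4, 3, 2, 1.0
    gamma, target_noise, clip_noise = 0.99, 0.2, 0.5
    s2 = torch.randn(batch, obs_dim)       # next states
    r = torch.randn(batch)                 # rewards
    done = torch.zeros(batch)              # terminal flags
    actor = torch.nn.Linear(obs_dim, ac_dim)
    q1 = torch.nn.Linear(obs_dim + ac_dim, 1)
    q2 = torch.nn.Linear(obs_dim + ac_dim, 1)
    with torch.no_grad():
        # target policy smoothing: clipped Gaussian noise on the target action
        a2 = actor(s2)
        noise = torch.clamp(torch.randn_like(a2) * target_noise, -clip_noise, clip_noise)
        a2 = torch.clamp(a2 + noise, -action_limit, action_limit)
        sa = torch.cat([s2, a2], dim=-1)
        # clipped double Q: take the elementwise minimum of the two critics
        target_q = torch.min(q1(sa), q2(sa)).squeeze(-1)
        q_target = r + gamma * target_q * (1 - done)
    print(q_target.shape)  # torch.Size([4])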
|
{"hexsha": "72d6ad51220ae5a127d0b660303edd1b59c480e4", "size": 7854, "ext": "py", "lang": "Python", "max_stars_repo_path": "torchlib/deep_rl/algorithm/td3/agent.py", "max_stars_repo_name": "vermouth1992/torchlib", "max_stars_repo_head_hexsha": "63b2bedb40f670b2d9fbfc0daeab4a8d44623095", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-07-23T21:32:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T23:13:30.000Z", "max_issues_repo_path": "torchlib/deep_rl/algorithm/td3/agent.py", "max_issues_repo_name": "vermouth1992/torchlib", "max_issues_repo_head_hexsha": "63b2bedb40f670b2d9fbfc0daeab4a8d44623095", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torchlib/deep_rl/algorithm/td3/agent.py", "max_forks_repo_name": "vermouth1992/torchlib", "max_forks_repo_head_hexsha": "63b2bedb40f670b2d9fbfc0daeab4a8d44623095", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-23T21:32:23.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-23T21:32:23.000Z", "avg_line_length": 45.3988439306, "max_line_length": 120, "alphanum_fraction": 0.6511331805, "include": true, "reason": "import numpy", "num_tokens": 1606}
|
#! -*- coding:utf-8 -*-
# CLUE benchmark
# iflytek text classification
# Approach: take the [CLS] vector, then attach a Dense + Softmax classifier
import json
import numpy as np
from snippets import *
from bert4keras.backend import keras
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from tqdm import tqdm
# Basic parameters
num_classes = 119
maxlen = 128
batch_size = 32
epochs = 10
def load_data(filename):
"""加载数据
格式:[(文本, 标签id)]
"""
D = []
with open(filename) as f:
for i, l in enumerate(f):
l = json.loads(l)
text, label = l['sentence'], l.get('label', 0)
D.append((text, int(label)))
return D
# Load the datasets
train_data = load_data(data_path + 'iflytek/train.json')
valid_data = load_data(data_path + 'iflytek/dev.json')
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for is_end, (text, label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Convert the datasets
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
# Build the model
output = base.model.output
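# take the hidden vector of the first token ([CLS]) as the sentence representation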
output = keras.layers.Lambda(lambda x: x[:, 0])(output)
output = keras.layers.Dense(
units=num_classes,
activation='softmax',
kernel_initializer=base.initializer
)(output)
model = keras.models.Model(base.model.input, output)
model.summary()
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy']
)
class Evaluator(keras.callbacks.Callback):
"""保存验证集acc最好的模型
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, epoch, logs=None):
val_acc = self.evaluate(valid_generator)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('weights/iflytek.weights')
print(
u'val_acc: %.5f, best_val_acc: %.5f\n' %
(val_acc, self.best_val_acc)
)
def evaluate(self, data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
y_true = y_true[:, 0]
total += len(y_true)
right += (y_true == y_pred).sum()
return right / total
def test_predict(in_file, out_file):
"""输出测试结果到文件
结果文件可以提交到 https://www.cluebenchmarks.com 评测。
"""
test_data = load_data(in_file)
test_generator = data_generator(test_data, batch_size)
results = []
for x_true, _ in tqdm(test_generator, ncols=0):
y_pred = model.predict(x_true).argmax(axis=1)
results.extend(y_pred)
fw = open(out_file, 'w')
with open(in_file) as fr:
for l, r in zip(fr, results):
l = json.loads(l)
l = json.dumps({'id': str(l['id']), 'label': str(r)})
fw.write(l + '\n')
fw.close()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit_generator(
train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=epochs,
callbacks=[evaluator]
)
model.load_weights('weights/iflytek.weights')
test_predict(
in_file=data_path + 'iflytek/test.json',
out_file='results/iflytek_predict.json'
)
else:
model.load_weights('weights/iflytek.weights')
|
{"hexsha": "13f1dbada436e122cfd0376d9089ce4a0980010f", "size": 3982, "ext": "py", "lang": "Python", "max_stars_repo_path": "clue/iflytek.py", "max_stars_repo_name": "dumpmemory/roformer-v2", "max_stars_repo_head_hexsha": "95b71ae03b8bb910998285e194d7752b1e4104c0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2022-03-17T02:58:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:08:29.000Z", "max_issues_repo_path": "clue/iflytek.py", "max_issues_repo_name": "dumpmemory/roformer-v2", "max_issues_repo_head_hexsha": "95b71ae03b8bb910998285e194d7752b1e4104c0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clue/iflytek.py", "max_forks_repo_name": "dumpmemory/roformer-v2", "max_forks_repo_head_hexsha": "95b71ae03b8bb910998285e194d7752b1e4104c0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-17T05:47:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T10:33:54.000Z", "avg_line_length": 27.0884353741, "max_line_length": 77, "alphanum_fraction": 0.6356102461, "include": true, "reason": "import numpy", "num_tokens": 1000}
|
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import xmltodict
import uuid
import os
import shutil
import json
import copy
from cyclus_gui.gui.sim_window import SimulationWindow
from cyclus_gui.gui.arche_window import ArchetypeWindow
from cyclus_gui.gui.proto_window import PrototypeWindow
from cyclus_gui.gui.region_window import RegionWindow
from cyclus_gui.gui.recipe_window import RecipeWindow
from cyclus_gui.gui.backend_window import BackendWindow
import subprocess
import copy
from cyclus_gui.gui.run_cyclus import cyclus_run
import cyclus_gui.tools.from_pris as fp
import cyclus_gui.tools.pris_data as pris_data
from cyclus_gui.gui.hovertip import CreateToolTip
from cyclus_gui.gui.window_tools import *
import platform
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import networkx as nx
os_ = platform.system()
print('Your OS is:', os_)
if 'windows' in os_.lower() or 'linux' in os_.lower():
no_hover=True
else:
no_hover=False
uniq_id = str(uuid.uuid4())[:3]
file_path = os.path.abspath('.')
# generate unique id
folders = os.listdir(file_path)
folders = [f for f in folders if os.path.isdir(os.path.join(file_path, f))]
unique = False
while not unique:
    unique = True
    for folder in folders:
        if uniq_id in folder:
            print('JACKPOT! You generated two identical 3-character ids! Today is your lucky day, go buy a lottery ticket')
            uniq_id = str(uuid.uuid4())[:3]
            unique = False
            break
output_path = os.path.join(file_path, 'output_'+uniq_id)
os.mkdir(output_path)
print('This is your id, boy:', uniq_id)
class Cygui(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.file_list = ['control.xml', 'archetypes.xml', 'facility.xml',
'region.xml', 'recipe.xml']
self.master = master
# self.master.geometry('+0+0')
self.init_window()
self.uniq_id = uniq_id
print('Your screen resolution is:')
self.screen_width = master.winfo_screenwidth()
self.screen_height = master.winfo_screenheight()
print(self.screen_width, self.screen_height)
self.guide()
def init_window(self):
self.master.title('GUI')
# menu instance
menu = Menu(self.master)
self.initialized = {}
# self.master.config(menu=menu)
self.hash_var = StringVar()
self.hash_var.set(uniq_id)
columnspan=5
q = Label(root, text='Cyclus Helper', bg='yellow')
q.grid(row=0, column=2)
CreateToolTip(q, text='you found the secret\n hover tip \n huehuehuehue')
Label(root, text='').grid(row=1, column=0, columnspan=2)
Label(root, textvariable=self.hash_var, bg='pale green').grid(row=1, column=2)
saveas_button = Button(root, text='Save as', command=lambda:self.save_as(), highlightbackground='blue')
saveas_button.grid(row=1, column=columnspan-1)
Label(root, text='==========================').grid(row=2, column=0, columnspan=columnspan)
Label(root, text='Generate / Edit Blocks').grid(row=3, column=0)
Label(root, text='=============').grid(row=4, column=0)
Label(root, text='Load:').grid(row=3, column=2)
Label(root, text='=============').grid(row=4, column=2)
Label(root, text='Run / Visualize').grid(row=3, column=4)
Label(root, text='=============').grid(row=4, column=4)
sim_button = Button(root, text='Simulation', command=lambda : self.open_window('simulation', output_path))
sim_button.grid(row=5, column=0)
library_button = Button(root, text='Libraries', command=lambda : self.open_window('archetype', output_path))
library_button.grid(row=6, column=0)
prototype_button = Button(root, text='Facilities', command=lambda : self.open_window('facility', output_path))
prototype_button.grid(row=7, column=0)
region_button = Button(root, text='Regions', command=lambda : self.open_window('region', output_path))
region_button.grid(row=8, column=0)
recipe_button = Button(root, text='Recipes', command=lambda : self.open_window('recipe', output_path))
recipe_button.grid(row=9, column=0)
for i in range(5,10):
Label(root, text=' ').grid(row=i, column=1)
Label(root, text=' ').grid(row=i, column=3)
load_button = Button(root, text='From Instance', command=lambda: self.load_prev_window())
load_complete_input = Button(root, text='From xml', command=lambda: self.askopenfile())
load_pris = Button(root, text='From PRIS', command=lambda: self.load_from_pris())
view_input_button = Button(root, text='View Input', command=lambda: self.xml_window())
make_input_button = Button(root, text='Generate Input', command=lambda: self.check_and_run(run=False))
combine_run_button = Button(root, text='Combine and Run', command= lambda: self.check_and_run())
backend_button = Button(root, text='Backend Analysis', command= lambda: self.open_window('backend', output_path))
if not no_hover:
CreateToolTip(saveas_button, text='You can save your current instance with a different three-letter hash.')
CreateToolTip(load_button, text='You can load from a previous instance.\nFor every instance, the GUI automatically creates `output_xxx` directory\nwhere it saves all the files, so that it can be called later on.')
CreateToolTip(load_complete_input, text='You can load from a previously-existing Cyclus input xml file.\nThere are limitations to some input files, if they use special archetypes. You can edit or run cyclus on the file!')
CreateToolTip(load_pris, text='You can initialize a simulation to a real-world initial condition!\nUsing this method the real-life fleet is automatically generated from the\nIAEA Power Reactor Information System (PRIS) database.')
CreateToolTip(view_input_button, text='See the input in a window, just to see\nwhat it looks like.')
CreateToolTip(make_input_button, text='Compile the input (into `input.xml`)\nbut not run the file')
CreateToolTip(combine_run_button, text='You can compile and run this simulation\nYou can do this locally if you have a local installation of Cyclus\nBut you can also run it remotely.')
CreateToolTip(backend_button, text='After getting the output file, you can get plots and csv files\nwith ease using this module.')
load_button.grid(row=6, column=2)
load_complete_input.grid(row=7, column=2)
load_pris.grid(row=8, column=2)
view_input_button.grid(row=5, column=4)
make_input_button.grid(row=6, column=4)
combine_run_button.grid(row=7, column=4)
backend_button.grid(row=8, column=4)
def open_window(self, name, output_path):
if name == 'simulation':
self.app = SimulationWindow(self.master, output_path)
if name == 'archetype':
self.app = ArchetypeWindow(self.master, output_path)
if name == 'facility':
if not os.path.isfile(os.path.join(output_path, 'archetypes.xml')):
messagebox.showerror('Error', 'You must define the Archetype libraries first!')
return
self.app = PrototypeWindow(self.master, output_path)
if name == 'region':
self.app = RegionWindow(self.master, output_path)
if name == 'recipe':
self.app = RecipeWindow(self.master, output_path)
if name == 'backend':
if not os.path.isfile(os.path.join(output_path, 'cyclus.sqlite')):
messagebox.showerror('Error', 'You must have the output file first!')
else:
self.app = BackendWindow(self.master, output_path)
def save_as(self):
self.saveas_window = Toplevel(self.master)
self.saveas_window.title('Save as new three-letter hash')
Label(self.saveas_window, text='Enter new three-letter hash:', bg='yellow').pack()
entry = Entry(self.saveas_window)
entry.pack()
Button(self.saveas_window, text='Save as!', command=lambda:self.save_as_exec(entry)).pack()
def save_as_exec(self,entry):
new_hash = str(entry.get())
if os.path.isdir('output_' + new_hash):
messagebox.showerror('Already exists', 'Folder output_%s already exists!\n Try a different hash' %new_hash)
return
else:
global uniq_id
global output_path
shutil.copytree(output_path, os.path.join(os.path.dirname(output_path), 'output_'+new_hash))
messagebox.showinfo('Success', 'Saved as new hash %s.\nYour current working instance has been changed from %s to %s' %(new_hash, uniq_id, new_hash))
uniq_id = new_hash
self.hash_var.set(new_hash)
print('Changed ID to %s' %new_hash)
output_path = os.path.join(file_path, 'output_'+new_hash)
self.saveas_window.destroy()
self.uniq_id = new_hash
return
def load_prev_window(self):
try:
if self.initialized['prev']:
return
except KeyError:
    pass
self.initialized['prev'] = True
self.load_window = Toplevel(self.master)
self.load_window.title('Load previous with hash')
folders = os.listdir(file_path)
folders = [f for f in folders if os.path.isdir(os.path.join(file_path, f))]
folders = [f for f in folders if 'output_' in f]
hashs = [f.replace('output_', '') for f in folders]
hashs = sorted([f for f in hashs if f != self.uniq_id])
Label(self.load_window, text='Current working directory:').pack()
Label(self.load_window, text=os.path.abspath(file_path), bg='yellow').pack()
Label(self.load_window, text='Available instances:').pack()
for h in hashs:
Button(self.load_window, text=h, command=lambda h=h: self.load_prev(h)).pack()
if not hashs:
# if list is empty:
Label(self.load_window, text='NONE', bg='red').pack()
def load_prev(self, h):
files_in = os.listdir(os.path.join(file_path, 'output_%s'%h))
info_text = 'Found folder output_%s.\nLoading input blocks:\n\n' %h
for f_ in files_in:
f_ = f_.replace('.xml', '')
info_text += '\t%s\n' %f_
messagebox.showinfo('Found!', info_text)
global uniq_id
global output_path
uniq_id = h
self.hash_var.set(h)
print('Changed ID to %s' %h)
output_path = os.path.join(file_path, 'output_%s' %h)
self.load_window.destroy()
shutil.rmtree('output_%s' %self.uniq_id)
self.uniq_id = h
self.initialized['prev'] = False
return
def askopenfile(self):
file = filedialog.askopenfile(parent=self.master, mode='r', title='Choose an xml file')
if not file:
return
self.load_xml_file(file)
messagebox.showinfo('Successfully loaded file', 'Successfully loaded file')
# self.load_xml_window.destroy()
def load_xml_file(self, file):
xml_dict = xmltodict.parse(file.read())['simulation']
# check if file is good:
elements = ['control', 'archetypes', 'facility', 'region', 'recipe']
if set(xml_dict.keys()) != set(elements):
    messagebox.showerror('Error', 'This is a malformed xml file! Check the file to see if it has all the nodes:\nIt needs:\n%s\n\nBut it only has:\n %s' %(', '.join(elements), ', '.join(list(xml_dict.keys()))))
    return
for part in elements:
with open(os.path.join(output_path, part+'.xml'), 'w') as f:
if part in ['facility', 'region', 'recipe']:
f.write('\n<root>')
f.write(xmltodict.unparse({part: xml_dict[part]}, pretty=True, full_document=False))
if part in ['facility', 'region', 'recipe']:
f.write('\n</root>')
def load_from_pris(self):
guide_text = """
You can `initialize' your simulation as a real-life nation!
This method loads from the PRIS database and deploys reactors in your
desired country, at a desired initial time. Reactor lifetimes
are calculated as remaining lifetimes.
Assumptions:
1. Timestep is assumed to be a month
2. Reactors below 100 MWe are filtered out (assumed to be research reactors)
3. Core size is linearly scaled with power capacity
4. Reactor lifetimes are all assumed to be 60 years from their first criticality date
5. Fuel Cycle facilities are deployed with infinite capacity.
Simulation defaults:
1. Reactors are cycamore::Reactor (recipe reactors)
2. By default deploys a `RandLand' region with `Fuel_Cycle_Facilities' institution with facilities:
a. `nat_u_source' -> [natl_u]
b. [natl_u] -> `enrichment' -> [uox]
c. [uox_waste, used_candu, mox_waste, tailings, reprocess_waste] -> `SomeSink'
"""
self.guide(guide_text)
try:
if self.initialized['pris']:
return
except KeyError:
    pass
self.initialized['pris'] = True
self.load_from_pris_window = Toplevel(self.master)
self.load_from_pris_window.geometry('+0+%s' %(int(self.screen_height/3)))
self.entry_dict = {}
self.load_from_pris_window.title('Load from PRIS database')
Label(self.load_from_pris_window, text='Load existing fleets using the PRIS database').grid(row=0, columnspan=2)
Label(self.load_from_pris_window, text='initial date (YYYYMMDD)').grid(row=1, column=0)
self.entry_dict['initial_date'] = Entry(self.load_from_pris_window)
self.entry_dict['initial_date'].grid(row=1, column=1)
Label(self.load_from_pris_window, text='duration (Timesteps)').grid(row=2, column=0)
self.entry_dict['duration'] = Entry(self.load_from_pris_window)
self.entry_dict['duration'].grid(row=2, column=1)
self.select_countries()
Label(self.load_from_pris_window, text=' ').grid(row=4, columnspan=2)
Button(self.load_from_pris_window, text='Done', command=lambda: self.gen_pris()).grid(row=5, columnspan=2)
def select_countries(self):
self.pris_csv_path = os.path.join(output_path, 'pris.csv')
#print('PRIS CSV PATH')
#print(self.pris_csv_path)
#with open(self.pris_csv_path, 'r') as f:
# q = f.readlines()
q = pris_data.pris_data().split('\n')
with open(self.pris_csv_path, 'w') as f:
f.write('\n'.join(q))
country_list = sorted(list(set([w.split(',')[0] for w in q if 'Country' not in w])))
self.country_select_window = Toplevel(self.load_from_pris_window)
self.country_select_window.geometry('+%s+0' %(int(self.screen_width/4.5)))
self.country_select_window.title('Select Countries')
parent = assess_scroll_deny(len(country_list), self.country_select_window)
self.selected_countries = []
# get lists of countries
Label(parent, text='Click on country to select:').grid(row=0, column=0)
self.button_color = {}
self.button_loc = {}
for indx, c in enumerate(country_list):
self.button_color[c] = Button(parent, text=c, command=lambda country=c : self.add_country(country, parent), fg='black')
self.button_color[c].grid(row=indx+1, column=0)
self.button_loc[c] = indx+1
def add_country(self, country, parent):
if country in self.selected_countries:
    self.selected_countries.remove(country)
    fg = 'black'
else:
    self.selected_countries.append(country)
    fg = 'green'
# recreate the button with the new color; grid() returns None, so grid the
# widget in a separate call to keep a usable handle in button_color
btn = Button(parent, text=country, command=lambda country=country: self.add_country(country, parent), fg=fg)
btn.grid(row=self.button_loc[country], column=0)
self.button_color[country] = btn
def gen_pris(self):
if len(self.selected_countries) == 0 or len(self.entry_dict['initial_date'].get()) == 0 or len(self.entry_dict['duration'].get()) == 0:
messagebox.showerror('You missed something', ' You need to select at least one country and fill out the dates')
else:
init_date = int(self.entry_dict['initial_date'].get())
duration = int(self.entry_dict['duration'].get())
outpath = os.path.join(output_path, 'input.xml')
fp.main(self.pris_csv_path, init_date, duration, self.selected_countries,
output_file=outpath)
self.load_xml_file(open(outpath, 'r'))
messagebox.showinfo('Successfully loaded file',
'Successfully loaded file with countries\n\n'+'\n'.join(self.selected_countries)+'\n See the flowchart for commodity and facility default definitions.')
self.load_from_pris_window.destroy()
self.pris_flowchart()
self.initialized['pris'] = False
def pris_flowchart(self):
G = nx.DiGraph()
natu = 'nat_u_source'
enrichment = 'enrichment'
reactors = 'Reactors\n(various names)'
somesink = 'somesink'
natl_u = 'natl_u\n[natl_u_recipe]'
uox = 'uox\n[uox_fuel_recipe]'
uox_waste = 'uox_waste\n[uox_used_fuel_recipe]'
tailings = 'tailings\n(0.3% U235)'
fac_color = '#d9fbd0'
com_color = '#fbd5d0'
G.add_node(natu, pos=(0,0), color=fac_color)
G.add_node(enrichment, pos=(6, 6), color=fac_color)
G.add_node(reactors, pos=(12, 12), color=fac_color)
G.add_node(somesink, pos=(18, 18), color=fac_color)
G.add_node(natl_u, pos=(3,3), color=com_color)
G.add_node(uox, pos=(9,9), color=com_color)
G.add_node(uox_waste, pos=(15,15), color=com_color)
G.add_node(tailings, pos=(6,16), color=com_color)
G.add_edge(natu, natl_u)
G.add_edge(natl_u, enrichment)
G.add_edge(enrichment, uox)
G.add_edge(uox, reactors)
G.add_edge(reactors, uox_waste)
G.add_edge(uox_waste, somesink)
G.add_edge(enrichment, tailings)
G.add_edge(tailings, somesink)
node_colors = list(nx.get_node_attributes(G, 'color').values())
pos = nx.get_node_attributes(G, 'pos')
f = plt.figure(1, figsize=(8, 8))
ax = f.add_subplot(1,1,1)
ax.scatter([0], [0], c=fac_color, label='Facility')
ax.scatter([0], [0], c=com_color, label='Commodity [recipe]')
nx.draw(G, pos, with_labels=True, node_color=node_colors, ax=ax, node_size=900)
plt.legend(loc='lower right')
plt.show()
def check_and_run(self, run=True):
files = os.listdir(output_path)
okay = True
absentee = []
for i in self.file_list:
if i not in files:
absentee.append(i.replace('.xml', ''))
if len(absentee) != 0:
string = 'You have not made the following blocks:\n'
for abse in absentee:
string += '\t' + abse + '\n'
messagebox.showerror('Error', string)
okay = False
if okay:
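# stitch the five block files together into a single <simulation> input file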
input_file = '<simulation>\n'
for i in self.file_list:
with open(os.path.join(output_path,i), 'r') as f:
lines = f.read().split('\n')
x = []
for line in lines:
if 'root>' in line:
x.append(line.replace('<root>', '').replace('</root>', ''))
continue
if 'xml version' in line and 'encoding' in line:
continue
else:
x.append(line)
x = '\n'.join(x)
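                # inject the cycamore DeployInst archetype spec if the
                # archetypes block lacks it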
if i == 'archetypes.xml' and 'DeployInst' not in x:
x = x.replace('</archetypes>', """\t<spec>
<lib>cycamore</lib>
<name>DeployInst</name>
</spec>
</archetypes>""")
input_file += x + '\n\n'
"""
with open(os.path.join(output_path, i), 'r') as f:
x = f.readlines()
if 'facility' in i:
skipfront += 1
if 'facility' in i or 'region' in i or 'recipe' in i:
skipfront += 1
skipback -= 1
if skipback == 0:
lines = x[skipfront:]
else:
lines = x[skipfront:skipback]
input_file += ''.join(lines)
input_file += '\n\n\n'
"""
input_file += '\n</simulation>'
with open(os.path.join(output_path, 'input.xml'), 'w') as f:
f.write(input_file)
if run:
input_path = os.path.join(output_path, 'input.xml')
output = os.path.join(output_path, 'cyclus.sqlite')
                runner = cyclus_run(self.master, input_path, output)
else:
messagebox.showinfo('Success', 'successfully rendered input.xml')
def guide(self, guide_text=''):
self.guide_window = Toplevel(self.master)
self.guide_window.title('Guide')
if guide_text != '':
self.guide_window.geometry('+%s+0' %(int(self.screen_width/1.5)))
else:
self.guide_window.geometry('+%s+0' %int(self.screen_width//2))
if guide_text == '':
guide_text = """
Welcome!
I am the guide window, and I will guide you
through the intricacies of Cyclus!
A Cyclus input file has 5 major blocks.
It is recommended you fill them out sequentially:
Simulation:
Here, you define simulation parameters like
startyear, timesteps, and decay methods.
Libraries:
Since Cyclus is a modular framework, here you
decide what libraries and what archetypes to use.
        An archetype is a self-contained piece of code that defines
a facility's behavior (e.g. reactor, sink). It is
automatically populated, so don't do anything
unless you need some specific library.
(A reactor archetype [takes in, depletes, and discharges fuel at a
predefined cycle length])
Facilities:
Here, you define the facilities' parameters.
You can define more than one facility for one archetype.
For example, you can have:
reactor with 3 60-assembly batches with power 1000 MWe.
reactor with 1 140-assembly batch with power 500 MWe.
They both use the reactor archetype, but are different facilities.
This block is crucial, since you must set the in-and-out commodities
        of each facility to match other facilities' in-and-out commodities.
For example, if you want the reactor to trade with the source,
the out-commodity of the source facility should match the
in-commodity of the reactor facility, so they trade.
( The Clinton reactor facility takes in, depletes and discharges
fuel in [18-month cycles], outputs [1,062 MWe], and uses [UOX] fuel.)
Regions:
Here, you actually set up how the facility prototypes will be `played'
- when to enter, when to exit, and how many to play.
(The Clinton reactor (facility prototype) is inside the Exelon Institution,
which is inside the U.S.A. region, has 1 unit (n_build),
has a lifetime of 960 months (lifetimes),
and enters simulation in timestep 100 (build_times).)
Recipes:
Recipes are predefined compositions of various material. They can
be defined as mass or atomic concentrations. You can import them
from a CSV file or manually write them yourself.
All feedback and comments / bug reports can be made to baej@ornl.gov
Enjoy :)
"""
st = ScrolledText(master=self.guide_window,
wrap=WORD)
st.pack()
st.insert(INSERT, guide_text)
#Label(self.guide_window, text=guide_text, justify=LEFT).pack(padx=30, pady=30)
def xml_window(self):
        try:
            self.xml_window_.destroy()
        except Exception:
            pass
self.xml_window_ = Toplevel(self.master)
self.xml_window_.title('XML rendering')
self.xml_window_.geometry('+350+0')
tab_parent = ttk.Notebook(self.xml_window_)
file_paths = [os.path.join(output_path, x) for x in self.file_list]
tab_dict = {}
for indx, file in enumerate(file_paths):
key = self.file_list[indx].replace('.xml', '')
tab_dict[key] = Frame(tab_parent)
tab_parent.add(tab_dict[key], text=key)
#tab_dict[key] = assess_scroll_deny(100, tab_dict[key])
st = ScrolledText(master=tab_dict[key], wrap=WORD, width=100, height=30)
st.pack()
#q = Text(tab_dict[key], width=100, height=30)
#q.pack()
if os.path.isfile(file):
with open(file, 'r') as f:
s = f.read().replace('<root>', '').replace('</root>', '')
else:
s = '-- file does not exist --'
st.insert(INSERT, s)
#q.insert(END, s)
tab_parent.pack(expand=1, fill='both')
root = Tk()
app = Cygui(root)
root.mainloop()
|
{"hexsha": "55b49b7a0add29859508d234d0201ddae7ce6592", "size": 25758, "ext": "py", "lang": "Python", "max_stars_repo_path": "cyclus_gui/gui/gui.py", "max_stars_repo_name": "gonuke/cyclus_gui", "max_stars_repo_head_hexsha": "ef67df351585ab8a476b1577380ec6034bf0753f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cyclus_gui/gui/gui.py", "max_issues_repo_name": "gonuke/cyclus_gui", "max_issues_repo_head_hexsha": "ef67df351585ab8a476b1577380ec6034bf0753f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cyclus_gui/gui/gui.py", "max_forks_repo_name": "gonuke/cyclus_gui", "max_forks_repo_head_hexsha": "ef67df351585ab8a476b1577380ec6034bf0753f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-20T16:23:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-20T16:23:16.000Z", "avg_line_length": 42.2262295082, "max_line_length": 242, "alphanum_fraction": 0.622447395, "include": true, "reason": "import networkx", "num_tokens": 6014}
|
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Exploring Revision 5a MCMC and vb},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{{#1}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{{#1}}}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{{#1}}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{{#1}}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{{#1}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{{#1}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{{#1}}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{{#1}}}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{{#1}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Exploring Revision 5a MCMC and vb}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\author{}
\preauthor{}\postauthor{}
\date{}
\predate{}\postdate{}
\begin{document}
\maketitle
Revision 5a has the following features:
\begin{itemize}
\tightlist
\item
  includes one group of subjects
\item
  has a level for multiple runs as random effects
\item
  allows runs to be reward, punishment, or unspecified. Each subject
  has an individual parameter specifying the difference between reward
  and punishment runs, and these parameters are drawn from a
  group-level distribution.
\end{itemize}
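One way to read this structure (a sketch of the parameterization, not
necessarily the exact code) is that each subject \(s\) has a baseline
parameter \(\mu_s\) and a reward--punishment offset \(\delta_s\):
\[
\theta_{s,\mathrm{rew}} = \mu_s + \tfrac{1}{2}\,\delta_s, \qquad
\theta_{s,\mathrm{pun}} = \mu_s - \tfrac{1}{2}\,\delta_s, \qquad
\delta_s \sim \mathcal{N}(\mu_\delta, \sigma_\delta^2),
\]
so that \(\mu_\delta\) captures the group-level reward--punishment
difference.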
The variational Bayes estimates for Revision 5 looked good. So I ran
Revision 5a:
\begin{itemize}
\tightlist
\item
on each of the three groups
\item
  using both variational Bayes and MCMC
\item
three times so that we could look at consistency across different
runs.
\end{itemize}
\subsection{Variational Bayes: posterior
comparison}\label{variational-bayes-posterior-comparison}
Though we have already done a variational Bayes estimate (see
rev5\_exploration-vb.Rmd), the results are presented here side-by-side
with the MCMC analyses for comparison.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{source}\NormalTok{(}\StringTok{"du_model_rev5a-mcmc1timerun.R"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "initializing..."
\end{verbatim}
\begin{verbatim}
## Loading required package: hBayesDM
\end{verbatim}
\begin{verbatim}
## Loading required package: Rcpp
\end{verbatim}
\begin{verbatim}
##
##
## This is hBayesDM version 0.5.1
\end{verbatim}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-2-1.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-2-2.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-2-3.pdf}
These all look plausible. For what I think is the first time, I have a
model that estimates repeated runs and reward and punishment together,
and that yields separate estimates for reward and punishment by using a
parameter that separates the two.
While they all look \emph{plausible}, the estimates vary widely across
repetitions, indicating that we need MCMC to estimate these values
reliably. Fortunately, the current model isn't unwieldy, and I should
be able to run the MCMC analyses in a tractable period of time.
\subsection{Differences}\label{differences}
I want to take a look at differences:
\begin{itemize}
\tightlist
\item
between reward and punishment runs
\item
between Group 2 and Group 3
\item
An interaction of those differences.
\end{itemize}
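Each of these is computed per posterior draw, e.g.
\(\Delta^{\mathrm{rp}} = \mathit{rew\_mu} - \mathit{pun\_mu}\) and
\(\Delta^{32} = \mathrm{Group3} - \mathrm{Group2}\), so the plots below
show posterior distributions of the contrasts.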
\begin{verbatim}
## Warning in `[.data.table`(model.summary.all.rewpun, ,
## `:=`(rew_minus_pun_mu, : Invalid .internal.selfref detected and fixed
## by taking a (shallow) copy of the data.table so that := can add this new
## column by reference. At an earlier point, this data.table has been copied
## by R (or been created manually using structure() or similar). Avoid key<-,
## names<- and attr<- which in R currently (and oddly) may copy the whole
## data.table. Use set* syntax instead to avoid copying: ?set, ?setnames and ?
## setattr. Also, in R<=v3.0.2, list(DT1,DT2) copied the entire DT1 and DT2
## (R's list() used to copy named objects); please upgrade to R>v3.0.2 if that
## is biting. If this message doesn't help, please report to datatable-help so
## the root cause can be fixed.
\end{verbatim}
\begin{verbatim}
## iter Run Parameter TestId Group ModelName
## 1: 1 All alpha 1 1 double_update_rev5a
## 2: 1 All alpha 2 2 double_update_rev5a
## 3: 1 All alpha 3 3 double_update_rev5a
## 4: 1 All alpha 4 1 double_update_rev5a
## 5: 1 All alpha 5 2 double_update_rev5a
## ---
## 323924: 48000 All alpha 2 2 double_update_rev5a
## 323925: 48000 All alpha 3 3 double_update_rev5a
## 323926: 48000 All beta 1 1 double_update_rev5a
## 323927: 48000 All beta 2 2 double_update_rev5a
## 323928: 48000 All beta 3 3 double_update_rev5a
## AnalysisRepetition EstimationMethod rew_mu pun_mu
## 1: 1 MCMC 0.3253670 0.4013265
## 2: 1 MCMC 0.3158299 0.3744320
## 3: 1 MCMC 0.2462382 0.2674455
## 4: 1 variationalbayes 0.3229920 0.3862500
## 5: 1 variationalbayes 0.3183860 0.2937640
## ---
## 323924: 1 MCMC 0.3103962 0.3346431
## 323925: 1 MCMC 0.1837489 0.1793888
## 323926: 1 MCMC 0.6338423 0.5454490
## 323927: 1 MCMC 0.7020964 0.6493959
## 323928: 1 MCMC 0.6900773 0.7203194
## rew_minus_pun_mu
## 1: -0.07595953
## 2: -0.05860209
## 3: -0.02120736
## 4: -0.06325800
## 5: 0.02462200
## ---
## 323924: -0.02424691
## 323925: 0.00436020
## 323926: 0.08839326
## 323927: 0.05270057
## 323928: -0.03024212
\end{verbatim}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-3-1.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-3-2.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-3-3.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# group 2 compared to group 3.}
\NormalTok{model.summary.all.notestid <-}\StringTok{ }\NormalTok{model.summary.all[, }\StringTok{`}\DataTypeTok{:=}\StringTok{`}\NormalTok{(TestId, }\OtherTok{NULL}\NormalTok{)]}
\NormalTok{model.summary.all.groupcompare <-}\StringTok{ }\NormalTok{tidyr::}\KeywordTok{spread}\NormalTok{(model.summary.all.notestid, }
\NormalTok{Group, Value, }\DataTypeTok{sep =} \StringTok{""}\NormalTok{)}
\NormalTok{model.summary.all.groupcompare$Group3_minus_Group2 <-}\StringTok{ }\NormalTok{model.summary.all.groupcompare$Group3 -}\StringTok{ }
\StringTok{ }\NormalTok{model.summary.all.groupcompare$Group2}
\KeywordTok{ggplot}\NormalTok{(model.summary.all.groupcompare[EstimationMethod ==}\StringTok{ "variationalbayes"}\NormalTok{], }
\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x =} \NormalTok{Group3_minus_Group2, }\DataTypeTok{fill =} \KeywordTok{factor}\NormalTok{(AnalysisRepetition), }\DataTypeTok{color =} \KeywordTok{factor}\NormalTok{(AnalysisRepetition))) +}\StringTok{ }
\StringTok{ }\KeywordTok{geom_freqpoly}\NormalTok{(}\DataTypeTok{alpha =} \FloatTok{0.9}\NormalTok{, }\DataTypeTok{binwidth =} \FloatTok{0.001}\NormalTok{) +}\StringTok{ }\KeywordTok{geom_hdi}\NormalTok{(}\DataTypeTok{size =} \DecValTok{2}\NormalTok{, }\DataTypeTok{lineend =} \StringTok{"round"}\NormalTok{, }
\DataTypeTok{alpha =} \FloatTok{0.5}\NormalTok{, }\DataTypeTok{credible_mass =} \FloatTok{0.95}\NormalTok{) +}\StringTok{ }\KeywordTok{facet_grid}\NormalTok{(Statistic ~}\StringTok{ }\NormalTok{Parameter, }\DataTypeTok{scales =} \StringTok{"free"}\NormalTok{) +}\StringTok{ }
\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title =} \KeywordTok{paste0}\NormalTok{(}\StringTok{"mu statistic (all rounds), Group 3 Minus Group 2"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-4-1.pdf}
These all look like plausible estimates of group-level parameters.
\subsection{Exploring the structure of the
parameters}\label{exploring-the-structure-of-the-parameters}
Can we take a peek at how \ldots{}
\section{MCMC results}\label{mcmc-results}
Let's take a look at the same distributions using MCMC.
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-5-1.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-5-2.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-6-1.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-6-2.pdf}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-6-3.pdf}
We need to check the scale these reward and punishment parameters are
on. Are they transformed into the actual space they're implemented
within?
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{#group 2 compared to group 3.}
\NormalTok{model.summary.all.notestid<-model.summary.all[,TestId:}\ErrorTok{=}\OtherTok{NULL}\NormalTok{] }
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning in `[.data.table`(model.summary.all, , `:=`(TestId, NULL)): Adding
## new column 'TestId' then assigning NULL (deleting it).
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model.summary.all.groupcompare<-}\StringTok{ }\NormalTok{tidyr::}\KeywordTok{spread}\NormalTok{(model.summary.all.notestid,Group,Value,}\DataTypeTok{sep=}\StringTok{""}\NormalTok{)}
\NormalTok{model.summary.all.groupcompare$Group3_minus_Group2<-}
\StringTok{ }\NormalTok{model.summary.all.groupcompare$Group3-model.summary.all.groupcompare$Group2}
\KeywordTok{ggplot}\NormalTok{(model.summary.all.groupcompare[EstimationMethod==}\StringTok{"MCMC"}\NormalTok{],}\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{Group3_minus_Group2 ,}\DataTypeTok{fill=}\KeywordTok{factor}\NormalTok{(AnalysisRepetition),}\DataTypeTok{color=}\KeywordTok{factor}\NormalTok{(AnalysisRepetition)}
\NormalTok{))+}
\StringTok{ }\KeywordTok{geom_freqpoly}\NormalTok{(}\DataTypeTok{alpha=}\FloatTok{0.9}\NormalTok{,}\DataTypeTok{binwidth=}\FloatTok{0.001}\NormalTok{)+}
\StringTok{ }\KeywordTok{geom_hdi}\NormalTok{(}\DataTypeTok{size=}\DecValTok{2}\NormalTok{, }\DataTypeTok{lineend =} \StringTok{"round"}\NormalTok{,}\DataTypeTok{alpha=}\FloatTok{0.5}\NormalTok{,}\DataTypeTok{credible_mass=}\FloatTok{0.95}\NormalTok{)+}
\StringTok{ }\KeywordTok{facet_grid}\NormalTok{(Statistic~Parameter,}\DataTypeTok{scales =} \StringTok{"free"}\NormalTok{)+}
\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title=}\KeywordTok{paste0}\NormalTok{(}\StringTok{"mu statistic (all rounds), Group 3 Minus Group 2"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-7-1.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(model.summary.all.groupcompare)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## iter Run Statistic Parameter ModelName AnalysisRepetition
## 1: 1 All mu alpha double_update_rev5a 1
## 2: 1 All mu alpha double_update_rev5a 1
## 3: 1 All mu alpha double_update_rev5a 2
## 4: 1 All mu alpha double_update_rev5a 3
## 5: 1 All mu alpha double_update_rev5a 4
## 6: 1 All mu alpha double_update_rev5a 5
## EstimationMethod Group1 Group2 Group3 Group3_minus_Group2
## 1: MCMC 0.36266527 0.3446611 0.2567271 -0.08793403
## 2: variationalbayes 0.35411300 0.3059640 0.2504850 -0.05547900
## 3: variationalbayes 0.00942966 0.2985100 0.0860701 -0.21243990
## 4: variationalbayes 0.02321940 0.0274609 0.4028540 0.37539310
## 5: variationalbayes 0.28699100 0.3255150 0.2894890 -0.03602600
## 6: variationalbayes 0.29553300 0.4532400 0.1735660 -0.27967400
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model.summary.all.g3g2compare.bypar<-}\StringTok{ }
\StringTok{ }\NormalTok{tidyr::}\KeywordTok{spread}\NormalTok{(}
\NormalTok{model.summary.all.groupcompare[,.(iter,Run,Statistic,Parameter,ModelName,AnalysisRepetition,}
\NormalTok{EstimationMethod,Group3_minus_Group2)],}
\NormalTok{Parameter,}
\NormalTok{Group3_minus_Group2)}
\KeywordTok{ggplot}\NormalTok{(model.summary.all.g3g2compare.bypar[}
\NormalTok{EstimationMethod==}\StringTok{"MCMC"} \NormalTok{&}\StringTok{ }
\StringTok{ }\NormalTok{Statistic %in%}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"mu"}\NormalTok{,}\StringTok{"rew_mu"}\NormalTok{,}\StringTok{"pun_mu"}\NormalTok{)}
\NormalTok{],}\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{alpha,}\DataTypeTok{y=}\NormalTok{beta }
\CommentTok{#,fill=factor(Statistic),color=factor(Statistic)}
\NormalTok{))+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{(}\DataTypeTok{alpha=}\FloatTok{0.1}\NormalTok{)+}
\StringTok{ }\KeywordTok{facet_grid}\NormalTok{(~Statistic,}\DataTypeTok{scales =} \StringTok{"free"}\NormalTok{)+}
\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title=}\KeywordTok{paste0}\NormalTok{(}\StringTok{"mu statistic (all rounds), Group 3 Minus Group 2"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-7-2.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# group 1 compared to group 2}
\NormalTok{model.summary.all.groupcompare$Group2_minus_Group1 <-}\StringTok{ }\NormalTok{model.summary.all.groupcompare$Group2 -}\StringTok{ }
\StringTok{ }\NormalTok{model.summary.all.groupcompare$Group1}
\KeywordTok{ggplot}\NormalTok{(model.summary.all.groupcompare[EstimationMethod ==}\StringTok{ "MCMC"}\NormalTok{], }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x =} \NormalTok{Group2_minus_Group1, }
\DataTypeTok{fill =} \KeywordTok{factor}\NormalTok{(AnalysisRepetition), }\DataTypeTok{color =} \KeywordTok{factor}\NormalTok{(AnalysisRepetition))) +}\StringTok{ }
\StringTok{ }\KeywordTok{geom_freqpoly}\NormalTok{(}\DataTypeTok{alpha =} \FloatTok{0.9}\NormalTok{, }\DataTypeTok{binwidth =} \FloatTok{0.001}\NormalTok{) +}\StringTok{ }\KeywordTok{geom_hdi}\NormalTok{(}\DataTypeTok{size =} \DecValTok{2}\NormalTok{, }\DataTypeTok{lineend =} \StringTok{"round"}\NormalTok{, }
\DataTypeTok{alpha =} \FloatTok{0.5}\NormalTok{, }\DataTypeTok{credible_mass =} \FloatTok{0.95}\NormalTok{) +}\StringTok{ }\KeywordTok{facet_grid}\NormalTok{(Statistic ~}\StringTok{ }\NormalTok{Parameter, }\DataTypeTok{scales =} \StringTok{"free"}\NormalTok{) +}\StringTok{ }
\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title =} \KeywordTok{paste0}\NormalTok{(}\StringTok{"mu statistic (all rounds), Group 2 Minus Group 1"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-8-1.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model.summary.all.g2g1compare.bypar <-}\StringTok{ }\NormalTok{tidyr::}\KeywordTok{spread}\NormalTok{(model.summary.all.groupcompare[, }
\NormalTok{.(iter, Run, Statistic, Parameter, ModelName, AnalysisRepetition, EstimationMethod, }
\NormalTok{Group2_minus_Group1)], Parameter, Group2_minus_Group1)}
\KeywordTok{ggplot}\NormalTok{(model.summary.all.g2g1compare.bypar[EstimationMethod ==}\StringTok{ "MCMC"} \NormalTok{&}\StringTok{ }\NormalTok{Statistic %in%}\StringTok{ }
\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"mu"}\NormalTok{, }\StringTok{"rew_mu"}\NormalTok{, }\StringTok{"pun_mu"}\NormalTok{)], }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x =} \NormalTok{alpha, }\DataTypeTok{y =} \NormalTok{beta)) +}\StringTok{ }\KeywordTok{geom_point}\NormalTok{(}\DataTypeTok{alpha =} \FloatTok{0.02}\NormalTok{) +}\StringTok{ }
\StringTok{ }\KeywordTok{facet_grid}\NormalTok{(~Statistic, }\DataTypeTok{scales =} \StringTok{"free"}\NormalTok{) +}\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title =} \KeywordTok{paste0}\NormalTok{(}\StringTok{"mu statistic (all rounds), Group 2 Minus Group 1"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\includegraphics{rev5_exploration-2_files/figure-latex/unnamed-chunk-8-2.pdf}
\end{document}
|
{"hexsha": "aa7f4f9a630d3481d9283f43eb215dcf91912746", "size": 21019, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "notebooks/rev5_exploration-2.tex", "max_stars_repo_name": "bjsmith/reversallearning", "max_stars_repo_head_hexsha": "023304731d41c3109bacbfd49d4c850a92353978", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/rev5_exploration-2.tex", "max_issues_repo_name": "bjsmith/reversallearning", "max_issues_repo_head_hexsha": "023304731d41c3109bacbfd49d4c850a92353978", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/rev5_exploration-2.tex", "max_forks_repo_name": "bjsmith/reversallearning", "max_forks_repo_head_hexsha": "023304731d41c3109bacbfd49d4c850a92353978", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-15T22:00:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-11T22:40:20.000Z", "avg_line_length": 49.808056872, "max_line_length": 361, "alphanum_fraction": 0.7001284552, "num_tokens": 6802}
|
# coding=utf-8
# Copyright 2020 The Google AI Perception Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualize the AIST++ Dataset."""
from . import utils
import cv2
import numpy as np
_COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],
[170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85],
[0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255],
[0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255],
[255, 0, 170], [255, 0, 85]]
def plot_kpt(keypoint, canvas, color=None):
for i, (x, y) in enumerate(keypoint[:, 0:2]):
if np.isnan(x) or np.isnan(y) or x < 0 or y < 0:
continue
cv2.circle(canvas, (int(x), int(y)),
7,
color if color is not None else _COLORS[i % len(_COLORS)],
thickness=-1)
return canvas
def plot_on_video(keypoints2d, video_path, save_path, fps=60):
assert len(keypoints2d.shape) == 3, (
f'Input shape is not valid! Got {keypoints2d.shape}')
video = utils.ffmpeg_video_read(video_path, fps=fps)
for iframe, keypoint in enumerate(keypoints2d):
if iframe >= video.shape[0]:
break
video[iframe] = plot_kpt(keypoint, video[iframe])
utils.ffmpeg_video_write(video, save_path, fps=fps)
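# Usage sketch (hypothetical paths; keypoints2d: (num_frames, num_joints, >=2)):
#   import numpy as np
#   keypoints2d = np.load('keypoints2d.npy')
#   plot_on_video(keypoints2d, 'dance.mp4', 'dance_with_kpts.mp4', fps=60)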
|
{"hexsha": "31a250ac726221166b46775a62514c34bca1a94a", "size": 1783, "ext": "py", "lang": "Python", "max_stars_repo_path": "aist_plusplus/visualizer.py", "max_stars_repo_name": "google/aistplusplus_ap", "max_stars_repo_head_hexsha": "83d78d8cbc9b417616cd2200b9afdf37228509e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 189, "max_stars_repo_stars_event_min_datetime": "2021-01-22T02:40:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T02:18:11.000Z", "max_issues_repo_path": "aist_plusplus/visualizer.py", "max_issues_repo_name": "google/aistplusplus_ap", "max_issues_repo_head_hexsha": "83d78d8cbc9b417616cd2200b9afdf37228509e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2021-01-21T03:35:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:49:15.000Z", "max_forks_repo_path": "aist_plusplus/visualizer.py", "max_forks_repo_name": "google/aistplusplus_ap", "max_forks_repo_head_hexsha": "83d78d8cbc9b417616cd2200b9afdf37228509e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 36, "max_forks_repo_forks_event_min_datetime": "2021-01-21T03:26:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T10:16:30.000Z", "avg_line_length": 35.66, "max_line_length": 74, "alphanum_fraction": 0.6416152552, "include": true, "reason": "import numpy", "num_tokens": 551}
|
"""Identity matrix."""
from scipy import sparse
import numpy as np
def iden(dim: int, is_sparse: bool = False) -> np.ndarray:
r"""
Calculate the :code:`dim`-by-:code:`dim` identity matrix [WIKID]_.
Returns the :code:`dim`-by-:code:`dim` identity matrix. If :code:`is_sparse
= False` then the matrix will be full. If :code:`is_sparse = True` then the
matrix will be sparse.
.. math::
\mathbb{I} = \begin{pmatrix}
1 & 0 & 0 & \ldots & 0 \\
0 & 1 & 0 & \ldots & 0 \\
0 & 0 & 1 & \ldots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \ldots & 1
\end{pmatrix}
Only use this function within other functions to easily get the correct
identity matrix. If you always want either the full or the sparse
identity matrix, just use numpy's built-in np.identity function.
Examples
==========
The identity matrix generated from :math:`d = 3` yields the following
matrix:
.. math::
\mathbb{I}_3 = \begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
>>> from toqito.matrices import iden
>>> iden(3)
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
It is also possible to create sparse identity matrices. The sparse identity
matrix generated from :math:`d = 10` yields the following matrix:
>>> from toqito.matrices import iden
>>> iden(10, True)
<10x10 sparse matrix of type '<class 'numpy.float64'>' with 10 stored
elements (1 diagonals) in DIAgonal format>
References
==========
.. [WIKID] Wikipedia: Identity matrix
https://en.wikipedia.org/wiki/Identity_matrix
:param dim: Integer representing dimension of identity matrix.
:param is_sparse: Whether or not the matrix is sparse.
:return: Sparse identity matrix of dimension :code:`dim`.
"""
if is_sparse:
id_mat = sparse.eye(dim)
else:
id_mat = np.identity(dim)
return id_mat
|
{"hexsha": "bd68070b41d4e02ad4840cfc88ac6d7927ba3541", "size": 2119, "ext": "py", "lang": "Python", "max_stars_repo_path": "toqito/matrices/iden.py", "max_stars_repo_name": "paniash/toqito", "max_stars_repo_head_hexsha": "ab67c2a3fca77b3827be11d1e79531042ea62b82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2020-01-28T17:02:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T18:02:15.000Z", "max_issues_repo_path": "toqito/matrices/iden.py", "max_issues_repo_name": "paniash/toqito", "max_issues_repo_head_hexsha": "ab67c2a3fca77b3827be11d1e79531042ea62b82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 82, "max_issues_repo_issues_event_min_datetime": "2020-05-31T20:09:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T17:13:59.000Z", "max_forks_repo_path": "toqito/matrices/iden.py", "max_forks_repo_name": "paniash/toqito", "max_forks_repo_head_hexsha": "ab67c2a3fca77b3827be11d1e79531042ea62b82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2020-04-02T16:07:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T13:39:22.000Z", "avg_line_length": 30.7101449275, "max_line_length": 79, "alphanum_fraction": 0.5639452572, "include": true, "reason": "import numpy,from scipy", "num_tokens": 596}
|
//
// $Id: Exception.hpp 2008 2010-05-29 02:46:49Z brendanx $
//
//
// Original author: Matt Chambers <matt.chambers .@. vanderbilt.edu>
//
// Copyright 2010 Vanderbilt University - Nashville, TN 37232
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef _STD_HPP_
#define _STD_HPP_
// a meta-header including common std headers and using common std classes;
// note that Filesystem.hpp is not included since it depends on Filesystem.cpp
#include "pwiz/utility/misc/Exception.hpp"
#include "pwiz/utility/misc/Environment.hpp"
#include "pwiz/utility/misc/Container.hpp"
#include "pwiz/utility/misc/String.hpp"
#include "pwiz/utility/misc/Stream.hpp"
#include <limits>
using std::numeric_limits;
#include <cmath>
#include <complex>
#include "pwiz/utility/math/round.hpp"
using std::abs;
using std::min;
using std::max;
using std::complex;
using std::swap;
using std::copy;
using std::locale;
#include <memory>
#include <boost/smart_ptr.hpp>
using std::auto_ptr;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::scoped_ptr;
using std::exception;
using std::runtime_error;
using std::out_of_range;
using std::domain_error;
using std::invalid_argument;
using std::length_error;
using std::logic_error;
using std::overflow_error;
using std::range_error;
using std::underflow_error;
#endif // _STD_HPP_
|
{"hexsha": "c8680129b10c9780deede757811072a94451c50f", "size": 1840, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cpp/src/pwiz/utility/misc/Std.hpp", "max_stars_repo_name": "toppic-suite/topmsv", "max_stars_repo_head_hexsha": "fef5d1f1f1c00ffdad2c258401d319f1e227c7cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cpp/src/pwiz/utility/misc/Std.hpp", "max_issues_repo_name": "toppic-suite/topmsv", "max_issues_repo_head_hexsha": "fef5d1f1f1c00ffdad2c258401d319f1e227c7cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2021-02-19T18:42:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T17:37:42.000Z", "max_forks_repo_path": "cpp/src/pwiz/utility/misc/Std.hpp", "max_forks_repo_name": "toppic-suite/topmsv", "max_forks_repo_head_hexsha": "fef5d1f1f1c00ffdad2c258401d319f1e227c7cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-04-19T11:55:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-19T11:55:08.000Z", "avg_line_length": 26.6666666667, "max_line_length": 85, "alphanum_fraction": 0.7565217391, "num_tokens": 444}
|
[STATEMENT]
lemma nxtActive_lactive:
assumes "\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>"
and "\<not> (\<exists>i>\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i\<^esub>)"
shows "\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>=\<langle>c \<and> t\<rangle>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
from assms(1)
[PROOF STATE]
proof (chain)
picking this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
[PROOF STEP]
have "\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
goal (1 subgoal):
1. \<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
[PROOF STEP]
using nxtActI
[PROOF STATE]
proof (prove)
using this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
\<exists>i\<ge>?n. \<parallel>?c\<parallel>\<^bsub>?t i\<^esub> \<Longrightarrow> ?n \<le> \<langle>?c \<rightarrow> ?t\<rangle>\<^bsub>?n\<^esub> \<and> \<parallel>?c\<parallel>\<^bsub>?t \<langle>?c \<rightarrow> ?t\<rangle>\<^bsub>?n\<^esub>\<^esub> \<and> \<not> (\<exists>k\<ge>?n. latestAct_cond ?c ?t \<langle>?c \<rightarrow> ?t\<rangle>\<^bsub>?n\<^esub> k)
goal (1 subgoal):
1. \<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
\<not> (\<exists>i>\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i\<^esub>)
[PROOF STEP]
have "\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)"
[PROOF STATE]
proof (prove)
using this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
\<not> (\<exists>i>\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i\<^esub>)
goal (1 subgoal):
1. \<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
[PROOF STEP]
using nxtActive_no_active
[PROOF STATE]
proof (prove)
using this:
\<exists>i\<ge>n. \<parallel>c\<parallel>\<^bsub>t i\<^esub>
\<not> (\<exists>i>\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i\<^esub>)
\<exists>!i. ?n \<le> i \<and> \<parallel>?c\<parallel>\<^bsub>?t i\<^esub> \<Longrightarrow> \<not> (\<exists>i'\<ge>Suc \<langle>?c \<rightarrow> ?t\<rangle>\<^bsub>?n\<^esub>. \<parallel>?c\<parallel>\<^bsub>?t i'\<^esub>)
goal (1 subgoal):
1. \<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
hence "(\<And>x. \<parallel>c\<parallel>\<^bsub>t x\<^esub> \<Longrightarrow> x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>)"
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
goal (1 subgoal):
1. \<And>x. \<parallel>c\<parallel>\<^bsub>t x\<^esub> \<Longrightarrow> x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
[PROOF STEP]
using not_less_eq_eq
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
(\<not> ?m \<le> ?n) = (Suc ?n \<le> ?m)
goal (1 subgoal):
1. \<And>x. \<parallel>c\<parallel>\<^bsub>t x\<^esub> \<Longrightarrow> x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<parallel>c\<parallel>\<^bsub>t ?x\<^esub> \<Longrightarrow> ?x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
\<parallel>c\<parallel>\<^bsub>t ?x\<^esub> \<Longrightarrow> ?x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
\<parallel>c\<parallel>\<^bsub>t ?x\<^esub> \<Longrightarrow> ?x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
using \<open>\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)\<close> lActive_equality
[PROOF STATE]
proof (prove)
using this:
\<parallel>c\<parallel>\<^bsub>t \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>\<^esub>
\<parallel>c\<parallel>\<^bsub>t ?x\<^esub> \<Longrightarrow> ?x \<le> \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>
\<not> (\<exists>i'\<ge>Suc \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub>. \<parallel>c\<parallel>\<^bsub>t i'\<^esub>)
\<lbrakk>\<parallel>?c\<parallel>\<^bsub>?t ?i\<^esub>; \<And>x. \<parallel>?c\<parallel>\<^bsub>?t x\<^esub> \<Longrightarrow> x \<le> ?i\<rbrakk> \<Longrightarrow> \<langle>?c \<and> ?t\<rangle> = ?i
goal (1 subgoal):
1. \<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<langle>c \<rightarrow> t\<rangle>\<^bsub>n\<^esub> = \<langle>c \<and> t\<rangle>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2710, "file": "DynamicArchitectures_Configuration_Traces", "length": 18}
|
using Replace
using Test
using MacroTools
using Cassette
module Sine
sine(x) = sin(x)
end
@testset "Replace" begin
@testset "Basic" begin
@test 1.0 == @replace sin cos sin(0.0)
@test 0.0 == @replace cos sin cos(0.0)
# make sure that we haven't clobbered the definition of sin and cos
@assert cos(0.0) == 1.0 && sin(0.0) == 0.0
replacement() = pi
@test pi + exp(0) == @replace rand replacement rand() + exp(0)
x, y = rand(2)
mapping = Dict(cos=>sin, tan=>exp)
f(x, y) = sin(x) + exp(y)
g(x, y) = cos(x) + tan(y)
@test f(x, y) == @replace mapping g(x, y)
end
@testset "In a function" begin
function foo()
x = @replace cos sin cos(0.0)
end
@test foo() == 0.0
function bar()
x = @replace Dict(tan=>exp) tan(1.0)
end
@test bar() == exp(1.0)
end
@testset "Function declared outside another" begin
foo(x) = cos(x)
function bar(x)
return @replace cos sin foo(x)
end
@test bar(0.0) == 0.0
function baz(x)
return @replace Dict(cos=>sin) foo(x)
end
@test baz(0.0) == 0.0
end
@testset "Inside a module" begin
@test 1.0 == @replace sin cos Sine.sine(0.0)
end
@testset "How Cassette is supposed to work" begin
replacement() = true
original() = false
@eval Cassette.@context Ctx
Cassette.overdub(::Ctx, fn::typeof(original), args...) = replacement(args...)
function functionbarrier()
x = Cassette.overdub(Ctx(), original)
return x
end
@test functionbarrier()
end
@testset "Some things inside a function" begin
replacement() = true
original() = false
function functionbarrier()
x = @replace Dict(original=>replacement) original()
return x
end
@test_broken functionbarrier()
@test functionbarrier()
end
@testset "Everything inside a function" begin
function functionbarrier()
replacement() = true
original() = false
x = @replace Dict(original=>replacement) original()
return x
end
@test_broken functionbarrier()
@test functionbarrier()
end
end
|
{"hexsha": "919f624e20417f1aacb35baefb48315eefc6cc92", "size": 1960, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "ScottishCovidResponse/Replace.jl", "max_stars_repo_head_hexsha": "610c1df9007ee0b1328acf3cd4500b63d4478a93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "ScottishCovidResponse/Replace.jl", "max_issues_repo_head_hexsha": "610c1df9007ee0b1328acf3cd4500b63d4478a93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-01T14:28:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-04T13:33:08.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "jwscook/Replace.jl", "max_forks_repo_head_hexsha": "610c1df9007ee0b1328acf3cd4500b63d4478a93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5384615385, "max_line_length": 79, "alphanum_fraction": 0.6494897959, "num_tokens": 634}
|
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing
import numpy as np
le = preprocessing.LabelEncoder()
clf = DecisionTreeClassifier()
training = np.array([
[3, "yes", 62, "accept"],
[4, "yes", 70, "accept"],
[2, "yes", 71, "reject"],
[5, "yes", 58, "reject"],
[1, "no", 76, "reject"],
[6, "no", 64, "reject"],
[2, "yes", 74, "reject"],
[3, "yes", 75, "accept"],
[4, "yes", 67, "accept"],
[2, "no", 73, "reject"],
])
training[:,1] = le.fit_transform(training[:,1])
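# NOTE: `training` has a string dtype, so the encoder's integers are stored
# back as strings; scikit-learn casts the feature strings to floats in fit().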
X = training[:,:-1]
y = training[:,-1]
clf.fit(X,y)
# test data
test = np.array([
[3, "yes", 63],
[1, "no", 59],
])
test[:,1] = le.transform(test[:,1])
print(clf.predict(test))
|
{"hexsha": "6ff00d76af086eae544bfe2f1f7d72eba520be05", "size": 727, "ext": "py", "lang": "Python", "max_stars_repo_path": "script.py", "max_stars_repo_name": "ashwinvaidya17/DecisionTree-ScikitLearn", "max_stars_repo_head_hexsha": "a1e9e382a6b3ed96352a96bbe600420139923dc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script.py", "max_issues_repo_name": "ashwinvaidya17/DecisionTree-ScikitLearn", "max_issues_repo_head_hexsha": "a1e9e382a6b3ed96352a96bbe600420139923dc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script.py", "max_forks_repo_name": "ashwinvaidya17/DecisionTree-ScikitLearn", "max_forks_repo_head_hexsha": "a1e9e382a6b3ed96352a96bbe600420139923dc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.641025641, "max_line_length": 48, "alphanum_fraction": 0.5543328748, "include": true, "reason": "import numpy", "num_tokens": 246}
|
import argparse
import sys
sys.path.append('/home/lyan/Documents/tf-pose-estimation/tf_pose/')
sys.path.append('/home/lyan/Documents/tf-pose-estimation/')
import json
import numpy as np
import cv2
from tf_pose.estimator import TfPoseEstimator
parser = argparse.ArgumentParser(description='inference speed tester')
parser.add_argument('--graph',
default=None,
type=str, required=True)
parser.add_argument('--img-size', type=int, default=224, required=False)
parser.add_argument('--video', type=str,
default="/var/ssd_1t/ptai/data/videos/squat/good/IMG_6110.MOV.mp4",
required=False)
args = parser.parse_args()
model = TfPoseEstimator(args.graph, (args.img_size, args.img_size))
# fixme, add other videos
with open('alpha_landmarks/AlphaPose_video_2019-08-23_14-22-33.json') as f:
ground_truth_keypoints = json.load(f)
cap = cv2.VideoCapture('video_2019-08-23_14-22-33.mp4')
sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
vars = (sigmas * 2) ** 2
def oks_score(gt, pred, image_shape):
'''
more about object keypoint similarity -
http://presentations.cocodataset.org/COCO17-Keypoints-Overview.pdf
:param gt:
:param pred:
:param image_shape
:return:
'''
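    # Standard OKS is mean_i exp(-d_i^2 / (2 * s^2 * k_i^2)) over labeled
    # keypoints, with d_i the keypoint distance, k_i the per-keypoint
    # constant, and s the object scale. Here s is taken as 1, and each
    # keypoint's distance is divided by the whole `vars` array before
    # averaging the exponents, i.e. an averaged constant is used rather
    # than a matched per-keypoint k_i.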
result = 0
for i in range(18):
dx = pred[i * 3] / image_shape[1] - gt[i * 3] / image_shape[1]
dy = pred[i * 3 + 1] / image_shape[0] - gt[i * 3 + 1] / image_shape[0]
        e = (dx ** 2 + dy ** 2) / vars / 2  # object scale s taken as 1
if gt[i * 3] > 0:
result += np.exp(-e.mean())
return result / 18.0
def get_keypoints(humans, img_shape):
'''
transforms humans detection from tf-pose to list of keypoints
takes by default only 0th human
:param humans: list of human poses detected
:param img_shape: tuple of size 3 - dimensions
:return:
'''
    if humans is None or len(humans) == 0:
        return [0] * 18 * 3  # 0 is x, 1 is y, 2 is visibility
kp_det = []
for i in range(18):
if i in humans[0].body_parts:
kp_det.append(humans[0].body_parts[i].x * img_shape[1])
kp_det.append(humans[0].body_parts[i].y * img_shape[0])
kp_det.append(0)
else:
kp_det.append(0)
kp_det.append(0)
kp_det.append(0)
    assert len(kp_det) == 18 * 3, 'Should be 18 keypoints in detection'
    return kp_det
def detect_poses(video_capture, det_model):
result = []
img_shape = None
    while True:
        ret, image = video_capture.read()
        if image is None:
            break
        if img_shape is None:
            img_shape = image.shape
humans, _, _ = det_model.inference(image)
result.append(humans)
return result, img_shape
detected_poses, img_shape = detect_poses(cap, model)
n = len(detected_poses)
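# map frame-name keys (e.g. '123.jpg') to integer frame indices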
ground_truth_keypoints = {int(k.split('.')[0]): ground_truth_keypoints[k] for k in ground_truth_keypoints}
metrics = []
for k in ground_truth_keypoints:
kp2 = ground_truth_keypoints[k]['bodies'][0]['joints']
m = oks_score(kp2, get_keypoints(detected_poses[k], img_shape), img_shape)
metrics.append(m)
def find_non_empty_detections(result):
ids = []
for i in range(len(result)):
if len(result[i]) > 0:
ids.append(i)
return set(ids)
non_empty_ids = find_non_empty_detections(detected_poses)
print('ground truth labels - outputs of alpha pose')
print('detection intersection', len(non_empty_ids & ground_truth_keypoints.keys()) / float(n))
print('mean object keypoints similarity (oks)', np.array(metrics).mean())
|
{"hexsha": "56906dd47cf67f76a12d01932ea22d3c56c564e9", "size": 3685, "ext": "py", "lang": "Python", "max_stars_repo_path": "utilities/evaluate_scores.py", "max_stars_repo_name": "dodler/tf-pose-estimation", "max_stars_repo_head_hexsha": "539d4a1d351ca32d67c1418f5a796a1e69e9075b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utilities/evaluate_scores.py", "max_issues_repo_name": "dodler/tf-pose-estimation", "max_issues_repo_head_hexsha": "539d4a1d351ca32d67c1418f5a796a1e69e9075b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utilities/evaluate_scores.py", "max_forks_repo_name": "dodler/tf-pose-estimation", "max_forks_repo_head_hexsha": "539d4a1d351ca32d67c1418f5a796a1e69e9075b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-05T17:03:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-05T17:03:31.000Z", "avg_line_length": 29.48, "max_line_length": 113, "alphanum_fraction": 0.6379918589, "include": true, "reason": "import numpy", "num_tokens": 1043}
|
import numpy
import chainer
from chainer import Variable
import chainer.functions as F
def func_y(w, x, dim):
pred_y = sum([w[d] * (x ** d) for d in range(dim + 1)])
return pred_y.reshape(pred_y.shape[0])
def func_J(y, pred_y):
return 0.5 * F.sqrt(F.mean_squared_error(y, pred_y))
class LSM():
"""
    A model that can be used like a chainer model.
"""
def __init__(self, *, dimension=2, learning_rate=0.1, define_by_run=False):
self.dimension = dimension
self.learning_rate = learning_rate
self.define_by_run = define_by_run
if self.define_by_run:
self.w = numpy.random.randn(self.dimension + 1)
self.w = self.w.astype(numpy.float32)
self.w = Variable(self.w.reshape(self.dimension + 1))
self.w.cleargrad()
if self.w.grad is None:
self.grads = numpy.zeros([self.dimension + 1])
else:
self.grads = self.w.grad.reshape(self.dimension + 1)
else:
self.w = numpy.random.randn(self.dimension + 1)
self.grads = numpy.zeros([self.dimension + 1])
def __call__(self, *args):
        # error if too many parameters are passed
if (len(args) > 2):
print("Please check parameter.")
elif (len(args) > 0):
            # if only computing scores
self.x = numpy.array(args[0])
self.x = self.x.astype(numpy.float32)
self.data = self.__score__()
if self.define_by_run:
pred_y = self.data
self.data = self.data.data.reshape(self.data.data.shape[0])
            # if also training
if (len(args) > 1):
self.y = numpy.array(args[1])
self.y = self.y.astype(numpy.float32)
if self.define_by_run:
self.J = func_J(Variable(self.y), pred_y)
else:
self.error = (self.y - self.data)
return self
def __score__(self):
"""
        Estimated y values for the given data points.
"""
if self.define_by_run:
scores = func_y(self.w, Variable(self.x), self.dimension)
else:
self.X = numpy.array([(x ** numpy.ones([self.dimension + 1]))
for x in self.x])
self.X = self.X ** numpy.arange(self.dimension + 1)
scores = numpy.dot(self.X, self.w)
return scores
def zerograds(self):
attr_self = [i for i in dir(self) if "__" not in i]
if "x" in attr_self:
del self.x
if "y" in attr_self:
del self.y
if "X" in attr_self:
del self.X
if "data" in attr_self:
del self.data
if "error" in attr_self:
del self.error
if self.define_by_run:
self.w.cleargrad()
if self.w.grad is None:
self.grads = numpy.zeros([self.dimension + 1])
else:
self.grads = self.w.grad.reshape(self.dimension + 1)
else:
self.grads = numpy.zeros([self.dimension + 1])
def backward(self):
if self.define_by_run:
self.J.backward(retain_grad=True)
self.grads = - self.w.grad.reshape(self.dimension + 1)
else:
self.grads = numpy.dot(self.error, self.X)
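if __name__ == "__main__":
    # Minimal usage sketch (hypothetical data; static mode only). `grads`
    # holds the negative gradient of the squared error, so a plain
    # `w += learning_rate * grads` step decreases the loss.
    x = numpy.linspace(-1.0, 1.0, 20)
    y = 1.0 + 2.0 * x + 3.0 * x ** 2
    model = LSM(dimension=2, learning_rate=0.01, define_by_run=False)
    for _ in range(1000):
        model.zerograds()
        model(x, y)
        model.backward()
        model.w = model.w + model.learning_rate * model.grads
    print(model.w)  # should approach [1, 2, 3]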
|
{"hexsha": "618afdfbb1cdaa88fc9a5291ba86d5dccc9f7c8b", "size": 3299, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/likely_chainer/models.py", "max_stars_repo_name": "Atsuto0519/LSMuseSGD", "max_stars_repo_head_hexsha": "ec40572b59f9a4ac2d85e5a690d590434e36fa7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-22T23:54:22.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-22T23:54:22.000Z", "max_issues_repo_path": "Python/likely_chainer/models.py", "max_issues_repo_name": "Atsuto0519/LSMuseSGD", "max_issues_repo_head_hexsha": "ec40572b59f9a4ac2d85e5a690d590434e36fa7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/likely_chainer/models.py", "max_forks_repo_name": "Atsuto0519/LSMuseSGD", "max_forks_repo_head_hexsha": "ec40572b59f9a4ac2d85e5a690d590434e36fa7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7211538462, "max_line_length": 79, "alphanum_fraction": 0.5292512883, "include": true, "reason": "import numpy", "num_tokens": 829}
|
[STATEMENT]
lemma protocol_inverse:
assumes "m0 \<in> carrier \<G>" "m1 \<in> carrier \<G>"
shows" ((\<^bold>g [^] ((a*b) mod (order \<G>))) [^] (s1 :: nat)) \<otimes> ((\<^bold>g [^] b) [^] (r1::nat)) \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b)
= (if v then m0 else m1)"
(is "?lhs = ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have 1: "(a*b)*(s1) + b*r1 =((a::nat)*(s1) + r1)*b "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a * b * s1 + b * r1 = (a * s1 + r1) * b
[PROOF STEP]
using mult.commute mult.assoc add_mult_distrib
[PROOF STATE]
proof (prove)
using this:
?a * ?b = ?b * ?a
?a * ?b * ?c = ?a * (?b * ?c)
(?m + ?n) * ?k = ?m * ?k + ?n * ?k
goal (1 subgoal):
1. a * b * s1 + b * r1 = (a * s1 + r1) * b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a * b * s1 + b * r1 = (a * s1 + r1) * b
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have "?lhs =
((\<^bold>g [^] (a*b)) [^] s1) \<otimes> ((\<^bold>g [^] b) [^] r1) \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b)
[PROOF STEP]
by(simp add: pow_generator_mod)
[PROOF STATE]
proof (state)
this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have "... = (\<^bold>g [^] ((a*b)*(s1))) \<otimes> ((\<^bold>g [^] (b*r1))) \<otimes> ((if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] ((a*(s1) + r1)*b)))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)))
[PROOF STEP]
by(auto simp add: nat_pow_pow nat_pow_mult assms cyclic_group_assoc)
[PROOF STATE]
proof (state)
this:
(\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)))
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<^bold>g [^] (a * b)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)))
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have "... = \<^bold>g [^] ((a*b)*(s1)) \<otimes> \<^bold>g [^] (b*r1) \<otimes> ((inv (((\<^bold>g [^] ((a*(s1) + r1)*b))))) \<otimes> (if v then m0 else m1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b))) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1))
[PROOF STEP]
by(simp add: nat_pow_mult cyclic_group_commute assms)
[PROOF STATE]
proof (state)
this:
\<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b))) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1))
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> ((if v then m0 else m1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b))) = \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1))
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have "... = (\<^bold>g [^] ((a*b)*(s1) + b*r1) \<otimes> inv (((\<^bold>g [^] ((a*(s1) + r1)*b))))) \<otimes> (if v then m0 else m1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
[PROOF STEP]
by(simp add: nat_pow_mult cyclic_group_assoc assms)
[PROOF STATE]
proof (state)
this:
\<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<^bold>g [^] (a * b * s1) \<otimes> \<^bold>g [^] (b * r1) \<otimes> (inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
have "... = (\<^bold>g [^] ((a*b)*(s1) + b*r1) \<otimes> inv (((\<^bold>g [^] (((a*b)*(s1) + r1*b)))))) \<otimes> (if v then m0 else m1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
a * b * s1 + b * r1 = (a * s1 + r1) * b
goal (1 subgoal):
1. \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
[PROOF STEP]
by (simp add: mult.commute)
[PROOF STATE]
proof (state)
this:
\<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
\<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
\<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
using l_cancel_inv assms
[PROOF STATE]
proof (prove)
using this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1)
\<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] ((a * s1 + r1) * b)) \<otimes> (if v then m0 else m1) = \<^bold>g [^] (a * b * s1 + b * r1) \<otimes> inv (\<^bold>g [^] (a * b * s1 + r1 * b)) \<otimes> (if v then m0 else m1)
?h \<in> carrier \<G> \<Longrightarrow> \<^bold>g [^] ?a \<otimes> inv (\<^bold>g [^] ?a) \<otimes> ?h = ?h
m0 \<in> carrier \<G>
m1 \<in> carrier \<G>
goal (1 subgoal):
1. (\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
[PROOF STEP]
by (simp add: mult.commute)
[PROOF STATE]
proof (state)
this:
(\<^bold>g [^] (a * b mod order \<G>)) [^] s1 \<otimes> (\<^bold>g [^] b) [^] r1 \<otimes> (if v then m0 else m1) \<otimes> inv (((\<^bold>g [^] a) [^] s1 \<otimes> \<^bold>g [^] r1) [^] b) = (if v then m0 else m1)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6401, "file": "Multi_Party_Computation_Noar_Pinkas_OT", "length": 24}
|
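The chain of equalities above boils down to the exponent identity a*b*s1 + b*r1 = (a*s1 + r1)*b together with g having order q. A quick numeric spot-check of the same simplification in a small cyclic group (the order-11 subgroup of Z_23^* generated by g = 2; values chosen arbitrarily, for illustration only):

p, q, g = 23, 11, 2                      # 2 generates a subgroup of order 11 mod 23
inv = lambda x: pow(x, p - 2, p)         # inverse in Z_p^* (p prime)

a, b, r1, s1, m = 7, 4, 5, 9, 13
lhs = (pow(pow(g, (a * b) % q, p), s1, p)
       * pow(pow(g, b, p), r1, p)
       * m
       * inv(pow((pow(pow(g, a, p), s1, p) * pow(g, r1, p)) % p, b, p))) % p
assert lhs == m
assert a * b * s1 + b * r1 == (a * s1 + r1) * b   # the arithmetic step "1" above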
import os
import os.path as osp
from tqdm import tqdm
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchnet import meter
from model.resnet_deconv import get_deconv_net
from model.hourglass import PoseNet
from model.loss import My_SmoothL1Loss
from dataloader.nyu_loader import NYU
from util.feature_tool import FeatureModule
from util.eval_tool import EvalUtil
from util.vis_tool import VisualUtil
from config import opt
class Trainer(object):
def __init__(self, config):
torch.cuda.set_device(config.gpu_id)
cudnn.benchmark = True
self.config = config
self.data_dir = osp.join(self.config.data_dir, self.config.dataset)
# output dirs for model, log and result figure saving
self.work_dir = osp.join(self.config.output_dir, self.config.dataset, 'checkpoint')
self.result_dir = osp.join(self.config.output_dir, self.config.dataset, 'results' )
if not osp.exists(self.work_dir):
os.makedirs(self.work_dir)
if not osp.exists(self.result_dir):
os.makedirs(self.result_dir)
if 'resnet' in self.config.net:
net_layer = int(self.config.net.split('_')[1])
self.net = get_deconv_net(net_layer, self.config.jt_num, self.config.downsample)
elif 'hourglass' in self.config.net:
self.stacks = int(self.config.net.split('_')[1])
self.net = PoseNet(self.config.net, self.config.jt_num)
self.net = self.net.cuda()
if self.config.load_model :
print('loading model from %s' % self.config.load_model)
pth = torch.load(self.config.load_model)
self.net.load_state_dict(pth['model'])
print(pth['best_records'])
self.net = self.net.cuda()
if self.config.dataset == 'nyu':
self.testData = NYU(self.data_dir, 'test', img_size=self.config.img_size, cube=self.config.cube)
self.criterion = My_SmoothL1Loss().cuda()
self.FM = FeatureModule()
@torch.no_grad()
def test(self, epoch):
self.testLoader = DataLoader(self.testData, batch_size=self.config.batch_size, shuffle=False, num_workers=self.config.num_workers)
self.net.eval()
eval_tool = EvalUtil(self.testData.img_size, self.testData.paras, self.testData.flip, self.testData.jt_num)
loss_meter = meter.AverageValueMeter()
for ii, (img, jt_xyz_gt, jt_uvd_gt, center_xyz, M, cube) in tqdm(enumerate(self.testLoader)):
input = img.cuda()
loss = 0
self.ft_sz = int(self.config.img_size / self.config.downsample)
jt_uvd_gt = jt_uvd_gt.cuda()
offset_gt = self.FM.joint2offset(jt_uvd_gt, input, self.config.kernel_size, self.ft_sz)
if 'hourglass' in self.config.net:
for stage_idx in range(self.stacks):
offset_pred = self.net(input)[stage_idx]
jt_uvd_pred = self.FM.offset2joint_softmax(offset_pred, input, self.config.kernel_size)
loss_coord = self.config.coord_weight * self.criterion(jt_uvd_pred, jt_uvd_gt)
loss_offset = self.config.dense_weight * self.criterion(offset_pred, offset_gt)
loss += (loss_coord + loss_offset)
else:
offset_pred = self.net(input)
jt_uvd_pred = self.FM.offset2joint_softmax(offset_pred, input, self.config.kernel_size)
loss_coord = self.config.coord_weight * self.criterion(jt_uvd_pred, jt_uvd_gt)
loss_offset = self.config.dense_weight * self.criterion(offset_pred, offset_gt)
loss += (loss_coord + loss_offset)
loss_meter.add(loss.item())
jt_uvd_gt = jt_uvd_gt.detach().cpu().numpy()
jt_xyz_gt = jt_xyz_gt.detach().cpu().numpy()
center_xyz = center_xyz.detach().cpu().numpy()
M = M.detach().numpy()
cube = cube.detach().numpy()
jt_uvd_pred = jt_uvd_pred.detach().cpu().numpy()
for i in range(jt_uvd_pred.shape[0]):
eval_tool.feed(jt_uvd_pred[i],jt_xyz_gt[i],center_xyz[i],M[i],cube[i])
mpe, mid, auc, pck, thresh = eval_tool.get_measures()
print("results: [epoch %d][MPE %.3f][AUC %.3f]" % (epoch, mpe, auc))
if epoch == -1:
eval_tool.plot_pck(osp.join(self.result_dir, 'test_pck_epoch%d.png' % epoch), pck, thresh)
txt_file = osp.join(self.work_dir, 'test_%.3f.txt' % mpe)
jt_uvd = np.array(eval_tool.jt_uvd_pred, dtype = np.float32)
if txt_file is not None:
np.savetxt(txt_file, jt_uvd.reshape([jt_uvd.shape[0], self.config.jt_num * 3]), fmt='%.3f')
return mpe
if __name__=='__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
trainer = Trainer(opt)
trainer.test(-1)
|
{"hexsha": "492ef1964c4ccffbc0b0340ad2f39a5e0548372c", "size": 4941, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "Jvictor97/AWR-Adaptive-Weighting-Regression", "max_stars_repo_head_hexsha": "2c29f8ac3d824edfff07465232ffed8e4d837ebf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 90, "max_stars_repo_stars_event_min_datetime": "2020-03-16T15:18:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T10:02:52.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "Jvictor97/AWR-Adaptive-Weighting-Regression", "max_issues_repo_head_hexsha": "2c29f8ac3d824edfff07465232ffed8e4d837ebf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-05-01T03:11:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T13:03:38.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "Jvictor97/AWR-Adaptive-Weighting-Regression", "max_forks_repo_head_hexsha": "2c29f8ac3d824edfff07465232ffed8e4d837ebf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-05-21T09:07:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T13:00:19.000Z", "avg_line_length": 42.5948275862, "max_line_length": 138, "alphanum_fraction": 0.6385347096, "include": true, "reason": "import numpy", "num_tokens": 1174}
|
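A small sketch of the mean per-joint error (MPE) that eval_tool.get_measures() reports above, assuming (N, J, 3) arrays of predicted and ground-truth joints in millimetres (the helper name is illustrative, not the repository's API):

import numpy as np

def mean_per_joint_error(jt_pred, jt_gt):
    # Euclidean distance per joint, averaged over joints and frames.
    return np.linalg.norm(jt_pred - jt_gt, axis=-1).mean()

jt_gt = np.zeros((2, 14, 3), dtype=np.float32)
jt_pred = jt_gt + np.array([3.0, 0.0, 4.0], dtype=np.float32)  # 5 mm offset
print(mean_per_joint_error(jt_pred, jt_gt))  # -> 5.0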
function evol(fitness, lb, ub, numParticles, maxiter, verbose)
sr = [(lb[i], ub[i]) for i=1:length(lb)]
    fopt = Inf  # start at +Inf so the first restart always sets xopt
xopt = []
for i=1:15
println(i)
result = BlackBoxOptim.bboptimize(fitness; SearchRange = sr, NumDimensions = length(lb),
Method = :adaptive_de_rand_1_bin_radiuslimited, MaxSteps = 18000,
TraceMode = :silent)
f_i = BlackBoxOptim.best_fitness(result)
if f_i < fopt
fopt = f_i
println(fopt)
xopt = BlackBoxOptim.best_candidate(result)
end
end
sr = [(xopt[i]-0.05*abs(xopt[i]), xopt[i]+0.05*abs(xopt[i])) for i=1:length(lb)]
sr[end] = (lb[end], lb[end])
result = BlackBoxOptim.bboptimize(fitness; SearchRange = sr, NumDimensions = length(lb),
Method = :adaptive_de_rand_1_bin_radiuslimited, MaxSteps = 30000,
TraceMode = :silent)
fopt = BlackBoxOptim.best_fitness(result)
xopt = BlackBoxOptim.best_candidate(result)
return xopt, fopt
end
|
{"hexsha": "ec8ecca634b48dc9e517df694b4da92e47de7e12", "size": 1050, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/evol.jl", "max_stars_repo_name": "rjvial/LotMassing.jl", "max_stars_repo_head_hexsha": "e69d823bc80720b59b3a13c7609cb9dfea0baff6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/evol.jl", "max_issues_repo_name": "rjvial/LotMassing.jl", "max_issues_repo_head_hexsha": "e69d823bc80720b59b3a13c7609cb9dfea0baff6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/evol.jl", "max_forks_repo_name": "rjvial/LotMassing.jl", "max_forks_repo_head_hexsha": "e69d823bc80720b59b3a13c7609cb9dfea0baff6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8181818182, "max_line_length": 96, "alphanum_fraction": 0.6028571429, "num_tokens": 304}
|
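evol runs 15 randomized global restarts, keeps the incumbent, then re-optimizes inside a +/-5 % box around it (pinning the last coordinate to its lower bound). A sketch of the same restart-then-refine pattern in Python with SciPy's differential evolution (the maxiter values and test function are placeholders, not a port of the Julia settings):

import numpy as np
from scipy.optimize import differential_evolution

def restart_then_refine(fitness, lb, ub, n_restarts=15, seed=0):
    rng = np.random.default_rng(seed)
    fopt, xopt = np.inf, None
    for _ in range(n_restarts):                  # global phase
        res = differential_evolution(fitness, list(zip(lb, ub)), maxiter=100,
                                     seed=int(rng.integers(1 << 31)))
        if res.fun < fopt:
            fopt, xopt = res.fun, res.x
    # Local phase: shrink the search box to +/-5 % around the incumbent.
    refined = [(x - 0.05 * abs(x), x + 0.05 * abs(x)) for x in xopt]
    res = differential_evolution(fitness, refined, maxiter=300)
    return res.x, res.fun

x, f = restart_then_refine(lambda v: ((v - 1.3) ** 2).sum(), [-5, -5], [5, 5])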
//
// Created by mbodych on 11.05.18.
//
#include <v4r/features/types.h>
#include <boost/algorithm/string.hpp>
namespace v4r {
std::istream &operator>>(std::istream &in, FeatureType &t) {
std::string token;
in >> token;
boost::to_upper(token);
if (token == "FPFH")
t = FeatureType::FPFH;
else if (token == "SHOT")
t = FeatureType::SHOT;
else if (token == "AKAZE")
t = FeatureType::AKAZE;
else if (token == "ALEXNET")
t = FeatureType::ALEXNET;
else if (token == "ESF")
t = FeatureType::ESF;
else if (token == "GLOBAL_COLOR")
t = FeatureType::GLOBAL_COLOR;
else if (token == "OURCVFH")
t = FeatureType::OURCVFH;
else if (token == "ROPS")
t = FeatureType::ROPS;
else if (token == "SHOT_COLOR")
t = FeatureType::SHOT_COLOR;
else if (token == "SIFT_GPU")
t = FeatureType::SIFT_GPU;
else if (token == "SIFT_OPENCV")
t = FeatureType::SIFT_OPENCV;
else if (token == "SIMPLE_SHAPE")
t = FeatureType::SIMPLE_SHAPE;
else if (token == "SURF")
t = FeatureType::SURF;
else
in.setstate(std::ios_base::failbit);
return in;
}
std::ostream &operator<<(std::ostream &out, const FeatureType &t) {
switch (t) {
case FeatureType::FPFH:
out << "FPFH";
break;
case FeatureType::SHOT:
out << "SHOT";
break;
case FeatureType::AKAZE:
out << "AKAZE";
break;
case FeatureType::ALEXNET:
out << "ALEXNET";
break;
case FeatureType::ESF:
out << "ESF";
break;
case FeatureType::GLOBAL_COLOR:
out << "GLOBAL_COLOR";
break;
case FeatureType::OURCVFH:
out << "OURCVFH";
break;
case FeatureType::ROPS:
out << "ROPS";
break;
case FeatureType::SHOT_COLOR:
out << "SHOT_COLOR";
break;
case FeatureType::SIFT_GPU:
out << "SIFT_GPU";
break;
case FeatureType::SIFT_OPENCV:
out << "SIFT_OPENCV";
break;
case FeatureType::SIMPLE_SHAPE:
out << "SIMPLE_SHAPE";
break;
case FeatureType::SURF:
out << "SURF";
break;
default:
out.setstate(std::ios_base::failbit);
}
return out;
}
} // namespace v4r
|
{"hexsha": "8c6a8aaee2e94e4db6c9db048092710cb36f3218", "size": 2169, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "modules/features/src/types.cpp", "max_stars_repo_name": "v4r-tuwien/v4r", "max_stars_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_stars_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-02-22T11:36:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T11:31:08.000Z", "max_issues_repo_path": "modules/features/src/types.cpp", "max_issues_repo_name": "v4r-tuwien/v4r", "max_issues_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_issues_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/features/src/types.cpp", "max_forks_repo_name": "v4r-tuwien/v4r", "max_forks_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_forks_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-10-19T10:39:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T13:39:03.000Z", "avg_line_length": 23.5760869565, "max_line_length": 67, "alphanum_fraction": 0.5905947441, "num_tokens": 652}
|
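The two operators above round-trip FeatureType through its upper-cased token, setting failbit on unknown names. A Python sketch of the same bidirectional mapping (only a subset of the variants, for illustration):

from enum import Enum

class FeatureType(Enum):
    FPFH = "FPFH"
    SHOT = "SHOT"
    SIFT_GPU = "SIFT_GPU"   # subset only, for illustration

def parse_feature(token):
    try:
        return FeatureType(token.upper())   # case-insensitive, like boost::to_upper
    except ValueError:
        raise ValueError(f"unknown feature type: {token!r}")  # analogue of failbit

assert parse_feature("sift_gpu") is FeatureType.SIFT_GPU
assert FeatureType.FPFH.value == "FPFH"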
import lang
import normalization
import data.set
-- V⟦−⟧ : type → PowerSet(ClosedVal)
-- V⟦−⟧ : type → (ClosedVal → 2)
-- interp_val : type → val → Prop
--
open exp typ
notation e ` ↦* `:90 e' := is_many_step e e'
def irred (e:exp) := ¬(∃ e', e ↦str e')
-- Approach to defining inductive relation inspired from ModuRes.
def T := set exp
instance T_has_mem : has_mem (exp) T := set.has_mem
instance T_has_inter : has_inter T := set.has_inter
def step_closure (s:T) : T :=
{ e:exp | ∀ e', (e ↦* e') ∧ irred(e') → e' ∈ s }
inductive rel_arrow: T → T → T
| Rlam (Rτ1 Rτ2:T) (x:string) (τ:typ) (e:exp)
(Hfunc: ∀ (v:exp), (v ∈ Rτ1) → (substitute x v e) ∈ step_closure Rτ2) :
(rel_arrow Rτ1 Rτ2 (lam x τ e))
def closed_vals := { e : exp | is_val e }
def interp_val : typ → (set exp)
| unitT := (singleton exp.unit)
| (arrowT τ1 τ2) := (rel_arrow (interp_val τ1) (interp_val τ2)) ∩ closed_vals
def interp_exp (τ:typ) := step_closure (interp_val τ)
notation `V⟦` τ `⟧` := interp_val τ
notation `E⟦` τ `⟧` := interp_exp τ
-- XXX: now, we're gonna call context_list Γ, and have to explicitly to
-- (mk_context Γ) in has_type.
def interp_ctx : context_list → set env
| [] [] := true
| ((y,τ)::Γ) ((x,v)::γ) := x=y ∧ interp_ctx Γ γ ∧ (v ∈ V⟦τ⟧)
| _ _ := false
notation `G⟦` Γ `⟧` := interp_ctx Γ
def semantic_has_type (Γ:context_list) (e:exp) (τ:typ) : Prop :=
∀ γ, γ ∈ G⟦Γ⟧ → env_sub γ e ∈ E⟦τ⟧
notation Γ ` ⊨ `:90 e:90 ` : `:90 τ := semantic_has_type Γ e τ
def safe (e:exp) : Prop :=
∀ e', (e ↦* e') → (is_val e') ∨ (∃ e'', e' ↦str e'')
lemma interp_val_implies_closedval :
∀ v τ, v ∈ V⟦τ⟧ → v ∈ closed_vals :=
begin
introv Hval,
cases τ,
{
unfold interp_val at Hval,
unfold singleton at Hval,
simp at Hval, subst Hval,
constructor
},
{
unfold interp_val at Hval,
cases Hval,
unfold closed_vals at Hval_right,
simp at Hval_right, assumption
}
end
lemma semantic_implies_type_safety :
∀ e τ,
([] ⊨ e : τ) →
safe(e) :=
begin
introv HTy,
intros e' Hstep,
by_cases irred(e'), tactic.swap,
{ -- easy case, just take a step
unfold irred at h,
simp at h,
cases h with e'',
right,
existsi e'',
assumption
},
{ -- otherwise, show
unfold semantic_has_type at HTy,
specialize HTy [] _,
{ constructor },
unfold env_sub at HTy,
specialize HTy e' ⟨Hstep, h⟩,
left,
apply interp_val_implies_closedval,
assumption
}
end
def not_in_env (γ:env) (x:string) : Prop := (∀ v, (x, v) ∉ γ)
lemma env_sub_lam_notin :
∀ γ x τ e,
not_in_env γ x →
env_sub γ (lam x τ e) = lam x τ (env_sub γ e) :=
begin
introv Hnot,
induction γ,
{ unfold env_sub },
cases γ_hd,
unfold env_sub,
sorry,
end
lemma env_sub_lam_in :
∀ γ x τ e v,
((x, v) ∈ γ) →
env_sub γ (lam x τ e) = lam x τ e :=
begin
sorry
end
-- This is the "substitution lemma" on page 13 of notes.
lemma substitution_lemma :
∀ γ x vx e,
not_in_env γ x →
env_sub ((x,vx)::γ) e = substitute x vx (env_sub γ e) :=
begin
introv Hnot,
unfold env_sub,
induction γ generalizing e,
{ unfold env_sub },
cases γ_hd,
unfold env_sub,
rw <- γ_ih, tactic.swap,
{
intros v,
specialize Hnot v,
intros H,
apply Hnot,
right, assumption
},
rw substitute_commute,
repeat { sorry }
end
theorem fundamental_property :
∀ Γ e τ,
(mk_context Γ ⊢ e : τ) →
(Γ ⊨ e : τ) :=
begin
introv Hty,
generalize h : (mk_context Γ) = (ctx),
rw h at *,
induction Hty generalizing Γ; subst h; unfold semantic_has_type,
{ -- case: unit
introv Hγ, rw env_sub_unit,
unfold interp_exp step_closure, simp *,
introv Hstep Hirred,
cases Hstep,
{ constructor },
exfalso, cases Hstep_Hstep,
},
{ -- case: var
introv Hγ,
rename [Hty_x → x, Hty_τ → τ],
-- Argument:
-- Knowing (mk_context Γ) x = some τ and γ ∈ G⟦Γ⟧ should tells us:
-- ∃ (x,vx) ∈ γ, vx ∈ V⟦τ⟧,
-- env_sub γ (var x) = vx
-- At that point, we're done because V⟦τ⟧ ⊆ E⟦τ⟧.
sorry,
},
{ -- case: abs; this is where induction will be a bit tricky.
introv Hγ,
rename [Hty_x → x, Hty_τ1 → τ1, Hty_τ2 → τ2, Hty_e → e],
-- FIXME: using this false lemma (could be made true modulo α-substitutions)
-- to mimic the argument in the lecture notes for now.
by_cases (not_in_env γ x), tactic.swap,
{ sorry },
{
rw env_sub_lam_notin, tactic.swap, { tauto },
intros e' Hstep,
cases Hstep with Hstep Hirred,
cases Hstep, tactic.swap, { cases Hstep_Hstep },
unfold interp_val,
split, tactic.swap,
{ unfold closed_vals, constructor },
constructor,
intros v Hv,
-- NOTE: step_closure V⟦τ2⟧ is the same as E⟦τ2⟧.
rw (_:step_closure V⟦τ2⟧ = E⟦τ2⟧), tactic.swap,
{ unfold interp_exp },
-- Apply inductive hypothesis
specialize Hty_ih ((x,τ1)::Γ) _,
{ unfold mk_context },
specialize Hty_ih ((x,v)::γ) _,
{ constructor, { refl },
split; assumption,
},
unfold env_sub at Hty_ih,
rw <- substitution_lemma, tactic.swap, assumption,
apply Hty_ih,
}
},
repeat { sorry }
end
|
{"author": "upamanyus", "repo": "pl-experiments", "sha": "ff4434ae9df0c00f50520eac64b87d5ae42991c1", "save_path": "github-repos/lean/upamanyus-pl-experiments", "path": "github-repos/lean/upamanyus-pl-experiments/pl-experiments-ff4434ae9df0c00f50520eac64b87d5ae42991c1/stlc/src/typesafety.lean"}
|
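In conventional notation, the unary logical relation defined above reads roughly as follows (a summary for orientation only; E⟦τ⟧ is the step-closure of V⟦τ⟧ restricted to irreducible results):

\[
\begin{aligned}
\mathcal{V}\llbracket \mathsf{unit} \rrbracket &= \{\, () \,\} \\
\mathcal{V}\llbracket \tau_1 \to \tau_2 \rrbracket &=
  \{\, \lambda x{:}\tau_1.\, e \mid
     \forall v \in \mathcal{V}\llbracket \tau_1 \rrbracket.\;
     e[v/x] \in \mathcal{E}\llbracket \tau_2 \rrbracket \,\} \\
\mathcal{E}\llbracket \tau \rrbracket &=
  \{\, e \mid \forall e'.\; e \mapsto^{*} e' \wedge \mathrm{irred}(e')
     \Rightarrow e' \in \mathcal{V}\llbracket \tau \rrbracket \,\}
\end{aligned}
\]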
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 17:03:00 2018
@author: jumtsai
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
'''Import this part to use TensorBoard for visualizing each node in the CNN.
'''
#DCNN's TensorFlow(GPU) Version
from astropy.io import fits
import os, glob, time
import logging,logging.handlers
import numpy as np
import tensorflow as tf
from manager import GPUManager
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
#--------------------------------Envir Default---------------------------------#
gm = GPUManager()
max_step=150
log_dir='CNNinfo/'
save_dir=log_dir+'restore/'
checkpoint_dir=log_dir+'model/'
num,weight,height=(1,4096,4096)
#--------------------------------Logging Module--------------------------------#
LOG_FILE = log_dir+'train_detail.log'
if os.path.isfile(LOG_FILE) is True:
os.remove(LOG_FILE)
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 10*1024*1024, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('train_detail')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
#----------------------------------Function------------------------------------#
def Padding(Input,ker_size):
'''Image Padding Function
'''
xpadpre = int(np.floor(ker_size[0]/2.0))
xpadpost = ker_size[0] - xpadpre
ypadpre = int(np.floor(ker_size[1]/2.0))
ypadpost = ker_size[1] - ypadpre
paddings = [[0,0],[xpadpre,xpadpost],[ypadpre,ypadpost],[0,0]]
padded = tf.pad(Input,paddings,"SYMMETRIC")
return padded
def batch_normal(input, is_train, is_out=True, decay=0.9999):
with tf.name_scope('BN'):
scale=tf.Variable(tf.ones([input.get_shape()[-1]]))
beta=tf.Variable(tf.zeros([input.get_shape()[-1]]))
pop_mean=tf.Variable(tf.zeros([input.get_shape()[-1]]),trainable=False)
pop_var=tf.Variable(tf.ones([input.get_shape()[-1]]),trainable=False)
if is_train:
if is_out:
batch_mean,batch_var = tf.nn.moments(input,[0,1,2])
else:
batch_mean,batch_var = tf.nn.moments(input,[0])
train_mean = tf.assign(pop_mean,pop_mean*decay+batch_mean*(1-decay))
train_var = tf.assign(pop_var,pop_var*decay+batch_var*(1-decay))
with tf.control_dependencies([train_mean,train_var]):
return tf.nn.batch_normalization(input,batch_mean,batch_var,beta,scale,0.0001)
else:
return tf.nn.batch_normalization(input,pop_mean,pop_var,beta,scale,0.0001)
def Conv_Layer(Input, k_num, k_size, p_size = 2, activity_func = None):
'''Add a convolutional layer: the input is symmetrically padded, convolved
with 'VALID' padding, then max-pooled with stride 1, so the output ends up
the same size as the input for the default pool size of 2.
'''
with tf.name_scope('Convolutional_Layer'):
padded = Padding(Input, k_size)
raw_image = tf.layers.Input(tensor = padded)
shape=raw_image.get_shape().as_list()
weights = tf.Variable(tf.truncated_normal([k_size[0],k_size[1],int(shape[3]),k_num],stddev=15.0,dtype=tf.float32))
biases = tf.Variable(tf.truncated_normal([k_num],stddev=5.0,dtype=tf.float32))
unBN = tf.add(tf.nn.conv2d(raw_image, weights, strides=[1, 1, 1, 1], padding='VALID'), biases)
Conv = batch_normal(unBN,is_train=True)
if activity_func is not None:
Act =activity_func(Conv,)
down_sample = tf.layers.max_pooling2d(Act, p_size, strides = 1, padding = 'valid')
else:
down_sample = tf.layers.max_pooling2d(Conv, p_size, strides = 1, padding = 'valid')
return down_sample
def Block(feature, num, kernel):
c_in = Conv_Layer(feature, num, [kernel,1], activity_func = tf.nn.relu)
c_out= Conv_Layer(c_in, num, [1,kernel], activity_func = tf.nn.relu)
return c_out - c_in
def mse(r,x):
with gm.auto_choice(mode=0):
return tf.reduce_mean(tf.square(r-x))
def tobatch(array,w,h):
pixelsize=array.shape[0]*array.shape[1]
batch=np.zeros([int(pixelsize/(h*w)),w,h],dtype=np.dtype('>i2'))
k=0
for i in range(int(array.shape[0]/h)):
for j in range(int(array.shape[1]/w)):
batch[k]=array[h*i:h*(i+1),w*j:w*(j+1)]
k+=1
return batch
def toarray(mesh):
n,w,h=mesh.shape
n_w=n_h=int(np.sqrt(n*w*h))
grid=np.sqrt(n)
array=np.zeros([n_w,n_h])
for i in range(n):
m=int(i/grid)
n=int(i%grid)
array[m*w:(m+1)*w,n*h:(n+1)*h]=mesh[i]
return array
#------------------------------------Input-------------------------------------#
with tf.name_scope('Placeholder'):
blur = tf.placeholder(tf.float32,[1 ,4096, 4096, 1], name='Blur')
oril = tf.placeholder(tf.float32,[1 ,4096, 4096, 1], name='Oril')
imgsize = oril.get_shape().as_list()
batch_size = imgsize[0]
tf.summary.image('Blur_input', blur, 10)
tf.summary.image('Oril_input', oril, 10)
#-----------------------------Add Hidden Layers--------------------------------#
with tf.name_scope('Hidden_Layer'): #Using multi GPUs
with gm.auto_choice(mode=0): #Allocating single GPUs
cl1 = Conv_Layer(blur, 16, [63,63], activity_func = tf.nn.relu)
cl2 = Block(cl1-blur, 16, 63)
cl3 = Block(cl2, 32, 31)
cl4 = Block(cl3, 64, 15)
cl5 = Block(cl4,128, 11)
cl6 = Block(cl5, 144, 9)
cl7 = Block(cl6, 192, 7)
cl8 = Block(cl7, 256, 5)
cl9 = Block(cl8, 512, 3)
cl10 = Block(cl9,1024, 1)
dense = tf.layers.dense(cl10, 1)
#dc3 = DeConv_Layer(cl3,12,1,activity_func = tf.nn.relu)
with tf.name_scope('Loss'):
pre = tf.reshape(dense,imgsize)+blur
loss=mse(pre,oril)
tf.summary.image('Output', pre, 10)
tf.summary.scalar('loss', loss)
with tf.name_scope('Train'):
step=tf.Variable(0,trainable=False)
learnrate=tf.train.exponential_decay(5.0, step, 100, 0.96, staircase = True)
train_step = tf.train.AdadeltaOptimizer(learnrate).minimize(loss, global_step = step)
#---------------------------------Read Data------------------------------------#
files=glob.glob('gauss127/*.fits')
files.sort(reverse = True)
trainset=open('train.txt','r')
train=[]
for trainname in trainset:
train.append(trainname.split('\n')[0])
#-----------------------------------Initiate-----------------------------------#
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
config.gpu_options.allocator_type = 'BFC'
with tf.Session(config = config) as sess:
sess.run(init)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(log_dir + 'train', sess.graph)
test_writer = tf.summary.FileWriter(log_dir + 'test', sess.graph)
saver = tf.train.Saver(max_to_keep=128)
sess.graph.finalize()
#----------------------------------Iteration-----------------------------------#
for epoch in range(1,max_step+1):
trainloss = []
testloss = []
s=time.clock()
for fitsfile in files:
name=fitsfile.split('/')[-1]
blurred=fits.open('gauss127/'+name)[0].data
try:
original=fits.open('original/'+name)[0].data
except IOError:
original=np.zeros(blurred.shape)
blurred=tobatch(blurred,weight,height)
original=tobatch(original,weight,height)
blurred=blurred.reshape([num,weight,height,1])
original=original.reshape([num,weight,height,1])
if name in train:
epoch_result =np.zeros([num,weight,height])
for batch in range(0,num):
Input_x=np.zeros([1,weight,height,1],dtype=np.float32)
Input_y=np.zeros([1,weight,height,1],dtype=np.float32)
Input_x[0]=np.float32(blurred[batch])
Input_y[0]=np.float32(original[batch])
_,lvalue,summary,result=sess.run([train_step,loss,merged,pre],
feed_dict={blur:Input_x,oril:Input_y})
train_writer.add_summary(summary, epoch)
saver.save(sess, checkpoint_dir + 'model'+str(batch)+'.ckpt', global_step=batch+1,write_meta_graph=False,write_state=False)
epoch_result[batch]=result.reshape(weight,height)
epoch_result=toarray(epoch_result)
recon=np.int16(epoch_result)
trainloss.append(lvalue)
train_writer.close()
if os.path.isfile(save_dir+'Train_'+name) is True:
os.remove(save_dir+'Train_'+name)
fits.HDUList([fits.PrimaryHDU(recon)]).writeto(save_dir+'Train_'+name)
else:
epoch_result =np.zeros([num,weight,height])
for batch in range(0,num):
Input_x=np.zeros([1,weight,height,1],dtype=np.float32)
Input_y=np.zeros([1,weight,height,1],dtype=np.float32)
Input_x[0]=np.float32(blurred[batch])
Input_y[0]=np.float32(original[batch])
saver.restore(sess, checkpoint_dir +'model'+str(batch)+'.ckpt-'+str(batch+1))
lvalue,summary,result=sess.run([loss,merged,pre],
feed_dict={blur:Input_x,oril:Input_y})
test_writer.add_summary(summary, epoch)
epoch_result[batch]=result.reshape(weight,height)
epoch_result=toarray(epoch_result)
recon=np.int16(epoch_result)
testloss.append(lvalue)
test_writer.close()
if os.path.isfile(save_dir +'Test_'+name) is True:
os.remove(save_dir+'Test_'+name)
fits.HDUList([fits.PrimaryHDU(recon)]).writeto(save_dir+'Test_'+name)
e=time.clock()
print('Epoch %d mean train loss is %e, time is %f.'%(epoch,np.mean(trainloss),(e-s)))
print('Epoch %d mean test loss is %e.'%(epoch,np.mean(testloss)))
logger.info('Epoch %d mean train loss is %e, time is %f'%(epoch,np.mean(trainloss),(e-s)))
logger.info('Epoch %d mean test loss is %e.'%(epoch,np.mean(testloss)))
if os.path.isfile('residual/'+name) is True:
os.remove('residual/'+name)
original=toarray(original.reshape(num,weight,height))
fits.HDUList([fits.PrimaryHDU(original-recon)]).writeto('residual/'+name)
|
{"hexsha": "9570eb931026eab404061bf45a31776ebba279d8", "size": 10959, "ext": "py", "lang": "Python", "max_stars_repo_path": "DCNN.py", "max_stars_repo_name": "caibojun/DeconvolutionalNeuralNetwork", "max_stars_repo_head_hexsha": "7ddf459fcec5b3fb01f2f6f4a074e7a16a9bfca2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DCNN.py", "max_issues_repo_name": "caibojun/DeconvolutionalNeuralNetwork", "max_issues_repo_head_hexsha": "7ddf459fcec5b3fb01f2f6f4a074e7a16a9bfca2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DCNN.py", "max_forks_repo_name": "caibojun/DeconvolutionalNeuralNetwork", "max_forks_repo_head_hexsha": "7ddf459fcec5b3fb01f2f6f4a074e7a16a9bfca2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0120481928, "max_line_length": 162, "alphanum_fraction": 0.5764212063, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2715}
|
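tobatch and toarray above cut a square image into a grid of tiles and stitch it back together. A simplified, self-contained round-trip check of that tiling (square grid assumed, as in the script; this is a cleaned-up sketch, not the original functions):

import numpy as np

def tobatch(array, w, h):
    n = (array.shape[0] // h) * (array.shape[1] // w)
    batch = np.zeros([n, h, w], dtype=array.dtype)
    k = 0
    for i in range(array.shape[0] // h):
        for j in range(array.shape[1] // w):
            batch[k] = array[h * i:h * (i + 1), w * j:w * (j + 1)]
            k += 1
    return batch

def toarray(mesh):
    n, h, w = mesh.shape
    grid = int(np.sqrt(n))               # square grid of tiles
    out = np.zeros([grid * h, grid * w], dtype=mesh.dtype)
    for k in range(n):
        i, j = divmod(k, grid)
        out[i * h:(i + 1) * h, j * w:(j + 1) * w] = mesh[k]
    return out

img = np.arange(16 * 16).reshape(16, 16)
assert (toarray(tobatch(img, 8, 8)) == img).all()   # lossless round trip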
from __future__ import print_function
import numpy as np
from astropy.table import Table
def split_asts(ast_file, sd_map_file, bin_width=1.):
"""
Split the ASTs into sub-files for each source density bin.
Parameters
----------
ast_file : string
Name of the file that contains the AST results (mag_in and mag_out)
sd_map_file : string
Name of the fits file that contains the source densities
bin_width : float
Width of source density bin in star/arcsec
"""
# read in the AST file
ast_data = Table.read(ast_file)
# read in the source density file
sd_data = Table.read(sd_map_file)
# define SD bins
sd_bins = np.arange(np.floor(np.min(sd_data['value'])),
np.ceil(np.max(sd_data['value']))+1,
bin_width,
dtype=int)
# go through each source density bin
for i in range(len(sd_bins)-1):
print('getting ASTs in SD bin '+str(sd_bins[i])+'-'+str(sd_bins[i+1]) )
# list to hold all of the indices
ast_all_ind = []
# indices for this bin
sd_ind = np.where((sd_data['value'] >= sd_bins[i]) &
(sd_data['value'] < sd_bins[i+1]))[0]
# for each index, grab the ASTs within that RA/Dec box
for j,ind in enumerate(sd_ind):
ast_ind = np.where((ast_data['RA_J2000'] > sd_data['min_ra'][ind]) &
(ast_data['RA_J2000'] < sd_data['max_ra'][ind]) &
(ast_data['DEC_J2000'] > sd_data['min_dec'][ind]) &
(ast_data['DEC_J2000'] < sd_data['max_dec'][ind]) )[0]
# append indices to master list
if len(ast_ind) > 0:
ast_all_ind += ast_ind.tolist()
# make a new table out of the indices
print(' found '+str(len(ast_all_ind))+' ASTs')
if len(ast_all_ind) > 0:
print(' writing new table')
ast_table = ast_data[ast_all_ind]
new_filename = ast_file.replace('.fits','_SD'+str(sd_bins[i])+'-'+str(sd_bins[i+1])+'.fits')
ast_table.write(new_filename, overwrite=True)
|
{"hexsha": "81d9a3c8e22c95da399e92592be16762ebb19a61", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "beast/tools/split_asts_by_source_density.py", "max_stars_repo_name": "marthaboyer/beast", "max_stars_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beast/tools/split_asts_by_source_density.py", "max_issues_repo_name": "marthaboyer/beast", "max_issues_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beast/tools/split_asts_by_source_density.py", "max_forks_repo_name": "marthaboyer/beast", "max_forks_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1, "max_line_length": 104, "alphanum_fraction": 0.5540720961, "include": true, "reason": "import numpy,from astropy", "num_tokens": 542}
|
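A small standalone illustration of the binning logic used above (np.arange over the floor/ceil of the density range, then half-open bins), with made-up density values:

import numpy as np

values = np.array([0.4, 1.2, 2.7, 3.1])
sd_bins = np.arange(np.floor(values.min()),
                    np.ceil(values.max()) + 1, 1.0, dtype=int)
# sd_bins -> [0, 1, 2, 3, 4]; sources fall in bins [0,1), [1,2), [2,3), [3,4)
for lo, hi in zip(sd_bins[:-1], sd_bins[1:]):
    in_bin = np.where((values >= lo) & (values < hi))[0]
    print(lo, hi, in_bin)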
#include <bitset>
#include <sstream>
#include <string>
#include <thread>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <math_nerd/hill_cipher.h>
#include "file_handler.h"
namespace beast = boost::beast;
namespace http = beast::http;
namespace websocket = beast::websocket;
namespace net = boost::asio;
namespace mod = math_nerd::int_mod;
namespace matrix = math_nerd::matrix_t;
namespace hc = math_nerd::hill_cipher;
using tcp = net::ip::tcp;
auto operator<<(std::ostream &os, hc::hill_key key) -> std::ostream &;
auto do_session(tcp::socket socket) -> void
{
websocket::stream<tcp::socket> ws{ std::move(socket) };
ws.set_option(websocket::stream_base::decorator(
[](websocket::response_type &res)
{
res.set(http::field::server,
"Hill Cipher");
}));
ws.accept();
auto key_size{ 5 };
std::unique_ptr<hc::hill_key> key{ new hc::hill_key{ key_size } };
while( true )
{
beast::flat_buffer input;
ws.read(input);
auto cmd{ beast::buffers_to_string(input.data()) };
std::bitset<3> cmd_type{ 0x0 };
enum command
{
generate_key,
encrypt,
decrypt
};
std::string output;
output.resize(100);
output = "Error.";
switch( cmd[0] )
{
case 'g':
{
if( cmd[1] >= '0' && cmd[1] <= '9' )
{
key_size = std::max(cmd[1] - '0', 2);
cmd_type[command::generate_key] = true;
}
}
break;
case 'e':
{
cmd_type[command::encrypt] = true;
cmd.erase(0, 1);
}
break;
case 'd':
{
cmd_type[command::decrypt] = true;
cmd.erase(0, 1);
}
break;
default:
{
// Empty.
}
}
if( cmd_type[command::generate_key] )
{
if( key_size != key->row_count() )
{
key.reset(new hc::hill_key{ key_size });
}
output = "g" + std::to_string(key_size);
// Create our (not cryptographically secure) PRNG.
std::random_device device;
std::mt19937 rng(device());
// Key distribution.
std::uniform_int_distribution<int> key_dist(0, 96);
// Index distribution (0 .. key_size-1) for fixing invalid keys.
std::uniform_int_distribution<int> idx_dist(0, key_size - 1);
for( auto i{ 0 }; i < key_size; ++i )
{
for( auto j{ 0 }; j < key_size; ++j )
{
// Create (not cryptographically secure) random elements.
(*key)[i][j] = key_dist(rng);
}
}
while( not hc::is_valid_key((*key)) )
{ // Randomly touch parts of the key to attempt to fix.
(*key)[idx_dist(rng)][idx_dist(rng)] += key_dist(rng);
}
std::stringstream os;
os << (*key);
output += os.str();
}
else if( cmd_type[command::encrypt] )
{
output = "e" + hc::encrypt((*key), cmd);
}
else if( cmd_type[command::decrypt] )
{
output = "d" + hc::decrypt((*key), cmd);
}
beast::multi_buffer buffer;
auto msg = net::buffer_copy(buffer.prepare(output.size()), net::buffer(output));
buffer.commit(msg);
ws.text(ws.got_text());
ws.write(buffer.data());
}
}
auto main() -> int
try
{
auto const address{ net::ip::make_address("127.0.0.1") };
auto const port{ static_cast<std::uint16_t>(31337) };
file_handler file;
net::io_context ioc{ 1 };
tcp::acceptor acceptor{ ioc, {address, port} };
tcp::socket socket{ ioc };
acceptor.accept(socket);
do_session(std::move(socket));
return EXIT_SUCCESS;
}
catch( std::exception const &e )
{
if( std::string(e.what()) == "The WebSocket stream was gracefully closed at both endpoints" )
{
return EXIT_SUCCESS;
}
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
auto operator<<(std::ostream &os, hc::hill_key key) -> std::ostream &
{
// Starting bracket for key.
os << "[";
for( auto i{ 0 }; i < key.row_count(); ++i )
{
for( auto j{ 0 }; j < key.column_count(); ++j )
{
// Print element.
os << key[i][j];
if( j != key.column_count() - 1 )
{
// Delimit with comma.
os << ",";
}
}
if( i != key.row_count() - 1 )
{ // Delimit row with semicolon.
os << ";";
}
}
// Ending bracket for key.
os << "]";
return os;
}
|
{"hexsha": "def46877309b8d2471fae2a6bf473936da9696c5", "size": 5030, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "hill.cpp", "max_stars_repo_name": "JacobSzepsy/Hill-Cipher-Webpage", "max_stars_repo_head_hexsha": "7fb9c7af9fd90af993de992fb0d5f556ecdc6b71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hill.cpp", "max_issues_repo_name": "JacobSzepsy/Hill-Cipher-Webpage", "max_issues_repo_head_hexsha": "7fb9c7af9fd90af993de992fb0d5f556ecdc6b71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hill.cpp", "max_forks_repo_name": "JacobSzepsy/Hill-Cipher-Webpage", "max_forks_repo_head_hexsha": "7fb9c7af9fd90af993de992fb0d5f556ecdc6b71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-06-18T21:27:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-18T21:27:10.000Z", "avg_line_length": 23.8388625592, "max_line_length": 97, "alphanum_fraction": 0.4876739563, "num_tokens": 1198}
|
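A Hill key is usable iff its determinant is a unit modulo the alphabet size; given the 0..96 entry range above, the modulus is presumably 97 (an assumption here, not stated in the file). A small NumPy sketch of the validity test and the same random-repair loop:

import math
import numpy as np

M = 97  # assumed modulus, matching the 0..96 entry range above

def is_valid_key(key):
    # Usable iff det(key) is a unit mod M; the float determinant is exact
    # enough for small matrices, use exact arithmetic for larger key sizes.
    det = int(round(np.linalg.det(key))) % M
    return math.gcd(det, M) == 1

rng = np.random.default_rng(0)
key = rng.integers(0, M, size=(3, 3))
while not is_valid_key(key):
    # Same repair strategy as the C++ loop: nudge a random entry and retry.
    key[rng.integers(3), rng.integers(3)] += rng.integers(M)
print(key % M)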
[STATEMENT]
lemma lower_higher_commute: "higher (lower p s) t = lower (higher p t) s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. higher (lower p s) t = lower (higher p t) s
[PROOF STEP]
by (rule poly_mapping_eqI, simp add: lookup_higher lookup_lower)
|
{"llama_tokens": 103, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 1}
|
import math
import os
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import torch
import torch.nn.functional as F
import torchvision
import yaml
from omegaconf import DictConfig, OmegaConf
from src.dataset.datamodule import IMG_MEAN, IMG_STD, get_input_size_wo_pad, show_stft
from src.dataset.dataset import WaveformDataset
from src.postprocess.visualize import plot_rec
class LitModel(pl.LightningModule):
def __init__(
self, conf: DictConfig, dataset_len: int = 72899, logger_name="tensorboard"
) -> None:
super().__init__()
self.save_hyperparameters() # type: ignore
# self.hparams = conf # type: ignore[misc]
self.conf = conf
self.dataset_len = dataset_len
self.logger_name = logger_name
print("\t >>do segmentation")
self.num_inchannels = len(self.conf.stft_targets) * 3
self.classes_num = self.num_inchannels
smp_params = OmegaConf.to_container(conf.model.smp_params)
smp_params["classes"] = self.classes_num
smp_arch = smp_params.pop("arch_name")
if smp_arch == "unet":
smp_func = smp.Unet
elif smp_arch == "unetpp":
smp_func = smp.UnetPlusPlus
elif smp_arch == "manet":
smp_func = smp.MAnet
elif smp_arch == "deeplabv3":
smp_func = smp.DeepLabV3
elif smp_arch == "deeplabv3p":
smp_func = smp.DeepLabV3Plus
self.model = smp_func(**smp_params)
# self.model = nn.Sequential(smp_func(**smp_params),)
if self.num_inchannels != 3:
patch_first_conv(self.model, in_channels=self.num_inchannels)
if self.conf.model.channels_last:
# Need to be done once, after model initialization (or load)
self.model = self.model.to(memory_format=torch.channels_last)
if self.conf.model.loss == "mse":
self.criterion = torch.nn.MSELoss(reduction="none")
else:
raise NotImplementedError
self.loss_ch = (
len(self.conf.stft_targets) * 1
if self.conf.train_energy_only
else len(self.conf.stft_targets) * 3
)
if self.conf.model.metrics == "mse":
self.metrics = pl.metrics.MeanSquaredError()
else:
raise NotImplementedError
if self.conf.model.last_act == "sigmoid":
self.activation = torch.nn.Sigmoid()
elif self.conf.model.last_act == "tanh":
self.activation = torch.nn.Tanh()
elif self.conf.model.last_act == "identity":
self.activation = torch.nn.Identity()
else:
raise NotImplementedError
self.val_sync_dist = self.conf.trainer.gpus > 1
self.is_debug = self.conf.is_debug
self.h_, self.w_ = get_input_size_wo_pad(
n_fft=self.conf.stft_params.n_fft, input_width=self.conf.input_width
)
def on_fit_start(self):
self._set_image_normalization()
def on_test_start(self):
self._set_image_normalization()
def forward(self, x):
x = self.model(x)
return x
def _remove_pad(
self, inputs: torch.Tensor, pred: torch.Tensor, targets: torch.Tensor
) -> Tuple[torch.Tensor, ...]:
return (
inputs[:, :, : self.h_, : self.w_],
pred[:, :, : self.h_, : self.w_],
targets[:, :, : self.h_, : self.w_],
)
def training_step(self, batch, batch_idx):
inputs = batch["image"]
if self.conf.model.channels_last:
# Need to be done for every input
inputs = inputs.to(memory_format=torch.channels_last)
targets = batch["target_image"]
outputs = self.model(inputs)
pred = self.activation(outputs)
# if self.conf.model.last_act == "tanh":
# pred = pred * 2.0
inputs, pred, targets = self._remove_pad(
inputs=inputs, pred=pred, targets=targets
)
if self.conf.gt_as_mask:
loss = self.criterion(
pred, targets - (inputs * self._img_std + self._img_mean)
)[:, : self.loss_ch].mean()
else:
loss = self.criterion(pred, targets - inputs)[:, : self.loss_ch].mean()
if self.logger_name == "tensorboard":
self.log("train_loss", loss)
elif self.logger_name == "neptune":
self.logger.experiment["loss/train"].log(loss)
return loss
def validation_step(self, batch, batch_idx):
inputs = batch["image"]
if self.conf.model.channels_last:
# Need to be done for every input
inputs = inputs.to(memory_format=torch.channels_last)
targets = batch["target_image"]
outputs = self.model(inputs)
pred = self.activation(outputs)
# if self.conf.model.last_act == "tanh":
# pred = pred * 2.0
inputs, pred, targets = self._remove_pad(
inputs=inputs, pred=pred, targets=targets
)
if self.conf.gt_as_mask:
loss = self.criterion(
pred, targets - (inputs * self._img_std + self._img_mean)
)[:, : self.loss_ch].mean()
pred = pred + (inputs * self._img_std + self._img_mean)
else:
loss = self.criterion(pred, targets - inputs)[:, : self.loss_ch].mean()
pred = pred + inputs
# only for checkpoint call back
self.log("val_loss", loss)
sequence_results = self.convert_img_pred_to_sequence(pred=pred, batch=batch)
abs_error = sequence_results.pop("abs_error")
metrics = np.mean(abs_error)
if batch_idx in [0, 2]:
pred = torch.clamp(pred, 0.0, 1.0)
epoch = (
self.trainer.global_step * self.conf.batch_size
) // self.dataset_len
num_ = 3
ba_ind = 0
imgs = {
"inputs": inputs[:num_].cpu().numpy().transpose((0, 2, 3, 1)),
"pred": pred[:num_].detach().cpu().numpy().transpose((0, 2, 3, 1)),
"targets": targets[:num_]
.detach()
.cpu()
.numpy()
.transpose((0, 2, 3, 1)),
}
# === PLOT ===
nrows = 3
ncols = 3
ch_ = 0
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
str(batch["phone"][ba_ind]),
str(batch["millisSinceGpsEpoch"][ba_ind].cpu().numpy()),
str(batch["phone_time"][ba_ind].cpu().numpy()),
"epoch",
str(epoch),
]
)
)
D_mats = {}
for j, (key, img) in enumerate(imgs.items()):
gt_as_mask = (key in ["pred", "targets"]) and self.conf.gt_as_mask
abs_, cos_, sin_ = WaveformDataset.handle_stft_normalize(
img=img.copy(),
cnum=len(self.conf.stft_targets),
is_encode=False,
img_std=self._img_std.cpu().numpy().transpose((0, 2, 3, 1)),
img_mean=self._img_mean.cpu().numpy().transpose((0, 2, 3, 1)),
gt_as_mask=gt_as_mask,
)
show_stft(
conf=self.conf,
D_abs=abs_[ba_ind][..., ch_],
D_cos=cos_[ba_ind][..., ch_],
D_sin=sin_[ba_ind][..., ch_],
ax=axes,
stft_ind=j,
stft_name=key,
)
D_mats[key] = {
"D_abs": abs_[ba_ind][..., ch_],
"D_theta": np.arctan2(
sin_[ba_ind][..., ch_], cos_[ba_ind][..., ch_]
),
}
if self.logger_name == "tensorboard":
self.logger.experiment.add_figure(
"prediction_fig", fig, global_step=self.trainer.global_step,
)
elif self.logger_name == "neptune":
self.logger.experiment[f"val/pred_{batch_idx}_{ba_ind}"].log(fig)
plt.close()
x_gts = batch[self.conf.stft_targets[0].replace("_diff", "_gt_diff")]
x = batch[self.conf.stft_targets[0]]
plot_rec(
x=x[ba_ind].cpu().numpy(),
x_gt=x_gts[ba_ind].cpu().numpy(),
D_abs=D_mats["pred"]["D_abs"],
D_theta=D_mats["pred"]["D_theta"],
D_abs_gt=D_mats["targets"]["D_abs"],
D_theta_gt=D_mats["targets"]["D_theta"],
length=x_gts[ba_ind].shape[0],
is_db=self.conf.stft_params.is_db,
hop_length=self.conf.stft_params.hop_length,
win_length=self.conf.stft_params.win_length,
logger=self.logger,
logger_name=self.logger_name,
global_step=self.trainer.global_step,
log_name=f"val/pred_{batch_idx}_{ba_ind}_line",
target_name=self.conf.stft_targets[0],
)
return {"loss": loss, "metrics": metrics, "sequence_results": sequence_results}
def validation_epoch_end(self, validation_step_outputs):
keys = list(validation_step_outputs[0].keys())
met_dict = {key: [] for key in keys}
for pred in validation_step_outputs:
for key in keys:
met_dict[key].append(pred[key])
sequence_results = {key: [] for key in met_dict["sequence_results"][0].keys()}
for key in keys:
if key == "sequence_results":
for seq_res in met_dict[key]:
for seq_key, values in seq_res.items():
if not isinstance(values, np.ndarray):
values = np.array(values)
sequence_results[seq_key].append(values)
elif isinstance(met_dict[key][0], torch.Tensor):
met_dict[key] = torch.mean(torch.stack(met_dict[key])).cpu().numpy()
else:
met_dict[key] = np.mean(np.stack(met_dict[key]))
for seq_key, values in sequence_results.items():
sequence_results[seq_key] = np.concatenate(values)
pred_df = self.generate_pred_df(
sequence_results=sequence_results, is_test=False, agg_mode="mean"
)
pred_df.to_csv(f"./val_{self.trainer.global_step}.csv", index=False)
phone_mae = []
# at the first step there is no velocity info
pred_df.dropna(axis=0, inplace=True)
for phone, df_ in pred_df.groupby("phone"):
gt_targets = [
target.replace("_diff", "_gt_diff") for target in self.conf.stft_targets
]
phone_mae.append(
np.mean(
np.abs(df_[self.conf.stft_targets].values - df_[gt_targets].values)
)
)
self.log("phone_mae", np.mean(phone_mae).tolist())
if self.logger_name == "tensorboard":
self.log("epoch_loss/val", met_dict["loss"].tolist())
self.log("epoch_metrics/val", met_dict["metrics"].tolist())
elif self.logger_name == "neptune":
self.logger.experiment["metrics/phone_mae"].log(np.mean(phone_mae).tolist())
self.logger.experiment["loss/val"].log(met_dict["loss"].tolist())
self.logger.experiment["metrics/val"].log(met_dict["metrics"].tolist())
def convert_img_pred_to_sequence(
self, pred: torch.Tensor, batch: Dict[str, torch.Tensor], is_test: bool = False
) -> Dict[str, np.ndarray]:
pred = torch.clamp(pred, 0.0, 1.0)
D_abs_b, D_cos_b, D_sin_b = WaveformDataset.handle_stft_normalize(
img=pred.clone(),
cnum=len(self.conf.stft_targets),
is_encode=False,
img_std=self._img_std,
img_mean=self._img_mean,
gt_as_mask=True,
image_fomat="torch",
)
D_theta_b = torch.atan2(D_sin_b, D_cos_b)
D_abs_b = D_abs_b.cpu().numpy().transpose((0, 2, 3, 1))
D_theta_b = D_theta_b.cpu().numpy().transpose((0, 2, 3, 1))
stft_preds = {}
for i, key in enumerate(self.conf.stft_targets):
stft_preds[key] = {
"D_abs_b": D_abs_b[..., i],
"D_theta_b": D_theta_b[..., i],
}
abs_errors = []
sequence_results = {}
for stft_target, D_mats in stft_preds.items():
# batch
x_rec_b = []
for D_abs, D_theta in zip(D_mats["D_abs_b"], D_mats["D_theta_b"]):
if self.conf.stft_params.is_db:
D_abs = librosa.db_to_amplitude(D_abs, ref=1.0)
x_rec = librosa.istft(
np.exp(1j * D_theta) * D_abs,
hop_length=self.conf.stft_params.hop_length,
win_length=self.conf.stft_params.win_length,
length=self.conf.input_width,
)
x_rec_b.append(x_rec)
x_rec_b = np.stack(x_rec_b)
sequence_results[stft_target] = x_rec_b
if not is_test:
stft_target_gt = stft_target.replace("_diff", "_gt_diff")
sequence_results[stft_target_gt] = batch[stft_target_gt].cpu().numpy()
error = np.abs(x_rec_b - sequence_results[stft_target_gt])
abs_errors.append(error)
sequence_results["abs_error"] = np.stack(abs_errors)
sequence_results.update(
{
"phone": batch["phone"],
"phone_time": batch["phone_time"].cpu().numpy(),
"millisSinceGpsEpoch": batch["millisSinceGpsEpoch"].cpu().numpy(),
}
)
return sequence_results
def generate_pred_df(
self, sequence_results: dict, is_test: bool = False, agg_mode: str = "mean"
) -> pd.DataFrame:
phone_results = {
str(phone): {} for phone in np.unique(sequence_results["phone"])
}
df = []
stft_targets = OmegaConf.to_container(self.conf.stft_targets)
if not is_test:
gt_targets = [
target.replace("_diff", "_gt_diff") for target in stft_targets
]
stft_targets = stft_targets + gt_targets
            sampling_delta = self.conf.val_sampling_delta
        else:
            sampling_delta = self.conf.test_sampling_delta
for phone in np.unique(sequence_results["phone"]):
phone_mask = sequence_results["phone"] == phone
phone_time = sequence_results["phone_time"][phone_mask]
phone_results[phone].update({"phone_time": phone_time})
millisSinceGpsEpoch = sequence_results["millisSinceGpsEpoch"][phone_mask]
millisSinceGpsEpoch = np.arange(
millisSinceGpsEpoch.min() - 1000,
millisSinceGpsEpoch.max() + self.conf.input_width * 1000,
1000,
dtype=np.int64,
)
phone_results[phone].update({"millisSinceGpsEpoch": millisSinceGpsEpoch})
for stft_target in stft_targets:
num_preds = np.sum(phone_mask)
assert (
num_preds
                <= (phone_time.max() + self.conf.input_width) // sampling_delta
)
pred_sequence = np.zeros(
(num_preds, phone_time.max() + self.conf.input_width,),
dtype=np.float64,
)
pred_mask = np.zeros(
(num_preds, phone_time.max() + self.conf.input_width,),
                dtype=bool,
)
for i, (ph_t, seq) in enumerate(
zip(phone_time, sequence_results[stft_target][phone_mask])
):
pred_sequence[i, ph_t : ph_t + self.conf.input_width] = seq
pred_mask[i, ph_t : ph_t + self.conf.input_width] = True
if agg_mode == "mean":
pred_sequence = np.sum(pred_sequence, axis=0) / np.sum(
pred_mask, axis=0
)
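                    # Worked micro-example (hedged, illustrative only): two width-4
                    # windows starting at t=0 and t=2 overlap on t=2..3, where the
                    # mask count is 2; the sum/count division averages the two
                    # overlapping predictions and passes single ones through.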
else:
raise NotImplementedError
phone_results[phone].update({stft_target: pred_sequence})
for_df = {
"phone": np.repeat(phone, pred_sequence.shape[0]),
"millisSinceGpsEpoch": millisSinceGpsEpoch,
}
for_df.update(
{
stft_target: phone_results[phone][stft_target]
for stft_target in stft_targets
}
)
df.append(pd.DataFrame(for_df))
# df.append(np.stack(for_df).transpose(1, 0))
pred_df = pd.concat(df, axis=0)
dtypes = {target: np.float64 for target in stft_targets}
pred_df = pred_df.astype(dtypes)
return pred_df
def flip_tta(
self, model, inputs: torch.Tensor, pred: torch.Tensor,
):
transforms = [
torchvision.transforms.functional.hflip,
]
inverts = [
torchvision.transforms.functional.hflip,
]
for trans_, invert_ in zip(transforms, inverts):
            outputs = model(trans_(inputs))
pred_aug = self.activation(outputs)
pred += invert_(pred_aug)
pred *= 1.0 / (len(transforms) + 1)
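        # Hedged note: `pred` now holds the sum of the original prediction and
        # each un-flipped augmented prediction, so dividing by
        # (len(transforms) + 1) yields a plain mean over all TTA views.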
return pred
def test_step(self, batch, batch_idx):
inputs = batch["image"]
if self.conf.model.channels_last:
# Need to be done for every input
inputs = inputs.to(memory_format=torch.channels_last)
outputs = self.model(inputs)
pred = self.activation(outputs)
if self.conf.use_flip_tta:
pred = self.flip_tta(model=self.model, inputs=inputs, pred=pred)
inputs, pred, _ = self._remove_pad(inputs=inputs, pred=pred, targets=pred)
# if self.conf.model.last_act == "tanh":
# pred = pred * 2.0
if self.conf.gt_as_mask:
pred = pred + (inputs * self._img_std + self._img_mean)
else:
pred = pred + inputs
sequence_results = self.convert_img_pred_to_sequence(
pred=pred, batch=batch, is_test=True
)
return {"sequence_results": sequence_results}
def test_epoch_end(self, test_step_outputs):
keys = list(test_step_outputs[0].keys())
met_dict = {key: [] for key in keys}
for pred in test_step_outputs:
for key in keys:
met_dict[key].append(pred[key])
sequence_results = {key: [] for key in met_dict["sequence_results"][0].keys()}
for key in keys:
if key == "sequence_results":
for seq_res in met_dict[key]:
for seq_key, values in seq_res.items():
if not isinstance(values, np.ndarray):
values = np.array(values)
sequence_results[seq_key].append(values)
elif isinstance(met_dict[key][0], torch.Tensor):
met_dict[key] = torch.mean(torch.stack(met_dict[key])).cpu().numpy()
else:
met_dict[key] = np.mean(np.stack(met_dict[key]))
for seq_key, values in sequence_results.items():
sequence_results[seq_key] = np.concatenate(values)
pred_df = self.generate_pred_df(
sequence_results=sequence_results, is_test=True, agg_mode="mean"
)
fname = f"pred_test_flip_{self.conf.use_flip_tta}_d{self.conf.test_sampling_delta}.csv"
save_path = os.path.join(os.path.dirname(self.conf.ckpt_path), fname)
print("test prediction csv", save_path)
pred_df.to_csv(save_path, index=False)
def _set_image_normalization(self) -> None:
img_mean = IMG_MEAN[: self.num_inchannels] # type: ignore[union-attr]
img_std = IMG_STD[: self.num_inchannels] # type: ignore[union-attr]
self._img_std = torch.tensor(
np.array(img_std, dtype=np.float32)[None, :, None, None], device=self.device
)
self._img_mean = torch.tensor(
np.array(img_mean, dtype=np.float32)[None, :, None, None],
device=self.device,
)
def optimizer_step(
self,
current_epoch,
batch_nb,
optimizer,
optimizer_idx,
closure,
on_tpu=False,
using_native_amp=False,
using_lbfgs=False,
):
if not self.conf.find_lr:
if self.trainer.global_step < self.warmup_steps:
lr_scale = min(
1.0, float(self.trainer.global_step + 1) / self.warmup_steps
)
for pg in optimizer.param_groups:
pg["lr"] = lr_scale * self.conf.lr
else:
pct = (self.trainer.global_step - self.warmup_steps) / (
self.total_steps - self.warmup_steps
)
pct = min(1.0, pct)
for pg in optimizer.param_groups:
pg["lr"] = self._annealing_cos(pct, start=self.conf.lr, end=0.0)
if self.logger_name == "neptune":
self.logger.experiment["train/lr"].log(optimizer.param_groups[0]["lr"])
optimizer.step(closure=closure)
optimizer.zero_grad()
def _annealing_cos(self, pct: float, start: float = 0.1, end: float = 0.0) -> float:
"""
https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#CosineAnnealingLR
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
"""
cos_out = math.cos(math.pi * pct) + 1
return end + (start - end) / 2.0 * cos_out
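        # Hedged sanity check (not from the original):
        #   pct=0.0 -> cos(0)+1 = 2    -> end + (start-end) = start
        #   pct=0.5 -> cos(pi/2)+1 = 1 -> (start+end)/2, the midpoint
        #   pct=1.0 -> cos(pi)+1 = 0   -> end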
def configure_optimizers(self):
self.total_steps = (
self.dataset_len // self.conf.batch_size
) * self.conf.trainer.max_epochs
self.warmup_steps = int(self.total_steps * self.conf.warmup_ratio)
if self.conf.optim_name == "sgd":
optimizer = torch.optim.SGD(
self.parameters(), lr=self.conf.lr, momentum=0.9, weight_decay=4e-5,
)
elif self.conf.optim_name == "adam":
optimizer = torch.optim.Adam(self.parameters(), lr=self.conf.lr)
else:
raise NotImplementedError
# steps_per_epoch = self.hparams.dataset_len // self.hparams.batch_size
# scheduler = torch.optim.lr_scheduler.OneCycleLR(
# optimizer,
# max_lr=self.hparams.lr,
# max_epochs=self.hparams.max_epochs,
# steps_per_epoch=steps_per_epoch,
# )
# return [optimizer], [scheduler]
return optimizer
def patch_first_conv(model, in_channels: int = 4) -> None:
"""
from segmentation_models_pytorch/encoders/_utils.py
Change first convolution layer input channels.
In case:
        in_channels == 1 or in_channels == 2 -> reuse (sum / rescale) original weights
        in_channels == 4 -> duplicate the last input channel's weights
        in_channels divisible by 3 -> tile the original RGB weights
        (other channel counts leave the weights unchanged)
"""
# get first conv
for module in model.modules():
if isinstance(module, torch.nn.Conv2d):
break
# change input channels for first conv
module.in_channels = in_channels
weight = module.weight.detach()
    # module.weight must be assigned an nn.Parameter, so wrap every branch
    if in_channels == 1:
        weight = torch.nn.Parameter(weight.sum(1, keepdim=True))
    elif in_channels == 2:
        # rescale so the expected activation magnitude roughly matches 3 channels
        weight = torch.nn.Parameter(weight[:, :2] * (3.0 / 2.0))
elif in_channels == 4:
weight = torch.nn.Parameter(torch.cat([weight, weight[:, -1:, :, :]], dim=1))
elif in_channels % 3 == 0:
weight = torch.nn.Parameter(torch.cat([weight] * (in_channels // 3), dim=1))
module.weight = weight
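
if __name__ == "__main__":
    # --- Hedged usage sketch, not part of the original module ---
    # Assumes a torchvision backbone; any model whose first Conv2d must accept
    # extra input channels works the same way.
    import torch
    import torchvision

    demo_model = torchvision.models.resnet18()
    patch_first_conv(demo_model, in_channels=4)  # RGB weights + copy of last channel
    with torch.no_grad():
        out = demo_model(torch.randn(1, 4, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])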
|
{"hexsha": "61a2f15513307e995cdbf833cd56a2f12a619ade", "size": 24307, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/modeling/pl_model.py", "max_stars_repo_name": "caoyizhi-filter/kaggle_Google_Smartphone_Decimeter_Challenge-10th-", "max_stars_repo_head_hexsha": "10e6bd6cb3b7222dc141e820daea4fc1db5cabe2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-08-07T19:31:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T08:20:33.000Z", "max_issues_repo_path": "src/modeling/pl_model.py", "max_issues_repo_name": "caoyizhi-filter/kaggle_Google_Smartphone_Decimeter_Challenge-10th-", "max_issues_repo_head_hexsha": "10e6bd6cb3b7222dc141e820daea4fc1db5cabe2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/modeling/pl_model.py", "max_forks_repo_name": "caoyizhi-filter/kaggle_Google_Smartphone_Decimeter_Challenge-10th-", "max_forks_repo_head_hexsha": "10e6bd6cb3b7222dc141e820daea4fc1db5cabe2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-08-07T19:31:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T02:43:30.000Z", "avg_line_length": 38.15855573, "max_line_length": 96, "alphanum_fraction": 0.5550253014, "include": true, "reason": "import numpy", "num_tokens": 5507}
|
from PIL import Image
import numpy as np
from ImageProcessing import mediumGaussian
def readStack(dir, filted = None):
"""
    dir --- path to a multi-frame TIFF image
    Reads the image at `dir` frame by frame and returns it as a numpy stack.
    The optional `filted` flag applies the mediumGaussian filter to each slice.
"""
img = Image.open(dir)
maxiter = 1000 # large default number for searching all the frames in tiff stack
    sx, sy = np.array(img).shape # all later frames must have the same size as the first frame
ImageData = []
for i in range(maxiter):
print "read image slice: " + str(i)
try:
img.seek(i)
if filted is not None:
imageSlice = mediumGaussian(np.array(img))
else:
imageSlice = np.array(img)
            ImageData.append(imageSlice) # store slices in a list first; one stack at the end is faster than growing an array each time
except EOFError:
# Not enough frames in img
break
return np.stack(ImageData, axis=2)
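
if __name__ == "__main__":
    # --- Hedged usage sketch; "example_stack.tif" is a hypothetical path ---
    stack = readStack("example_stack.tif", filted=True)  # filter each slice
    print("stack shape (sx, sy, n_frames): " + str(stack.shape))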
|
{"hexsha": "a6a3206e62bf439abc38ca122b8167bb44759a70", "size": 1068, "ext": "py", "lang": "Python", "max_stars_repo_path": "InputOutput/ReadTiffStack.py", "max_stars_repo_name": "jzw0025/Kyber", "max_stars_repo_head_hexsha": "ce2069da469095e6a086f7bbf9cd980f10563b22", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-02-20T18:18:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-31T17:00:56.000Z", "max_issues_repo_path": "InputOutput/ReadTiffStack.py", "max_issues_repo_name": "jzw0025/Kyber", "max_issues_repo_head_hexsha": "ce2069da469095e6a086f7bbf9cd980f10563b22", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "InputOutput/ReadTiffStack.py", "max_forks_repo_name": "jzw0025/Kyber", "max_forks_repo_head_hexsha": "ce2069da469095e6a086f7bbf9cd980f10563b22", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-12-16T17:51:32.000Z", "max_forks_repo_forks_event_max_datetime": "2016-12-16T17:51:32.000Z", "avg_line_length": 27.3846153846, "max_line_length": 131, "alphanum_fraction": 0.5880149813, "include": true, "reason": "import numpy", "num_tokens": 233}
|
"""Test that the following CLI command returns the expected outputs
label-maker package -d integration-od -c test/fixtures/integration/config.integration.object_detection.json"""
import unittest
from os import makedirs
from shutil import copyfile, copytree, rmtree
import subprocess
import numpy as np
class TestObjectDetectionPackage(unittest.TestCase):
"""Tests for object detection package creation"""
@classmethod
def setUpClass(cls):
makedirs('integration-od')
copyfile('test/fixtures/integration/labels-od.npz', 'integration-od/labels.npz')
copytree('test/fixtures/integration/tiles', 'integration-od/tiles')
@classmethod
def tearDownClass(cls):
rmtree('integration-od')
def test_cli(self):
"""Verify data.npz produced by CLI"""
cmd = 'label-maker package -d integration-od -c test/fixtures/integration/config.integration.object_detection.json'
cmd = cmd.split(' ')
subprocess.run(cmd, universal_newlines=True)
data = np.load('integration-od/data.npz')
self.assertEqual(np.sum(data['x_train']), 144752757)
self.assertEqual(np.sum(data['x_test']), 52758414)
self.assertEqual(data['x_train'].shape, (6, 256, 256, 3))
self.assertEqual(data['x_test'].shape, (2, 256, 256, 3))
# validate our label data with exact matches in shape
self.assertEqual(data['y_train'].shape, (6, 16, 5))
self.assertEqual(data['y_test'].shape, (2, 16, 5))
|
{"hexsha": "acf3e5bccd40f0b64f13f30e105d6ac2afe2d5e0", "size": 1494, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/integration/test_object_package.py", "max_stars_repo_name": "jonaslalin/label-maker", "max_stars_repo_head_hexsha": "c271189fbfd0f0c198184ef45032e16546e25243", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 428, "max_stars_repo_stars_event_min_datetime": "2018-01-10T19:22:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T06:25:53.000Z", "max_issues_repo_path": "test/integration/test_object_package.py", "max_issues_repo_name": "jonaslalin/label-maker", "max_issues_repo_head_hexsha": "c271189fbfd0f0c198184ef45032e16546e25243", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 136, "max_issues_repo_issues_event_min_datetime": "2018-01-10T20:25:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T00:48:57.000Z", "max_forks_repo_path": "test/integration/test_object_package.py", "max_forks_repo_name": "jonaslalin/label-maker", "max_forks_repo_head_hexsha": "c271189fbfd0f0c198184ef45032e16546e25243", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 105, "max_forks_repo_forks_event_min_datetime": "2018-01-10T19:57:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T10:40:41.000Z", "avg_line_length": 39.3157894737, "max_line_length": 123, "alphanum_fraction": 0.6907630522, "include": true, "reason": "import numpy", "num_tokens": 350}
|
import os
import tempfile
import unittest
import numpy as np
from PIL import Image
import colortrans
np.random.seed(0)
class TestColorTrans(unittest.TestCase):
"""colortrans tests"""
def test_colortrans(self):
content = np.random.randint(256, size=(20, 30, 3), dtype=np.uint8)
reference = np.random.randint(256, size=(40, 50, 3), dtype=np.uint8)
for method in colortrans.METHODS:
func = getattr(colortrans, f'transfer_{method}')
output = func(content, reference)
self.assertEqual(output.shape, content.shape)
self.assertEqual(output.dtype, np.uint8)
with tempfile.TemporaryDirectory() as tmp:
content_path = os.path.join(tmp, 'content.png')
reference_path = os.path.join(tmp, 'reference.png')
output_path = os.path.join(tmp, 'output.png')
Image.fromarray(content).save(content_path)
Image.fromarray(reference).save(reference_path)
argv = ['colortrans', content_path, reference_path, output_path, '--method', method]
colortrans.colortrans.main(argv)
self.assertTrue(np.array_equal(np.array(Image.open(output_path)), output))
argv = ['colortrans', content_path, reference_path, output_path]
colortrans.colortrans.main(argv)
assert_func = self.assertTrue if method == 'lhm' else self.assertFalse
assert_func(np.array_equal(np.array(Image.open(output_path)), output))
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "43cfef2d67c5d19eba972f8f5468dd24b1106b33", "size": 1605, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_colortrans.py", "max_stars_repo_name": "dstein64/colortrans", "max_stars_repo_head_hexsha": "bda872f85733a91c375f138c694d9692f719b7fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_colortrans.py", "max_issues_repo_name": "dstein64/colortrans", "max_issues_repo_head_hexsha": "bda872f85733a91c375f138c694d9692f719b7fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-09T05:59:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T22:36:02.000Z", "max_forks_repo_path": "tests/test_colortrans.py", "max_forks_repo_name": "dstein64/colortrans", "max_forks_repo_head_hexsha": "bda872f85733a91c375f138c694d9692f719b7fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3255813953, "max_line_length": 100, "alphanum_fraction": 0.6342679128, "include": true, "reason": "import numpy", "num_tokens": 340}
|
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2020, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Simple rankine cycle model. Has couple of options:
1. Recover waste heat after turbine to mimic feed water heater integration
2. Option to include boiler efficiency which is a linear fit f(capacity factor)
if no heat recovery, the flowsheet is as follows:
Boiler --> Turbine --> Condenser --> Pump --> Boiler
if heat_recovery, the flowsheet is as follows:
Boiler --> Turbine --> pre-condenser(- Q_recovered) --> Condenser -->
Pump --> Feed water heater(+ Q_recovered) --> Boiler
Note:
* Boiler and condenser are simple heater blocks
* IAPWS95 for water and steam properties
"""
__author__ = "Jaffer Ghouse"
# Import Pyomo libraries
from pyomo.environ import ConcreteModel, units, Var, \
TransformationFactory, value, Block, Expression, Constraint, Param, \
Objective
from pyomo.network import Arc
# from pyomo.util.infeasible import log_close_to_bounds
# Import IDAES components
from idaes.core import FlowsheetBlock, UnitModelBlockData
# Import heat exchanger unit model
from idaes.generic_models.unit_models import Heater, PressureChanger
from idaes.generic_models.unit_models.pressure_changer import \
ThermodynamicAssumption
from idaes.power_generation.costing.power_plant_costing import get_PP_costing
# Import steam property package
from idaes.generic_models.properties.iapws95 import htpx, Iapws95ParameterBlock
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.initialization import propagate_state
from idaes.core.util import get_solver
import idaes.logger as idaeslog
from idaes.core.util import to_json, from_json
def create_model(heat_recovery=False, calc_boiler_eff=False, capital_fs=False):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.steam_prop = Iapws95ParameterBlock()
m.fs.boiler = Heater(
default={
"dynamic": False,
"property_package": m.fs.steam_prop,
"has_pressure_change": False})
m.fs.turbine = PressureChanger(
default={
"property_package": m.fs.steam_prop,
"compressor": False,
"thermodynamic_assumption": ThermodynamicAssumption.isentropic})
if heat_recovery:
m.fs.pre_condenser = Heater(
default={
"dynamic": False,
"property_package": m.fs.steam_prop,
"has_pressure_change": True})
# Spec for pre-condenser
m.fs.pre_condenser.eq_outlet_cond = Constraint(
expr=m.fs.pre_condenser.control_volume.
properties_out[0].enth_mol == m.fs.pre_condenser.control_volume.
properties_out[0].enth_mol_sat_phase["Liq"]
)
m.fs.feed_water_heater = Heater(
default={
"dynamic": False,
"property_package": m.fs.steam_prop,
"has_pressure_change": True})
# Link precondenser heat and feed water heater
m.fs.eq_heat_recovery = Constraint(
expr=m.fs.pre_condenser.heat_duty[0] ==
- m.fs.feed_water_heater.heat_duty[0]
)
m.fs.condenser = Heater(
default={
"dynamic": False,
"property_package": m.fs.steam_prop,
"has_pressure_change": True})
m.fs.bfw_pump = PressureChanger(
default={
"property_package": m.fs.steam_prop,
"thermodynamic_assumption": ThermodynamicAssumption.pump})
# create arcs
m.fs.boiler_to_turbine = Arc(source=m.fs.boiler.outlet,
destination=m.fs.turbine.inlet)
if heat_recovery:
m.fs.turbine_to_precondenser = Arc(
source=m.fs.turbine.outlet,
destination=m.fs.pre_condenser.inlet)
m.fs.precondenser_to_condenser = Arc(
source=m.fs.pre_condenser.outlet,
destination=m.fs.condenser.inlet)
m.fs.pump_to_feedwaterheater = Arc(
source=m.fs.bfw_pump.outlet,
destination=m.fs.feed_water_heater.inlet)
else:
m.fs.turbine_to_condenser = Arc(source=m.fs.turbine.outlet,
destination=m.fs.condenser.inlet)
m.fs.condenser_to_pump = Arc(source=m.fs.condenser.outlet,
destination=m.fs.bfw_pump.inlet)
# expand arcs
TransformationFactory("network.expand_arcs").apply_to(m)
# Compute gross power
m.fs.gross_cycle_power_output = \
Expression(expr=(-m.fs.turbine.work_mechanical[0] -
m.fs.bfw_pump.work_mechanical[0]))
# account for generator loss = 5% of gross power output
m.fs.net_cycle_power_output = Expression(
expr=0.95*m.fs.gross_cycle_power_output)
if capital_fs or not calc_boiler_eff:
# if fs is a capital cost fs, then the P is at P_max and hence
# set boiler efficiency to value at P_max instead of computing
m.fs.boiler_eff = Param(initialize=0.95)
if calc_boiler_eff:
# Var for net_power max variable.
# This is needed to compute boiler efficiency as a function of
# capacity factor; p and p_max must be in MWs
m.fs.net_power_max = Var(initialize=100)
        # Boiler efficiency
        # Linear fit as function of capacity factor; at P_max eff. is 95%
m.fs.boiler_eff = Expression(
expr=0.2143*(m.fs.net_cycle_power_output*1e-6/m.fs.net_power_max)
+ 0.7357
)
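        # Sanity check (hedged): at full load the power ratio is 1, so the fit
        # gives 0.2143 + 0.7357 = 0.95, matching the fixed boiler_eff Param
        # used for the capital-cost flowsheet above.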
# cycle efficiency
m.fs.cycle_efficiency = Expression(
expr=m.fs.net_cycle_power_output/m.fs.boiler.heat_duty[0]
* m.fs.boiler_eff * 100
)
m.heat_recovery = heat_recovery
return m
def initialize_model(m, outlvl=idaeslog.INFO):
# Deactivate the constraint linking the pre_condenser Q and feed water Q
if m.heat_recovery:
m.fs.eq_heat_recovery.deactivate()
# Check for degrees of freedom before proceeding with initialization
assert degrees_of_freedom(m) == 0
# Proceed with initialization
m.fs.boiler.initialize(outlvl=outlvl)
propagate_state(m.fs.boiler_to_turbine)
m.fs.turbine.initialize(outlvl=outlvl)
if m.heat_recovery:
propagate_state(m.fs.turbine_to_precondenser)
m.fs.pre_condenser.initialize(outlvl=outlvl)
propagate_state(m.fs.precondenser_to_condenser)
m.fs.condenser.initialize()
propagate_state(m.fs.condenser_to_pump)
m.fs.bfw_pump.initialize(outlvl=outlvl)
propagate_state(m.fs.pump_to_feedwaterheater)
m.fs.feed_water_heater.initialize(outlvl=outlvl)
else:
propagate_state(m.fs.turbine_to_condenser)
m.fs.condenser.initialize(outlvl=outlvl)
propagate_state(m.fs.condenser_to_pump)
m.fs.bfw_pump.initialize(outlvl=outlvl)
solver = get_solver()
solver.solve(m, tee=False)
if m.heat_recovery:
        # Unfix the feed water heater outlet enthalpy, as the constraint
        # linking Q will be activated
m.fs.feed_water_heater.outlet.enth_mol[0].unfix()
m.fs.eq_heat_recovery.activate()
solver.solve(m, tee=True)
assert degrees_of_freedom(m) == 0
return m
def generate_report(m, unit_model_report=True):
# Print reports
if unit_model_report:
for i in m.fs.component_objects(Block):
if isinstance(i, UnitModelBlockData):
i.report()
print()
print('Net power = ', value(m.fs.net_cycle_power_output)*1e-6, ' MW')
print('Cycle efficiency = ', value(m.fs.cycle_efficiency), "%")
print('Heat rate = ', value(m.fs.heat_rate), 'Btu/kWh')
print('Boiler feed water flow = ',
value(m.fs.boiler.inlet.flow_mol[0]), "mol/s")
print()
try:
print('Capital cost = ', value(m.fs.capital_cost), '$M')
except AttributeError:
print("No cap cost for opex plant")
try:
print('Operating cost = ',
value(m.fs.operating_cost/(m.fs.net_cycle_power_output*1e-6)),
'$/MWh')
except AttributeError:
print("No operating cost for capex plant")
def set_inputs(m, bfw_pressure=24.23e6, bfw_flow=10000):
    # Main steam pressure is bfw_pressure (Pa)
# Boiler inlet
m.fs.boiler.inlet.flow_mol[0].fix(bfw_flow) # mol/s
    m.fs.boiler.inlet.pressure[0].fix(bfw_pressure)  # Pa
m.fs.boiler.inlet.enth_mol[0].fix(
htpx(T=563.6*units.K,
P=value(m.fs.boiler.inlet.pressure[0])*units.Pa))
# Unit specifications
m.fs.boiler.outlet.enth_mol[0].fix(
htpx(T=866.5*units.K,
P=value(m.fs.boiler.inlet.pressure[0])*units.Pa))
turbine_pressure_ratio = 2e6/bfw_pressure
m.fs.turbine.ratioP.fix(turbine_pressure_ratio)
m.fs.turbine.efficiency_isentropic.fix(0.85)
if m.heat_recovery:
# precondenser
m.fs.pre_condenser.deltaP.fix(-0.5e6) # Pa
# feed water heater
m.fs.feed_water_heater.deltaP[0].fix(0) # Pa
m.fs.feed_water_heater.outlet.enth_mol[0].fix(
htpx(T=563.6*units.K,
P=value(m.fs.condenser.outlet.pressure[0])*units.Pa))
m.fs.condenser.outlet.pressure[0].fix(1.05e6) # Pa
m.fs.condenser.outlet.enth_mol[0].fix(
htpx(T=311*units.K,
P=value(m.fs.condenser.outlet.pressure[0])*units.Pa))
m.fs.bfw_pump.efficiency_pump.fix(0.80)
m.fs.bfw_pump.deltaP.fix(bfw_pressure)
return m
def close_flowsheet_loop(m):
"""Closes the loop i.e. the arc between the feed water heater and
boiler. When the pressure and enthalpy arcs are enabled, the bfw_pump
spec for deltaP and the inlet enth_mol for the boiler need to be unfixed.
Returns:
m: model object after closing the loop
"""
# Unfix bfw pump pressure spec
m.fs.bfw_pump.deltaP.unfix()
# Unfix inlet boiler enthalpy
m.fs.boiler.inlet.enth_mol[0].unfix()
if m.heat_recovery:
# Constraint to link pressure
m.fs.eq_pressure = Constraint(
expr=m.fs.feed_water_heater.outlet.pressure[0] ==
m.fs.boiler.inlet.pressure[0]
)
# Constraint to link enthalpy
m.fs.eq_enthalpy = Constraint(
expr=m.fs.feed_water_heater.outlet.enth_mol[0] ==
m.fs.boiler.inlet.enth_mol[0]
)
else:
# Constraint to link pressure
m.fs.eq_pressure = Constraint(
expr=m.fs.bfw_pump.outlet.pressure[0] ==
m.fs.boiler.inlet.pressure[0]
)
# Constraint to link enthalpy
m.fs.eq_enthalpy = Constraint(
expr=m.fs.bfw_pump.outlet.enth_mol[0] ==
m.fs.boiler.inlet.enth_mol[0]
)
return m
def add_capital_cost(m):
"""Add capital cost expressions. Leverages costing correlations from the
IDAES costing library. Note that the capital cost correlations are all
based on the boiler feed water flowrate.
Returns:
m: model object after adding capital cost correlations
"""
m.fs.get_costing(year='2018')
# Add boiler capital cost
boiler_power_account = ['4.9']
# convert flow rate of BFW from mol/s to lb/hr for costing expressions
m.fs.bfw_lb_hr = Expression(
expr=m.fs.boiler.inlet.flow_mol[0]*0.018*2.204*3600)
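    # Hedged unit check: mol/s * 0.018 kg/mol * 2.204 lb/kg * 3600 s/hr = lb/hr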
get_PP_costing(
m.fs.boiler, boiler_power_account, m.fs.bfw_lb_hr, 'lb/hr', 2)
# Add turbine capital cost
turb_power_account = ['8.1']
# convert the turbine power from W to kW for costing expressions
    m.fs.turbine_power_kw = Expression(
        expr=-m.fs.turbine.work_mechanical[0] * 1e-3)
get_PP_costing(
m.fs.turbine, turb_power_account,
        m.fs.turbine_power_kw, 'kW', 2)
# Add condenser cost
cond_power_account = ['8.3']
# convert the heat duty from J/s to MMBtu/hr for costing expressions
m.fs.condenser_duty_mmbtu_h = Expression(
expr=-m.fs.condenser.heat_duty[0] * 3.412*1e-6)
get_PP_costing(
m.fs.condenser, cond_power_account,
m.fs.condenser_duty_mmbtu_h, "MMBtu/hr", 2)
# Add feed water system costs
    # Note that although no feed water heaters are used, the BFW flowrate is
    # used to cost the feed water system
fwh_power_account = ['3.1', '3.3', '3.5']
get_PP_costing(m.fs.bfw_pump, fwh_power_account,
m.fs.bfw_lb_hr, 'lb/hr', 2)
# Add expression for total capital cost
m.fs.capital_cost = Expression(
expr=m.fs.boiler.costing.total_plant_cost['4.9'] +
m.fs.turbine.costing.total_plant_cost['8.1'] +
m.fs.condenser.costing.total_plant_cost['8.3'] +
sum(m.fs.bfw_pump.costing.total_plant_cost[:]),
doc="Total capital cost $ Million")
return m
def add_operating_cost(m, include_cooling_cost=True):
"""Add operating cost expressions. The operating cost only includes
the cost of coal. This is computed by calculating the amount of coal
required based on HHV value of coal and the boiler heat duty.
Returns:
m: model object after adding operating cost correlations
"""
# Add condenser cooling water cost
# temperature for the cooling water from/to cooling tower in K
t_cw_in = 289.15
t_cw_out = 300.15
# compute the delta_h based on fixed temperature of cooling water
# utility
m.fs.enth_cw_in = Param(
initialize=htpx(T=t_cw_in*units.K, P=101325*units.Pa),
doc="inlet enthalpy of cooling water to condenser")
m.fs.enth_cw_out = Param(
initialize=htpx(T=t_cw_out*units.K, P=101325*units.Pa),
doc="outlet enthalpy of cooling water from condenser")
m.fs.cw_flow = Expression(
expr=-m.fs.condenser.heat_duty[0]*0.018*0.26417*3600 /
(m.fs.enth_cw_out-m.fs.enth_cw_in),
doc="cooling water flow rate in gallons/hr")
# cooling water cost in $/1000 gallons
m.fs.cw_cost = Param(
initialize=0.19,
doc="cost of cooling water for condenser in $/1000 gallon")
m.fs.cw_total_cost = Expression(
expr=m.fs.cw_flow*m.fs.cw_cost/1000,
doc="total cooling water cost in $/hr"
)
# Add coal feed costs
# HHV value of coal (Reference - NETL baseline report rev #4)
m.fs.coal_hhv = Param(
initialize=27113,
doc="Higher heating value of coal as received kJ/kg")
# cost of coal (Reference - NETL baseline report rev #4)
m.fs.coal_cost = Param(
initialize=51.96,
doc="$ per ton of Illinois no. 6 coal"
)
# Expression to compute coal flow rate in ton/hr using Q_boiler and
# hhv values
m.fs.coal_flow = Expression(
expr=((m.fs.boiler.heat_duty[0]/m.fs.boiler_eff * 3600)
/ (907.18*1000*m.fs.coal_hhv)),
doc="coal flow rate for boiler ton/hr")
# Expression to compute total cost of coal feed in $/hr
m.fs.total_coal_cost = Expression(
expr=m.fs.coal_flow*m.fs.coal_cost,
doc="total cost of coal feed in $/hr"
)
# Expression to compute heat rate (Btu/kWh)
# Factors:
# 907.18 to convert from ton to Kg
# 0.9478 to convert 1 KJ to 1 BTU
# 1e3 to convert power in MW to kW
m.fs.heat_rate = Expression(
expr=m.fs.coal_flow*907.18*m.fs.coal_hhv*0.9478
/ m.fs.net_cycle_power_output*1e3,
doc="heat rate of plant in Btu/kWh")
if include_cooling_cost:
# Expression for total operating cost
m.fs.operating_cost = Expression(
expr=m.fs.total_coal_cost+m.fs.cw_total_cost,
doc="Total operating cost in $/hr")
else:
# Expression for total operating cost
m.fs.operating_cost = Expression(
expr=m.fs.total_coal_cost,
doc="Total operating cost in $/hr")
return m
def square_problem(heat_recovery=None,
capital_fs=False,
net_power=100,
p_max=100,
calc_boiler_eff=False,
capital_payment_years=5):
"""This method simulates the simple rankine cycle by adding capital and
operating costs.
"""
m = ConcreteModel()
# Create plant flowsheet
m = create_model(
heat_recovery=heat_recovery,
capital_fs=capital_fs,
calc_boiler_eff=calc_boiler_eff)
# Set model inputs for the capex and opex plant
m = set_inputs(m)
# Set p_max for plant that is set in a square problem
if calc_boiler_eff:
m.fs.net_power_max.fix(p_max)
# Initialize the capex and opex plant
m = initialize_model(m)
# Closing the loop in the flowsheet
m = close_flowsheet_loop(m)
# Unfixing the boiler inlet flowrate
m.fs.boiler.inlet.flow_mol[0].unfix()
# Net power constraint for the capex plant
m.fs.eq_net_power = Constraint(
expr=m.fs.net_cycle_power_output == net_power*1e6
)
m = add_capital_cost(m)
m = add_operating_cost(m, include_cooling_cost=True)
# Expression for total cap and op cost - $/hr
m.total_cost = Expression(
expr=(m.fs.capital_cost*1e6/capital_payment_years/8760) +
m.fs.operating_cost)
solver = get_solver()
solver.solve(m, tee=True)
generate_report(m, unit_model_report=False)
return m
if __name__ == "__main__":
    # Run a square problem with the Rankine cycle
m = square_problem(
heat_recovery=True,
capital_fs=True,
calc_boiler_eff=True,
p_max=300, net_power=300)
|
{"hexsha": "e248e9f538e22f3ac7ebef1238fbde96b42966e5", "size": 18183, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/apps/rankine/simple_rankine_cycle.py", "max_stars_repo_name": "shermanjasonaf/idaes-pse", "max_stars_repo_head_hexsha": "b3c69a9c2a31cfe79683a95161a98112b9059912", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/apps/rankine/simple_rankine_cycle.py", "max_issues_repo_name": "shermanjasonaf/idaes-pse", "max_issues_repo_head_hexsha": "b3c69a9c2a31cfe79683a95161a98112b9059912", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-31T01:48:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T01:48:32.000Z", "max_forks_repo_path": "idaes/apps/rankine/simple_rankine_cycle.py", "max_forks_repo_name": "shermanjasonaf/idaes-pse", "max_forks_repo_head_hexsha": "b3c69a9c2a31cfe79683a95161a98112b9059912", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T14:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:57:20.000Z", "avg_line_length": 33.0, "max_line_length": 79, "alphanum_fraction": 0.6521476104, "include": true, "reason": "from pyomo", "num_tokens": 4663}
|
import numpy as np
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.distributed import DistributedSampler
from .datasets import get_dataset
from .dataloader import FastDataloader
from augment import (get_transforms, get_center_crop_transforms,
get_simple_transforms)
from distributed import comm
DEBUG_NUM_BATCH = 2
def _collate_fn(batch):
imgs, targets = default_collate(batch)
if len(imgs) == 1:
# squeeze single view dim
imgs = imgs[0]
return imgs, targets
def get_loaders_for_trainer(cfg):
train_loader, eval_loader, num_classes = (None,) * 3
# train dataset
if cfg.train.enabled:
if cfg.debug:
n_samples = comm.get_world_size() * DEBUG_NUM_BATCH
n_samples = n_samples * cfg.train.batch_size_train
else:
n_samples = -1
train_dataset, num_classes = get_dataset(
data_name=cfg.dataset.name,
data_root=cfg.dataset.root,
train=True,
transform=get_transforms(cfg, train=True),
num_subsample=int(n_samples),
)
train_sampler = DistributedSampler(
dataset=train_dataset, rank=comm.get_rank(),
num_replicas=comm.get_world_size(), shuffle=True
)
train_loader = FastDataloader(
dataset=train_dataset, batch_size=cfg.train.batch_size_train,
num_workers=cfg.train.num_workers, sampler=train_sampler,
drop_last=False, collate_fn=_collate_fn
)
# test dataset (for online evaluation)
if cfg.train.enabled and cfg.train.online_eval:
if cfg.debug:
n_samples = comm.get_world_size() * DEBUG_NUM_BATCH
n_samples = n_samples * cfg.train.batch_size_eval
eval_dataset, num_classes = get_dataset(
data_name=cfg.dataset.name,
data_root=cfg.dataset.root,
train=False,
transform=get_transforms(cfg, train=False),
num_subsample=int(n_samples),
)
eval_sampler = DistributedSampler(
dataset=eval_dataset, rank=comm.get_rank(),
num_replicas=comm.get_world_size(), shuffle=True
)
eval_loader = FastDataloader(
dataset=eval_dataset, batch_size=cfg.train.batch_size_eval,
num_workers=cfg.train.num_workers, sampler=eval_sampler,
drop_last=False, collate_fn=_collate_fn
)
return train_loader, eval_loader, num_classes
def get_loaders_for_linear_eval(cfg):
if cfg.debug:
train_n_samples = comm.get_world_size() * DEBUG_NUM_BATCH
train_n_samples = train_n_samples * cfg.eval.batch_size_train
eval_n_samples = comm.get_world_size() * DEBUG_NUM_BATCH
eval_n_samples = eval_n_samples * cfg.eval.batch_size_eval
else:
train_n_samples = eval_n_samples = -1
# augmentation
train_transforms = get_simple_transforms(
input_size=cfg.augment.input_size
)
eval_transforms = get_center_crop_transforms(
input_size=cfg.augment.input_size
)
# dataset
train_dataset, num_classes = get_dataset(
data_name=cfg.dataset.name,
data_root=cfg.dataset.root,
train=True,
transform=train_transforms,
num_subsample=int(train_n_samples),
)
eval_dataset, _ = get_dataset(
data_name=cfg.dataset.name,
data_root=cfg.dataset.root,
train=False,
transform=eval_transforms,
num_subsample=int(eval_n_samples),
)
# sampler
train_sampler = DistributedSampler(
dataset=train_dataset, rank=comm.get_rank(),
num_replicas=comm.get_world_size(), shuffle=True
)
eval_sampler = DistributedSampler(
dataset=eval_dataset, rank=comm.get_rank(),
num_replicas=comm.get_world_size(), shuffle=True
)
# dataloader
num_workers = cfg.eval.num_workers if not cfg.debug else 4
train_loader = FastDataloader(
dataset=train_dataset, batch_size=cfg.eval.batch_size_train,
num_workers=num_workers, drop_last=False,
sampler=train_sampler, collate_fn=_collate_fn
)
eval_loader = FastDataloader(
dataset=eval_dataset, batch_size=cfg.eval.batch_size_eval,
num_workers=num_workers, drop_last=False,
sampler=eval_sampler, collate_fn=_collate_fn
)
return train_loader, eval_loader, num_classes
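
if __name__ == "__main__":
    # --- Hedged sanity check for _collate_fn, not part of the original module ---
    # Each sample is (list_of_view_tensors, target); default_collate transposes
    # this into a list of batched view tensors, which _collate_fn unwraps when
    # there is only a single view.
    import torch

    batch = [([torch.zeros(3, 8, 8)], 0), ([torch.ones(3, 8, 8)], 1)]
    imgs, targets = _collate_fn(batch)
    print(imgs.shape, targets.shape)  # torch.Size([2, 3, 8, 8]) torch.Size([2])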
|
{"hexsha": "d3195679916e2d34d420d8cc2d307459ea0a617f", "size": 4502, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/getters.py", "max_stars_repo_name": "merlinarer/scrl", "max_stars_repo_head_hexsha": "f5bc426ed6eef130d44dd3a5609dc0772da59613", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 102, "max_stars_repo_stars_event_min_datetime": "2021-03-25T08:54:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:46:47.000Z", "max_issues_repo_path": "data/getters.py", "max_issues_repo_name": "merlinarer/scrl", "max_issues_repo_head_hexsha": "f5bc426ed6eef130d44dd3a5609dc0772da59613", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-03-23T01:53:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-30T08:51:19.000Z", "max_forks_repo_path": "data/getters.py", "max_forks_repo_name": "merlinarer/scrl", "max_forks_repo_head_hexsha": "f5bc426ed6eef130d44dd3a5609dc0772da59613", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-04-15T06:02:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T15:34:39.000Z", "avg_line_length": 33.5970149254, "max_line_length": 73, "alphanum_fraction": 0.665037761, "include": true, "reason": "import numpy", "num_tokens": 981}
|
!
! :::::::::::::: BOUND :::::::::::::::::::::::::::::::::::::::::::
! This routine sets the boundary values for a given grid
! at level level.
! We are setting the values for a strip ng zones wide on
! both borders.
!
! Outputs from this routine:
! The values around the border of the grid are inserted
! directly into the enlarged valbig array.
!
! This routine calls the routine filpatch
! which for any block of mesh points on a given level,
! intersects that block with all grids on that level and with
! the physical boundaries, copies the values into the
! appropriate intersecting regions, and interpolates the remaining
! cells from coarser grids as required.
! :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
subroutine bound(time,nvar,ng,valbig,mitot,mptr,aux,naux)
use amr_module, only: rnode, node, hxposs, cornxlo, cornxhi
use amr_module, only: ndilo, ndihi
use amr_module, only: nestlevel, xlower, xupper
use amr_module, only: xperdom
implicit none
! Input
integer, intent(in) :: nvar, ng, mitot, mptr, naux
real(kind=8), intent(in) :: time
real(kind=8), intent(in out) :: valbig(nvar,mitot)
real(kind=8), intent(in out) :: aux(naux,mitot)
! Locals
integer :: ilo, ihi, level
real(kind=8) :: xleft, xright, hx, xl, xr
real(kind=8) :: xloWithGhost, xhiWithGhost
logical :: patchOnly
xleft = rnode(cornxlo, mptr)
xright = rnode(cornxhi, mptr)
ilo = node(ndilo, mptr)
ihi = node(ndihi, mptr)
level = node(nestlevel, mptr)
hx = hxposs(level)
xloWithGhost = xleft - ng*hx
xhiWithGhost = xright + ng*hx
    ! used in filpatch for bc1amr: it is called there for patches; for full grids it is called from bound below
patchOnly = .false.
! left boundary
xl = xleft - ng*hx
xr = xleft
if ((xl < xlower) .and. xperdom) then
call prefilrecur(level,nvar,valbig,aux,naux,time,mitot,1, &
ilo-ng,ilo-1,ilo-ng,ihi+ng,patchOnly)
else
call filrecur(level,nvar,valbig,aux,naux,time,mitot,1,ilo-ng, &
ilo-1,patchOnly,mptr)
endif
! right boundary
xl = xright
xr = xright + ng*hx
    if ((xr > xupper) .and. xperdom) then
call prefilrecur(level,nvar,valbig,aux,naux,time,mitot, &
mitot-ng+1,ihi+1,ihi+ng,ilo-ng,ihi+ng,patchOnly)
else
call filrecur(level,nvar,valbig,aux,naux,time,mitot, &
mitot-ng+1,ihi+1,ihi+ng,patchOnly,mptr)
endif
! set all exterior (physical) boundary conditions for this grid at once
    ! This used to be done from filpatch, but is now done there only for recursive
    ! calls with a new patch where the info matches. It is more efficient to do the
    ! whole grid at once and avoid copying.
call bc1amr(valbig,aux,mitot,nvar,naux,hx,level,time, &
        xloWithGhost,xhiWithGhost)
end subroutine bound
|
{"hexsha": "caa87d2e0e102acbdd4419d62d79af1e7d6a08ca", "size": 2819, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/1d/bound.f90", "max_stars_repo_name": "navravi/amrclaw", "max_stars_repo_head_hexsha": "727d98d243c521267c927f6fe107ba6f1155597b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2015-05-27T08:16:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T06:36:24.000Z", "max_issues_repo_path": "src/1d/bound.f90", "max_issues_repo_name": "navravi/amrclaw", "max_issues_repo_head_hexsha": "727d98d243c521267c927f6fe107ba6f1155597b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 107, "max_issues_repo_issues_event_min_datetime": "2015-01-02T19:51:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T03:35:32.000Z", "max_forks_repo_path": "src/1d/bound.f90", "max_forks_repo_name": "BrisaDavis/amrclaw", "max_forks_repo_head_hexsha": "c5cacdf00f1959e160ea5616cdf6ea7b6cd374f3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2015-01-10T00:03:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T23:52:34.000Z", "avg_line_length": 33.9638554217, "max_line_length": 97, "alphanum_fraction": 0.6495211068, "num_tokens": 889}
|
from __future__ import print_function
import os
import scipy
from py2gcode import gcode_cmd
from py2gcode import cnc_pocket
from py2gcode import cnc_boundary
from params import params
alignTest = False
# Cutting parameters
safeZ = 0.5
startZ = 0.0
overlap = 0.4
overlapFinish = 0.6
maxCutDepth = 0.05
if alignTest:
toolDiam = 0.001
else:
toolDiam = 0.25
direction = 'ccw'
startDwell = 2.0
feedrate = 24.0
cutThruMargin = 0.04
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))
prog.add(gcode_cmd.Space())
for i,posLayout in enumerate(zip(params['xPosList'],params['yPosList'])):
xPosLayout, yPosLayout = posLayout
if 1:
# Magnet pocket
magnetParams = params['magnetCut']
for xPosRel, yPosRel in zip(magnetParams['xPosList'], magnetParams['yPosList']):
xPos = xPosLayout + xPosRel
yPos = yPosLayout + yPosRel
if alignTest:
depth = maxCutDepth
else:
depth = magnetParams['depth']
pocketDict = {
'centerX' : xPos,
'centerY' : yPos,
'radius' : 0.5*magnetParams['diameter'],
'depth' : depth,
'startZ' : startZ,
'safeZ' : safeZ,
'overlap' : overlap,
'overlapFinish' : overlapFinish,
'maxCutDepth' : maxCutDepth,
'toolDiam' : toolDiam,
'direction' : direction,
'startDwell' : startDwell,
}
if alignTest:
pocketDict['thickness'] = toolDiam
pocket = cnc_pocket.CircAnnulusPocketXY(pocketDict)
else:
pocket = cnc_pocket.CircPocketXY(pocketDict)
prog.add(pocket)
if 1:
#if i == (params['numParts'] - 1):
# prog.add(gcode_cmd.Space())
# prog.add(gcode_cmd.Comment('Pause'))
# prog.add(gcode_cmd.Pause())
# prog.add(gcode_cmd.Space())
boundaryParams = params['boundaryCut']
if alignTest:
depth = maxCutDepth
else:
depth = boundaryParams['depth'] + cutThruMargin
# Part boundary
boundaryDict = {
'centerX' : xPosLayout,
'centerY' : yPosLayout,
'radius' : boundaryParams['radius'],
'depth' : depth,
'startZ' : startZ,
'safeZ' : safeZ,
'toolDiam' : toolDiam,
'toolOffset' : 'outside',
'direction' : direction,
'maxCutDepth' : maxCutDepth,
'startDwell' : startDwell,
}
boundary = cnc_boundary.CircBoundaryXY(boundaryDict)
prog.add(boundary)
prog.add(gcode_cmd.End(),comment=True)
print(prog)
baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
prog.write(fileName)
|
{"hexsha": "416a1e2f40ad47e1bf41172b3461a5f2080396d1", "size": 3205, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnc/motor_hub/motor_hub/magnet_and_boundary/mill.py", "max_stars_repo_name": "iorodeo/stir_plate_mechanics", "max_stars_repo_head_hexsha": "ad721e708d962afcb14dd69456df4231c83ffed8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-23T19:03:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-10T19:45:46.000Z", "max_issues_repo_path": "cnc/motor_hub/motor_hub/magnet_and_boundary/mill.py", "max_issues_repo_name": "iorodeo/stir_plate_mechanics", "max_issues_repo_head_hexsha": "ad721e708d962afcb14dd69456df4231c83ffed8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnc/motor_hub/motor_hub/magnet_and_boundary/mill.py", "max_forks_repo_name": "iorodeo/stir_plate_mechanics", "max_forks_repo_head_hexsha": "ad721e708d962afcb14dd69456df4231c83ffed8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-07T20:39:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-07T20:39:18.000Z", "avg_line_length": 30.2358490566, "max_line_length": 88, "alphanum_fraction": 0.5254290172, "include": true, "reason": "import scipy", "num_tokens": 793}
|
'''
MIT License
Copyright 2019 Oak Ridge National Laboratory
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Created on Feb 14, 2019
@author: bolme
'''
import numpy as np
import multiprocessing as mp
import faro
import time
import faro.proto.face_service_pb2 as fsd
import scipy.spatial as spat
import os
import faro.proto.proto_types as pt
from faro.proto.face_service_pb2 import DetectRequest,DetectExtractRequest,ExtractRequest,FaceRecordList,GalleryList,GalleryInfo,TemplateList,Empty,FaceRecord
# TODO: Remove this and make it a local variable
STORAGE = {}
class GalleryWorker(object):
def __init__(self,options):
self.gallery_storage = os.path.join(options.storage_dir,'galleries',str(options.algorithm))
if not os.path.isdir(self.gallery_storage):
print( 'GALLERY WORKER: Creating directory for gallery storage:',self.gallery_storage)
os.makedirs(self.gallery_storage)
self.loadGalleries()
def loadGalleries(self):
'''Load gallery information into memory on startup.'''
import h5py
global STORAGE
galleries = os.listdir(self.gallery_storage)
galleries = list(filter(lambda x: x.endswith('.h5'), galleries))
print("Loading %d galleries: %s"%(len(galleries),galleries))
for each in galleries:
gallery_name = each[:-3]
path = os.path.join(self.gallery_storage,gallery_name+'.h5')
STORAGE[gallery_name] = h5py.File(path,'a') # Open in read/write mode
face_count = len(STORAGE[gallery_name]['faces'])
print(" * Loaded %s with %d faces."%(gallery_name,face_count))
print('Done Loading Galleries.')
def galleryNames(self):
return list(STORAGE)
def size(self, gallery_name):
''' Return the size a gallery. '''
return len(STORAGE[gallery_name]['faces'])
def addFaceToGallery(self, gallery_name, gallery_key, face):
''' Enrolls the faces in the gallery. '''
import h5py
global STORAGE
replaced = 0
        print('Gallery name:', gallery_name)
print(list(STORAGE.keys()))
if gallery_name not in STORAGE:
path = os.path.join(self.gallery_storage,gallery_name+'.h5')
print('adding new gallery at ', path)
STORAGE[gallery_name] = h5py.File(path,'a')
STORAGE[gallery_name].create_group('faces')
STORAGE[gallery_name].create_group('sources')
STORAGE[gallery_name].create_group('detections')
STORAGE[gallery_name].create_group('tags')
STORAGE[gallery_name].create_group('logs')
enrolled = 0
face_id = faro.generateFaceId(face)
face.gallery_key = face_id
enrolled += 1
if face_id in STORAGE[gallery_name]['faces']:
del STORAGE[gallery_name]['faces'][face_id] # delete so it can be replaced.
replaced += 1
STORAGE[gallery_name]['faces'][face_id] = np.bytes_(face.SerializeToString())
template = pt.vector_proto2np(face.template.data)
temp_length = template.shape[0]
print('template shape: ', template.shape)
if 'templates' not in STORAGE[gallery_name]:
# Create an empty dataset
f = STORAGE[gallery_name]
dset = f.create_dataset('templates',data=np.zeros((0,temp_length)), maxshape=(None,temp_length),dtype=np.float32)
if 'facelist' not in STORAGE[gallery_name]:
# Create an empty dataset
f = STORAGE[gallery_name]
dt = h5py.special_dtype(vlen=str)
dset = f.create_dataset('facelist',(0,), maxshape=(None,),dtype=dt)
# Append to the end
dset = STORAGE[gallery_name]['templates']
size = dset.shape
        print('templates dataset size:', size)
dset.resize((size[0]+1,size[1]))
dset[-1,:] = template
dset = STORAGE[gallery_name]['facelist']
size = dset.shape
dset.resize((size[0]+1,))
dset[-1] = face_id
STORAGE[gallery_name].flush()
self.clearIndex(gallery_name)
return enrolled, replaced
def deleteGallery(self, gallery_name):
''' Delete a gallery. '''
if gallery_name not in STORAGE:
raise ValueError("Gallery '" + gallery_name +"' not found.")
deleted_faces = len(STORAGE[gallery_name]['faces'])
# Close and remove the file
STORAGE[gallery_name].close()
del STORAGE[gallery_name]
# Delete the file from disk
path = os.path.join(self.gallery_storage,gallery_name+'.h5')
os.remove(path)
return deleted_faces
def enrollmentList(self, gallery_name):
''' List the faces enrolled in this gallery. '''
result = FaceRecordList()
global STORAGE
count = 0
for face_id in STORAGE[gallery_name]['faces']:
data = STORAGE[gallery_name]['faces'][face_id]
face_record = FaceRecord()
face_record.ParseFromString(np.array(data).tobytes())
face = result.face_records.add()
face.gallery_key = face_id
face.name = face_record.name
face.subject_id = face_record.subject_id
face.source = face_record.source
face.frame = face_record.frame
count += 1
return result
def subjectDelete(self, gallery_name, subject_id):
''' List the galleries for this service. '''
self.clearIndex(gallery_name)
if gallery_name not in STORAGE:
raise ValueError("No gallery named '%s'"%(gallery_name,))
delete_count = 0
keys = list(STORAGE[gallery_name]['faces'])
for gallery_key in keys:
tmp = STORAGE[gallery_name]['faces'][gallery_key]
face = FaceRecord()
face.ParseFromString(np.array(tmp).tobytes())
if face.subject_id == subject_id:
del STORAGE[gallery_name]['faces'][gallery_key]
delete_count += 1
        self.clearIndex(gallery_name)
STORAGE[gallery_name].flush()
return delete_count
def isSearchable(self):
''' Return true of the gallery implements fast search. '''
return False
def clearIndex(self, gallery_name):
''' Remove the index to free space and allow it to be regenerated when needed. '''
pass
def generateIndex(self, gallery_name):
''' Process the gallery to generate a fast index. '''
raise NotImplementedError()
def getAllFaceRecords(self, gallery_name):
''' Get all the face records in the gallery. '''
if gallery_name not in STORAGE:
raise ValueError("Unknown gallery: "+gallery_name)
gallery = FaceRecordList()
for key in STORAGE[gallery_name]['faces']:
tmp = STORAGE[gallery_name]['faces'][key]
face = FaceRecord()
face.ParseFromString(np.array(tmp).tobytes())
gallery.face_records.add().CopyFrom(face)
return gallery
def getAllTemplates(self, gallery_name):
''' Get all the face records in the gallery. '''
if gallery_name not in STORAGE:
raise ValueError("Unknown gallery: "+gallery_name)
gallery = TemplateList()
for key in STORAGE[gallery_name]['faces']:
tmp = STORAGE[gallery_name]['faces'][key]
face = FaceRecord()
face.ParseFromString(np.array(tmp).tobytes())
gallery.templates.add().CopyFrom(face.template)
return gallery
def getFaceRecord(self, gallery_name, face_id):
''' Get all the face records in the gallery. '''
if gallery_name not in STORAGE:
raise ValueError("Unknown gallery: "+gallery_name)
tmp = STORAGE[gallery_name]['faces'][face_id]
face = FaceRecord()
face.ParseFromString(np.array(tmp).tobytes())
return face
class SearchableGalleryWorker(GalleryWorker):
''' Implements a fast gallery to speed up searches. Requires templates to be simple vectors.'''
def __init__(self,options,score_type):
GalleryWorker.__init__(self,options)
self.score_type = score_type
self.indexes = {}
self.face_ids = {}
def isSearchable(self):
''' Return true of the gallery implements fast search. '''
return True
def clearIndex(self, gallery_name):
''' Remove the index to free space and allow it to be regenerated when needed. '''
if gallery_name not in STORAGE:
raise ValueError("Unknown gallery: "+gallery_name)
try:
del STORAGE[gallery_name]['index']
except:
pass
try:
del STORAGE[gallery_name]['face_ids']
except:
pass
try:
del self.indexes[gallery_name]
except:
pass
try:
del self.face_ids[gallery_name]
except:
pass
def generateIndex(self, gallery_name):
''' Process the gallery to generate a fast index. '''
import h5py
#try:
# print("Gallery Names:",list(STORAGE[gallery_name]))
# self.clearIndex(gallery_name)
#except:
# print("Problem clearing index.")
if gallery_name not in STORAGE:
raise ValueError("Unknown gallery: "+gallery_name)
if gallery_name in self.indexes:
# This seems to exist and be loaded into memory so just continue
return
if 'index' in STORAGE[gallery_name]:
# Use the existing index
self.indexes[gallery_name] = np.array(STORAGE[gallery_name]['index'],dtype=np.float32)
self.face_ids[gallery_name] = list(STORAGE[gallery_name]['face_ids'])
return
else:
# Generate the index
start = time.time()
gsize = self.size(gallery_name)
dset = None
print("Building Gallery Index...")
i = 0
for key in STORAGE[gallery_name]['faces']:
i += 1
if i % 1000 == 0: print("Scanning ",i," of ",gsize)
tmp = STORAGE[gallery_name]['faces'][key]
face = FaceRecord()
face.ParseFromString(np.array(tmp).tobytes())
vec = pt.vector_proto2np(face.template.data)
# Figure out the size of the vectors
assert len(vec.shape) == 1
cols = vec.shape[0]
# Store into an h5 datasets to keep memory requirements low
if dset is None:
try:
del STORAGE[gallery_name]['index']
except:
pass
dset = STORAGE[gallery_name].create_dataset("index", (0,cols), maxshape=(None, cols),dtype='f4')
dt = None
try:
dt = h5py.string_dtype() # h5py > 2.10.0
except:
dt = h5py.special_dtype(vlen=str) # h5py==2.9.0
try:
del STORAGE[gallery_name]['face_ids']
except:
pass
fset = STORAGE[gallery_name].create_dataset("face_ids", (0,), maxshape=(None,),dtype=dt)
r,c = dset.shape
dset.resize((r+1,cols))
dset[r,:] = vec
fset.resize((r+1,))
fset[r] = key
stop = time.time()
# save the index in memory
self.indexes[gallery_name] = np.array(STORAGE[gallery_name]['index'],dtype=np.float32)
self.face_ids[gallery_name] = list(STORAGE[gallery_name]['face_ids'])
print(" Index Complete: %d faces in %0.3fs Total Size: %s"%(self.size(gallery_name),stop-start, STORAGE[gallery_name]['index'].shape))
def search(self, gallery_name, probes, max_results, threshold):
''' search the gallery using the index. '''
score_type = self.score_type
probe_mat = [pt.vector_proto2np(face_rec.template.data) for face_rec in probes.face_records]
probe_mat = np.array(probe_mat,dtype=np.float32)
gal_mat = self.indexes[gallery_name]
# Compute the distance
if score_type == fsd.L1:
scores = spat.distance_matrix(probe_mat,gal_mat,1)
elif score_type == fsd.L2:
scores = spat.distance_matrix(probe_mat,gal_mat,2)
elif score_type == fsd.NEG_DOT:
scores = -np.dot(probe_mat,gal_mat.T)
else:
NotImplementedError("ScoreType %s is not implemented."%(score_type,))
face_ids = self.face_ids[gallery_name]
for p in range(scores.shape[0]):
#probe = probes.face_records[p]
#out = result.probes.face_records[p].search_results
matches = []
for g in range(scores.shape[1]):
score = scores[p,g]
if score > threshold:
continue
face = self.getFaceRecord(gallery_name,face_ids[g])
matches.append( [ score, face ] )
matches.sort(key=lambda x: x[0])
if max_results > 0:
matches = matches[:max_results]
for score,face in matches:
probes.face_records[p].search_results.face_records.add().CopyFrom(face)
probes.face_records[p].search_results.face_records[-1].score=score
return probes
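# Example (hypothetical usage sketch; the options object, gallery name, and
# probe records below are assumptions, not defined in this file):
#   worker = SearchableGalleryWorker(options, fsd.L2)
#   worker.generateIndex('employees')
#   results = worker.search('employees', probes, max_results=10, threshold=0.8)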
|
{"hexsha": "4d493e189904a7264c7723bd85003cea3588e9f1", "size": 14863, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/faro/FaceGallery.py", "max_stars_repo_name": "reidej/faro", "max_stars_repo_head_hexsha": "b85b1c6ba7cb69fac6cfd62ba64558676de24fd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-06-26T16:32:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T12:54:45.000Z", "max_issues_repo_path": "src/faro/FaceGallery.py", "max_issues_repo_name": "reidej/faro", "max_issues_repo_head_hexsha": "b85b1c6ba7cb69fac6cfd62ba64558676de24fd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:40:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:52:36.000Z", "max_forks_repo_path": "src/faro/FaceGallery.py", "max_forks_repo_name": "reidej/faro", "max_forks_repo_head_hexsha": "b85b1c6ba7cb69fac6cfd62ba64558676de24fd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-02-11T19:08:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T13:10:56.000Z", "avg_line_length": 32.955654102, "max_line_length": 158, "alphanum_fraction": 0.5956401803, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3138}
|
%% brute_force_tune
% Code to test the performance of various tuning parameters
% A randomized search over the parameter grid (loosely RANSAC-like)
% Adam Werries 2016, see Apache 2.0 license.
k_max = 50;
% Specify ranges
accel_bias_PSD = logspace(-10,-4,100);
gyro_bias_PSD = logspace(-10,-4,100);
% Repeat arrays
accel_bias_PSD = repmat(accel_bias_PSD, [1 k_max]);
gyro_bias_PSD = repmat(gyro_bias_PSD, [1 k_max]);
% Generate random selections of each vector
num_items = length(accel_bias_PSD);
accel_bias_i = randperm(num_items);
gyro_bias_i = randperm(num_items);
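% randperm decouples the accel/gyro pairings, so each iteration below tests a
% random (accel PSD, gyro PSD) combination -- a randomized search over the grid.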
rms_error_filter = Inf*ones(1,num_items);
max_error_filter = Inf*ones(1,num_items);
parfor i = 1:num_items
fprintf('Iteration: %d, ABias: %08.5e, GBias: %08.5e\n', i, accel_bias_PSD(accel_bias_i(i)), gyro_bias_PSD(gyro_bias_i(i)));
temp_conf = LC_KF_config;
temp_conf.accel_bias_PSD = accel_bias_PSD(accel_bias_i(i));
temp_conf.gyro_bias_PSD = gyro_bias_PSD(gyro_bias_i(i));
[out_profile,out_IMU_bias_est,out_KF_SD] = Loosely_coupled_INS_GNSS(init_cond, filter_time, epoch, lla, gps, imu, temp_conf, est_IMU_bias);
xyz = out_profile(:,2:4);
if ~any(any(isnan(xyz))) && ~any(any(isinf(xyz)))
llh = ecef2lla(xyz);
[x,y] = deg2utm(llh(:,1),llh(:,2));
x = x-min_x;
y = y-min_y;
% h = -llh(:,3);
% distance = ((ground_truth_full(:,1)-x).^2 + (ground_truth_full(:,2)-y).^2 + (ground_truth_full(:,3)-h).^2).^0.5;
distance = ((ground_truth_full(:,1)-x).^2 + (ground_truth_full(:,2)-y).^2).^0.5;
rms_error_filter(i) = rms(distance);
max_error_filter(i) = max(distance);
end
end
[minmax, i] = min(max_error_filter);
fprintf('\nBest max: %08.4f, rms is %08.4f\n', minmax, rms_error_filter(i));
fprintf('Best iteration for max: %d, ABias: %08.5e, GBias: %08.5e\n', i, accel_bias_PSD(accel_bias_i(i)), gyro_bias_PSD(gyro_bias_i(i)));
[minrms, i] = min(rms_error_filter);
fprintf('Best rms: %08.4f, max is %08.4f\n', minrms, max_error_filter(i));
fprintf('Best iteration for rms: %d, ABias: %08.5e, GBias: %08.5e\n', i, accel_bias_PSD(accel_bias_i(i)), gyro_bias_PSD(gyro_bias_i(i)));
[minrms, i] = min((rms_error_filter+max_error_filter)/2);
fprintf('Best average of RMS and max: %08.4f, rms is %08.4f, max is %08.4f\n', minrms, rms_error_filter(i), max_error_filter(i));
fprintf('Best iteration for rms: %d, ABias: %08.5e, GBias: %08.5e\n', i, accel_bias_PSD(accel_bias_i(i)), gyro_bias_PSD(gyro_bias_i(i)));
|
{"author": "awerries", "repo": "kalman-localization", "sha": "558ca7fae1779aa71da61ec4829299bbbdbf62ff", "save_path": "github-repos/MATLAB/awerries-kalman-localization", "path": "github-repos/MATLAB/awerries-kalman-localization/kalman-localization-558ca7fae1779aa71da61ec4829299bbbdbf62ff/MATLAB/Tuning/tune_bias_psd.m"}
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Copyright (c) 2020 jeonsworld
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore
import numpy as np
import poptorch
import torch
import transformers
from args import parse_args
from datasets import dataset
from ipu_options import get_options
from log import logger
from metrics import accuracy
from model import PipelinedViTForImageClassification
if __name__ == "__main__":
# Validation loop
# Build config from args
config = transformers.ViTConfig(**vars(parse_args()))
logger.info(f"Running config: {config.config}")
# Execution parameters
opts = get_options(config)
test_loader = dataset.get_data(config, opts, train=False, async_dataloader=True)
# Init from a checkpoint
    model = PipelinedViTForImageClassification.from_pretrained(config.pretrained_checkpoint, config=config).parallelize().eval()
    if config.precision.startswith("16."):
        model.half()
# Execution parameters
valid_opts = poptorch.Options()
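    # deviceIterations(4): the IPU runs 4 iterations per host call;
    # OutputMode.All returns the output of every iteration, not just the last.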
valid_opts.deviceIterations(4)
valid_opts.outputMode(poptorch.OutputMode.All)
valid_opts.Precision.enableStochasticRounding(False)
# Wrap in the PopTorch inference wrapper
inference_model = poptorch.inferenceModel(model, options=valid_opts)
    all_acc = []
    all_losses = []
    all_preds, all_labels = [], []
for step, (input_data, labels) in enumerate(test_loader):
        losses, logits = inference_model(input_data, labels)
        preds = torch.argmax(logits, dim=-1)
        acc = accuracy(preds, labels)
        all_acc.append(acc)
        all_losses.append(torch.mean(losses).item())
        all_preds.append(preds.detach().clone())
        all_labels.append(labels.detach().clone())
        logger.info("Valid Loss: {:.3f} Acc: {:.3f}".format(all_losses[-1], acc))
all_preds = torch.cat(all_preds)
all_labels = torch.cat(all_labels)
val_accuracy = accuracy(all_preds, all_labels)
logger.info("\n")
logger.info("Validation Results")
logger.info("Valid Loss: %2.5f" % torch.mean(losses).item())
logger.info("Valid Aver Batch Accuracy: %2.5f" % np.mean(all_acc))
logger.info("Valid Accuracy: %2.5f" % val_accuracy)
|
{"hexsha": "c651432f332c335b93039868e4a3990ce79b3efd", "size": 2700, "ext": "py", "lang": "Python", "max_stars_repo_path": "applications/pytorch/vit/validation.py", "max_stars_repo_name": "payoto/graphcore_examples", "max_stars_repo_head_hexsha": "46d2b7687b829778369fc6328170a7b14761e5c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 260, "max_stars_repo_stars_event_min_datetime": "2019-11-18T01:50:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T23:08:53.000Z", "max_issues_repo_path": "applications/pytorch/vit/validation.py", "max_issues_repo_name": "payoto/graphcore_examples", "max_issues_repo_head_hexsha": "46d2b7687b829778369fc6328170a7b14761e5c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2020-01-28T23:07:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T15:37:06.000Z", "max_forks_repo_path": "applications/pytorch/vit/validation.py", "max_forks_repo_name": "payoto/graphcore_examples", "max_forks_repo_head_hexsha": "46d2b7687b829778369fc6328170a7b14761e5c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 56, "max_forks_repo_forks_event_min_datetime": "2019-11-18T02:13:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T14:36:09.000Z", "avg_line_length": 36.0, "max_line_length": 136, "alphanum_fraction": 0.7255555556, "include": true, "reason": "import numpy", "num_tokens": 630}
|
import unittest
import pycqed as pq
import numpy as np
import matplotlib.pyplot as plt
import os
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_flipping_analysis(unittest.TestCase):
@classmethod
def tearDownClass(self):
plt.close("all")
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
def test_flipping_analysis(self):
        # This test is based on an experiment with a known added detuning
        # in the amplitude. It checks that the analysis recovers the correct
        # scale factor for a range of known detunings.
# 20% detuning only works for coarse
self._check_scaling("20170726_164507", 0.8, 1)
self._check_scaling("20170726_164536", 0.9, 1)
self._check_scaling("20170726_164550", 0.9, 1)
self._check_scaling("20170726_164605", 0.95, 2)
self._check_scaling("20170726_164619", 0.95, 2)
self._check_scaling("20170726_164635", 0.99, 2)
self._check_scaling("20170726_164649", 0.99, 2)
self._check_scaling("20170726_164704", 1, 2)
self._check_scaling("20170726_164718", 1, 2)
self._check_scaling("20170726_164733", 1.01, 2)
self._check_scaling("20170726_164747", 1.01, 2)
self._check_scaling("20170726_164802", 1.05, 1)
self._check_scaling("20170726_164816", 1.05, 1)
self._check_scaling("20170726_164831", 1.1, 1)
self._check_scaling("20170726_164845", 1.1, 1)
# 20% detuning only works for coarse
self._check_scaling("20170726_164901", 1.2, 1)
# Test running it once with showing the initial fit
ma.FlippingAnalysis(t_start="20170726_164901", options_dict={"plot_init": True})
def _check_scaling(self, timestamp, known_detuning, places):
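        # The analysis should return a scale factor s ~ 1/known_detuning,
        # so s * known_detuning is expected to be close to 1.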
a = ma.FlippingAnalysis(t_start=timestamp)
s = a.get_scale_factor()
self.assertAlmostEqual(s * known_detuning, 1, places=places)
print("Scale factor {:.4f} known detuning {:.4f}".format(s, known_detuning))
class Test_Idling_Error_Rate_Analyisis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
@unittest.skip("TODO: fix this test")
def test_error_rates_vary_N2(self):
a = ma.Idling_Error_Rate_Analyisis(
t_start="20180210_181633",
options_dict={"close_figs": True, "vary_N2": True},
)
expected_dict = {
"A": 0.41685563870942149,
"N1": 1064.7100611208791,
"N2": 3644.550952436859,
"offset": 0.52121402524448934,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit +"].best_values[key], value, decimal=2
)
expected_dict = {
"A": -0.13013585779457398,
"N1": 1138.3895116903586,
"N2": 601415.64642756886,
"offset": 0.14572799876310505,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 0"].best_values[key], value, decimal=2
)
expected_dict = {
"A": 0.74324542246644376,
"N1": 939.61974247762646,
"N2": 3566698.2870284803,
"offset": 0.18301612896797623,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 1"].best_values[key], value, decimal=2
)
def test_error_rates_fixed_N2(self):
a = ma.Idling_Error_Rate_Analyisis(
t_start="20180210_181633",
options_dict={"close_figs": True, "vary_N2": False},
)
expected_dict = {
"A": 0.43481425072120633,
"N1": 1034.9644095297574,
"N2": 1e21,
"offset": 0.50671519356947314,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit +"].best_values[key], value, decimal=2
)
expected_dict = {
"A": -0.13013614484482647,
"N1": 1138.3896694924019,
"N2": 1e21,
"offset": 0.1457282565842071,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 0"].best_values[key], value, decimal=2
)
expected_dict = {
"A": 0.7432454022744126,
"N1": 939.61870748568992,
"N2": 1e21,
"offset": 0.18301632862249007,
}
for key, value in expected_dict.items():
np.testing.assert_almost_equal(
a.fit_res["fit 1"].best_values[key], value, decimal=2
)
class Test_Conditional_Oscillation_Analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], "tests", "test_data")
ma.a_tools.datadir = self.datadir
    # [2020-08-05 Victor] The experiment code and analysis were upgraded;
    # new tests are needed, including the case of measuring phase on the
    # parked qubit.
@unittest.skip("FIXME: test dataset has wrong channel convention")
def test_condition_oscillation_extracted_pars(self):
a = ma.Conditional_Oscillation_Analysis(
t_start="20181126_131143", cal_points="gef"
)
qoi = a.proc_data_dict["quantities_of_interest"]
print(qoi)
extracted = np.array(
[
qoi["phi_cond"].nominal_value,
qoi["phi_cond"].std_dev,
qoi["phi_0"].nominal_value,
qoi["phi_0"].std_dev,
qoi["phi_1"].nominal_value,
qoi["phi_1"].std_dev,
qoi["osc_amp_0"].nominal_value,
qoi["osc_amp_0"].std_dev,
qoi["osc_amp_1"].nominal_value,
qoi["osc_amp_1"].std_dev,
qoi["offs_diff"].nominal_value,
qoi["offs_diff"].std_dev,
qoi["osc_offs_0"].nominal_value,
qoi["osc_offs_0"].std_dev,
qoi["osc_offs_1"].nominal_value,
qoi["osc_offs_1"].std_dev,
]
)
expected = np.array(
[
7.139e01,
1.077e00,
8.753e01,
5.926e-01,
1.614e01,
8.990e-01,
4.859e-01,
5.026e-03,
4.792e-01,
7.518e-03,
1.225e-02,
6.395e-03,
4.869e-01,
3.554e-03,
4.992e-01,
5.316e-03,
]
)
np.testing.assert_almost_equal(extracted, expected, decimal=2)
|
{"hexsha": "0744383b09a279190056db6deaf34c6754c68525", "size": 6985, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycqed/tests/analysis_v2/test_timedomain_analysis_v2.py", "max_stars_repo_name": "nuttamas/PycQED_py3", "max_stars_repo_head_hexsha": "1ee35c7428d36ed42ba4afb5d4bda98140b2283e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2016-08-03T10:00:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T11:46:16.000Z", "max_issues_repo_path": "pycqed/tests/analysis_v2/test_timedomain_analysis_v2.py", "max_issues_repo_name": "nuttamas/PycQED_py3", "max_issues_repo_head_hexsha": "1ee35c7428d36ed42ba4afb5d4bda98140b2283e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 512, "max_issues_repo_issues_event_min_datetime": "2016-08-03T17:10:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:03:43.000Z", "max_forks_repo_path": "pycqed/tests/analysis_v2/test_timedomain_analysis_v2.py", "max_forks_repo_name": "nuttamas/PycQED_py3", "max_forks_repo_head_hexsha": "1ee35c7428d36ed42ba4afb5d4bda98140b2283e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2016-10-19T12:00:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T04:43:26.000Z", "avg_line_length": 34.7512437811, "max_line_length": 88, "alphanum_fraction": 0.5697924123, "include": true, "reason": "import numpy", "num_tokens": 1929}
|
"""Here special plot scripts are defined, which can be accessed from the config"""
from forge.tools import (
customize_plot,
config_layout,
relabelPlot,
reject_outliers,
text_box,
)
import holoviews as hv
from holoviews import opts
from holoviews.operation import histogram
import logging
import pandas as pd
import numpy as np
from scipy import stats
log = logging.getLogger(__name__)
def dospecialPlots(data, config, analysisType, plotType, measurements, **plotConfigs):
"""
Can plot all plots from the specialPlots library and returns
It looks in all data files and plots it, if specified in the config!
Returns a Holoviews plot object configured with the standard configs
It does not allow additional configs for the holoview
:param data: Dictionary with the data frames
:param config: The configs dict
:param analysisType: The analysis type in which should be looked
:param plotType: The type of plot as str
:param plotConfigs: The parameters for the special plot, not the holoviews framework!!!
:param measurements: A list of all possible measurements
:return: Holoviews plot object with all plots
"""
    # Generate the requested special plot for every measurement
    Plots = None
    log.info("Plotting special plot: {}".format(plotType))
    for meas in measurements:
        if plotType in config[analysisType].get(meas, {}).get(
            "AdditionalPlots", ""
        ) and plotType in config[analysisType].get("DoSpecialPlots", []):
            # Look up the plot function by name instead of building an eval string
            plotFunc = globals()[plotType]
            newPlot = plotFunc(data, meas, config, analysisType, **plotConfigs)
            if Plots:
                Plots += newPlot
            else:
                Plots = newPlot
if Plots:
        try:
            Plots = config_layout(Plots, **config[analysisType].get("Layout", {}))
        except Exception as err:
            log.warning("Layout options could not be applied. Error: {}".format(err))
else:
log.warning(
"No plots could be generated for {} Plot. No data had a flag for plotting this type of plot".format(
plotType
)
)
return Plots
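# Example (hypothetical config/data layout, for illustration only):
#   plots = dospecialPlots(data, config, "IVCV", "Histogram",
#                          ["Voltage", "Current"], bins=100)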
def BoxWhisker(dfs, measurement, configs, analysisType, **addConfigs):
"""Plots a measurement from all df as boxwisker"""
newConfigs = addConfigs
log.info("Generating BoxWhisker Plot for {}".format(measurement))
try:
plot = hv.BoxWhisker(
dfs["All"],
kdims="Name",
vdims=measurement,
label="BoxWhisker: {}".format(measurement),
group="BoxWhisker: {}".format(measurement),
)
# plot = relabelPlot(plot, label="{}".format(measurement))
# get labels from the configs
# ylabel = "{} [{}]".format(measurement, dfs[dfs["keys"][0]]["units"][dfs[dfs["keys"][0]]["measurements"].index(measurement)])
try:
ylabel = "{} [{}]".format(
measurement,
dfs[dfs["keys"][0]]["units"][
dfs[dfs["keys"][0]]["measurements"].index(measurement)
],
)
except Exception as err:
            log.error(
                "Label could not be generated for BoxWhisker plot {}. Error: {}".format(
                    measurement, err
                )
            )
            ylabel = "Y-Axis"
plot.opts(
box_alpha=0.3,
xrotation=80,
box_color="blue",
height=500,
show_legend=False,
width=600,
whisker_color="blue",
ylabel=ylabel,
)
# Update the plot specific options if need be
generalOptions = configs[analysisType].get("General", {})
newConfigs.update(generalOptions.copy())
data_options = (
configs[analysisType]
.get(measurement, {})
.get("BoxWhisker", {})
.get("PlotOptions", {})
)
newConfigs.update(
configs[analysisType].get("{}Options".format("BoxWhisker"), {})
)
newConfigs.update(data_options)
plot = customize_plot(plot, "", configs[analysisType], **newConfigs)
except Exception as err:
log.error(
"Unexpected error happened during BoxWhisker plot generation {}. Error: {}".format(
measurement, err
)
)
return None
return plot
def Violin(dfs, measurement, configs, analysisType, **addConfigs):
"""Plots a measurement from all df as boxwisker"""
newConfigs = addConfigs
log.info("Generating Violin Plot for {}".format(measurement))
try:
plot = hv.Violin(
dfs["All"],
kdims="Name",
vdims=measurement,
label="Violin: {}".format(measurement),
group="Violin: {}".format(measurement),
)
# get labels from the configs
# ylabel = "{} [{}]".format(measurement, dfs[dfs["keys"][0]]["units"][dfs[dfs["keys"][0]]["measurements"].index(measurement)])
try:
ylabel = "{} [{}]".format(
measurement,
dfs[dfs["keys"][0]]["units"][
dfs[dfs["keys"][0]]["measurements"].index(measurement)
],
)
except Exception as err:
log.error(
"Label could not be generated for violin plot {}. Error: {}".format(
measurement, err
)
)
ylabel = "Y-Axis"
plot.opts(
box_alpha=0.3,
xrotation=80,
box_color="blue",
height=500,
show_legend=False,
width=600,
ylabel=ylabel, # inner='quartiles'
)
# Update the plot specific options if need be
generalOptions = configs[analysisType].get("General", {})
newConfigs.update(generalOptions.copy())
data_options = (
configs[analysisType]
.get(measurement, {})
.get("Violin", {})
.get("PlotOptions", {})
)
newConfigs.update(configs[analysisType].get("{}Options".format("Violin"), {}))
newConfigs.update(data_options)
plot = customize_plot(plot, "", configs[analysisType], **newConfigs)
except Exception as err:
log.error(
"Unexpected error happened during violin plot generation {}. Error: {}".format(
measurement, err
)
)
return None
return plot
def concatHistogram(
dfs, measurement, configs, analysisType, bins=50, iqr=None, **addConfigs
):
"""Concatenates dataframes and generates a Histogram for all passed columns"""
newConfigs = addConfigs
log.info("Generating concat histograms for measurements {}...".format(measurement))
try:
df = dfs["All"]
        # Sanitize data
data = df[measurement].dropna() # Drop all nan
if iqr:
log.info("Outliers correction with iqr: {}".format(iqr))
data = reject_outliers(data, iqr)
mean = np.round(np.mean(data), 2)
rms = np.round(np.sqrt(np.mean(data ** 2)), 2)
std = np.round(np.std(data), 2)
median = np.round(np.median(data), 2)
data = np.histogram(data, bins=bins)
plt = hv.Histogram(
data,
label="Concatenated Histogram: {}".format(measurement),
group="Concatenated Histogram: {}".format(measurement),
)
# plt = hv.Histogram(data, vdims=to_plot, group="Concatenated Histogram: {}".format(to_plot))
try:
xlabel = "{} [{}]".format(
measurement,
dfs[dfs["keys"][0]]["units"][
dfs[dfs["keys"][0]]["measurements"].index(measurement)
],
)
except Exception as err:
            log.error(
                "Label could not be generated for concatenated Histogram {}. Error: {}".format(
                    measurement, err
                )
            )
xlabel = "X-Axis"
plt.opts(xlabel=xlabel)
# Update the plot specific options if need be
generalOptions = configs[analysisType].get("General", {})
newConfigs.update(generalOptions.copy())
data_options = (
configs[analysisType]
.get(measurement, {})
.get("Concatenated Histogram", {})
.get("PlotOptions", {})
)
newConfigs.update(
configs[analysisType].get("{}Options".format("Histogram"), {})
)
newConfigs.update(data_options)
# addConfigs.update({"xlabel": measurement})
plots = customize_plot(plt, "", configs[analysisType], **newConfigs)
# Add text
text = (
"\nMean: {mean} \n"
"Median: {median} \n"
"RMS: {rms}\n"
"std: {std}".format(mean=mean, median=median, rms=rms, std=std)
)
log.info(text)
y = data[0].max()
x = data[1][int(len(data[1]) * 0.9)]
text = hv.Text(x, y, text).opts(fontsize=30)
plots = plots * text
except Exception as err:
log.error(
"Unexpected error happened during concatHist plot generation {}. Error: {}".format(
measurement, err
)
)
return None
return plots
def Histogram(dfs, measurement, configs, analysisType, bins=50, iqr=None, **addConfigs):
"""Generates a Points Plot with a corresponding Histogram"""
newConfigs = addConfigs
log.info("Generating histograms for measurement {}...".format(measurement))
finalplots = None
try:
for key in dfs["keys"]:
log.info(
"Generating histograms for measurement {} for file {}...".format(
measurement, key
)
)
            # Sanitize data
data = dfs[key]["data"][measurement].dropna() # Drop all nan
if iqr:
log.info("Outliers correction with iqr: {}".format(iqr))
data = reject_outliers(data, iqr)
mean = np.round(np.mean(data), 2)
rms = np.round(np.sqrt(np.mean(data ** 2)), 2)
std = np.round(np.std(data), 2)
median = np.round(np.median(data), 2)
data = np.histogram(data, bins=bins)
plt = hv.Histogram(
data,
label="Histogram: {}".format(measurement),
group="Histogram: {}: {}".format(measurement, key),
)
try:
xlabel = "{} [{}]".format(
measurement,
dfs[dfs["keys"][0]]["units"][
dfs[dfs["keys"][0]]["measurements"].index(measurement)
],
)
except Exception as err:
log.error(
"Label could not be generated for Histogram {}. Error: {}".format(
measurement, err
)
)
xlabel = "X-Axis"
plt.opts(xlabel=xlabel)
# Update the plot specific options if need be
generalOptions = configs[analysisType].get("General", {})
newConfigs.update(generalOptions.copy())
data_options = (
configs[analysisType]
.get(measurement, {})
.get("Single Histogram", {})
.get("PlotOptions", {})
)
newConfigs.update(
configs[analysisType].get("{}Options".format("Histogram"), {})
)
newConfigs.update(data_options)
plots = customize_plot(plt, "", configs[analysisType], **newConfigs)
# Add text
text = (
"\nMean: {mean} \n"
"Median: {median} \n"
"RMS: {rms}\n"
"std: {std}".format(mean=mean, median=median, rms=rms, std=std)
)
log.info(text)
y = data[0].max()
x = data[1][int(len(data[1]) * 0.9)]
text = hv.Text(x, y, text).opts(fontsize=30)
# text = text_box(text, x, y, boxsize= (100, 150))
plots = plots * text
if finalplots:
finalplots += plots
else:
finalplots = plots
except Exception as err:
log.error(
"Unexpected error happened during Hist plot generation {}. Error: {}".format(
measurement, err
)
)
return None
return finalplots
def SimplifiedBarChart(
dfs, measurement, configs, analysisType, xaxis, bins=50, **addConfigs
):
"""Generates a simplified bar chart with a simplified x axis, can be handy if you have lots of points """
newConfigs = addConfigs
log.info("Generating BarChart for measurement {}...".format(measurement))
finalplots = None
try:
for key in dfs["keys"]:
log.info(
"Generating histograms for measurement {} for file {}...".format(
measurement, key
)
)
            # Sanitize data
data = dfs[key]["data"][[measurement, xaxis]].dropna() # Drop all nan
invertedaxis = data.reset_index().set_index(measurement)
data = np.histogram(data[measurement], bins=data[xaxis])
plt = hv.Histogram(
data, label="BarChart: {}".format(measurement), group="{}".format(key)
)
try:
xlabel = "{} [{}]".format(
measurement,
dfs[dfs["keys"][0]]["units"][
dfs[dfs["keys"][0]]["measurements"].index(measurement)
],
)
except Exception as err:
log.error(
"Label could not be generated for Histogram {}. Error: {}".format(
measurement, err
)
)
xlabel = "X-Axis"
plt.opts(xlabel=xlabel)
# Update the plot specific options if need be
generalOptions = configs[analysisType].get("General", {})
newConfigs.update(generalOptions.copy())
data_options = (
configs[analysisType]
.get(measurement, {})
.get("Single Histogram", {})
.get("PlotOptions", {})
)
newConfigs.update(
configs[analysisType].get("{}Options".format("Histogram"), {})
)
newConfigs.update(data_options)
plots = customize_plot(plt, "", configs[analysisType], **newConfigs)
if finalplots:
finalplots += plots
else:
finalplots = plots
except Exception as err:
log.error(
"Unexpected error happened during Hist plot generation {}. Error: {}".format(
measurement, err
)
)
return None
return finalplots
|
{"hexsha": "e2ddc2b569b0b4fbbfcbccf4661d2705b3b2a213", "size": 15771, "ext": "py", "lang": "Python", "max_stars_repo_path": "COMET/misc_plugins/PlotScripts/forge/specialPlots.py", "max_stars_repo_name": "dallaval5u/COMET", "max_stars_repo_head_hexsha": "8c5793faafe2797dd4100507aa0fe1e71cf9f6c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "COMET/misc_plugins/PlotScripts/forge/specialPlots.py", "max_issues_repo_name": "dallaval5u/COMET", "max_issues_repo_head_hexsha": "8c5793faafe2797dd4100507aa0fe1e71cf9f6c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "COMET/misc_plugins/PlotScripts/forge/specialPlots.py", "max_forks_repo_name": "dallaval5u/COMET", "max_forks_repo_head_hexsha": "8c5793faafe2797dd4100507aa0fe1e71cf9f6c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5202702703, "max_line_length": 134, "alphanum_fraction": 0.5167078816, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3358}
|
import os
# hacky, but whatever
import sys
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(my_path, '..'))
import mdpsim # noqa: #402
import pytest # noqa: E402
import tensorflow as tf # noqa: E402
import numpy as np # noqa: E402
pytest.register_assert_rewrite('models')
from models import check_prob_dom_meta, get_domain_meta, \
get_problem_meta # noqa: E402
from tf_utils import masked_softmax # noqa: E402
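# masked_softmax renormalizes the softmax over the enabled entries only; when
# an entire row is masked out it falls back to a uniform distribution (both
# behaviours are exercised in TestMaskedSoftmax below).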
def test_prob_dom_meta():
tt_path = os.path.join(my_path, 'triangle-tire.pddl')
mdpsim.parse_file(tt_path)
domain = mdpsim.get_domains()['triangle-tire']
problem = mdpsim.get_problems()['triangle-tire-2']
dom_meta = get_domain_meta(domain)
prob_meta = get_problem_meta(problem)
check_prob_dom_meta(prob_meta, dom_meta)
class TestMaskedSoftmax(tf.test.TestCase):
def test_masked_softmax(self):
with self.test_session():
values = [[-1, 3.5, 2], [-1, 3.5, 2], [1, 0, 3]]
mask = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]
result = masked_softmax(values, mask)
def real_softmax(vec):
exps = np.exp(vec)
return exps / np.sum(exps, axis=-1, keepdims=True)
# uniform because nothing's enabled
row0 = [1 / 3.0, 1 / 3.0, 1 / 3.0]
# not uniform
row1 = real_softmax([-1, 3.5, 2])
# not uniform, but first one doesn't count
row2 = np.concatenate([[0], real_softmax([0, 3])])
expected = [row0, row1, row2]
self.assertAllClose(expected, result.eval())
|
{"hexsha": "49c085c1e0faa3b8f92412f5e2bc2c1413bdc226", "size": 1608, "ext": "py", "lang": "Python", "max_stars_repo_path": "asnets/tests/tests.py", "max_stars_repo_name": "xf1590281/ASNets", "max_stars_repo_head_hexsha": "5f4b29fb62a5e72004b813228442d06246c9ec33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2017-12-05T13:27:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T20:32:33.000Z", "max_issues_repo_path": "asnets/tests/tests.py", "max_issues_repo_name": "xf1590281/ASNets", "max_issues_repo_head_hexsha": "5f4b29fb62a5e72004b813228442d06246c9ec33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-07-16T12:15:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-31T00:02:49.000Z", "max_forks_repo_path": "asnets/tests/tests.py", "max_forks_repo_name": "xf1590281/ASNets", "max_forks_repo_head_hexsha": "5f4b29fb62a5e72004b813228442d06246c9ec33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-03-19T13:45:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T07:52:20.000Z", "avg_line_length": 32.16, "max_line_length": 66, "alphanum_fraction": 0.6200248756, "include": true, "reason": "import numpy", "num_tokens": 453}
|
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
ratings = pd.read_csv("https://s3-us-west-2.amazonaws.com/recommender-tutorial/ratings.csv")
# a = ratings.head()
# print(a) # 1
movies = pd.read_csv("https://s3-us-west-2.amazonaws.com/recommender-tutorial/movies.csv")
# b = movies.head()
# print(b) # 2
n_ratings = len(ratings)
n_movies = ratings['movieId'].nunique()
n_users = ratings['userId'].nunique()
# print(f'Number of ratings: {n_ratings}')
# print(f"Number of unique movieId's: {n_movies}")
# print(f"Number of unique users: {n_users}")
# print(f"Average number of ratings per user: {round(n_ratings/n_users, 2)}")
# print(f"Average number of ratings per movie: {round(n_ratings/n_movies, 2)}") # 3
user_freq = ratings[['userId', 'movieId']].groupby('userId').count().reset_index()
user_freq.columns = ['userId', 'n_ratings']
# c = user_freq.head()
# print(c) # 4
# print(f"Mean number of ratings for a giver user: {user_freq['n_ratings'].mean():.2f}") # 5
# sns.set_style('whitegrid')
# plt.figure(figsize=(14,5))
# plt.subplot(1, 2, 1)
# ax = sns.countplot(x = "rating", data=ratings, palette='viridis')
# plt.title("Distribution of movie ratings")
#
# plt.subplot(1, 2, 2)
# ax = sns.kdeplot(user_freq['n_ratings'], shade=True, legend=False)
# plt.axvline(user_freq['n_ratings'].mean(), color="k", linestyle="--")
# plt.xlabel("# ratings per user")
# plt.ylabel("density")
# plt.title("Number of movies rated per user")
# plt.show() # 6
mean_rating = ratings.groupby('movieId')[['rating']].mean()
#
# lowest_rated = mean_rating['rating'].idxmin()
# d = movies.loc[movies['movieId'] == lowest_rated]
# print(d) # 7
highest_rated = mean_rating['rating'].idxmax()
e = movies.loc[movies['movieId'] == highest_rated]
# print(e) # 8
# f = ratings[ratings['movieId'] == highest_rated]
# print(f) # 9
movie_stats = ratings.groupby('movieId')[['rating']].agg(['count', 'mean'])
movie_stats.columns = movie_stats.columns.droplevel()
C = movie_stats['count'].mean()
m = movie_stats['mean'].mean()
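# Bayesian (damped) average: shrink each movie's mean rating towards the
# global mean m, weighted by the average number of ratings per movie C.
# Sparsely rated movies are pulled strongly towards m, while heavily rated
# movies keep roughly their raw mean.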
def bayesian_avg(ratings):
    bayesian_avg = (C * m + ratings.sum()) / (C + ratings.count())
return bayesian_avg
bayesian_avg_ratings = ratings.groupby('movieId')['rating'].agg(bayesian_avg).reset_index()
bayesian_avg_ratings.columns = ['movieId', 'bayesian_avg']
movie_stats = movie_stats.merge(bayesian_avg_ratings, on='movieId')
movie_stats = movie_stats.merge(movies[['movieId', 'title']])
g = movie_stats.sort_values('bayesian_avg', ascending=False).head()
# print(g) # 10
h = movie_stats.sort_values('bayesian_avg', ascending=True).head()
# print(h) # 11
from scipy.sparse import csr_matrix
def create_X(df):
"""
Generates a sparse matrix from ratings dataframe.
Args:
df: pandas dataframe
Returns:
X: sparse matrix
user_mapper: dict that maps id's to user indices
user_inv_mapper: dict that maps user indices to user id's
movie_mapper: dict that maps movie id's to movie indices
movie_inv_mapper: dict that maps movie indices to movie id's
"""
N = df['userId'].nunique()
M = df['movieId'].nunique()
user_mapper = dict(zip(np.unique(df["userId"]), list(range(N))))
movie_mapper = dict(zip(np.unique(df["movieId"]), list(range(M))))
user_inv_mapper = dict(zip(list(range(N)), np.unique(df["userId"])))
movie_inv_mapper = dict(zip(list(range(M)), np.unique(df["movieId"])))
user_index = [user_mapper[i] for i in df['userId']]
movie_index = [movie_mapper[i] for i in df['movieId']]
X = csr_matrix((df["rating"], (movie_index, user_index)), shape=(M, N))
return X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper
X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper = create_X(ratings)
sparsity = X.count_nonzero()/(X.shape[0]*X.shape[1])
# print(f"Matrix sparsity: {round(sparsity*100, 2)}%") # 12
# from scipy.sparse import save_npz
# save_npz('Users\Dalvani\Desktop\TCC', X) # 13
from sklearn.neighbors import NearestNeighbors
def find_similar_movies(movie_id, X, k, metric='cosine', show_distance=False):
"""
Finds k-nearest neighbours for a given movie id
Args:
movie_id: id of the movie of interest
X: user-item utility matrix
k: number of similar movies to retrieve
metric: distance metric for kNN calculations
Returns:
list of k similar movie ID's
"""
neighbours_ids = []
movie_ind = movie_mapper[movie_id]
movie_vec = X[movie_ind]
k += 1
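    # query k+1 neighbours: the nearest neighbour is the movie itself and is
    # dropped below via neighbours_ids.pop(0)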
kNN = NearestNeighbors(n_neighbors=k, algorithm="brute", metric=metric)
kNN.fit(X)
    if isinstance(movie_vec, np.ndarray):
movie_vec = movie_vec.reshape(1, -1)
neighbour = kNN.kneighbors(movie_vec, return_distance=show_distance)
for i in range(0, k):
n = neighbour.item(i)
neighbours_ids.append(movie_inv_mapper[n])
neighbours_ids.pop(0)
return neighbours_ids
movie_titles = dict(zip(movies['movieId'], movies['title']))
movie_id = 2
similar_ids = find_similar_movies(movie_id, X, k=10)
movie_title = movie_titles[movie_id]
print(f"Because you watched {movie_title}")
print('You should also watch: ')
print('-' * 30)
for i in similar_ids:
print(movie_titles[i])
|
{"hexsha": "620f5c722e340285a5cc275bd99f47ed35f878e4", "size": 5364, "ext": "py", "lang": "Python", "max_stars_repo_path": "first_test.py", "max_stars_repo_name": "VictorBenoiston/algorithms_testing", "max_stars_repo_head_hexsha": "78ea075d4c49515d2a4ae96901e1c7d66ed7b9f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "first_test.py", "max_issues_repo_name": "VictorBenoiston/algorithms_testing", "max_issues_repo_head_hexsha": "78ea075d4c49515d2a4ae96901e1c7d66ed7b9f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "first_test.py", "max_forks_repo_name": "VictorBenoiston/algorithms_testing", "max_forks_repo_head_hexsha": "78ea075d4c49515d2a4ae96901e1c7d66ed7b9f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.119760479, "max_line_length": 94, "alphanum_fraction": 0.6907158837, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1448}
|
MODULE params_model
USE common, ONLY: r_size
IMPLICIT NONE
PUBLIC
! Now definable via namelist at runtime:
! MOM4 ncep2012 tripolar converted to spherical
#ifdef DYNAMIC
INTEGER :: nlon=720
INTEGER :: nlat=410
INTEGER :: nlev=5
#else
INTEGER,PARAMETER :: nlon=720
INTEGER,PARAMETER :: nlat=410
INTEGER,PARAMETER :: nlev=5 ! These are the different ice thicknesss categories:
! (0-0.1, 0.1-0.3, 0.3-0.7, 0.7-1.1, and > 1.1 m)
#endif
INTEGER,PARAMETER :: ilev_sfc=1
!
INTEGER,PARAMETER :: nv3d=5
INTEGER,PARAMETER :: nv4d=0 ! x,y,z !(OCEAN) STEVE: add t,x,y,z,id for DRIFTERS
INTEGER,PARAMETER :: nv2d=1 !STEVE: update when adding ui and vi -> nv2d=3
!
! Initialize via subroutine below:
INTEGER,SAVE :: nij0 ! Number of gridpoints handled by myrank processor
INTEGER,SAVE :: nlevall ! Total number of variables and levels (3d + 2d)
INTEGER,SAVE :: ngpv ! Total number of gridpoints, including nij0*nlevall
! Placeholders needed by letkf_tools.f90 to compile:
INTEGER,PARAMETER :: iv3d_t = -1
INTEGER,PARAMETER :: iv2d_mld = -1
!
! 3D Variables:
INTEGER,PARAMETER :: iv3d_hs=1
INTEGER,PARAMETER :: iv3d_hi=2
INTEGER,PARAMETER :: iv3d_t1=3
INTEGER,PARAMETER :: iv3d_t2=4
INTEGER,PARAMETER :: iv3d_ps=5 ! 3D part size, from restart file levels 2-nlev+1
! 2D Variables:
INTEGER,PARAMETER :: iv2d_cn=1 ! 2D ice concentration as measured
INTEGER,PARAMETER :: iv2d_ui=2 ! 2D ice drift (zonal)
INTEGER,PARAMETER :: iv2d_vi=3 ! 2D ice drift (meridional)
REAL(r_size) :: obs_noise_coeff = 0.01 !(SIS) !STEVE: can set in namelist
! INTEGER,PARAMETER :: iv4d_x=1 !(OCEAN) (DRIFTERS)
! INTEGER,PARAMETER :: iv4d_y=2 !(OCEAN) (DRIFTERS)
! INTEGER,PARAMETER :: iv4d_z=3 !(OCEAN) (DRIFTERS)
!
! Elements
!
CHARACTER(20), SAVE :: element(nv3d+nv2d+nv4d)
!For input/output model files:
CHARACTER(16) :: basefile = 'ice_model.res.nc'
! CHARACTER(14) :: SSHclm_file = 'aEtaCds9399.nc'
! CHARACTER(32) :: ts_basefile = 'ocean_temp_salt.res.nc'
! CHARACTER(32) :: uv_basefile = 'ocean_velocity.res.nc'
! CHARACTER(32) :: sf_basefile = 'ocean_sbc.res.nc'
! CHARACTER(32) :: sh_basefile = 'ocean_barotropic.res.nc'
! CHARACTER(32) :: hs_basefile = 'ocean_TS.nc'
! For grid_spec.nc data file:
CHARACTER(12) :: gridfile = 'grid_spec.nc'
! variable names in gridfile:
CHARACTER(8) :: grid_lon_name = 'grid_x_T'
CHARACTER(8) :: grid_lat_name = 'grid_y_T'
CHARACTER(2) :: grid_lev_name = 'zt'
CHARACTER(3) :: grid_lon2d_name = 'x_T'
CHARACTER(3) :: grid_lat2d_name = 'y_T'
CHARACTER(3) :: grid_wet_name = 'wet'
CHARACTER(10):: grid_kmt_name = 'num_levels'
! variable names in diag file:
CHARACTER(6) :: diag_hs_name = 'h_snow'
CHARACTER(5) :: diag_hi_name = 'h_ice'
CHARACTER(6) :: diag_t1_name = 't_ice1'
CHARACTER(6) :: diag_t2_name = 't_ice2'
CHARACTER(9) :: diag_ps_name = 'part_size'
! variable names in restart file:
CHARACTER(6) :: rsrt_hs_name = 'h_snow'
CHARACTER(5) :: rsrt_hi_name = 'h_ice'
CHARACTER(6) :: rsrt_t1_name = 't_ice1'
CHARACTER(6) :: rsrt_t2_name = 't_ice2'
CHARACTER(9) :: rsrt_ps_name = 'part_size'
! Bounds checking (for output by common_mom4.f90::write_restart)
LOGICAL :: do_physlimit=.true.
  REAL(r_size) :: max_t = 40.0d0 ! °C
  REAL(r_size) :: min_t = -4.0d0 ! °C
REAL(r_size) :: max_s = 50.0d0 ! psu
REAL(r_size) :: min_s = 0.0d0 ! psu
!STEVE: needed for letkf.f90 to compile
CHARACTER(14) :: SSHclm_file = 'aEtaCds9399.nc'
LOGICAL,SAVE :: params_model_initialized = .false.
CONTAINS
SUBROUTINE initialize_params_model
IMPLICIT NONE
if (params_model_initialized) then
WRITE(6,*) "initialize_params_model:: already initialized, RETURNING..."
RETURN
endif
nij0=nlon*nlat
nlevall=nlev*nv3d+nv2d
ngpv=nij0*nlevall
!
! Elements
!
element(iv3d_hs) = 'snow layer height'
element(iv3d_hi) = ' ice layer height'
element(iv3d_t1) = ' ice layer 1 temp'
element(iv3d_t2) = ' ice layer 2 temp'
params_model_initialized = .true.
END SUBROUTINE initialize_params_model
END MODULE params_model
|
{"hexsha": "e43f4eecd057fbbf6968ac267c6e9dd5d812feaa", "size": 4212, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/model_specific/sis/params_model.f90", "max_stars_repo_name": "GEOS-ESM/Ocean-LETKF", "max_stars_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-31T15:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T13:44:20.000Z", "max_issues_repo_path": "src/model_specific/sis/params_model.f90", "max_issues_repo_name": "GEOS-ESM/Ocean-LETKF", "max_issues_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model_specific/sis/params_model.f90", "max_forks_repo_name": "GEOS-ESM/Ocean-LETKF", "max_forks_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T18:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T18:46:56.000Z", "avg_line_length": 30.5217391304, "max_line_length": 96, "alphanum_fraction": 0.6747388414, "num_tokens": 1428}
|
# Invertible network based on Glow (Kingma and Dhariwal, 2018)
# Includes 1x1 convolution and residual block
# Author: Philipp Witte, pwitte3@gatech.edu
# Date: February 2020
export NetworkGlow, NetworkGlow3D
"""
G = NetworkGlow(n_in, n_hidden, L, K; k1=3, k2=1, p1=1, p2=0, s1=1, s2=1)
G = NetworkGlow3D(n_in, n_hidden, L, K; k1=3, k2=1, p1=1, p2=0, s1=1, s2=1)
Create an invertible network based on the Glow architecture. Each flow step in the inner loop
consists of an activation normalization layer, followed by an invertible coupling layer with
1x1 convolutions and a residual block. The outer loop performs a squeezing operation prior
to the inner loop, and a splitting operation afterwards.
*Input*:
- 'n_in': number of input channels
- `n_hidden`: number of hidden units in residual blocks
- `L`: number of scales (outer loop)
- `K`: number of flow steps per scale (inner loop)
- `k1`, `k2`: kernel size of convolutions in residual block. `k1` is the kernel of the first and third
operator, `k2` is the kernel size of the second operator.
- `p1`, `p2`: padding for the first and third convolution (`p1`) and the second convolution (`p2`)
- `s1`, `s2`: stride for the first and third convolution (`s1`) and the second convolution (`s2`)
 - `ndims` : number of dimensions
*Output*:
- `G`: invertible Glow network.
*Usage:*
- Forward mode: `Y, logdet = G.forward(X)`
- Backward mode: `ΔX, X = G.backward(ΔY, Y)`
*Trainable parameters:*
- None in `G` itself
- Trainable parameters in activation normalizations `G.AN[i,j]` and coupling layers `G.C[i,j]`,
where `i` and `j` range from `1` to `L` and `K` respectively.
See also: [`ActNorm`](@ref), [`CouplingLayerGlow!`](@ref), [`get_params`](@ref), [`clear_grad!`](@ref)
"""
struct NetworkGlow <: InvertibleNetwork
AN::AbstractArray{ActNorm, 2}
CL::AbstractArray{CouplingLayerGlow, 2}
Z_dims::AbstractArray{Tuple, 1}
L::Int64
K::Int64
end
@Flux.functor NetworkGlow
# Constructor
function NetworkGlow(n_in, n_hidden, L, K; k1=3, k2=1, p1=1, p2=0, s1=1, s2=1, ndims=2)
AN = Array{ActNorm}(undef, L, K) # activation normalization
CL = Array{CouplingLayerGlow}(undef, L, K) # coupling layers w/ 1x1 convolution and residual block
Z_dims = Array{Tuple}(undef, L-1) # save dimensions for inverse/backward pass
for i=1:L
n_in *= 4 # squeeze
for j=1:K
AN[i, j] = ActNorm(n_in; logdet=true)
CL[i, j] = CouplingLayerGlow(n_in, n_hidden; k1=k1, k2=k2, p1=p1, p2=p2, s1=s1, s2=s2, logdet=true, ndims=ndims)
end
(i < L) && (n_in = Int64(n_in/2)) # split
end
return NetworkGlow(AN, CL, Z_dims, L, K)
end
NetworkGlow3D(args; kw...) = NetworkGlow(args...; kw..., ndims=3)
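# Hypothetical usage sketch (channel counts and sizes are illustrative only,
# not taken from this file):
#   G = NetworkGlow(3, 32, 2, 4)        # 3 input channels, 32 hidden units, L=2, K=4
#   X = randn(Float32, 64, 64, 3, 10)   # batch of 10 images in WHCN layout
#   Y, logdet = forward(X, G)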
# Concatenate states Zi and final output
function cat_states(Z_save, X)
Y = []
for j=1:length(Z_save)
Y = cat(Y, vec(Z_save[j]); dims=1)
end
Y = cat(Y, vec(X); dims=1)
return Float32.(Y) # convert to Array{Float32, 1}
end
# Split 1D vector in latent space back to states Zi
function split_states(Y, Z_dims)
L = length(Z_dims) + 1
Z_save = Array{Array}(undef, L-1)
count = 1
for j=1:L-1
Z_save[j] = reshape(Y[count: count + prod(Z_dims[j])-1], Z_dims[j])
count += prod(Z_dims[j])
end
X = reshape(Y[count: count + prod(Z_dims[end])-1], Int.(Z_dims[end].*(.5, .5, 4, 1)))
return Z_save, X
end
# Forward pass and compute logdet
function forward(X, G::NetworkGlow)
Z_save = Array{Array}(undef, G.L-1)
logdet = 0f0
for i=1:G.L
X = squeeze(X; pattern="checkerboard")
for j=1:G.K
X, logdet1 = G.AN[i, j].forward(X)
X, logdet2 = G.CL[i, j].forward(X)
logdet += (logdet1 + logdet2)
end
if i < G.L # don't split after last iteration
X, Z = tensor_split(X)
Z_save[i] = Z
G.Z_dims[i] = size(Z)
end
end
X = cat_states(Z_save, X)
return X, logdet
end
# Inverse pass and compute gradients
function inverse(X, G::NetworkGlow)
Z_save, X = split_states(X, G.Z_dims)
for i=G.L:-1:1
if i < G.L
X = tensor_cat(X, Z_save[i])
end
for j=G.K:-1:1
X = G.CL[i, j].inverse(X)
X = G.AN[i, j].inverse(X)
end
X = unsqueeze(X; pattern="checkerboard")
end
return X
end
# Backward pass and compute gradients
function backward(ΔX, X, G::NetworkGlow; set_grad::Bool=true)
ΔZ_save, ΔX = split_states(ΔX, G.Z_dims)
Z_save, X = split_states(X, G.Z_dims)
if ~set_grad
Δθ = Array{Parameter, 1}(undef, 10*G.L*G.K)
∇logdet = Array{Parameter, 1}(undef, 10*G.L*G.K)
end
blkidx = 10*G.L*G.K
for i=G.L:-1:1
if i < G.L
X = tensor_cat(X, Z_save[i])
ΔX = tensor_cat(ΔX, ΔZ_save[i])
end
for j=G.K:-1:1
if set_grad
ΔX, X = G.CL[i, j].backward(ΔX, X)
ΔX, X = G.AN[i, j].backward(ΔX, X)
else
ΔX, Δθcl_ij, X, ∇logdetcl_ij = G.CL[i, j].backward(ΔX, X; set_grad=set_grad)
ΔX, Δθan_ij, X, ∇logdetan_ij = G.AN[i, j].backward(ΔX, X; set_grad=set_grad)
Δθ[blkidx-9:blkidx] = cat(Δθan_ij, Δθcl_ij; dims=1)
∇logdet[blkidx-9:blkidx] = cat(∇logdetan_ij, ∇logdetcl_ij; dims=1)
end
blkidx -= 10
end
X = unsqueeze(X; pattern="checkerboard")
ΔX = unsqueeze(ΔX; pattern="checkerboard")
end
set_grad ? (return ΔX, X) : (return ΔX, Δθ, X, ∇logdet)
end
## Jacobian-related utils
function jacobian(ΔX, Δθ::Array{Parameter, 1}, X, G::NetworkGlow)
Z_save = Array{Array}(undef, G.L-1)
ΔZ_save = Array{Array}(undef, G.L-1)
logdet = 0f0
GNΔθ = Array{Parameter, 1}(undef, 10*G.L*G.K)
blkidx = 0
for i=1:G.L
X = squeeze(X; pattern="checkerboard")
ΔX = squeeze(ΔX; pattern="checkerboard")
for j=1:G.K
Δθ_ij = Δθ[blkidx+1:blkidx+10]
ΔX, X, logdet1, GNΔθ1 = G.AN[i, j].jacobian(ΔX, Δθ_ij[1:2], X)
ΔX, X, logdet2, GNΔθ2 = G.CL[i, j].jacobian(ΔX, Δθ_ij[3:end], X)
logdet += (logdet1 + logdet2)
GNΔθ[blkidx+1:blkidx+10] = cat(GNΔθ1,GNΔθ2; dims=1)
blkidx += 10
end
if i < G.L # don't split after last iteration
X, Z = tensor_split(X)
ΔX, ΔZ = tensor_split(ΔX)
Z_save[i] = Z
ΔZ_save[i] = ΔZ
G.Z_dims[i] = size(Z)
end
end
X = cat_states(Z_save, X)
ΔX = cat_states(ΔZ_save, ΔX)
return ΔX, X, logdet, GNΔθ
end
adjointJacobian(ΔX, X, G::NetworkGlow) = backward(ΔX, X, G; set_grad=false)
## Other utils
# Clear gradients
function clear_grad!(G::NetworkGlow)
L, K = size(G.AN)
for i=1:L
for j=1:K
clear_grad!(G.AN[i, j])
clear_grad!(G.CL[i, j])
end
end
end
# Get parameters
function get_params(G::NetworkGlow)
L, K = size(G.AN)
p = Array{Parameter, 1}(undef, 0)
for i=1:L
for j=1:K
p = cat(p, get_params(G.AN[i, j]); dims=1)
p = cat(p, get_params(G.CL[i, j]); dims=1)
end
end
return p
end
|
{"hexsha": "8b130ec3ed00254c2b31ba70d87b6dde2b86903e", "size": 7300, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/networks/invertible_network_glow.jl", "max_stars_repo_name": "PetersBas/InvertibleNetworks.jl", "max_stars_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/networks/invertible_network_glow.jl", "max_issues_repo_name": "PetersBas/InvertibleNetworks.jl", "max_issues_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/networks/invertible_network_glow.jl", "max_forks_repo_name": "PetersBas/InvertibleNetworks.jl", "max_forks_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4166666667, "max_line_length": 124, "alphanum_fraction": 0.5882191781, "num_tokens": 2515}
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read images
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape
img2 = cv2.imread("thorino.jpg").astype(np.float32)
a = 0.6
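# Alpha blending: out = a*img + (1-a)*img2, a pixel-wise weighted average
# (both inputs must have the same shape)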
out = img * a + img2 * (1 - a)
out = out.astype(np.uint8)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "7003d5902ab62a4f060edc115b6aa7ccb1358e5d", "size": 377, "ext": "py", "lang": "Python", "max_stars_repo_path": "Question_51_60/answers/answer_60.py", "max_stars_repo_name": "Zpadger/ImageProcessing100Wen", "max_stars_repo_head_hexsha": "993ebc6c16c43b1fc664382833ef7724b439e1ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-28T09:21:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-28T09:21:18.000Z", "max_issues_repo_path": "Question_51_60/answers/answer_60.py", "max_issues_repo_name": "Zpadger/ImageProcessing100Wen", "max_issues_repo_head_hexsha": "993ebc6c16c43b1fc664382833ef7724b439e1ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-13T19:22:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-13T19:22:07.000Z", "max_forks_repo_path": "Question_51_60/answers/answer_60.py", "max_forks_repo_name": "Zpadger/ImageProcessing100Wen", "max_forks_repo_head_hexsha": "993ebc6c16c43b1fc664382833ef7724b439e1ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-30T15:38:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T15:38:48.000Z", "avg_line_length": 18.85, "max_line_length": 51, "alphanum_fraction": 0.6816976127, "include": true, "reason": "import numpy", "num_tokens": 121}
|
# Script 2/2 to ensure that corr did the same thing between R and Python (June 2016)
import numpy as np
import pandas as pd
METHOD='spearman'
# my_df = pd.DataFrame([[3,2,np.nan], [5,9,3], [1,np.nan],[2,8,2], [4,1,8]])
my_df = pd.DataFrame.from_dict({"a":[3,5,1,2,4], "b":[2,9,np.nan,8,1], "c":[np.nan,3,5,2,8]})
print("in_df: \n{}".format(my_df))
# Compute correlation for whole df
out_cor = my_df.corr(method=METHOD)
print("all_corrs: \n{}".format(out_cor))
# Expect col1 v. col2 to be [3,5,2,4] v. [2,9,8,1]
col1_v_col2_cor = pd.Series([3,5,2,4]).corr(pd.Series([2,9,8,1]), method=METHOD)
print("a v. b: {}, {}".format(col1_v_col2_cor, np.isclose(col1_v_col2_cor, out_cor.iloc[0,1])))
# Expect col1 v. col3 to be [5,1,2,4] v. [3,5,2,8]
col1_v_col3_cor = pd.Series([5,1,2,4]).corr(pd.Series([3,5,2,8]), method=METHOD)
print("a v. c: {}, {}".format(col1_v_col3_cor, np.isclose(col1_v_col3_cor, out_cor.iloc[0,2])))
# Expect col2 v. col3 to be [9,8,1] v. [3,2,8]
col2_v_col3_cor = pd.Series([9,8,1]).corr(pd.Series([3,2,8]), method=METHOD)
print("b v. c: {}, {}".format(col2_v_col3_cor, np.isclose(col2_v_col3_cor, out_cor.iloc[1,2])))
|
{"hexsha": "148a5e2e9927f2c1ab6e88f0e36d9db56e3f7a7a", "size": 1142, "ext": "py", "lang": "Python", "max_stars_repo_path": "broadinstitute_psp/utils/corr_verification.py", "max_stars_repo_name": "cmap/psp", "max_stars_repo_head_hexsha": "9389e9d86424e460e577dd1d9027f4a1d1f8227a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-08-16T07:43:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T20:18:08.000Z", "max_issues_repo_path": "broadinstitute_psp/utils/corr_verification.py", "max_issues_repo_name": "cmap/psp", "max_issues_repo_head_hexsha": "9389e9d86424e460e577dd1d9027f4a1d1f8227a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-11-14T18:51:39.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-12T17:43:03.000Z", "max_forks_repo_path": "broadinstitute_psp/utils/corr_verification.py", "max_forks_repo_name": "cmap/psp", "max_forks_repo_head_hexsha": "9389e9d86424e460e577dd1d9027f4a1d1f8227a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-01-25T17:47:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T16:05:33.000Z", "avg_line_length": 43.9230769231, "max_line_length": 95, "alphanum_fraction": 0.6541155867, "include": true, "reason": "import numpy", "num_tokens": 459}
|