text
stringlengths
0
1.25M
meta
stringlengths
47
1.89k
import os
import subprocess

import fsspec
import numpy as np
import pandas as pd
import pytest
import scipy.sparse

from cirrocumulus.parquet_dataset import ParquetDataset
from cirrocumulus.prepare_data import PrepareData
from cirrocumulus.zarr_dataset import ZarrDataset


def read_and_diff(ds_reader, path, test_data, measures, dimensions, continuous_obs, basis):
    """Read a prepared dataset back with *ds_reader* and assert it matches *test_data*.

    Compares X (densifying if sparse), the requested obs columns, and the
    embedding stored under *basis*; also checks that a key-less read still
    reports the right number of observations.
    """
    dataset = dict(id='')
    fs = fsspec.filesystem('file')
    prepared_adata = ds_reader.read_dataset(
        filesystem=fs, path=path, dataset=dataset,
        keys=dict(X=measures, obs=dimensions + continuous_obs, basis=[basis]))
    # Sparsity must round-trip; densify both sides before comparing values.
    assert scipy.sparse.issparse(test_data.X) == scipy.sparse.issparse(prepared_adata.X)
    if scipy.sparse.issparse(test_data.X):
        test_data.X = test_data.X.toarray()
        prepared_adata.X = prepared_adata.X.toarray()
    np.testing.assert_equal(test_data.X, prepared_adata.X)
    for key in dimensions + continuous_obs:
        pd.testing.assert_series_equal(test_data.obs[key], prepared_adata.obs[key],
                                       check_index=False, check_flags=False)
    np.testing.assert_equal(prepared_adata.obsm[basis], test_data.obsm[basis])
    # ensure shape is correct when reading with no keys
    prepared_adata2 = ds_reader.read_dataset(filesystem=fs, path=path, dataset=dataset, keys=dict())
    assert prepared_adata2.shape[0] == test_data.shape[0]


def test_prepare_cxg_tile_db(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
    # Report a real pytest skip when TileDB support is absent. The original
    # wrapped the WHOLE test body in try/except ModuleNotFoundError and just
    # printed a message, so any missing module anywhere inside the body made
    # the test silently pass.
    tiledb_dataset = pytest.importorskip('cirrocumulus.tiledb_dataset')
    output_dir = str(tmp_path)
    test_data = test_data[:, measures]
    test_data.obs = test_data.obs[dimensions + continuous_obs]
    output_cxg = os.path.join(output_dir, 'test.cxg')
    output_h5ad = os.path.join(output_dir, 'test.h5ad')
    test_data.write(output_h5ad)
    # Convert the h5ad to cxg with the cellxgene CLI, then read it back.
    subprocess.check_call(['cellxgene', 'convert', '-o', output_cxg,
                           '--disable-corpora-schema', output_h5ad])
    read_and_diff(tiledb_dataset.TileDBDataset(), output_cxg, test_data,
                  measures, dimensions, continuous_obs, basis)


def test_prepare_join_obs_index(test_data, tmp_path):
    """Joining datasets whose obs indices disagree must raise ValueError."""
    output_dir = str(tmp_path)
    test_data2 = test_data.copy()
    test_data2 = test_data2[[0, 1, 2]]
    with pytest.raises(ValueError):
        PrepareData(datasets=[test_data, test_data2], output=output_dir)


@pytest.mark.parametrize("file_format", ['zarr', 'parquet'])
def test_prepare(test_data, measures, dimensions, continuous_obs, basis, file_format, tmp_path):
    file_format2ext = dict(parquet='.cpq', zarr='.zarr')
    # The extension already carries the dot; the original 'test.{}' format
    # produced directory names like 'test..cpq'.
    output_dir = str(tmp_path / 'test{}'.format(file_format2ext[file_format]))
    test_data = test_data[:, measures]
    test_data.obs = test_data.obs[dimensions + continuous_obs]
    prepare_data = PrepareData(datasets=[test_data], output=output_dir, output_format=file_format)
    prepare_data.execute()
    if file_format == 'parquet':
        reader = ParquetDataset()
    elif file_format == 'zarr':
        reader = ZarrDataset()
    read_and_diff(reader, output_dir, test_data, measures, dimensions, continuous_obs, basis)


def test_prepare_jsonl(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
    """Smoke test: preparing to the jsonl format executes without error."""
    output_dir = str(tmp_path)
    test_data = test_data[:, measures]
    test_data.obs = test_data.obs[dimensions + continuous_obs]
    prepare_data = PrepareData(datasets=[test_data],
                               output=os.path.join(output_dir, 'test.jsonl'),
                               output_format='jsonl')
    prepare_data.execute()
{"hexsha": "e5529a3dbdd3da8ecdf66b520f0b955e045f72bf", "size": 3686, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_prepare_data.py", "max_stars_repo_name": "PfizerRD/cirrocumulus", "max_stars_repo_head_hexsha": "c7ce0c8c3c246282046e6d373d60442af55d3f09", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2019-11-08T15:41:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T23:07:07.000Z", "max_issues_repo_path": "tests/test_prepare_data.py", "max_issues_repo_name": "PfizerRD/cirrocumulus", "max_issues_repo_head_hexsha": "c7ce0c8c3c246282046e6d373d60442af55d3f09", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2020-08-01T20:11:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T14:56:25.000Z", "max_forks_repo_path": "tests/test_prepare_data.py", "max_forks_repo_name": "PfizerRD/cirrocumulus", "max_forks_repo_head_hexsha": "c7ce0c8c3c246282046e6d373d60442af55d3f09", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-18T08:45:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T23:08:20.000Z", "avg_line_length": 44.4096385542, "max_line_length": 114, "alphanum_fraction": 0.7013022246, "include": true, "reason": "import numpy,import scipy", "num_tokens": 846}
import numpy as np
import dace as dc

# Symbolic problem sizes: N observations (rows) x M variables (columns).
M, N = (dc.symbol(s, dtype=dc.int64) for s in ('M', 'N'))


@dc.program
def kernel(float_n: dc.float64, data: dc.float64[N, M]):
    """Polybench 'correlation' kernel (DaCe version).

    Standardizes `data` in place (subtract column means, divide by
    sqrt(float_n) * column stddev) and returns the M x M correlation
    matrix of its columns.
    """
    mean = np.mean(data, axis=0)
    # stddev = np.std(data, axis=0)
    stddev = np.sqrt(np.mean(np.subtract(data, mean)**2, axis=0))
    # Polybench convention: treat a near-zero stddev as 1.0 so constant
    # columns do not produce a division blow-up.
    stddev[stddev <= 0.1] = 1.0
    # data -= mean
    np.subtract(data, mean, out=data)
    # data /= np.sqrt(float_n) * stddev
    np.divide(data, np.sqrt(float_n) * stddev, out=data)
    # Diagonal is 1 by construction (each column correlates with itself).
    corr = np.eye(M, dtype=data.dtype)
    for i in range(M - 1):
        # corr[i, i+1:M] = np.transpose(data[:, i+1:M]) @ data[:, i]
        corr[i, i + 1:M] = data[:, i] @ data[:, i + 1:M]
        # Correlation is symmetric; mirror the upper triangle.
        corr[i + 1:M, i] = corr[i, i + 1:M]
    return corr
{"hexsha": "00761bd4d1bb5614c688927c5c6832ddc2432a3d", "size": 742, "ext": "py", "lang": "Python", "max_stars_repo_path": "npbench/benchmarks/polybench/correlation/correlation_dace.py", "max_stars_repo_name": "frahlg/npbench", "max_stars_repo_head_hexsha": "1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2021-05-10T11:49:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T18:07:19.000Z", "max_issues_repo_path": "npbench/benchmarks/polybench/correlation/correlation_dace.py", "max_issues_repo_name": "frahlg/npbench", "max_issues_repo_head_hexsha": "1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-01T13:03:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T10:53:00.000Z", "max_forks_repo_path": "npbench/benchmarks/polybench/correlation/correlation_dace.py", "max_forks_repo_name": "frahlg/npbench", "max_forks_repo_head_hexsha": "1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-06-24T03:40:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T09:04:33.000Z", "avg_line_length": 29.68, "max_line_length": 68, "alphanum_fraction": 0.5579514825, "include": true, "reason": "import numpy", "num_tokens": 255}
import time
import numpy as np
from spectra_gen import *
from to_rank import *
from utils import *
from datetime import datetime
from mask import *
from torch import sigmoid, tensor
import os


def to_explain(eobj):
    """Explain each input in `eobj.inputs` with spectrum-based fault
    localization (SFL).

    For every input: generate passing/failing mutant tests, and when the
    failing fraction lies inside (eobj.adv_lb, eobj.adv_ub), rank the input
    features with each measure in `eobj.measures`; rankings, spectra and
    (for image inputs) heatmaps are written under `eobj.outputs`.
    """
    print('\n[To explain: SFL (Software Fault Localization) is used]')
    print(' ### [Measures: {0}]'.format(eobj.measures))
    model = eobj.model

    ## to create output DI
    di = eobj.outputs
    sub_mat = None
    # os.makedirs replaces the original os.system('mkdir -p ...'), which
    # passed user-controlled paths through a shell (injection-prone).
    os.makedirs(di, exist_ok=True)

    if eobj.boxes is not None:
        # Header for weakly-supervised object localization results.
        with open(di + "/wsol-results.txt", "a") as f:
            f.write('input_name x_method intersection_with_groundtruth\n')

    if eobj.immunobert:
        # ImmunoBERT mutants are generated by amino-acid substitution;
        # load the BLOSUM substitution matrix once, up front.
        blosum_path = os.path.dirname(os.path.realpath(__file__)) + os.sep + '..' + os.sep + 'blosum.txt'
        sub_mat = substitution_matrix(blosum_path)

    for i, x in enumerate(eobj.inputs):
        y = eobj.predict(x)
        print('\n[Input {2}: {0} / Output Label (to Explain): {1}]'.format(eobj.fnames[i], y, i))

        ite = 0
        reasonable_advs = False
        # Number of test generation iterations. Default 1.
        while ite < eobj.testgen_iter:
            print(' #### [Start generating SFL spectra...]')
            start = time.time()
            ite += 1
            if eobj.immunobert:
                if eobj.dynamic:
                    passing, failing = spectra_gen_dynamic_immunobert(
                        eobj, x, y, substitution_matrix=sub_mat,
                        testgen_size=eobj.testgen_size)
                else:
                    passing, failing, indexes = spectra_gen_immunobert(
                        eobj, x, y, mask_sizes=[1, 2, 3],
                        substitution_matrix=sub_mat,
                        testgen_size=eobj.testgen_size)
            else:
                passing, failing = spectra_sym_gen(
                    eobj, x, y, substitution_matrix=sub_mat,
                    adv_value=eobj.adv_value,
                    testgen_factor=eobj.testgen_factor,
                    testgen_size=eobj.testgen_size)

            num_advs = len(failing)
            adv_xs = []
            adv_ys = []
            # Label passing tests 0 and failing ("adversarial") tests -1.
            for e in passing:
                adv_xs.append(e)
                adv_ys.append(0)
            for e in failing:
                adv_xs.append(e)
                adv_ys.append(-1)
            tot = len(adv_xs)
            print('passing: ' + str(len(passing)))
            print('failing: ' + str(len(failing)))
            adv_part = num_advs * 1. / tot
            end = time.time()
            print(' #### [SFL spectra generation DONE: passing {0:.2f} / failing {1:.2f}, total {2}; time: {3:.0f} seconds]'.format(1 - adv_part, adv_part, tot, end - start))

            # An explanation is attempted only when the failing fraction is
            # inside the (adv_lb, adv_ub) window; otherwise retry generation.
            if adv_part <= eobj.adv_lb:
                print(' #### [too few failing tests: SFL explanation aborts]')
                continue
            elif adv_part >= eobj.adv_ub:
                # Original message read 'too few many tests'.
                print(' #### [too many failing tests: SFL explanation aborts]')
                continue
            else:
                reasonable_advs = True
                break

        if not reasonable_advs:
            continue

        ## to obtain the ranking for Input i
        # set y to 0 bacause in adv_ys, 0 if "passing" and -1 otherwise
        selement = sbfl_elementt(x, 0, adv_xs, adv_ys, model, eobj.fnames[i],
                                 immunobert=eobj.immunobert)

        dii = di + '/' + str(eobj.fnames[i])
        os.makedirs(dii, exist_ok=True)
        # Save the instance so we know which part is peptide, hla etc.
        np.save(dii + '/instance', x)

        for measure in eobj.measures:
            print(' #### [Measuring: {0} is used]'.format(measure))
            ranking_i, spectrum = to_rank(selement, measure)
            diii = dii + '/{0}'.format(measure)
            print(' #### [Saving: {0}]'.format(diii))
            os.makedirs(diii, exist_ok=True)
            np.savetxt(diii + '/ranking.txt', ranking_i, fmt='%s')
            save_spectrum(spectrum, diii + '/spectrum.txt')

            if not eobj.immunobert:
                # to plot the heatmap: scale the spectrum to [0, 255] and
                # overlay a JET colormap on the (channel-expanded) input.
                spectrum = np.array((spectrum / spectrum.max()) * 255)
                gray_img = np.array(spectrum[:, :, 0], dtype='uint8')
                heatmap_img = cv2.applyColorMap(gray_img, cv2.COLORMAP_JET)
                if x.shape[2] == 1:
                    x3d = np.repeat(x[:, :, 0][:, :, np.newaxis], 3, axis=2)
                else:
                    x3d = x
                fin = cv2.addWeighted(heatmap_img, 0.7, x3d, 0.3, 0)
                plt.rcParams["axes.grid"] = False
                plt.imshow(cv2.cvtColor(fin, cv2.COLOR_BGR2RGB))
                plt.savefig(diii + '/heatmap_{0}.png'.format(measure))

            # to plot the top ranked pixels
            if not eobj.text_only:
                ret = top_plot(selement, ranking_i, diii, measure, eobj)
                if eobj.boxes is not None:
                    with open(di + "/wsol-results.txt", "a") as f:
                        f.write('{0} {1} {2}\n'.format(eobj.fnames[i], measure, ret))
{"hexsha": "a75e5aa7894a0ccc068b0d11bf12977f75e50459", "size": 8201, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/to_explain.py", "max_stars_repo_name": "gwenty/deepcover", "max_stars_repo_head_hexsha": "5cd4c9c0b60585bf30cf548c119f48d6faeb611f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/to_explain.py", "max_issues_repo_name": "gwenty/deepcover", "max_issues_repo_head_hexsha": "5cd4c9c0b60585bf30cf548c119f48d6faeb611f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/to_explain.py", "max_forks_repo_name": "gwenty/deepcover", "max_forks_repo_head_hexsha": "5cd4c9c0b60585bf30cf548c119f48d6faeb611f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3297297297, "max_line_length": 175, "alphanum_fraction": 0.5032313133, "include": true, "reason": "import numpy", "num_tokens": 3653}
# NOTE(review): this module does not parse — it contains multiple independent
# syntax errors and undefined names, each flagged inline below. The code is
# reproduced unchanged; only review comments have been added.
from dataclasses import dataclass, field
from operator import setitem
from isu.models.step import Step
from isu.models.section import Section
from isu.models.audio import Audio, SoundBite
from pathlib import Path
from PIL import Image
import cv2
import numpy as np
from enum import Enum
from typing import List, Optional, Tuple, Dict
from PySide6.QtCore import (
    QItemSelection, QItemSelectionModel, QItemSelectionRange,
    QObject, Signal, Slot, QEnum,
    QRect, QRectF, QSize, QSizeF, QPoint, QPointF, QLine, QLineF,
    QSaveFile, QThread, QBuffer, QUuid, QUrl, QDir, QEnum,
    QEasingCurve, QTime, QTimeLine, QTimer, QTimerEvent, QElapsedTimer,
    QSequentialAnimationGroup, QAnimationGroup, QParallelAnimationGroup,
)


@dataclass
class Time(object):
    """
    Enum for the time of the animation.
    """
    # elapsed = QElapsedTimer()
    t: float
    end: bool = False

    def __init__(self, t: float = 0.0, end: bool = False):
        self.t = t
        # NOTE(review): syntax error — trailing dot after the call.
        self.elapsed = QElapsedTimer().
        self.end = end


# NOTE(review): @QEnum is meant for enums nested in a QObject class, and an
# Enum cannot also inherit QItemSelection — this class definition is broken.
@QEnum
class EasingFn(Enum, QItemSelection):
    """
    An easing function.
    """
    LINEAR = 0
    EASE_IN = 1
    EASE_OUT = 2
    EASE_IN_OUT = 3
    QUADRATIC = 4
    CUBIC = 5
    QUARTIC = 6


@dataclass
class Easing(QObject):
    # Inner value class: a straight-line animation path sampled into frames.
    @dataclass
    class Path(object):
        pos: QPointF
        frame_i: int = 0
        time_i: float = 0.0
        easing: QEasingCurve.Type = QEasingCurve.Type.Linear
        # NOTE(review): default_factory must be a zero-arg callable; this
        # calls QLineF immediately (and with tuples, which QLineF rejects).
        path: QLineF = field(default_factory=QLineF((0.0, 0.0), (0.0, 0.0)))
        frames: List[QPointF] = field(default_factory=list)
        time_max: None | float = 1.0

        def __init__(
            self,
            p1=QPointF(0.0, 0.0),
            p2=QPointF(0.0, 0.0),
            res=QSize(1920, 1080),
            fps=24.0,
            ease=QEasingCurve.Type.Linear,
            tmax: None | float = 1.0,
        ) -> None:
            # NOTE(review): calls self.from_line, but the method below is
            # named from_ln — AttributeError at runtime.
            self.from_line(QLineF(p1, p2), res, fps, ease, tmax)

        def from_ln(self, line: QLineF, res: QSize, fps: float,
                    ease: QEasingCurve.Type, tmax: float | None = None
                    ):
            self.start()
            # NOTE(review): `ln` is undefined here — the parameter is `line`.
            self.path = ln
            self.res = res
            self.fps = fps
            self.frames = [ln.p1(), ln.p2()]
            self.easing = ease
            self.time_max: None | float = tmax

        def from_pts(self, p1: QPointF, p2: QPointF, res: QSize, fps=24.0,
                     tmax: float | None = None
                     ):
            ln = QLineF(p1, p2)
            self.from_ln(ln, res, fps, QEasingCurve.Type.Linear, tmax)

        # NOTE(review): decorated @classmethod but written against `self`
        # instance state — resets the playback cursor on the class object.
        @classmethod
        def start(self) -> None:
            self.frame_i: int = 0
            self.time_i: float = 0.0

        @classmethod
        def end(self) -> None:
            self.frame_i = self.frame_len()
            self.time_i = self.time_len()

        @property
        def p1(self) -> QPointF:
            return self.path.p1()

        @property
        def p2(self) -> QPointF:
            return self.path.p2()

        @property
        def y2(self) -> float:
            return self.path.y2()

        @property
        def x2(self) -> float:
            return self.path.x2()

        @property
        def y1(self) -> float:
            return self.path.y1()

        @property
        def x1(self) -> float:
            return self.path.x1()

        def point_at(self, pct: float) -> QPointF:
            return self.path.pointAt(pct)

        def init_frames(self) -> List[QPointF]:
            return []

        def frame_len(self) -> int:
            " The length of the animation in frames "
            return len(self.frames)

        def time_len(self) -> float:
            " The length of the animation in seconds "
            return round(self.frame_len() / self.fps)

        # NOTE(review): `@property.setter(self, orig)` is not valid Python —
        # `property.setter` is unbound and `self`/`orig` are undefined at
        # class-body scope. The same applies to every decorator below, and
        # the bodies assign the undefined name `value`.
        @property.setter(self, orig)
        def set_orig(self, o: QPointF) -> None:
            self.orig = value

        @property.setter(self, dest)
        def set_dest(self, d: QPointF) -> None:
            self.dest = value

        @property.setter(self, time_i)
        def set_time(self, t: float) -> None:
            self.time_i = t

        @property.setter(self, frame_i)
        def set_idx(self, i: int) -> None:
            self.frame_i = i

        @property.getter(self, index)
        def index(self) -> int:
            return self.frame_i

        @property.getter(self, time)
        def time(self) -> float:
            return self.time_i

        @property.setter(self, pos)
        def set_pos(self, p: QPointF) -> None:
            self.pos = p

        @property.setter(self, value)
        def set_time_max(self, tmax: float) -> None:
            self.time_max = tmax

        def pct_to_time(self, pct: float) -> float:
            return pct * self.time_len()

        def pct_path(self, pct: float) -> float:
            # NOTE(review): QLineF.center takes no argument; presumably
            # pointAt(pct) was intended — confirm.
            return self.path.center(pct)

        def from_sect(self, s: Section, fps=24.0, tmax: float|None = 1.0,
                      ease=QEasingCurve.Type.Linear
                      ):
            self.frames = []
            step_n = len(s.steps)
            for i, step in enumerate(s.steps):
                if i == 0:
                    self.frames.append(QPointF(step.mouse_x, step.mouse_y))
                    self.frames.append(QPointF(step.mouse_x, step.mouse_y))
                else:
                    self.from_pts(step.pos, s.steps[i-1].pos, s.res, fps, tmax)
            # NOTE(review): `sect` is undefined (parameter is `s`), and
            # from_pts does not accept an `ease` argument.
            self.from_pts(sect.p1, sect.p2, sect.res, fps, tmax, ease)

        def from_steps(
            self,
            s1: Step,
            n2: Step,
            fps=24.0,
            tmax: float | None = 1.0,
            ease=QEasingCurve.Type.Linear
        ) -> None:
            # NOTE(review): the body uses `s2` and `n1`, but the parameters
            # are named `s1` and `n2` — NameError at runtime. `h2.hover` is
            # also read before `h2` is bound.
            ln = QLineF((s1.mouse_x, s1.mouse_y), (s2.mouse_x, s2.mouse_y))
            res = Image.open(n1.img).size
            d1, d2 = s1.get_delay(), s2.get_delay()
            a1, a2 = s1.animated, s2.animated
            mh1 = QPointF(s1.mouse_hover[0], s1.mouse_hover[1])
            hm1 = s1.has_mouse
            mp1, mp2 = s1.audio, s2.audio
            h1, h2 = s1.hover, h2.hover
            ht1, ht2 = s1.hover_time, s2.hover_time
            t1, t2 = s1.transition, s2.transition
            self.from_ln(line=ln, res=res, fps=fps, ease=ease, tmax=None)

        @classmethod
        def from_p1(self, p1: QPointF = QPointF(0.0, 0.0), tmax: float = 1.0):
            self.idx: int = 0
            self.time: float = 0.0
            self.easing = QEasingCurve.Type.Linear
            self.time_max: None | float = tmax
            self.pos = QPointF(p1)
            self.frames: List[QPointF] = [p1]

        def dist(self) -> float:
            return self.path.length()

        def center(self) -> QPointF:
            return self.path.center()

        # NOTE(review): __append__ and __pop__ are not recognized dunder
        # protocols; list.insert/list.pop take positional args, not
        # index=/obj= keywords.
        def __append__(self, p: QPointF) -> None:
            self.frames.insert(index=len(self.frames)-1, obj=p)

        def __pop__(self) -> QPointF:
            return self.frames.pop(0)

        def __getitem__(self, i: int) -> QPointF:
            return self.frames[i]

        def __len__(self) -> int:
            return self.frame_len()

        def __str__(self) -> str:
            return self.__repr__()

        def __repr__(self) -> str:
            return f"{self.__class__.__name__}: " + \
                f"(frame_i={self.frame_i}, time_i={self.time_i}, " + \
                f"p1={self.p1}, p2={self.p2}, pos={self.pos}" + \
                f"res={self.res}, fps={self.fps}, easing={self.easing})" + \
                f" time_max={self.time_max}, frames={self.frames})"

        def __setitem__(self, i: int, p: QPointF) -> None:
            self.frames[i] = p

        def __next__(self) -> QPointF:
            # NOTE(review): syntax errors — the first `if` is missing its
            # colon and the second `if` has an empty body before `else`.
            self.idx += 1
            if self.frame_i < len(self.frames)
                if self.time_i < self.time_max:
                else:
                    self.idx += 1
                    self.time = self.time + 1.0/self.fps
                    self.idx = 0
            return self.frames[self.idx]

        def __iter__(self) -> bool:
            pass

    # Builder for Path objects: collect endpoints/res/fps/easing, then build().
    @dataclass
    class Builder(QObject):
        p1: None | QPointF = None
        p2: None | QPointF = None
        fps: float = 24.0
        res: QSize = QSize(0, 0)
        easing: QEasingCurve | None = None
        path: QLineF | None = None
        frames: List[QPointF] = field(default_factory=list)
        time_max: float | None = None

        @classmethod
        def default(self) -> None:
            self.path = QLineF(QPointF(0.0, 0.0), QPointF(0.0, 0.0))
            self.pos = self.path.p2()
            self.res = QSize(1920, 1080)
            self.fps = 24.0
            self.easing = QEasingCurve.Type.Linear
            self.time_max = 1.0
            self.frames = [self.p1, self.p2]

        def build(self) -> Path:
            if self.easing is None:
                raise ValueError("easing is None")
            if self.path is None:
                if self.p1 is None:
                    raise ValueError("p1 is None - and no path")
                if self.p2 is None:
                    raise ValueError("p2 is None - and no path")
                if self.time_max is None:
                    raise ValueError("time_max is None")
                if self.easing is None:
                    self.easing = QEasingCurve.Type.Linear
                ln, e = QLineF(self.p1, self.p2), self.easing
            else:
                # NOTE(review): in this branch `e` is never bound, yet it is
                # used below — NameError when a path is supplied.
                ln = self.path
            rs, fp, tm = self.res, self.fps, self.time_max
            # NOTE(review): Path.from_ln is an instance method and returns
            # None; calling it on the class passes `ln` as `self`.
            return Path.from_ln(ln, rs, fp, e, tm)

    easing: QEasingCurve.Type = QEasingCurve.Type.Linear
    path: Path = Path()
    orig: QPointF = QPointF(0.0, 0.0)
    dest: QPointF = QPointF(1920.0, 1080.0)
    t_lim: Optional[float] = None

    def t_at_pos(self, pos: QPointF = QPointF(0.0, 0.0)) -> float:
        """
        Get the time of the easing function at position pos.
        """
        # NOTE(review): everything after this return is dead code, and
        # `self.curr_pos` / `self.curr_t` are never defined on Easing.
        return self.curr_pos
        t: float = 0.0
        match self:
            case EasingFn.LINEAR:
                return pos
            case EasingFn.EASE_IN:
                return pos * pos
            case EasingFn.EASE_OUT:
                return t * (2.0 - t)
            case EasingFn.EASE_IN_OUT:
                return self.curr_t * self.curr_t * (2.0 - self.curr_t)
            case EasingFn.QUADRATIC:
                pass
            case EasingFn.CUBIC:
                pass
            case EasingFn.QUARTIC:
                pass
        return t

    def pos_at_t(self, t: float = 0.0) -> QPointF:
        """
        Get the position of the easing function at time t.
        """
        # NOTE(review): declared to return QPointF but every case returns a
        # float; `self.easing_fn` is also never defined (field is `easing`).
        p: QPointF = QPointF(0.0, 0.0)
        match self.easing_fn:
            case EasingFn.LINEAR:
                return t
            case EasingFn.EASE_IN:
                return t * t
            case EasingFn.EASE_OUT:
                return t * (2 - t)
            case EasingFn.EASE_IN_OUT:
                if t < 0.5:
                    return 2 * t * t
                else:
                    return -2 * t * t + 4 * t - 1
            case EasingFn.QUADRATIC:
                return t * t
            case EasingFn.CUBIC:
                return t * t * t
            case EasingFn.QUARTIC:
                return t * t * t * t
        return p
{"hexsha": "9959cee9112186300b83f299c1cab0fc4c02ff99", "size": 11669, "ext": "py", "lang": "Python", "max_stars_repo_path": "isu/models/actions/animation.py", "max_stars_repo_name": "pecusys/isutils", "max_stars_repo_head_hexsha": "39fa92dc391cc430dcf1864f4c2f0212f0db58b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "isu/models/actions/animation.py", "max_issues_repo_name": "pecusys/isutils", "max_issues_repo_head_hexsha": "39fa92dc391cc430dcf1864f4c2f0212f0db58b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "isu/models/actions/animation.py", "max_forks_repo_name": "pecusys/isutils", "max_forks_repo_head_hexsha": "39fa92dc391cc430dcf1864f4c2f0212f0db58b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4528301887, "max_line_length": 79, "alphanum_fraction": 0.4995286657, "include": true, "reason": "import numpy", "num_tokens": 2949}
# Test suite for LightGraphsMatching.jl. The MILP-based matchings use the
# Cbc solver through JuMP; expected mates/weights below are exact optima
# for the small hand-built graphs.
using LightGraphs
using LightGraphsMatching
using Test
using Cbc
using JuMP
using LinearAlgebra: I

@testset "LightGraphsMatching" begin

    @testset "maximum_weight_matching" begin
        # Triangle with one dominant edge (1,3): optimal matching picks it.
        g = complete_graph(3)
        w = [
            1 2 1
            1 1 1
            3 1 1
        ]
        match = maximum_weight_matching(g, with_optimizer(Cbc.Optimizer, logLevel=0), w)
        @test match.mate[1] == 3
        @test match.weight ≈ 3

        # All usable edges weigh 1 — any single edge is optimal.
        g = complete_graph(3)
        w = zeros(3,3)
        w[1,2] = 1
        w[3,2] = 1
        w[1,3] = 1
        match = maximum_weight_matching(g,with_optimizer(Cbc.Optimizer, logLevel=0),w)
        @test match.weight ≈ 1

        # Sparse graph: the heavy edge (1,4) beats the pair (1,3)+(2,4)? No —
        # these expectations pin the solver's optimum; unmatched vertices get -1.
        g = Graph(4)
        add_edge!(g, 1,3)
        add_edge!(g, 1,4)
        add_edge!(g, 2,4)
        w =zeros(4,4)
        w[1,3] = 1
        w[1,4] = 3
        w[2,4] = 1
        match = maximum_weight_matching(g,with_optimizer(Cbc.Optimizer, logLevel=0),w)
        @test match.weight ≈ 3
        @test match.mate[1] == 4
        @test match.mate[2] == -1
        @test match.mate[3] == -1
        @test match.mate[4] == 1

        # Default (unit) weights: a perfect matching of two edges wins.
        g = Graph(4)
        add_edge!(g, 1,2)
        add_edge!(g, 2,3)
        add_edge!(g, 3,1)
        add_edge!(g, 3,4)
        match = maximum_weight_matching(g,with_optimizer(Cbc.Optimizer, logLevel=0))
        @test match.weight ≈ 2
        @test match.mate[1] == 2
        @test match.mate[2] == 1
        @test match.mate[3] == 4
        @test match.mate[4] == 3

        # Same graph, explicit unit-like weights — same optimum.
        w = zeros(4,4)
        w[1,2] = 1
        w[2,3] = 1
        w[1,3] = 1
        w[3,4] = 1
        match = maximum_weight_matching(g,with_optimizer(Cbc.Optimizer, logLevel=0), w)
        @test match.weight ≈ 2
        @test match.mate[1] == 2
        @test match.mate[2] == 1
        @test match.mate[3] == 4
        @test match.mate[4] == 3

        # One edge (1,3) heavy enough to beat any two-edge matching.
        w = zeros(4,4)
        w[1,2] = 1
        w[2,3] = 1
        w[1,3] = 5
        w[3,4] = 1
        match = maximum_weight_matching(g,with_optimizer(Cbc.Optimizer, logLevel=0),w)
        @test match.weight ≈ 5
        @test match.mate[1] == 3
        @test match.mate[2] == -1
        @test match.mate[3] == 1
        @test match.mate[4] == -1
    end

    @testset "maximum_weight_maximal_matching" begin
        # LP algorithm on K(2,2): pairs (1,3) and (2,4) give weight 21.
        g = complete_bipartite_graph(2,2)
        w = zeros(4,4)
        w[1,3] = 10.
        w[1,4] = 1.
        w[2,3] = 2.
        w[2,4] = 11.
        match = maximum_weight_maximal_matching(g, w, algorithm=LPAlgorithm(), solver=with_optimizer(Cbc.Optimizer, logLevel=0))
        @test match.weight ≈ 21
        @test match.mate[1] == 3
        @test match.mate[3] == 1
        @test match.mate[2] == 4
        @test match.mate[4] == 2

        # Maximality forces both left vertices to be matched, so the
        # optimum is (2,3)+(1,4) = 11.5, not just the heavy edge (2,3).
        g =complete_bipartite_graph(2,4)
        w =zeros(6,6)
        w[1,3] = 10
        w[1,4] = 0.5
        w[2,3] = 11
        w[2,4] = 1
        match = maximum_weight_maximal_matching(g, w, algorithm=LPAlgorithm(), solver=with_optimizer(Cbc.Optimizer, logLevel=0))
        @test match.weight ≈ 11.5
        @test match.mate[1] == 4
        @test match.mate[4] == 1
        @test match.mate[2] == 3
        @test match.mate[3] == 2

        # cutoff=0 discards non-positive-weight edges before matching.
        g =complete_bipartite_graph(2,6)
        w =zeros(8,8)
        w[1,3] = 10
        w[1,4] = 0.5
        w[2,3] = 11
        w[2,4] = 1
        w[2,5] = -1
        w[2,6] = -1
        match = maximum_weight_maximal_matching(g, w, algorithm=LPAlgorithm(), solver=with_optimizer(Cbc.Optimizer, logLevel=0), cutoff=0)
        @test match.weight ≈ 11.5
        @test match.mate[1] == 4
        @test match.mate[4] == 1
        @test match.mate[2] == 3
        @test match.mate[3] == 2

        # Larger left side than right; vertices 3 and 4 stay unmatched (-1).
        g =complete_bipartite_graph(4,2)
        w = zeros(6,6)
        w[3,5] = 10
        w[3,6] = 0.5
        w[2,5] = 11
        w[1,6] = 1
        w[1,5] = -1
        match = maximum_weight_maximal_matching(g, w, algorithm=LPAlgorithm(), solver=with_optimizer(Cbc.Optimizer, logLevel=0), cutoff=0)
        @test match.weight ≈ 12
        @test match.mate[1] == 6
        @test match.mate[2] == 5
        @test match.mate[3] == -1
        @test match.mate[4] == -1
        @test match.mate[5] == 2
        @test match.mate[6] == 1

        # Hungarian algorithm reproduces the LP result on K(2,2).
        g = complete_bipartite_graph(2, 2)
        w = zeros(4, 4)
        w[1, 3] = 10.
        w[1, 4] = 1.
        w[2, 3] = 2.
        w[2, 4] = 11.
        match = maximum_weight_maximal_matching(g, w, algorithm=HungarianAlgorithm())
        @test match.weight ≈ 21
        @test match.mate[1] == 3
        @test match.mate[3] == 1
        @test match.mate[2] == 4
        @test match.mate[4] == 2

        # The Hungarian algorithm requires a bipartite graph.
        g = complete_graph(3)
        w = zeros(3, 3)
        @test ! is_bipartite(g)
        @test_throws ErrorException maximum_weight_maximal_matching(g, w, algorithm=HungarianAlgorithm())

        g = complete_bipartite_graph(2, 4)
        w = zeros(6, 6)
        w[1, 3] = 10
        w[1, 4] = 0.5
        w[2, 3] = 11
        w[2, 4] = 1
        match = maximum_weight_maximal_matching(g, w, algorithm=HungarianAlgorithm())
        @test match.weight ≈ 11.5

        # Hungarian algorithm on a sparse (non-complete) bipartite graph.
        g = Graph(4)
        add_edge!(g, 1, 3)
        add_edge!(g, 1, 4)
        add_edge!(g, 2, 4)
        w = zeros(4, 4)
        w[1, 3] = 1
        w[1, 4] = 3
        w[2, 4] = 1
        match = maximum_weight_maximal_matching(g, w, algorithm=HungarianAlgorithm())
        @test match.weight ≈ 2
    end

    @testset "minimum_weight_perfect_matching" begin
        # Trivial two-vertex case.
        w = Dict(Edge(1,2)=> 500)
        g =Graph(2)
        add_edge!(g,1,2)
        match = minimum_weight_perfect_matching(g, w)
        @test match.mate[1] == 2

        # K4: cheapest perfect matching is (1,2)+(3,4) = 600.
        w=Dict( Edge(1,2)=>500, Edge(1,3)=>600, Edge(2,3)=>700, Edge(3,4)=>100, Edge(2,4)=>1000)
        g = complete_graph(4)
        match = minimum_weight_perfect_matching(g, w)
        @test match.mate[1] == 2
        @test match.mate[2] == 1
        @test match.mate[3] == 4
        @test match.mate[4] == 3
        @test match.weight ≈ 600

        # Different weights force the other pairing (1,3)+(2,4) = 1400.
        w = Dict(
            Edge(1, 2) => 500,
            Edge(1, 3) => 400,
            Edge(2, 3) => 300,
            Edge(3, 4) => 1000,
            Edge(2, 4) => 1000
        )
        g = complete_graph(4)
        match = minimum_weight_perfect_matching(g, w)
        @test match.mate[1] == 3
        @test match.mate[2] == 4
        @test match.mate[3] == 1
        @test match.mate[4] == 2
        @test match.weight ≈ 1400

        # Negative weights: minimum perfect matching = maximum of negated.
        g =complete_bipartite_graph(2,2)
        w =Dict{Edge,Float64}()
        w[Edge(1,3)] = -10
        w[Edge(1,4)] = -0.5
        w[Edge(2,3)] = -11
        w[Edge(2,4)] = -1
        match = minimum_weight_perfect_matching(g, w)
        @test match.mate[1] == 4
        @test match.mate[4] == 1
        @test match.mate[2] == 3
        @test match.mate[3] == 2
        @test match.weight ≈ -11.5

        # With a cutoff (50): edges above it are excluded from consideration.
        g = complete_graph(4)
        w = Dict{Edge,Float64}()
        w[Edge(1,3)] = 10
        w[Edge(1,4)] = 0.5
        w[Edge(2,3)] = 11
        w[Edge(2,4)] = 2
        w[Edge(1,2)] = 100
        match = minimum_weight_perfect_matching(g, w, 50)
        @test match.mate[1] == 4
        @test match.mate[4] == 1
        @test match.mate[2] == 3
        @test match.mate[3] == 2
        @test match.weight ≈ 11.5
    end

end
{"hexsha": "97e426f83e53a3da57fce7fc52df04f4b6d0b05c", "size": 6446, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/LightGraphsMatching.jl-160ba089-64bf-5ba7-9e14-98ab1d9bcb0a", "max_stars_repo_head_hexsha": "80f15d9bc165c65b0db138fff7eaafe8b4ba6d76", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-01-10T17:53:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T21:36:18.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/LightGraphsMatching.jl-160ba089-64bf-5ba7-9e14-98ab1d9bcb0a", "max_issues_repo_head_hexsha": "80f15d9bc165c65b0db138fff7eaafe8b4ba6d76", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2017-12-29T15:57:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-07T16:30:42.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/LightGraphsMatching.jl-160ba089-64bf-5ba7-9e14-98ab1d9bcb0a", "max_forks_repo_head_hexsha": "80f15d9bc165c65b0db138fff7eaafe8b4ba6d76", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-01-19T15:21:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T19:28:04.000Z", "avg_line_length": 24.6973180077, "max_line_length": 134, "alphanum_fraction": 0.5628296618, "num_tokens": 2627}
# coding=utf-8 # 导入自己的函数包d2lzh_pytorch,注意要先将目标包的父路径添加到系统路径中 import sys sys.path.append(r".") from d2lzh_pytorch import data_process from d2lzh_pytorch import layers from d2lzh_pytorch import train from collections import OrderedDict import torch.nn as nn import numpy as np import torch from torch.nn import init """ 这一节介绍了如何用torch的神经网络模组来实现3.6的softmax回归 """ # 初始化参数和读取数据 batch_size = 256 train_iter, test_iter = data_process.load_data_fashion_mnist(batch_size) num_features = 28*28 num_outputs = 10 class LinearNet(nn.Module): # 初始化网络,仍然只用一个单层的线性回归即可 def __init__(self, num_features, num_outputs): super(LinearNet, self).__init__() # 对比3.3的线性回归网络,最大的差别就是这里有了多个输出点 self.linear = nn.Linear(num_features, num_outputs) # 前向传播 def forward(self, x): # 线性平坦化层,将X变为一维的状态 # 这部分被单独定义为一个类FlattenLayer了 y = self.linear(x.view(x.shape[0], -1)) return y # 从类初始化网络 net = LinearNet(num_features, num_outputs) # 3.3提到的序列化组合网络方法,利用到了提取出来的平坦层类FlattenLayer net = nn.Sequential(OrderedDict([ ('flatten', layers.FlattenLayer()), ('linear', nn.Linear(num_features, num_outputs)) ])) # 初始化网络的参数,仍然是正态初始化和常数初始化 init.normal_(net.linear.weight, 0, 0.01) init.constant_(net.linear.bias, 0) # 损失函数指针的设置,使用交叉熵损失 loss = nn.CrossEntropyLoss() # 优化器的设置,使用之前使用的SGD,网络参数和学习率输入到优化器中 learning_rate = 0.03 optim = torch.optim.SGD(net.parameters(), learning_rate) # 开始训练模型,使用3.6编写的训练模板 num_epochs = 5 # 注意这里由于有了优化器所以不需要在这里输入网络参数和学习率了 train.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optim)
{"hexsha": "83e04fdf2a2e7c087d75e277063e8231966e237e", "size": 1606, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter3/Chapter3_7.py", "max_stars_repo_name": "ZFhuang/DiveIntoDLSketches", "max_stars_repo_head_hexsha": "cf0654d06ab6eeaefc35fa3bebd4937f1cbbb165", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter3/Chapter3_7.py", "max_issues_repo_name": "ZFhuang/DiveIntoDLSketches", "max_issues_repo_head_hexsha": "cf0654d06ab6eeaefc35fa3bebd4937f1cbbb165", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter3/Chapter3_7.py", "max_forks_repo_name": "ZFhuang/DiveIntoDLSketches", "max_forks_repo_head_hexsha": "cf0654d06ab6eeaefc35fa3bebd4937f1cbbb165", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6176470588, "max_line_length": 72, "alphanum_fraction": 0.7384806974, "include": true, "reason": "import numpy", "num_tokens": 647}
(* Default settings (from HsToCoq.Coq.Preamble) *) Generalizable All Variables. Unset Implicit Arguments. Set Maximal Implicit Insertion. Unset Strict Implicit. Unset Printing Implicit Defensive. Require Coq.Program.Tactics. Require Coq.Program.Wf. (* Converted imports: *) Require BinNums. Require Data.Either. Require Data.Set.Internal. Require EnumSet. Require GHC.Base. Require GHC.Char. Require GHC.Num. Require HsToCoq.Err. Require Module. Require SrcLoc. (* Converted type declarations: *) Inductive Way : Type := | WayCustom : GHC.Base.String -> Way | WayThreaded : Way | WayDebug : Way | WayProf : Way | WayEventLog : Way | WayDyn : Way. Inductive WarningFlag : Type := | Opt_WarnDuplicateExports : WarningFlag | Opt_WarnDuplicateConstraints : WarningFlag | Opt_WarnRedundantConstraints : WarningFlag | Opt_WarnHiShadows : WarningFlag | Opt_WarnImplicitPrelude : WarningFlag | Opt_WarnIncompletePatterns : WarningFlag | Opt_WarnIncompleteUniPatterns : WarningFlag | Opt_WarnIncompletePatternsRecUpd : WarningFlag | Opt_WarnOverflowedLiterals : WarningFlag | Opt_WarnEmptyEnumerations : WarningFlag | Opt_WarnMissingFields : WarningFlag | Opt_WarnMissingImportList : WarningFlag | Opt_WarnMissingMethods : WarningFlag | Opt_WarnMissingSignatures : WarningFlag | Opt_WarnMissingLocalSignatures : WarningFlag | Opt_WarnNameShadowing : WarningFlag | Opt_WarnOverlappingPatterns : WarningFlag | Opt_WarnTypeDefaults : WarningFlag | Opt_WarnMonomorphism : WarningFlag | Opt_WarnUnusedTopBinds : WarningFlag | Opt_WarnUnusedLocalBinds : WarningFlag | Opt_WarnUnusedPatternBinds : WarningFlag | Opt_WarnUnusedImports : WarningFlag | Opt_WarnUnusedMatches : WarningFlag | Opt_WarnUnusedTypePatterns : WarningFlag | Opt_WarnUnusedForalls : WarningFlag | Opt_WarnWarningsDeprecations : WarningFlag | Opt_WarnDeprecatedFlags : WarningFlag | Opt_WarnAMP : WarningFlag | Opt_WarnMissingMonadFailInstances : WarningFlag | Opt_WarnSemigroup : WarningFlag | Opt_WarnDodgyExports : WarningFlag | 
Opt_WarnDodgyImports : WarningFlag | Opt_WarnOrphans : WarningFlag | Opt_WarnAutoOrphans : WarningFlag | Opt_WarnIdentities : WarningFlag | Opt_WarnTabs : WarningFlag | Opt_WarnUnrecognisedPragmas : WarningFlag | Opt_WarnDodgyForeignImports : WarningFlag | Opt_WarnUnusedDoBind : WarningFlag | Opt_WarnWrongDoBind : WarningFlag | Opt_WarnAlternativeLayoutRuleTransitional : WarningFlag | Opt_WarnUnsafe : WarningFlag | Opt_WarnSafe : WarningFlag | Opt_WarnTrustworthySafe : WarningFlag | Opt_WarnMissedSpecs : WarningFlag | Opt_WarnAllMissedSpecs : WarningFlag | Opt_WarnUnsupportedCallingConventions : WarningFlag | Opt_WarnUnsupportedLlvmVersion : WarningFlag | Opt_WarnInlineRuleShadowing : WarningFlag | Opt_WarnTypedHoles : WarningFlag | Opt_WarnPartialTypeSignatures : WarningFlag | Opt_WarnMissingExportedSignatures : WarningFlag | Opt_WarnUntickedPromotedConstructors : WarningFlag | Opt_WarnDerivingTypeable : WarningFlag | Opt_WarnDeferredTypeErrors : WarningFlag | Opt_WarnDeferredOutOfScopeVariables : WarningFlag | Opt_WarnNonCanonicalMonadInstances : WarningFlag | Opt_WarnNonCanonicalMonadFailInstances : WarningFlag | Opt_WarnNonCanonicalMonoidInstances : WarningFlag | Opt_WarnMissingPatternSynonymSignatures : WarningFlag | Opt_WarnUnrecognisedWarningFlags : WarningFlag | Opt_WarnSimplifiableClassConstraints : WarningFlag | Opt_WarnCPPUndef : WarningFlag | Opt_WarnUnbangedStrictPatterns : WarningFlag | Opt_WarnMissingHomeModules : WarningFlag | Opt_WarnPartialFields : WarningFlag | Opt_WarnMissingExportList : WarningFlag. Inductive WarnReason : Type := | NoReason : WarnReason | Reason : WarningFlag -> WarnReason | ErrReason : (option WarningFlag) -> WarnReason. Definition TurnOnFlag := bool%type. Inductive TrustFlag : Type := | TrustPackage : GHC.Base.String -> TrustFlag | DistrustPackage : GHC.Base.String -> TrustFlag. Inductive SseVersion : Type := | SSE1 : SseVersion | SSE2 : SseVersion | SSE3 : SseVersion | SSE4 : SseVersion | SSE42 : SseVersion. 
Axiom Settings : Type. Inductive SafeHaskellMode : Type := | Sf_None : SafeHaskellMode | Sf_Unsafe : SafeHaskellMode | Sf_Trustworthy : SafeHaskellMode | Sf_Safe : SafeHaskellMode. Inductive RtsOptsEnabled : Type := | RtsOptsNone : RtsOptsEnabled | RtsOptsIgnore : RtsOptsEnabled | RtsOptsIgnoreAll : RtsOptsEnabled | RtsOptsSafeOnly : RtsOptsEnabled | RtsOptsAll : RtsOptsEnabled. Inductive ProfAuto : Type := | NoProfAuto : ProfAuto | ProfAutoAll : ProfAuto | ProfAutoTop : ProfAuto | ProfAutoExports : ProfAuto | ProfAutoCalls : ProfAuto. Inductive PkgConfRef : Type := | GlobalPkgConf : PkgConfRef | UserPkgConf : PkgConfRef | PkgConfFile : GHC.Base.String -> PkgConfRef. Inductive PackageDBFlag : Type := | PackageDB : PkgConfRef -> PackageDBFlag | NoUserPackageDB : PackageDBFlag | NoGlobalPackageDB : PackageDBFlag | ClearPackageDBs : PackageDBFlag. Inductive PackageArg : Type := | Mk_PackageArg : GHC.Base.String -> PackageArg | UnitIdArg : Module.UnitId -> PackageArg. Inductive Option : Type := | FileOption : GHC.Base.String -> GHC.Base.String -> Option | Mk_Option : GHC.Base.String -> Option. Inductive OnOff a : Type := | On : a -> OnOff a | Off : a -> OnOff a. Inductive ModRenaming : Type := | Mk_ModRenaming (modRenamingWithImplicit : bool) (modRenamings : list (Module.ModuleName * Module.ModuleName)%type) : ModRenaming. Inductive PackageFlag : Type := | ExposePackage : GHC.Base.String -> PackageArg -> ModRenaming -> PackageFlag | HidePackage : GHC.Base.String -> PackageFlag. Inductive LlvmTarget : Type := | Mk_LlvmTarget (lDataLayout : GHC.Base.String) (lCPU : GHC.Base.String) (lAttributes : list GHC.Base.String) : LlvmTarget. Definition LlvmTargets := (list (GHC.Base.String * LlvmTarget)%type)%type. 
Inductive LinkerInfo : Type := | GnuLD : list Option -> LinkerInfo | GnuGold : list Option -> LinkerInfo | LlvmLLD : list Option -> LinkerInfo | DarwinLD : list Option -> LinkerInfo | SolarisLD : list Option -> LinkerInfo | AixLD : list Option -> LinkerInfo | UnknownLD : LinkerInfo. Inductive Language : Type := | Haskell98 : Language | Haskell2010 : Language. Inductive IgnorePackageFlag : Type := | IgnorePackage : GHC.Base.String -> IgnorePackageFlag. Inductive HscTarget : Type := | HscC : HscTarget | HscAsm : HscTarget | HscLlvm : HscTarget | HscInterpreted : HscTarget | HscNothing : HscTarget. Inductive GhcMode : Type := | CompManager : GhcMode | OneShot : GhcMode | MkDepend : GhcMode. Inductive GhcLink : Type := | NoLink : GhcLink | LinkBinary : GhcLink | LinkInMemory : GhcLink | LinkDynLib : GhcLink | LinkStaticLib : GhcLink. Inductive GeneralFlag : Type := | Opt_DumpToFile : GeneralFlag | Opt_D_faststring_stats : GeneralFlag | Opt_D_dump_minimal_imports : GeneralFlag | Opt_DoCoreLinting : GeneralFlag | Opt_DoStgLinting : GeneralFlag | Opt_DoCmmLinting : GeneralFlag | Opt_DoAsmLinting : GeneralFlag | Opt_DoAnnotationLinting : GeneralFlag | Opt_NoLlvmMangler : GeneralFlag | Opt_FastLlvm : GeneralFlag | Opt_WarnIsError : GeneralFlag | Opt_ShowWarnGroups : GeneralFlag | Opt_HideSourcePaths : GeneralFlag | Opt_PrintExplicitForalls : GeneralFlag | Opt_PrintExplicitKinds : GeneralFlag | Opt_PrintExplicitCoercions : GeneralFlag | Opt_PrintExplicitRuntimeReps : GeneralFlag | Opt_PrintEqualityRelations : GeneralFlag | Opt_PrintUnicodeSyntax : GeneralFlag | Opt_PrintExpandedSynonyms : GeneralFlag | Opt_PrintPotentialInstances : GeneralFlag | Opt_PrintTypecheckerElaboration : GeneralFlag | Opt_CallArity : GeneralFlag | Opt_Exitification : GeneralFlag | Opt_Strictness : GeneralFlag | Opt_LateDmdAnal : GeneralFlag | Opt_KillAbsence : GeneralFlag | Opt_KillOneShot : GeneralFlag | Opt_FullLaziness : GeneralFlag | Opt_FloatIn : GeneralFlag | Opt_Specialise : GeneralFlag | 
Opt_SpecialiseAggressively : GeneralFlag | Opt_CrossModuleSpecialise : GeneralFlag | Opt_StaticArgumentTransformation : GeneralFlag | Opt_CSE : GeneralFlag | Opt_StgCSE : GeneralFlag | Opt_LiberateCase : GeneralFlag | Opt_SpecConstr : GeneralFlag | Opt_SpecConstrKeen : GeneralFlag | Opt_DoLambdaEtaExpansion : GeneralFlag | Opt_IgnoreAsserts : GeneralFlag | Opt_DoEtaReduction : GeneralFlag | Opt_CaseMerge : GeneralFlag | Opt_CaseFolding : GeneralFlag | Opt_UnboxStrictFields : GeneralFlag | Opt_UnboxSmallStrictFields : GeneralFlag | Opt_DictsCheap : GeneralFlag | Opt_EnableRewriteRules : GeneralFlag | Opt_Vectorise : GeneralFlag | Opt_VectorisationAvoidance : GeneralFlag | Opt_RegsGraph : GeneralFlag | Opt_RegsIterative : GeneralFlag | Opt_PedanticBottoms : GeneralFlag | Opt_LlvmTBAA : GeneralFlag | Opt_LlvmPassVectorsInRegisters : GeneralFlag | Opt_LlvmFillUndefWithGarbage : GeneralFlag | Opt_IrrefutableTuples : GeneralFlag | Opt_CmmSink : GeneralFlag | Opt_CmmElimCommonBlocks : GeneralFlag | Opt_OmitYields : GeneralFlag | Opt_FunToThunk : GeneralFlag | Opt_DictsStrict : GeneralFlag | Opt_DmdTxDictSel : GeneralFlag | Opt_Loopification : GeneralFlag | Opt_CprAnal : GeneralFlag | Opt_WorkerWrapper : GeneralFlag | Opt_SolveConstantDicts : GeneralFlag | Opt_AlignmentSanitisation : GeneralFlag | Opt_CatchBottoms : GeneralFlag | Opt_SimplPreInlining : GeneralFlag | Opt_IgnoreInterfacePragmas : GeneralFlag | Opt_OmitInterfacePragmas : GeneralFlag | Opt_ExposeAllUnfoldings : GeneralFlag | Opt_WriteInterface : GeneralFlag | Opt_AutoSccsOnIndividualCafs : GeneralFlag | Opt_ProfCountEntries : GeneralFlag | Opt_Pp : GeneralFlag | Opt_ForceRecomp : GeneralFlag | Opt_IgnoreOptimChanges : GeneralFlag | Opt_IgnoreHpcChanges : GeneralFlag | Opt_ExcessPrecision : GeneralFlag | Opt_EagerBlackHoling : GeneralFlag | Opt_NoHsMain : GeneralFlag | Opt_SplitObjs : GeneralFlag | Opt_SplitSections : GeneralFlag | Opt_StgStats : GeneralFlag | Opt_HideAllPackages : GeneralFlag | 
Opt_HideAllPluginPackages : GeneralFlag | Opt_PrintBindResult : GeneralFlag | Opt_Haddock : GeneralFlag | Opt_HaddockOptions : GeneralFlag | Opt_BreakOnException : GeneralFlag | Opt_BreakOnError : GeneralFlag | Opt_PrintEvldWithShow : GeneralFlag | Opt_PrintBindContents : GeneralFlag | Opt_GenManifest : GeneralFlag | Opt_EmbedManifest : GeneralFlag | Opt_SharedImplib : GeneralFlag | Opt_BuildingCabalPackage : GeneralFlag | Opt_IgnoreDotGhci : GeneralFlag | Opt_GhciSandbox : GeneralFlag | Opt_GhciHistory : GeneralFlag | Opt_LocalGhciHistory : GeneralFlag | Opt_HelpfulErrors : GeneralFlag | Opt_DeferTypeErrors : GeneralFlag | Opt_DeferTypedHoles : GeneralFlag | Opt_DeferOutOfScopeVariables : GeneralFlag | Opt_PIC : GeneralFlag | Opt_PIE : GeneralFlag | Opt_PICExecutable : GeneralFlag | Opt_SccProfilingOn : GeneralFlag | Opt_Ticky : GeneralFlag | Opt_Ticky_Allocd : GeneralFlag | Opt_Ticky_LNE : GeneralFlag | Opt_Ticky_Dyn_Thunk : GeneralFlag | Opt_RPath : GeneralFlag | Opt_RelativeDynlibPaths : GeneralFlag | Opt_Hpc : GeneralFlag | Opt_FlatCache : GeneralFlag | Opt_ExternalInterpreter : GeneralFlag | Opt_OptimalApplicativeDo : GeneralFlag | Opt_VersionMacros : GeneralFlag | Opt_WholeArchiveHsLibs : GeneralFlag | Opt_ErrorSpans : GeneralFlag | Opt_DiagnosticsShowCaret : GeneralFlag | Opt_PprCaseAsLet : GeneralFlag | Opt_PprShowTicks : GeneralFlag | Opt_ShowHoleConstraints : GeneralFlag | Opt_ShowLoadedModules : GeneralFlag | Opt_SuppressCoercions : GeneralFlag | Opt_SuppressVarKinds : GeneralFlag | Opt_SuppressModulePrefixes : GeneralFlag | Opt_SuppressTypeApplications : GeneralFlag | Opt_SuppressIdInfo : GeneralFlag | Opt_SuppressUnfoldings : GeneralFlag | Opt_SuppressTypeSignatures : GeneralFlag | Opt_SuppressUniques : GeneralFlag | Opt_SuppressStgFreeVars : GeneralFlag | Opt_SuppressTicks : GeneralFlag | Opt_AutoLinkPackages : GeneralFlag | Opt_ImplicitImportQualified : GeneralFlag | Opt_KeepHiDiffs : GeneralFlag | Opt_KeepHcFiles : GeneralFlag | Opt_KeepSFiles : 
GeneralFlag | Opt_KeepTmpFiles : GeneralFlag | Opt_KeepRawTokenStream : GeneralFlag | Opt_KeepLlvmFiles : GeneralFlag | Opt_KeepHiFiles : GeneralFlag | Opt_KeepOFiles : GeneralFlag | Opt_BuildDynamicToo : GeneralFlag | Opt_DistrustAllPackages : GeneralFlag | Opt_PackageTrust : GeneralFlag | Opt_G_NoStateHack : GeneralFlag | Opt_G_NoOptCoercion : GeneralFlag. Axiom FlushOut : Type. Axiom FlushErr : Type. Axiom FlagSpec : Type -> Type. Inductive FilesToClean : Type := | Mk_FilesToClean (ftcGhcSession : (Data.Set.Internal.Set_ GHC.Base.String)) (ftcCurrentModule : (Data.Set.Internal.Set_ GHC.Base.String)) : FilesToClean. Inductive DynLibLoader : Type := | Deployable : DynLibLoader | SystemDependent : DynLibLoader. Axiom DynFlags : Type. Record HasDynFlags__Dict (m : Type -> Type) := HasDynFlags__Dict_Build { getDynFlags__ : m DynFlags }. Definition HasDynFlags (m : Type -> Type) := forall r__, (HasDynFlags__Dict m -> r__) -> r__. Existing Class HasDynFlags. Definition getDynFlags `{g__0__ : HasDynFlags m} : m DynFlags := g__0__ _ (getDynFlags__ m). 
Inductive DumpFlag : Type := | Opt_D_dump_cmm : DumpFlag | Opt_D_dump_cmm_from_stg : DumpFlag | Opt_D_dump_cmm_raw : DumpFlag | Opt_D_dump_cmm_verbose : DumpFlag | Opt_D_dump_cmm_cfg : DumpFlag | Opt_D_dump_cmm_cbe : DumpFlag | Opt_D_dump_cmm_switch : DumpFlag | Opt_D_dump_cmm_proc : DumpFlag | Opt_D_dump_cmm_sp : DumpFlag | Opt_D_dump_cmm_sink : DumpFlag | Opt_D_dump_cmm_caf : DumpFlag | Opt_D_dump_cmm_procmap : DumpFlag | Opt_D_dump_cmm_split : DumpFlag | Opt_D_dump_cmm_info : DumpFlag | Opt_D_dump_cmm_cps : DumpFlag | Opt_D_dump_asm : DumpFlag | Opt_D_dump_asm_native : DumpFlag | Opt_D_dump_asm_liveness : DumpFlag | Opt_D_dump_asm_regalloc : DumpFlag | Opt_D_dump_asm_regalloc_stages : DumpFlag | Opt_D_dump_asm_conflicts : DumpFlag | Opt_D_dump_asm_stats : DumpFlag | Opt_D_dump_asm_expanded : DumpFlag | Opt_D_dump_llvm : DumpFlag | Opt_D_dump_core_stats : DumpFlag | Opt_D_dump_deriv : DumpFlag | Opt_D_dump_ds : DumpFlag | Opt_D_dump_foreign : DumpFlag | Opt_D_dump_inlinings : DumpFlag | Opt_D_dump_rule_firings : DumpFlag | Opt_D_dump_rule_rewrites : DumpFlag | Opt_D_dump_simpl_trace : DumpFlag | Opt_D_dump_occur_anal : DumpFlag | Opt_D_dump_parsed : DumpFlag | Opt_D_dump_parsed_ast : DumpFlag | Opt_D_dump_rn : DumpFlag | Opt_D_dump_rn_ast : DumpFlag | Opt_D_dump_shape : DumpFlag | Opt_D_dump_simpl : DumpFlag | Opt_D_dump_simpl_iterations : DumpFlag | Opt_D_dump_spec : DumpFlag | Opt_D_dump_prep : DumpFlag | Opt_D_dump_stg : DumpFlag | Opt_D_dump_call_arity : DumpFlag | Opt_D_dump_exitify : DumpFlag | Opt_D_dump_stranal : DumpFlag | Opt_D_dump_str_signatures : DumpFlag | Opt_D_dump_tc : DumpFlag | Opt_D_dump_tc_ast : DumpFlag | Opt_D_dump_types : DumpFlag | Opt_D_dump_rules : DumpFlag | Opt_D_dump_cse : DumpFlag | Opt_D_dump_worker_wrapper : DumpFlag | Opt_D_dump_rn_trace : DumpFlag | Opt_D_dump_rn_stats : DumpFlag | Opt_D_dump_opt_cmm : DumpFlag | Opt_D_dump_simpl_stats : DumpFlag | Opt_D_dump_cs_trace : DumpFlag | Opt_D_dump_tc_trace : DumpFlag | 
Opt_D_dump_ec_trace : DumpFlag | Opt_D_dump_if_trace : DumpFlag | Opt_D_dump_vt_trace : DumpFlag | Opt_D_dump_splices : DumpFlag | Opt_D_th_dec_file : DumpFlag | Opt_D_dump_BCOs : DumpFlag | Opt_D_dump_vect : DumpFlag | Opt_D_dump_ticked : DumpFlag | Opt_D_dump_rtti : DumpFlag | Opt_D_source_stats : DumpFlag | Opt_D_verbose_stg2stg : DumpFlag | Opt_D_dump_hi : DumpFlag | Opt_D_dump_hi_diffs : DumpFlag | Opt_D_dump_mod_cycles : DumpFlag | Opt_D_dump_mod_map : DumpFlag | Opt_D_dump_timings : DumpFlag | Opt_D_dump_view_pattern_commoning : DumpFlag | Opt_D_verbose_core2core : DumpFlag | Opt_D_dump_debug : DumpFlag | Opt_D_dump_json : DumpFlag | Opt_D_ppr_debug : DumpFlag | Opt_D_no_debug_output : DumpFlag. Inductive Deprecation : Type := | NotDeprecated : Deprecation | Deprecated : Deprecation. Record ContainsDynFlags__Dict (t : Type) := ContainsDynFlags__Dict_Build { extractDynFlags__ : t -> DynFlags }. Definition ContainsDynFlags (t : Type) := forall r__, (ContainsDynFlags__Dict t -> r__) -> r__. Existing Class ContainsDynFlags. Definition extractDynFlags `{g__0__ : ContainsDynFlags t} : t -> DynFlags := g__0__ _ (extractDynFlags__ t). Inductive CompilerInfo : Type := | GCC : CompilerInfo | Clang : CompilerInfo | AppleClang : CompilerInfo | AppleClang51 : CompilerInfo | UnknownCC : CompilerInfo. Inductive BmiVersion : Type := | BMI1 : BmiVersion | BMI2 : BmiVersion. Arguments On {_} _. Arguments Off {_} _. Instance Default__Way : HsToCoq.Err.Default Way := HsToCoq.Err.Build_Default _ WayThreaded. Instance Default__WarningFlag : HsToCoq.Err.Default WarningFlag := HsToCoq.Err.Build_Default _ Opt_WarnDuplicateExports. Instance Default__WarnReason : HsToCoq.Err.Default WarnReason := HsToCoq.Err.Build_Default _ NoReason. Instance Default__SseVersion : HsToCoq.Err.Default SseVersion := HsToCoq.Err.Build_Default _ SSE1. Instance Default__SafeHaskellMode : HsToCoq.Err.Default SafeHaskellMode := HsToCoq.Err.Build_Default _ Sf_None. 
Instance Default__RtsOptsEnabled : HsToCoq.Err.Default RtsOptsEnabled := HsToCoq.Err.Build_Default _ RtsOptsNone. Instance Default__ProfAuto : HsToCoq.Err.Default ProfAuto := HsToCoq.Err.Build_Default _ NoProfAuto. Instance Default__PkgConfRef : HsToCoq.Err.Default PkgConfRef := HsToCoq.Err.Build_Default _ GlobalPkgConf. Instance Default__PackageDBFlag : HsToCoq.Err.Default PackageDBFlag := HsToCoq.Err.Build_Default _ NoUserPackageDB. Instance Default__ModRenaming : HsToCoq.Err.Default ModRenaming := HsToCoq.Err.Build_Default _ (Mk_ModRenaming HsToCoq.Err.default HsToCoq.Err.default). Instance Default__LlvmTarget : HsToCoq.Err.Default LlvmTarget := HsToCoq.Err.Build_Default _ (Mk_LlvmTarget HsToCoq.Err.default HsToCoq.Err.default HsToCoq.Err.default). Instance Default__LinkerInfo : HsToCoq.Err.Default LinkerInfo := HsToCoq.Err.Build_Default _ UnknownLD. Instance Default__Language : HsToCoq.Err.Default Language := HsToCoq.Err.Build_Default _ Haskell98. Instance Default__HscTarget : HsToCoq.Err.Default HscTarget := HsToCoq.Err.Build_Default _ HscC. Instance Default__GhcMode : HsToCoq.Err.Default GhcMode := HsToCoq.Err.Build_Default _ CompManager. Instance Default__GhcLink : HsToCoq.Err.Default GhcLink := HsToCoq.Err.Build_Default _ NoLink. Instance Default__GeneralFlag : HsToCoq.Err.Default GeneralFlag := HsToCoq.Err.Build_Default _ Opt_DumpToFile. Instance Default__FilesToClean : HsToCoq.Err.Default FilesToClean := HsToCoq.Err.Build_Default _ (Mk_FilesToClean HsToCoq.Err.default HsToCoq.Err.default). Instance Default__DynLibLoader : HsToCoq.Err.Default DynLibLoader := HsToCoq.Err.Build_Default _ Deployable. Instance Default__DumpFlag : HsToCoq.Err.Default DumpFlag := HsToCoq.Err.Build_Default _ Opt_D_dump_cmm. Instance Default__Deprecation : HsToCoq.Err.Default Deprecation := HsToCoq.Err.Build_Default _ NotDeprecated. Instance Default__CompilerInfo : HsToCoq.Err.Default CompilerInfo := HsToCoq.Err.Build_Default _ GCC. 
Instance Default__BmiVersion : HsToCoq.Err.Default BmiVersion := HsToCoq.Err.Build_Default _ BMI1. (* Midamble *) Instance Unpeel_IgnorePackageFlag : HsToCoq.Unpeel.Unpeel IgnorePackageFlag GHC.Base.String := HsToCoq.Unpeel.Build_Unpeel _ _ (fun x => match x with | IgnorePackage y => y end) IgnorePackage. Instance Default__DynFlags : HsToCoq.Err.Default DynFlags. Admitted. (* Converted value declarations: *) (* Skipping instance `DynFlags.Eq___DumpFlag' of class `GHC.Base.Eq_' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__DumpFlag' *) (* Skipping all instances of class `GHC.Enum.Enum', including `DynFlags.Enum__DumpFlag' *) (* Skipping instance `DynFlags.Eq___GeneralFlag' of class `GHC.Base.Eq_' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__GeneralFlag' *) (* Skipping all instances of class `GHC.Enum.Enum', including `DynFlags.Enum__GeneralFlag' *) (* Skipping instance `DynFlags.Eq___WarningFlag' of class `GHC.Base.Eq_' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__WarningFlag' *) (* Skipping all instances of class `GHC.Enum.Enum', including `DynFlags.Enum__WarningFlag' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__WarnReason' *) Instance Eq___Language : GHC.Base.Eq_ Language. Proof. Admitted. (* Skipping all instances of class `GHC.Enum.Enum', including `DynFlags.Enum__Language' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__Language' *) Instance Eq___SafeHaskellMode : GHC.Base.Eq_ SafeHaskellMode. Proof. Admitted. Instance Eq___ProfAuto : GHC.Base.Eq_ ProfAuto. Proof. Admitted. (* Skipping all instances of class `GHC.Enum.Enum', including `DynFlags.Enum__ProfAuto' *) Instance Eq___HscTarget : GHC.Base.Eq_ HscTarget. Proof. Admitted. (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__HscTarget' *) Instance Eq___GhcMode : GHC.Base.Eq_ GhcMode. Proof. Admitted. 
Instance Eq___GhcLink : GHC.Base.Eq_ GhcLink. Proof. Admitted. (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__GhcLink' *) Instance Eq___PackageArg : GHC.Base.Eq_ PackageArg. Proof. Admitted. (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__PackageArg' *) Instance Eq___ModRenaming : GHC.Base.Eq_ ModRenaming. Proof. Admitted. Instance Eq___IgnorePackageFlag : GHC.Base.Eq_ IgnorePackageFlag. Proof. Admitted. Instance Eq___TrustFlag : GHC.Base.Eq_ TrustFlag. Proof. Admitted. Instance Eq___PackageFlag : GHC.Base.Eq_ PackageFlag. Proof. Admitted. Instance Eq___DynLibLoader : GHC.Base.Eq_ DynLibLoader. Proof. Admitted. (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__RtsOptsEnabled' *) Instance Eq___Way : GHC.Base.Eq_ Way. Proof. Admitted. (* Skipping instance `DynFlags.Ord__Way' of class `GHC.Base.Ord' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__Way' *) Instance Eq___OnOff : forall {a}, forall `{GHC.Base.Eq_ a}, GHC.Base.Eq_ (OnOff a). Proof. Admitted. (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__OnOff' *) Instance Eq___Option : GHC.Base.Eq_ Option. Proof. Admitted. Instance Eq___Deprecation : GHC.Base.Eq_ Deprecation. Proof. Admitted. Instance Ord__Deprecation : GHC.Base.Ord Deprecation. Proof. Admitted. Instance Eq___PkgConfRef : GHC.Base.Eq_ PkgConfRef. Proof. Admitted. Instance Eq___PackageDBFlag : GHC.Base.Eq_ PackageDBFlag. Proof. Admitted. Instance Eq___SseVersion : GHC.Base.Eq_ SseVersion. Proof. Admitted. (* Skipping instance `DynFlags.Ord__SseVersion' of class `GHC.Base.Ord' *) Instance Eq___BmiVersion : GHC.Base.Eq_ BmiVersion. Proof. Admitted. Instance Ord__BmiVersion : GHC.Base.Ord BmiVersion. Proof. Admitted. Instance Eq___LinkerInfo : GHC.Base.Eq_ LinkerInfo. Proof. Admitted. Instance Eq___CompilerInfo : GHC.Base.Eq_ CompilerInfo. Proof. Admitted. 
(* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__WarnReason' *) (* Skipping all instances of class `Json.ToJson', including `DynFlags.ToJson__WarnReason' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__Language' *) (* Skipping all instances of class `GHC.Show.Show', including `DynFlags.Show__SafeHaskellMode' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__SafeHaskellMode' *) (* Skipping instance `DynFlags.HasDynFlags__WriterT' of class `DynFlags.HasDynFlags' *) (* Skipping instance `DynFlags.HasDynFlags__ReaderT' of class `DynFlags.HasDynFlags' *) (* Skipping instance `DynFlags.HasDynFlags__MaybeT' of class `DynFlags.HasDynFlags' *) (* Skipping instance `DynFlags.HasDynFlags__ExceptT' of class `DynFlags.HasDynFlags' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__GhcMode' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__PackageArg' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__ModRenaming' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__PackageFlag' *) (* Skipping all instances of class `Outputable.Outputable', including `DynFlags.Outputable__OnOff' *) Axiom cONTROL_GROUP_CONST_291 : DynFlags -> BinNums.N. Axiom sTD_HDR_SIZE : DynFlags -> BinNums.N. Axiom pROF_HDR_SIZE : DynFlags -> BinNums.N. Axiom bLOCK_SIZE : DynFlags -> BinNums.N. Axiom bLOCKS_PER_MBLOCK : DynFlags -> BinNums.N. Axiom tICKY_BIN_COUNT : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR5 : DynFlags -> BinNums.N. 
Axiom oFFSET_StgRegTable_rR6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR7 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR8 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR9 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rR10 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF5 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rF6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD5 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rD6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM5 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rXMM6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM5 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rYMM6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM1 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM2 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM3 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM4 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM5 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rZMM6 : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rL1 : DynFlags -> BinNums.N. 
Axiom oFFSET_StgRegTable_rSp : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rSpLim : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rHp : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rHpLim : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rCCCS : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rCurrentTSO : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rCurrentNursery : DynFlags -> BinNums.N. Axiom oFFSET_StgRegTable_rHpAlloc : DynFlags -> BinNums.N. Axiom oFFSET_stgEagerBlackholeInfo : DynFlags -> BinNums.N. Axiom oFFSET_stgGCEnter1 : DynFlags -> BinNums.N. Axiom oFFSET_stgGCFun : DynFlags -> BinNums.N. Axiom oFFSET_Capability_r : DynFlags -> BinNums.N. Axiom oFFSET_bdescr_start : DynFlags -> BinNums.N. Axiom oFFSET_bdescr_free : DynFlags -> BinNums.N. Axiom oFFSET_bdescr_blocks : DynFlags -> BinNums.N. Axiom oFFSET_bdescr_flags : DynFlags -> BinNums.N. Axiom sIZEOF_CostCentreStack : DynFlags -> BinNums.N. Axiom oFFSET_CostCentreStack_mem_alloc : DynFlags -> BinNums.N. Axiom oFFSET_CostCentreStack_scc_count : DynFlags -> BinNums.N. Axiom oFFSET_StgHeader_ccs : DynFlags -> BinNums.N. Axiom oFFSET_StgHeader_ldvw : DynFlags -> BinNums.N. Axiom sIZEOF_StgSMPThunkHeader : DynFlags -> BinNums.N. Axiom oFFSET_StgEntCounter_allocs : DynFlags -> BinNums.N. Axiom oFFSET_StgEntCounter_allocd : DynFlags -> BinNums.N. Axiom oFFSET_StgEntCounter_registeredp : DynFlags -> BinNums.N. Axiom oFFSET_StgEntCounter_link : DynFlags -> BinNums.N. Axiom oFFSET_StgEntCounter_entry_count : DynFlags -> BinNums.N. Axiom sIZEOF_StgUpdateFrame_NoHdr : DynFlags -> BinNums.N. Axiom sIZEOF_StgMutArrPtrs_NoHdr : DynFlags -> BinNums.N. Axiom oFFSET_StgMutArrPtrs_ptrs : DynFlags -> BinNums.N. Axiom oFFSET_StgMutArrPtrs_size : DynFlags -> BinNums.N. Axiom sIZEOF_StgSmallMutArrPtrs_NoHdr : DynFlags -> BinNums.N. Axiom oFFSET_StgSmallMutArrPtrs_ptrs : DynFlags -> BinNums.N. Axiom sIZEOF_StgArrBytes_NoHdr : DynFlags -> BinNums.N. Axiom oFFSET_StgArrBytes_bytes : DynFlags -> BinNums.N. 
Axiom oFFSET_StgTSO_alloc_limit : DynFlags -> BinNums.N. Axiom oFFSET_StgTSO_cccs : DynFlags -> BinNums.N. Axiom oFFSET_StgTSO_stackobj : DynFlags -> BinNums.N. Axiom oFFSET_StgStack_sp : DynFlags -> BinNums.N. Axiom oFFSET_StgStack_stack : DynFlags -> BinNums.N. Axiom oFFSET_StgUpdateFrame_updatee : DynFlags -> BinNums.N. Axiom oFFSET_StgFunInfoExtraFwd_arity : DynFlags -> BinNums.N. Axiom sIZEOF_StgFunInfoExtraRev : DynFlags -> BinNums.N. Axiom oFFSET_StgFunInfoExtraRev_arity : DynFlags -> BinNums.N. Axiom mAX_SPEC_SELECTEE_SIZE : DynFlags -> BinNums.N. Axiom mAX_SPEC_AP_SIZE : DynFlags -> BinNums.N. Axiom mIN_PAYLOAD_SIZE : DynFlags -> BinNums.N. Axiom mIN_INTLIKE : DynFlags -> BinNums.N. Axiom mAX_INTLIKE : DynFlags -> BinNums.N. Axiom mIN_CHARLIKE : DynFlags -> BinNums.N. Axiom mAX_CHARLIKE : DynFlags -> BinNums.N. Axiom mUT_ARR_PTRS_CARD_BITS : DynFlags -> BinNums.N. Axiom mAX_Vanilla_REG : DynFlags -> BinNums.N. Axiom mAX_Float_REG : DynFlags -> BinNums.N. Axiom mAX_Double_REG : DynFlags -> BinNums.N. Axiom mAX_Long_REG : DynFlags -> BinNums.N. Axiom mAX_XMM_REG : DynFlags -> BinNums.N. Axiom mAX_Real_Vanilla_REG : DynFlags -> BinNums.N. Axiom mAX_Real_Float_REG : DynFlags -> BinNums.N. Axiom mAX_Real_Double_REG : DynFlags -> BinNums.N. Axiom mAX_Real_XMM_REG : DynFlags -> BinNums.N. Axiom mAX_Real_Long_REG : DynFlags -> BinNums.N. Axiom rESERVED_C_STACK_BYTES : DynFlags -> BinNums.N. Axiom rESERVED_STACK_WORDS : DynFlags -> BinNums.N. Axiom aP_STACK_SPLIM : DynFlags -> BinNums.N. Axiom wORD_SIZE : DynFlags -> BinNums.N. Axiom dOUBLE_SIZE : DynFlags -> BinNums.N. Axiom cINT_SIZE : DynFlags -> BinNums.N. Axiom cLONG_SIZE : DynFlags -> BinNums.N. Axiom cLONG_LONG_SIZE : DynFlags -> BinNums.N. Axiom bITMAP_BITS_SHIFT : DynFlags -> BinNums.N. Axiom tAG_BITS : DynFlags -> BinNums.N. Axiom wORDS_BIGENDIAN : DynFlags -> bool. Axiom dYNAMIC_BY_DEFAULT : DynFlags -> bool. Axiom lDV_SHIFT : DynFlags -> BinNums.N. Axiom iLDV_CREATE_MASK : DynFlags -> GHC.Num.Integer. 
Axiom iLDV_STATE_CREATE : DynFlags -> GHC.Num.Integer. Axiom iLDV_STATE_USE : DynFlags -> GHC.Num.Integer. Axiom optimisationFlags : EnumSet.EnumSet GeneralFlag. (* Skipping definition `DynFlags.targetPlatform' *) Axiom programName : DynFlags -> GHC.Base.String. Axiom projectVersion : DynFlags -> GHC.Base.String. Axiom ghcUsagePath : DynFlags -> GHC.Base.String. Axiom ghciUsagePath : DynFlags -> GHC.Base.String. Axiom topDir : DynFlags -> GHC.Base.String. Axiom tmpDir : DynFlags -> GHC.Base.String. Axiom rawSettings : DynFlags -> list (GHC.Base.String * GHC.Base.String)%type. Axiom extraGccViaCFlags : DynFlags -> list GHC.Base.String. Axiom systemPackageConfig : DynFlags -> GHC.Base.String. Axiom pgm_L : DynFlags -> GHC.Base.String. Axiom pgm_P : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_F : DynFlags -> GHC.Base.String. Axiom pgm_c : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_s : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_a : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_l : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_dll : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_T : DynFlags -> GHC.Base.String. Axiom pgm_windres : DynFlags -> GHC.Base.String. Axiom pgm_libtool : DynFlags -> GHC.Base.String. Axiom pgm_lcc : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_ar : DynFlags -> GHC.Base.String. Axiom pgm_ranlib : DynFlags -> GHC.Base.String. Axiom pgm_lo : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_lc : DynFlags -> (GHC.Base.String * list Option)%type. Axiom pgm_i : DynFlags -> GHC.Base.String. Axiom opt_L : DynFlags -> list GHC.Base.String. Axiom opt_P : DynFlags -> list GHC.Base.String. Axiom opt_F : DynFlags -> list GHC.Base.String. Axiom opt_c : DynFlags -> list GHC.Base.String. Axiom opt_a : DynFlags -> list GHC.Base.String. Axiom opt_l : DynFlags -> list GHC.Base.String. Axiom opt_windres : DynFlags -> list GHC.Base.String. 
Axiom opt_lcc : DynFlags -> list GHC.Base.String. Axiom opt_lo : DynFlags -> list GHC.Base.String. Axiom opt_lc : DynFlags -> list GHC.Base.String. Axiom opt_i : DynFlags -> list GHC.Base.String. (* Skipping definition `DynFlags.versionedAppDir' *) Axiom versionedFilePath : DynFlags -> GHC.Base.String. Axiom isObjectTarget : HscTarget -> bool. Axiom targetRetainsAllBindings : HscTarget -> bool. Axiom isOneShot : GhcMode -> bool. Axiom isNoLink : GhcLink -> bool. Axiom packageFlagsChanged : DynFlags -> DynFlags -> bool. (* Skipping definition `DynFlags.defaultHscTarget' *) (* Skipping definition `DynFlags.defaultObjectTarget' *) Axiom tablesNextToCode : DynFlags -> bool. Axiom mkTablesNextToCode : bool -> bool. Axiom shouldUseColor : DynFlags -> bool. Axiom positionIndependent : DynFlags -> bool. Axiom allowed_combination : list Way -> bool. Axiom mkBuildTag : list Way -> GHC.Base.String. Axiom wayTag : Way -> GHC.Base.String. Axiom wayRTSOnly : Way -> bool. Axiom wayDesc : Way -> GHC.Base.String. (* Skipping definition `DynFlags.wayGeneralFlags' *) (* Skipping definition `DynFlags.wayUnsetGeneralFlags' *) (* Skipping definition `DynFlags.wayOptc' *) (* Skipping definition `DynFlags.wayOptl' *) (* Skipping definition `DynFlags.wayOptP' *) (* Skipping definition `DynFlags.whenGeneratingDynamicToo' *) (* Skipping definition `DynFlags.ifGeneratingDynamicToo' *) (* Skipping definition `DynFlags.whenCannotGenerateDynamicToo' *) (* Skipping definition `DynFlags.ifCannotGenerateDynamicToo' *) (* Skipping definition `DynFlags.generateDynamicTooConditional' *) Axiom dynamicTooMkDynamicDynFlags : DynFlags -> DynFlags. (* Skipping definition `DynFlags.initDynFlags' *) Axiom defaultDynFlags : Settings -> LlvmTargets -> DynFlags. Axiom defaultWays : Settings -> list Way. Axiom interpWays : list Way. Axiom interpreterProfiled : DynFlags -> bool. 
(* Skipping definition `DynFlags.interpreterDynamic' *) (* Skipping definition `DynFlags.defaultLogOutput' *) (* Skipping definition `DynFlags.defaultFatalMessager' *) (* Skipping definition `DynFlags.jsonLogOutput' *) (* Skipping definition `DynFlags.jsonLogAction' *) (* Skipping definition `DynFlags.jsonLogFinaliser' *) (* Skipping definition `DynFlags.defaultLogAction' *) (* Skipping definition `DynFlags.defaultLogActionHPrintDoc' *) (* Skipping definition `DynFlags.defaultLogActionHPutStrDoc' *) Axiom defaultFlushOut : FlushOut. (* Skipping definition `DynFlags.defaultFlushErr' *) (* Skipping definition `DynFlags.flattenExtensionFlags' *) (* Skipping definition `DynFlags.languageExtensions' *) Axiom hasPprDebug : DynFlags -> bool. Axiom hasNoDebugOutput : DynFlags -> bool. Axiom hasNoStateHack : DynFlags -> bool. Axiom hasNoOptCoercion : DynFlags -> bool. (* Skipping definition `DynFlags.dopt' *) Axiom dopt_set : DynFlags -> DumpFlag -> DynFlags. Axiom dopt_unset : DynFlags -> DumpFlag -> DynFlags. Axiom gopt : GeneralFlag -> DynFlags -> bool. Axiom gopt_set : DynFlags -> GeneralFlag -> DynFlags. (* Skipping definition `DynFlags.gopt_unset' *) (* Skipping definition `DynFlags.wopt' *) (* Skipping definition `DynFlags.wopt_set' *) (* Skipping definition `DynFlags.wopt_unset' *) Axiom wopt_fatal : WarningFlag -> DynFlags -> bool. Axiom wopt_set_fatal : DynFlags -> WarningFlag -> DynFlags. Axiom wopt_unset_fatal : DynFlags -> WarningFlag -> DynFlags. (* Skipping definition `DynFlags.xopt' *) (* Skipping definition `DynFlags.xopt_set' *) (* Skipping definition `DynFlags.xopt_unset' *) (* Skipping definition `DynFlags.lang_set' *) Axiom useUnicodeSyntax : DynFlags -> bool. (* Skipping definition `DynFlags.setLanguage' *) (* Skipping definition `DynFlags.dynFlagDependencies' *) Axiom packageTrustOn : DynFlags -> bool. Axiom safeHaskellOn : DynFlags -> bool. Axiom safeLanguageOn : DynFlags -> bool. Axiom safeInferOn : DynFlags -> bool. 
(* Skipping definition `DynFlags.safeImportsOn' *) (* Skipping definition `DynFlags.setSafeHaskell' *) Axiom safeDirectImpsReq : DynFlags -> bool. Axiom safeImplicitImpsReq : DynFlags -> bool. (* Skipping definition `DynFlags.combineSafeFlags' *) Axiom unsafeFlags : list (GHC.Base.String * (DynFlags -> SrcLoc.SrcSpan) * (DynFlags -> bool) * (DynFlags -> DynFlags))%type. Axiom unsafeFlagsForInfer : list (GHC.Base.String * (DynFlags -> SrcLoc.SrcSpan) * (DynFlags -> bool) * (DynFlags -> DynFlags))%type. Axiom getOpts : forall {a : Type}, DynFlags -> (DynFlags -> list a) -> list a. Axiom getVerbFlags : DynFlags -> list GHC.Base.String. Axiom setObjectDir : GHC.Base.String -> DynFlags -> DynFlags. Axiom setHiDir : GHC.Base.String -> DynFlags -> DynFlags. Axiom setStubDir : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDumpDir : GHC.Base.String -> DynFlags -> DynFlags. Axiom setOutputDir : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDylibInstallName : GHC.Base.String -> DynFlags -> DynFlags. Axiom setObjectSuf : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDynObjectSuf : GHC.Base.String -> DynFlags -> DynFlags. Axiom setHiSuf : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDynHiSuf : GHC.Base.String -> DynFlags -> DynFlags. Axiom setHcSuf : GHC.Base.String -> DynFlags -> DynFlags. Axiom setOutputFile : option GHC.Base.String -> DynFlags -> DynFlags. Axiom setDynOutputFile : option GHC.Base.String -> DynFlags -> DynFlags. Axiom setOutputHi : option GHC.Base.String -> DynFlags -> DynFlags. Axiom setJsonLogAction : DynFlags -> DynFlags. Axiom thisComponentId : DynFlags -> Module.ComponentId. Axiom thisUnitIdInsts : DynFlags -> list (Module.ModuleName * Module.Module)%type. Axiom thisPackage : DynFlags -> Module.UnitId. Axiom parseUnitIdInsts : GHC.Base.String -> list (Module.ModuleName * Module.Module)%type. Axiom setUnitIdInsts : GHC.Base.String -> DynFlags -> DynFlags. Axiom setComponentId : GHC.Base.String -> DynFlags -> DynFlags. 
Axiom addPluginModuleName : GHC.Base.String -> DynFlags -> DynFlags. Axiom addPluginModuleNameOption : GHC.Base.String -> DynFlags -> DynFlags. Axiom addFrontendPluginOption : GHC.Base.String -> DynFlags -> DynFlags. Axiom parseDynLibLoaderMode : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDumpPrefixForce : option GHC.Base.String -> DynFlags -> DynFlags. Axiom setPgmP : GHC.Base.String -> DynFlags -> DynFlags. Axiom addOptl : GHC.Base.String -> DynFlags -> DynFlags. Axiom addOptc : GHC.Base.String -> DynFlags -> DynFlags. Axiom addOptP : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDepMakefile : GHC.Base.String -> DynFlags -> DynFlags. Axiom setDepIncludePkgDeps : bool -> DynFlags -> DynFlags. Axiom addDepExcludeMod : GHC.Base.String -> DynFlags -> DynFlags. Axiom addDepSuffix : GHC.Base.String -> DynFlags -> DynFlags. Axiom addCmdlineFramework : GHC.Base.String -> DynFlags -> DynFlags. Axiom addGhcVersionFile : GHC.Base.String -> DynFlags -> DynFlags. Axiom addHaddockOpts : GHC.Base.String -> DynFlags -> DynFlags. Axiom addGhciScript : GHC.Base.String -> DynFlags -> DynFlags. Axiom setInteractivePrint : GHC.Base.String -> DynFlags -> DynFlags. Axiom showOpt : Option -> GHC.Base.String. (* Skipping definition `DynFlags.updOptLevel' *) (* Skipping definition `DynFlags.parseDynamicFlagsCmdLine' *) (* Skipping definition `DynFlags.parseDynamicFilePragma' *) (* Skipping definition `DynFlags.parseDynamicFlagsFull' *) (* Skipping definition `DynFlags.setLogAction' *) (* Skipping definition `DynFlags.putLogMsg' *) Axiom updateWays : DynFlags -> DynFlags. Axiom safeFlagCheck : bool -> DynFlags -> (DynFlags * list (SrcLoc.Located GHC.Base.String))%type. Axiom allNonDeprecatedFlags : list GHC.Base.String. 
(* Skipping definition `DynFlags.allFlagsDeps' *) (* Skipping definition `DynFlags.flagsAll' *) (* Skipping definition `DynFlags.flagsAllDeps' *) (* Skipping definition `DynFlags.flagsDynamic' *) (* Skipping definition `DynFlags.flagsPackage' *) (* Skipping definition `DynFlags.make_ord_flag' *) (* Skipping definition `DynFlags.make_dep_flag' *) (* Skipping definition `DynFlags.add_dep_message' *) (* Skipping definition `DynFlags.dynamic_flags_deps' *) (* Skipping definition `DynFlags.unrecognisedWarning' *) (* Skipping definition `DynFlags.package_flags_deps' *) Axiom flagsForCompletion : bool -> list GHC.Base.String. Axiom turnOn : TurnOnFlag. Axiom turnOff : TurnOnFlag. Axiom flagSpec : forall {flag}, GHC.Base.String -> flag -> (Deprecation * FlagSpec flag)%type. (* Skipping definition `DynFlags.flagSpec'' *) (* Skipping definition `DynFlags.depFlagSpecOp' *) Axiom depFlagSpec : forall {flag}, GHC.Base.String -> flag -> GHC.Base.String -> (Deprecation * FlagSpec flag)%type. (* Skipping definition `DynFlags.depFlagSpecOp'' *) (* Skipping definition `DynFlags.depFlagSpec'' *) Axiom depFlagSpecCond : forall {flag}, GHC.Base.String -> flag -> (TurnOnFlag -> bool) -> GHC.Base.String -> (Deprecation * FlagSpec flag)%type. Axiom flagGhciSpec : forall {flag}, GHC.Base.String -> flag -> (Deprecation * FlagSpec flag)%type. (* Skipping definition `DynFlags.flagGhciSpec'' *) Axiom flagHiddenSpec : forall {flag}, GHC.Base.String -> flag -> (Deprecation * FlagSpec flag)%type. (* Skipping definition `DynFlags.flagHiddenSpec'' *) Axiom hideFlag : forall {a}, (Deprecation * FlagSpec a)%type -> (Deprecation * FlagSpec a)%type. (* Skipping definition `DynFlags.mkFlag' *) Axiom deprecatedForExtension : GHC.Base.String -> TurnOnFlag -> GHC.Base.String. (* Skipping definition `DynFlags.useInstead' *) (* Skipping definition `DynFlags.nop' *) Axiom flagSpecOf : WarningFlag -> option (FlagSpec WarningFlag). Axiom wWarningFlags : list (FlagSpec WarningFlag). 
Axiom wWarningFlagsDeps : list (Deprecation * FlagSpec WarningFlag)%type. Axiom negatableFlagsDeps : list (Deprecation * FlagSpec GeneralFlag)%type. Axiom dFlagsDeps : list (Deprecation * FlagSpec GeneralFlag)%type. Axiom fFlags : list (FlagSpec GeneralFlag). Axiom fFlagsDeps : list (Deprecation * FlagSpec GeneralFlag)%type. (* Skipping definition `DynFlags.fLangFlags' *) (* Skipping definition `DynFlags.fLangFlagsDeps' *) Axiom supportedLanguages : list GHC.Base.String. (* Skipping definition `DynFlags.supportedLanguageOverlays' *) (* Skipping definition `DynFlags.supportedExtensions' *) Axiom supportedLanguagesAndExtensions : list GHC.Base.String. Axiom languageFlagsDeps : list (Deprecation * FlagSpec Language)%type. Axiom safeHaskellFlagsDeps : list (Deprecation * FlagSpec SafeHaskellMode)%type. (* Skipping definition `DynFlags.xFlags' *) (* Skipping definition `DynFlags.xFlagsDeps' *) Axiom defaultFlags : Settings -> list GeneralFlag. (* Skipping definition `DynFlags.default_PIC' *) (* Skipping definition `DynFlags.impliedGFlags' *) Axiom impliedOffGFlags : list (GeneralFlag * TurnOnFlag * GeneralFlag)%type. (* Skipping definition `DynFlags.impliedXFlags' *) Axiom optLevelFlags : list (list BinNums.N * GeneralFlag)%type. Axiom warningGroups : list (GHC.Base.String * list WarningFlag)%type. Axiom warningHierarchies : list (list GHC.Base.String). Axiom smallestGroups : WarningFlag -> list GHC.Base.String. Axiom standardWarnings : list WarningFlag. Axiom minusWOpts : list WarningFlag. Axiom minusWallOpts : list WarningFlag. Axiom minusWeverythingOpts : list WarningFlag. Axiom minusWcompatOpts : list WarningFlag. 
(* Skipping definition `DynFlags.enableUnusedBinds' *) (* Skipping definition `DynFlags.disableUnusedBinds' *) (* Skipping definition `DynFlags.unusedBindsFlags' *) (* Skipping definition `DynFlags.enableGlasgowExts' *) (* Skipping definition `DynFlags.disableGlasgowExts' *) (* Skipping definition `DynFlags.glasgowExtsFlags' *) (* Skipping definition `DynFlags.rtsIsProfiled' *) (* Skipping definition `DynFlags.dynamicGhc' *) (* Skipping definition `DynFlags.setWarnSafe' *) (* Skipping definition `DynFlags.setWarnUnsafe' *) (* Skipping definition `DynFlags.setPackageTrust' *) (* Skipping definition `DynFlags.setGenDeriving' *) (* Skipping definition `DynFlags.setOverlappingInsts' *) (* Skipping definition `DynFlags.setIncoherentInsts' *) (* Skipping definition `DynFlags.checkTemplateHaskellOk' *) (* Skipping definition `DynFlags.upd' *) (* Skipping definition `DynFlags.updM' *) (* Skipping definition `DynFlags.noArg' *) (* Skipping definition `DynFlags.noArgM' *) (* Skipping definition `DynFlags.hasArg' *) (* Skipping definition `DynFlags.sepArg' *) (* Skipping definition `DynFlags.intSuffix' *) (* Skipping definition `DynFlags.intSuffixM' *) (* Skipping definition `DynFlags.floatSuffix' *) (* Skipping definition `DynFlags.optIntSuffixM' *) (* Skipping definition `DynFlags.setDumpFlag' *) (* Skipping definition `DynFlags.addWay' *) Axiom addWay' : Way -> DynFlags -> DynFlags. 
(* Skipping definition `DynFlags.removeWayDyn' *) (* Skipping definition `DynFlags.setGeneralFlag' *) (* Skipping definition `DynFlags.unSetGeneralFlag' *) (* Skipping definition `DynFlags.setGeneralFlag'' *) (* Skipping definition `DynFlags.unSetGeneralFlag'' *) (* Skipping definition `DynFlags.setWarningFlag' *) (* Skipping definition `DynFlags.unSetWarningFlag' *) (* Skipping definition `DynFlags.setFatalWarningFlag' *) (* Skipping definition `DynFlags.unSetFatalWarningFlag' *) (* Skipping definition `DynFlags.setExtensionFlag' *) (* Skipping definition `DynFlags.unSetExtensionFlag' *) (* Skipping definition `DynFlags.setExtensionFlag'' *) (* Skipping definition `DynFlags.unSetExtensionFlag'' *) Axiom alterSettings : (Settings -> Settings) -> DynFlags -> DynFlags. (* Skipping definition `DynFlags.setDumpFlag'' *) (* Skipping definition `DynFlags.forceRecompile' *) (* Skipping definition `DynFlags.setVerboseCore2Core' *) (* Skipping definition `DynFlags.setVerbosity' *) (* Skipping definition `DynFlags.setDebugLevel' *) (* Skipping definition `DynFlags.addPkgConfRef' *) (* Skipping definition `DynFlags.removeUserPkgConf' *) (* Skipping definition `DynFlags.removeGlobalPkgConf' *) (* Skipping definition `DynFlags.clearPkgConf' *) (* Skipping definition `DynFlags.parsePackageFlag' *) (* Skipping definition `DynFlags.exposePackage' *) (* Skipping definition `DynFlags.exposePackageId' *) (* Skipping definition `DynFlags.exposePluginPackage' *) (* Skipping definition `DynFlags.exposePluginPackageId' *) (* Skipping definition `DynFlags.hidePackage' *) (* Skipping definition `DynFlags.ignorePackage' *) (* Skipping definition `DynFlags.trustPackage' *) (* Skipping definition `DynFlags.distrustPackage' *) Axiom exposePackage' : GHC.Base.String -> DynFlags -> DynFlags. 
(* Skipping definition `DynFlags.parsePackageArg' *) (* Skipping definition `DynFlags.parseUnitIdArg' *) (* Skipping definition `DynFlags.setUnitId' *) Axiom canonicalizeHomeModule : DynFlags -> Module.ModuleName -> Module.Module. (* Skipping definition `DynFlags.interpretPackageEnv' *) (* Skipping definition `DynFlags.setTarget' *) (* Skipping definition `DynFlags.setTargetWithPlatform' *) (* Skipping definition `DynFlags.setObjTarget' *) (* Skipping definition `DynFlags.setOptLevel' *) Axiom checkOptLevel : BinNums.N -> DynFlags -> Data.Either.Either GHC.Base.String DynFlags. (* Skipping definition `DynFlags.setDPHOpt' *) (* Skipping definition `DynFlags.setMainIs' *) Axiom addLdInputs : Option -> DynFlags -> DynFlags. (* Skipping definition `DynFlags.addImportPath' *) (* Skipping definition `DynFlags.addLibraryPath' *) (* Skipping definition `DynFlags.addIncludePath' *) (* Skipping definition `DynFlags.addFrameworkPath' *) Axiom split_marker : GHC.Char.Char. Axiom splitPathList : GHC.Base.String -> list GHC.Base.String. (* Skipping definition `DynFlags.setTmpDir' *) (* Skipping definition `DynFlags.setRtsOpts' *) (* Skipping definition `DynFlags.setRtsOptsEnabled' *) (* Skipping definition `DynFlags.setOptHpcDir' *) Axiom picCCOpts : DynFlags -> list GHC.Base.String. Axiom picPOpts : DynFlags -> list GHC.Base.String. Axiom can_split : bool. Axiom compilerInfo : DynFlags -> list (GHC.Base.String * GHC.Base.String)%type. Axiom bLOCK_SIZE_W : DynFlags -> BinNums.N. Axiom wORD_SIZE_IN_BITS : DynFlags -> BinNums.N. Axiom tAG_MASK : DynFlags -> BinNums.N. Axiom mAX_PTR_TAG : DynFlags -> BinNums.N. Axiom tARGET_MIN_INT : DynFlags -> GHC.Num.Integer. Axiom tARGET_MAX_INT : DynFlags -> GHC.Num.Integer. Axiom tARGET_MAX_WORD : DynFlags -> GHC.Num.Integer. Axiom makeDynFlagsConsistent : DynFlags -> (DynFlags * list (SrcLoc.Located GHC.Base.String))%type. Axiom defaultGlobalDynFlags : DynFlags. 
(* Skipping definition `DynFlags.v_unsafeGlobalDynFlags' *) Axiom unsafeGlobalDynFlags : DynFlags. (* Skipping definition `DynFlags.setUnsafeGlobalDynFlags' *) Axiom isSseEnabled : DynFlags -> bool. Axiom isSse2Enabled : DynFlags -> bool. Axiom isSse4_2Enabled : DynFlags -> bool. Axiom isAvxEnabled : DynFlags -> bool. Axiom isAvx2Enabled : DynFlags -> bool. Axiom isAvx512cdEnabled : DynFlags -> bool. Axiom isAvx512erEnabled : DynFlags -> bool. Axiom isAvx512fEnabled : DynFlags -> bool. Axiom isAvx512pfEnabled : DynFlags -> bool. Axiom isBmiEnabled : DynFlags -> bool. Axiom isBmi2Enabled : DynFlags -> bool. Axiom decodeSize : GHC.Base.String -> GHC.Num.Integer. Axiom emptyFilesToClean : FilesToClean. (* External variables: Type bool list op_zt__ option BinNums.N Data.Either.Either Data.Set.Internal.Set_ EnumSet.EnumSet GHC.Base.Eq_ GHC.Base.Ord GHC.Base.String GHC.Char.Char GHC.Num.Integer HsToCoq.Err.Build_Default HsToCoq.Err.Default HsToCoq.Err.default Module.ComponentId Module.Module Module.ModuleName Module.UnitId SrcLoc.Located SrcLoc.SrcSpan *)
{"author": "plclub", "repo": "hs-to-coq", "sha": "e6401f6f054a2c1ff5e63a17ab8af2bcd5861c9c", "save_path": "github-repos/coq/plclub-hs-to-coq", "path": "github-repos/coq/plclub-hs-to-coq/hs-to-coq-e6401f6f054a2c1ff5e63a17ab8af2bcd5861c9c/examples/ghc/lib/DynFlags.v"}
"""
    MM1K(λ, μ, k)

Create an `M/M/1/K` queueing model with arrival rate `λ`, service rate `μ`
and system capacity `k`.  The traffic intensity `ρ = λ/μ` is precomputed.
"""
struct MM1K{T} <: AbstractMMCK
    λ::Union{T, Real}     # arrival rate
    μ::Union{T, Real}     # service rate
    k::Union{T, Integer}  # system capacity (maximum number of customers)
    ρ::Union{T, Real}     # traffic intensity λ/μ

    function MM1K(λ, μ, k)
        # Element type taken from the types of the two rates.
        T = Union{typeof(μ), typeof(λ)}
        new{T}(λ, μ, k, λ/μ)
    end
end

"""
    pn(m, n)

Steady-state probability that the system holds exactly `n` customers.

NOTE(review): `n` is not validated against `0 ≤ n ≤ m.k` — confirm that
callers guarantee this.
"""
function pn(m::MM1K, n::Int)
    ρ = m.ρ
    k = m.k
    if ρ == 1
        # λ == μ: all k+1 states are equally likely.
        1 / (k + 1)
    else
        ρ^n * (1 - ρ) / (1 - ρ^(k+1))
    end
end

"""
    L(m)

Expected number of customers in the system.
"""
function L(m::MM1K)
    ρ = m.ρ
    k = m.k
    if ρ == 1
        k / 2
    else
        l = k + 1
        ρ * (1 + k*ρ^l - l*ρ^k) / (1 - ρ) / (1 - ρ^l)
    end
end

"""
    Lq(m)

Expected number of customers waiting in the queue (excluding the one in
service): `L(m)` minus the busy probability `1 - pn(m, 0)` (the second
term below equals `ρ*(1 - ρ^k)/(1 - ρ^(k+1))`).
"""
function Lq(m::MM1K)
    ρ = m.ρ
    k = m.k
    if ρ == 1
        k * (k - 1) / 2 / (k + 1)
    else
        L(m) - ρ * (1 - ρ^k) / (1 - ρ^(k+1))
    end
end

"""
    W(m)

Mean time spent in the system, computed as `L/λe` (Little's law).

NOTE(review): `λe` (presumably the effective arrival rate) is not defined
in this file — confirm it is provided elsewhere in the package.
"""
function W(m::MM1K)
    L(m) / λe(m)
end

"""
    Wq(m)

Mean waiting time in the queue: time in system minus the mean service
time `1/μ`.
"""
function Wq(m::MM1K)
    W(m) - 1/m.μ
end
{"hexsha": "b840d036b875d1df128933fcf4ecb21c39f8f531", "size": 712, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mm1k.jl", "max_stars_repo_name": "ndgnuh/MarkovAndQueueModels.jl", "max_stars_repo_head_hexsha": "30d0c3ecb9da61a466b95b791e8dbefc854ed38f", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mm1k.jl", "max_issues_repo_name": "ndgnuh/MarkovAndQueueModels.jl", "max_issues_repo_head_hexsha": "30d0c3ecb9da61a466b95b791e8dbefc854ed38f", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mm1k.jl", "max_forks_repo_name": "ndgnuh/MarkovAndQueueModels.jl", "max_forks_repo_head_hexsha": "30d0c3ecb9da61a466b95b791e8dbefc854ed38f", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.9454545455, "max_line_length": 47, "alphanum_fraction": 0.4747191011, "num_tokens": 380}
[STATEMENT] lemma finite_completion_lemma: "finite I ==> (\<forall>i \<in> I. F \<in> (A i) leadsTo (A' i \<union> C)) --> (\<forall>i \<in> I. F \<in> (A' i) co (A' i \<union> C)) --> F \<in> (\<Inter>i \<in> I. A i) leadsTo ((\<Inter>i \<in> I. A' i) \<union> C)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite I \<Longrightarrow> (\<forall>i\<in>I. F \<in> A i \<longmapsto> A' i \<union> C) \<longrightarrow> (\<forall>i\<in>I. F \<in> A' i co A' i \<union> C) \<longrightarrow> F \<in> \<Inter> (A ` I) \<longmapsto> \<Inter> (A' ` I) \<union> C [PROOF STEP] apply (erule finite_induct, auto) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A x \<inter> \<Inter> (A ` Fa) \<longmapsto> A' x \<inter> \<Inter> (A' ` Fa) \<union> C [PROOF STEP] apply (rule completion) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A x \<longmapsto> A' x \<union> C 2. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A' x co A' x \<union> C 3. \<And>x Fa. 
\<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C 4. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A' ` Fa) co \<Inter> (A' ` Fa) \<union> C [PROOF STEP] prefer 4 [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A' ` Fa) co \<Inter> (A' ` Fa) \<union> C 2. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A x \<longmapsto> A' x \<union> C 3. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. 
F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A' x co A' x \<union> C 4. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C [PROOF STEP] apply (simp only: INT_simps [symmetric]) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> (\<Inter>x\<in>Fa. A' x \<union> C)\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A' ` Fa) co (\<Inter>x\<in>Fa. A' x \<union> C) 2. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A x \<longmapsto> A' x \<union> C 3. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> A' x co A' x \<union> C 4. \<And>x Fa. \<lbrakk>finite Fa; x \<notin> Fa; F \<in> A x \<longmapsto> A' x \<union> C; \<forall>i\<in>Fa. 
F \<in> A i \<longmapsto> A' i \<union> C; F \<in> A' x co A' x \<union> C; \<forall>i\<in>Fa. F \<in> A' i co A' i \<union> C; F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C\<rbrakk> \<Longrightarrow> F \<in> \<Inter> (A ` Fa) \<longmapsto> \<Inter> (A' ` Fa) \<union> C [PROOF STEP] apply (rule constrains_INT, auto) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 2666, "file": null, "length": 6}
# # Copyright (c) 2017, UT-BATTELLE, LLC # All rights reserved. # # This software is released under the BSD license detailed # in the LICENSE file in the top level a-prime directory # ###Work in Progress: Plot meridional averages for different fields in the same plot. ###07/03/2017 import matplotlib as mpl #changing the default backend to agg to resolve contouring issue on rhea mpl.use('Agg') from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator import numpy from netCDF4 import Dataset from read_monthly_data_ts import read_monthly_data_ts from get_season_months_index import get_season_months_index from get_days_in_season_months import get_days_in_season_months from get_reg_area_avg import get_reg_area_avg from aggregate_ts_weighted import aggregate_ts_weighted from get_reg_seasonal_avg import get_reg_seasonal_avg from get_season_name import get_season_name from get_reg_avg_climo import get_reg_avg_climo from optparse import OptionParser import argparse def plot_meridional_avg_multiple_fields_climo (indir, casename, field_name, interp_grid, interp_method, ref_case, ref_interp_grid, ref_interp_method, begin_yr, end_yr, begin_month, end_month, aggregate, debug = False): n_fields = len(field_names) for i,field_name in enumerate(field_names): print __name__, 'casename: ', casename meridional_avg, lon_reg, units = get_reg_meridional_avg_climo ( indir = indir, casename = casename, field_name = field_names[i], interp_grid = interp_grid, interp_method = interp_method, begin_yr = begin_yr, end_yr = end_yr, begin_month = begin_month, end_month = end_month, reg = reg, debug = debug) if i == 0: plot_field = numpy.zeros((n_fields, meridional_avg.shape[0])) units_list = [] plot_field[i, :] = meridional_avg units_list.append(units) if ref_case == 'CERES-EBAF': if field_name == 'FLNT': field_name_ref = 'FLUT' if field_name == 'RESTOM': field_name_ref = 'RESTOA' if field_name == 'FSNT': field_name_ref = 'FSNTOA' elif 
ref_case == 'HadISST': if field_name == 'TS': field_name_ref = 'SST' else: field_name_ref = field_name ref_meridional_avg, lon_reg, ref_units = get_reg_meridional_avg_climo ( indir = ref_case_dir, casename = ref_case, field_name = field_name_ref, interp_grid = ref_interp_grid, interp_method = ref_interp_method, begin_yr = begin_yr, end_yr = end_yr, begin_month = begin_month, end_month = end_month, reg = reg, debug = debug) if i == 0: ref_plot_field = numpy.zeros((n_fields, meridional_avg.shape[0])) ref_plot_field[i, :] = ref_meridional_avg if debug: print __name__, 'ref_plot_field.shape ', ref_plot_field.shape if debug: print __name__, 'plot_field: ', plot_field plot_field_mean = numpy.mean(plot_field, axis = 1) ref_plot_field_mean = numpy.mean(ref_plot_field, axis = 1) f, ax = plt.subplots(n_fields, sharex = True, figsize=(8.5,11)) nlon = lon_reg.shape[0] f.text(0.5, 0.04, 'Longitude', ha='center', fontsize = 24) season = get_season_name(begin_month, end_month) plt.suptitle(reg_name + '\n Meridional Avg. 
' + season, fontsize = 24) ref_case_text = ref_case + ' ' + field_name_ref + ' climo' for i,field_name in enumerate(field_names): min_plot = min(numpy.amin(plot_field[i, :]), ref_plot_field[i, 0]) max_plot = max(numpy.amax(plot_field[i, :]), ref_plot_field[i, 0]) y_axis_ll = min_plot - 0.5*numpy.std(plot_field[i, :]) y_axis_ul = max_plot + 0.5 * numpy.std(plot_field[i,:]) ax[i].axis([lon_reg[0],lon_reg[-1], y_axis_ll, y_axis_ul]) print 'lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot: ', \ lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot test_line, = ax[i].plot(lon_reg, plot_field[i, :], color = colors[i], linewidth = 1.0, label = casename) ref_line, = ax[i].plot(lon_reg, ref_plot_field[i, :], color = 'black', linewidth = 1.0, label = ref_case) if i == 0: ax[i].legend(bbox_to_anchor = (1.0,1.5), handles=[ref_line, test_line], fontsize = 10) ax[i].set_title(field_name, fontsize = 12) ax[i].text(0.04, 0.5, field_name + ' (' + units_list[i] + ')', va='center', rotation='vertical', fontsize = 16) ax[i].get_yaxis().get_major_formatter().set_useOffset(False) ax[i].yaxis.set_major_locator(MaxNLocator(6)) for tick in ax[i].yaxis.get_major_ticks(): tick.label.set_fontsize(10) for tick in ax[i].xaxis.get_major_ticks(): tick.label.set_fontsize(10) plt.subplots_adjust(hspace=0.3) mpl.rcParams['savefig.dpi']=300 outfile = plots_dir + '/' + casename + '_' \ + meridional_avg + '_' + reg + '_' + season + '.png' plt.savefig(outfile) #plt.show() if __name__ == "__main__": parser = argparse.ArgumentParser(usage = "python %prog [options]") parser.add_argument("-d", "--debug", dest = "debug", default = False, help = "debug option to print some data") parser.add_argument("--indir", dest = "indir", help = "filepath to directory model data") parser.add_argument("-c", "--casename", dest = "casename", help = "casename of the run") parser.add_argument("-f", "--field_name", dest = "field_names", nargs = '+', help = "variable name") parser.add_argument("--interp_grid", dest = 
"interp_grid", help = "variable name") parser.add_argument("--interp_method", dest = "interp_method", help = "method used for interpolating the test case e.g. conservative_mapping") parser.add_argument("--ref_case_dir", dest = "ref_case_dir", help = "filepath to ref_case directory") parser.add_argument("--ref_case", dest = "ref_case", help = "reference casename") parser.add_argument("--ref_interp_grid", dest = "ref_interp_grid", help = "name of the interpolated grid of reference case") parser.add_argument("--ref_interp_method", dest = "ref_interp_method", help = "method used for interpolating the reference case e.g. conservative_mapping") parser.add_argument("--begin_yr", dest = "begin_yr", type = int, help = "begin year") parser.add_argument("--end_yr", dest = "end_yr", type = int, help = "end year") parser.add_argument("--begin_month", dest = "begin_month", type = int, help = "begin_month", default = 0) parser.add_argument("--end_month", dest = "end_month", type = int, help = "end_month", default = 11) parser.add_argument("--aggregate", dest = "aggregate", type = int, help = "end_month", default = 1) parser.add_argument("--reg", dest = "reg", nargs = '+', help = "regions to be analyzed/plotted") parser.add_argument("--reg_name", dest = "reg_name", nargs = '+', help = "names of regions to be placed in plots") parser.add_argument("--plots_dir", dest = "plots_dir", help = "filepath to GPCP directory") args = parser.parse_args() debug = args.debug indir = args.indir casename = args.casename field_names = args.field_names interp_grid = args.interp_grid interp_method = args.interp_method ref_case_dir = args.ref_case_dir ref_case = args.ref_case ref_interp_grid = args.ref_interp_grid ref_interp_method = args.ref_interp_method begin_yr = args.begin_yr end_yr = args.end_yr begin_month = args.begin_month end_month = args.end_month aggregate = args.aggregate reg = args.reg reg_name = args.reg_name plots_dir = args.plots_dir colors = ['b', 'g', 'r', 'c', 'm', 'y'] x = 
mpl.get_backend() print 'backend: ', x plot_meridional_avg_multiple_fields_climo( indir = indir, casename = casename, field_names = field_names, interp_grid = interp_grid, interp_method = interp_method, ref_case = ref_case, ref_interp_grid = ref_interp_grid, ref_interp_method = ref_interp_method, begin_yr = begin_yr, end_yr = end_yr, begin_month = begin_month, end_month = end_month, reg = reg, reg_name = reg_name, aggregate = aggregate, debug = debug)
{"hexsha": "2ae22ebc1ff5af7ea983e4b1cf1879b8fdb9382c", "size": 9880, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/plot_meridional_avg_multiple_fields_climo.py", "max_stars_repo_name": "E3SM-Project/a-prime", "max_stars_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-07T13:13:32.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-07T13:13:32.000Z", "max_issues_repo_path": "python/plot_meridional_avg_multiple_fields_climo.py", "max_issues_repo_name": "ACME-Climate/a-prime", "max_issues_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2017-06-07T00:26:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-09T17:03:15.000Z", "max_forks_repo_path": "python/plot_meridional_avg_multiple_fields_climo.py", "max_forks_repo_name": "ACME-Climate/a-prime", "max_forks_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-05T23:43:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-05T23:43:59.000Z", "avg_line_length": 37.0037453184, "max_line_length": 119, "alphanum_fraction": 0.5660931174, "include": true, "reason": "import numpy", "num_tokens": 2271}
module RoboSimples

using PyCall
using AbstractActuators

export NRoboClient, NRoboTest
export move, moveX, moveY, moveZ, devposition, setreference
export rmove, rmoveX, rmoveY, rmoveZ
export positionX, positionY, positionZ
export setreferenceX, setreferenceY, setreferenceZ
export numaxes, axesnames, moveto

"""
    NRoboClient(devname, ip="192.168.0.140", port=9543; axes=["x","y","z"])

XML-RPC client for a simple cartesian robot server. Every motion/position
method forwards to the Python `xmlrpc.client.ServerProxy` stored in `server`.
"""
struct NRoboClient <: AbstractCartesianRobot
    devname::String      # device identifier
    ip::String           # server IP address
    port::Int32          # server TCP port
    server::PyObject     # xmlrpc.client.ServerProxy handle
    axes::Vector{String} # axis names, e.g. ["x", "y", "z"]
end

AbstractActuators.numaxes(dev::NRoboClient) = length(dev.axes)
AbstractActuators.axesnames(dev::NRoboClient) = dev.axes

function NRoboClient(devname, ip="192.168.0.140", port=9543; axes=["x", "y", "z"])
    xmlrpc = pyimport("xmlrpc.client")
    server = xmlrpc.ServerProxy("http://$ip:$port")
    NRoboClient(devname, ip, port, server, axes)
end

# Move a single axis (by name or 1-based index); `r=true` requests a
# relative move on the server side.
AbstractActuators.move(dev::NRoboClient, ax, mm; r=false) =
    dev.server["move"](mm, string(ax), r)
AbstractActuators.move(dev::NRoboClient, ax::Integer, mm; r=false) =
    move(dev, dev.axes[ax], mm; r=r)

# BUG FIX: the second positional argument was declared as `x=AbstractVector`
# (the *type* used as a default value) instead of the intended type
# annotation `x::AbstractVector`.
function AbstractActuators.move(dev::NRoboClient, axes::AbstractVector,
                                x::AbstractVector; r=false)
    ndof = length(axes)
    for i in 1:ndof
        move(dev, axes[i], x[i]; r=r)
    end
    return
end

AbstractActuators.moveto(dev::NRoboClient, x) = move(dev, dev.axes, x, r=false)

AbstractActuators.moveX(dev::NRoboClient, mm) = dev.server["moveX"](mm)
AbstractActuators.moveY(dev::NRoboClient, mm) = dev.server["moveY"](mm)
AbstractActuators.moveZ(dev::NRoboClient, mm) = dev.server["moveZ"](mm)

AbstractActuators.rmoveX(dev::NRoboClient, mm) = dev.server["rmoveX"](mm)
AbstractActuators.rmoveY(dev::NRoboClient, mm) = dev.server["rmoveY"](mm)
AbstractActuators.rmoveZ(dev::NRoboClient, mm) = dev.server["rmoveZ"](mm)

import Base

# Position of all three axes, returned as a Dict keyed by axis name.
# `pulses=true` returns raw encoder pulses instead of millimeters.
function AbstractActuators.devposition(dev::NRoboClient; pulses=false)
    x = dev.server["position"]("x", pulses)
    y = dev.server["position"]("y", pulses)
    z = dev.server["position"]("z", pulses)
    return Dict{String,Float64}("x"=>x, "y"=>y, "z"=>z)
end

AbstractActuators.devposition(dev::NRoboClient, ax; pulses=false) =
    dev.server["position"](string(ax), pulses)
AbstractActuators.devposition(dev::NRoboClient, ax::Integer; pulses=false) =
    devposition(dev, dev.axes[ax]; pulses=pulses)

# BUG FIX: every branch of this method passed `pulses=pulse`, referencing an
# undefined variable `pulse` (UndefVarError on first call). Also generalized
# from a hard-coded 1/2/3-entry if-chain to any number of axis indices,
# which is backward compatible for the previously supported lengths.
function AbstractActuators.devposition(dev::NRoboClient, axes::AbstractVector;
                                       pulses=false)
    return [devposition(dev, dev.axes[i]; pulses=pulses) for i in axes]
end

AbstractActuators.positionX(dev::NRoboClient; pulses=false) = devposition(dev, "x"; pulses=pulses)
AbstractActuators.positionY(dev::NRoboClient; pulses=false) = devposition(dev, "y"; pulses=pulses)
AbstractActuators.positionZ(dev::NRoboClient; pulses=false) = devposition(dev, "z"; pulses=pulses)

# Define the current position of one axis as `mm` (default: zero the axis).
AbstractActuators.setreference(dev::NRoboClient, ax, mm=0; pulses=false) =
    dev.server["set_reference"](string(ax), mm, pulses)
AbstractActuators.setreference(dev::NRoboClient, ax::Integer, mm=0; pulses=false) =
    setreference(dev, dev.axes[ax], mm; pulses=pulses)

# Vector form: a scalar `mm` (length 1) is broadcast to every axis.
function AbstractActuators.setreference(dev::NRoboClient, ax::AbstractVector,
                                        mm=0; pulses=false)
    nax = length(ax)
    if length(mm) == 1
        mm = fill(mm[1], nax)
    end
    for i in 1:nax
        setreference(dev, ax[i], mm[i]; pulses=pulses)
    end
end

AbstractActuators.setreferenceX(dev::NRoboClient, mm=0; pulses=false) =
    setreference(dev, "x", mm; pulses=pulses)
AbstractActuators.setreferenceY(dev::NRoboClient, mm=0; pulses=false) =
    setreference(dev, "y", mm; pulses=pulses)
AbstractActuators.setreferenceZ(dev::NRoboClient, mm=0; pulses=false) =
    setreference(dev, "z", mm; pulses=pulses)

end
{"hexsha": "e53fe1d58dc6d8c12210666a63ab5416eb345e00", "size": 4238, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/RoboSimples.jl", "max_stars_repo_name": "pjsjipt/RoboSimples.jl", "max_stars_repo_head_hexsha": "b30d59c223d6cde866937d8bbd1c4779b5c933db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/RoboSimples.jl", "max_issues_repo_name": "pjsjipt/RoboSimples.jl", "max_issues_repo_head_hexsha": "b30d59c223d6cde866937d8bbd1c4779b5c933db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/RoboSimples.jl", "max_forks_repo_name": "pjsjipt/RoboSimples.jl", "max_forks_repo_head_hexsha": "b30d59c223d6cde866937d8bbd1c4779b5c933db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3925925926, "max_line_length": 83, "alphanum_fraction": 0.6757904672, "num_tokens": 1234}
# Shared spectral-element utilities; expected to provide lglpoints,
# spectralderivative, scatter_matrix, sparse, and I (TODO confirm exports).
include(joinpath("..", "common", "utils.jl"))

using LinearAlgebra: Diagonal, diag

# Kronecker-product shorthand.
⊗ = kron

abstract type AbstractProblem end

# Isothermal reference state: constant temperature T₀. The derived fields
# (scale height H, surface density ρ₀, heat capacity Cᵥ, specific-heat ratio γ)
# are computed in the keyword constructor from T₀, R, p₀ and g.
struct Isothermal{T} <: AbstractProblem
    T₀::T   # reference temperature
    R::T    # gas constant
    p₀::T   # surface (z = 0) pressure
    g::T    # gravitational acceleration
    ρ₀::T   # surface density, p₀ / (R T₀)
    H::T    # scale height, R T₀ / g
    Cᵥ::T   # heat capacity at constant volume, 5R/2
    γ::T    # specific-heat ratio, R/Cᵥ + 1
    function Isothermal{T}(;
        T₀ = 300,
        R = 287,
        p₀ = 10^5,
        g = T(98 // 100),
    ) where {T}
        H = R * T₀ / g
        ρ₀ = p₀ / (R * T₀)
        Cᵥ = 5R / 2
        γ = R / Cᵥ + 1
        new{T}(T₀, R, p₀, g, ρ₀, H, Cᵥ, γ)
    end
end

# Hydrostatic reference profiles: exponential decay with scale height H.
p̄_(s::AbstractProblem, z) = s.p₀ * exp(-z / s.H)
ρ̄_(s::AbstractProblem, z) = s.ρ₀ * exp(-z / s.H)
Ē_(s::AbstractProblem, z) = ρ̄_(s, z) * (s.Cᵥ * s.T₀ + s.g * z)
# Pressure perturbation from energy and density perturbations.
δp_(s::AbstractProblem, z, δE, δρ) = (s.R / s.Cᵥ) * (δE - s.g * z * δρ)
# Reference sound speed (height-independent for the isothermal state).
c̄_(s::AbstractProblem, _) = sqrt(s.γ * s.p₀ / s.ρ₀)

# 3×3 flux Jacobian of the linearized system at height z
# (state ordering appears to be (δρ, ρ̄ δw, δE) — matches the boundary
# treatment in tendency! below).
A_(s, z) = [
    0 1 0
    -(s.R / s.Cᵥ)*s.g*z 0 (s.R/s.Cᵥ)
    0 (Ē_(s, z) + p̄_(s, z))/ρ̄_(s, z) 0
]

# LGL quadrature nodes ξ, weights ω, and spectral differentiation matrix D
# for polynomial order N.
function element_operators(N, T = Float64)
    ξ, ω = lglpoints(T, N)
    D = spectralderivative(ξ)
    return (ξ = ξ, ω = ω, D = D)
end

# Build a uniform 1-D mesh of K elements on [z0, z1] using the element
# operators `elem`. Returns CG/DG node locations, the CG↔DG scatter matrix,
# the element Jacobian, element size, and the assembled CG mass weights.
function create_mesh(K, elem, z0, z1)
    ξ = elem.ξ

    # Number of points in element and polynomial order
    Nq = length(ξ)
    N = Nq - 1

    # Element size
    Δz = (z1 - z0) / K
    J = Δz / (ξ[Nq] - ξ[1])

    # shift ξ to go (0, 1)
    ξ01 = (ξ[1:(end - 1)] .- ξ[1]) / (ξ[Nq] - ξ[1])

    # cg to dg scatter matrix
    Q = scatter_matrix(N, K)

    # CG DOF locations
    zcg = [z0 .+ Δz * (ξ01 .+ (0:(K - 1))')[:]; z1]

    # DG DOF locations
    zdg = reshape(Q * zcg, Nq, K)

    W = Diagonal(elem.ω)
    I_KK = sparse(I, K, K)
    # Lumped CG mass weights: diagonal of the assembled mass matrix Q' M Q.
    Wcg = Array(diag(Q' * (I_KK ⊗ (J * W)) * Q))

    return (zcg = zcg, zdg = zdg, scatter = Q, J = J, Δz = Δz, Wcg = Wcg)
end

"""
    element_tendency!(∂q, q, elem_operator, problem, z_elem)

Evaluate the element rhs `∂q` associated with `q` for the
`problem::AbstractProblem` using the DG `elem_operator`.

`q` and `∂q` are taken to be `NamedTuple`s with fields `(δρ, δw, δE)`
"""
function element_tendency!(∂q, q, O, J, s, z)
    # Reference-state profiles evaluated at the element's DG nodes.
    ρ̄ = ρ̄_.(Ref(s), z)
    Ē = Ē_.(Ref(s), z)
    p̄ = p̄_.(Ref(s), z)
    δp = δp_.(Ref(s), z, q.δE, q.δρ)

    # ∫ ϕ ∂δρ = ∫ (∂_ξ ϕ) ρ̄ δw   (weak derivative; divide by lumped mass J ω)
    ∂q.δρ .= (O.D' * (O.ω .* ρ̄ .* q.δw)) ./ (J .* O.ω)

    # ∫ ϕ ∂δw = -∫ (ϕ / ρ̄) (∂_ξ δp + g δρ)   (strong derivative for δp)
    ∂q.δw .= -s.g * q.δρ ./ ρ̄ - (O.D * δp) ./ (J .* ρ̄)

    # ∫ ϕ ∂δE = ∫ (∂_ξ ϕ) (Ē + p̄) δw
    ∂q.δE .= (O.D' * (O.ω .* (Ē + p̄) .* q.δw)) ./ (J .* O.ω)
end

# L2-project the elementwise (DG) fields onto the continuous (CG) space and
# scatter the result back into DG storage, field by field, in place.
function dg2cg_scatter(q, mesh, elem)
    Q = mesh.scatter
    ω = elem.ω
    for i in 1:length(q)
        q[i][:] .= Q * ((Q' * (mesh.J * ω .* q[i])[:]) ./ mesh.Wcg)
    end
end

# Full right-hand side: volume terms, optional MMS forcing, upwind boundary
# treatment at the bottom (bc[1]) and top (bc[2]) boundaries, then CG
# projection. `bc[k]` (when not `nothing`) must carry the boundary velocity
# `δw(t)` and the flux splitting matrices `A`, `A⁻`, `A⁺`.
function tendency!(∂q, q, t, problem, mesh, elem, bc, forcing = nothing)
    # Evaluate the volume terms
    element_tendency!(∂q, q, elem, mesh.J, problem, mesh.zdg)

    # Add in MMS forcing
    if !isnothing(forcing)
        ∂q.δρ .+= forcing.ρ.(mesh.zdg, t)
        ∂q.δw .+= forcing.w.(mesh.zdg, t)
        ∂q.δE .+= forcing.E.(mesh.zdg, t)
    end

    # Use upwind boundary treatment
    if !isnothing(bc[1])
        # Get the boundary velocity and reference density
        δw0 = bc[1].δw(t)
        ρ̄0 = ρ̄_(problem, mesh.zdg[1])

        # Set the plus and minus states (momentum reflected through the
        # prescribed boundary velocity δw0)
        q⁻ = [q.δρ[1], ρ̄0 * q.δw[1], q.δE[1]]
        q⁺ = [q⁻[1], -q⁻[2] + 2ρ̄0 * δw0, q⁻[3]]

        # Evaluate the upwind flux
        f0 = bc[1].A⁻ * q⁻ + bc[1].A⁺ * q⁺

        # Evaluate the minus-side flux for w (strong derivative)
        flw = bc[1].A[2, :]' * q⁻

        # Apply lift
        ∂q.δρ[1] += f0[1] / (mesh.J * elem.ω[1])
        ∂q.δw[1] += (f0[2] - flw) / (mesh.J * elem.ω[1] * ρ̄0)
        ∂q.δE[1] += f0[3] / (mesh.J * elem.ω[1])
    end

    if !isnothing(bc[2])
        # Get the boundary velocity and reference density
        δw1 = bc[2].δw(t)
        ρ̄1 = ρ̄_(problem, mesh.zdg[end])

        # Set the plus and minus states
        q⁻ = [q.δρ[end], ρ̄1 * q.δw[end], q.δE[end]]
        q⁺ = [q⁻[1], -q⁻[2] + 2ρ̄1 * δw1, q⁻[3]]

        # Evaluate the upwind flux
        f1 = bc[2].A⁻ * q⁻ + bc[2].A⁺ * q⁺

        # Evaluate the minus-side flux for w (strong derivative)
        flw = bc[2].A[2, :]' * q⁻

        # Apply lift
        ∂q.δρ[end] += f1[1] / (mesh.J * elem.ω[end])
        ∂q.δw[end] += (f1[2] - flw) / (mesh.J * elem.ω[end] * ρ̄1)
        ∂q.δE[end] += f1[3] / (mesh.J * elem.ω[end])
    end

    # DG -> CG projection and then scatter for storage
    dg2cg_scatter(∂q, mesh, elem)
end
{"hexsha": "1f373cfecf10ec18c0c1ebaa234bf83f986d01d3", "size": 4436, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "linear_energy/cg_sem.jl", "max_stars_repo_name": "jkozdon/clima_sem_vertical", "max_stars_repo_head_hexsha": "69c7d1743cb970ed699cb62a4dac347e178967bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear_energy/cg_sem.jl", "max_issues_repo_name": "jkozdon/clima_sem_vertical", "max_issues_repo_head_hexsha": "69c7d1743cb970ed699cb62a4dac347e178967bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear_energy/cg_sem.jl", "max_forks_repo_name": "jkozdon/clima_sem_vertical", "max_forks_repo_head_hexsha": "69c7d1743cb970ed699cb62a4dac347e178967bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5628742515, "max_line_length": 73, "alphanum_fraction": 0.4833183048, "num_tokens": 2003}
function KeggSpeciespKa = getKeggpKas(target_cids, target_inchi, n_pkas)
% getKeggpKas  Estimate pKas and major-microspecies data for KEGG compounds.
%
% Shells out to ChemAxon's cxcalc (pKa estimation + major microspecies at
% pH 7) and to OpenBabel (InChI <-> SMILES conversion).
%
% USAGE:
%    KeggSpeciespKa = getKeggpKas(target_cids, target_inchi, n_pkas)
%
% INPUTS:
%    target_cids:   vector of KEGG compound ids
%    target_inchi:  cell array of InChI strings, same length as target_cids
%    n_pkas:        number of acidic and basic pKas to request (default 20)
%
% OUTPUT:
%    KeggSpeciespKa: column struct array with fields pKas (symmetric matrix
%                    of pKas between successive protonation states),
%                    majorMSpH7 (logical indicator of the major microspecies
%                    at pH 7), zs (charges), nHs (proton counts), cid.

if nargin < 3
    n_pkas = 20;
end

% Resolve external tool paths (absolute on macOS, rely on PATH elsewhere).
if ismac
    cxcalc_cmd = '/Applications/ChemAxon/JChem/bin/cxcalc';
    babel_cmd = '/usr/local/bin/babel';
else
    cxcalc_cmd = 'cxcalc';
    babel_cmd = 'babel';
end

[success, ~] = system(cxcalc_cmd);
if success ~= 0
    % BUG FIX: message previously said "babel" although the probe above
    % checks cxcalc.
    error('Please make sure the command line program "cxcalc" is installed and in the path');
end

KeggSpeciespKa = [];

for i = 1:length(target_cids)
    cid = target_cids(i);
    inchi = target_inchi{i};
    if isempty(inchi)
        continue
    end

    % Convert InChI to SMILES; fall back to the raw InChI on failure.
    if ispc
        [success, smiles] = system(['echo ' inchi ' | ' babel_cmd ' -iinchi -osmi']);
    else
        [success, smiles] = system(['echo "' inchi '" | ' babel_cmd ' -iinchi -osmi']);
    end
    if success == 0
        smiles = strtok(smiles);
        structure = smiles;
    else
        structure = inchi;
    end

    fprintf('Using cxcalc on C%05d: %s\n', cid, structure);
    if ispc
        cmd = [cxcalc_cmd ' "' structure '" pka -a ' num2str(n_pkas) ' -b ' num2str(n_pkas) ' majorms -M true --pH 7'];
    else
        cmd = ['echo "' structure '" | ' cxcalc_cmd ' pka -a ' num2str(n_pkas) ' -b ' num2str(n_pkas) ' majorms -M true --pH 7'];
    end
    [success, cxcalc_stdout] = system(cmd);
    if ~isempty(strfind(cxcalc_stdout, 'Inconsistent molecular structure'))
        success = -1;
    end

    if success == 0
        % cxcalc output: tab-separated header line followed by a value line.
        pkalist = regexp(cxcalc_stdout, '\n', 'split');
        titles = regexp(pkalist{1,1}, '\t', 'split');
        vals = regexp(pkalist{1,2}, '\t', 'split');
        if all(cellfun(@isempty, vals(2:end)))
            vals = cell(1, 2*n_pkas + 3);
        end

        % Column indices of the interleaved apKa/bpKa fields.
        % NOTE: this loop previously reused "i", shadowing the outer
        % compound loop variable; renamed to j (behavior unchanged).
        inds = zeros(2*n_pkas, 1);
        for j = 1:n_pkas
            inds(2*j-1, 1) = find(strcmp(titles, ['apKa' num2str(j)]));
            inds(2*j, 1) = find(strcmp(titles, ['bpKa' num2str(j)]));
        end
        pkalist = vals(1, inds);
        pkalist = regexprep(pkalist, ',', '\.');  % locale decimal commas
        pkalist = str2double(pkalist);
        pkalist = sort(pkalist, 'descend');
        pkalist = pkalist(pkalist >= 0 & pkalist <= 14);

        % find the nH and charge of the major macrospecies
        ind = find(strcmp(titles, 'major-ms'));
        majorms_smiles = vals{1, ind};
        if isempty(majorms_smiles)
            majorms_smiles = smiles;
        end
        % BUG FIX: these calls previously invoked plain "babel" instead of
        % babel_cmd, breaking on systems where babel is not on the PATH.
        if ispc
            cmd = ['echo ' majorms_smiles ' | ' babel_cmd ' -ismiles -oinchi ---errorlevel 0 -xFT/noiso'];
        else
            cmd = ['echo "' majorms_smiles '" | ' babel_cmd ' -ismiles -oinchi ---errorlevel 0 -xFT/noiso'];
        end
        [success, babel_stdout] = system(cmd);
        if success == 0 && ~isempty(babel_stdout) && strcmp('InChI=', babel_stdout(1:6))
            majorms_nstd_inchi = strtok(babel_stdout);
            [~, nH, charge] = getFormulaAndChargeFromInChI(majorms_nstd_inchi);
        else
            nH = 0;
            charge = 0;
        end

        idx = length(KeggSpeciespKa) + 1;
        if ~isempty(pkalist)
            % Symmetric matrix of pKas between successive protonation states.
            pkas = zeros(length(pkalist)+1, length(pkalist)+1);
            pkas(2:end, 1:end-1) = diag(pkalist);
            pkas = pkas + pkas';
            KeggSpeciespKa(idx).pKas = pkas;
            % Major microspecies at pH 7: first state whose pKa <= 7, else last.
            mmsbool = false(size(pkas,1), 1);
            if any(pkalist <= 7)
                mmsbool(find(pkalist <= 7, 1)) = true;
            else
                mmsbool(end) = true;
            end
            KeggSpeciespKa(idx).majorMSpH7 = mmsbool;
            % Charges and proton counts relative to the major microspecies.
            zs = 1:size(pkas,1);
            zs = zs - find(mmsbool);
            zs = zs + charge;
            KeggSpeciespKa(idx).zs = zs;
            nHs = 1:size(pkas,1);
            nHs = nHs - find(mmsbool);
            nHs = nHs + nH;
            KeggSpeciespKa(idx).nHs = nHs;
            KeggSpeciespKa(idx).cid = cid;
        else
            % No pKa in [0, 14]: single species, major by definition.
            KeggSpeciespKa(idx).pKas = [];
            KeggSpeciespKa(idx).majorMSpH7 = true;
            KeggSpeciespKa(idx).zs = charge;
            KeggSpeciespKa(idx).nHs = nH;
            KeggSpeciespKa(idx).cid = cid;
        end
    end
end
KeggSpeciespKa = KeggSpeciespKa';
{"author": "opencobra", "repo": "cobratoolbox", "sha": "e60274d127f65d518535fd0814d20c53dc530f73", "save_path": "github-repos/MATLAB/opencobra-cobratoolbox", "path": "github-repos/MATLAB/opencobra-cobratoolbox/cobratoolbox-e60274d127f65d518535fd0814d20c53dc530f73/src/analysis/thermo/trainingModel/new/getKeggpKas.m"}
from os.path import dirname, join
from os import path

import fire
import random
import torch
import numpy as np
from tempfile import NamedTemporaryFile
from torch import nn
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score, f1_score, precision_recall_fscore_support, confusion_matrix, \
    classification_report

from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, dotdict
from loss import ClassificationLossCompute
from opt import OpenAIAdam
from datasets import SemEval2010Task8
from text_utils import TextEncoder, LabelEncoder
from train_utils import predict, iter_data, iter_apply, persist_model, load_model
from logging_utils import ResultLogger
from analysis_util import evaluate_semeval2010_task8


def _remove_label_direction(label):
    """Strip a trailing direction suffix, e.g. 'Cause-Effect(e1,e2)' -> 'Cause-Effect'."""
    direction_suffix_start = label.find('(')
    if direction_suffix_start != -1:
        return label[:direction_suffix_start]
    else:
        return label


def _get_max_label_length(labels):
    """Return the length of the longest label string (used for column padding)."""
    return max([len(label) for label in labels])


def _print_labeled_confusion_matrix(labels, labels_dev, labels_pred_dev):
    """Print the dev-set confusion matrix with each row prefixed by its label."""
    conf_matrix = confusion_matrix(labels_dev, labels_pred_dev, labels=labels)
    conf_matrix_str = np.array2string(conf_matrix, max_line_width=120, threshold=999999)
    max_label_length = _get_max_label_length(labels)
    for (label, matrix_row) in zip(labels, conf_matrix_str.splitlines()):
        n_whitespaces = (max_label_length - len(label)) + 1
        print(label + (n_whitespaces * ' ') + matrix_row)


def _print_undirected_classifcation_scores(labels, negative_label, labels_dev, labels_pred_dev):
    """Print per-relation P/R/F1 ignoring argument direction.

    A prediction counts as a true positive only when the *directed* label
    matches exactly, but counts are aggregated per undirected relation.
    Also prints the macro F1 with and without the negative relation.
    """
    undirected_labels = list(set([_remove_label_direction(label) for label in labels if label != '<unk>']))

    tp_counts = dict()
    fp_counts = dict()
    tn_counts = dict()
    fn_counts = dict()

    for example_idx in range(len(labels_dev)):
        true_label = labels_dev[example_idx]
        pred_label = labels_pred_dev[example_idx]
        undirected_true_label = _remove_label_direction(true_label)
        undirected_pred_label = _remove_label_direction(pred_label)

        for undirected_label in undirected_labels:
            # for this label the example is supposed to be a true positive
            if undirected_label == undirected_true_label:
                if pred_label == true_label:
                    tp_counts[undirected_label] = tp_counts.get(undirected_label, 0) + 1
                else:
                    fn_counts[undirected_label] = fn_counts.get(undirected_label, 0) + 1
            # for this label the example is supposed to be a true negative
            else:
                if undirected_pred_label != undirected_label:
                    tn_counts[undirected_label] = tn_counts.get(undirected_label, 0) + 1
                else:
                    fp_counts[undirected_label] = fp_counts.get(undirected_label, 0) + 1

    macro_f1_scores = []
    macro_f1_scores_wo_negative = []

    print()
    max_label_length = _get_max_label_length(undirected_labels)
    print(max_label_length * ' ' + ' P R F1')
    for undirected_label in undirected_labels:
        tps = tp_counts.get(undirected_label, 0)
        fps = fp_counts.get(undirected_label, 0)
        fns = fn_counts.get(undirected_label, 0)

        precision_denominator = tps + fps
        recall_denominator = tps + fns
        # Guard against zero denominators; score the relation as F1 = 0.
        if precision_denominator == 0 or recall_denominator == 0:
            print("Skipping %s: division by zero, assuming f1 of 0" % undirected_label)
            macro_f1_scores.append(0)
            if undirected_label != negative_label:
                macro_f1_scores_wo_negative.append(0)
            continue

        precision = tps / precision_denominator
        recall = tps / recall_denominator
        f1_denominator = precision + recall
        if f1_denominator == 0:
            print("Skipping %s: division by zero, assuming f1 of 0" % undirected_label)
            macro_f1_scores.append(0)
            if undirected_label != negative_label:
                macro_f1_scores_wo_negative.append(0)
            continue

        f1 = 2 * (precision * recall) / f1_denominator
        label_padding = (max_label_length - len(undirected_label) - 1) * ' '
        print("{}{:6.2f}{:6.2f}{:6.2f}".format(undirected_label + ':' + label_padding, precision, recall, f1))
        macro_f1_scores.append(f1)
        if undirected_label != negative_label:
            macro_f1_scores_wo_negative.append(f1)

    print()
    print("Per relation macro f1: {:.2f}".format(np.mean(macro_f1_scores)))
    print("Per relation macro f1 excluding negative relation: {:.2f}".format(np.mean(macro_f1_scores_wo_negative)))
    print()


def _print_classification_details(label_encoder, label_idxs_dev, label_idxs_pred_dev, negative_label):
    """Print the sklearn classification report, labeled confusion matrix, and
    undirected per-relation scores for the dev predictions."""
    labels = label_encoder.get_items()
    labels_dev = [label_encoder.get_item_for_index(index) for index in label_idxs_dev]
    labels_pred_dev = [label_encoder.get_item_for_index(index) for index in label_idxs_pred_dev]
    print(classification_report(labels_dev, labels_pred_dev))
    _print_labeled_confusion_matrix(labels, labels_dev, labels_pred_dev)
    _print_undirected_classifcation_scores(labels, negative_label, labels_dev, labels_pred_dev)


def run_epoch(model, train, dev, test, compute_loss_fct, batch_size, device, epoch, label_encoder, logger,
              negative_label, log_with_id=True, verbose=False):
    """Train `model` for one epoch, then evaluate on dev (and optionally test).

    `train`/`dev`/`test` are 5-tuples (indices, mask, labels, ids, entity_ids).
    Returns (avg_dev_loss, dev_micro_f1, dev_macro_f1).
    """
    print('-' * 100)
    indices_train, mask_train, labels_train, _, _ = train
    n_batches = len(indices_train) // batch_size
    current_loss: float = 0
    seen_sentences = 0
    # Log training metrics roughly 10 times per epoch.
    modulo = max(1, int(n_batches / 10))

    # F1 is computed over the positive (non-negative) labels only.
    positive_labels = set(label_encoder.get_items())
    positive_labels.discard(negative_label)
    positive_labels = [label_encoder.get_idx_for_item(label) for label in positive_labels]

    epoch_labels_pred_train = []
    epoch_labels_train = []

    # TODO: refactor!
    for batch_no, (batch_indices, batch_mask, batch_labels) in enumerate(iter_data(
            *shuffle(indices_train, mask_train, labels_train, random_state=np.random),
            batch_size=batch_size, truncate=True, verbose=True)):
        model.train()
        x = torch.tensor(batch_indices, dtype=torch.long).to(device)
        y = torch.tensor(batch_labels, dtype=torch.long).to(device)
        mask = torch.tensor(batch_mask).to(device)

        lm_logits, clf_logits = model(x)
        # compute_loss_fct also performs the optimizer step (see its definition).
        loss = compute_loss_fct(x, y, mask, clf_logits, lm_logits)

        epoch_labels_pred_train.extend(np.argmax(clf_logits.detach().cpu(), 1))
        epoch_labels_train.extend(batch_labels)

        seen_sentences += len(batch_indices)
        current_loss += loss

        if batch_no % modulo == 0:
            # Running metrics over everything seen so far this epoch.
            train_acc = accuracy_score(epoch_labels_train, epoch_labels_pred_train) * 100
            train_micro_f1 = f1_score(epoch_labels_train, epoch_labels_pred_train, average='micro',
                                      labels=positive_labels)
            train_macro_f1 = f1_score(epoch_labels_train, epoch_labels_pred_train, average='macro',
                                      labels=positive_labels)
            print("epoch {0} - iter {1}/{2} - loss {3:.8f} - acc {4:.2f} - micro f1 {5:.2f} - macro f1 {6:.2f}"
                  .format(epoch, batch_no, n_batches, current_loss / seen_sentences, train_acc,
                          train_micro_f1, train_macro_f1))

    current_loss /= len(indices_train)

    # IMPORTANT: Switch to eval mode
    model.eval()

    indices_dev, mask_dev, labels_dev, ids_dev, _ = dev
    print('-' * 100)
    dev_logits, dev_loss = iter_apply(indices_dev, mask_dev, labels_dev, model, compute_loss_fct, device,
                                      batch_size)
    avg_dev_loss = dev_loss / len(indices_dev)
    label_pred_dev = np.argmax(dev_logits, 1)
    dev_accuracy = accuracy_score(labels_dev, label_pred_dev) * 100.
    dev_micro_f1 = f1_score(labels_dev, label_pred_dev, average='micro', labels=positive_labels)
    dev_macro_f1 = f1_score(labels_dev, label_pred_dev, average='macro', labels=positive_labels)

    if verbose:
        _print_classification_details(label_encoder, labels_dev, label_pred_dev, negative_label)

    print('EVALUATION: cost: {} | acc: {} | micro f1: {} | macro f1: {}'.format(
        dev_loss / len(indices_dev), dev_accuracy, dev_micro_f1, dev_macro_f1))

    # save predictions on test dataset per epoch
    logger.log(train_loss=current_loss, dev_loss=avg_dev_loss, dev_accuracy=dev_accuracy,
               dev_micro_f1=dev_micro_f1, dev_macro_f1=dev_macro_f1)

    label_idxs_pred_dev, _ = predict(indices_dev, model, device, batch_size)
    labels_pred_dev = [label_encoder.get_item_for_index(label_index) for label_index in label_idxs_pred_dev]
    logger.log_dev_predictions(epoch, labels_pred_dev, ids_dev, log_with_id=log_with_id)

    if test is not None:
        indices_test, _, labels_test, ids_test, entity_ids_test = test
        # PR curves need gold labels and entity ids on the test split.
        log_pr_curve = len(labels_test) > 0 and entity_ids_test is not None
        label_idxs_pred_test, probs_test = predict(indices_test, model, device, batch_size,
                                                   compute_probs=log_pr_curve)
        labels_pred_test = [label_encoder.get_item_for_index(label_index)
                            for label_index in label_idxs_pred_test]
        logger.log_test_predictions(epoch, labels_pred_test, ids_test, log_with_id=log_with_id)
        if log_pr_curve:
            negative_label_idx = label_encoder.get_idx_for_item(negative_label)
            logger.log_test_pr_curve(epoch, entity_ids_test, labels_test, probs_test, negative_label_idx,
                                     label_encoder)

    return avg_dev_loss, dev_micro_f1, dev_macro_f1


def train(dataset, data_dir, log_dir, max_grad_norm=1, learning_rate=6.25e-5, learning_rate_warmup=0.002,
          n_ctx=512, n_embd=768, n_head=12, n_layer=12, embd_pdrop=.1, lm_coef=.5, attn_pdrop=.1,
          resid_pdrop=.1, clf_pdrop=.1, word_pdrop=.0, l2=0.01, vector_l2=True, optimizer='adam', afn='gelu',
          learning_rate_schedule='warmup_linear', encoder_path='model/encoder_bpe_40000.json',
          bpe_path='model/vocab_40000.bpe', n_transfer=12, beta1=.9, beta2=.999, e=1e-8, batch_size=8,
          max_epochs=3, dev_size=.1, seed=0, load_pre_trained=True, subsampling_rate=1.0, train_set_limit=None,
          dev_file=None, dev_set_limit=None, skip_test_set=False, verbose_fetcher=False,
          verbose_training=False, masking_mode=None, write_model=True):
    """Fine-tune the OpenAI transformer for relation extraction.

    Supports the 'semeval_2010_task8' and 'tacred' datasets. Hyperparameters
    mirror the original OpenAI finetune-transformer-lm configuration; the
    whole local namespace is captured into `cfg` and forwarded to the model
    and the result logger. Saves the best model (by dev macro F1) when
    `write_model` is set.
    """
    # NOTE: captures every argument above — keep this as the first statement.
    cfg = dotdict(locals().items())
    print(cfg)
    logger = ResultLogger(log_dir, **cfg)

    # Seed all RNGs for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    print('Device: {} | n_gpu: {}'.format(device, n_gpu))

    # create / load encoders for text and labels
    text_encoder = TextEncoder(encoder_path, bpe_path)
    label_encoder = LabelEncoder(add_unk=False)

    # Dataset-specific settings: negative class name, dev-split handling,
    # whether prediction logs carry example ids.
    if dataset == 'semeval_2010_task8':
        predefined_dev_set = False
        negative_label = 'Other'
        log_with_id = True
    elif dataset == 'tacred':
        predefined_dev_set = True
        dev_size = None
        negative_label = 'no_relation'
        log_with_id = False
    else:
        raise ValueError("Dataset '{}' not supported.".format(dataset))

    # Append the special tokens to the BPE vocabulary.
    encoder = text_encoder.encoder
    encoder['_start_'] = len(encoder)
    encoder['_delimiter_'] = len(encoder)
    encoder['_delimiter2_'] = len(encoder)
    encoder['_classify_'] = len(encoder)
    n_special = 4

    if dataset == 'tacred':
        for t in SemEval2010Task8.MASKED_ENTITY_TOKENS:
            text_encoder.encoder[t] = len(text_encoder.encoder)
            n_special += 1

    # TODO: improve (as a sentence is generally much longer than the two entities)
    # the input has 3 parts (entity 1, entity 2, sentence) and special tokens
    # all together should not exceed the context length
    max_len = (n_ctx - n_special - 1) // 3

    if dataset == 'semeval_2010_task8' or dataset == 'tacred':
        corpus = SemEval2010Task8.fetch(data_dir, dev_size, seed, negative_label=negative_label,
                                        subsampling_rate=subsampling_rate, train_set_limit=train_set_limit,
                                        dev_set_limit=dev_set_limit, skip_test_set=skip_test_set,
                                        predefined_dev_set=predefined_dev_set, verbose=verbose_fetcher,
                                        masking_mode=masking_mode, dev_file=dev_file)
        corpus = SemEval2010Task8.encode(*corpus, text_encoder=text_encoder, label_encoder=label_encoder)
        # Shrink the context to the longest encoded example if possible.
        n_ctx = min(SemEval2010Task8.max_length(*corpus, max_len=max_len) + n_special + 1, n_ctx)
        transformed_corpus = SemEval2010Task8.transform(*corpus, text_encoder=text_encoder,
                                                        max_length=max_len, n_ctx=n_ctx)
    else:
        raise ValueError("Dataset '{}' not supported.".format(dataset))

    if not skip_test_set:
        train, dev, test = transformed_corpus
    else:
        train, dev = transformed_corpus
        test = None

    _, _, labels_dev, ids_dev, _ = dev
    logger.log_dev_labels(
        labels_dev=[label_encoder.get_item_for_index(label) for label in labels_dev],
        ids=ids_dev)

    # Effective batch size scales with the number of GPUs (DataParallel).
    batch_size_train = batch_size * max(n_gpu, 1)
    n_updates_total = (len(train[0]) // batch_size_train) * max_epochs

    clf_token = text_encoder.encoder['_classify_']
    # Vocabulary includes learned position embeddings appended after tokens.
    vocab = len(text_encoder.encoder) + n_ctx

    n_class = len(label_encoder)

    dh_model = DoubleHeadModel(cfg, clf_token, ('classification', n_class), vocab, n_ctx)

    # NOTE(review): `reduce=False` is the deprecated spelling of
    # `reduction='none'` in newer torch versions.
    criterion = nn.CrossEntropyLoss(reduce=False)
    model_opt = OpenAIAdam(dh_model.parameters(), lr=learning_rate, schedule=learning_rate_schedule,
                           warmup=learning_rate_warmup, t_total=n_updates_total, b1=beta1, b2=beta2, e=e,
                           l2=l2, vector_l2=vector_l2, max_grad_norm=max_grad_norm)
    compute_loss_fct = ClassificationLossCompute(criterion, criterion, lm_coef, model_opt)
    if load_pre_trained:
        load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special,
                                     n_transfer=n_transfer)

    dh_model.to(device)
    dh_model = nn.DataParallel(dh_model)

    if write_model:
        model_dir = path.join(logger.get_base_dir(), 'models')
        persist_model(model_dir, dh_model, text_encoder, label_encoder)

    # run training!
    best_f1 = 0.
    for epoch in range(1, max_epochs + 1):
        dev_loss, _, dev_macro_f1 = run_epoch(dh_model, train, dev, test, compute_loss_fct, batch_size,
                                              device, epoch, label_encoder, logger, negative_label,
                                              log_with_id=log_with_id, verbose=verbose_training)
        # Keep a checkpoint of the best-so-far model by dev macro F1.
        if dev_macro_f1 > best_f1:
            best_f1 = dev_macro_f1
            if write_model:
                print(f'Saving model at epoch {epoch}. With dev_f1 score of {dev_macro_f1}.')
                model_file_name = f'model_epoch-{epoch}_dev-macro-f1-{dev_macro_f1}_' \
                                  f'dev-loss-{dev_loss}_{logger.start_time}.pt'
                persist_model(model_dir, dh_model, text_encoder, label_encoder, model_name=model_file_name)


def evaluate(dataset, test_file, log_dir, save_dir, model_file='model.pt', batch_size=8, masking_mode=None):
    """Evaluate a persisted model on a test file.

    For SemEval 2010 Task 8 the official perl scorer is invoked via temp
    files; for TACRED micro P/R/F1 over the positive labels is reported.
    """
    cfg = dotdict(locals().items())
    print(cfg)
    logger = ResultLogger(log_dir, **cfg)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model, text_encoder, label_encoder = load_model(save_dir, model_file=model_file)
    model = model.to(device)

    n_special = 4
    n_ctx = model.n_ctx
    max_len = 512 // 3

    if dataset == 'semeval_2010_task8' or dataset == 'tacred':
        test = SemEval2010Task8._load_from_jsonl(test_file, is_test=False, masking_mode=masking_mode)
        test = SemEval2010Task8.encode(test, text_encoder=text_encoder, label_encoder=label_encoder)
        test = SemEval2010Task8.transform(*test, text_encoder=text_encoder, max_length=max_len,
                                          n_ctx=n_ctx)[0]
    else:
        raise ValueError("Dataset '{}' not supported.".format(dataset))

    if dataset == 'semeval_2010_task8':
        negative_label = 'Other'
    elif dataset == 'tacred':
        negative_label = 'no_relation'
    else:
        raise ValueError("Dataset '{}' not supported.".format(dataset))

    indices_test, _, label_idxs_test, ids_test, entity_ids_test = test
    log_pr_curve = entity_ids_test is not None
    label_idxs_pred, probs_test = predict(indices_test, model, device, batch_size,
                                          compute_probs=log_pr_curve)
    labels_pred_test = [label_encoder.get_item_for_index(label_index) for label_index in label_idxs_pred]
    logger.log_test_predictions(0, labels_pred_test, ids_test)

    test_accuracy = accuracy_score(label_idxs_test, label_idxs_pred) * 100.

    if dataset == 'semeval_2010_task8':
        # Score with the official perl scorer: write gold and predicted
        # (id, label) pairs to temp files and hand them to the script.
        id_labels_true = [(id_, label_encoder.get_item_for_index(label_index))
                          for id_, label_index in zip(ids_test, label_idxs_test)]
        id_labels_pred = list(zip(ids_test, labels_pred_test))

        input_files = []
        for id_labels in [id_labels_true, id_labels_pred]:
            tmp_file = NamedTemporaryFile(delete=True)
            input_files.append(tmp_file)
            with open(tmp_file.name, 'w') as f:
                for id_, label in id_labels:
                    f.write('{}\t{}\n'.format(id_, label))
            tmp_file.file.close()

        path_to_eval_script = path.join(path.dirname(path.realpath(__file__)),
                                        'analysis/semeval/semeval2010_task8_scorer-v1.2.pl')
        test_f1 = evaluate_semeval2010_task8(id_labels_true_file=input_files[0].name,
                                             id_labels_pred_file=input_files[1].name,
                                             eval_script=path_to_eval_script)
        print(f'TEST: ACC: {test_accuracy} | F1: {test_f1}')
    else:
        # Micro-averaged scores over the positive labels only.
        labels = list(sorted(set(label_idxs_test)))
        labels.remove(label_encoder.get_idx_for_item(negative_label))
        test_precision, test_recall, test_f1, _ = precision_recall_fscore_support(
            label_idxs_test, label_idxs_pred, average='micro', labels=labels)
        print(f'TEST: ACC: {test_accuracy} | P: {test_precision} | R: {test_recall} | F1: {test_f1}')

    if log_pr_curve:
        negative_label_idx = label_encoder.get_idx_for_item(negative_label)
        logger.log_test_pr_curve(0, entity_ids_test, label_idxs_test, probs_test, negative_label_idx,
                                 label_encoder)

    logger.close()


if __name__ == '__main__':
    fire.Fire({
        'train': train,
        'evaluate': evaluate
    })
{"hexsha": "96ec781c0210333d0bc8e1d90f61401fefa4895e", "size": 19223, "ext": "py", "lang": "Python", "max_stars_repo_path": "relation_extraction.py", "max_stars_repo_name": "DFKI-NLP/lm-transformer-re", "max_stars_repo_head_hexsha": "650ac2c261ee6ed6113b3af1e9ee813c9952ad3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 110, "max_stars_repo_stars_event_min_datetime": "2019-04-08T08:48:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T13:20:17.000Z", "max_issues_repo_path": "relation_extraction.py", "max_issues_repo_name": "DFKI-NLP/lm-transformer-re", "max_issues_repo_head_hexsha": "650ac2c261ee6ed6113b3af1e9ee813c9952ad3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-04-10T04:30:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-16T00:52:25.000Z", "max_forks_repo_path": "relation_extraction.py", "max_forks_repo_name": "DFKI-NLP/lm-transformer-re", "max_forks_repo_head_hexsha": "650ac2c261ee6ed6113b3af1e9ee813c9952ad3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-04-10T10:04:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T07:51:51.000Z", "avg_line_length": 42.2483516484, "max_line_length": 134, "alphanum_fraction": 0.6683139989, "include": true, "reason": "import numpy", "num_tokens": 4476}
#== # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
#
#   Tests related to TLE parser.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ==#

# File: ./src/orbit/tle.jl
# ========================

# Macros tle_str and tlenc_str
# ----------------------------

@testset "Macros tle_str and tlenc_str" begin
    # Read the SCDs TLE from the file.
    tles_file = read_tle("./SCDs.tle")

    # Read the same TLEs from a string.
    # NOTE: TLE lines are column-sensitive; the literals below must not be
    # reformatted.
    tles_str = tle"""
    SCD 1
    1 22490U 93009B 18165.62596833 .00000225 00000-0 11410-4 0 9991
    2 22490 24.9690 231.7852 0042844 200.7311 292.7198 14.44524498338066
    SCD 2
    1 25504U 98060A 18165.15074951 .00000201 00000-0 55356-5 0 9994
    2 25504 24.9961 80.1303 0017060 224.4822 286.6438 14.44043397 37312
    """

    # Read the same TLES from a string with wrong checksums.
    # This should not output any exceptions.
    tles_str_nc = tlenc"""
    SCD 1
    1 22490U 93009B 18165.62596833 .00000225 00000-0 11410-4 0 9990
    2 22490 24.9690 231.7852 0042844 200.7311 292.7198 14.44524498338060
    SCD 2
    1 25504U 98060A 18165.15074951 .00000201 00000-0 55356-5 0 9990
    2 25504 24.9961 80.1303 0017060 224.4822 286.6438 14.44043397 37310
    """

    # Compare the TLEs.
    @test length(tles_file) == length(tles_str)
    @test length(tles_file) == length(tles_str_nc)

    # Field-by-field comparison between the file-parsed and string-parsed TLEs.
    for i = 1:length(tles_file)
        for sym in fieldnames(TLE)
            @test getfield(tles_file[i], sym) == getfield(tles_str[i], sym)

            # Skip the comparison of checksums for `tles_str_nc`, which was
            # deliberately built with wrong checksums.
            ( (sym == :checksum_l1) || (sym == :checksum_l2) ) && continue

            @test getfield(tles_file[i], sym) == getfield(tles_str_nc[i], sym)
        end
    end
end
{"hexsha": "31644a3d82f38927c0f6aeae6f88e1bf83061644", "size": 1872, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/orbit/tle.jl", "max_stars_repo_name": "FedericoStra/SatelliteToolbox.jl", "max_stars_repo_head_hexsha": "31b50dfc698f8f4d0309960ac95a51824b24283a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/orbit/tle.jl", "max_issues_repo_name": "FedericoStra/SatelliteToolbox.jl", "max_issues_repo_head_hexsha": "31b50dfc698f8f4d0309960ac95a51824b24283a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/orbit/tle.jl", "max_forks_repo_name": "FedericoStra/SatelliteToolbox.jl", "max_forks_repo_head_hexsha": "31b50dfc698f8f4d0309960ac95a51824b24283a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4285714286, "max_line_length": 79, "alphanum_fraction": 0.5528846154, "num_tokens": 732}
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Samragni Banerjee <samragnibanerjee4@gmail.com>
#         Alexander Sokolov <alexander.y.sokolov@gmail.com>
#

import numpy as np
import pyscf.ao2mo

### Integral transformation ###


def transform_integrals(myadc):
    """Build every antisymmetrized two-electron MO-integral block for UADC.

    Slices the alpha/beta MO coefficients of ``myadc`` into occupied and
    virtual parts and transforms the AO integrals (``myadc._scf._eri``) into
    each occupied/virtual block combination.  The blocks are stored as
    attributes (``oovv``, ``vvvv``, ...) on a bare lambda that serves as a
    cheap attribute container, following the pyscf ``eris`` convention.
    Each attribute holds a ``(alpha-alpha, alpha-beta, beta-beta)`` tuple.
    """
    # Occupied (o) and virtual (v) MO coefficient blocks for each spin.
    occ_a = myadc.mo_coeff[0][:,:myadc._nocc[0]]
    occ_b = myadc.mo_coeff[1][:,:myadc._nocc[1]]
    vir_a = myadc.mo_coeff[0][:,myadc._nocc[0]:]
    vir_b = myadc.mo_coeff[1][:,myadc._nocc[1]:]

    occ = occ_a, occ_b
    vir = vir_a, vir_b

    # Bare lambda used as a mutable namespace for the integral blocks.
    eris = lambda:None

    eris.oovv = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,occ,vir,vir))
    eris.vvvv = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,vir,vir,vir))
    eris.oooo = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,occ,occ,occ))
    eris.voov = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,occ,occ,vir))
    eris.ooov = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,occ,occ,vir))
    eris.vovv = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,occ,vir,vir))
    eris.vvoo = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,vir,occ,occ))
    eris.vvvo = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,vir,vir,occ))
    eris.ovoo = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,vir,occ,occ))
    eris.ovov = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,vir,occ,vir))
    eris.vooo = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,occ,occ,occ))
    eris.oovo = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,occ,vir,occ))
    eris.vovo = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,occ,vir,occ))
    eris.vvov = transform_antisymmetrize_integrals(myadc._scf._eri, (vir,vir,occ,vir))
    eris.ovvo = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,vir,vir,occ))
    eris.ovvv = transform_antisymmetrize_integrals(myadc._scf._eri, (occ,vir,vir,vir))

    return eris


# TODO: disk flag
def transform_antisymmetrize_integrals(v2e_ao, mo, disk = False):
    """Transform AO integrals into the MO blocks given by ``mo``, antisymmetrized.

    ``mo`` is a 4-tuple of ``(alpha, beta)`` MO coefficient pairs, one per
    index of the two-electron integral.  For the same-spin blocks the exchange
    term is subtracted; the fast paths rely on *object identity* of the
    coefficient matrices (``mo_1_a is mo_2_a``) to reuse a transpose instead
    of a second AO->MO transformation.

    Returns the ``(alpha-alpha, alpha-beta, beta-beta)`` tuple of ndarrays in
    physicists' <12|34> index order (the ``transpose(0,2,1,3)`` converts from
    ao2mo's chemists' ordering).

    NOTE(review): ``disk_helper`` is not imported anywhere in this module, so
    calling with ``disk=True`` would raise NameError — confirm before enabling
    the disk path.
    """
    mo_1, mo_2, mo_3, mo_4 = mo
    mo_1_a, mo_1_b = mo_1
    mo_2_a, mo_2_b = mo_2
    mo_3_a, mo_3_b = mo_3
    mo_4_a, mo_4_b = mo_4

    # --- alpha-alpha block ---
    v2e_a = None
    v2e_a = pyscf.ao2mo.general(v2e_ao, (mo_1_a, mo_3_a, mo_2_a, mo_4_a), compact=False)
    v2e_a = v2e_a.reshape(mo_1_a.shape[1], mo_3_a.shape[1], mo_2_a.shape[1], mo_4_a.shape[1])
    v2e_a = v2e_a.transpose(0,2,1,3).copy()
    if (mo_1_a is mo_2_a):
        # Identical bra indices: exchange term is a simple index swap.
        v2e_a -= v2e_a.transpose(1,0,2,3).copy()
    elif (mo_3_a is mo_4_a):
        # Identical ket indices: likewise.
        v2e_a -= v2e_a.transpose(0,1,3,2).copy()
    else:
        # General case: a second transformation is required for the exchange term.
        v2e_temp = None
        v2e_temp = pyscf.ao2mo.general(v2e_ao, (mo_1_a, mo_4_a, mo_2_a, mo_3_a), compact=False)
        v2e_temp = v2e_temp.reshape(mo_1_a.shape[1], mo_4_a.shape[1], mo_2_a.shape[1], mo_3_a.shape[1])
        v2e_a -= v2e_temp.transpose(0,2,3,1).copy()
        del v2e_temp
    v2e_a = disk_helper.dataset(v2e_a) if disk else v2e_a

    # --- beta-beta block (mirror of the alpha-alpha logic) ---
    v2e_b = None
    v2e_b = pyscf.ao2mo.general(v2e_ao, (mo_1_b, mo_3_b, mo_2_b, mo_4_b), compact=False)
    v2e_b = v2e_b.reshape(mo_1_b.shape[1], mo_3_b.shape[1], mo_2_b.shape[1], mo_4_b.shape[1])
    v2e_b = v2e_b.transpose(0,2,1,3).copy()
    if (mo_1_b is mo_2_b):
        v2e_b -= v2e_b.transpose(1,0,2,3).copy()
    elif (mo_3_b is mo_4_b):
        v2e_b -= v2e_b.transpose(0,1,3,2).copy()
    else:
        v2e_temp = None
        v2e_temp = pyscf.ao2mo.general(v2e_ao, (mo_1_b, mo_4_b, mo_2_b, mo_3_b), compact=False)
        v2e_temp = v2e_temp.reshape(mo_1_b.shape[1], mo_4_b.shape[1], mo_2_b.shape[1], mo_3_b.shape[1])
        v2e_b -= v2e_temp.transpose(0,2,3,1).copy()
        del v2e_temp
    v2e_b = disk_helper.dataset(v2e_b) if disk else v2e_b

    # --- alpha-beta block: no exchange term between opposite spins ---
    v2e_ab = None
    v2e_ab = pyscf.ao2mo.general(v2e_ao, (mo_1_a, mo_3_a, mo_2_b, mo_4_b), compact=False)
    v2e_ab = v2e_ab.reshape(mo_1_a.shape[1], mo_3_a.shape[1], mo_2_b.shape[1], mo_4_b.shape[1])
    v2e_ab = v2e_ab.transpose(0,2,1,3).copy()
    v2e_ab = disk_helper.dataset(v2e_ab) if disk else v2e_ab

    return (v2e_a, v2e_ab, v2e_b)
{"hexsha": "41aea5d3a1fe79f193c99f48f0a7657128a4f777", "size": 4618, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyscf/adc/uadc_ao2mo.py", "max_stars_repo_name": "azag0/pyscf", "max_stars_repo_head_hexsha": "1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-03T12:32:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T08:19:02.000Z", "max_issues_repo_path": "pyscf/adc/uadc_ao2mo.py", "max_issues_repo_name": "azag0/pyscf", "max_issues_repo_head_hexsha": "1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyscf/adc/uadc_ao2mo.py", "max_forks_repo_name": "azag0/pyscf", "max_forks_repo_head_hexsha": "1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-01T05:31:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T02:38:33.000Z", "avg_line_length": 42.7592592593, "max_line_length": 103, "alphanum_fraction": 0.703118233, "include": true, "reason": "import numpy", "num_tokens": 1788}
[STATEMENT] lemma alw_safe_combined2: "FullSpec s \<Longrightarrow> alw (holds safe_combined2) s" [PROOF STATE] proof (prove) goal (1 subgoal): 1. FullSpec s \<Longrightarrow> alw (holds safe_combined2) s [PROOF STEP] apply (frule exch_alw_InvCapsNonneg) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>FullSpec s; alw (\<lambda>s. cri.InvCapsNonneg (exchange_config (shd s))) s\<rbrakk> \<Longrightarrow> alw (holds safe_combined2) s [PROOF STEP] apply (drule alw_safe_combined) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>alw (\<lambda>s. cri.InvCapsNonneg (exchange_config (shd s))) s; alw (holds safe_combined) s\<rbrakk> \<Longrightarrow> alw (holds safe_combined2) s [PROOF STEP] apply (simp add: alw_iff_sdrop safe_combined_implies_safe_combined2) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 349, "file": "Progress_Tracking_Combined", "length": 4}
import numpy
from numpy import *
from math import sqrt


def rigid_transform_3D(A, B):
    """Return the rotation ``R`` and translation ``t`` mapping points A onto B.

    Standard SVD (Kabsch) rigid alignment: center both N x 3 point clouds,
    take the SVD of the cross-covariance matrix, and build a proper rotation.

    :param A: N x 3 array of source points.
    :param B: N x 3 array of destination points (same N, same order).
    :return: tuple ``(R, t)`` with R a 3x3 rotation and t a length-3 translation
        such that ``R @ A.T + t`` approximates ``B.T``.
    """
    assert len(A) == len(B)

    N = A.shape[0]  # total points

    centroid_A = mean(A, axis=0)
    centroid_B = mean(B, axis=0)

    # Centre both point clouds on the origin.
    AA = A - tile(centroid_A, (N, 1))
    BB = B - tile(centroid_B, (N, 1))

    # Cross-covariance; dot is matrix multiplication for arrays.
    H = numpy.dot(transpose(AA), BB)

    U, S, Vt = linalg.svd(H)

    R = numpy.dot(Vt.T, U.T)

    # Special reflection case: det < 0 means the SVD produced a reflection
    # rather than a proper rotation, so flip the last singular axis.
    # (Fixed: this used the Python 2 `print` statement, which is a syntax
    # error under Python 3 while the rest of the file uses print().)
    if linalg.det(R) < 0:
        print("Reflection detected")
        Vt[2, :] *= -1
        R = numpy.dot(Vt.T, U.T)

    t = numpy.dot(-1 * R, centroid_A.T) + centroid_B.T

    print(t)
    return R, t


def check(V0, V1, R, T=(0, 0, 0)):
    """Return True if ``R @ V0 + T`` reproduces ``V1`` within allclose tolerance.

    :param V0: N x 3 source points.
    :param V1: N x 3 expected destination points.
    :param R: 3x3 rotation matrix.
    :param T: length-3 translation (default: no translation).
    """
    V0 = numpy.array(V0)
    V1 = numpy.array(V1)
    v1 = numpy.dot(R, V0.T)
    # The point count was previously hard-coded to 10; derive it from the
    # input so this helper works for any number of calibration points.
    v1 = v1.T + tile(T, (V0.shape[0], 1))
    print(v1)
    print('-----------')
    print(V1)
    print('-----------')
    return numpy.allclose(v1, V1)


def main():
    """Load calibration data and solve for the observed->measured transform."""
    d = numpy.load("calibration_data_1.npz")
    observed_pts = d['arr_0']
    # Scale factor 1000: presumably millimetres -> metres — TODO confirm units.
    measured_pts = d['arr_1'] / 1000.0
    print(observed_pts)
    print('----')
    print(measured_pts)
    print('----')

    R, t = rigid_transform_3D(numpy.array(observed_pts), numpy.array(measured_pts))
    print(R)
    print('-----')
    print(t)
    print('-----')
    print(check(observed_pts, measured_pts, R, t))


if __name__ == '__main__':
    # Guarded so importing this module no longer runs the calibration script.
    main()
{"hexsha": "0e533ade4103ecc822f4703bd579e09449858b34", "size": 1362, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "madeira-sustcer/ME336-Project2", "max_stars_repo_head_hexsha": "94334569418b4270a326453a000dfd53938d2c82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "madeira-sustcer/ME336-Project2", "max_issues_repo_head_hexsha": "94334569418b4270a326453a000dfd53938d2c82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "madeira-sustcer/ME336-Project2", "max_forks_repo_head_hexsha": "94334569418b4270a326453a000dfd53938d2c82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.9538461538, "max_line_length": 79, "alphanum_fraction": 0.580763583, "include": true, "reason": "import numpy,from numpy", "num_tokens": 445}
# Evaluate the cost of configuration `x` `cost_evaluations` times in parallel
# via `pmap`, record each individual cost in `tuning_run.cost_values`, and
# return the mean cost.
#
# NOTE(review): `fill!` stores the *same* `deepcopy(x)` object in every slot;
# with `pmap` each task is serialized to a worker so the copies are effectively
# independent, but with local threading they would be shared — confirm intended.
function measure_mean!(tuning_run::Run, x::Configuration)
    configurations = Array{Configuration}(undef, tuning_run.cost_evaluations)
    fill!(configurations, deepcopy(x))

    # Closure binding the run's cost function and its extra arguments.
    pmap_cost(x::Configuration) = tuning_run.cost(x, tuning_run.cost_arguments)

    results = pmap(pmap_cost, configurations)

    # Keep the per-evaluation costs for later inspection.
    for i = 1:tuning_run.cost_evaluations
        tuning_run.cost_values[i] = results[i]
    end

    mean(results)
end

# Serial counterpart of `measure_mean!`: evaluate the cost `cost_evaluations`
# times in the current process, storing each result, and return the mean.
function sequential_measure_mean!(tuning_run::Run, x::Configuration)
    for i = 1:tuning_run.cost_evaluations
        tuning_run.cost_values[i] = tuning_run.cost(x, tuning_run.cost_arguments)
    end

    mean(tuning_run.cost_values)
end
{"hexsha": "d9c883a4ee419f510349d8a9dc3247deba8299c9", "size": 662, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/core/measurement/measure.jl", "max_stars_repo_name": "phrb/OPAL.jl", "max_stars_repo_head_hexsha": "5b0edb948892b5e4f3ad9df563c6216d7e550439", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2017-10-17T21:10:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T01:54:07.000Z", "max_issues_repo_path": "src/core/measurement/measure.jl", "max_issues_repo_name": "phrb/OPAL.jl", "max_issues_repo_head_hexsha": "5b0edb948892b5e4f3ad9df563c6216d7e550439", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2015-08-12T23:37:45.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-24T00:37:24.000Z", "max_forks_repo_path": "src/core/measurement/measure.jl", "max_forks_repo_name": "phrb/OPAL.jl", "max_forks_repo_head_hexsha": "5b0edb948892b5e4f3ad9df563c6216d7e550439", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-08-14T07:48:49.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-16T06:09:26.000Z", "avg_line_length": 31.5238095238, "max_line_length": 81, "alphanum_fraction": 0.7432024169, "num_tokens": 160}
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 07:03, 18/03/2020                                                        %
#                                                                                                       %
#       Email:      nguyenthieu2102@gmail.com                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieu1995                                                        %
#-------------------------------------------------------------------------------------------------------%

from numpy import exp, sign, ones, mean, multiply
from numpy.random import uniform, randint, normal, random, choice
from copy import deepcopy

from mealpy.root import Root


class BaseEO(Root):
    """
        The original version of: Equilibrium Optimizer (EO)
            (Equilibrium Optimizer: A Novel Optimization Algorithm)
        Link:
            https://doi.org/10.1016/j.knosys.2019.105190
            https://www.mathworks.com/matlabcentral/fileexchange/73352-equilibrium-optimizer-eo
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
        """Configure the optimizer; EO control constants follow the paper."""
        Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)
        self.epoch = epoch
        self.pop_size = pop_size
        self.V = 1      # unit "volume" constant, Eq. 16
        self.a1 = 2     # exploration constant
        self.a2 = 1     # exploitation constant
        self.GP = 0.5   # generation probability

    def train(self):
        """Run the EO loop; return (best_position, best_fitness, loss_history)."""
        # Candidates 2-4 start at +inf fitness so any solution beats them.
        #c_eq1 = [None, float("inf")]        # it is global best position
        c_eq2 = [None, float("inf")]
        c_eq3 = [None, float("inf")]
        c_eq4 = [None, float("inf")]

        # ---------------- Memory saving-------------------
        pop = [self.create_solution() for _ in range(self.pop_size)]
        g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
        c_eq1 = deepcopy(g_best)

        for epoch in range(0, self.epoch):
            # Rank every solution into the four equilibrium candidates.
            for i in range(0, self.pop_size):
                if pop[i][self.ID_FIT] < c_eq1[self.ID_FIT]:
                    c_eq1 = deepcopy(pop[i])
                elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq2[self.ID_FIT]:
                    c_eq2 = deepcopy(pop[i])
                elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq3[self.ID_FIT]:
                    c_eq3 = deepcopy(pop[i])
                elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq3[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq4[self.ID_FIT]:
                    c_eq4 = deepcopy(pop[i])

            # make equilibrium pool: the 4 best candidates plus their average
            c_eq_ave = (c_eq1[self.ID_POS] + c_eq2[self.ID_POS] + c_eq3[self.ID_POS] + c_eq4[self.ID_POS]) / 4
            fit_ave = self.get_fitness_position(c_eq_ave)
            c_pool = [c_eq1, c_eq2, c_eq3, c_eq4, [c_eq_ave, fit_ave]]

            # Eq. 9
            t = (1 - epoch/self.epoch) ** (self.a2 * epoch / self.epoch)

            for i in range(0, self.pop_size):
                lamda = uniform(0, 1, self.problem_size)                # lambda in Eq. 11
                r = uniform(0, 1, self.problem_size)                    # r in Eq. 11
                c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS]     # random selection 1 of candidate from the pool
                f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0)   # Eq. 11
                r1 = uniform()
                r2 = uniform()                                          # r1, r2 in Eq. 15
                gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP)      # Eq. 15
                g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS])         # Eq. 14
                g = g0 * f                                              # Eq. 13
                temp = c_eq + (pop[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)   # Eq. 16
                fit = self.get_fitness_position(temp)
                pop[i] = [temp, fit]

            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train


class ModifiedEO(BaseEO):
    """
        Original version of: Modified Equilibrium Optimizer (MEO)
            (An efficient equilibrium optimizer with mutation strategy for numerical optimization)
        Link:
            https://doi.org/10.1016/j.asoc.2020.106542
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
        BaseEO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)

    def _make_equilibrium_pool__(self, list_equilibrium=None):
        """Append the mean of the candidates (with its fitness) to the pool.

        NOTE(review): this helper is duplicated verbatim in AdaptiveEO and
        LevyEO below — a candidate for hoisting into BaseEO.
        """
        pos_list = [item[self.ID_POS] for item in list_equilibrium]
        pos_mean = mean(pos_list, axis=0)
        fit = self.get_fitness_position(pos_mean)
        list_equilibrium.append([pos_mean, fit])
        return list_equilibrium

    def train(self):
        """Run the MEO loop; return (best_position, best_fitness, loss_history)."""
        # Initialization: the population is later rebuilt from 3 sub-groups.
        pop_len = int(self.pop_size/3)
        pop = [self.create_solution() for _ in range(self.pop_size)]

        # ---------------- Memory saving-------------------
        # make equilibrium pool
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT])
        c_eq_list = deepcopy(pop_sorted[:4])
        g_best = deepcopy(c_eq_list[0])
        c_pool = self._make_equilibrium_pool__(c_eq_list)

        for epoch in range(0, self.epoch):
            # Eq. 5
            t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)

            for i in range(0, self.pop_size):
                lamda = uniform(0, 1, self.problem_size)        # lambda in Eq. 4
                r = uniform(0, 1, self.problem_size)            # r in Eq. 6
                c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS]     # random selection 1 of candidate from the pool
                f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0)   # Eq. 4
                r1 = uniform()
                r2 = uniform()
                gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP)      # Eq. 10
                g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS])                 # Eq. 9
                g = g0 * f
                pos_new = c_eq + (pop[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)    # Eq. 2
                fit = self.get_fitness_position(pos_new)
                pop[i] = [pos_new, fit]

            ## Sort the updated population based on fitness
            pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT])
            pop_s1 = pop_sorted[:pop_len]

            pop_s2 = deepcopy(pop_s1)
            pop_s3 = deepcopy(pop_s1)

            ## Mutation scheme: gaussian perturbation of the best third.
            for i in range(0, pop_len):
                pos_new = pop_s1[i][self.ID_POS] * (1 + normal(0, 1, self.problem_size))    # Eq. 12
                fit = self.get_fitness_position(pos_new)
                pop_s2[i] = [pos_new, fit]

            ## Search Mechanism: explore around the best candidate.
            pos_s1_list = [item[self.ID_POS] for item in pop_s1]
            pos_s1_mean = mean(pos_s1_list, axis=0)
            for i in range(0, pop_len):
                pos_new = (c_pool[0][self.ID_POS] - pos_s1_mean) - random() * (self.lb + random() * (self.ub - self.lb))
                fit = self.get_fitness_position(pos_new)
                pop_s3[i] = [pos_new, fit]

            ## Construct a new population; top up with pool members if the
            ## three thirds do not add back up to pop_size.
            pop = pop_s1 + pop_s2 + pop_s3
            temp = self.pop_size - len(pop)
            idx_selected = choice(range(0, len(c_pool)), temp, replace=False)
            for i in range(0, temp):
                pop.append(c_pool[idx_selected[i]])

            # Update the equilibrium pool
            pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT])
            c_eq_list = deepcopy(pop_sorted[:4])
            c_pool = self._make_equilibrium_pool__(c_eq_list)

            if pop_sorted[0][self.ID_FIT] < g_best[self.ID_FIT]:
                g_best = deepcopy(pop_sorted[0])

            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train


class AdaptiveEO(BaseEO):
    """
        Original version of: Adaptive Equilibrium Optimization (AEO)
            (A novel interdependence based multilevel thresholding technique using adaptive equilibrium optimizer)
        Link:
            https://doi.org/10.1016/j.engappai.2020.103836
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
        BaseEO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)

    def _make_equilibrium_pool__(self, list_equilibrium=None):
        """Append the mean of the candidates (with its fitness) to the pool."""
        pos_list = [item[self.ID_POS] for item in list_equilibrium]
        pos_mean = mean(pos_list, axis=0)
        fit = self.get_fitness_position(pos_mean)
        list_equilibrium.append([pos_mean, fit])
        return list_equilibrium

    def train(self):
        """Run the AEO loop; return (best_position, best_fitness, loss_history)."""
        # Initialization
        # NOTE(review): pop_len is computed but never used in this method.
        pop_len = int(self.pop_size / 3)
        pop_new = [self.create_solution() for _ in range(self.pop_size)]

        # ---------------- Memory saving-------------------
        # make equilibrium pool
        pop_sorted = sorted(pop_new, key=lambda item: item[self.ID_FIT])
        c_eq_list = deepcopy(pop_sorted[:4])
        g_best = deepcopy(c_eq_list[0])
        c_pool = self._make_equilibrium_pool__(c_eq_list)
        pop = deepcopy(pop_new)

        for epoch in range(0, self.epoch):
            ## Memory saving, Eq 20, 21: keep the better of old/new per slot.
            if epoch != 0:
                for i in range(0, self.pop_size):
                    if pop_new[i][self.ID_FIT] > pop[i][self.ID_FIT]:
                        pop_new[i] = deepcopy(pop[i])
                pop = deepcopy(pop_new)

            t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)
            for i in range(0, self.pop_size):
                lamda = uniform(0, 1, self.problem_size)
                r = uniform(0, 1, self.problem_size)
                c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS]     # random selection 1 of candidate from the pool
                f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0)   # Eq. 14
                r1 = uniform()
                r2 = uniform()
                gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP)
                g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS])
                g = g0 * f
                fit_average = mean([item[self.ID_FIT] for item in pop_new])     # Eq. 19
                pos_new = c_eq + (pop_new[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)    # Eq. 9
                # Adaptive step: rescale below-average solutions, Eq. 18.
                if pop_new[i][self.ID_FIT] >= fit_average:
                    pos_new = multiply(pos_new, (0.5 + uniform(0, 1, self.problem_size)))
                fit = self.get_fitness_position(pos_new)
                pop_new[i] = [pos_new, fit]

            # Update the equilibrium pool
            pop_sorted = sorted(pop_new, key=lambda item: item[self.ID_FIT])
            c_eq_list = deepcopy(pop_sorted[:4])
            c_pool = self._make_equilibrium_pool__(c_eq_list)

            if pop_sorted[0][self.ID_FIT] < g_best[self.ID_FIT]:
                g_best = deepcopy(pop_sorted[0])
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train


class LevyEO(BaseEO):
    """
        My modified version of: Equilibrium Optimizer (EO)
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
        BaseEO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)

    def _make_equilibrium_pool__(self, list_equilibrium=None):
        """Append the mean of the candidates (with its fitness) to the pool."""
        pos_list = [item[self.ID_POS] for item in list_equilibrium]
        pos_mean = mean(pos_list, axis=0)
        fit = self.get_fitness_position(pos_mean)
        list_equilibrium.append([pos_mean, fit])
        return list_equilibrium

    def train(self):
        """Run the Levy-EO loop; return (best_position, best_fitness, loss_history)."""
        # Initialization
        pop = [self.create_solution() for _ in range(self.pop_size)]

        # ---------------- Memory saving-------------------
        # make equilibrium pool
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT])
        c_eq_list = deepcopy(pop_sorted[:4])
        g_best = deepcopy(c_eq_list[0])
        c_pool = self._make_equilibrium_pool__(c_eq_list)

        for epoch in range(0, self.epoch):
            # Eq. 9
            t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)

            for i in range(0, self.pop_size):
                # 50/50 split between the standard EO update and a Levy flight.
                if uniform() < 0.5:
                    lamda = uniform(0, 1, self.problem_size)        # lambda in Eq. 11
                    r = uniform(0, 1, self.problem_size)            # r in Eq. 11
                    c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS]     # random selection 1 of candidate from the pool
                    f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0)   # Eq. 11
                    r1 = uniform()
                    r2 = uniform()                                          # r1, r2 in Eq. 15
                    gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP)      # Eq. 15
                    g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS])         # Eq. 14
                    g = g0 * f                                              # Eq. 13
                    temp = c_eq + (pop[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)   # Eq. 16
                else:
                    ## Idea: Sometimes, an unpredictable event happens, It make the status of equilibrium change.
                    temp = self.levy_flight(epoch, pop[i][self.ID_POS], g_best[self.ID_POS])
                fit = self.get_fitness_position(temp)
                pop[i] = [temp, fit]

            # Update the equilibrium pool (pool members compete too).
            pop_sorted = deepcopy(pop)
            pop_sorted = pop_sorted + c_pool
            pop_sorted = sorted(pop_sorted, key=lambda item: item[self.ID_FIT])
            c_eq_list = deepcopy(pop_sorted[:4])
            c_pool = self._make_equilibrium_pool__(c_eq_list)

            if pop_sorted[0][self.ID_FIT] < g_best[self.ID_FIT]:
                g_best = deepcopy(pop_sorted[0])
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
{"hexsha": "fbc48d6082fe62de07aa859f4dee50e2ce00ec99", "size": 15197, "ext": "py", "lang": "Python", "max_stars_repo_path": "mealpy/physics_based/EO.py", "max_stars_repo_name": "chenyuxiang0425/mealpy", "max_stars_repo_head_hexsha": "69e8dc727e15527e31ac5ace1debe92a0bc7d828", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-20T06:53:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-20T06:53:08.000Z", "max_issues_repo_path": "mealpy/physics_based/EO.py", "max_issues_repo_name": "chenyuxiang0425/mealpy", "max_issues_repo_head_hexsha": "69e8dc727e15527e31ac5ace1debe92a0bc7d828", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mealpy/physics_based/EO.py", "max_forks_repo_name": "chenyuxiang0425/mealpy", "max_forks_repo_head_hexsha": "69e8dc727e15527e31ac5ace1debe92a0bc7d828", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.552715655, "max_line_length": 173, "alphanum_fraction": 0.5348424031, "include": true, "reason": "from numpy", "num_tokens": 3908}
import cv2 as cv
import numpy as np

# Load the handwritten-digits image as grayscale.
img = cv.imread('handwriting.jpg', 0)

# Binarize the image (inverted binary + Otsu threshold).
_, thresh = cv.threshold(img, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
contours, hierarchy = cv.findContours(thresh, 3, 2)

# Create two color images to draw on.
img_color1 = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
img_color2 = np.copy(img_color1)
# Create one more color image for comparing results.
# NOTE(review): `img_color` appears unused below — confirm before removing.
img_color = np.copy(img_color1)

# Compute contour features of the digit "1".
# NOTE(review): assumes the digit of interest is contours[1]; the index
# depends on the input image and OpenCV's contour ordering — verify.
cnt = contours[1]
cv.drawContours(img_color1, [cnt], 0, (0, 0, 255), 2)

# 1. Contour area
area = cv.contourArea(cnt)  # 6289.5
print(area)

# 2. Contour perimeter
perimeter = cv.arcLength(cnt, True)  # 527.4041
print(perimeter)

# 3. Image moments
M = cv.moments(cnt)
print(M)
print(M['m00'])  # contour area

cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']  # contour centroid
print(cx, cy)

# 4. Upright bounding rectangle and minimum-area (rotated) rectangle
x, y, w, h = cv.boundingRect(cnt)  # upright bounding rectangle
cv.rectangle(img_color1, (x, y), (x + w, y + h), (0, 255, 0), 2)

rect = cv.minAreaRect(cnt)  # minimum-area rotated rectangle
box = np.int0(cv.boxPoints(rect))  # the rectangle's four corners, rounded to int
cv.drawContours(img_color1, [box], 0, (255, 0, 0), 2)

# 5. Minimum enclosing circle
(x, y), radius = cv.minEnclosingCircle(cnt)
(x, y, radius) = map(int, (x, y, radius))  # another way of rounding to int
cv.circle(img_color2, (x, y), radius, (0, 0, 255), 2)

# 6. Fit an ellipse
ellipse = cv.fitEllipse(cnt)
cv.ellipse(img_color2, ellipse, (0, 255, 0), 2)

result = np.hstack((img_color1, img_color2))
cv.imshow('result', result)
cv.waitKey(0)
cv.destroyAllWindows()
{"hexsha": "7732da08c88a07847729007c2b4f425509b02273", "size": 1353, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/cv_tools.py", "max_stars_repo_name": "Monologuethl/yolov4-pytorch-16bit", "max_stars_repo_head_hexsha": "4411081a875ee779f9b7808dc922658a549dff8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-13T01:33:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T09:36:09.000Z", "max_issues_repo_path": "utils/cv_tools.py", "max_issues_repo_name": "Monologuethl/yolov4-pytorch-16bit", "max_issues_repo_head_hexsha": "4411081a875ee779f9b7808dc922658a549dff8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/cv_tools.py", "max_forks_repo_name": "Monologuethl/yolov4-pytorch-16bit", "max_forks_repo_head_hexsha": "4411081a875ee779f9b7808dc922658a549dff8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-13T01:33:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T01:33:42.000Z", "avg_line_length": 24.1607142857, "max_line_length": 76, "alphanum_fraction": 0.6703621582, "include": true, "reason": "import numpy", "num_tokens": 638}
"""Work with a collection of moles.""" import argparse import json import math import pathlib import uuid import numpy import mel.lib.fs import mel.lib.image import mel.lib.math import mel.rotomap.mask KEY_IS_CONFIRMED = "is_uuid_canonical" KEY_IS_UNCHANGED = "is_unchanged" IGNORE_NEW_FILENAME = "ignore-new" IGNORE_MISSING_FILENAME = "ignore-missing" IGNORE_CHANGED_FILENAME = "ignore-changed" ROTOMAP_DIR_LESIONS_FILENAME = "lesions.json" class RotomapDirectory: """RotomapFrame-s for all images in a single rotomap dir.""" def __init__(self, path): self.path = pathlib.Path(path) if not self.path.is_dir(): raise ValueError( '"{}" is not a directory, so not a rotomap.'.format(self.path) ) self.image_paths = [ str(f) for f in self.path.iterdir() if mel.lib.fs.is_jpeg_name(f) ] self.image_paths.sort() self.lesions = load_rotomap_dir_lesions_file(self.path) if not self.image_paths: raise ValueError( '"{}" has no images, so not a rotomap.'.format(self.path) ) def yield_mole_lists(self): """Yield (image_path, mole_list) for all mole image files.""" for imagepath in self.image_paths: yield imagepath, load_image_moles(imagepath) def yield_frames(self, *, extra_stem=None): for imagepath in self.image_paths: yield RotomapFrame(imagepath, extra_stem=extra_stem) def calc_uuids(self): return { uuid_ for frame in self.yield_frames() for uuid_ in frame.moledata.uuids } def __repr__(self): return f"RotomapDirectory({self.path!r})" class RotomapFrame: """Image and mole data for a single image in a rotomap.""" def __init__(self, path, *, extra_stem=None): self.path = pathlib.Path(path) if self.path.is_dir(): raise ValueError(f"Expected file, not directory: {path}") if not self.path.exists(): raise ValueError(f"Path does not exist: {path}") if not mel.lib.fs.is_jpeg_name(self.path): raise ValueError(f"Unrecognised suffix for rotomap frame: {path}") self.moles = load_image_moles(self.path, extra_stem=extra_stem) self.moledata = MoleData(self.moles) self.metadata = 
        load_image_metadata(self.path)

    def load_image(self):
        """Load and return the image for this frame from disk."""
        return mel.lib.image.load_image(self.path)

    # def load_mask(self):
    #     return mel.rotomap.mask.load_or_none(self.path)

    def has_mole_file(self):
        """Return True if a '<image>.json' mole file exists for this frame."""
        return pathlib.Path(str(self.path) + ".json").exists()

    def has_mask(self):
        """Return True if a mask exists for this frame's image."""
        return mel.rotomap.mask.has_mask(self.path)

    def __repr__(self):
        return f"RotomapFrame({self.path!r})"


class MoleData:
    """Iterables of UUIDs, locations, and other data on moles in an image."""

    def __init__(self, mole_iter):
        # Materialise once so the data can be iterated repeatedly.
        self.moles = tuple(mole_iter)
        self.uuids = frozenset(m["uuid"] for m in self.moles)
        # uuid -> numpy (x, y) point, for quick lookup by uuid.
        self.uuid_points = to_uuid_points(self.moles)
        # Ordered (uuid, point) pairs, preserving the input ordering.
        self.uuid_points_list = [
            (m["uuid"], mole_to_point(m)) for m in self.moles
        ]

        # vulture will report this as unused unless we do this
        #
        # pylint: disable=pointless-statement
        self.uuid_points_list
        # pylint: enable=pointless-statement

        # self.canonical_uuids = frozenset(
        #     m["uuid"] for m in self.moles if m[KEY_IS_CONFIRMED]
        # )
        # self.uuid_moles = {m['uuid']: m for m in self.moles}


def make_argparse_rotomap_directory(path):
    """Use in the 'type=' parameter to add_argument()."""
    try:
        return RotomapDirectory(path)
    except ValueError as e:
        # argparse reports ArgumentTypeError as a clean usage error.
        raise argparse.ArgumentTypeError(str(e))


def make_argparse_image_moles(path):
    """Use in the 'type=' parameter to add_argument().

    Yields (path, moles) pairs; a file yields itself, a directory yields
    all of its mole lists.
    """
    try:
        path = pathlib.Path(path)
        if not path.exists():
            raise ValueError(f"'{path}' does not exist.")
        if path.is_file():
            yield path, load_image_moles(path)
        else:
            yield from RotomapDirectory(path).yield_mole_lists()
    except ValueError as e:
        # NOTE(review): this is a generator, so the ArgumentTypeError is
        # raised at iteration time, not when argparse calls the type.
        raise argparse.ArgumentTypeError(str(e))


def make_argparse_image_moles_tree(path):
    """Use in the 'type=' parameter to add_argument().

    Recurses into sub-directories, yielding (path, moles) for each jpeg.
    """
    path = pathlib.Path(path)
    if path.is_dir():
        for item in sorted(path.iterdir()):
            if item.is_dir():
                yield from make_argparse_image_moles_tree(item)
            elif mel.lib.fs.is_jpeg_name(item):
                yield from make_argparse_image_moles(item)
    else:
        yield from make_argparse_image_moles(path)


class MoleListDiff:
    """Set-difference of two mole uuid collections, minus ignored uuids."""

    def __init__(self, old_uuids, new_uuids, ignore_new, ignore_missing):
        # uuids that appeared, excluding those we were told to ignore.
        self.new = (new_uuids - old_uuids) - ignore_new
        # uuids that disappeared, excluding those we were told to ignore.
        self.missing = (old_uuids - new_uuids) - ignore_missing
        # self.matching = old_uuids & new_uuids
        # self.ignored_new = (new_uuids - old_uuids) & ignore_new
        # self.ignored_missing = (old_uuids - new_uuids) & ignore_missing
        # self.would_ignore_new = ignore_new - (new_uuids - old_uuids)
        # self.would_ignore_missing = ignore_missing - (old_uuids - new_uuids)


def normalised_ellipse_mask(ellipse):
    """Return a normalized copy of the supplied ellipse.

    Here 'normalised' means that the rotation is as close to zero as possible.

    Examples:
        >>> normalised_ellipse_mask(
        ...     ((1, 2), (100, 200), 90)
        ... )
        ((1, 2), (200, 100), 0)
    """
    # Don't overwrite the original, we'll return a new ellipse.
    centre, extents, rotation = ellipse
    centre = list(centre[:])
    extents = list(extents[:])

    # Get the rotation as close to zero as possible.
    # Each 90-degree step swaps the major/minor extents.
    while rotation > 45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation -= 90
    while rotation < -45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation += 90

    return tuple(centre), tuple(extents), rotation


def validate_ellipse_mask(ellipse, max_x=10000, max_y=10000):
    """Raise ValueError if the ellipse's extents or centre are implausible."""
    max_length = max(max_x, max_y) * 2
    if ellipse[1][0] < 1 or ellipse[1][1] < 1:
        raise ValueError(f"Ellipse too small: {ellipse}")
    elif ellipse[1][0] > max_length or ellipse[1][1] > max_length:
        raise ValueError(f"Ellipse too big: {ellipse}")
    elif ellipse[0][0] < 0 or ellipse[0][1] < 0:
        raise ValueError(f"Ellipse out of bounds: {ellipse}")
    elif ellipse[0][0] > max_x or ellipse[0][1] > max_y:
        raise ValueError(f"Ellipse out of bounds: {ellipse}")


def load_image_metadata(image_path):
    """Load '<image>.meta.json' for image_path; return {} if absent."""
    metadata_path = pathlib.Path(str(image_path) + ".meta.json")
    metadata = {}
    if metadata_path.exists():
        metadata = load_json(metadata_path)
        if "ellipse" in metadata:
            try:
                validate_ellipse_mask(metadata["ellipse"])
            except ValueError as e:
                # Point the user at the offending file, keep the cause chained.
                raise ValueError(f"Bad data from '{metadata_path}'.") from e
    return metadata


def load_rotomap_dir_lesions_file(rotomap_dir_path):
    """Load and validate the per-directory lesions file; [] if absent."""
    rotomap_dir_path = pathlib.Path(rotomap_dir_path)
    if not rotomap_dir_path.exists():
        raise ValueError(
            f"Rotomap directory does not exist: '{rotomap_dir_path}'."
        )
    lesions_path = rotomap_dir_path / ROTOMAP_DIR_LESIONS_FILENAME
    lesions = []
    if lesions_path.exists():
        lesions = load_json(lesions_path)
    for m in lesions:
        if KEY_IS_UNCHANGED not in m:
            raise Exception(
                f"Mole must have {KEY_IS_UNCHANGED} status: {lesions_path} {m}"
            )
    for m in lesions:
        if m["uuid"] is None:
            raise Exception(f"Lesion UUID cannot be None: {lesions_path} {m}")
    return lesions


def save_rotomap_dir_lesions_file(rotomap_dir_path, lesions):
    """Write the per-directory lesions file for rotomap_dir_path."""
    rotomap_dir_path = pathlib.Path(rotomap_dir_path)
    if not rotomap_dir_path.exists():
        raise ValueError(
            f"Rotomap directory does not exist: '{rotomap_dir_path}'."
        )
    lesions_path = rotomap_dir_path / ROTOMAP_DIR_LESIONS_FILENAME
    save_json(lesions_path, lesions)


def load_image_moles(image_path, *, extra_stem=None):
    """Load the mole list for an image; [] if the json file is absent.

    extra_stem selects an alternative '<image>.<extra_stem>.json' file.
    Coordinates are coerced to int in-place.
    """
    if not pathlib.Path(image_path).exists():
        raise ValueError(f"Mole image does not exist: '{image_path}'.")
    suffix = ".json"
    if extra_stem is not None:
        suffix = f".{extra_stem}.json"
    moles_path = pathlib.Path(str(image_path) + suffix)
    moles = []
    if moles_path.exists():
        moles = load_json(moles_path)
    for m in moles:
        if KEY_IS_CONFIRMED not in m:
            raise Exception(
                f"Mole must have {KEY_IS_CONFIRMED} status: {moles_path} {m}"
            )
    for m in moles:
        m["x"] = int(m["x"])
        m["y"] = int(m["y"])
    for m in moles:
        if m["uuid"] is None:
            raise Exception(f"Mole UUID cannot be None: {moles_path} {m}")
    return moles


def normalise_moles(moles):
    """Coerce each mole's x/y coordinates to int, in-place."""
    for m in moles:
        m["x"] = int(m["x"])
        m["y"] = int(m["y"])


def save_image_metadata(metadata, image_path):
    """Write metadata to '<image>.meta.json'."""
    # NOTE(review): unlike save_image_moles below, this does not convert
    # image_path via str() first, so a pathlib.Path argument would raise
    # TypeError on '+'. Confirm callers always pass a str here.
    meta_path = image_path + ".meta.json"
    save_json(meta_path, metadata)


def save_image_moles(moles, image_path, *, extra_stem=None):
    """Write the mole list to '<image>[.extra_stem].json'."""
    # Explicitly convert 'image_path' to str. It might be a pathlib.Path, which
    # doesn't support addition in this way.
    moles_path = str(image_path)
    if extra_stem:
        moles_path += f".{extra_stem}"
    moles_path += ".json"

    save_json(moles_path, moles)


def load_json(path):
    """Load and return JSON data from path."""
    with open(path) as f:
        return json.load(f)


def save_json(path, data):
    """Write data to path as stably-ordered, indented JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=4, separators=(",", ": "), sort_keys=True)

        # There's no newline after dump(), add one here for happier viewing
        print(file=f)


def make_new_uuid():
    """Return a fresh hex uuid string."""
    return uuid.uuid4().hex


def add_mole(moles, x, y, mole_uuid=None):
    """Append a new mole dict to moles.

    A supplied uuid is treated as confirmed; a generated one is not.
    """
    is_uuid_canonical = True
    if mole_uuid is None:
        mole_uuid = make_new_uuid()
        is_uuid_canonical = False

    moles.append(
        {
            "x": x,
            "y": y,
            "uuid": mole_uuid,
            KEY_IS_CONFIRMED: is_uuid_canonical,
        }
    )


def nearest_mole_index(moles, x, y):
    """Return the index of the mole nearest (x, y), or None if empty."""
    return nearest_mole_index_distance(moles, x, y)[0]


def nearest_mole_index_distance(moles, x, y):
    """Return (index, distance) of the mole nearest (x, y); (None, None) if empty."""
    nearest_index = None
    nearest_distance = None
    for i, mole in enumerate(moles):
        dx = x - mole["x"]
        dy = y - mole["y"]
        distance = math.sqrt(dx * dx + dy * dy)
        if nearest_distance is None or distance < nearest_distance:
            nearest_index = i
            nearest_distance = distance

    return nearest_index, nearest_distance


def uuid_mole_index(moles, mole_uuid):
    """Return the index of the first mole with the specified uuid."""
    for i, mole in enumerate(moles):
        if mole["uuid"] == mole_uuid:
            return i
    return None


def set_nearest_mole_uuid(moles, x, y, mole_uuid, is_canonical=True):
    """Assign mole_uuid (and confirmed status) to the mole nearest (x, y)."""
    nearest_index = nearest_mole_index(moles, x, y)
    if nearest_index is not None:
        moles[nearest_index]["uuid"] = mole_uuid
        moles[nearest_index][KEY_IS_CONFIRMED] = is_canonical


def get_nearest_mole_uuid(moles, x, y):
    """Return the uuid of the mole nearest (x, y), or None if empty."""
    nearest_index = nearest_mole_index(moles, x, y)
    if nearest_index is not None:
        return moles[nearest_index]["uuid"]

    return None


def move_nearest_mole(moles, x, y):
    """Move the mole nearest (x, y) to exactly (x, y)."""
    nearest_index = nearest_mole_index(moles, x, y)
    if nearest_index is not None:
        moles[nearest_index]["x"] = x
        moles[nearest_index]["y"] = y


def remove_nearest_mole(moles, x, y):
    """Delete the mole nearest (x, y) from moles, if any."""
    nearest_index = nearest_mole_index(moles, x, y)
    if nearest_index is not None:
        del moles[nearest_index]


def mole_list_to_pointvec(mole_list):
    """Return an array of (x, y) rows, one per mole."""
    return numpy.array(tuple((m["x"], m["y"]) for m in mole_list))


def mole_to_point(mole):
    """Return the mole's (x, y) as an int numpy vector; raise if non-int."""
    pos = numpy.array((mole["x"], mole["y"]))
    mel.lib.math.raise_if_not_int_vector2(pos)
    return pos


def to_uuid_points(moles):
    """Return a dict mapping each mole's uuid to its (x, y) point."""
    uuid_points = {}
    for m in moles:
        uuid_points[m["uuid"]] = mole_to_point(m)
    return uuid_points


def load_potential_set_file(path, filename):
    """Load a set of non-blank, non-comment lines from path/filename.

    Returns an empty set if the file does not exist.
    """
    ignore_set = set()
    file_path = path / filename
    if file_path.is_file():
        with file_path.open() as f:
            lines = f.read().splitlines()
        for text in lines:
            text = text.strip()
            # '#' lines are comments, blank lines are ignored.
            if text and not text.startswith("#"):
                ignore_set.add(text)
    return ignore_set


# -----------------------------------------------------------------------------
# Copyright (C) 2016-2019 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
{"hexsha": "7a0b5bb0de522ec1628b53070e0612752a3a9f91", "size": 13361, "ext": "py", "lang": "Python", "max_stars_repo_path": "mel/rotomap/moles.py", "max_stars_repo_name": "aevri/mel", "max_stars_repo_head_hexsha": "4451b272ddc2095f757423ff519f36fc57708ec6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-04-20T12:27:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T21:01:39.000Z", "max_issues_repo_path": "mel/rotomap/moles.py", "max_issues_repo_name": "aevri/mel", "max_issues_repo_head_hexsha": "4451b272ddc2095f757423ff519f36fc57708ec6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2016-06-20T15:02:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T06:23:01.000Z", "max_forks_repo_path": "mel/rotomap/moles.py", "max_forks_repo_name": "aevri/mel", "max_forks_repo_head_hexsha": "4451b272ddc2095f757423ff519f36fc57708ec6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-12-27T12:33:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-23T04:05:47.000Z", "avg_line_length": 29.7572383073, "max_line_length": 79, "alphanum_fraction": 0.6316144001, "include": true, "reason": "import numpy", "num_tokens": 3309}
import os
import math
import pickle
import warnings
import logging

import numpy as np
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import gluon, init, autograd, nd
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
import gluoncv as gcv
from gluoncv.model_zoo import get_model
from gluoncv import utils as gutils
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultTrainTransform
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultValTransform
from gluoncv.data.dataloader import RandomTransformDataLoader
from gluoncv.data.transforms import presets
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.utils import LRScheduler, LRSequential

from .utils import *
from ..base.base_predictor import BasePredictor
from ...utils import save, load, tqdm


class Detector(BasePredictor):
    """Object detector returned by task.fit().

    Wraps a trained gluoncv YOLO network together with the HPO results and
    the arguments used during the search.
    """

    def __init__(self, model, results, scheduler_checkpoint, args, **kwargs):
        # model: trained gluon network (HybridBlock).
        self.model = model
        # results: raw scheduler results, reorganised for presentation.
        self.results = self._format_results(results)
        self.scheduler_checkpoint = scheduler_checkpoint
        self.args = args

    def evaluate(self, dataset, ctx=None):
        """Evaluate the detector on a test dataset.

        Args:
            dataset: test dataset (or an AutoGluonObject wrapping one) that
                provides get_dataset_and_metric().
            ctx: list of mxnet contexts to run on; defaults to [mx.cpu()].

        Returns:
            The (names, values) pair from the dataset's evaluation metric.
        """
        # Avoid a mutable/eagerly-evaluated default argument.
        if ctx is None:
            ctx = [mx.cpu()]
        args = self.args
        net = self.model

        def _get_dataloader(net, test_dataset, data_shape, batch_size,
                            num_workers, args):
            """Build the validation dataloader with YOLO val transforms."""
            width, height = data_shape, data_shape
            # Pad variable-length label arrays with -1 so they can be batched.
            val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
            test_loader = gluon.data.DataLoader(
                test_dataset.transform(YOLO3DefaultValTransform(width, height)),
                batch_size, False, batchify_fn=val_batchify_fn,
                last_batch='keep', num_workers=num_workers)
            return test_loader

        def _validate(net, val_data, ctx, eval_metric):
            """Run the network over val_data and accumulate eval_metric."""
            eval_metric.reset()
            # set nms threshold and topk constraint
            net.set_nms(nms_thresh=0.45, nms_topk=400)
            mx.nd.waitall()
            net.hybridize()
            for batch in val_data:
                data = gluon.utils.split_and_load(
                    batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
                label = gluon.utils.split_and_load(
                    batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
                det_bboxes = []
                det_ids = []
                det_scores = []
                gt_bboxes = []
                gt_ids = []
                gt_difficults = []
                for x, y in zip(data, label):
                    # get prediction results
                    ids, scores, bboxes = net(x)
                    det_ids.append(ids)
                    det_scores.append(scores)
                    # clip to image size
                    det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
                    # split ground truths
                    gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
                    gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
                    gt_difficults.append(
                        y.slice_axis(axis=-1, begin=5, end=6)
                        if y.shape[-1] > 5 else None)
                # update metric
                eval_metric.update(det_bboxes, det_ids, det_scores,
                                   gt_bboxes, gt_ids, gt_difficults)
            return eval_metric.get()

        if isinstance(dataset, AutoGluonObject):
            dataset = dataset.init()
        test_dataset, eval_metric = dataset.get_dataset_and_metric()
        test_data = _get_dataloader(
            net, test_dataset, args.data_shape, args.batch_size,
            args.num_workers, args)
        return _validate(net, test_data, ctx, eval_metric)

    @staticmethod
    def _format_results(results):
        """Reorganise raw scheduler results into a presentable dict."""

        def _merge_scheduler_history(training_history, config_history,
                                     reward_attr):
            """Join per-trial training history with its sampled config."""
            trial_info = {}
            for tid, config in config_history.items():
                trial_info[tid] = {}
                trial_info[tid]['config'] = config
                if tid in training_history:
                    trial_info[tid]['history'] = training_history[tid]
                    trial_info[tid]['metadata'] = {}
                    # Promote the final reward to the top level of the trial.
                    if len(training_history[tid]) > 0 and \
                            reward_attr in training_history[tid][-1]:
                        last_history = training_history[tid][-1]
                        trial_info[tid][reward_attr] = \
                            last_history.pop(reward_attr)
                        trial_info[tid]['metadata'].update(last_history)
            return trial_info

        training_history = results.pop('training_history')
        config_history = results.pop('config_history')
        results['trial_info'] = _merge_scheduler_history(
            training_history, config_history, results['reward_attr'])
        results[results['reward_attr']] = results.pop('best_reward')
        results['search_space'] = results['metadata'].pop('search_space')
        results['search_strategy'] = results['metadata'].pop('search_strategy')
        return results

    def predict(self, X, input_size=224, thresh=0.15, plot=True):
        """Run detection on a single image file.

        Args:
            X: path to the image.
            input_size: unused; kept for backward compatibility (the image is
                resized with short=512 below).
            thresh: score threshold for boxes drawn in the plot.
            plot: if True, display the detections with matplotlib.

        Returns:
            (ids, scores, bboxes) numpy arrays for the first image.
        """
        net = self.model
        net.set_nms(0.45, 200)
        net.collect_params().reset_ctx(ctx=mx.cpu())
        x, img = presets.yolo.load_test(X, short=512)
        ids, scores, bboxes = [xx[0].asnumpy() for xx in net(x)]
        if plot:
            gcv.utils.viz.plot_bbox(img, bboxes, scores, ids, thresh=thresh,
                                    class_names=net.classes, ax=None)
            plt.show()
        return ids, scores, bboxes

    # NOTE: the original code raised `NotImplemented`, which is not an
    # exception class and itself raises TypeError; NotImplementedError is
    # the correct exception for unimplemented methods.
    @classmethod
    def load(cls, checkpoint):
        """Not supported for Detector yet."""
        raise NotImplementedError

    def save(self, checkpoint):
        """Not supported for Detector yet."""
        raise NotImplementedError

    def predict_proba(self, X):
        """Not supported for Detector yet."""
        raise NotImplementedError

    def _save_model(self, *args, **kwargs):
        """Not supported for Detector yet."""
        raise NotImplementedError

    def evaluate_predictions(self, y_true, y_pred):
        """Not supported for Detector yet."""
        raise NotImplementedError
{"hexsha": "c8b265de4368b923a9f9f7730481f938f8c272e5", "size": 6574, "ext": "py", "lang": "Python", "max_stars_repo_path": "autogluon/task/object_detection/detector.py", "max_stars_repo_name": "awesome-archive/autogluon", "max_stars_repo_head_hexsha": "2fa500f1359e1909cfb5eac8c6ecdea6d4e2e9aa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-14T21:28:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T09:52:41.000Z", "max_issues_repo_path": "autogluon/task/object_detection/detector.py", "max_issues_repo_name": "CharlotteSean/autogluon", "max_issues_repo_head_hexsha": "58c51d4fd5c8abe3db5c509ac94064111cf1198d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autogluon/task/object_detection/detector.py", "max_forks_repo_name": "CharlotteSean/autogluon", "max_forks_repo_head_hexsha": "58c51d4fd5c8abe3db5c509ac94064111cf1198d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-14T21:28:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T21:28:55.000Z", "avg_line_length": 39.3652694611, "max_line_length": 112, "alphanum_fraction": 0.6171280803, "include": true, "reason": "import numpy", "num_tokens": 1416}
import tqdm import matplotlib.pyplot as plt from tensorboardX import SummaryWriter from torch.utils.data import DataLoader from object_dataset import DatasetObjects import numpy as np labels_to_object = { 0: 'GoodGoal', 1: 'BadGoal', 2: 'GoodGoalMulti', 3: 'Wall', 4: 'Ramp', 5: 'CylinderTunnel', 6: 'WallTransparent', 7: 'CylinderTunnelTransparent', 8: 'Cardbox1', 9: 'Cardbox2', 10: 'UObject', 11: 'LObject', 12: 'LObject2', 13: 'DeathZone', 14: 'HotZone', 15: 'lol' } def log_plots(log_dir, train_data, test_data, samples=100): # Define logger writer = SummaryWriter(log_dir, flush_secs=5) dataset_train = DatasetObjects(train_data) # Define dataloader dataloader_parameters = { "num_workers": 0, "shuffle": True, "pin_memory": True, "batch_size": 1, "drop_last": True } dataloader_train = DataLoader(dataset_train, **dataloader_parameters) t = tqdm.tqdm(dataloader_train) for idx, data in enumerate(t): if idx == samples: break obs, label = data obs = obs[0, :, :, :].permute(1, 2, 0).numpy() label = label[0].numpy() fig = plt.figure() ax1 = plt.axes() plt.tick_params( axis='both', which='both', bottom=False, top=False, left=False, labelbottom=False, labelleft=False) ax1.imshow(obs / 255.) writer.add_figure( 'plots/{}_{}'.format(labels_to_object[label[0]], idx), fig, idx) if __name__ == "__main__": import os import argparse parser = argparse.ArgumentParser(description='RL') parser.add_argument('--data-dir', help='Data directory') parser.add_argument('--log-dir', help='Target log directory') args = parser.parse_args() if not os.path.isdir(args.log_dir): os.mkdir(args.log_dir) args.log_dir += "/plots/" if not os.path.isdir(args.log_dir): os.mkdir(args.log_dir) log_plots( log_dir=args.log_dir, train_data=args.data_dir + "/train_object_data.npz", test_data=args.data_dir + "/test_object_data.npz", )
{"hexsha": "7b57bf1aceef87e5e4061910362b80167951f621", "size": 2324, "ext": "py", "lang": "Python", "max_stars_repo_path": "main/object_detection_module/object_visualize_data.py", "max_stars_repo_name": "compsciencelab/ppo_D", "max_stars_repo_head_hexsha": "1870c908f498ceb29295e5625ff5598bed82cbb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-08-18T07:47:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T17:27:21.000Z", "max_issues_repo_path": "main/object_detection_module/object_visualize_data.py", "max_issues_repo_name": "compsciencelab/ppo_D", "max_issues_repo_head_hexsha": "1870c908f498ceb29295e5625ff5598bed82cbb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main/object_detection_module/object_visualize_data.py", "max_forks_repo_name": "compsciencelab/ppo_D", "max_forks_repo_head_hexsha": "1870c908f498ceb29295e5625ff5598bed82cbb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-16T11:03:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T11:03:12.000Z", "avg_line_length": 23.7142857143, "max_line_length": 80, "alphanum_fraction": 0.57616179, "include": true, "reason": "import numpy", "num_tokens": 581}
import obspy
import numpy as np


class Dataset(object):
    """ Seismic data container

    Basically, a list of obspy streams. Each stream corresponds to a single
    seismic station and holds all the components recorded at that station.
    Methods that help with data processing and metadata extraction are also
    provided.

    Each supported file format will have a corresponding reader utility that
    creates an MTUQ Dataset from files stored in that format.  For an example,
    see mtuq.dataset.sac.reader
    """

    def __init__(self, streams=None, id=None):
        """Create a dataset, optionally populated from an iterable of streams.

        Args:
            streams: iterable of obspy Streams, each with an 'id' attribute.
            id: identifier shared by all streams — typically the event name,
                event origin time, or some other common attribute.
        """
        self.id = id
        self.__list__ = []

        if not streams:
            # if nothing given return an empty container, streams can be added
            # later on
            return

        for stream in streams:
            self.__add__(stream)

    # the next two methods can be used to apply signal processing operations or
    # other functions to the dataset

    def apply(self, function, *args, **kwargs):
        """ Applies a function in-place to each Stream in the dataset.
        Similar to the behavior of the python built-in "apply".
        """
        processed = self.__class__(id=self.id)
        for stream in self.__list__:
            processed += function(stream, *args, **kwargs)
        return processed

    def map(self, function, *sequences):
        """ Applies a function in-place to each Stream in the dataset. If one
        or more optional sequences are given, the function is called with an
        argument list consisting of the corresponding item of each sequence.
        Similar to the behavior of the python built-in "map".
        """
        processed = self.__class__(id=self.id)
        for _i, stream in enumerate(self.__list__):
            args = [sequence[_i] for sequence in sequences]
            processed += function(stream, *args)
        return processed

    # min/max amplitude

    def min(self):
        """Return the smallest sample value across all weighted traces."""
        min_all = np.inf
        for stream in self:
            for trace in stream:
                # Traces with zero weight are excluded from the extremum.
                weight = getattr(trace, 'weight', 1.)
                if not weight:
                    continue
                if trace.data.min() < min_all:
                    min_all = trace.data.min()
        return min_all

    def max(self):
        """Return the largest sample value across all weighted traces."""
        max_all = -np.inf
        for stream in self:
            for trace in stream:
                weight = getattr(trace, 'weight', 1.)
                if not weight:
                    continue
                if trace.data.max() > max_all:
                    max_all = trace.data.max()
        return max_all

    # various sorting methods

    def sort_by_distance(self, reverse=False):
        """ Sorts in-place by hypocentral distance
        """
        self.sort_by_function(lambda data: data.meta.catalog_distance,
                              reverse=reverse)

    def sort_by_azimuth(self, reverse=False):
        """ Sorts in-place by source-receiver azimuth
        """
        self.sort_by_function(lambda data: data.meta.catalog_azimuth,
                              reverse=reverse)

    def sort_by_function(self, function, reverse=False):
        """ Sorts in-place using the python built-in "sort"
        """
        self.__list__.sort(key=function, reverse=reverse)

    # because the way metadata are organized in obspy streams depends on file
    # format, the next two methods are deferred to the subclass

    def get_origin(self):
        """ Extracts origin information from metadata
        """
        raise NotImplementedError("Must be implemented by subclass")

    def get_station(self):
        """ Extracts station metadata
        """
        raise NotImplementedError("Must be implemented by subclass")

    # tags can be used to store custom metadata (e.g. not already returned by
    # dataset.get_station or dataset.get_origin) or support other customized
    # uses

    def add_tag(self, tag):
        """Append a custom metadata tag to every stream."""
        for stream in self:
            stream.tags.append(tag)

    def remove_tag(self, tag):
        """Remove a custom metadata tag from every stream."""
        for stream in self:
            stream.tags.remove(tag)

    # the next method is called repeatedly during Dataset creation

    def __add__(self, stream):
        """Append a stream to the dataset, attaching metadata if available."""
        assert hasattr(stream, 'id')
        assert isinstance(stream, obspy.Stream)
        stream.tags = []
        self.__list__.append(stream)
        # Metadata extraction is deferred to subclasses and is best-effort:
        # a base-class Dataset simply leaves meta/catalog_origin unset.
        # (Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        try:
            stream.meta = self.get_station()
            stream.catalog_origin = self.get_origin()
        except Exception:
            pass
        return self

    def remove(self, id):
        """Remove the stream with the given id.

        Raises TypeError (via list.pop(None)) if no stream matches.
        """
        index = self._get_index(id)
        self.__list__.pop(index)

    # the remaining methods deal with indexing and iteration over the dataset

    def _get_index(self, id):
        """Return the index of the stream with the given id, or None."""
        for index, stream in enumerate(self.__list__):
            if id == stream.id:
                return index

    def __iter__(self):
        return self.__list__.__iter__()

    def __getitem__(self, index):
        return self.__list__[index]

    def __setitem__(self, index, value):
        self.__list__[index] = value

    def __len__(self):
        return len(self.__list__)
{"hexsha": "4c1dd542f0f7544fa9518443ae80ca47d9e37269", "size": 5274, "ext": "py", "lang": "Python", "max_stars_repo_path": "mtuq/dataset/base.py", "max_stars_repo_name": "junxie01/mtuq", "max_stars_repo_head_hexsha": "6adf9e983c221e788daf1dec26d028bc406ce4e6", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-28T18:06:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-28T18:06:40.000Z", "max_issues_repo_path": "mtuq/dataset/base.py", "max_issues_repo_name": "junxie01/mtuq", "max_issues_repo_head_hexsha": "6adf9e983c221e788daf1dec26d028bc406ce4e6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mtuq/dataset/base.py", "max_forks_repo_name": "junxie01/mtuq", "max_forks_repo_head_hexsha": "6adf9e983c221e788daf1dec26d028bc406ce4e6", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.978021978, "max_line_length": 79, "alphanum_fraction": 0.6052332196, "include": true, "reason": "import numpy", "num_tokens": 1093}
""" Implements the agent and game classes for the Sharing Game. Each class inherit from the general agent and game class, respectively. """ import numpy as np from opinet import Agent, Game class SharingAgent(Agent): """ Describes a agent in the Sharing Game """ def __init__(self, init_stances, alphas, betas, gammas, strategies, T, stance_mult=None, diff_mult=None): Agent.__init__(self, init_stances, alphas, betas, gammas, T) # record strateiges strategies_map = { 'truthful': self.get_truthful_actions, 'extreme': self.get_extreme_actions, 'linear': self.get_linear_actions } self.get_actions = strategies_map[strategies] # for linear strategies, uniformly initialize sharing traits self.stance_mult = stance_mult self.diff_mult = diff_mult def get_truthful_actions(self, G, t, prev_actions=None): """ Agents share current stances. """ actions = self.stances[t] return actions def get_extreme_actions(self, G, t, prev_actions=None): """ Agents share -1 if stance < 0 and +1 if stance > 0 and 0 is stance = 0 """ stances = self.stances[t] actions = np.empty((self.n)) actions[stances < 0] = -1 actions[stances > 0] = 1 actions[stances == 0] = 0 return actions def get_linear_actions(self, G, t, prev_actions): """ s = diff_mult * (average stance difference) + stance_mult * current stance """ diffs = abs(self.get_stances_diffs(t)) diffs[G != 1] = np.NAN avg_diffs = np.nanmean(diffs, axis=1) actions = (self.stances[t] * self.stance_mult + avg_diffs * self.diff_mult) actions = np.clip(actions, -1, 1) return actions class SharingGame(Game): """ Describes an instance of a Sharing Game. 
""" def __init__(self, agents, E_mat, T, calc_utilities=True, keep_actions=True): Game.__init__(self, agents, T, calc_utilities) # must be one agent for each node assert(E_mat.shape[0] == self.n) # store graph as adjacency matrix; 1 => edge, NAN => no edge self.G = np.copy(E_mat) self.G[self.G == 0] = np.NAN np.fill_diagonal(self.G, np.NAN) # record actions self.keep_actions = keep_actions if self.keep_actions: # utilities[t,i] = utility of agent i in time t self.actions = np.empty((T, self.n)) else: self.actions = None self.curr_actions = np.empty(self.n) def update_actions(self, G): actions = self.agents.get_actions(G, self.t, self.actions) if self.keep_actions: self.actions[self.t] = actions self.curr_actions = actions def run(self): while self.t < self.T: if self.t != 0: self.update_stances(self.G) self.update_actions(self.G) self.update_utilities(self.G) self.t += 1 return self.agents.stances, self.actions, self.utilities
{"hexsha": "c8c71bc90a1b765f66f5d9cd3bd5692572949f27", "size": 3228, "ext": "py", "lang": "Python", "max_stars_repo_path": "opinet/sharing.py", "max_stars_repo_name": "ryanwallace96/opinet", "max_stars_repo_head_hexsha": "72495ceef2382cb76f2318f970147cc7c6c8a45a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opinet/sharing.py", "max_issues_repo_name": "ryanwallace96/opinet", "max_issues_repo_head_hexsha": "72495ceef2382cb76f2318f970147cc7c6c8a45a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opinet/sharing.py", "max_forks_repo_name": "ryanwallace96/opinet", "max_forks_repo_head_hexsha": "72495ceef2382cb76f2318f970147cc7c6c8a45a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0810810811, "max_line_length": 79, "alphanum_fraction": 0.583952912, "include": true, "reason": "import numpy", "num_tokens": 772}
#!/usr/bin/env python """ logarithmic normal distribution: China most people # ---- # License: BSD # ---- # 0.1 - init version - 2018.4 - by Nick Qian """ from scipy.stats import lognorm import matplotlib.pyplot as plt import numpy as np def logNrm_dist(s): x = np.linspace(lognorm.ppf(0.01, s), lognorm.ppf(0.99, s), 100) return x def main(sharp): fig, ax = plt.subplots(1, 1) mean, var, skew, kurt = lognorm.stats(s, moments='mvsk') x = logNrm_dist(s) ax.plot(x, lognorm.pdf(x, s), 'r-', lw=5, alpha=0.6, label='lognorm pdf') # freeze the dist and display the frozen pdf rv = lognorm(s) ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') # gen radom numbers r = lognorm.rvs(s, size=1000) #ax.hist(r, histtype='stepfilled', alpha=0.2) ax.legend(loc='best', frameon=False) plt.show() if __name__ == "__main__": s = 0.654 main(s)
{"hexsha": "fa65ca2d27772ab5838910a62f74cbc99d597bdd", "size": 921, "ext": "py", "lang": "Python", "max_stars_repo_path": "logNrm_dist.py", "max_stars_repo_name": "NickQian/pyIncome", "max_stars_repo_head_hexsha": "9aa3f17527d6903f893e2be58a34972946294394", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "logNrm_dist.py", "max_issues_repo_name": "NickQian/pyIncome", "max_issues_repo_head_hexsha": "9aa3f17527d6903f893e2be58a34972946294394", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "logNrm_dist.py", "max_forks_repo_name": "NickQian/pyIncome", "max_forks_repo_head_hexsha": "9aa3f17527d6903f893e2be58a34972946294394", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1875, "max_line_length": 77, "alphanum_fraction": 0.6102062975, "include": true, "reason": "import numpy,from scipy", "num_tokens": 305}
from collections import namedtuple import numpy as np ValueRange = namedtuple('ValueRange', ['min', 'max']) def determinerange(values): """Determine the range of values in each dimension""" r = ValueRange(np.min(values, axis=0), np.max(values, axis=0)) if np.any(r.max - r.min < 1e-8): r = ValueRange(r.min - 1e8, r.max + 1e8) return r def rescale(values, *, range_in=None, range_out=ValueRange(0, 1), scale_only=False): """Perform a scale transformation of `values`: [range_in] --> [range_out]""" if range_in is None: range_in = determinerange(values) elif not isinstance(range_in, ValueRange): range_in = ValueRange(*range_in) if not isinstance(range_out, ValueRange): range_out = ValueRange(*range_out) scale_out = range_out.max - range_out.min scale_in = range_in.max - range_in.min if scale_only: scaled_values = (values / scale_in) * scale_out else: scaled_values = (values - range_in.min) / scale_in scaled_values = (scaled_values * scale_out) + range_out.min return scaled_values
{"hexsha": "414cb1adc8e4683fe7bebf0e10f47d48922c328e", "size": 1105, "ext": "py", "lang": "Python", "max_stars_repo_path": "multiLevelCoSurrogates/utils/scaling.py", "max_stars_repo_name": "sjvrijn/multi-level-co-surrogates", "max_stars_repo_head_hexsha": "04a071eb4360bed6f1a517531690beec7857e3e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multiLevelCoSurrogates/utils/scaling.py", "max_issues_repo_name": "sjvrijn/multi-level-co-surrogates", "max_issues_repo_head_hexsha": "04a071eb4360bed6f1a517531690beec7857e3e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-25T14:07:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-25T14:12:35.000Z", "max_forks_repo_path": "multiLevelCoSurrogates/utils/scaling.py", "max_forks_repo_name": "sjvrijn/multi-level-co-surrogates", "max_forks_repo_head_hexsha": "04a071eb4360bed6f1a517531690beec7857e3e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5714285714, "max_line_length": 84, "alphanum_fraction": 0.6696832579, "include": true, "reason": "import numpy", "num_tokens": 285}
import os
import json
import enum

import numpy as np


class LaneAssociation(enum.Enum):
    """Discrete lane assignment for a vehicle."""
    LEFT = 0
    CENTER = 1
    RIGHT = 2
    UNKNOWN = 3


NUM_VEHICLES = 6
NUM_ITERATIONS = 1_000
FPS = 25

VEHICLE_FILENAME = "vehicle_data.json"
VEHICLE_FILEPATH = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), "data", VEHICLE_FILENAME
)
EGO_FILENAME = "ego_data.json"
EGO_VEHICLE_FILEPATH = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), "data", EGO_FILENAME
)

# Fixed seed so the generated data files are reproducible across runs.
np.random.seed(0)


def kph_to_mps(kph: float) -> float:
    """Convert a speed from km/h to m/s."""
    return kph / 3.6


def generate_vehicle_data(id_: int) -> dict:
    """Generate random simulation data for one vehicle.

    Lane and start-speed band depend on the vehicle id: ids 0-1 drive fast
    on the LEFT lane, 2-3 at medium speed on the CENTER lane, everything
    else slower on the RIGHT lane. The speed trace is a Gaussian random
    walk of NUM_ITERATIONS samples (m/s).

    :param id_: vehicle index (non-negative)
    :return: dict with keys "Lane" (int), "Distance" (float, metres
             relative to the ego vehicle) and "Speed" (list of float, m/s)
    """
    if id_ < 2:
        lane_data = int(LaneAssociation.LEFT.value)
        start_speed = np.random.uniform(110, 130)
    elif id_ < 4:
        lane_data = int(LaneAssociation.CENTER.value)
        start_speed = np.random.uniform(100, 110)
    else:
        lane_data = int(LaneAssociation.RIGHT.value)
        start_speed = np.random.uniform(80, 100)

    # Even ids start ahead of the ego vehicle, odd ids behind it.
    # The previous if/elif chain only handled ids 0-5 and left `distance`
    # unbound (UnboundLocalError) for any other id; the parity test yields
    # the identical branches and RNG consumption for ids 0-5.
    if id_ % 2 == 0:
        distance = np.random.uniform(20, 80)
    else:
        distance = np.random.uniform(-80, -20)

    start_speed = kph_to_mps(start_speed)
    speed_data = [start_speed for _ in range(NUM_ITERATIONS)]
    for it in range(1, NUM_ITERATIONS):
        # Gaussian random walk around the previous sample.
        speed_data[it] = speed_data[it - 1] + np.random.normal(loc=0.0, scale=0.5)

    return {
        "Lane": lane_data,
        "Distance": distance,
        "Speed": speed_data,
    }


def main() -> int:
    """Write randomized vehicle and ego-vehicle data files as JSON.

    :return: 0 on success (shell-style exit code)
    """
    vehicle_datas = {i: generate_vehicle_data(i) for i in range(NUM_VEHICLES)}
    with open(VEHICLE_FILEPATH, "w") as file_object:
        json.dump(vehicle_datas, file_object)

    ego_vehicle_data = {
        "Lane": int(LaneAssociation.CENTER.value),
        "Speed": kph_to_mps(np.random.uniform(110, 130)),
    }
    with open(EGO_VEHICLE_FILEPATH, "w") as file_object:
        json.dump(ego_vehicle_data, file_object)
    return 0


if __name__ == "__main__":
    main()
{"hexsha": "3db70ee060e786e9f7282e43479525101081331c", "size": 2093, "ext": "py", "lang": "Python", "max_stars_repo_path": "05_String/AdasInput/scripts/data.py", "max_stars_repo_name": "franneck94/UdemyCppExercises", "max_stars_repo_head_hexsha": "862d3e3df198ef8f3c7b850bbeead6161700f9d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "05_String/AdasInput/scripts/data.py", "max_issues_repo_name": "franneck94/UdemyCppExercises", "max_issues_repo_head_hexsha": "862d3e3df198ef8f3c7b850bbeead6161700f9d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "05_String/AdasInput/scripts/data.py", "max_forks_repo_name": "franneck94/UdemyCppExercises", "max_forks_repo_head_hexsha": "862d3e3df198ef8f3c7b850bbeead6161700f9d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-02-06T20:05:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T12:48:58.000Z", "avg_line_length": 23.5168539326, "max_line_length": 82, "alphanum_fraction": 0.646440516, "include": true, "reason": "import numpy", "num_tokens": 597}
import numpy as np
from .topology import cellular_automaton2d


class Sandpile:
    """Sandpile cellular automaton on a 2D grid with von Neumann neighbourhoods.

    A cell holding at least K grains topples, shedding K grains; each
    neighbour that topples contributes one grain back to the cell.
    """

    def __init__(self, rows, cols, is_closed_boundary=True):
        # K is hard-coded at 4 because the neighbourhood type, "von Neumann",
        # is fixed (one grain per neighbour on toppling).
        self._K = 4
        self._network = cellular_automaton2d(rows=rows, cols=cols, neighbourhood="von Neumann")
        self._boundary_indices = self._get_boundary_indices((rows, cols))
        self._is_closed_boundary = is_closed_boundary

    def _get_boundary_indices(self, shape):
        """Return the flat indices of every cell on the edge of the grid."""
        grid = np.arange(shape[0] * shape[1]).reshape(shape)
        edges = (grid[0], grid[-1], grid[:, 0], grid[:, -1])
        return np.concatenate(edges, axis=None)

    def activity_rule(self, ctx):
        """Compute a node's next grain count from its current neighbourhood.

        Closed-boundary cells are pinned to 0 so grains fall off the edge.
        """
        if self._is_closed_boundary and ctx.node_label in self._boundary_indices:
            return 0
        # The neighbourhood includes the node itself; drop it before counting.
        others = list(ctx.neighbourhood_activities)
        others.pop(ctx.neighbour_labels.index(ctx.node_label))
        # Gain one grain per toppling neighbour; lose K grains if we topple.
        gained = sum(1 for activity in others if activity >= self._K)
        lost = self._K if ctx.current_activity >= self._K else 0
        return ctx.current_activity + gained - lost

    @property
    def network(self):
        return self._network
{"hexsha": "b9d17f45d4acc4ad2438442832db93e93db4fca8", "size": 1317, "ext": "py", "lang": "Python", "max_stars_repo_path": "netomaton/sandpile.py", "max_stars_repo_name": "lantunes/netomaton", "max_stars_repo_head_hexsha": "fef60a787d031c9c7b1eb4ff990f7c12145579ef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2018-12-07T14:11:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T23:47:21.000Z", "max_issues_repo_path": "netomaton/sandpile.py", "max_issues_repo_name": "lantunes/netomaton", "max_issues_repo_head_hexsha": "fef60a787d031c9c7b1eb4ff990f7c12145579ef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-15T06:45:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-15T23:50:13.000Z", "max_forks_repo_path": "netomaton/sandpile.py", "max_forks_repo_name": "lantunes/netomaton", "max_forks_repo_head_hexsha": "fef60a787d031c9c7b1eb4ff990f7c12145579ef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-10-18T08:47:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T10:17:12.000Z", "avg_line_length": 34.6578947368, "max_line_length": 103, "alphanum_fraction": 0.6788154897, "include": true, "reason": "import numpy", "num_tokens": 297}
// Computation of protein-domain probabilities, aggregated at several
// taxonomic levels (per assembly, per phylum, per superkingdom, global).
// Worker tasks are fanned out across threads with std::async.
#include <fstream>
#include <iostream>
#include <filesystem>
#include <string>
#include <unordered_map>
#include <algorithm>
#include <chrono>
#include <future>
#include <utility>

#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <xtensor/xarray.hpp>
#include <xtensor/xmath.hpp>

#include "csv.hpp"
#include "../assembly/assembly.cpp"
#include "../probability/probability.cpp"
#include "../probability/probability_util.cpp"
#include "../domain/domain.cpp"
#include "./context.cpp"

using namespace std;
using namespace std::chrono;
using namespace csv;

// Thread worker: for every assembly in `assembly_ids`, read its gene
// probabilities and its gzipped protein-domain table, compute per-domain
// log-probabilities (observed vs. random baseline), and write them sorted
// in descending order to one CSV per assembly.
// Returns true on success; per-assembly exceptions are logged and re-thrown.
bool task_compute_domain_probabilities_per_assembly(
    const int task_nb,
    const DomainProbabilityContext ctx,
    const vector<string>& assembly_ids
) {
    auto start = system_clock::now();
    cerr << "Thread " << task_nb << " started." << endl;

    const string kind = ctx.kind;
    const string query = ctx.query;
    const string tail = ctx.tail;

    string dataFolder = "../data/";
    string sequencesFolder = dataFolder + "sequences/";

    auto n_assemblies = assembly_ids.size();

    // Maps domain id -> (query, description), used to enrich output records.
    string metadata_path = dataFolder + query + "_master.csv";
    auto metadata = LoadDomainMetadata(metadata_path);

    int i = 0;
    for (auto& accession : assembly_ids) {
        // Progress log for the first assembly and then every 100th.
        if (i == 0 || (i+1) % 100 == 0) {
            auto tp = system_clock::now();
            auto elapsed = duration_cast<seconds>(tp - start).count();
            cerr << "Thread " << task_nb << ": ";
            cerr << "Processing assembly " << i + 1 << " / " << n_assemblies;
            cerr << " (elapsed: " << elapsed << " seconds)" << endl;
        }
        ++i;

        string gene_probs_path = (
            sequencesFolder + accession + "/" + accession + ctx.distance_to_mean_suffix
        );
        try {
            ifstream gene_probs_file(gene_probs_path);
            GeneProbabilies gene_probs(gene_probs_file, tail);

            // The protein-domain table ships gzip-compressed; decompress on
            // the fly through a boost::iostreams filtering stream.
            string protein_domains_path = (
                sequencesFolder + accession + "/" + accession + "_" + query + ".csv.gz"
            );
            ifstream protein_domains_file(protein_domains_path);
            boost::iostreams::filtering_streambuf<boost::iostreams::input> inbuf;
            inbuf.push(boost::iostreams::gzip_decompressor());
            inbuf.push(protein_domains_file);
            istream instream(&inbuf);
            ProteinDomains domains(instream);

            // Output goes next to the input data unless the context overrides it.
            string outputFolder;
            if (ctx.assembly_output_folder.empty()) {
                outputFolder = sequencesFolder + accession;
            } else {
                outputFolder = ctx.assembly_output_folder;
            }
            // "tri-nucleotide" vs amino-acid ("_aa_") output naming scheme.
            string assembly_domain_prob_out_path;
            if (kind == "tri-nucleotide") {
                assembly_domain_prob_out_path = (
                    outputFolder + "/" + accession + "_" + query +
                    "_probability_" + tail + ".csv"
                );
            } else {
                assembly_domain_prob_out_path = (
                    outputFolder + "/" + accession + "_" + query +
                    "_aa_probability_" + tail + ".csv"
                );
            }
            ofstream of(assembly_domain_prob_out_path);
            auto writer = make_csv_writer(of);
            writer << DomainProbability::RecordHeader();

            vector<DomainProbability> records;
            for (ProteinDomain& domain : domains.Keys()) {
                // Attach query/description metadata when available.
                if (metadata.find(domain.id) != metadata.end()) {
                    auto& [domain_query, domain_description] = metadata[domain.id];
                    domain.query = domain_query;
                    domain.description = domain_description;
                }
                // Observed probabilities and a shuffled/random baseline
                // (third argument of Probabilities toggles randomization).
                xt::xarray<double> probs = domains.Probabilities(domain, gene_probs);
                xt::xarray<double> probs_random = domains.Probabilities(domain, gene_probs, true);
                xt::xarray<double> log_probabilities = xt::eval(xt::log(probs));
                xt::xarray<double> log_probabilities_random = xt::eval(xt::log(probs_random));
                // Combine per-gene log-probabilities into one value
                // (product rule in log space, i.e. a sum of logs).
                double log_prob = product_rule_log(log_probabilities);
                double log_prob_random = product_rule_log(log_probabilities_random);
                DomainProbability record(
                    domain,
                    log_prob,
                    log_prob_random,
                    log_probabilities.size()
                );
                records.push_back(record);
            }
            // Most probable domains first.
            sort(records.begin(), records.end(), greater<DomainProbability>());
            for (auto& record : records) {
                writer << record.Record();
            }
        } catch (exception& e) {
            cerr << "Thread " << task_nb << " | Assembly: " << accession << " | ";
            cerr << "Exception: " << e.what() << endl;
            throw;
        }
    }
    auto tp = system_clock::now();
    auto elapsed = duration_cast<seconds>(tp - start).count();
    cerr << "Thread " << task_nb << ": DONE";
    cerr << " (elapsed: " << elapsed << " seconds)" << endl;
    return true;
}

// Thread worker: aggregate the per-assembly CSVs of each phylum into one
// per-phylum CSV (signature continues below).
bool
task_compute_domain_probabilities_per_phylum(
    const int task_nb,
    const DomainProbabilityContext ctx,
    const vector<string>& phyla,
    const unordered_map<string, vector<string>>& assemblies_per_phylum
) {
    cerr << "Thread " << task_nb << " started." << endl;

    const string kind = ctx.kind;
    const string query = ctx.query;
    const string tail = ctx.tail;

    string dataFolder = "../data/";
    string sequencesFolder = dataFolder + "sequences/";
    string phylumFolder = dataFolder + "phylum/";

    auto n_phyla = phyla.size();

    // Create phylum directory if it does not exist.
    filesystem::create_directory(phylumFolder);

    int i = 0;
    for (auto& phylum : phyla) {
        auto& assembly_ids = assemblies_per_phylum.at(phylum);
        auto n_assemblies = assembly_ids.size();

        cerr << "Thread " << task_nb << ": ";
        cerr << "Processing phylum " << i + 1 << " / " << n_phyla;
        cerr << ": " << phylum << " (" << n_assemblies << " assemblies)";
        cerr << endl;
        ++i;

        // Collect every domain seen in the phylum and its per-assembly
        // probability records.
        // NOTE(review): <set> is not included in this translation unit;
        // presumably pulled in transitively by one of the .cpp includes.
        set<ProteinDomain> protein_domains;
        unordered_map<ProteinDomain, vector<DomainProbability>> protein_domain_probs;
        for (auto& accession : assembly_ids) {
            string assemblyFolder;
            if (ctx.assembly_output_folder.empty()) {
                assemblyFolder = sequencesFolder + accession;
            } else {
                assemblyFolder = ctx.assembly_output_folder;
            }
            // Same naming convention as the per-assembly writer above.
            string path;
            if (kind == "tri-nucleotide") {
                path = (
                    assemblyFolder + "/" + accession + "_" + query +
                    "_probability_" + tail + ".csv"
                );
            } else {
                path = (
                    assemblyFolder + "/" + accession + "_" + query +
                    "_aa_probability_" + tail + ".csv"
                );
            }
            vector<DomainProbability> domains = LoadDomainProbabilities(path);
            for (auto& domain_prob : domains) {
                auto& domain = domain_prob.domain;
                protein_domains.insert(domain);
                if (protein_domain_probs.find(domain) == protein_domain_probs.end()) {
                    protein_domain_probs[domain] = vector<DomainProbability>{domain_prob};
                } else {
                    protein_domain_probs[domain].push_back(domain_prob);
                }
            }
        }
        // Slugify the phylum name: lower-case, spaces -> underscores.
        string phylum_lower = phylum;
        transform(phylum_lower.begin(), phylum_lower.end(), phylum_lower.begin(), ::tolower);
        transform(phylum_lower.begin(), phylum_lower.end(), phylum_lower.begin(), [](char ch) {
            return ch == ' ' ? '_' : ch;
        });

        string phylumDir = phylumFolder + phylum_lower + "/";
        if (!ctx.phylum_output_folder.empty()) {
            phylumDir = ctx.phylum_output_folder + "/";
        }
        filesystem::create_directory(phylumDir);

        string phylum_domain_prob_out_path;
        if (kind == "tri-nucleotide") {
            phylum_domain_prob_out_path = (
                phylumDir + phylum_lower + "_" + query + "_probability_" + tail + ".csv"
            );
        } else {
            phylum_domain_prob_out_path = (
                phylumDir + phylum_lower + "_" + query + "_aa_probability_" + tail + ".csv"
            );
        }
        ofstream of(phylum_domain_prob_out_path);
        auto writer = make_csv_writer(of);
        writer << DomainProbability::RecordHeader();

        vector<DomainProbability> records;
        for (auto& domain : protein_domains) {
            auto& domain_probs = protein_domain_probs[domain];
            auto n_probs = domain_probs.size();
            // Arrays sized to the actual number of records for this domain.
            xt::xarray<double> log_probs = xt::zeros<double>({n_probs});
            xt::xarray<double> log_probs_random = xt::zeros<double>({n_probs});
            for (int ix = 0; ix < n_probs; ++ix) {
                log_probs[ix] = domain_probs[ix].log_probability;
                log_probs_random[ix] = domain_probs[ix].log_probability_random;
            }
            // Combine per-assembly values (product rule in log space).
            double log_prob = product_rule_log(log_probs);
            double log_prob_random = product_rule_log(log_probs_random);
            try {
                DomainProbability record(
                    domain,
                    log_prob,
                    log_prob_random,
                    n_probs
                );
                records.push_back(record);
            } catch (exception& e) {
                cerr << "Thread " << task_nb << " | Phylum: " << phylum << " | ";
                cerr << "Exception: " << e.what() << endl;
                throw;
            }
        }
        sort(records.begin(), records.end(), greater<DomainProbability>());
        for (auto& record : records) {
            writer << record.Record();
        }
    }
    cerr << "Thread " << task_nb << ": DONE" << endl;
    return true;
}

// Orchestrator: runs the four aggregation stages (per assembly, per phylum,
// per superkingdom, global) using the worker tasks above.
void compute_domain_probabilities(const DomainProbabilityContext ctx) {
    auto start = system_clock::now();

    const string kind = ctx.kind;
    const string query = ctx.query;
    const string tail = ctx.tail;
    const int n_threads = ctx.n_threads;

    string dataFolder = "../data/";
    string
    assembliesPath = dataFolder + "assemblies.csv";

    Assemblies assemblies(assembliesPath, ctx.complete_genome_only);
    auto assembly_ids = assemblies.GetIds();
    // NOTE(review): ceil() returns double, so n_per_thread is a double that
    // is implicitly converted back to an integral offset below.
    auto n_per_thread = ceil((double) assembly_ids.size() / (double) n_threads);

    //
    // 1) Compute probability of domains for each assembly individually.
    //
    cerr << "Processing of domain probabilities per assembly" << endl;
    cerr << "Processing " << assemblies.Size() << " assemblies" << endl;
    cerr << "Starting " << n_threads << " threads" << endl;

    vector<future<bool>> futures;
    for (int i = 0; i < n_threads; ++i) {
        // Chunk assembly_ids into contiguous slices, one per thread.
        // NOTE(review): this `start` (an iterator) shadows the function-level
        // time point `start` declared above.
        auto start = assembly_ids.begin() + i * n_per_thread;
        auto end = assembly_ids.end();
        int endInt = i * n_per_thread + n_per_thread;
        if (endInt < assembly_ids.size()) {
            end = assembly_ids.begin() + endInt;
        }
        auto ids = vector<string>(start, end);
        futures.push_back(async(
            task_compute_domain_probabilities_per_assembly,
            i+1,
            ctx,
            ids
        ));
    }
    // Propagate failures from the workers.
    for (auto& f : futures) {
        if(!f.get()) {
            throw runtime_error("Unexpected error while processing assembly output");
        }
    }
    auto tp = system_clock::now();
    auto elapsed = duration_cast<seconds>(tp - start).count();
    cerr << "Processing of domain probabilities per assembly is complete" << endl;
    cerr << "Elapsed: " << elapsed << " seconds" << endl;

    //
    // 2) Compute probability of domains for each phylum
    // with at least 10 assemblies within it.
    //
    // NOTE(review): despite its name, min_n_phyla is the minimum number of
    // assemblies a phylum must contain to be processed.
    size_t min_n_phyla = 10;
    cerr << "Processing of domain probabilities per phylum" << endl;

    // Group assembly ids by (non-empty) phylum.
    unordered_map<string, vector<string>> assemblies_per_phylum;
    for (auto& assembly_id : assembly_ids) {
        Assembly& assembly = assemblies.Get(assembly_id);
        string phylum = assembly.phylum;
        if (phylum.empty()) {
            continue;
        }
        if (assemblies_per_phylum.find(phylum) == assemblies_per_phylum.end()) {
            assemblies_per_phylum[phylum] = vector<string>{assembly_id};
        } else {
            assemblies_per_phylum[phylum].push_back(assembly_id);
        }
    }
    // Keep only phyla with enough assemblies.
    set<string> phyla_set;
    for (auto& assembly_id : assembly_ids) {
        Assembly& assembly = assemblies.Get(assembly_id);
        string phylum = assembly.phylum;
        if (!phylum.empty() && assemblies_per_phylum[phylum].size() >= min_n_phyla) {
            phyla_set.insert(phylum);
        }
    }
    vector<string> phyla;
    phyla.assign(phyla_set.begin(), phyla_set.end());
    auto n_phyla = phyla.size();

    cerr << "Processing " << n_phyla << " phyla" << endl;
    cerr << "Starting " << n_threads << " threads" << endl;

    // Same chunking scheme as stage 1, but over phyla.
    n_per_thread = ceil((double) n_phyla / (double) n_threads);
    vector<future<bool>> futuresP;
    for (int i = 0; i < n_threads; ++i) {
        auto start = phyla.begin() + i * n_per_thread;
        auto end = phyla.end();
        int endInt = i * n_per_thread + n_per_thread;
        if (endInt < n_phyla) {
            end = phyla.begin() + endInt;
        }
        futuresP.push_back(async(
            task_compute_domain_probabilities_per_phylum,
            i+1,
            ctx,
            vector<string>(start, end),
            assemblies_per_phylum
        ));
    }
    for (auto& f : futuresP) {
        if(!f.get()) {
            throw runtime_error("Unexpected error while processing phylum output");
        }
    }
    tp = system_clock::now();
    elapsed = duration_cast<seconds>(tp - start).count();
    cerr << "Processing of domain probabilities per phylum is complete" << endl;
    cerr << "Elapsed: " << elapsed << " seconds" << endl;

    //
    // 3) Compute probability of domains per superkingdom.
    //
    cerr << "Processing of domain probabilities per superkingdom" << endl;

    // Group phyla by superkingdom (assembly.domain), capitalised.
    vector<string> superkingdoms;
    unordered_map<string, vector<string>> phyla_per_superkingdom;
    for (auto& phylum : phyla) {
        // The first assembly of the phylum determines its superkingdom.
        auto assembly_id = assemblies_per_phylum[phylum][0];
        Assembly& assembly = assemblies.Get(assembly_id);
        const string superkingdom_raw = assembly.domain;
        if (superkingdom_raw.empty()) {
            continue;
        }
        string superkingdom = superkingdom_raw;
        transform(
            superkingdom.begin(),
            superkingdom.end(),
            superkingdom.begin(),
            ::tolower
        );
        superkingdom[0] = toupper(superkingdom[0]);
        if (phyla_per_superkingdom.find(superkingdom) == phyla_per_superkingdom.end()) {
            phyla_per_superkingdom[superkingdom] = vector<string>{phylum};
            superkingdoms.push_back(superkingdom);
        } else {
            phyla_per_superkingdom[superkingdom].push_back(phylum);
        }
    }
    string superkingdom_folder = dataFolder + "superkingdom/";
    filesystem::create_directory(superkingdom_folder);

    for (auto& superkingdom : superkingdoms) {
        string superkingdom_lower = superkingdom;
        transform(
            superkingdom_lower.begin(),
            superkingdom_lower.end(),
            superkingdom_lower.begin(),
            ::tolower
        );
        // NOTE(review): superkingdom_folder already ends with '/', so this
        // path contains a double slash; harmless on POSIX filesystems.
        string superkingdom_inner_folder = (
            superkingdom_folder + "/" + superkingdom_lower + "/"
        );
        if (!ctx.superkingdom_output_folder.empty()) {
            superkingdom_inner_folder = ctx.superkingdom_output_folder + "/";
        }
        filesystem::create_directory(superkingdom_inner_folder);

        auto& superkingdom_phyla = phyla_per_superkingdom[superkingdom];
        auto n_superkingdom_phyla = superkingdom_phyla.size();

        // Aggregate per-phylum records for this superkingdom.
        set<ProteinDomain> protein_domains;
        unordered_map<ProteinDomain, vector<DomainProbability>> protein_domain_probs;
        for (auto& phylum : superkingdom_phyla) {
            // Slugify the phylum name exactly as the per-phylum writer did.
            string phylum_lower = phylum;
            transform(
                phylum_lower.begin(),
                phylum_lower.end(),
                phylum_lower.begin(),
                ::tolower
            );
            transform(
                phylum_lower.begin(),
                phylum_lower.end(),
                phylum_lower.begin(),
                [](char ch) { return ch == ' ' ? '_' : ch; }
            );
            string phylumDir = dataFolder + "phylum/" + phylum_lower + "/";
            if (!ctx.phylum_output_folder.empty()) {
                phylumDir = ctx.phylum_output_folder + "/";
            }
            string phylum_domain_prob_path;
            if (kind == "tri-nucleotide") {
                phylum_domain_prob_path = (
                    phylumDir + phylum_lower + "_" + query + "_probability_" + tail + ".csv"
                );
            } else {
                phylum_domain_prob_path = (
                    phylumDir + phylum_lower + "_" + query + "_aa_probability_" + tail + ".csv"
                );
            }
            vector<DomainProbability> domains = LoadDomainProbabilities(
                phylum_domain_prob_path
            );
            for (auto& domain_prob : domains) {
                auto& domain = domain_prob.domain;
                protein_domains.insert(domain);
                if (protein_domain_probs.find(domain) == protein_domain_probs.end()) {
                    protein_domain_probs[domain] = vector<DomainProbability>{domain_prob};
                } else {
                    protein_domain_probs[domain].push_back(domain_prob);
                }
            }
        }
        string superkingdom_out_path;
        if (kind == "tri-nucleotide") {
            superkingdom_out_path= (
                superkingdom_inner_folder + superkingdom_lower + "_" + query +
                "_probability_" + tail + ".csv"
            );
        } else {
            superkingdom_out_path= (
                superkingdom_inner_folder + superkingdom_lower + "_" + query +
                "_aa_probability_" + tail + ".csv"
            );
        }
        ofstream superkingdom_of(superkingdom_out_path);
        auto writer = make_csv_writer(superkingdom_of);
        writer << DomainProbability::RecordHeader();

        // Marginalize over phyla with a uniform prior (in log space).
        xt::xarray<double> uniform_log_prior = xt::eval(
            xt::log(make_uniform_prior(n_superkingdom_phyla))
        );
        vector<DomainProbability> records;
        for (auto& domain : protein_domains) {
            auto& domain_probs = protein_domain_probs[domain];
            auto n_probs = domain_probs.size();
            // NOTE(review): the arrays are sized to the number of phyla, but
            // only the first n_probs entries are filled; when a domain is
            // absent from some phyla the remainder stays 0 (i.e. log-prob 0,
            // probability 1) — confirm this is the intended marginalization.
            xt::xarray<double> log_probs = xt::zeros<double>({n_superkingdom_phyla});
            xt::xarray<double> log_probs_random = xt::zeros<double>({n_superkingdom_phyla});
            for (int ix = 0; ix < n_probs; ++ix) {
                log_probs[ix] = domain_probs[ix].log_probability;
                log_probs_random[ix] = domain_probs[ix].log_probability_random;
            }
            double log_prob = marginalization_log(
                uniform_log_prior,
                log_probs
            );
            double log_prob_random = marginalization_log(
                uniform_log_prior,
                log_probs_random
            );
            try {
                DomainProbability record(
                    domain,
                    log_prob,
                    log_prob_random,
                    n_probs
                );
                records.push_back(record);
            } catch (exception& e) {
                cerr << "Global computation | ";
                cerr << "Exception: " << e.what() << endl;
                throw;
            }
        }
        sort(records.begin(), records.end(), greater<DomainProbability>());
        for (auto& record : records) {
            writer << record.Record();
        }
    }
    cerr << "Processing of domain probabilities per superkingdom is complete" << endl;

    //
    // 4) Compute global probability of domains.
    //
    cerr << "Processing of domain probabilities globally" << endl;

    // Aggregate per-phylum records across all phyla.
    set<ProteinDomain> protein_domains;
    unordered_map<ProteinDomain, vector<DomainProbability>> protein_domain_probs;
    for (auto& phylum : phyla) {
        string phylum_lower = phylum;
        transform(phylum_lower.begin(), phylum_lower.end(), phylum_lower.begin(), ::tolower);
        transform(phylum_lower.begin(), phylum_lower.end(), phylum_lower.begin(), [](char ch) {
            return ch == ' ' ? '_' : ch;
        });
        string phylumDir = dataFolder + "phylum/" + phylum_lower + "/";
        if (!ctx.phylum_output_folder.empty()) {
            phylumDir = ctx.phylum_output_folder + "/";
        }
        string phylum_domain_prob_path;
        if (kind == "tri-nucleotide") {
            phylum_domain_prob_path = (
                phylumDir + phylum_lower + "_" + query + "_probability_" + tail + ".csv"
            );
        } else {
            phylum_domain_prob_path = (
                phylumDir + phylum_lower + "_" + query + "_aa_probability_" + tail + ".csv"
            );
        }
        vector<DomainProbability> domains = LoadDomainProbabilities(phylum_domain_prob_path);
        for (auto& domain_prob : domains) {
            auto& domain = domain_prob.domain;
            protein_domains.insert(domain);
            if (protein_domain_probs.find(domain) == protein_domain_probs.end()) {
                protein_domain_probs[domain] = vector<DomainProbability>{domain_prob};
            } else {
                protein_domain_probs[domain].push_back(domain_prob);
            }
        }
    }
    string overallOutputFolder = dataFolder;
    if (!ctx.overall_output_folder.empty()) {
        overallOutputFolder = ctx.overall_output_folder + "/";
    }
    string protein_out_path;
    if (kind == "tri-nucleotide") {
        protein_out_path = overallOutputFolder + query + "_probability_" + tail + ".csv";
    } else {
        protein_out_path = overallOutputFolder + query + "_aa_probability_" + tail + ".csv";
    }
    ofstream output_file(protein_out_path);
    auto writer = make_csv_writer(output_file);
    writer << DomainProbability::RecordHeader();

    // Marginalize over all phyla with a uniform prior (in log space).
    xt::xarray<double> uniform_log_prior = xt::eval(
        xt::log(make_uniform_prior(n_phyla))
    );
    vector<DomainProbability> records;
    for (auto& domain : protein_domains) {
        auto& domain_probs = protein_domain_probs[domain];
        auto n_probs = domain_probs.size();
        // NOTE(review): same zero-fill pattern as stage 3 — entries for phyla
        // lacking this domain remain log-prob 0; confirm intended.
        xt::xarray<double> log_probs = xt::zeros<double>({n_phyla});
        xt::xarray<double> log_probs_random = xt::zeros<double>({n_phyla});
        for (int ix = 0; ix < n_probs; ++ix) {
            log_probs[ix] = domain_probs[ix].log_probability;
            log_probs_random[ix] = domain_probs[ix].log_probability_random;
        }
        double log_prob = marginalization_log(
            uniform_log_prior,
            log_probs
        );
        double log_prob_random = marginalization_log(
            uniform_log_prior,
            log_probs_random
        );
        try {
            DomainProbability record(
                domain,
                log_prob,
                log_prob_random,
                n_probs
            );
            records.push_back(record);
        } catch (exception& e) {
            cerr << "Global computation | ";
            cerr << "Exception: " << e.what() << endl;
            throw;
        }
    }
    sort(records.begin(), records.end(), greater<DomainProbability>());
    for (auto& record : records) {
        writer << record.Record();
    }
    tp = system_clock::now();
    elapsed = duration_cast<seconds>(tp - start).count();
    cerr << "Processing of global domain probabilities is complete" << endl;
    cerr << "Elapsed: " << elapsed << " seconds" << endl;
    cerr << "DONE" << endl;
}
{"hexsha": "06dd99ea3ba0b780581cf63a9183c0147ed9a66a", "size": 20139, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/task/domain_probability_task.cpp", "max_stars_repo_name": "srom/nbias", "max_stars_repo_head_hexsha": "be8cf8dd623038dcf08d38ed3d19f635ee2dbeae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/task/domain_probability_task.cpp", "max_issues_repo_name": "srom/nbias", "max_issues_repo_head_hexsha": "be8cf8dd623038dcf08d38ed3d19f635ee2dbeae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/task/domain_probability_task.cpp", "max_forks_repo_name": "srom/nbias", "max_forks_repo_head_hexsha": "be8cf8dd623038dcf08d38ed3d19f635ee2dbeae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7474150665, "max_line_length": 89, "alphanum_fraction": 0.6777893639, "num_tokens": 5328}
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.

# Imports necessary libraries
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import numpy as np
import pandas as pd
from gensim import models, corpora, similarities
from gensim.parsing.preprocessing import preprocess_documents
import plotly.graph_objects as go


def loadRedditData():
    """Load saved r/todayilearned posts and build an LSI similarity model.

    Returns (corpus_wrapper, index, df): per-document topic vectors, a
    gensim MatrixSimilarity index over them, and the filtered DataFrame.
    """
    # Loads all saved Reddit posts
    df = pd.read_csv("data/reddit_todayilearned.csv")
    # Selects only the following columns
    df = df[["id", "author", "domain", "url", "num_comments", "score", "title",
             "retrieved_on", "over_18", "permalink", "created_utc", "link_flair_text"]]
    # Leaves only the non-adult content
    df = df[~df["over_18"]]
    # Removes documents with lower than 10 score
    df = df[df["score"] > 10]
    # Resets the index
    df.reset_index(inplace=True, drop=True)
    # Creates a list of documents
    documents = df["title"].tolist()
    # Preprocesses the documents
    texts = preprocess_documents(documents)
    # Creates the dictionary
    dictionary = corpora.Dictionary(texts)
    # Creates the corpus using bag-of-words
    corpus = [dictionary.doc2bow(text) for text in texts]
    # Generates the TF-IDF model
    tfidf = models.TfidfModel(corpus)
    # Creates the TF-IDF corpus
    corpus_tfidf = tfidf[corpus]
    # Fits an LSI model with 15 topics (the original comment said 100,
    # but num_topics is 15)
    model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=15)
    # Identifies topics for each document
    corpus_wrapper = model[corpus_tfidf]
    # Creates the similarity index
    index = similarities.MatrixSimilarity(corpus_wrapper)
    return corpus_wrapper, index, df


def nextTIL():
    """Pick the next post to show via an explore/exploit strategy.

    Reads the module-level globals `user_actions`, `corpus_wrapper`,
    `index` and `df`. Returns (doc_idx, explore, topic_num, user_profile,
    user_doc_sim); user_doc_sim is 0 when exploring.
    """
    # Creates a list of documents the user has already seen
    seen = [x[0] for x in user_actions]
    if len(seen) > 0:
        # Retrieves topic values for each document multiplied by the user action (1 or -1)
        user_topics = [[x[1]*user_action[1] for x in corpus_wrapper[user_action[0]]]
                       for user_action in user_actions]
        # Computes the mean of topic values over the seen documents
        user_profile = [(i, x) for i, x in enumerate(np.array(user_topics).mean(axis=0))]
    else:
        # No history yet: a flat, all-zero topic profile
        user_profile = [(x, 0) for x in range(len(corpus_wrapper[0]))]
    # The more actions the user takes, the less we Explore; the rate
    # decays logarithmically from 80% and bottoms out at 10%
    diminishingExplore = lambda x: max(80 - 10 * np.log(x), 10) * 0.01
    # If no user actions have been taken we Explore
    if len(seen) == 0:
        explore = True
    # Otherwise Explore with probability given by diminishingExplore
    elif np.random.uniform(low=0.0, high=1.0) < diminishingExplore(len(seen)):
        explore = True
    # The rest of the time we Exploit
    else:
        explore = False
    # If we are Exploring - returns a random document from the Top 50 scoring unseen documents
    if explore == True:
        doc_idx = df[~df.index.isin(seen)].sort_values(by="score", ascending=False).head(50).sample(1).index[0]
        # Determines the topic with the highest value
        topic_num = np.array(corpus_wrapper[doc_idx])[:, 1].argmax() + 1
        return doc_idx, explore, topic_num, user_profile, 0
    # Finds similarities between the user profile and the documents
    sim = index[user_profile]
    # Weights the similarities by a small power of each document's Reddit score
    w_sim = np.array(sim * np.power(df["score"], 0.03))
    # Sorts the weighted similarities in descending order
    w_sim_sorted_desc = w_sim.argsort()[::-1]
    # Removes seen documents from the array
    w_sim_sorted_desc_not_seen = np.delete(w_sim_sorted_desc, np.isin(w_sim_sorted_desc, seen))
    # Index of the top document
    doc_idx = w_sim_sorted_desc_not_seen[0]
    # Determines the topic with the highest value
    topic_num = np.array(corpus_wrapper[doc_idx])[:, 1].argmax() + 1
    # Retrieves the user & document similarity
    user_doc_sim = np.round(sim[doc_idx], 2)
    return doc_idx, explore, topic_num, user_profile, user_doc_sim


# Sets the stylesheets
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# Initializes the Dash App
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Defines the App layout
app.layout = html.Div([
    # Topic-preference bar chart
    html.Div(dcc.Graph(id='topic-graph'),
             style={"width": "100%", "height": " 400px", "align-items": "right",
                    "justify-content": "center", "display": "flex"}),
    # The factoid text itself
    html.Div(id="TIL", children="",
             style={"width": "800px", "height": "150px", "margin": "auto",
                    "text-align": "center", "font-size": "18px", "padding": "10px"}),
    # Source / Reddit-post links
    html.Div([dcc.Link(id="TIL-url", href=""), " / ", dcc.Link(id="TIL-permalink", href="")],
             style={"width": "800px", "height": "50px", "margin": "auto",
                    "text-align": "center", "font-size": "18px", "padding": "10px"}),
    # Topic and explore/exploit annotations
    html.Div(id="TIL-topic", children="",
             style={"width": "800px", "margin": "auto", "text-align": "left",
                    "font-size": "14px", "font-weight": "bold"}),
    html.Div(id="TIL-explore", children="",
             style={"width": "800px", "margin": "auto", "text-align": "left",
                    "font-size": "14px", "font-weight": "bold"}),
    # Feedback buttons
    html.Div([html.Button('Upvote 👍', id='up_btn', n_clicks=0, style={"width": "200px"}),
              html.Button('Downvote 👎', id='down_btn', n_clicks=0, style={"width": "200px"})],
             style={"width": "100%", "height": " 100px", "align-items": "center",
                    "justify-content": "center", "display": "flex"})
])


# Python function that is automatically called by Dash whenever an input
# component's property (either button's click count) changes
@app.callback(
    Output('TIL', 'children'),
    Output('TIL-url', 'children'),
    Output('TIL-permalink', 'children'),
    Output('TIL-topic', 'children'),
    Output('TIL-explore', 'children'),
    Output('topic-graph', 'figure'),
    Input('up_btn', 'n_clicks'),
    Input('down_btn', 'n_clicks'))
def displayNext(upvt, dnvt):
    """Record the user's vote on the current post and render the next one.

    Mutates the module-level globals `TIL_id` and `user_actions`.
    """
    # Records upvotes and downvotes on the current post
    global TIL_id
    global user_actions
    # Checks which (if any) button triggered this callback
    changed_id = [p["prop_id"] for p in dash.callback_context.triggered][0]
    # NOTE(review): `upvote` is assigned but never read afterwards
    upvote = None
    # If the upvote button was pressed adds a user action with a value 1
    if "up_btn" in changed_id:
        user_actions.append((TIL_id, 1))
    # If the downvote button was pressed adds a user action with a value -1
    elif "down_btn" in changed_id:
        user_actions.append((TIL_id, -1))
    else:
        upvote = None
    # Retrieves the next TIL post
    TIL_id, TIL_explore, TIL_topic, user_profile, user_doc_sim = nextTIL()
    # Retrieves the next TIL post text
    TIL_text = df["title"].iloc[TIL_id]
    # Generates the TIL Topic Text
    TIL_topic_text = "Main Topic # is " + str(TIL_topic)
    # Generates text describing whether the recommendation is a popular factoid or a personalized factoid
    TIL_explore_text = "Personalized factoid, Match Score is " + str(user_doc_sim) if TIL_explore == False else "Popular factoid"
    # Adds the Reddit Score
    TIL_explore_text += ", Reddit Score is " + f'{df["score"].iloc[TIL_id]:,}'
    # Generates the URL link for the source
    TIL_url = dcc.Link("Source", href=df["url"].iloc[TIL_id])
    # Generates the URL link for the Reddit Post (permalink)
    TIL_permalink = dcc.Link("Reddit Post", href="https://www.reddit.com/" + df["permalink"].iloc[TIL_id])
    # Pulls out the first elements (indices) and adds 1 so that the topic indices start from 1
    TIL_topics = [x[0]+1 for x in user_profile]
    # Pulls out the second elements (topic scores)
    TIL_topic_scores = [x[1] for x in user_profile]
    # Generates the barplot containing Topic indices and scores
    fig = go.Figure(go.Bar(
        y=TIL_topics,
        x=TIL_topic_scores,
        orientation="h"))
    # Symmetric x-range with 20% headroom around the largest score
    fig_x_range_max = np.max(np.abs(TIL_topic_scores))
    fig_x_range_max *= 1.2
    fig.update_layout(
        autosize=False,
        width=800,
        height=400,
        title=go.layout.Title(text="Your Topic Preferences"),
        xaxis={"range": [-fig_x_range_max, fig_x_range_max], "visible": False,
               "showticklabels": False},
        yaxis={"visible": True, "showticklabels": True, "tickvals": TIL_topics,
               "ticktext": ["Topic #" + str(x).zfill(2) for x in TIL_topics]}
    )
    return [TIL_text, TIL_url, TIL_permalink, TIL_topic_text, TIL_explore_text, fig]


# Loads Reddit data (module-level globals read by nextTIL/displayNext)
corpus_wrapper, index, df = loadRedditData()
# Initializes user actions list
user_actions = []
# Initializes TIL_id
TIL_id = None

if __name__ == "__main__":
    # Runs the app
    app.run_server(debug=True)
{"hexsha": "a13fa4b27a2533c0c36843e699a5a09d17f94536", "size": 8560, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "SatenikS/reddit-recommender-system-interesting-factoid", "max_stars_repo_head_hexsha": "dd0ba1ee3c3f5cfb4741ad9ce57a059ec4ade3f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "SatenikS/reddit-recommender-system-interesting-factoid", "max_issues_repo_head_hexsha": "dd0ba1ee3c3f5cfb4741ad9ce57a059ec4ade3f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "SatenikS/reddit-recommender-system-interesting-factoid", "max_forks_repo_head_hexsha": "dd0ba1ee3c3f5cfb4741ad9ce57a059ec4ade3f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0150753769, "max_line_length": 162, "alphanum_fraction": 0.6789719626, "include": true, "reason": "import numpy", "num_tokens": 2320}
[STATEMENT] lemma [smt_arith_multiplication]: fixes A B :: real and p n :: int assumes "A < B" "0 < n" "p > 0" shows "(A / n) * p < (B / n) * p" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A / real_of_int n * real_of_int p < B / real_of_int n * real_of_int p [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: A < B 0 < n 0 < p goal (1 subgoal): 1. A / real_of_int n * real_of_int p < B / real_of_int n * real_of_int p [PROOF STEP] by (auto simp: field_simps)
{"llama_tokens": 223, "file": null, "length": 2}
import pygame
import sys
from game.logic import SokobanLogic
import time
import os
import numpy as np
import torch
from solver.search_util.policy import Action_Predictior
from solver.search_util.value import Value_Predictior
from solver.solver_search import SokobanSolverSearch
from torch.utils.data import TensorDataset


class SokobanGame:
    """Pygame front-end for Sokoban.

    Renders a level with tile sprites and supports two modes of play:
    interactive keyboard play (`play`) and automatic replay of a solver's
    control sequence (`auto_play`). Optionally, a solver class can be
    passed in at construction time; it is then run immediately and its
    search data can be dumped to `data_dir` for later model training.
    """

    def __init__(self, level, solver=None, step_limit=None, data_dir=None, train_levels=None):
        # Tile sprites; paths are relative to the working directory.
        self.wall = pygame.image.load('images/wall.png')
        self.floor = pygame.image.load('images/floor.png')
        self.box = pygame.image.load('images/box.png')
        self.box_docked = pygame.image.load('images/box_docked.png')
        self.worker = pygame.image.load('images/worker.png')
        self.worker_docked = pygame.image.load('images/worker_dock.png')
        self.docker = pygame.image.load('images/dock.png')
        # Background fill color (RGB tuple).
        self.background = 255, 226, 191
        # self.level = self.start_game()
        self.level = level
        # Game state / rules engine for the chosen level.
        self.logic = SokobanLogic('levels', self.level)
        self.solver = None
        self.controls = None
        self.data_dir = data_dir
        self.action_pred = None
        self.value_pred = None
        if solver is not None:
            # A solver class requires an explicit search budget.
            assert step_limit is not None
            self.solver = solver(self.logic.matrix, step_limit=step_limit)
            # if train_levels is not None:
            #     assert type(self.solver) is SokobanSolverSearch
            #     self.value_pred, self.action_pred, self.train_model(train_levels)
            print(f"\nLevel: {self.level}")
            # Solve immediately and keep the resulting control sequence.
            time0 = time.time()
            self.solver.solve_for_one()
            self.controls = self.solver.get_controls()
            time1 = time.time()
            print(f"Use {time1 - time0: .2f} seconds")
            print(f"Use {len(self.controls)} steps")
            # Dump search data for this level (search-based solver only).
            if self.data_dir is not None and type(self.solver) is SokobanSolverSearch:
                level_dir = os.path.join(self.data_dir, str(level))
                os.makedirs(level_dir, exist_ok=True)
                self.solver.get_data(level_dir)

    def train_model(self, train_levels):
        """Fit the action predictor on data previously dumped under data_dir.

        Expects per-level .npy files (points/features/actions/scores) written
        by the solver's get_data().
        """
        all_points = []
        all_features = []
        all_actions = []
        all_scores = []
        for level in train_levels:
            all_points.append(np.load(os.path.join(self.data_dir, str(level), "points.npy")))
            all_features.append(np.load(os.path.join(self.data_dir, str(level), "features.npy")))
            all_actions.append(np.load(os.path.join(self.data_dir, str(level), "actions.npy")))
            all_scores.append(np.load(os.path.join(self.data_dir, str(level), "scores.npy")))
        # self.value_pred = Value_Predictior()
        # self.value_pred.fit(all_points, all_features, all_scores)
        self.action_pred = Action_Predictior()
        self.action_pred.fit(all_points, all_features, all_actions)

    def print_game(self, matrix, screen):
        """Blit the level matrix onto `screen`, one 32x32 tile per cell."""
        screen.fill(self.background)
        x = 0
        y = 0
        for row in matrix:
            for char in row:
                if char == ' ':  # floor
                    screen.blit(self.floor, (x, y))
                elif char == '#':  # wall
                    screen.blit(self.wall, (x, y))
                elif char == '@':  # worker on floor
                    screen.blit(self.worker, (x, y))
                elif char == '.':  # dock
                    screen.blit(self.docker, (x, y))
                elif char == '*':  # box on dock
                    screen.blit(self.box_docked, (x, y))
                elif char == '$':  # box
                    screen.blit(self.box, (x, y))
                elif char == '+':  # worker on dock
                    screen.blit(self.worker_docked, (x, y))
                x = x + 32
            x = 0
            y = y + 32

    def get_key(self):
        """Busy-poll the event queue until a KEYDOWN arrives; return its key."""
        while 1:
            event = pygame.event.poll()
            if event.type == pygame.KEYDOWN:
                return event.key
            else:
                pass

    def display_box(self, screen, message):
        "Print a message in a box in the middle of the screen"
        fontobject = pygame.font.Font(None, 18)
        # Filled black box plus a white 1px border, centered on screen.
        pygame.draw.rect(screen, (0, 0, 0),
                         ((screen.get_width() / 2) - 100,
                          (screen.get_height() / 2) - 10,
                          200, 20), 0)
        pygame.draw.rect(screen, (255, 255, 255),
                         ((screen.get_width() / 2) - 102,
                          (screen.get_height() / 2) - 12,
                          204, 24), 1)
        if len(message) != 0:
            screen.blit(fontobject.render(message, 1, (255, 255, 255)),
                        ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
        pygame.display.flip()

    def display_end(self, screen):
        """Show the 'Level Completed' banner in a centered box."""
        message = "Level Completed"
        fontobject = pygame.font.Font(None, 18)
        pygame.draw.rect(screen, (0, 0, 0),
                         ((screen.get_width() / 2) - 100,
                          (screen.get_height() / 2) - 10,
                          200, 20), 0)
        pygame.draw.rect(screen, (255, 255, 255),
                         ((screen.get_width() / 2) - 102,
                          (screen.get_height() / 2) - 12,
                          204, 24), 1)
        screen.blit(fontobject.render(message, 1, (255, 255, 255)),
                    ((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
        pygame.display.flip()

    def ask(self, screen, question):
        "ask(screen, question) -> answer"
        # Collect typed characters until RETURN; the result is parsed as int,
        # so non-numeric input will raise ValueError at the return statement.
        current_string = []
        self.display_box(screen, question + ": " + "".join(current_string))
        while 1:
            inkey = self.get_key()
            if inkey == pygame.K_BACKSPACE:
                current_string = current_string[0:-1]
            elif inkey == pygame.K_RETURN:
                break
            elif inkey == pygame.K_MINUS:
                current_string.append("_")
            elif inkey <= 127:  # printable ASCII only
                current_string.append(chr(inkey))
            self.display_box(screen, question + ": " + "".join(current_string))
        return int("".join(current_string))

    def start_game(self):
        """Prompt for a level number in a small window; exit on invalid input."""
        start = pygame.display.set_mode((320, 240))
        level = self.ask(start, "Select Level")
        if level > 0:
            return level
        else:
            print("ERROR: Invalid Level: " + str(level))
            sys.exit(2)

    def auto_play(self, interval):
        """Replay the solver's control sequence, one move per `interval` ms.

        Requires that a solver was supplied at construction (self.controls
        must be populated).
        """
        pygame.init()
        pygame.font.init()
        self.size = self.logic.load_size()
        self.screen = pygame.display.set_mode(self.size)
        if self.controls is not None:
            # Reverse once so pop() yields the moves in original order.
            self.controls = self.controls[::-1]
        time_elapsed_since_last_action = 0
        clock = pygame.time.Clock()
        while True:
            self.print_game(self.logic.get_matrix(), self.screen)
            dt = clock.tick()
            time_elapsed_since_last_action += dt
            if time_elapsed_since_last_action > interval:
                # Drain the event queue so the window stays responsive.
                pygame.event.get()
                if self.logic.is_completed():
                    self.display_end(self.screen)
                    pygame.time.wait(500)
                    pygame.quit()
                    break
                elif self.controls != []:
                    control = self.controls.pop()
                    if control == "UP":
                        self.logic.move(0, -1, True)
                    elif control == "DOWN":
                        self.logic.move(0, 1, True)
                    elif control == "LEFT":
                        self.logic.move(-1, 0, True)
                    elif control == "RIGHT":
                        self.logic.move(1, 0, True)
                time_elapsed_since_last_action = 0
            pygame.display.update()

    def play(self):
        """Interactive play: arrows move, 'd' undoes a move, 'q' quits."""
        self.size = self.logic.load_size()
        self.screen = pygame.display.set_mode(self.size)
        while 1:
            if self.logic.is_completed():
                self.display_end(self.screen)
            self.print_game(self.logic.get_matrix(), self.screen)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit(0)
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_UP:
                        self.logic.move(0, -1, True)
                    elif event.key == pygame.K_DOWN:
                        self.logic.move(0, 1, True)
                    elif event.key == pygame.K_LEFT:
                        self.logic.move(-1, 0, True)
                    elif event.key == pygame.K_RIGHT:
                        self.logic.move(1, 0, True)
                    elif event.key == pygame.K_d:
                        self.logic.unmove()
                    elif event.key == pygame.K_q:
                        sys.exit(0)
            pygame.display.update()
{"hexsha": "eb4b4a3dfd5e3977e7fb1716b5ed94ed80891b79", "size": 8916, "ext": "py", "lang": "Python", "max_stars_repo_path": "game/game.py", "max_stars_repo_name": "cyclone923/sokoban_pygame", "max_stars_repo_head_hexsha": "942ea41f4c0e160ab54e967f6dd9424ae3d0d243", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "game/game.py", "max_issues_repo_name": "cyclone923/sokoban_pygame", "max_issues_repo_head_hexsha": "942ea41f4c0e160ab54e967f6dd9424ae3d0d243", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game/game.py", "max_forks_repo_name": "cyclone923/sokoban_pygame", "max_forks_repo_head_hexsha": "942ea41f4c0e160ab54e967f6dd9424ae3d0d243", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0875576037, "max_line_length": 97, "alphanum_fraction": 0.5223194258, "include": true, "reason": "import numpy", "num_tokens": 1950}
"""This module contains helper functions and utilities for nelpy.""" __all__ = ['spatial_information', 'frange', 'swap_cols', 'swap_rows', 'pairwise', 'is_sorted', 'linear_merge', 'PrettyDuration', 'ddt_asa', 'get_contiguous_segments', 'get_events_boundaries', 'get_threshold_crossing_epochs', '_bst_get_bins'] import numpy as np import logging from itertools import tee, repeat from collections import namedtuple from math import floor from scipy.signal import hilbert import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter from numpy import log, ceil import copy import sys import ctypes from multiprocessing import Array, cpu_count from multiprocessing.pool import Pool import pdb from . import core # so that core.RegularlySampledAnalogSignalArray is exposed from . import auxiliary # so that auxiliary.TuningCurve1D is epxosed from . import filtering from .utils_.decorators import keyword_deprecation # def sub2ind(array_shape, rows, cols): # ind = rows*array_shape[1] + cols # ind[ind < 0] = -1 # ind[ind >= array_shape[0]*array_shape[1]] = -1 # return ind # def ind2sub(array_shape, ind): # # see also np.unravel_index(ind, array.shape) # ind[ind < 0] = -1 # ind[ind >= array_shape[0]*array_shape[1]] = -1 # rows = (ind.astype('int') / array_shape[1]) # cols = ind % array_shape[1] # return (rows, cols) def ragged_array(arr): """Takes a list of arrays, and returns a ragged array. See https://github.com/numpy/numpy/issues/12468 """ n_elem = len(arr) out = np.array(n_elem*[None]) for ii in range(out.shape[0]): out[ii] = arr[ii] return out def asa_indices_within_epochs(asa, intervalarray): """Return indices of ASA within epochs. [[start, stop] ... [start, stop]] so that data can be associated with asa._data[:,start:stop] for each epoch. 
""" indices = [] intervalarray = intervalarray[asa.support] for interval in intervalarray.merge().data: a_start = interval[0] a_stop = interval[1] frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop)) indices.append((frm, to)) indices = np.array(indices, ndmin=2) return indices def frange(start, stop, step): """arange with floating point step""" # TODO: this function is not very general; we can extend it to work # for reverse (stop < start), empty, and default args, etc. # there are also many edge cases where this is weird. # see https://stackoverflow.com/questions/7267226/range-for-floats # for better alternatives. num_steps = int(np.floor((stop-start)/step)) return np.linspace(start, stop, num=num_steps, endpoint=False) def spatial_information(ratemap): """Compute the spatial information and firing sparsity... The specificity index examines the amount of information (in bits) that a single spike conveys about the animal's location (i.e., how well cell firing predicts the animal's location).The spatial information content of cell discharge was calculated using the formula: information content = \Sum P_i(R_i/R)log_2(R_i/R) where i is the bin number, P_i, is the probability for occupancy of bin i, R_i, is the mean firing rate for bin i, and R is the overall mean firing rate. In order to account for the effects of low firing rates (with fewer spikes there is a tendency toward higher information content) or random bursts of firing, the spike firing time-series was randomly offset in time from the rat location time-series, and the information content was calculated. A distribution of the information content based on 100 such random shifts was obtained and was used to compute a standardized score (Zscore) of information content for that cell. 
While the distribution is not composed of independent samples, it was nominally normally distributed, and a Z value of 2.29 was chosen as a cut-off for significance (the equivalent of a one-tailed t-test with P = 0.01 under a normal distribution). Reference(s) ------------ Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L., and Skaggs, W. E. (1994). "Spatial information content and reliability of hippocampal CA1 neurons: effects of visual input", Hippocampus, 4(4), 410-421. Parameters ---------- ratemap : array of shape (n_units, n_bins) Rate map in Hz. Returns ------- si : array of shape (n_units,) spatial information (in bits) per unit """ ratemap = copy.deepcopy(ratemap) # ensure that the ratemap always has nonzero firing rates, # otherwise the spatial information might return NaNs: bkg_rate = ratemap[ratemap>0].min() ratemap[ratemap < bkg_rate] = bkg_rate number_of_spatial_bins = np.prod(ratemap.shape[1:]) weight_per_bin = 1/number_of_spatial_bins Pi = 1 if len(ratemap.shape) == 3: # we have 2D tuning curve, (n_units, n_x, n_y) R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate Ri = np.transpose(ratemap, (2,1,0)) si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1) elif len(ratemap.shape) == 2: # we have 1D tuning curve, (n_units, n_x) R = ratemap.mean(axis=1) # mean firing rate Ri = ratemap.T si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1) else: raise TypeError("rate map shape not supported / understood!") return si/number_of_spatial_bins def spatial_sparsity(ratemap): """Compute the firing sparsity... 
The specificity index examines the amount of information (in bits) that a single spike conveys about the animal's location (i.e., how well cell firing predicts the animal's location).The spatial information content of cell discharge was calculated using the formula: information content = \Sum P_i(R_i/R)log_2(R_i/R) where i is the bin number, P_i, is the probability for occupancy of bin i, R_i, is the mean firing rate for bin i, and R is the overall mean firing rate. In order to account for the effects of low firing rates (with fewer spikes there is a tendency toward higher information content) or random bursts of firing, the spike firing time-series was randomly offset in time from the rat location time-series, and the information content was calculated. A distribution of the information content based on 100 such random shifts was obtained and was used to compute a standardized score (Zscore) of information content for that cell. While the distribution is not composed of independent samples, it was nominally normally distributed, and a Z value of 2.29 was chosen as a cut-off for significance (the equivalent of a one-tailed t-test with P = 0.01 under a normal distribution). Reference(s) ------------ Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L., and Skaggs, W. E. (1994). "Spatial information content and reliability of hippocampal CA1 neurons: effects of visual input", Hippocampus, 4(4), 410-421. Parameters ---------- occupancy : array of shape (n_bins,) Occupancy of the animal. ratemap : array of shape (n_units, n_bins) Rate map in Hz. 
Returns ------- si : array of shape (n_units,) spatial information (in bits) per unit sparsity: array of shape (n_units,) sparsity (in percent) for each unit """ number_of_spatial_bins = np.prod(ratemap.shape[1:]) weight_per_bin = 1/number_of_spatial_bins Pi = 1 if len(ratemap.shape) == 3: # we have 2D tuning curve, (n_units, n_x, n_y) R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate Ri = ratemap sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2) elif len(ratemap.shape) == 2: # we have 1D tuning curve, (n_units, n_x) R = ratemap.mean(axis=1) # mean firing rate Ri = ratemap.T sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2) else: raise TypeError("rate map shape not supported / understood!") return sparsity/number_of_spatial_bins def _bst_get_bins_inside_interval(interval, ds, w=1): """(np.array) Return bin edges entirely contained inside an interval. Bin edges always start at interval.start, and continue for as many bins as would fit entirely inside the interval. NOTE 1: there are (n+1) bin edges associated with n bins. WARNING: if an interval is smaller than ds, then no bin will be associated with the particular interval. NOTE 2: nelpy uses half-open intervals [a,b), but if the bin width divides b-a, then the bins will cover the entire range. For example, if interval = [0,2) and ds = 1, then bins = [0,1,2], even though [0,2] is not contained in [0,2). There might be numerical precision deviations from this? Parameters ---------- interval : EpochArray EpochArray containing a single interval with a start, and stop ds : float Time bin width, in seconds. w : number of bins to use in a sliding window mode. Default is 1 (no sliding window). For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8) For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds are not supported within this framework. 
Returns ------- bins : array Bin edges in an array of shape (n+1,) where n is the number of bins centers : array Bin centers in an array of shape (n,) where n is the number of bins """ if interval.length < ds: return None, None n_bins = int(np.floor(interval.length / ds)) # number of bins # linspace is better than arange for non-integral steps bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1) if w > 1: wn_bins = np.max((1, n_bins - w + 1)) wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2 bins = wn_bins centers = bins[:-1] + (ds / 2) return bins, centers def _bst_get_bins(intervalArray, ds, w=1): """ Docstring goes here. TBD. For use with bins that are contained wholly inside the intervals. """ b = [] # bin list c = [] # centers list left_edges = [] right_edges = [] counter = 0 for interval in intervalArray: bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w) if bins is not None: left_edges.append(counter) counter += len(centers) - 1 right_edges.append(counter) counter += 1 b.extend(bins.tolist()) c.extend(centers.tolist()) bins = np.array(b) bin_centers = np.array(c) le = np.array(left_edges) le = le[:, np.newaxis] re = np.array(right_edges) re = re[:, np.newaxis] binned_support = np.hstack((le, re)) lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze()) support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]] support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]] supportdata = np.vstack([support_starts, support_stops]).T support = type(intervalArray)(supportdata) # set support to TRUE bin support return bins, bin_centers, binned_support, support @keyword_deprecation(replace_x_with_y={'bw':'truncate'}) def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True): """Compute the multiunit activity (MUA) from a spike train. Parameters ---------- st : SpikeTrainArray SpikeTrainArray containing one or more units. 
-- OR -- st : BinnedSpikeTrainArray BinnedSpikeTrainArray containing multiunit activity. ds : float, optional Time step in which to bin spikes. Default is 1 ms. sigma : float, optional Standard deviation (in seconds) of Gaussian smoothing kernel. Default is 10 ms. If sigma==0 then no smoothing is applied. truncate : float, optional Bandwidth of the Gaussian filter. Default is 6. Returns ------- mua : AnalogSignalArray AnalogSignalArray with MUA. """ if ds is None: ds = 0.001 # 1 ms bin size if sigma is None: sigma = 0.01 # 10 ms standard deviation if truncate is None: truncate = 6 if isinstance(st, core.EventArray): # bin spikes, so that we can count the spikes mua_binned = st.bin(ds=ds).flatten() elif isinstance(st, core.BinnedEventArray): mua_binned = st.flatten() ds = mua_binned.ds else: raise TypeError('st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)') # make sure data type is float, so that smoothing works, and convert to rate mua_binned._data = mua_binned._data.astype(float) / ds # TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified: # put mua rate inside an AnalogSignalArray if _fast: mua = core.AnalogSignalArray([], empty=True) mua._data = mua_binned.data mua._abscissa_vals = mua_binned.bin_centers mua._abscissa.support = mua_binned.support else: mua = core.AnalogSignalArray(mua_binned.data, timestamps=mua_binned.bin_centers, fs=1/ds) mua._fs = 1/ds if (sigma != 0) and (truncate > 0): mua = gaussian_filter(mua, sigma=sigma, truncate=truncate) return mua def is_odd(n): """Returns True if n is odd, and False if n is even. Assumes integer. 
""" return bool(n & 1) def swap_cols(arr, frm, to): """swap columns of a 2D np.array""" if arr.ndim > 1: arr[:,[frm, to]] = arr[:,[to, frm]] else: arr[frm], arr[to] = arr[to], arr[frm] def swap_rows(arr, frm, to): """swap rows of a 2D np.array""" if arr.ndim > 1: arr[[frm, to],:] = arr[[to, frm],:] else: arr[frm], arr[to] = arr[to], arr[frm] def pairwise(iterable): """returns a zip of all neighboring pairs. This is used as a helper function for is_sorted. Example ------- >>> mylist = [2, 3, 6, 8, 7] >>> list(pairwise(mylist)) [(2, 3), (3, 6), (6, 8), (8, 7)] """ a, b = tee(iterable) next(b, None) return zip(a, b) def argsort(seq): # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python return sorted(range(len(seq)), key=seq.__getitem__) def is_sorted_general(iterable, key=lambda a, b: a <= b): """Check to see if iterable is monotonic increasing (sorted).""" return all(key(a, b) for a, b in pairwise(iterable)) def is_sorted(x, chunk_size=None): """Returns True if iterable is monotonic increasing (sorted). NOTE: intended for 1D array, list or tuple. Will not work on more than 1D This function works in-core with memory footrpint XXX. chunk_size = 100000 is probably a good choice. """ if not isinstance(x, (tuple, list, np.ndarray)): raise TypeError("Unsupported type {}".format(type(x))) x = np.atleast_1d(np.array(x).squeeze()) if x.ndim > 1: raise ValueError("Input x must be 1-dimensional") if chunk_size is None: chunk_size = 500000 stop = x.size for chunk_start in range(0, stop, chunk_size): chunk_stop = int(min(stop, chunk_start + chunk_size + 1)) chunk = x[chunk_start:chunk_stop] if not np.all(chunk[:-1] <= chunk[1:]): return False return True def linear_merge(list1, list2): """Merge two SORTED lists in linear time. UPDATED TO WORK WITH PYTHON 3.7+ (see https://stackoverflow.com/questions/51700960/runtimeerror-generator-raised-stopiteration-every-time-i-try-to-run-app) Returns a generator of the merged result. 
Examples -------- >>> a = [1, 3, 5, 7] >>> b = [2, 4, 6, 8] >>> [i for i in linear_merge(a, b)] [1, 2, 3, 4, 5, 6, 7, 8] >>> [i for i in linear_merge(b, a)] [1, 2, 3, 4, 5, 6, 7, 8] >>> a = [1, 2, 2, 3] >>> b = [2, 2, 4, 4] >>> [i for i in linear_merge(a, b)] [1, 2, 2, 2, 2, 3, 4, 4] """ # if any of the lists are empty, return the other (possibly also # empty) list: (this is necessary because having either list1 or # list2 be empty makes this quite a bit more complicated...) if isinstance(list1, (list, np.ndarray)): if len(list1) == 0: list2 = iter(list2) while True: try: yield next(list2) except StopIteration: return if isinstance(list2, (list, np.ndarray)): if len(list2) == 0: list1 = iter(list1) while True: try: yield next(list1) except StopIteration: return list1 = iter(list1) list2 = iter(list2) value1 = next(list1) value2 = next(list2) # We'll normally exit this loop from a next() call raising # StopIteration, which is how a generator function exits anyway. while True: if value1 <= value2: # Yield the lower value. try: yield value1 except StopIteration: return try: # Grab the next value from list1. value1 = next(list1) except StopIteration: # list1 is empty. Yield the last value we received from list2, then # yield the rest of list2. try: yield value2 except StopIteration: return while True: try: yield next(list2) except StopIteration: return else: try: yield value2 except StopIteration: return try: value2 = next(list2) except StopIteration: # list2 is empty. try: yield value1 except StopIteration: return while True: try: yield next(list1) except StopIteration: return def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None): """Determine MUA/PBEs from multiunit activity. MUA : multiunit activity PBE : population burst event Parameters ---------- mua : AnalogSignalArray AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz]. 
fs : float, optional Sampling frequency of mua, in Hz. If not specified, it will be inferred from mua.fs minLength : float, optional maxLength : float, optional PrimaryThreshold : float, optional SecondaryThreshold : float, optional minThresholdLength : float, optional Returns ------- mua_epochs : EpochArray EpochArray containing all the MUA events / PBEs. Example ------- mua = get_mua(spiketrain) mua_epochs = get_mua_events(mua) PBEs = get_PBEs(spiketrain, min_active=5) = get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5) """ if fs is None: fs = mua.fs if fs is None: raise ValueError("fs must either be specified, or must be contained in mua!") if PrimaryThreshold is None: PrimaryThreshold = mua.mean() + 3*mua.std() if SecondaryThreshold is None: SecondaryThreshold = mua.mean() if minLength is None: minLength = 0.050 # 50 ms minimum event duration if maxLength is None: maxLength = 0.750 # 750 ms maximum event duration if minThresholdLength is None: minThresholdLength = 0.0 # determine MUA event bounds: mua_bounds_idx, maxes, _ = get_events_boundaries( x = mua.data, PrimaryThreshold = PrimaryThreshold, SecondaryThreshold = SecondaryThreshold, minThresholdLength = minThresholdLength, minLength = minLength, maxLength = maxLength, ds = 1/fs ) if len(mua_bounds_idx) == 0: logging.warning("no mua events detected") return core.EpochArray(empty=True) # store MUA bounds in an EpochArray mua_epochs = core.EpochArray(mua.time[mua_bounds_idx]) return mua_epochs @keyword_deprecation(replace_x_with_y={'bw':'truncate'}) def get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0, min_active=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None): """Determine PBEs from multiunit activity or spike trains. 
Definitions ----------- MUA : multiunit activity PBE : population burst event Summary ------- This function can be used to identify PBE epochs from spike trains, binned spike trains, or multiunit activity (in the form of an AnalogSignalArray). It is recommended to either pass in a SpikeTrainArray or a BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be set. It is also recommended that the unsorted units (but not noise artifacts!) should be included in the spike train that is used to estimate the PBEs. By default, unit_id=0 is assumed to be unsorted, but this can be changed, or if no unsorted units are present, you can set unsorted_id=None. Equivalently, if min_active=0, then no restriction will apply, and the unsorted_id will have no effect on the final PBE epochs. Examples -------- PBE_epochs = get_PBEs(mua_asa) PBE_epochs = get_PBEs(spiketrain, min_active=5) PBE_epochs = get_PBEs(binnedspiketrain, min_active=5) Parameters ---------- data : AnalogSignalArray AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz]. -- OR -- data : SpikeTrainArray SpikeTrainArray with multiple units, including unsorted unit(s), but excluding any noise artifects. -- OR -- data : BinnedSpikeTrainArray BinnedSpikeTrainArray containing multiunit activity. fs : float, optional Sampling frequency of mua, in Hz. If not specified, it will be inferred from data. ds : float, optional Time step in which to bin spikes. Default is 1 ms. sigma : float, optional Standard deviation (in seconds) of Gaussian smoothing kernel. Default is 10 ms. If sigma==0 then no smoothing is applied. truncate : float, optional Bandwidth of the Gaussian filter. Default is 6. unsorted_id : int, optional unit_id of the unsorted unit. Default is 0. If no unsorted unit is present, then set unsorted_id = None min_active : int, optional Minimum number of active units per event, excluding unsorted unit. Default is 5. minLength : float, optional Minimum event duration in seconds. 
Default is 50 ms. maxLength : float, optional Maximum event duration in seconds. Default is 750 ms. PrimaryThreshold : float, optional Primary threshold to exceed. Default is mean() + 3*std() SecondaryThreshold : float, optional Secondary threshold to fall back to. Default is mean(). minThresholdLength : float, optional Minimum duration to stay above PrimaryThreshold. Default is 0 ms. Returns ------- PBE_epochs : EpochArray EpochArray containing all the PBEs. Future improvements ------------------- As of now, it is possible, but not easy to specify the Primary and Secondary thresholds for event detection. A slight change in API might be needed to make this specification more flexible. """ if sigma is None: sigma = 0.01 # 10 ms standard deviation if truncate is None: truncate = 6 if isinstance(data, core.AnalogSignalArray): # if we have only mua, then we cannot set (ds, unsorted_id, min_active) if ds is not None: raise ValueError('if data is an AnalogSignalArray then ds cannot be specified!') if unsorted_id: raise ValueError('if data is an AnalogSignalArray then unsorted_id cannot be specified!') if min_active is not None: raise ValueError('if data is an AnalogSignalArray then min_active cannot be specified!') mua = data mua._data = mua._data.astype(float) if (sigma != 0) and (truncate > 0): mua = gaussian_filter(mua, sigma=sigma, truncate=truncate) elif isinstance(data, (core.EventArray, core.BinnedEventArray)): # set default parameter values: if ds is None: ds = 0.001 # default 1 ms if min_active is None: min_active = 5 mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True) else: raise TypeError('data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)') # set default parameter values: if fs is None: fs = mua.fs if minLength is None: minLength = 0.050 # 50 ms minimum event duration if maxLength is None: maxLength = 0.750 # 750 ms maximum event duration if minThresholdLength is None: minThresholdLength = 0.0 # if 
PrimaryThreshold is None: # PrimaryThreshold = # if SecondaryThreshold is None: # SecondaryThreshold = PBE_epochs = get_mua_events(mua=mua, fs=fs, minLength=minLength, maxLength=maxLength, PrimaryThreshold=PrimaryThreshold, minThresholdLength=minThresholdLength, SecondaryThreshold=SecondaryThreshold) # now require min_active number of sorted cells if isinstance(data, (core.EventArray, core.BinnedEventArray)): if min_active > 0: if unsorted_id is not None: # remove unsorted unit, if present: unit_ids = copy.deepcopy(data.unit_ids) try: unit_ids.remove(unsorted_id) except ValueError: pass # data_ = data._unit_subset(unit_ids) data_ = data.loc[:,unit_ids] else: data_ = data # determine number of active units per epoch: n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]]) active_epochs_idx = np.argwhere(n_active > min_active).squeeze() # only keep those epochs where sufficiently many units are active: PBE_epochs = PBE_epochs[active_epochs_idx] return PBE_epochs def get_contiguous_segments(data, *, step=None, assume_sorted=None, in_core=True, index=False, inclusive=False, fs=None, sort=None, in_memory=None): """Compute contiguous segments (seperated by step) in a list. Note! This function requires that a sorted list is passed. It first checks if the list is sorted O(n), and only sorts O(n log(n)) if necessary. But if you know that the list is already sorted, you can pass assume_sorted=True, in which case it will skip the O(n) check. Returns an array of size (n_segments, 2), with each row being of the form ([start, stop]) [inclusive, exclusive]. NOTE: when possible, use assume_sorted=True, and step=1 as explicit arguments to function call. WARNING! Step is robustly computed in-core (i.e., when in_core is True), but is assumed to be 1 when out-of-core. 
Example ------- >>> data = [1,2,3,4,10,11,12] >>> get_contiguous_segments(data) ([1,5], [10,13]) >>> get_contiguous_segments(data, index=True) ([0,4], [4,7]) Parameters ---------- data : array-like 1D array of sequential data, typically assumed to be integral (sample numbers). step : float, optional Expected step size for neighboring samples. Default uses numpy to find the median, but it is much faster and memory efficient to explicitly pass in step=1. assume_sorted : bool, optional If assume_sorted == True, then data is not inspected or re-ordered. This can be significantly faster, especially for out-of-core computation, but it should only be used when you are confident that the data is indeed sorted, otherwise the results from get_contiguous_segments will not be reliable. in_core : bool, optional If True, then we use np.diff which requires all the data to fit into memory simultaneously, otherwise we use groupby, which uses a generator to process potentially much larger chunks of data, but also much slower. index : bool, optional If True, the indices of segment boundaries will be returned. Otherwise, the segment boundaries will be returned in terms of the data itself. Default is False. inclusive : bool, optional If True, the boundaries are returned as [(inclusive idx, inclusive idx)] Default is False, and can only be used when index==True. Deprecated ---------- in_memory : bool, optional This is equivalent to the new 'in-core'. 
sort : bool, optional This is equivalent to the new 'assume_sorted' fs : sampling rate (Hz) used to extend half-open interval support by 1/fs """ # handle deprecated API calls: if in_memory: in_core = in_memory logging.warning("'in_memory' has been deprecated; use 'in_core' instead") if sort: assume_sorted = sort logging.warning("'sort' has been deprecated; use 'assume_sorted' instead") if fs: step = 1/fs logging.warning("'fs' has been deprecated; use 'step' instead") if inclusive: assert index, "option 'inclusive' can only be used with 'index=True'" if in_core: data = np.asarray(data) if not assume_sorted: if not is_sorted(data): data = np.sort(data) # algorithm assumes sorted list if step is None: step = np.median(np.diff(data)) # assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as # data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen # that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps. 
if np.any(np.diff(data) < step): logging.warning("some steps in the data are smaller than the requested step size.") breaks = np.argwhere(np.diff(data)>=2*step) starts = np.insert(breaks+1, 0, 0) stops = np.append(breaks, len(data)-1) bdries = np.vstack((data[starts], data[stops] + step)).T if index: if inclusive: indices = np.vstack((starts, stops)).T else: indices = np.vstack((starts, stops + 1)).T return indices else: from itertools import groupby from operator import itemgetter if not assume_sorted: if not is_sorted(data): # data = np.sort(data) # algorithm assumes sorted list raise NotImplementedError("out-of-core sorting has not been implemented yet...") if step is None: step = 1 bdries = [] if not index: for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])): f = itemgetter(1) gen = (f(x) for x in g) start = next(gen) stop = start for stop in gen: pass bdries.append([start, stop + step]) else: counter = 0 for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])): f = itemgetter(1) gen = (f(x) for x in g) _ = next(gen) start = counter stop = start for _ in gen: stop +=1 if inclusive: bdries.append([start, stop]) else: bdries.append([start, stop + 1]) counter = stop + 1 return np.asarray(bdries) def get_direction(asa, *, sigma=None): """Return epochs during which an animal was running left to right, or right to left. Parameters ---------- asa : AnalogSignalArray 1D AnalogSignalArray containing the 1D position data. sigma : float, optional Smoothing to apply to position (x) before computing gradient estimate. Default is 0. Returns ------- l2r, r2l : EpochArrays EpochArrays corresponding to left-to-right and right-to-left movement. """ if sigma is None: sigma = 0 if not isinstance(asa, core.AnalogSignalArray): raise TypeError('AnalogSignalArray expected!') assert asa.n_signals == 1, "1D AnalogSignalArray expected!" 
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma), rectify=False).data direction[direction>=0] = 1 direction[direction<0] = -1 direction = direction.squeeze() l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1) l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive] l2r = core.EpochArray(asa.abscissa_vals[l2r]) r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1) r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive] r2l = core.EpochArray(asa.abscissa_vals[r2l]) return l2r, r2l class PrettyBytes(int): """Prints number of bytes in a more readable format""" def __init__(self, val): self.val = val def __str__(self): if self.val < 1024: return '{} bytes'.format(self.val) elif self.val < 1024**2: return '{:.3f} kilobytes'.format(self.val/1024) elif self.val < 1024**3: return '{:.3f} megabytes'.format(self.val/1024**2) elif self.val < 1024**4: return '{:.3f} gigabytes'.format(self.val/1024**3) def __repr__(self): return self.__str__() class PrettyInt(int): """Prints integers in a more readable format""" def __init__(self, val): self.val = val def __str__(self): return '{:,}'.format(self.val) def __repr__(self): return '{:,}'.format(self.val) class PrettyDuration(float): """Time duration with pretty print. Behaves like a float, and can always be cast to a float. 
""" def __init__(self, seconds): self.duration = seconds def __str__(self): return self.time_string(self.duration) def __repr__(self): return self.time_string(self.duration) @staticmethod def to_dhms(seconds): """convert seconds into hh:mm:ss:ms""" pos = seconds >= 0 if not pos: seconds = -seconds ms = seconds % 1; ms = round(ms*10000)/10 seconds = floor(seconds) m, s = divmod(seconds, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) Time = namedtuple('Time', 'pos dd hh mm ss ms') time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms) return time @staticmethod def time_string(seconds): """returns a formatted time string.""" if np.isinf(seconds): return 'inf' pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds) if s > 0: if mm == 0: # in this case, represent milliseconds in terms of # seconds (i.e. a decimal) sstr = str(s/1000).lstrip('0') if s >= 999.5: ss += 1 s = 0 sstr = "" # now propagate the carry: if ss == 60: mm += 1 ss = 0 if mm == 60: hh +=1 mm = 0 if hh == 24: dd += 1 hh = 0 else: # for all other cases, milliseconds will be represented # as an integer if s >= 999.5: ss += 1 s = 0 sstr = "" # now propagate the carry: if ss == 60: mm += 1 ss = 0 if mm == 60: hh +=1 mm = 0 if hh == 24: dd += 1 hh = 0 else: sstr = ":{:03d}".format(int(s)) else: sstr = "" if dd > 0: daystr = "{:01d} days ".format(dd) else: daystr = "" if hh > 0: timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr) elif mm > 0: timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr) elif ss > 0: timestr = daystr + "{:01d}{} seconds".format(ss, sstr) else: timestr = daystr +"{} milliseconds".format(s) if not pos: timestr = "-" + timestr return timestr def __add__(self, other): """a + b""" return PrettyDuration(self.duration + other) def __radd__(self, other): """b + a""" return self.__add__(other) def __sub__(self, other): """a - b""" return PrettyDuration(self.duration - other) def __rsub__(self, other): """b - a""" return other - self.duration def __mul__(self, 
other): """a * b""" return PrettyDuration(self.duration * other) def __rmul__(self, other): """b * a""" return self.__mul__(other) def __truediv__(self, other): """a / b""" return PrettyDuration(self.duration / other) def shrinkMatColsTo(mat, numCols): """ Docstring goes here Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1""" import scipy.ndimage numCells = mat.shape[0] numColsMat = mat.shape[1] a = np.zeros((numCells, numCols)) for row in np.arange(numCells): niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1) a[row,:] = niurou return a def find_threshold_crossing_events(x, threshold, *, mode='above'): """Find threshold crossing events. INCLUSIVE Parameters ---------- x : numpy array Input data threshold : float The value whose crossing triggers an event mode : string, optional in ['above', 'below']; default 'above' event triggering above, or below threshold Returns ------- eventlist : list List containing the indices corresponding to threshold crossings eventmax : list List containing the maximum value of each event """ from itertools import groupby from operator import itemgetter if mode == 'below': cross_threshold = np.where(x <= threshold, 1, 0) elif mode == 'above': cross_threshold = np.where(x >= threshold, 1, 0) else: raise NotImplementedError( "mode {} not understood for find_threshold_crossing_events".format(str(mode))) eventlist = [] eventmax = [] for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)): if k: v = list(v) eventlist.append([v[0][0],v[-1][0]]) try : eventmax.append(x[v[0][0]:(v[-1][0]+1)].max()) except : print(v, x[v[0][0]:v[-1][0]]) eventmax = np.asarray(eventmax) eventlist = np.asarray(eventlist) return eventlist, eventmax def get_events_boundaries(x, *, PrimaryThreshold=None, SecondaryThreshold=None, minThresholdLength=None, minLength=None, maxLength=None, ds=None, mode='above'): """get event boundaries such that event.max >= PrimaryThreshold and the event extent is 
defined by SecondaryThreshold. Note that when PrimaryThreshold==SecondaryThreshold, then this is a simple threshold crossing algorithm. NB. minLength and maxLength are applied to the SecondaryThreshold events, whereas minThresholdLength is applied to the PrimaryThreshold events. Parameters ---------- x : numpy array Input data mode : string, optional in ['above', 'below']; default 'above' event triggering above, or below threshold PrimaryThreshold : float, optional If mode=='above', requires that event.max >= PrimaryThreshold If mode=='below', requires that event.min <= PrimaryThreshold SecondaryThreshold : float, optional The value that defines the event extent minThresholdLength : float, optional Minimum duration for which the PrimaryThreshold is crossed minLength : float, optional Minimum duration for which the SecondaryThreshold is crossed maxLength : float, optional Maximum duration for which the SecondaryThreshold is crossed ds : float, optional Time step of the input data x Returns ------- returns bounds, maxes, events where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive maxes <==> maximum value during each event events <==> PrimaryThreshold to PrimaryThreshold, inclusive """ # TODO: x must be a numpy array # TODO: ds is often used, but we have no default, and no check for when # it is left as None. # TODO: the Docstring should equally be improved. 
x = x.squeeze() if x.ndim > 1: raise TypeError("multidimensional arrays not supported!") if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x PrimaryThreshold = np.mean(x) + 3*np.std(x) if SecondaryThreshold is None: # by default, revert back to mean of x SecondaryThreshold = np.mean(x) # + 0*np.std(x) events, _ = \ find_threshold_crossing_events(x=x, threshold=PrimaryThreshold, mode=mode) # apply minThresholdLength criterion: if minThresholdLength is not None and len(events) > 0: durations = (events[:,1] - events[:,0] + 1) * ds events = events[[durations >= minThresholdLength]] if len(events) == 0: bounds, maxes, events = [], [], [] logging.warning("no events satisfied criteria") return bounds, maxes, events # Find periods where value is > SecondaryThreshold; note that the previous periods should be within these! if mode == 'above': assert SecondaryThreshold <= PrimaryThreshold, \ "Secondary Threshold by definition should include more data than Primary Threshold" elif mode == 'below': assert SecondaryThreshold >= PrimaryThreshold, \ "Secondary Threshold by definition should include more data than Primary Threshold" else: raise NotImplementedError( "mode {} not understood for find_threshold_crossing_events".format(str(mode))) bounds, broader_maxes = \ find_threshold_crossing_events(x=x, threshold=SecondaryThreshold, mode=mode) # Find corresponding big windows for potential events # Specifically, look for closest left edge that is just smaller outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right') # searchsorted finds the index after, so subtract one to get index before outer_boundary_indices = outer_boundary_indices - 1 # Find extended boundaries for events by pairing to larger windows # (Note that there may be repeats if the larger window contains multiple > 3SD sections) bounds = bounds[outer_boundary_indices,:] maxes = broader_maxes[outer_boundary_indices] if minLength is not None and len(events) > 0: durations 
= (bounds[:,1] - bounds[:,0] + 1) * ds # TODO: refactor [durations <= maxLength] but be careful about edge cases bounds = bounds[[durations >= minLength]] maxes = maxes[[durations >= minLength]] events = events[[durations >= minLength]] if maxLength is not None and len(events) > 0: durations = (bounds[:,1] - bounds[:,0] + 1) * ds # TODO: refactor [durations <= maxLength] but be careful about edge cases bounds = bounds[[durations <= maxLength]] maxes = maxes[[durations <= maxLength]] events = events[[durations <= maxLength]] if len(events) == 0: bounds, maxes, events = [], [], [] logging.warning("no events satisfied criteria") return bounds, maxes, events # Now, since all that we care about are the larger windows, so we should get rid of repeats _, unique_idx = np.unique(bounds[:,0], return_index=True) bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold maxes = maxes[unique_idx] # maximum value during event events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold return bounds, maxes, events def signal_envelope1D(data, *, sigma=None, fs=None): logging.warnings("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!") return signal_envelope_1d(data, sigma=sigma, fs=fs) def signal_envelope_1d(data, *, sigma=None, fs=None): """Finds the signal envelope by taking the absolute value of the Hilbert transform Parameters ---------- data : numpy array, list, or RegularlySampledAnalogSignalArray Input data If data is a numpy array, it is expected to have shape (n_signals, n_samples) If data is a list, it is expected to have length n_signals, where each sublist has length n_samples, i.e. data is not jagged sigma : float, optional Standard deviation of the Gaussian kernel used to smooth the envelope after applying the Hilbert transform. Units of seconds. 
Default is 4 ms fs : float, optional Sampling rate of the signal Returns ------- out : same type as the input object An object containing the signal envelope TODO: this is not yet epoch-aware! UPDATE: this is actually epoch-aware by now! """ if sigma is None: sigma = 0.004 # 4 ms standard deviation if fs is None: if isinstance(data, (np.ndarray, list)): raise ValueError("sampling frequency must be specified!") elif isinstance(data, core.RegularlySampledAnalogSignalArray): fs = data.fs if isinstance(data, (np.ndarray, list)): data_array = np.array(data) n_dims = np.array(data).ndim assert n_dims <= 2, "Only 1D signals supported!" if n_dims == 1: input_data = data_array.reshape((1, data_array.size)) else: input_data = data_array n_signals, n_samples = input_data.shape # Compute number of samples to compute fast FFTs padlen = nextfastpower(n_samples) - n_samples # Pad data paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) ) # Use hilbert transform to get an envelope envelope = np.absolute(hilbert(paddeddata, axis=-1)) # free up memory del paddeddata # Truncate results back to original length envelope = envelope[..., :n_samples] if sigma: # Smooth envelope with a gaussian (sigma = 4 ms default) EnvelopeSmoothingSD = sigma*fs smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD, mode='constant', axis=-1) envelope = smoothed_envelope if isinstance(data, list): envelope = envelope.tolist() return envelope elif isinstance(data, core.RegularlySampledAnalogSignalArray): # Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported assert data.data.ndim == 2 cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0) newasa = data.copy() # for segment in data: for idx in range(data.n_epochs): # print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs)) segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]] n_signals, n_samples = segment_data.shape # Compute number of samples to compute fast FFTs: 
padlen = nextfastpower(n_samples) - n_samples # Pad data paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) ) # Use hilbert transform to get an envelope envelope = np.absolute(hilbert(paddeddata, axis=-1)) # free up memory del paddeddata # Truncate results back to original length envelope = envelope[..., :n_samples] if sigma: # Smooth envelope with a gaussian (sigma = 4 ms default) EnvelopeSmoothingSD = sigma*fs smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD, mode='constant', axis=-1) envelope = smoothed_envelope newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope) return newasa def nextpower(n, base=2.0): """Return the next integral power of two greater than the given number. Specifically, return m such that m >= n m == 2**x where x is an integer. Use base argument to specify a base other than 2. This is useful for ensuring fast FFT sizes. From https://gist.github.com/bhawkins/4479607 (Brian Hawkins) """ x = base**ceil (log (n) / log (base)) if type(n) == np.ndarray: return np.asarray (x, dtype=int) else: return int (x) def nextfastpower(n): """Return the next integral power of small factors greater than the given number. Specifically, return m such that m >= n m == 2**x * 3**y * 5**z where x, y, and z are integers. This is useful for ensuring fast FFT sizes. From https://gist.github.com/bhawkins/4479607 (Brian Hawkins) See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html """ if n < 7: return max (n, 1) # x, y, and z are all bounded from above by the formula of nextpower. # Compute all possible combinations for powers of 3 and 5. # (Not too many for reasonable FFT sizes.) def power_series (x, base): nmax = ceil (log (x) / log (base)) return np.logspace (0.0, nmax, num=nmax+1, base=base) n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0)) n35 = n35[n35<=n] # Lump the powers of 3 and 5 together and solve for the powers of 2. 
n2 = nextpower (n / n35) return int (min (n2 * n35)) @keyword_deprecation(replace_x_with_y={'bw':'truncate'}) def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False): """Smooths with a Gaussian kernel. Smoothing is applied along the abscissa, and the same smoothing is applied to each signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray. Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported. Parameters ---------- obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray. fs : float, optional Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will be inferred. sigma : float, optional Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05 (50 ms if base_unit=seconds). truncate : float, optional Bandwidth outside of which the filter value will be zero. Default is 4.0. inplace : bool If True the data will be replaced with the smoothed data. Default is False. mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to ‘constant’. Default is ‘reflect’. cval : scalar, optional Value to fill past edges of input if mode is ‘constant’. Default is 0.0. within_intervals : boolean, optional If True, then smooth within each epoch. Otherwise smooth across epochs. Default is False. Note that when mode = 'wrap', then smoothing within epochs aren't affected by wrapping. Returns ------- out : same type as obj An object with smoothed data is returned. 
""" if sigma is None: sigma = 0.05 if truncate is None: truncate = 4 if mode is None: mode = 'reflect' if cval is None: cval = 0.0 if not inplace: out = copy.deepcopy(obj) else: out = obj if isinstance(out, core.RegularlySampledAnalogSignalArray): if fs is None: fs = out.fs if fs is None: raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name)) elif isinstance(out, core.BinnedEventArray): bst = out if fs is None: fs = 1/bst.ds if fs is None: raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name)) else: raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out)))) sigma = sigma * fs if not within_intervals: # see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python # (1) if smoothing across intervals, we work on a merged support # (2) build abscissa_vals, including existing ones, and out-of-support ones # (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0 # (4) Z = smooth(V)/smooth(W) # (5) only keep original support, and original abscissa_vals if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)): support = out._abscissa.support.merge() if not support.domain.is_finite: support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support missing_abscissa_vals = [] for interval in (~support): missing_vals = frange(interval.start, interval.stop, 1/fs) missing_abscissa_vals.extend(missing_vals) if isinstance(out, core.RegularlySampledAnalogSignalArray): n_signals = out.n_signals n_samples = out.n_samples elif isinstance(out, core.BinnedEventArray): n_signals = out.n_series n_samples = out.n_bins V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals))) W = np.ones(V.shape) all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals)) data_idx = 
np.searchsorted(all_abscissa_vals, out._abscissa_vals) missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals) V[:, data_idx] = out.data W[:, missing_idx] = 0 VV = scipy.ndimage.filters.gaussian_filter(V, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval) WW = scipy.ndimage.filters.gaussian_filter(W, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval) Z = VV[:,data_idx]/WW[:,data_idx] out._data = Z else: raise NotImplementedError("gaussian_filter across intervals for {} is not yet supported!".format(str(type(out)))) else: # within intervals: cum_lengths = np.insert(np.cumsum(out.lengths), 0, 0) out._data = out._data.astype(float) if isinstance(out, core.RegularlySampledAnalogSignalArray): # now smooth each interval separately for idx in range(out.n_intervals): out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate) elif isinstance(out, core.BinnedSpikeTrainArray): # now smooth each interval separately for idx in range(out.n_epochs): out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate) # out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = self._smooth_array(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], w=w) return out @keyword_deprecation(replace_x_with_y={'bw':'truncate'}) def ddt_asa(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False): """Numerical differentiation of a regularly sampled AnalogSignalArray. Optionally also smooths result with a Gaussian kernel. Smoothing is applied in time, and the same smoothing is applied to each signal in the AnalogSignalArray. Differentiation, (and if requested, smoothing) is applied within each epoch. Parameters ---------- asa : nelpy.RegularlySampledAnalogSignalArray Input object. 
fs : float, optional Sampling rate (in Hz) of input RSASA. If not provided, it will be obtained from asa.fs. smooth : bool, optional If true, result will be smoothed. Default is False rectify : bool, optional If True, absolute value of derivative is computed. Default is True. sigma : float, optional Standard deviation of Gaussian kernel, in seconds. Default is 0.05 (50 ms). truncate : float, optional Bandwidth outside of which the filter value will be zero. Default is 4.0 norm: boolean, optional If True, then apply the L2 norm to the result. Returns ------- out : nelpy.RegularlySampledAnalogSignalArray A RegularlySampledAnalogSignalArray with derivative data (in units per second) is returned. Notes ----- Central differences are used here. """ if not isinstance(asa, core.RegularlySampledAnalogSignalArray): raise TypeError("Input object must be a RegularlySampledAnalogSignalArray!") if fs is None: fs = asa.fs if sigma is None: sigma = 0.05 # 50 ms default out = asa.copy() cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0) # ensure that datatype is float # TODO: this will break complex data out._data = out.data.astype(float) # now obtain the derivative for each epoch separately for idx in range(asa.n_epochs): # if 1D: if asa.n_signals == 1: if (cum_lengths[idx+1]-cum_lengths[idx]) < 2: # only single sample out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0 else: out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1) else: if (cum_lengths[idx+1]-cum_lengths[idx]) < 2: # only single sample out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = 0 else: out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1) out._data = out._data * fs if norm: out._data = np.atleast_2d(np.linalg.norm(out._data, axis=0)) if rectify: out._data = np.abs(out._data) if smooth: out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate) return out 
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def dxdt_AnalogSignalArray(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None):
    """Numerical differentiation of a regularly sampled AnalogSignalArray.

    DEPRECATED: this function unconditionally raises DeprecationWarning;
    use ddt_asa instead.

    Optionally also smooths result with a Gaussian kernel.

    Smoothing is applied in time, and the same smoothing is applied to each
    signal in the AnalogSignalArray.

    Differentiation, (and if requested, smoothing) is applied within each
    epoch.

    Parameters
    ----------
    asa : AnalogSignalArray
    fs : float, optional
        Sampling rate (in Hz) of AnalogSignalArray. If not provided, it will
        be obtained from asa.fs
    smooth : bool, optional
        If true, result will be smoothed. Default is False
    rectify : bool, optional
        If True, absolute value of derivative is computed. Default is True.
    sigma : float, optional
        Standard deviation of Gaussian kernel, in seconds. Default is 0.05
        (50 ms).
    truncate : float, optional
        Bandwidth outside of which the filter value will be zero.
        Default is 4.0

    Returns
    -------
    out : AnalogSignalArray
        An AnalogSignalArray with derivative data (in units per second) is
        returned.
    """
    raise DeprecationWarning('use ddt_asa instead!')
    # NOTE: everything below this raise is unreachable; it is kept only as a
    # historical reference for the pre-deprecation implementation.
    if fs is None:
        fs = asa.fs
    if fs is None:
        raise ValueError("fs must either be specified, or must be contained in the AnalogSignalArray!")
    if sigma is None:
        sigma = 0.05 # 50 ms default

    out = copy.deepcopy(asa)
    cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)

    # ensure that datatype is float
    out._data = out.data.astype(float)

    if asa.n_signals == 2:
        out._data = out._data[[0],:]

    # now obtain the derivative for each epoch separately
    for idx in range(asa.n_epochs):
        # if 1D:
        if asa.n_signals == 1:
            if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
                # only single sample
                out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
            else:
                out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
        elif asa.n_signals == 2:
            if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
                # only single sample
                out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
            else:
                # 2D case: magnitude of the gradient across both signals
                out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.linalg.norm(np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1), axis=0)
        else:
            raise TypeError("more than 2D not currently supported!")

    out._data = out._data * fs

    if rectify:
        out._data = np.abs(out._data)

    if smooth:
        out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)

    return out


def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'):
    """Return epochs where a signal crosses a compound threshold specified by t1
    and t2.

    Parameters
    ----------
    asa : AnalogSignalArray
        AnalogSignalArray containing a single channel
    t1 : float, optional
        Primary threshold. Minimum signal value that has to be reached /
        exceeded during an event. Default is 3 standard deviations above
        signal mean.
    t2 : float, optional
        Secondary threshold. Signal value that defines the event boundaries.
        Default is signal mean.
    mode : string, optional
        Mode of operation. One of ['above', 'below']. If 'above', then return
        epochs where the signal exceeds the compound threshold, and if
        'below', then return epochs where the signal falls below the
        compound threshold. Default is 'above'.

    Returns
    -------
    epochs : EpochArray
        EpochArray with all the epochs where the signal satisfied the criteria.
    """
    if asa.n_signals > 1:
        raise TypeError("multidimensional AnalogSignalArrays not supported!")
    x = asa.data.squeeze()

    if t1 is None: # by default, threshold is 3 SDs above mean of x
        t1 = np.mean(x) + 3*np.std(x)

    if t2 is None: # by default, revert back to mean of x
        t2 = np.mean(x)

    # compute periods where signal exceeds compound threshold
    epoch_bounds, _, _ = get_events_boundaries(
        x=x,
        PrimaryThreshold=t1,
        SecondaryThreshold=t2,
        mode=mode
    )
    # convert bounds to time in seconds
    epoch_bounds = asa.time[epoch_bounds]
    if len(epoch_bounds) == 0:
        return type(asa._abscissa.support)(empty=True)
    # add 1/fs to stops for open interval
    epoch_bounds[:,1] += 1/asa.fs
    # create EpochArray with threshold-exceeding bounds
    epochs = type(asa._abscissa.support)(epoch_bounds)
    return epochs


def get_run_epochs(speed, v1=10, v2=8):
    """Return epochs where animal is running at least as fast as
    specified by v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Minimum speed (in same units as speed) that has to be reached /
        exceeded during an event. Default is 10 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 8 [units/sec]

    Returns
    -------
    run_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """
    run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above')
    return run_epochs


def get_inactive_epochs(speed, v1=5, v2=7):
    """Return epochs where animal is running no faster than specified by
    v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Maximum speed (in same units as speed) that the signal must fall to /
        below during an event. Default is 5 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 7 [units/sec]

    Returns
    -------
    inactive_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """
    inactive_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below')
    return inactive_epochs


def spiketrain_union(st1, st2):
    """Join two spiketrains together.

    WARNING! This function should be improved a lot!
    """
    # both spike trains must have the same number of units to be merged
    assert st1.n_units == st2.n_units
    support = st1.support.join(st2.support)

    newdata = []
    for unit in range(st1.n_units):
        newdata.append(np.append(st1.time[unit], st2.time[unit]))

    # only keep a sampling rate if the two trains agree on it
    fs = None
    if st1.fs == st2.fs:
        fs = st1.fs

    return core.SpikeTrainArray(newdata, support=support, fs=fs)


########################################################################
# uncurated below this line!
########################################################################

def find_nearest_idx(array, val):
    """Finds nearest index in array to value.

    Parameters
    ----------
    array : np.array
    val : float

    Returns
    -------
    Index into array that is closest to val

    TODO: this is a better version that should be incorporated:
    # Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    def find_nearest(array,values):
        right_idxs = np.searchsorted(array, values, side="left")
        left_idxs = np.where(right_idxs > 0, right_idxs-1, right_idxs)
        right_idxs = np.where(right_idxs == len(array), len(array)-1, right_idxs)
        closest_idx = np.where(np.abs(values - array[right_idxs]) < np.abs(values - array[left_idxs]),
                            right_idxs, left_idxs)
        return closest_idx
    """
    # NOTE: O(n) linear scan per query; see TODO above for an O(log n) version
    return (np.abs(array-val)).argmin()


def find_nearest_indices(array, vals):
    """Finds nearest index in array to value.
    Parameters
    ----------
    array : np.array
        This is the array you wish to index into.
    vals : np.array
        This is the array that you are getting your indices from.

    Returns
    -------
    Indices into array that is closest to vals.

    Notes
    -----
    Wrapper around find_nearest_idx().
    """
    return np.array([find_nearest_idx(array, val) for val in vals], dtype=int)


def get_sort_idx(tuning_curves):
    """Finds indices to sort neurons by max firing in tuning curve.

    Parameters
    ----------
    tuning_curves : list of lists
        Where each inner list is the tuning curves for an individual
        neuron.

    Returns
    -------
    sorted_idx : list
        List of integers that correspond to the neuron in sorted order.
    """
    # (neuron index, location of peak firing) for each neuron
    tc_max_loc = []
    for i, neuron_tc in enumerate(tuning_curves):
        tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0]))
    # order neurons by where their tuning curve peaks
    sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1])
    sorted_idx = []
    for idx in sorted_by_tc:
        sorted_idx.append(idx[0])
    return sorted_idx


def collapse_time(obj, gap=0):
    """Collapse all epochs in a SpikeTrainArray and collapse them into a single,
    contiguous SpikeTrainArray.

    Parameters
    ----------
    obj : RegularlySampledAnalogSignalArray or EventArray
        Object whose epochs will be shifted left so that they abut one
        another (separated by `gap`).
    gap : float, optional
        Amount of time inserted between successive epochs. Default is 0.
        Only supported for RegularlySampledAnalogSignalArrays.

    Returns
    -------
    new_obj : same type as obj
        A new object with a single contiguous support.
    """
    # TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps!

    # We'll have to ajust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!
    # If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
    # left-shift the time and tdata.

    # Also set a new attribute, with the boundaries in seconds.

    if isinstance(obj, core.RegularlySampledAnalogSignalArray):
        new_obj = type(obj)(empty=True)
        new_obj._data = obj._data

        # build the collapsed support: epochs laid end-to-end, `gap` apart
        durations = obj.support.durations
        starts = np.insert(np.cumsum(durations + gap),0,0)[:-1]
        stops = starts + durations
        newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
        new_obj._support = newsupport

        new_time = obj.time.astype(float) # fast copy
        time_idx = np.insert(np.cumsum(obj.lengths),0,0)

        # left-shift each epoch's timestamps onto the collapsed timeline
        new_offset = 0
        for epidx in range(obj.n_epochs):
            if epidx > 0:
                new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap
                new_offset += durations[epidx] + gap
            else:
                new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset
                new_offset += durations[epidx]
        new_obj._time = new_time

        new_obj._fs = obj._fs

    elif isinstance(obj, core.EventArray):
        if gap > 0:
            raise ValueError("gaps not supported for SpikeTrainArrays yet!")
        new_obj = type(obj)(empty=True)
        # one accumulating list of shifted event times per series
        new_time = [[] for _ in range(obj.n_series)]
        duration = 0
        for st_ in obj:
            le = st_.support.start
            for unit_ in range(obj.n_series):
                new_time[unit_].extend(st_._data[unit_] - le + duration)
            duration += st_.support.duration
        new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time])
        new_obj._data = new_time
        new_obj.support = type(obj._abscissa.support)([0, duration])
        # carry over the series metadata unchanged
        new_obj._series_ids = obj._series_ids
        new_obj._series_labels = obj._series_labels
        new_obj._series_tags = obj._series_tags
    elif isinstance(obj, core.BinnedEventArray):
        raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!")
    else:
        raise TypeError("unsupported type for collapse_time")

    return new_obj


def cartesian(xcenters, ycenters):
    """Finds every combination of elements in two arrays.

    Parameters
    ----------
    xcenters : np.array
    ycenters : np.array

    Returns
    -------
    cartesian : np.array
        With shape (n_samples, 2).
    """
    return np.transpose([np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))])
{"hexsha": "37f19f659d9ef143b2408f934266bdcc951f5ade", "size": 73603, "ext": "py", "lang": "Python", "max_stars_repo_path": "nelpy/utils.py", "max_stars_repo_name": "IsaacBusaleh/nelpy", "max_stars_repo_head_hexsha": "f2663cf6f028c9bd0e630fbf8a527c236f4e0f41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-01T17:59:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-01T17:59:31.000Z", "max_issues_repo_path": "nelpy/utils.py", "max_issues_repo_name": "IsaacBusaleh/nelpy", "max_issues_repo_head_hexsha": "f2663cf6f028c9bd0e630fbf8a527c236f4e0f41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nelpy/utils.py", "max_forks_repo_name": "IsaacBusaleh/nelpy", "max_forks_repo_head_hexsha": "f2663cf6f028c9bd0e630fbf8a527c236f4e0f41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1357214934, "max_line_length": 190, "alphanum_fraction": 0.6157085988, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 17764}
\section{Experiment} While the modified Count-min sketch algorithm provides an error bound for the estimated movie averages, what we are really interested in is how much it affects the ordering of the movies when sorted by these estimated averages compared to the real averages. In this section, we evaluate the modified Count-min sketch algorithm by running it on real-life data, and comparing the order it produces with the correct order. We quantify the error in ordering by the Kendall tau distance. For a permutation of movies $\pi$, we say that $x <_\pi y$ if $x$ comes before $y$ in $\pi$. The Kendall tau distance between two orderings of our movie set is then $\mathrm{KT} \left(\pi_1,\pi_2\right) = \left|\{x,y\}: x <_{\pi_1} y \wedge y <_{\pi_2} x\right|$. The Kendall distance is often normalized to the maximum number of inversions between the orderings, $n(n-1)/2$ for $n$ elements. We expect random orderings to have a normalized Kendall tau distance of $\frac{1}{2}$, so only results with a lower distance can be considered useful. \subsection{Results} Our data is sampled from the \textit{Netflix Prize}\footnotemark \ data set. The full data set contains more than 100 million user ratings, for more than 17 thousand movies. We test the algorithm on four samples of the \texttt{Netflix} data: \footnotetext{http://academictorrents.com/details/9b13183dc4d60676b773c9e2cd6de5e5542cee9a} \begin{itemize} \item \textit{100M ratings} contains all ratings for the $13726$ most frequently rated movies. Roughly 100 million ratings total, \item \textit{50Mmin ratings} contains all ratings for the $17146$ least frequently rated movies. Roughly 50 million ratings total, \item \textit{50M ratings} contains all ratings for the $611$ most frequently rated movies. Roughly 50 million ratings total, \item \textit{min10K ratings} contains all ratings for the $2042$ movies that have more than $10{,}000$ ratings.
\end{itemize} \pgfplotsset{scaled x ticks=false} \begin{center} \begin{tikzpicture} \begin{axis}[ title=Count-min order error, xlabel={Error bound $\varepsilon$}, ylabel={KT. norm.}, legend pos=south east, xticklabel style={ /pgf/number format/.cd, fixed, fixed zerofill, precision=3, /tikz/.cd }, ] \addplot table [y=100M,x=E]{allresults}; \addlegendentry{\textit{100M ratings}} \addplot table [y=50M,x=E]{allresults}; \addlegendentry{\textit{50M ratings}} \addplot table [y=min10K,x=E]{allresults}; \addlegendentry{\textit{min10K ratings}} \addplot table [y=50Mmin,x=E]{allresults}; \addlegendentry{\textit{50Mmin ratings}} \end{axis} \end{tikzpicture} \end{center} All trials are run with $\delta = 0.01$. As expected, the algorithm performs much worse on the large \textit{100M ratings} dataset than on the smaller data sets. For $\varepsilon = 0.001$, the normalized Kendall tau distance is $0.33$, and it rapidly approaches $0.5$. The obvious explanation is the error's dependency on the length of the stream, but there is another factor at play. Because of the way the Count-min algorithm works --- adding up the ratings when the hash functions have collisions --- there is a tendency that movies with fewer ratings will cause inversions more often than movies with many ratings. Furthermore, we see that the algorithm performs reasonably well in the two other data sets, containing only movies with many ratings. We see, however, that in order to save memory compared to the simple approaches described in section \ref{sec:sorting}, we have to accept a normalized Kendall tau distance of at least $0.1$, or choose a higher value for $\delta$. \subsection{Implementation} The implementation can be found in the appendices. We note that the implementation does not live up to the performance bounds stated in section \ref{sec:sketching}. This is not a problem, since we are assessing the correctness of the algorithm, not its throughput.
The implementation differs in two ways: to emulate querying all data points, a list of all observed movies is kept, requiring $O(|U|)$ extra memory. Furthermore, the ratings are sorted when queried, rather than maintaining the ordering dynamically. Again, this does not affect the experiment.
{"hexsha": "19bc34169638c127f728387081bc32b56305a1f2", "size": 4193, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "experiment.tex", "max_stars_repo_name": "Bladtman242/SAD2_project", "max_stars_repo_head_hexsha": "6bc06598b2dd676a0c6ef6860c6e4b69ccff3e85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment.tex", "max_issues_repo_name": "Bladtman242/SAD2_project", "max_issues_repo_head_hexsha": "6bc06598b2dd676a0c6ef6860c6e4b69ccff3e85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment.tex", "max_forks_repo_name": "Bladtman242/SAD2_project", "max_forks_repo_head_hexsha": "6bc06598b2dd676a0c6ef6860c6e4b69ccff3e85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1368421053, "max_line_length": 91, "alphanum_fraction": 0.7710469831, "num_tokens": 1124}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script includes the remote computations for brainage prediction
using decentralized SVR with FNC as features
"""
import json
import sys

import numpy as np

from core import common_functions as cf

# Per-site result keys that remote_0 gathers from every local site.
OUTPUT_KEY_LIST = ['w_local', 'intercept_local', 'n_train_samples_local',
                   'n_test_samples_local', 'rmse_train_local', 'rmse_test_local',
                   'mae_train_local', 'mae_test_local']

"""
============================================================================
The below function does the following tasks
1. aggregate input parameters from local sites and send it back to local_1
----------------------------------------------------------------------------
This function takes in the following inputs in args['input']:
----------------------------------------------------------------------------
- wlocal : weight coefficients
- intercept_local: intercept of local site
- n_train_samples_local: number of training samples
- n_test_samples_local: number of testing samples
- rmse_train_local: root mean square error
- rmse_test_local: root mean square error of test data
- mae_train_local: mean absolute error
- mae_test_local: mean absolute error of test data
- computation_phase : local_0
----------------------------------------------------------------------------
And gives the following output:
----------------------------------------------------------------------------
- output :
    wlocals : aggregate weight coefficients
    computation_phase : remote_0
- cache:
    intercept_locals: aggregate intercept of local sites
    rmse_train_locals: aggregate root mean square error
    rmse_test_locals: aggregate root mean square error of test data
    mae_train_locals: mean absolute error of training data
    mae_test_locals: mean absolute error of test data
    n_train_samples_locals: number of training samples
    n_test_samples_locals: number of test samples
============================================================================
"""
def aggregate_locals(input_list, key_list):
    # For each requested key, gather that key's value from every site dict
    # that provides it, stacked into a numpy array. Sites missing the key
    # are silently skipped.
    aggegated_dict = {}
    for key_name in key_list:
        aggegated_dict[key_name] = np.array(
            [
                site_dict[key_name]
                for site, site_dict in input_list.items()
                if key_name in site_dict
            ]
        )
    return aggegated_dict


def remote_0(args):
    # Aggregate the per-site results posted by the local_0 phase.
    input_list = args["input"]
    aggegated_dict = aggregate_locals(input_list, OUTPUT_KEY_LIST)

    # dicts
    # Only the (transposed) weights go back to the sites; everything else is
    # cached for the final remote_1 assembly.
    output_dict = {
        "w_locals": aggegated_dict["w_local"].T.tolist(),
        "phase": "remote_0"
    }

    cache_dict = output_dict.copy()
    cache_dict["intercept_locals"] = aggegated_dict["intercept_local"].tolist()
    cache_dict["rmse_train_locals"] = aggegated_dict["rmse_train_local"].tolist()
    cache_dict["rmse_test_locals"] = aggegated_dict["rmse_test_local"].tolist()
    cache_dict["mae_train_locals"] = aggegated_dict["mae_train_local"].tolist()
    cache_dict["mae_test_locals"] = aggegated_dict["mae_test_local"].tolist()
    cache_dict["n_train_samples_locals"] = aggegated_dict["n_train_samples_local"].tolist()
    cache_dict["n_test_samples_locals"] = aggegated_dict["n_test_samples_local"].tolist()

    result_dict = {"output": output_dict, "cache": cache_dict}
    return json.dumps(result_dict)

"""
============================================================================
The below function does the following tasks
1. organizes the local and owner results for final results output of the
   pipeline
----------------------------------------------------------------------------
This function takes in following inputs in args['input'] and args['cache']
----------------------------------------------------------------------------
- input:
    w_owner : weight coefficients of owner site
    intercept_owner: intercept of local site
    rmse_train_owner: root mean square error of training data (owner)
    rmse_test_owner: root mean square error of test data (owner)
    mae_train_owner: mean absolute error of training data (owner)
    mae_test_owner: mean absolute error of test data (owner)
    n_samples_train_owner: number of samples of training data (owner)
    n_samples_test_owner: number of samples of test data (owner)
    computation_phase : local_1
- cache:
    w_locals : aggregate weight coefficients of local sites
    intercept_locals: aggregate intercept of local sites
    rmse_train_locals: aggregate root mean square error
    rmse_test_locals: aggregate root mean square error of test data
    mae_train_locals: mean absolute error of training data
    mae_test_locals: mean absolute error of test data
    n_train_samples_locals: number of training samples
    n_test_samples_locals: number of test samples
----------------------------------------------------------------------------
And gives the following output:
----------------------------------------------------------------------------
- output :
    w_owner : weight coefficients of owner site
    intercept_owner: intercept of local site
    rmse_train_owner: root mean square error of training data (owner)
    rmse_test_owner: root mean square error of test data (owner)
    n_samples_train_owner: number of samples of training data (owner)
    n_samples_test_owner: number of samples of test data (owner)
    wlocals : aggregate weight coefficients of local sites
    intercept_locals: aggregate intercept of local sites
    rmse_train_locals: aggregate root mean square error
    rmse_test_locals: aggregate root mean square error of test data
    n_samples_locals: aggregate number of samples
============================================================================
"""
def remote_1(args):
    input_list = args["input"]
    state_list = args["state"]
    # Fall back to "local0" when the computation state names no owner site.
    owner = state_list["owner"] if "owner" in state_list else "local0"
    dict_owner = input_list[owner]
    dict_locals = args["cache"]

    # combine owner and locals
    # Each value is a [data, display-type] pair consumed by the UI layer.
    output_dict = {
        "w_locals": [dict_locals.get("w_locals"), "arrays"],
        "intercept_locals": [dict_locals.get("intercept_locals"), "array"],
        "w_owner": [dict_owner.get("w_owner"), "array"],
        "intercept_owner": [dict_owner.get("intercept_owner"), "number"],
        "n_train_samples_locals": [dict_locals.get("n_train_samples_locals"), "array"],
        "n_test_samples_locals": [dict_locals.get("n_test_samples_locals"), "array"],
        "rmse_train_locals": [dict_locals.get("rmse_train_locals"), "tables"],
        "rmse_test_locals": [dict_locals.get("rmse_test_locals"), "tables"],
        "mae_train_locals": [dict_locals.get("mae_train_locals"), "tables"],
        "mae_test_locals": [dict_locals.get("mae_test_locals"), "tables"],
        "n_train_samples_owner": [dict_owner.get("n_train_samples_owner"), "number"],
        "n_test_samples_owner": [dict_owner.get("n_test_samples_owner"), "number"],
        "rmse_train_owner": [dict_owner.get("rmse_train_owner"), "table"],
        "rmse_test_owner": [dict_owner.get("rmse_test_owner"), "table"],
        "mae_train_owner": [dict_owner.get("mae_train_owner"), "table"],
        "mae_test_owner": [dict_owner.get("mae_test_owner"), "table"],
    }

    result_dict = {"output": output_dict, "success": True}
    return json.dumps(result_dict)


if __name__ == "__main__":
    # The coordinator passes the whole computation state as JSON on stdin;
    # the phase name of the just-finished local step selects the handler.
    parsed_args = json.loads(sys.stdin.read())
    phase_key = list(cf.list_recursive(parsed_args, "phase"))

    if "local_0" in phase_key:
        result_dict = remote_0(parsed_args)
        sys.stdout.write(result_dict)
    elif "local_1" in phase_key:
        result_dict = remote_1(parsed_args)
        sys.stdout.write(result_dict)
    else:
        raise Exception("Error occurred at Remote")
{"hexsha": "5bcba636dc5c9ba8b824f719227d66d5c7992da4", "size": 7716, "ext": "py", "lang": "Python", "max_stars_repo_path": "fnc_scripts/remote.py", "max_stars_repo_name": "trendscenter/decentralized_brainage_paper", "max_stars_repo_head_hexsha": "20ddcc537aa50a4576ff33949f6b3cbd053a1521", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fnc_scripts/remote.py", "max_issues_repo_name": "trendscenter/decentralized_brainage_paper", "max_issues_repo_head_hexsha": "20ddcc537aa50a4576ff33949f6b3cbd053a1521", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fnc_scripts/remote.py", "max_forks_repo_name": "trendscenter/decentralized_brainage_paper", "max_forks_repo_head_hexsha": "20ddcc537aa50a4576ff33949f6b3cbd053a1521", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.593220339, "max_line_length": 97, "alphanum_fraction": 0.6378952825, "include": true, "reason": "import numpy", "num_tokens": 1635}
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# NOTE(review): hard-coded absolute paths — this script only runs as-is on
# the author's machine.
df_full = pd.read_csv('/Users/mac/Documents/GitHub/my_shots/throw-the-ball/data/kobe/data.csv')
df_sample = pd.read_csv('/Users/mac/Documents/GitHub/my_shots/throw-the-ball/data/kobe/sample_submission.csv')

print('\n Full dataset \n', df_full.head())
print('\n Sample Submission \n')
print(df_sample.head())

# Rows with a known shot outcome form the training set (the rest have NaN
# in shot_made_flag).
df_train = df_full[df_full['shot_made_flag'].notnull()]
print('\nfull dataset size:', df_full.shape)
print('train dataset size:', df_train.shape)

# creating a basic scatter plot to show the data
sns.set_style('white')
sns.set_color_codes()
plt.figure(figsize=(12,11))
plt.scatter(df_train['loc_x'],df_train['loc_y'])
plt.xlim(300,-300)  # x axis reversed so the chart matches court orientation
plt.ylim(-100,500)
# plt.show()

# Basketball court lines
from matplotlib.patches import Circle, Rectangle, Arc

def draw_court(ax=None, color='black', lw=2, outer_lines=False):
    """Draw half-court lines onto `ax` (current axes if None) and return it.

    Coordinates appear to be in tenths of feet with the hoop at the origin
    (e.g. hoop radius 7.5, paint width 160) — TODO confirm against the data.
    """
    # If an axes object isn't provided to plot onto, just get current one
    if ax is None:
        ax = plt.gca()

    hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)

    # Create backboard
    backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color)

    # The paint
    # Create the outer box of the paint, width=16ft, height=19ft
    outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color, fill=False)
    # Create the inner box of the paint, width=12ft, height=19ft
    inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color, fill=False)

    # Create free throw top arc
    top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180, linewidth=lw, color=color, fill=False)
    # Create free throw bottom arc
    bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color, linestyle='dashed')
    # Restricted Zone, it is an arc with 4ft radius from center of the hoop
    restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw, color=color)

    # Three point line
    # Create the side 3pt lines, they are 14ft long before they begin to arc
    corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color)
    corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
    three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw, color=color)

    # Center Court
    center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color)
    center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw, color=color)

    # List of the court elements to be plotted onto the axes
    court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
                      bottom_free_throw, restricted, corner_three_a,
                      corner_three_b, three_arc, center_outer_arc,
                      center_inner_arc]

    if outer_lines:
        # Draw the half court line, baseline and side out bound lines
        outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw, color=color, fill=False)
        court_elements.append(outer_lines)

    # Add the court elements onto the axes
    for element in court_elements:
        ax.add_patch(element)

    return ax

# let's draw the court
plt.figure(figsize=(12,11))
plt.scatter(df_train['loc_x'],df_train['loc_y'])
draw_court(outer_lines=True)
# and now draw the shots
plt.ylim(-100,500)
plt.xlim(300,-300)
# plt.show()

# heatmap
cmap=plt.cm.YlOrRd_r

# NOTE(review): sns.jointplot(stat_func=...) and positional x/y arguments
# were removed in newer seaborn releases — confirm the pinned version.
joint_shot_chart = sns.jointplot(df_train['loc_x'],df_train['loc_y'], stat_func=None, kind='kde', space=0, color=cmap(0.1), cmap=cmap, n_levels=50)
joint_shot_chart.fig.set_size_inches(12,11)

# A joint plot has 3 Axes, the first one called ax_joint
ax = joint_shot_chart.ax_joint
draw_court(ax)

# Adjust the axis limits and orientation of the plot in order
# to plot half court, with the hoop by the top of the plot
ax.set_xlim(-250,250)
ax.set_ylim(422.5, -47.5)

# Get rid of axis labels and tick marks
ax.set_xlabel('')
ax.set_ylabel('')
# NOTE(review): string 'off' for labelbottom/labelleft is rejected by
# modern matplotlib (expects bool) — confirm the pinned version.
ax.tick_params(labelbottom='off', labelleft='off')

ax.set_title('Kobe Bryant Career FGA', y=1.2, fontsize=18)

plt.show()
{"hexsha": "28966744604725b28258763e9ed2f5ad313e4329", "size": 4484, "ext": "py", "lang": "Python", "max_stars_repo_path": "NBA/player_analysis/exploring_kobe_shots.py", "max_stars_repo_name": "blmendes/basketball-shot-mechanics", "max_stars_repo_head_hexsha": "bcc02e65b66c6523187ae17a28652c8bdb5574b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-13T19:32:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T19:32:17.000Z", "max_issues_repo_path": "NBA/player_analysis/exploring_kobe_shots.py", "max_issues_repo_name": "blmendes/basketball-shot-mechanics", "max_issues_repo_head_hexsha": "bcc02e65b66c6523187ae17a28652c8bdb5574b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NBA/player_analysis/exploring_kobe_shots.py", "max_forks_repo_name": "blmendes/basketball-shot-mechanics", "max_forks_repo_head_hexsha": "bcc02e65b66c6523187ae17a28652c8bdb5574b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-29T16:02:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-29T16:02:53.000Z", "avg_line_length": 35.03125, "max_line_length": 110, "alphanum_fraction": 0.6445138269, "include": true, "reason": "import numpy", "num_tokens": 1207}
import dgl
import ogb
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ogb.nodeproppred import DglNodePropPredDataset, Evaluator

# Run configuration / hyper-parameters.
device_id=0  # id of the GPU to use
n_layers=3  # number of layers (input + hidden + output)
n_hiddens=256  # number of hidden units per layer
dropout=0.5
lr=0.01
epochs=300
runs=10  # repeat 10 runs and report aggregate statistics
log_steps=50


def train(model, g,feats,y_true,train_idx,optimizer):
    """Run one full-batch training step.

    :param model: the model
    :param g: the graph
    :param feats: node features
    :param y_true: node labels
    :param train_idx: indices of the training nodes
    :param optimizer: the optimizer
    :return: loss value (float)
    """
    model.train()

    optimizer.zero_grad()
    # Forward over the whole graph, but compute the loss on training nodes only.
    out = model(g, feats)[train_idx]
    loss = F.nll_loss(out, y_true.squeeze(1)[train_idx])
    loss.backward()
    optimizer.step()

    return loss.item()


@torch.no_grad()
def test(model, g, feats, y_true, split_idx, evaluator):
    """Evaluate accuracy on the train/valid/test splits.

    :param model: the model
    :param g: the graph
    :param feats: node features
    :param y_true: node labels
    :param split_idx: dict with 'train'/'valid'/'test' node indices
    :param evaluator: OGB evaluator
    :return: (train_acc, valid_acc, test_acc)
    """
    model.eval()

    out = model(g, feats)
    y_pred = out.argmax(dim=-1, keepdim=True)

    train_acc = evaluator.eval({
        'y_true': y_true[split_idx['train']],
        'y_pred': y_pred[split_idx['train']],
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': y_true[split_idx['valid']],
        'y_pred': y_pred[split_idx['valid']],
    })['acc']
    test_acc = evaluator.eval({
        'y_true': y_true[split_idx['test']],
        'y_pred': y_pred[split_idx['test']],
    })['acc']

    return train_acc, valid_acc, test_acc


class Logger(object):
    """Records (train, valid, test) accuracies per run and prints statistics."""
    def __init__(self, runs, info=None):
        self.info = info
        self.results = [[] for _ in range(runs)]

    def add_result(self, run, result):
        # result is a (train_acc, valid_acc, test_acc) triple.
        assert len(result) == 3
        assert run >= 0 and run < len(self.results)
        self.results[run].append(result)

    def print_statistics(self, run=None):
        if run is not None:
            # Single-run summary: model selection by best validation accuracy.
            result = 100 * torch.tensor(self.results[run])
            argmax = result[:, 1].argmax().item()
            print(f'Run {run + 1:02d}:')
            print(f'Highest Train: {result[:, 0].max():.2f}')
            print(f'Highest Valid: {result[:, 1].max():.2f}')
            print(f' Final Train: {result[argmax, 0]:.2f}')
            print(f' Final Test: {result[argmax, 2]:.2f}')
        else:
            # All-runs summary: mean ± std of each metric over the runs.
            result = 100 * torch.tensor(self.results)

            best_results = []
            for r in result:
                train1 = r[:, 0].max().item()
                valid = r[:, 1].max().item()
                train2 = r[r[:, 1].argmax(), 0].item()
                test = r[r[:, 1].argmax(), 2].item()
                best_results.append((train1, valid, train2, test))

            best_result = torch.tensor(best_results)

            print(f'All runs:')
            r = best_result[:, 0]
            print(f'Highest Train: {r.mean():.2f} ± {r.std():.2f}')
            r = best_result[:, 1]
            print(f'Highest Valid: {r.mean():.2f} ± {r.std():.2f}')
            r = best_result[:, 2]
            print(f' Final Train: {r.mean():.2f} ± {r.std():.2f}')
            r = best_result[:, 3]
            print(f' Final Test: {r.mean():.2f} ± {r.std():.2f}')


device = f'cuda:{device_id}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)

dataset = DglNodePropPredDataset(name='ogbn-arxiv')
split_idx = dataset.get_idx_split()
g,labels = dataset[0]
# Features are extracted BEFORE to_bidirected, which returns a new graph
# (node data is read off the original graph here).
feats = g.ndata['feat']
g = dgl.to_bidirected(g)
feats, labels = feats.to(device), labels.to(device)
train_idx = split_idx['train'].to(device)

from dgl.nn import GraphConv

class GCN(nn.Module):
    """Multi-layer GCN: GraphConv -> BatchNorm -> ReLU -> Dropout between layers."""
    def __init__(self,in_feats,n_hiddens,n_classes,n_layers,dropout):
        super(GCN, self).__init__()
        self.layers = nn.ModuleList()
        self.bns = nn.ModuleList()
        # Input layer, (n_layers - 2) hidden layers, then the output layer.
        self.layers.append(GraphConv(in_feats,n_hiddens,'both'))
        self.bns.append(nn.BatchNorm1d(n_hiddens))
        for _ in range(n_layers - 2):
            self.layers.append(GraphConv(n_hiddens, n_hiddens, 'both'))
            self.bns.append(nn.BatchNorm1d(n_hiddens))
        self.layers.append(GraphConv(n_hiddens, n_classes, 'both'))
        self.dropout = dropout

    def reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, g, x):
        for i, layer in enumerate(self.layers[:-1]):
            x = layer(g,x)
            x = self.bns[i](x)
            x = F.relu(x)
            x = F.dropout(x,p=self.dropout,training=self.training)
        # No activation/dropout after the last layer; log-probs for nll_loss.
        x = self.layers[-1](g,x)
        return x.log_softmax(dim=-1)

model = GCN(in_feats=feats.size(-1),n_hiddens=n_hiddens,n_classes=dataset.num_classes, n_layers=n_layers,dropout=dropout).to(device)
evaluator = Evaluator(name='ogbn-arxiv')
logger = Logger(runs)

for run in range(runs):
    # Fresh parameters and optimizer state for every run.
    model.reset_parameters()
    optimizer = torch.optim.Adam(model.parameters(),lr=lr)
    for epoch in range(1,1 + epochs):
        loss = train(model,g,feats,labels,train_idx,optimizer)
        result = test(model, g, feats, labels, split_idx, evaluator)
        logger.add_result(run,result)

        if epoch % log_steps == 0:
            train_acc, valid_acc, test_acc = result
            print(f'Run: {run + 1:02d}, '
                  f'Epoch: {epoch:02d}, '
                  f'Loss: {loss:.4f}, '
                  f'Train: {100 * train_acc:.2f}%, '
                  f'Valid: {100 * valid_acc:.2f}% '
                  f'Test: {100 * test_acc:.2f}%')
    logger.print_statistics(run)
logger.print_statistics()
{"hexsha": "5e40851ca744124524a841fe130cbc8e6d0a61c4", "size": 5648, "ext": "py", "lang": "Python", "max_stars_repo_path": "GNN/GCN.py", "max_stars_repo_name": "jaykay233/tensorflow_models", "max_stars_repo_head_hexsha": "5b60b2adfa5e2d82c59189da6398388ba58c6c33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GNN/GCN.py", "max_issues_repo_name": "jaykay233/tensorflow_models", "max_issues_repo_head_hexsha": "5b60b2adfa5e2d82c59189da6398388ba58c6c33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GNN/GCN.py", "max_forks_repo_name": "jaykay233/tensorflow_models", "max_forks_repo_head_hexsha": "5b60b2adfa5e2d82c59189da6398388ba58c6c33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.032967033, "max_line_length": 86, "alphanum_fraction": 0.5794971671, "include": true, "reason": "import numpy", "num_tokens": 1618}
import numpy as np

def L2_distance_1(a, b):
    """Compute the matrix of squared Euclidean distances between the
    columns of ``a`` and the columns of ``b``.

    Parameters
    ----------
    a, b : np.ndarray
        Arrays of identical shape. A 1-D input is treated as a single
        row; single-row inputs are padded with a row of zeros so that
        the column points live in at least two dimensions (this does
        not change the distances).

    Returns
    -------
    d : np.ndarray
        Square matrix where ``d[i, j]`` is the squared L2 distance
        between column ``i`` of ``a`` and column ``j`` of ``b``.
        Negative round-off is clipped to 0 and the diagonal is forced
        to 0.

    Raises
    ------
    ValueError
        If ``a`` and ``b`` do not have the same shape.
    """
    if a.shape != b.shape:
        raise ValueError("The dimensions of a and b don't agree")

    # Bug fix: handle 1-D inputs FIRST. A length-1 1-D array also
    # satisfies a.shape[0] == 1, and the original branch order then
    # concatenated along axis 0 of a 1-D array, yielding scalar column
    # norms and crashing at len(bb) below.
    if len(a.shape) == 1:
        a = a.reshape(1, -1)
        b = b.reshape(1, -1)
    if a.shape[0] == 1:
        # Pad single-row inputs with a zero row (distances unchanged).
        a = np.concatenate((a, np.zeros(a.shape)), axis=0)
        b = np.concatenate((b, np.zeros(b.shape)), axis=0)

    aa = np.sum(a * a, axis=0)   # squared norms of a's columns
    bb = np.sum(b * b, axis=0)   # squared norms of b's columns
    ab = np.dot(a.T, b)          # cross terms a_i . b_j

    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
    d = np.tile(aa.reshape(-1, 1), len(bb)) + np.tile(bb, (len(aa), 1)) - 2 * ab
    d = np.real(d)
    d[d < 0] = 0                        # clip negative round-off
    d = d * (1 - np.eye(d.shape[0]))    # force the diagonal to zero
    return d
{"hexsha": "90c9a6f40667be05bb9060f03c480b59e41f8c67", "size": 733, "ext": "py", "lang": "Python", "max_stars_repo_path": "SIMLR/src/L2_distance_1.py", "max_stars_repo_name": "5966466/SIMLR-python", "max_stars_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-19T07:20:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-17T16:50:18.000Z", "max_issues_repo_path": "SIMLR/src/L2_distance_1.py", "max_issues_repo_name": "5966466/SIMLR-python", "max_issues_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SIMLR/src/L2_distance_1.py", "max_forks_repo_name": "5966466/SIMLR-python", "max_forks_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.32, "max_line_length": 79, "alphanum_fraction": 0.519781719, "include": true, "reason": "import numpy", "num_tokens": 250}
import numpy as np
import os, shutil
import pickle as pkl

from dummylearning.utilities.info import Info
from dummylearning.plots.survival import Plots
from dummylearning.analysis.survival import Analysis


class Report(Info):
    """Write a full report for a fitted survival model.

    Produces, inside an output directory: coefficient/odds plots, CSV tables,
    Kaplan-Meier and ROC figures, a pickled model, and a markdown summary.
    """

    def __init__(self, model, verbose = True):
        """Bind plotting and analysis helpers to *model*.

        Parameters:
            model: fitted survival model (project type) the report describes.
            verbose: forwarded to the Info base class logger.
        """
        super().__init__(verbose)

        self.model = model
        # Plot and analysis helpers share the same model instance.
        self.plots = Plots(self.model)
        self.analysis = Analysis(self.model)

    def generateCoefFile(self, filename):
        """Dump raw coefficient name/value pairs as a semicolon-separated file."""
        coefficientsValues, coefficientsNames = self.analysis.coefficients()
        with open(filename, "w") as outfile:
            outfile.write("Coefficient Name;Value\n")
            for name, value in zip(coefficientsNames, coefficientsValues):
                outfile.write(f"{name};{value}\n")

    def generateOddsFile(self, filename):
        """Dump odds-ratio name/value pairs as a semicolon-separated file."""
        coefficientsValues, coefficientsNames = self.analysis.oddsRatio()
        with open(filename, "w") as outfile:
            outfile.write("Coefficient Name;Value\n")
            for name, value in zip(coefficientsNames, coefficientsValues):
                outfile.write(f"{name};{value}\n")

    def generateLog2OddsFile(self, filename):
        """Dump log2 odds-ratio name/value pairs as a semicolon-separated file."""
        coefficientsValues, coefficientsNames = self.analysis.log2oddsRatio()
        with open(filename, "w") as outfile:
            outfile.write("Coefficient Name;Value\n")
            for name, value in zip(coefficientsNames, coefficientsValues):
                outfile.write(f"{name};{value}\n")

    def generate(self, outfile):
        """Create (or wipe and recreate) directory *outfile* and fill it with
        all plots, CSV tables, the pickled model, and report.md."""
        self.upgradeInfo("Generating model report")

        # Start from a clean directory: recreate it if it already exists.
        try:
            os.mkdir(outfile)
        except FileExistsError:
            shutil.rmtree(outfile)
            os.mkdir(outfile)

        #self.saveCoefs(f"{outfile}/coeffs.csv")
        self.plots.coefficients(f"{outfile}/coefs", extension = "png")
        self.plots.oddsRatio(f"{outfile}/odds", extension = "png")
        self.plots.log2oddsRatio(f"{outfile}/log2odds", extension = "png")
        self.plots.kaplanMeier(f"{outfile}/kaplanmeier", extension = "png")
        self.plots.rocCurve(f"{outfile}/rocCurve", extension = "png")

        self.generateCoefFile(f"{outfile}/coef.csv")
        self.generateOddsFile(f"{outfile}/odds.csv")
        # NOTE(review): "lof2odds.csv" looks like a typo for "log2odds.csv";
        # kept as-is since downstream consumers may expect this exact name.
        self.generateLog2OddsFile(f"{outfile}/lof2odds.csv")

        # NOTE(review): this file handle is never closed explicitly; consider
        # a `with` block so the pickle is flushed deterministically.
        pickle_file = open(f"{outfile}/model.pkl", "wb")
        pkl.dump(self.model, pickle_file)

        with open(outfile + "/report.md", "w") as file:
            file.write(f"# {type(self).__name__}\n")
            file.write("## Coefficients Info\n")
            file.write(f"![coefficients](coefs.png)\n\n\n")
            file.write(f"![odds ratio](odds.png)\n\n\n")
            file.write(f"![log2 odds ratio](log2odds.png)\n\n\n")
            file.write("## ROC curves single\n")
            # One ROC image and one Kaplan-Meier image per dataset split.
            for dataset in self.model.dataset:
                file.write(f"![{dataset} roc curve](rocCurve_{dataset}.png)\n\n\n")
            for dataset in self.model.dataset:
                file.write(f"![{dataset} kaplan-meier](kaplanmeier_{dataset}.png)\n\n\n")
            file.write(self.parametersReport() + "\n\n")
            file.write(self.infoReport() + "\n\n")

    def parametersReport(self):
        """Return a markdown bullet list of the model's hyperparameters."""
        self.upgradeInfo("Generating parameters report format")

        message = ["### Parameters"]
        for name, value in self.analysis.parameters().items():
            message.append(f"- ***{name}:*** {value}")

        return "\n".join(message)

    def infoReport(self):
        """Return a markdown section merging the process logs of this report,
        the model's data, the plotter, and the analyzer (sorted by key)."""
        self.upgradeInfo("Generating process info report format")

        message = ["### Process Info"]
        # Later dicts win on key collisions in this merge order.
        mergedDict = {**self.report, **self.model.data.report, **self.plots.report, **self.analysis.report}

        keys = list(mergedDict.keys())
        keys.sort()

        for key in keys:
            if "\n" in mergedDict[key]:
                # Indent continuation lines so multi-line entries render nested.
                aux = mergedDict[key].replace("\n", " \n&nbsp;&nbsp;&nbsp;&nbsp;")
                message.append(aux + " ")
            else:
                message.append(f"{mergedDict[key]} ")

        return "\n".join(message)
{"hexsha": "89ef9ea90017ff6745e8b933cd06afc569dfd4d3", "size": 4118, "ext": "py", "lang": "Python", "max_stars_repo_path": "dummylearning/reports/survival.py", "max_stars_repo_name": "JuantonioMS/dummylearning", "max_stars_repo_head_hexsha": "1780a7ba0f38633bcbfbacd4f35a31cce4c94a87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-08T12:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T12:41:39.000Z", "max_issues_repo_path": "dummylearning/reports/survival.py", "max_issues_repo_name": "JuantonioMS/dummylearning", "max_issues_repo_head_hexsha": "1780a7ba0f38633bcbfbacd4f35a31cce4c94a87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dummylearning/reports/survival.py", "max_forks_repo_name": "JuantonioMS/dummylearning", "max_forks_repo_head_hexsha": "1780a7ba0f38633bcbfbacd4f35a31cce4c94a87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6258992806, "max_line_length": 89, "alphanum_fraction": 0.596163186, "include": true, "reason": "import numpy", "num_tokens": 935}
#!/usr/bin/python
# encoding: utf-8
"""Portfolio-valuation script, mechanically minified by pyminifier.

Reads trade records from MongoDB (via a project PyMongoWrapper), values each
account's positions per business day, and plots the result. All single-letter
names below are pyminifier aliases; comments spell out the originals.
"""
# Builtin/constant aliases produced by the minifier.
M=float
t=str
X=list
g=False
b=dict
mV=True
mE=print
mF=any
import os
G=os.path          # alias for os.path
import sys
u=sys.path
u.append('/Users/luoyonggui/PycharmProjects/mayiutils_n1/mayiutils/db')
from pymongo_wrapper import PyMongoWrapper
import pandas as pd
# pandas function aliases.
N=pd.date_range
R=pd.to_datetime
L=pd.read_pickle
r=pd.concat
w=pd.DataFrame
U=pd.read_excel
import numpy as np
l=np.where


class Publisher:
    """Observer-pattern publisher interface (all methods are stubs)."""
    def __init__(x): pass
    def register(x): pass
    def unregister(x): pass
    def notify_all(x): pass


class Subscriber:
    """Observer-pattern subscriber interface (stub)."""
    def __init__(x): pass
    def notify(x): pass


from attr import attrs,attrib,fields
from cattr import unstructure,structure
from datetime import datetime,date
c=datetime.strptime
e=datetime.now


@attrs
class Transaction:
    """One trade record. Minified field names: price/num/fee, date, target, account.

    NOTE(review): default F=e() is evaluated once at class-definition time,
    so every default-constructed Transaction shares the same timestamp.
    """
    m=attrib(type=M,default=0)
    V=attrib(type=M,default=0)
    E=attrib(type=M,default=0)
    F=attrib(type=datetime,default=e())
    T=attrib(type=t,default='cash')
    k=attrib(type=t,default='pingan')


@attrs
class Trade(Publisher):
    """Publisher holding a transaction list and registered accounts.

    NOTE(review): methods access x._accounts / x._transaction_list, but the
    attrs fields were minified to p and a — the underscore attributes no
    longer exist, so these methods would raise AttributeError if called.
    """
    p=attrib(factory=X,repr=g)
    a=attrib(factory=X)
    def register(x,k):
        if k not in x._accounts:
            x._accounts.append(k)
    def unregister(x,k):
        if k not in x._accounts:
            x._accounts.remove(k)
    def notify_all(x):
        for a in x._accounts:
            a.notify(x._transaction_list)


@attrs
class Account(Subscriber):
    """Subscriber that accumulates cash and per-target (shares, avg-cost) positions.

    NOTE(review): same minification problem as Trade — x._name / x._trade_list /
    x._cash / x._positions do not match the attrs fields d/S/W/q.
    """
    d=attrib(type=t)
    S=attrib(factory=X,repr=g)
    W=attrib(factory=b)
    q=attrib(type=M,default=0)
    def notify(x,f):
        for t in f:
            if t.account==x._name:
                x._trade_list.append(t)
                if t.target=='cash':
                    x._cash+=t.num
                else:
                    m=t.num
                    o=t.num*t.price
                    # Commission: 0.025% of turnover, presumably with a 5-unit
                    # minimum (typical CN broker rule) — confirm.
                    h=5 if o*0.00025<5 else o*0.00025
                    if t.target in x._positions:
                        m+=x._positions[t.target][0]
                        o+=x._positions[t.target][1]*x._positions[t.target][0]
                    # New weighted average cost including commission.
                    z=(o+h)/m
                    x._positions[t.target]=(m,z)


def add_transactions():
    """Load trades from Excel, synthesize matching cash legs, insert into Mongo.

    NOTE(review): the local `r=[]` shadows the module alias r=pd.concat, so the
    later `df=r([df,A],...)` call would raise TypeError (a list is not callable).
    """
    df=U('data/transactions.xlsx')
    r=[]
    # For every non-cash trade, append the offsetting cash movement
    # (negative turnover plus fee).
    for I in df.itertuples():
        if I.target!='cash':
            r.append([I.trade_date,I.account,'cash',1,-1*(I.price*I.num*1+I.fee_rate),0])
    A=w(r,columns=df.columns)
    df=r([df,A],ignore_index=mV)
    mE(df)
    if not df.empty:
        K=PyMongoWrapper()
        K.insertDataframe(df,'finance','transactions')
    else:
        mE('no transactions!')


# Lookup table mapping instrument names to ticker codes, loaded from pickles
# next to this script (stock dict + OTC fund series concatenated).
H=G.dirname(G.abspath(__file__))
j=L(G.join(H,'data/stock_dict.pkl'))
y=L(G.join(H,'data/fund_otc_series.pkl'))
i=j.append(y)


def get_last_day_market_val(T,D):
    """Return (trade_date, close) of the last quote for target T on/before day D.

    T: target name or ticker ('cash' short-circuits to price 1);
    D: date string 'YYYYMMDD'.
    """
    if T=='cash':
        return e().date(),1
    # Already a ticker if it carries a Shenzhen/Shanghai suffix; otherwise
    # resolve the display name through the lookup series.
    if mF([T.endswith('.SZ'),T.endswith('.SH')]):
        s=T
    else:
        s=i.loc[T]
    K=PyMongoWrapper()
    v=K.getCollection('finance',s)
    # Most recent document with trade_date <= D.
    r=X(K.findAll(v,{'trade_date':{'$lte':c(D,'%Y%m%d')}},fieldlist=['trade_date','close','pct_chg'],sort=[('trade_date',-1)],limit=1))[0]
    return r['trade_date'].date(),r['close']


def get_account_val(J,D):
    """Mark every account's holdings to market as of day D.

    J: transactions DataFrame indexed by trade_date; D: 'YYYYMMDD' string.
    Returns a one-row DataFrame (index = D) of market value per account + TOTAL.
    """
    J=J[:D]
    Q=J.groupby(['account','target'])['num','cost'].sum()
    Q.reset_index(inplace=mV)
    Q['price']=0
    Q['tdate']=0
    for i in J['target'].unique():
        Q.loc[Q.target==i,['tdate','price']]=get_last_day_market_val(i,D)
    # Classify holdings: PA account -> STOCK, ZLT -> FUND, TTJ/TTA -> OTC_FUND.
    Q['type']=l((Q.account=='PA')&(Q.target!='cash'),'STOCK',l((Q.account=='ZLT')&(Q.target!='cash'),'FUND',l((Q.account.isin(['TTJ','TTA']))&(Q.target!='cash'),'OTC_FUND','cash')))
    Q['market_val']=Q.num*Q.price
    Q['profit']=Q['market_val']-Q['cost']
    s=Q.groupby('account')['market_val'].sum()
    s.loc['TOTAL']=s.sum()
    B=w(s).T
    B.index.name='date'
    B.index=R([D])
    return B


if __name__=='__main__':
    f=[]
    K=PyMongoWrapper()
    v=K.getCollection('finance','transactions')
    n='trade_date account target price num fee_rate'.split()
    rs=K.findAll(v,fieldlist=n,sort=[('trade_date',1)])
    J=w(rs)
    J.set_index('trade_date',inplace=mV)
    J['cost']=J['price']*J['num']*(1+J.fee_rate)
    C=w()
    # Value the portfolio on each business day in the range, stacking rows.
    P=N('20190921','20190926',freq='B')
    for d in P:
        d=d.strftime('%Y%m%d')
        if C.empty:
            C=get_account_val(J,d)
        else:
            C=r([C,get_account_val(J,d)])
    mE(C)
    C.plot(subplots=mV,figsize=(8,14))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
{"hexsha": "aeabc8d2397cb5bcd3d1e5c05fa93aa0fa7399e7", "size": 3838, "ext": "py", "lang": "Python", "max_stars_repo_path": "engineering/test_obfuscate1.py", "max_stars_repo_name": "mayi140611/mayiexamples", "max_stars_repo_head_hexsha": "221cf9e8916d81198df7355894ec59dc334ae0af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "engineering/test_obfuscate1.py", "max_issues_repo_name": "mayi140611/mayiexamples", "max_issues_repo_head_hexsha": "221cf9e8916d81198df7355894ec59dc334ae0af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "engineering/test_obfuscate1.py", "max_forks_repo_name": "mayi140611/mayiexamples", "max_forks_repo_head_hexsha": "221cf9e8916d81198df7355894ec59dc334ae0af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-09T12:48:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-19T11:43:22.000Z", "avg_line_length": 24.9220779221, "max_line_length": 178, "alphanum_fraction": 0.672746222, "include": true, "reason": "import numpy", "num_tokens": 1213}
"""Fit a 1-D Gaussian to peak-vs-subref position data and save the plot."""
# standard libraries
import argparse
import pathlib

# dependent packages
import numpy as np
import matplotlib.pyplot as plt
from astropy import table
from astropy.modeling import models, fitting

# module settings
plt.style.use("seaborn-darkgrid")
plt.style.use("seaborn-muted")

# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("output_dir", help="output directory")
parser.add_argument("result_files", help="fitting results", nargs="*")
args = parser.parse_args()

out_path = pathlib.Path(args.output_dir)
fit_paths = [pathlib.Path(rf) for rf in args.result_files]

# directory settings
if not out_path.exists():
    out_path.mkdir(parents=True)

# collect (subref position, peak) pairs from each fitting-result table
positions = []
amplitudes = []
for path in fit_paths:
    result = table.Table.read(path, format="ascii")
    # subref_x was used historically; the Y axis is fitted at the moment
    # positions.append(result['subref_x'][0])
    positions.append(result["subref_y"][0])
    amplitudes.append(result["peak"][0])

positions = np.array(positions)
amplitudes = np.array(amplitudes)

# least-squares Gaussian fit, seeded with the largest observed peak
initial = models.Gaussian1D(amplitude=amplitudes.max())
fitter = fitting.LevMarLSQFitter()
model = fitter(initial, positions, amplitudes)

# plot data, fitted curve, and fitted mean
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
grid = np.linspace(positions.min(), positions.max(), 100)
ax.plot(positions, amplitudes, "o", label="data")
ax.plot(grid, model(grid), label=f"model (mean: {model.mean.value:.2f})")
ax.axvline(model.mean.value, linestyle="--")
ax.set_xlabel("subref X/Y")
ax.set_ylabel("peak")
ax.legend()

plt.tight_layout()
plt.savefig(out_path / "gaussian_fit.png")
plt.show()
{"hexsha": "674bc126b38412bb43447e69a800ac450f923fd3", "size": 1541, "ext": "py", "lang": "Python", "max_stars_repo_path": "pipelines/subrefxy_fit.py", "max_stars_repo_name": "deshima-dev/qlook-pipeline", "max_stars_repo_head_hexsha": "90f520e101a58ae1dc1ffd9317ad16035e8efe0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pipelines/subrefxy_fit.py", "max_issues_repo_name": "deshima-dev/qlook-pipeline", "max_issues_repo_head_hexsha": "90f520e101a58ae1dc1ffd9317ad16035e8efe0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-17T17:09:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T14:10:47.000Z", "max_forks_repo_path": "pipelines/subrefxy_fit.py", "max_forks_repo_name": "deshima-dev/qlook-pipeline", "max_forks_repo_head_hexsha": "90f520e101a58ae1dc1ffd9317ad16035e8efe0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5689655172, "max_line_length": 70, "alphanum_fraction": 0.7365347177, "include": true, "reason": "import numpy,from astropy", "num_tokens": 415}
"""Entry point for evolving CaDiCaL branching heuristics, either with the
project's own GP implementation or with the `monkeys` GP library."""
import argparse
import functools
import importlib
import logging
import random
import shutil
import subprocess
import sys
import time
from distutils.util import strtobool
from pathlib import Path

import numpy as np
from tensorboardX import SummaryWriter

# Make the repository root importable before pulling in project packages.
curPath = Path(__file__).resolve()
sys.path.append(str(curPath.parents[1]))

from synthesis.GP import GP
from synthesis.config import cfg
from synthesis.Scheme import Scheme
from monkeys import optimize, tournament_select, next_generation, build_tree
from monkeys.typing import params
from monkeys.search import require, pre_evaluate


def get_seed():
    # Use the configured seed when given, otherwise draw a fresh random one.
    return cfg.seed if cfg.seed is not None else random.randrange(sys.maxsize)


def main():
    """Run the in-project GP loop (or evaluation-only mode if cfg.eval is set)."""
    writer = SummaryWriter('../runs/' + cur_time)
    if cfg.eval is not None:
        # Evaluation-only mode: score the given schemes on the test split and exit.
        init_dataset(cfg.test_threshold)
        schemes = cfg.eval
        gp = GP()
        gp.init_population(schemes, True)
        gp.report(len(schemes))
        return
    try:
        init_dataset(cfg.eval_threshold)
        gp1 = GP()
        logging.info('--- Initializing population ---')
        # cfg.load optionally seeds the population from saved schemes.
        gp1.init_population(scheme_list=cfg.load)
        for i in range(cfg.epoch):
            logging.info('--- Epoch {} starts ---'.format(i))
            # cfg.report is presumably the number of top schemes to log — confirm.
            tops, avg = gp1.report(cfg.report)
            gp1.evolve()
            if i % cfg.save == 0:
                gp1.save('epoch_{}'.format(i))
            # tops[0] appears to be (solved_count, fitness) of the best scheme.
            writer.add_scalar('best_fitness', np.array(tops[0][1]), i)
            writer.add_scalar('best_solved', np.array(tops[0][0]), i)
            writer.add_scalar('avg_fitness', avg, i)
        # Final winner: keep its CSV, then re-evaluate on the test split.
        winner = gp1.get_winner()
        shutil.copy(winner.file, output_dir / 'winner.csv')
        logging.info('Winner: {}'.format(winner.file))
        winner.display()
        init_dataset(cfg.test_threshold)
        winner.eval(True)
        winner.rename('test')
        logging.info('{} solved, avg_time = {}, fitness = {}'.format(winner.solved, winner.rtime, winner.fitness))
    except AssertionError as err:
        logging.exception('Assertion failed :(')
        raise err


def monkeys():
    """Run the same search through the `monkeys` GP library, using the grammar
    module named by cfg.monkeys."""
    init_dataset(cfg.eval_threshold)
    grammar = importlib.import_module(cfg.monkeys)

    def display(codes):
        # Log a candidate heuristic, one code line per log record.
        logging.info('-----')
        for code in codes:
            logging.info(code)
        logging.info('-----\n')

    @require()
    @params('heuristic')
    @pre_evaluate
    def score(heuristic):
        """Fitness: embed the heuristic into CaDiCaL, rebuild, benchmark via
        shell scripts, and combine solved-count and runtime into one score."""
        try:
            Scheme.embed_cadical(heuristic)
            if not Scheme.compile_cadical():
                # Non-compiling candidates get the worst possible score.
                return -sys.maxsize
            subprocess.run('cd .. ; sh python/cadical.sh ' + str(cfg.eval_time), shell=True, check=True,
                           capture_output=True)
            process = subprocess.run('sh ../python/statistics.sh ' + str(output_dir) + ' ' + str(cfg.eval_time),
                                     shell=True, check=True, capture_output=True)
            out = process.stdout.decode().strip()
            logging.info(out)
            # Script output: first token = problems solved, last token = avg
            # runtime with a trailing unit character stripped.
            out = out.split()
            solved, rtime = int(out[0]), float(out[-1][:-1])
            display(heuristic)
            # Reward solved problems quadratically; small bonus for speed.
            return 30 * solved ** 2 + 60 / rtime
        except subprocess.CalledProcessError as err:
            logging.error(err)
            return -sys.maxsize

    build_tree_ = functools.partial(build_tree, selection_strategy=grammar.selection_strategy)
    select_fn = functools.partial(tournament_select, selection_size=cfg.tournament_size)
    winner = optimize(score, iterations=cfg.epoch, population_size=cfg.pop_size,
                      next_generation=functools.partial(next_generation, select_fn=select_fn,
                                                        build_tree=build_tree_,
                                                        crossover_rate=cfg.crossover_rate,
                                                        mutation_rate=cfg.mutation_rate))
    display(winner.evaluate())


def init_dataset(time_lim):
    """Filter the benchmark set to problems solvable within time_lim seconds
    (delegated to python/filter.py); logs the resulting problem count."""
    logging.info('\nFiltering datasets for evaluation...')
    filtering = subprocess.run('cd ..; python python/filter.py -T ' + str(time_lim), shell=True, check=True,
                               capture_output=True)
    out = filtering.stdout.decode().strip()
    logging.info(out + ' problems in total\n')


def parse_args():
    """Parse CLI flags and overlay every explicitly-given value onto cfg."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-F', '--grammar_file', type=str)
    parser.add_argument('-O', '--output_root', type=str)
    parser.add_argument('-N', '--pop_size', type=int)
    parser.add_argument('-D', '--depth_lim', type=int)
    parser.add_argument('-S', '--tournament_size', type=int)
    parser.add_argument('-e', '--epoch', type=int)
    parser.add_argument('-t', '--eval_time', type=int)
    parser.add_argument('-r', '--eval_threshold', type=int)
    parser.add_argument('-T', '--test_time', type=int)
    parser.add_argument('-H', '--test_threshold', type=int)
    parser.add_argument('-E', '--eval', nargs='+', default=None, help='Evaluation mode: run eval for given schemes.')
    parser.add_argument('-R', '--score', type=str)
    parser.add_argument('-s', '--STGP', type=lambda x: bool(strtobool(x)))
    parser.add_argument('-M', '--monkeys', type=str)
    parser.add_argument('-L', '--load', nargs='+', default=None, help='Initialize from given schemes.')
    args = parser.parse_args()
    # Only override cfg defaults for flags the user actually supplied.
    for k, v in vars(args).items():
        if v is not None:
            cfg.__setattr__(k, v)


if __name__ == '__main__':
    parse_args()
    # Timestamped output directory under cfg.output_root.
    cur_time = time.strftime('%m%d-%H%M%S')
    output_dir = Path(cfg.output_root) / cur_time
    cfg.output_dir = output_dir
    Path.mkdir(Path(cfg.output_root), exist_ok=True)
    Path.mkdir(output_dir, exist_ok=True)
    # Log to file AND mirror to stdout.
    logging.basicConfig(format='%(levelname)s: %(message)s', filename=str(output_dir / 'log.txt'),
                        level=logging.INFO)
    stdoutLogger = logging.StreamHandler(sys.stdout)
    stdoutLogger.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(stdoutLogger)
    # Dump the effective config once for reproducibility.
    cfg_str = ' --- config ---\n'
    for k, v in vars(cfg).items():
        cfg_str += '\t' + k + ' = ' + str(v) + '\n'
    cfg_str += '--- End of config ---\n'
    logging.info(cfg_str)
    seed = get_seed()
    random.seed(seed)
    logging.info('Random seed: {}'.format(seed))
    # cfg.monkeys selects the monkeys-library backend; otherwise run main().
    if cfg.monkeys:
        monkeys()
    else:
        main()
{"hexsha": "024f928a459af92bbb9b35aef4e4f5025f5cb936", "size": 6262, "ext": "py", "lang": "Python", "max_stars_repo_path": "synthesis/main.py", "max_stars_repo_name": "AryaGuo/cadical", "max_stars_repo_head_hexsha": "159af5b21fbe090e5c1514d7659d2bee92b3402a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synthesis/main.py", "max_issues_repo_name": "AryaGuo/cadical", "max_issues_repo_head_hexsha": "159af5b21fbe090e5c1514d7659d2bee92b3402a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synthesis/main.py", "max_forks_repo_name": "AryaGuo/cadical", "max_forks_repo_head_hexsha": "159af5b21fbe090e5c1514d7659d2bee92b3402a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9885057471, "max_line_length": 118, "alphanum_fraction": 0.6237623762, "include": true, "reason": "import numpy", "num_tokens": 1391}
{"mathlib_filename": "Mathlib.Algebra.Order.Field.Defs", "llama_tokens": 0}
#ifndef VENTURA_PROCESS_HANDLE_HPP
#define VENTURA_PROCESS_HANDLE_HPP

#include <silicium/error_or.hpp>
#include <silicium/get_last_error.hpp>
#include <boost/swap.hpp>
#ifndef _WIN32
#include <sys/wait.h>
#endif

namespace ventura
{
#ifdef _WIN32
	// Move-only owner of a Win32 child-process HANDLE.
	// The destructor blocks until the child exits (and closes the handle);
	// call wait_for_exit() explicitly to observe the exit code.
	struct process_handle
	{
		// Empty handle: owns nothing, destructor is a no-op.
		process_handle() BOOST_NOEXCEPT : m_id(INVALID_HANDLE_VALUE)
		{
		}

		// Takes ownership of an existing process HANDLE.
		explicit process_handle(HANDLE id) BOOST_NOEXCEPT : m_id(id)
		{
		}

		~process_handle() BOOST_NOEXCEPT
		{
			if (m_id == INVALID_HANDLE_VALUE)
			{
				return;
			}
			// Blocks until the child terminates; the exit code is discarded here.
			wait_for_exit();
		}

		// Move construction/assignment transfer ownership; the moved-from
		// handle becomes empty.
		process_handle(process_handle &&other) BOOST_NOEXCEPT : m_id(INVALID_HANDLE_VALUE)
		{
			swap(other);
		}

		process_handle &operator=(process_handle &&other) BOOST_NOEXCEPT
		{
			swap(other);
			return *this;
		}

		void swap(process_handle &other) BOOST_NOEXCEPT
		{
			boost::swap(m_id, other.m_id);
		}

		// Waits for the child to exit and returns its exit code, or the last
		// Win32 error if the exit code cannot be read. Closes and empties the
		// handle on success, so this must be called at most once.
		// NOTE(review): the WaitForSingleObject return value is not checked —
		// a failed wait falls through to GetExitCodeProcess.
		SILICIUM_USE_RESULT
		Si::error_or<int> wait_for_exit() BOOST_NOEXCEPT
		{
			WaitForSingleObject(m_id, INFINITE);
			DWORD exit_code = 1;
			if (!GetExitCodeProcess(m_id, &exit_code))
			{
				return Si::get_last_error();
			}
			CloseHandle(m_id);
			m_id = INVALID_HANDLE_VALUE;
			return static_cast<int>(exit_code);
		}

	private:
		HANDLE m_id;

		SILICIUM_DELETED_FUNCTION(process_handle(process_handle const &))
		SILICIUM_DELETED_FUNCTION(process_handle &operator=(process_handle const &))
	};
#else
	// POSIX counterpart: move-only owner of a child pid. The destructor
	// reaps the child via waitpid.
	struct process_handle
	{
		// Empty handle: owns no pid.
		process_handle() BOOST_NOEXCEPT : m_id(-1)
		{
		}

		// Takes ownership of a child pid (must be >= 1 when waited on).
		explicit process_handle(pid_t id) BOOST_NOEXCEPT : m_id(id)
		{
		}

		~process_handle() BOOST_NOEXCEPT
		{
			if (m_id < 0)
			{
				return;
			}
			// NOTE(review): .get() presumably throws if waitpid failed — inside
			// a noexcept destructor that would call std::terminate. Confirm
			// error_or::get semantics.
			wait_for_exit().get();
		}

		// Move construction/assignment transfer ownership; the moved-from
		// handle becomes empty (pid -1).
		process_handle(process_handle &&other) BOOST_NOEXCEPT : m_id(-1)
		{
			swap(other);
		}

		process_handle &operator=(process_handle &&other) BOOST_NOEXCEPT
		{
			swap(other);
			return *this;
		}

		void swap(process_handle &other) BOOST_NOEXCEPT
		{
			boost::swap(m_id, other.m_id);
		}

		// Reaps the child and returns WEXITSTATUS, or errno on waitpid failure.
		// Empties the handle before waiting, so this must be called at most once.
		SILICIUM_USE_RESULT
		Si::error_or<int> wait_for_exit() BOOST_NOEXCEPT
		{
			int status = 0;
			int wait_id = Si::exchange(m_id, -1);
			assert(wait_id >= 1);
			if (waitpid(wait_id, &status, 0) < 0)
			{
				return Si::get_last_error();
			}
			int const exit_status = WEXITSTATUS(status);
			return exit_status;
		}

	private:
		pid_t m_id;

		SILICIUM_DELETED_FUNCTION(process_handle(process_handle const &))
		SILICIUM_DELETED_FUNCTION(process_handle &operator=(process_handle const &))
	};
#endif
}

#endif
{"hexsha": "199f39ebde8daeb39b46dad0d741e1ba91e03dcf", "size": 3094, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ventura/process_handle.hpp", "max_stars_repo_name": "TyRoXx/ventura", "max_stars_repo_head_hexsha": "e261d3a0589819f0709e43211974312be8a4e935", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ventura/process_handle.hpp", "max_issues_repo_name": "TyRoXx/ventura", "max_issues_repo_head_hexsha": "e261d3a0589819f0709e43211974312be8a4e935", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2016-06-11T12:56:02.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-10T20:31:11.000Z", "max_forks_repo_path": "ventura/process_handle.hpp", "max_forks_repo_name": "TyRoXx/ventura", "max_forks_repo_head_hexsha": "e261d3a0589819f0709e43211974312be8a4e935", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8, "max_line_length": 90, "alphanum_fraction": 0.5491273432, "num_tokens": 682}
import os
import time
import numpy
import json
import random
import matplotlib.pyplot as plt


class ActionSpace:
    """Discrete action space for the BomBot agent: 4 moves plus planting a bomb."""
    def __init__(self):
        self.data = numpy.array([0, 1, 2, 3, 4], dtype="int8")
        self.dict = {0: "A", 1: "W", 2: "D", 3: "S", 4: "B"}  # int -> Left, Up, Right, Down, Bomb
        self.size = 5

    def sample(self):
        # Uniformly random action.
        return numpy.random.choice(a=self.data, size=1, replace=False)[0]


class StateSpace:
    """State space: one discrete state per cell of the 15x21 grid."""
    def __init__(self):
        # NOTE(review): int8 only holds -128..127, so values 128..314 wrap —
        # self.data is likely unused/broken; the dict below is what matters.
        self.data = numpy.array([_ for _ in range(315)], dtype="int8")
        self.dict = {(r, c): r * 21 + c for r in range(15) for c in range(21)}  # (row, col) -> int
        self.size = 15 * 21


class Environment:
    """Gym-like BomBot environment: a robot moves on a tile map and bombs bricks."""
    def __init__(self):
        # Q-Learning
        self.action_space = ActionSpace()
        self.state_space = StateSpace()
        # BomBot
        self.object_map = numpy.array(json.load(open("map.json", "r")), dtype="int8")
        self.object_dictionary = {0: "F", 1: "B", 2: "W", 3: "X"}  # int -> Floor, Wall, Brick, Player, Robot
        self.robot_position = (13, 1)
        self.robot_action = " "
        self.bomb_capacity = 1
        self.bomb_timer = 0
        self.bomb_position = []

    def __repr__(self):
        # ASCII rendering of the map plus the robot's coordinates.
        s = ""
        s += str(self.robot_position) + "\n"
        s += "*" * 45 + "\n"
        s += numpy.array2string(self.object_map, max_line_width=45,
                                formatter={"int": lambda x: self.object_dictionary.get(x)}) + "\n"
        s += "*" * 45 + "\n"
        return s

    def reset(self):
        """Reload the map and return the initial (encoded) state.

        NOTE(review): bomb_capacity is not reset here, unlike __init__.
        """
        # Q-Learning
        self.action_space = ActionSpace()
        self.state_space = StateSpace()
        # BomBot
        self.object_map = numpy.array(json.load(open("map.json", "r")), dtype="int8")
        self.object_dictionary = {0: "F", 1: "B", 2: "W", 3: "X"}  # int -> Floor, Wall, Brick, Player, Robot
        self.robot_position = (13, 1)
        self.robot_action = " "
        self.bomb_timer = 0
        self.bomb_position = []
        return self.state_space.dict[self.robot_position]

    def step(self, action):
        """Apply one action; return (new_state, reward, done).

        Moves are blocked by any non-floor tile; action 4 plants a bomb whose
        blast covers the four neighbours after a 3-step timer.
        """
        # move (0=left, 1=up, 2=right, 3=down); a cell is walkable iff its map value is 0
        if action == 0:
            r, c = self.robot_position
            if 0 <= c - 1 < len(self.object_map[0]) and not self.object_map[r][c - 1]:
                self.robot_position = (r, c - 1)
        if action == 1:
            r, c = self.robot_position
            if 0 <= r - 1 < len(self.object_map) and not self.object_map[r - 1][c]:
                self.robot_position = (r - 1, c)
        if action == 2:
            r, c = self.robot_position
            if 0 <= c + 1 < len(self.object_map[0]) and not self.object_map[r][c + 1]:
                self.robot_position = (r, c + 1)
        if action == 3:
            r, c = self.robot_position
            if 0 <= r + 1 < len(self.object_map) and not self.object_map[r + 1][c]:
                self.robot_position = (r + 1, c)
        # bomb: tick the fuse; planting records the four neighbouring blast cells
        if self.bomb_timer:
            self.bomb_timer -= 1
        if action == 4:
            self.bomb_timer = 3
            r, c = self.robot_position
            if 0 <= c - 1 < len(self.object_map[0]):
                self.bomb_position.append((r, c - 1))
            if 0 <= r - 1 < len(self.object_map):
                self.bomb_position.append((r - 1, c))
            if 0 <= c + 1 < len(self.object_map[0]):
                self.bomb_position.append((r, c + 1))
            if 0 <= r + 1 < len(self.object_map):
                self.bomb_position.append((r + 1, c))
        # reward: +5 per brick destroyed when the fuse reaches zero
        reward = 0
        for r, c in self.bomb_position:
            if self.object_map[r][c] == 1 and not self.bomb_timer:
                reward += 5
                self.object_map[r][c] = 0
        # done: episode ends (reward zeroed) if the robot stands in its own blast
        done = False
        if not self.bomb_timer:
            if self.robot_position in self.bomb_position:
                done, reward = True, 0
            self.bomb_position = []
        # encode the (row, col) position as the discrete state index
        return self.state_space.dict[self.robot_position], reward, done

    def render(self):
        # NOTE(review): 'cls' is Windows-only; use 'clear' on POSIX.
        os.system('cls')
        print(self)
        time.sleep(0.3)


def train(num_episodes, max_steps_per_episode, learning_rate, discount_rate, exploration_rate,
          max_exploration_rate, min_exploration_rate, exploration_decay_rate):
    """Tabular Q-learning over the BomBot environment.

    Runs num_episodes episodes with epsilon-greedy exploration (exponential
    decay), saves the learned Q-table as JSON, and plots mean reward per
    thousand episodes.
    """
    # initialize environment
    env = Environment()
    # initialize q_table
    q_table = numpy.zeros((env.state_space.size, env.action_space.size))
    # initialize reward
    rewards_all_episodes = []
    # Q-learning algorithm
    for episode in range(num_episodes):
        state = env.reset()
        done = False
        rewards_current_episode = 0
        for step in range(max_steps_per_episode):
            # exploration-exploitation trade-off
            exploration_rate_threshold = random.uniform(0, 1)
            if exploration_rate_threshold > exploration_rate:
                action = numpy.argmax(q_table[state, :])
            else:
                action = env.action_space.sample()
            # agent performs action
            new_state, reward, done = env.step(action)
            # update q_table for Q(s, a) — standard Bellman update
            q_table[state, action] = q_table[state, action] * (1 - learning_rate) + \
                learning_rate * (reward + discount_rate * numpy.max(q_table[new_state, :]))
            # update state, reward
            state = new_state
            rewards_current_episode += reward
            # move to next episode if done
            if done:
                break
        # exploration rate decay (exponential toward min_exploration_rate)
        exploration_rate = min_exploration_rate + \
            (max_exploration_rate - min_exploration_rate) * numpy.exp(-exploration_decay_rate * episode)
        # update reward
        rewards_all_episodes.append(rewards_current_episode)
    # store q_table, keyed by the hyperparameter combination
    with open(f"ne={num_episodes}_ms={max_steps_per_episode}_lr={learning_rate}_dr={discount_rate}_edr={exploration_decay_rate}_mer={min_exploration_rate}.json", "w") as f:
        json.dump(q_table.tolist(), f)
    # visualize: mean reward per 1000-episode bucket
    x, y, count = [], [], 1000
    rewards_per_thousand_episodes = numpy.split(numpy.array(rewards_all_episodes), num_episodes / 1000)
    for r in rewards_per_thousand_episodes:
        x += [count]; y += [sum(r / 1000)]; count += 1000
    plt.plot(x, y); plt.xlabel("Episode"); plt.ylabel("Reward")
    plt.title(f"ne={num_episodes}_ms={max_steps_per_episode}_lr={learning_rate}_dr={discount_rate}_edr={exploration_decay_rate}_mer={min_exploration_rate}")
    plt.savefig(f"ne={num_episodes}_ms={max_steps_per_episode}_lr={learning_rate}_dr={discount_rate}_edr={exploration_decay_rate}_mer={min_exploration_rate}.png")
    plt.show()


train(
    num_episodes=20000,
    max_steps_per_episode=3500,
    learning_rate=0.05,
    discount_rate=0.89,
    exploration_rate=1,
    max_exploration_rate=1,
    min_exploration_rate=1e-3,
    exploration_decay_rate=1e-4,
)

# Hyperparameter grid search, disabled.
# NOTE(review): the commented calls pass a `heuristics` argument that train()
# does not accept — this block predates the current signature.
# initialize q_learning
# num_episodes_list = [5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000]
# max_steps_per_episode_list = [2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000]
# learning_rate_list = [0.02, 0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 0.97]
# discount_rate_list = [0.02, 0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 0.97]
# exploration_rate = max_exploration_rate = 1
# min_exploration_rate_list = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
# exploration_decay_rate_list = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
# heuristics = dict()
# for num_episodes in num_episodes_list:
#     for max_steps_per_episode in max_steps_per_episode_list:
#         for learning_rate in learning_rate_list:
#             for discount_rate in discount_rate_list:
#                 for min_exploration_rate in min_exploration_rate_list:
#                     for exploration_decay_rate in exploration_decay_rate_list:
#                         train(num_episodes, max_steps_per_episode, learning_rate, discount_rate, exploration_rate,
#                               max_exploration_rate, min_exploration_rate, exploration_decay_rate, heuristics)
# print(heuristics)
# with open("heuristics.json", "w") as f:
#     json.dump(heuristics, f)
{"hexsha": "a8c2bec604d5366a017a8e8789075e215c398af0", "size": 7885, "ext": "py", "lang": "Python", "max_stars_repo_path": "asset/genius/q_learning/environment.py", "max_stars_repo_name": "haok1402/BomBot", "max_stars_repo_head_hexsha": "2c26da303f06d7a2968fecd14ed1a541538ef889", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-06T03:22:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T05:15:22.000Z", "max_issues_repo_path": "asset/genius/q_learning/environment.py", "max_issues_repo_name": "haok1402/BomBot", "max_issues_repo_head_hexsha": "2c26da303f06d7a2968fecd14ed1a541538ef889", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asset/genius/q_learning/environment.py", "max_forks_repo_name": "haok1402/BomBot", "max_forks_repo_head_hexsha": "2c26da303f06d7a2968fecd14ed1a541538ef889", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8055555556, "max_line_length": 172, "alphanum_fraction": 0.6144578313, "include": true, "reason": "import numpy", "num_tokens": 2251}
import scipy.optimize
import numpy as np
import torch

from ..functional import nac_weight, sparsity_error
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell


class PosNACLayer(ExtendedTorchModule):
    """NAC-style linear layer whose effective weights are squashed into (0, 1).

    The learnable parameter ``W_hat`` is unconstrained; a sigmoid is applied
    on every forward pass, so the weights actually multiplied with the input
    are always positive.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features

        # Raw (unconstrained) weight parameter, squashed by sigmoid in forward().
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        # The layer is bias-free; registering None keeps the parameter API uniform.
        self.register_parameter('bias', None)

    def reset_parameters(self):
        """Re-initialize ``W_hat`` with Xavier normal initialization."""
        torch.nn.init.xavier_normal_(self.W_hat)

    def forward(self, input, reuse=False):
        """Apply the sigmoid-constrained linear transform to ``input``.

        The constrained weight matrix and its sparsity error are logged to the
        summary writer for later inspection.
        """
        weights = torch.sigmoid(self.W_hat)
        self.writer.add_histogram('W', weights)
        self.writer.add_tensor('W', weights)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(weights), verbose_only=False)
        return torch.nn.functional.linear(input, weights, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)


class PosNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around :class:`PosNACLayer`.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(PosNACLayer, input_size, hidden_size, **kwargs)
{"hexsha": "51326de5e20d952ade2e9683ab97da6d732a2d71", "size": 1635, "ext": "py", "lang": "Python", "max_stars_repo_path": "stable_nalu/layer/pos_nac.py", "max_stars_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_stars_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 147, "max_stars_repo_stars_event_min_datetime": "2019-10-07T11:01:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T02:51:18.000Z", "max_issues_repo_path": "stable_nalu/layer/pos_nac.py", "max_issues_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_issues_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-03T12:40:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-03T12:40:21.000Z", "max_forks_repo_path": "stable_nalu/layer/pos_nac.py", "max_forks_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_forks_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2019-12-21T15:58:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T08:32:38.000Z", "avg_line_length": 32.0588235294, "max_line_length": 89, "alphanum_fraction": 0.6972477064, "include": true, "reason": "import numpy,import scipy", "num_tokens": 370}
""" Shared fixtures for tests """ import logging import pytest import numpy as np import pandas as pd from resqpy.model import Model, new_model from resqpy.organize import WellboreFeature, WellboreInterpretation from resqpy.well import Trajectory, MdDatum, WellboreFrame from resqpy.crs import Crs @pytest.fixture(autouse=True) def capture_logs(caplog): """Always capture log messages from respy""" caplog.set_level(logging.DEBUG, logger="resqpy") @pytest.fixture def tmp_model(tmp_path): """Example resqpy model in a temporary directory unique to each test""" return new_model(str(tmp_path / 'tmp_model.epc')) @pytest.fixture def example_model_and_crs(tmp_model): """ Returns a fresh RESQML Model and Crs, in a temporary directory """ # TODO: parameterise with m or feet xyz_uom = 'm' # Create a model with a coordinate reference system crs = Crs(parent_model=tmp_model, z_inc_down=True, xy_units=xyz_uom, z_units=xyz_uom) crs.create_xml() return tmp_model, crs @pytest.fixture def example_model_with_well(example_model_and_crs): """ Model with a single well with a vertical trajectory """ wellname = 'well A' elevation = 100 md_uom = 'm' model, crs = example_model_and_crs # Create a single well feature and interpretation well_feature = WellboreFeature(parent_model=model, feature_name=wellname) well_feature.create_xml() well_interp = WellboreInterpretation(parent_model=model, wellbore_feature=well_feature, is_drilled=True) well_interp.create_xml() # Create a measured depth datum datum = MdDatum( parent_model=model, crs_root=crs.root, location=(0, 0, -elevation), md_reference='kelly bushing' ) datum.create_xml() # Create trajectory of a vertical well mds = np.array([0, 1000, 2000]) zs = mds - elevation traj = Trajectory( parent_model=model, md_datum=datum, data_frame=pd.DataFrame(dict(MD=mds, X=0, Y=0, Z=zs)), length_uom=md_uom, represented_interp=well_interp ) traj.write_hdf5(mode='w') traj.create_xml() return model, well_interp, datum, traj @pytest.fixture def 
example_model_with_logs(example_model_with_well): model, well_interp, datum, traj = example_model_with_well frame = WellboreFrame( model, trajectory=traj, represented_interp=well_interp, mds=[1,2,3,4], ) frame.write_hdf5() frame.create_xml(title='Log run A') log_collection = frame.logs log_collection.add_log("GR", [1,2,1,2], 'gAPI') log_collection.add_log("NPHI", [0.1, 0.1, np.NaN, np.NaN], 'v/v') return model, well_interp, datum, traj, frame, log_collection
{"hexsha": "adf0cb90c1128059d773bdf79d26dd16edd5eba0", "size": 2656, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/conftest.py", "max_stars_repo_name": "seryozni/resqpy", "max_stars_repo_head_hexsha": "2fab32f0db02b40f6cacbc620372bbd0cd27fd0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/conftest.py", "max_issues_repo_name": "seryozni/resqpy", "max_issues_repo_head_hexsha": "2fab32f0db02b40f6cacbc620372bbd0cd27fd0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/conftest.py", "max_forks_repo_name": "seryozni/resqpy", "max_forks_repo_head_hexsha": "2fab32f0db02b40f6cacbc620372bbd0cd27fd0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.56, "max_line_length": 107, "alphanum_fraction": 0.7225150602, "include": true, "reason": "import numpy", "num_tokens": 720}
""" Module containing classes and routines used in training of policies. """ from __future__ import annotations import os from typing import TYPE_CHECKING import numpy as np from tensorflow.keras.layers import Dense, Dropout, Input, Dot from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.utils import Sequence from tensorflow.keras.callbacks import ( EarlyStopping, CSVLogger, ModelCheckpoint, ReduceLROnPlateau, ) from tensorflow.keras import regularizers from sklearn.utils import shuffle from scipy import sparse from aizynthfinder.utils.models import top10_acc, top50_acc if TYPE_CHECKING: from aizynthfinder.utils.type_utils import Tuple, List, Any from aizynthfinder.training.utils import Config class _InMemorySequence(Sequence): # pylint: disable=W0223 def __init__(self, config: Config, dataset_label: str) -> None: self.batch_size = config["batch_size"] input_filename = config.filename(dataset_label + "_inputs") label_filename = config.filename(dataset_label + "_labels") self.input_matrix = self._load_data(input_filename) self.label_matrix = self._load_data(label_filename) self.input_dim = self.input_matrix.shape[1] def __len__(self) -> int: return int(np.ceil(self.label_matrix.shape[0] / float(self.batch_size))) def _make_slice(self, idx: int) -> slice: if idx < 0 or idx >= len(self): raise IndexError("index out of range") start = idx * self.batch_size end = (idx + 1) * self.batch_size return slice(start, end) @staticmethod def _load_data(filename: str) -> np.ndarray: try: return sparse.load_npz(filename) except ValueError: return np.load(filename)["arr_0"] class ExpansionModelSequence(_InMemorySequence): """ Custom sequence class to keep sparse, pre-computed matrices in memory. 
Batches are created dynamically by slicing the in-memory arrays The data will be shuffled on each epoch end :ivar output_dim: the output size (number of templates) :param config: the settings :param dataset_label: the label of set, e.g. training, testing or validation """ def __init__(self, config: Config, dataset_label: str) -> None: super().__init__(config, dataset_label) self.output_dim = self.label_matrix.shape[1] def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]: idx_ = self._make_slice(idx) return self.input_matrix[idx_].toarray(), self.label_matrix[idx_].toarray() def on_epoch_end(self) -> None: self.input_matrix, self.label_matrix = shuffle( self.input_matrix, self.label_matrix, random_state=0 ) class FilterModelSequence(_InMemorySequence): """ Custom sequence class to keep sparse, pre-computed matrices in memory. Batches are created dynamically by slicing the in-memory arrays The data will be shuffled on each epoch end :param config: the settings :param dataset_label: the label of set, e.g. 
training, testing or validation """ def __init__(self, config: Config, dataset_label: str) -> None: super().__init__(config, dataset_label) filename = config.filename(dataset_label + "_inputs2") self.input_matrix2 = self._load_data(filename) def __getitem__(self, idx: int) -> Tuple[List[np.ndarray], np.ndarray]: idx_ = self._make_slice(idx) return ( [self.input_matrix[idx_].toarray(), self.input_matrix2[idx_].toarray()], self.label_matrix[idx_], ) def on_epoch_end(self) -> None: self.input_matrix, self.input_matrix2, self.label_matrix = shuffle( self.input_matrix, self.input_matrix2, self.label_matrix, random_state=0 ) def _setup_callbacks(config: Config) -> List[Any]: early_stopping = EarlyStopping(monitor="val_loss", patience=10) csv_logger = CSVLogger(config.filename("_keras_training.log"), append=True) checkpoint_path = os.path.join(config["output_path"], "checkpoints") if not os.path.exists(checkpoint_path): os.mkdir(checkpoint_path) checkpoint = ModelCheckpoint( os.path.join(checkpoint_path, "keras_model.hdf5"), monitor="loss", save_best_only=True, ) reduce_lr = ReduceLROnPlateau( monitor="val_loss", factor=0.5, patience=5, verbose=0, mode="auto", min_delta=0.000001, cooldown=0, min_lr=0, ) return [early_stopping, csv_logger, checkpoint, reduce_lr] def _train_keras_model( model: Model, train_seq: _InMemorySequence, valid_seq: _InMemorySequence, loss: str, metrics: List[Any], config: Config, ) -> None: adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0) model.compile( optimizer=adam, loss=loss, metrics=metrics, ) model.fit_generator( train_seq, steps_per_epoch=None, epochs=config["epochs"], verbose=1, callbacks=_setup_callbacks(config), validation_data=valid_seq, validation_steps=None, class_weight=None, max_queue_size=20, workers=20, use_multiprocessing=False, shuffle=True, initial_epoch=0, ) def train_expansion_keras_model(config: Config) -> None: """ Train a expansion policy :param config: the settings """ train_seq = 
ExpansionModelSequence(config, "training") valid_seq = ExpansionModelSequence(config, "validation") model = Sequential() model.add( Dense( config["model"]["hidden_nodes"], input_shape=(train_seq.input_dim,), activation="elu", kernel_regularizer=regularizers.l2(0.001), ) ) model.add(Dropout(config["model"]["drop_out"])) model.add(Dense(train_seq.output_dim, activation="softmax")) _train_keras_model( model, train_seq, valid_seq, "categorical_crossentropy", ["accuracy", "top_k_categorical_accuracy", top10_acc, top50_acc], config, ) def train_filter_keras_model(config: Config) -> None: """Train a Filter model""" train_seq = FilterModelSequence(config, "training") valid_seq = FilterModelSequence(config, "validation") product_input_layer = Input(shape=(config["fingerprint_len"],)) product_dense_layer = Dense(config["model"]["hidden_nodes"], activation="elu")( product_input_layer ) product_droput_layer = Dropout(config["model"]["drop_out"])(product_dense_layer) reaction_input_layer = Input(shape=(config["fingerprint_len"],)) reaction_dense_layer = Dense(config["model"]["hidden_nodes"], activation="elu")( reaction_input_layer ) cosine_layer = Dot(-1, normalize=True)([product_droput_layer, reaction_dense_layer]) output_layer = Dense(1, activation="sigmoid")(cosine_layer) model = Model( inputs=[product_input_layer, reaction_input_layer], outputs=output_layer ) _train_keras_model( model, train_seq, valid_seq, "binary_crossentropy", ["accuracy"], config ) def train_recommender_keras_model(config: Config) -> None: """Train a recommender model to be used in filter development""" train_seq = ExpansionModelSequence(config, "training") valid_seq = ExpansionModelSequence(config, "validation") model = Sequential() model.add( Dense( config["model"]["hidden_nodes"], input_shape=(config["fingerprint_len"],), activation="elu", ) ) model.add(Dropout(config["model"]["drop_out"])) model.add(Dense(train_seq.output_dim, activation="softmax")) _train_keras_model( model, train_seq, valid_seq, 
"categorical_crossentropy", ["accuracy", "top_k_categorical_accuracy", top10_acc, top50_acc], config, )
{"hexsha": "5d67eb9254d81158b8226981983e826721087e2a", "size": 8066, "ext": "py", "lang": "Python", "max_stars_repo_path": "aizynthfinder/training/keras_models.py", "max_stars_repo_name": "cambiegroup/aizynthfinder", "max_stars_repo_head_hexsha": "f5bafb2ac4749284571c05ae6df45b6f45cccd30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 219, "max_stars_repo_stars_event_min_datetime": "2020-06-15T08:04:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:02:47.000Z", "max_issues_repo_path": "aizynthfinder/training/keras_models.py", "max_issues_repo_name": "cambiegroup/aizynthfinder", "max_issues_repo_head_hexsha": "f5bafb2ac4749284571c05ae6df45b6f45cccd30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2020-08-14T14:50:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T12:49:06.000Z", "max_forks_repo_path": "aizynthfinder/training/keras_models.py", "max_forks_repo_name": "cambiegroup/aizynthfinder", "max_forks_repo_head_hexsha": "f5bafb2ac4749284571c05ae6df45b6f45cccd30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2020-06-15T13:36:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T06:18:02.000Z", "avg_line_length": 32.264, "max_line_length": 88, "alphanum_fraction": 0.67195636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1824}
# Scrapes ESPN golf tournament scorecards: for each tournament, collects every
# participant's per-hole shot counts and per-hole scoring labels (par/birdie/...)
# across up to four rounds (144 data points per player).
from pathlib import Path, PurePath
import os
from fnmatch import fnmatch
import sys
import csv
sys.path.append("c:\\Users\\kpdav\\machine_learning\\projects\\PGA-portfolio-optimizer\\config")
import config
from pgatour_metrics import get_espn_tournaments
import time
from csv import DictWriter
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from functools import wraps

from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np


def timeit(method):
    """Decorator: print the wall-clock runtime of *method* in milliseconds."""
    @wraps(method)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = method(*args, **kwargs)
        end_time = time.time()
        print(f"{method.__name__} => {(end_time-start_time)*1000} ms")
        return result
    return wrapper


def find_player_id(player):
    """Extract the numeric player id from an ESPN player URL.

    Args:
        player (str) : player profile URL containing ".../id/<pid>/..."

    Returns:
        player id substring
    """
    id_ = "id/"
    beg = player.find(id_) + len(id_)
    end = player.rfind("/")
    return player[beg:end]


def get_player_ids(t_body):
    """All player id's from a tournament leaderboard table body.

    Args:
        t_body : BeautifulSoup tag for the leaderboard <tbody>

    Returns:
        list of player id strings (empty if ESPN provides no player links)
    """
    # scorecard_beg = "https://www.espn.com/golf/player/scorecards/_/id/"
    player_ids = []
    players = t_body.find_all("tr", class_="Table__TR Table__even")
    if players is not None:
        for player in players:
            p_id_link = player.find("a")
            # ensure that espn has player links on page
            # if not, there is no player information
            if p_id_link is not None:
                p_id = find_player_id(p_id_link["href"])
                player_ids.append(p_id)
    return player_ids


def missing_data(scoring_data):
    """Pad a 9-hole scoring list with None up to exactly 9 entries.

    Args:
        scoring_data (list) : round data with missing entries (mutated in place)

    Returns:
        scoring_data (list) : round data padded to length 9
    """
    missing_holes = 9 - len(scoring_data)
    missing_entries = [None] * missing_holes
    scoring_data.extend(missing_entries)
    assert len(scoring_data) == 9
    return scoring_data


def missing_round(rd_name):
    """Build all-None shot and hole-label dicts for a round the player did not play.

    Args:
        rd_name (str) : round key, e.g. "round_3"

    Returns:
        data (dict) : 18 keys "<rd_name>_1".."<rd_name>_18" -> None
        data_pts (dict) : matching "..._pts" keys -> None
    """
    hole_ids = [rd_name + "_" + str(hn) for hn in range(1,19)]
    hole_pts_id = [h_id + "_pts" for h_id in hole_ids]
    hole_data = [None] * 18
    hole_data_pts = [None] * 18
    data = dict(zip(hole_ids, hole_data))
    data_pts = dict(zip(hole_pts_id, hole_data_pts))
    return data, data_pts


def find_rd_number(rd):
    """Derive the round key ("round_N") from a scorecard div's id attribute.

    Args:
        rd (element.Tag) : div.roundSwap active; id attr includes round number

    Returns:
        round key string, e.g. "round_2"
    """
    rd_name = rd["id"]
    # id looks like "round-N-...": drop the trailing part, then snake_case it.
    rd_name = rd_name[:rd_name.rfind("-")]
    rd_name = rd_name.replace("-", "_")
    return rd_name


def missing_round_number(scoring_base):
    """Determine which round key(s) a player is missing.

    Args:
        scoring_base (ResultSet) : set of player tournament rounds (1-3 entries)

    Returns:
        the missing round name(s) — 3, 2 or 1 values depending on rounds played
    """
    rd_check = np.array(["round_1", "round_2", "round_3", "round_4"])
    if len(scoring_base) == 1:
        rd_z = np.array([find_rd_number(scoring_base[0])])
        missing_rds = np.setdiff1d(rd_check, rd_z)
        assert len(missing_rds) == 3
        m_r1 = missing_rds[0]
        m_r2 = missing_rds[1]
        m_r3 = missing_rds[2]
        return m_r1, m_r2, m_r3
    elif len(scoring_base) == 2:
        rd_z = find_rd_number(scoring_base[0])
        rd_y = find_rd_number(scoring_base[1])
        rds = np.array([rd_z, rd_y])
        missing_rds = np.setdiff1d(rd_check, rds)
        assert len(missing_rds) == 2
        m_r1 = missing_rds[0]
        m_r2 = missing_rds[1]
        return m_r1, m_r2
    elif len(scoring_base) == 3:
        rd_z = find_rd_number(scoring_base[0])
        rd_y = find_rd_number(scoring_base[1])
        rd_x = find_rd_number(scoring_base[2])
        rds = np.array([rd_z, rd_y, rd_x])
        missing_rds = np.setdiff1d(rd_check, rds)
        assert len(missing_rds) == 1
        m_r1 = missing_rds[0]
        return m_r1
    else:
        # NOTE(review): falls through returning None for 0 or >3 rounds —
        # callers only invoke this for 1-3 rounds, but confirm.
        print("Incorrect number of rounds given.\n")


def get_round_scores(rd):
    """Get player scores, both shot and hole data, for 9 holes.

    Args:
        rd (list) : BeautifulSoup <td> cells for 9 holes

    Returns:
        shot_data (list) : int shot counts (None where cell is empty)
        hole_data (list) : CSS-class scoring labels, e.g. "par"/"birdie"
                           (None where class is the plain "textcenter")
    """
    shot_data = [int(score.text) if score.text else None for score in rd ]
    hole_data = [score["class"][0] if score["class"][0] != "textcenter" else None for score in rd]
    # Pad short rows (player withdrew mid-nine) to exactly 9 entries.
    if len(shot_data) != 9:
        shot_data = missing_data(shot_data)
    if len(hole_data) != 9:
        hole_data = missing_data(hole_data)
    return shot_data, hole_data


def round_data(round_base, rd_name):
    """Get player data for a specific round in a tournament.

    Args:
        round_base (element.Tag) : tournament round scorecard markup
        rd_name (str) : round key, e.g. "round_1"

    Returns:
        data (dict) : hole-key -> int shot count (or None)
        data_pts (dict) : "..._pts" key -> scoring label str (or None)
    """
    front_hole_ids = [rd_name + "_" + str(hn) for hn in range(1,10)]
    back_hole_ids = [rd_name + "_" + str(hn) for hn in range(10, 19)]
    front_pts_id = [h_id + "_pts" for h_id in front_hole_ids]
    back_pts_id = [h_id + "_pts" for h_id in back_hole_ids]
    rd_body = round_base.find_all("tr", class_="oddrow")
    # Last two "oddrow" rows are the front-nine and back-nine score rows.
    rd_front_total = rd_body[-2].find_all("td", class_="textcenter")
    rd_back_total = rd_body[-1].find_all("td", class_="textcenter")
    if len(rd_front_total) == 10:
        # Disregard totals
        rd_front = rd_front_total[:-1]
        front_shot_data, front_hole_data = get_round_scores(rd_front)
        f_labeled_data = dict(zip(front_hole_ids, front_shot_data))
        f_labeled_data_pts = dict(zip(front_pts_id, front_hole_data))
    else:
        rd_front = rd_front_total
        front_shot_data, front_hole_data = get_round_scores(rd_front)
        f_labeled_data = dict(zip(front_hole_ids, front_shot_data))
        f_labeled_data_pts = dict(zip(front_pts_id, front_hole_data))
    if len(rd_back_total) == 10:
        # 10th cell is the nine-hole total; drop it.
        rd_back = rd_back_total[:-1]
        back_shot_data, back_hole_data = get_round_scores(rd_back)
        b_labeled_data = dict(zip(back_hole_ids, back_shot_data))
        b_labeled_data_pts = dict(zip(back_pts_id, back_hole_data))
    else:
        rd_back = rd_back_total
        back_shot_data, back_hole_data = get_round_scores(rd_back)
        b_labeled_data = dict(zip(back_hole_ids, back_shot_data))
        b_labeled_data_pts = dict(zip(back_pts_id, back_hole_data))
    data = {**f_labeled_data, **b_labeled_data}
    data_pts = {**f_labeled_data_pts, **b_labeled_data_pts}
    return data, data_pts


def scoring_data(scoring_base):
    """Get player scoring data for each round in a tournament.

    ESPN lists rounds newest-first, so indices are reversed when mapping to
    round numbers. Rounds not played are filled with None via missing_round().

    Args:
        scoring_base (ResultSet) : set of player tournament rounds;
            length reflects number of rounds played (5 means a playoff round,
            which is ignored).

    Returns:
        dict of 144 entries (4 rounds x 18 holes x {shots, pts})
    """
    if len(scoring_base) == 0:
        # No scorecard at all: emit 4 fully-empty rounds.
        rd_1_data, rd_1_data_pts = missing_round("round_1")
        rd_2_data, rd_2_data_pts = missing_round("round_2")
        rd_3_data, rd_3_data_pts = missing_round("round_3")
        rd_4_data, rd_4_data_pts = missing_round("round_4")
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    elif len(scoring_base) == 1:
        round_1 = find_rd_number(scoring_base[0])
        m_rd1, m_rd2, m_rd3 = missing_round_number(scoring_base)
        rd_1 = scoring_base[0]
        rd_1_data, rd_1_data_pts = round_data(rd_1, round_1)
        rd_2_data, rd_2_data_pts = missing_round(m_rd1)
        rd_3_data, rd_3_data_pts = missing_round(m_rd2)
        rd_4_data, rd_4_data_pts = missing_round(m_rd3)
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    elif len(scoring_base) == 2:
        # missed cut
        round_1 = find_rd_number(scoring_base[1])
        round_2 = find_rd_number(scoring_base[0])
        m_rd1, m_rd2 = missing_round_number(scoring_base)
        rd_1 = scoring_base[1]
        rd_1_data, rd_1_data_pts = round_data(rd_1, round_1)
        rd_2 = scoring_base[0]
        rd_2_data, rd_2_data_pts = round_data(rd_2, round_2)
        rd_3_data, rd_3_data_pts = missing_round(m_rd1)
        rd_4_data, rd_4_data_pts = missing_round(m_rd2)
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    elif len(scoring_base) == 3:
        m_rd = missing_round_number(scoring_base)
        round_1 = find_rd_number(scoring_base[2])
        round_2 = find_rd_number(scoring_base[1])
        round_3 = find_rd_number(scoring_base[0])
        rd_1 = scoring_base[2]
        rd_1_data, rd_1_data_pts = round_data(rd_1, round_1)
        rd_2 = scoring_base[1]
        rd_2_data, rd_2_data_pts = round_data(rd_2, round_2)
        rd_3 = scoring_base[0]
        rd_3_data, rd_3_data_pts = round_data(rd_3, round_3)
        rd_4_data, rd_4_data_pts = missing_round(m_rd)
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    elif len(scoring_base) == 4:
        round_1 = find_rd_number(scoring_base[3])
        round_2 = find_rd_number(scoring_base[2])
        round_3 = find_rd_number(scoring_base[1])
        round_4 = find_rd_number(scoring_base[0])
        rd_1 = scoring_base[3]
        rd_1_data, rd_1_data_pts = round_data(rd_1, round_1)
        rd_2 = scoring_base[2]
        rd_2_data, rd_2_data_pts = round_data(rd_2, round_2)
        rd_3 = scoring_base[1]
        rd_3_data, rd_3_data_pts = round_data(rd_3, round_3)
        rd_4 = scoring_base[0]
        rd_4_data, rd_4_data_pts = round_data(rd_4, round_4)
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    elif len(scoring_base) == 5:
        # playoff round
        # Index 0 (the playoff) is intentionally skipped.
        round_1 = find_rd_number(scoring_base[4])
        round_2 = find_rd_number(scoring_base[3])
        round_3 = find_rd_number(scoring_base[2])
        round_4 = find_rd_number(scoring_base[1])
        rd_1 = scoring_base[4]
        rd_1_data, rd_1_data_pts = round_data(rd_1, round_1)
        rd_2 = scoring_base[3]
        rd_2_data, rd_2_data_pts = round_data(rd_2, round_2)
        rd_3 = scoring_base[2]
        rd_3_data, rd_3_data_pts = round_data(rd_3, round_3)
        rd_4 = scoring_base[1]
        rd_4_data, rd_4_data_pts = round_data(rd_4, round_4)
        rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                    **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
        assert len(rds_data) == 144
        return rds_data
    else:
        print(len(scoring_base), " incorrect number of rounds\n")


def handle_bad_page(player_info):
    """Handle ESPN server errors for two known-bad scorecards.

    Hard-coded scoring data for two players (ids 4686086/4686087) in
    tournament 401155472 whose scorecard pages fail on ESPN's side.

    Args:
        player_info (dict) : must contain "player_id" and "tournament_id"

    Returns:
        dict merging *player_info* with the hard-coded round data
    """
    new_player_info = player_info
    if new_player_info["player_id"] == "4686087" and new_player_info["tournament_id"] == "401155472":
        new_shot_scores = {"round_1_1": 4, "round_1_2": 5, "round_1_3": 5, "round_1_4": 4,
                           "round_1_5": 5, "round_1_6": 7, "round_1_7": 4, "round_1_8": 4,
                           "round_1_9": 4, "round_1_10": 4, "round_1_11": 4, "round_1_12": 4,
                           "round_1_13": 3, "round_1_14": 4, "round_1_15": 4, "round_1_16": 4,
                           "round_1_17": 3, "round_1_18": 6,
                           "round_2_1": 4, "round_2_2": 4, "round_2_3": 3, "round_2_4": 4,
                           "round_2_5": 4, "round_2_6": 4, "round_2_7": 4, "round_2_8": 3,
                           "round_2_9": 5, "round_2_10": 4, "round_2_11": 4, "round_2_12": 5,
                           "round_2_13": 5, "round_2_14": 5, "round_2_15": 3, "round_2_16": 3,
                           "round_2_17": 3, "round_2_18": 4,
                           "round_3_1": None, "round_3_2": None, "round_3_3": None, "round_3_4": None,
                           "round_3_5": None, "round_3_6": None, "round_3_7": None, "round_3_8": None,
                           "round_3_9": None, "round_3_10": None, "round_3_11": None, "round_3_12": None,
                           "round_3_13": None, "round_3_14": None, "round_3_15": None, "round_3_16": None,
                           "round_3_17": None, "round_3_18": None,
                           "round_4_1": None, "round_4_2": None, "round_4_3": None, "round_4_4": None,
                           "round_4_5": None, "round_4_6": None, "round_4_7": None, "round_4_8": None,
                           "round_4_9": None, "round_4_10": None, "round_4_11": None, "round_4_12": None,
                           "round_4_13": None, "round_4_14": None, "round_4_15": None, "round_4_16": None,
                           "round_4_17": None, "round_4_18": None,}
        new_hole_scores = {"round_1_1_pts": "par", "round_1_2_pts": "bogey", "round_1_3_pts": "bogey",
                           "round_1_4_pts": "bogey", "round_1_5_pts": "bogey", "round_1_6_pts": "double",
                           "round_1_7_pts": "par", "round_1_8_pts": "bogey", "round_1_9_pts": "par",
                           "round_1_10_pts": "par", "round_1_11_pts": "par", "round_1_12_pts": "birdie",
                           "round_1_13_pts": "par", "round_1_14_pts": "par", "round_1_15_pts": "par",
                           "round_1_16_pts": "par", "round_1_17_pts": "par", "round_1_18_pts": "bogey",
                           "round_2_1_pts": "par", "round_2_2_pts": "par", "round_2_3_pts": "birdie",
                           "round_2_4_pts": "bogey", "round_2_5_pts": "par", "round_2_6_pts": "birdie",
                           "round_2_7_pts": "par", "round_2_8_pts": "par", "round_2_9_pts": "bogey",
                           "round_2_10_pts": "par", "round_2_11_pts": "par", "round_2_12_pts": "par",
                           "round_2_13_pts": "double", "round_2_14_pts": "bogey", "round_2_15_pts": "birdie",
                           "round_2_16_pts": "birdie", "round_2_17_pts": "par", "round_2_18_pts": "birdie",
                           "round_3_1_pts": None, "round_3_2_pts": None, "round_3_3_pts": None,
                           "round_3_4_pts": None, "round_3_5_pts": None, "round_3_6_pts": None,
                           "round_3_7_pts": None, "round_3_8_pts": None, "round_3_9_pts": None,
                           "round_3_10_pts": None, "round_3_11_pts": None, "round_3_12_pts": None,
                           "round_3_13_pts": None, "round_3_14_pts": None, "round_3_15_pts": None,
                           "round_3_16_pts": None, "round_3_17_pts": None, "round_3_18_pts": None,
                           "round_4_1_pts": None, "round_4_2_pts": None, "round_4_3_pts": None,
                           "round_4_4_pts": None, "round_4_5_pts": None, "round_4_6_pts": None,
                           "round_4_7_pts": None, "round_4_8_pts": None, "round_4_9_pts": None,
                           "round_4_10_pts": None, "round_4_11_pts": None, "round_4_12_pts": None,
                           "round_4_13_pts": None, "round_4_14_pts": None, "round_4_15_pts": None,
                           "round_4_16_pts": None, "round_4_17_pts": None, "round_4_18_pts": None,}
        new_player_scores = {**new_shot_scores, **new_hole_scores}
    if new_player_info["player_id"] == "4686086" and new_player_info["tournament_id"] == "401155472":
        new_shot_scores = {"round_1_1": 4, "round_1_2": 3, "round_1_3": 3, "round_1_4": 2,
                           "round_1_5": 4, "round_1_6": 5, "round_1_7": 4, "round_1_8": 2,
                           "round_1_9": 5, "round_1_10": 4, "round_1_11": 5, "round_1_12": 5,
                           "round_1_13": 3, "round_1_14": 4, "round_1_15": 4, "round_1_16": 4,
                           "round_1_17": 3, "round_1_18": 5,
                           "round_2_1": 4, "round_2_2": 4, "round_2_3": 4, "round_2_4": 3,
                           "round_2_5": 4, "round_2_6": 5, "round_2_7": 4, "round_2_8": 3,
                           "round_2_9": 4, "round_2_10": 5, "round_2_11": 5, "round_2_12": 5,
                           "round_2_13": 3, "round_2_14": 4, "round_2_15": 4, "round_2_16": 4,
                           "round_2_17": 3, "round_2_18": 6,
                           "round_3_1": None, "round_3_2": None, "round_3_3": None, "round_3_4": None,
                           "round_3_5": None, "round_3_6": None, "round_3_7": None, "round_3_8": None,
                           "round_3_9": None, "round_3_10": None, "round_3_11": None, "round_3_12": None,
                           "round_3_13": None, "round_3_14": None, "round_3_15": None, "round_3_16": None,
                           "round_3_17": None, "round_3_18": None,
                           "round_4_1": None, "round_4_2": None, "round_4_3": None, "round_4_4": None,
                           "round_4_5": None, "round_4_6": None, "round_4_7": None, "round_4_8": None,
                           "round_4_9": None, "round_4_10": None, "round_4_11": None, "round_4_12": None,
                           "round_4_13": None, "round_4_14": None, "round_4_15": None, "round_4_16": None,
                           "round_4_17": None, "round_4_18": None,}
        new_hole_scores = {"round_1_1_pts": "par", "round_1_2_pts": "par", "round_1_3_pts": "par",
                           "round_1_4_pts": "par", "round_1_5_pts": "par", "round_1_6_pts": "par",
                           "round_1_7_pts": "par", "round_1_8_pts": "par", "round_1_9_pts": "par",
                           "round_1_10_pts": "bogey", "round_1_11_pts": "bogey", "round_1_12_pts": "par",
                           "round_1_13_pts": "par", "round_1_14_pts": "par", "round_1_15_pts": "par",
                           "round_1_16_pts": "par", "round_1_17_pts": "par", "round_1_18_pts": "bogey",
                           "round_2_1_pts": "par", "round_2_2_pts": "birdie", "round_2_3_pts": "birdie",
                           "round_2_4_pts": "birdie", "round_2_5_pts": "par", "round_2_6_pts": "par",
                           "round_2_7_pts": "par", "round_2_8_pts": "birdie", "round_2_9_pts": "bogey",
                           "round_2_10_pts": "par", "round_2_11_pts": "bogey", "round_2_12_pts": "par",
                           "round_2_13_pts": "par", "round_2_14_pts": "par", "round_2_15_pts": "par",
                           "round_2_16_pts": "par", "round_2_17_pts": "par", "round_2_18_pts": "par",
                           "round_3_1_pts": None, "round_3_2_pts": None, "round_3_3_pts": None,
                           "round_3_4_pts": None, "round_3_5_pts": None, "round_3_6_pts": None,
                           "round_3_7_pts": None, "round_3_8_pts": None, "round_3_9_pts": None,
                           "round_3_10_pts": None, "round_3_11_pts": None, "round_3_12_pts": None,
                           "round_3_13_pts": None, "round_3_14_pts": None, "round_3_15_pts": None,
                           "round_3_16_pts": None, "round_3_17_pts": None, "round_3_18_pts": None,
                           "round_4_1_pts": None, "round_4_2_pts": None, "round_4_3_pts": None,
                           "round_4_4_pts": None, "round_4_5_pts": None, "round_4_6_pts": None,
                           "round_4_7_pts": None, "round_4_8_pts": None, "round_4_9_pts": None,
                           "round_4_10_pts": None, "round_4_11_pts": None, "round_4_12_pts": None,
                           "round_4_13_pts": None, "round_4_14_pts": None, "round_4_15_pts": None,
                           "round_4_16_pts": None, "round_4_17_pts": None, "round_4_18_pts": None,}
        new_player_scores = {**new_shot_scores, **new_hole_scores}
    # NOTE(review): if neither id matches, new_player_scores is unbound and the
    # next line raises NameError — callers only invoke this for the two known
    # ids, but confirm before widening its use.
    new_player_data = {**new_player_info, **new_player_scores}
    return new_player_data


def player_scorecard(scorecard_url):
    """Get espn player scorecard for a specific tournament.

    Args:
        scorecard_url (str) : espn url (".../id/<pid>/tournamentId/<tid>")

    Returns:
        dict of 146 entries: player_id, tournament_id, and 144 round values
    """
    with requests.Session() as session:
        page = session.get(scorecard_url)
        if page.status_code == 200:
            soup = BeautifulSoup(page.content, "lxml")
            base = soup.find_all("div", class_="roundSwap active")
            if base is not None:
                id_data = {}
                # Slice the player id out of ".../id/<pid>/tournamentId/..."
                p_id_start = scorecard_url.find("id") + 3
                p_id_end = scorecard_url.rfind("tournamentId") - 1
                id_data["player_id"] = scorecard_url[p_id_start:p_id_end]
                id_data["tournament_id"] = scorecard_url[scorecard_url.rfind("/") + 1:]
                scorecard_data = scoring_data(base)
                player_data = {**id_data, **scorecard_data}
                assert len(player_data) == 146
                return player_data
        else:
            # Non-200: fall back to hard-coded data for two known-bad pages,
            # otherwise emit an all-None scorecard.
            id_data = {}
            p_id_start = scorecard_url.find("id") + 3
            p_id_end = scorecard_url.rfind("tournamentId") - 1
            id_data["player_id"] = scorecard_url[p_id_start:p_id_end]
            id_data["tournament_id"] = scorecard_url[scorecard_url.rfind("/") + 1:]
            scorecard_data = {}
            if id_data["player_id"] == "4686086" and id_data["tournament_id"] == "401155472":
                scorecard_data = handle_bad_page(id_data)
            if id_data["player_id"] == "4686087" and id_data["tournament_id"] == "401155472":
                scorecard_data = handle_bad_page(id_data)
            if scorecard_data:
                player_data = {**id_data, **scorecard_data}
            else:
                rd_1_data, rd_1_data_pts = missing_round("round_1")
                rd_2_data, rd_2_data_pts = missing_round("round_2")
                rd_3_data, rd_3_data_pts = missing_round("round_3")
                rd_4_data, rd_4_data_pts = missing_round("round_4")
                rds_data = {**rd_1_data, **rd_2_data, **rd_3_data, **rd_4_data,
                            **rd_1_data_pts, **rd_2_data_pts, **rd_3_data_pts, **rd_4_data_pts}
                assert len(rds_data) == 144
                player_data = {**id_data, **rds_data}
            assert len(player_data) == 146
            return player_data


def players_scorecard_from_tournament(url):
    """Finds all participants in tournament and gets tournament scorecard
    data for each player.

    Args:
        url (str) : espn tournament leaderboard URL

    Returns:
        list of per-player scorecard dicts, or the url on a page error,
        or None for skipped/failed tournaments
    """
    espn_home_url = "https://www.espn.com/golf/"
    t_id = url[url.rfind("=")+1:]
    base_url = url
    # Redirect server request to mimic more realistic behavior
    # h_page = requests.get(espn_home_url)
    # Tournament ids 1155 and 995 are deliberately skipped.
    if (t_id != "1155") and (t_id != "995"):
        with requests.Session() as session:
            time.sleep(3)
            # home_page = session.get(espn_home_url)
            page = session.get(base_url)
            if page.status_code == 200:
                print("good url: ", url)
                soup = BeautifulSoup(page.content, "lxml")
                # Table's on webpage. index with -1 in case of playoff table
                tourn_tables = soup.select("div.ResponsiveTable")
                if tourn_tables is not None:
                    if len(tourn_tables) == 1:
                        tourn_table = tourn_tables[-1]
                        tourn_body = tourn_table.find("tbody", class_="Table__TBODY")
                        tourn_players = get_player_ids(tourn_body)
                        # 'https://www.espn.com/golf/player/scorecards/_/id/11099tournamentId/401148233'
                        scorecard_front = "https://www.espn.com/golf/player/scorecards/_/id/"
                        scorecard_back = "/tournamentId/"
                        valid_player_urls = [scorecard_front + player + scorecard_back + t_id
                                             for player in tourn_players]
                        # print(valid_player_urls)
                        player_data = [player_scorecard(player) for player in valid_player_urls]
                        print("\nNumber of players: ", len(player_data))
                        return player_data
                    elif len(tourn_tables) == 2:
                        tourn_table = tourn_tables[-1]
                        tourn_body = tourn_table.find("tbody", class_="Table__TBODY")
                        tourn_players = get_player_ids(tourn_body)
                        # 'https://www.espn.com/golf/player/scorecards/_/id/11099tournamentId/401148233'
                        scorecard_front = "https://www.espn.com/golf/player/scorecards/_/id/"
                        scorecard_back = "/tournamentId/"
                        valid_player_urls = [scorecard_front + player + scorecard_back + t_id
                                             for player in tourn_players]
                        # print(valid_player_urls)
                        player_data = [player_scorecard(player) for player in valid_player_urls]
                        print("\nNumber of players: ", len(player_data))
                        return player_data
                    elif len(tourn_tables) == 0:
                        print(f"error with {url}")
                        # To reset error on espn server
                        page = session.get(espn_home_url)
                        return url
                        # return None
                    else:
                        print(f"Number of tables {len(tourn_tables)} in url {url}")
            else:
                h_page = session.get(espn_home_url)
                # return None
def write_tournament_data(tournament_url, h_data_path="base"):
    """Scrape one espn tournament and write its player scorecards to a csv file.

    The csv is named ``<tournament_id>.csv`` and placed in the directory
    selected by ``h_data_path`` (unknown keys fall back to the raw
    historical directory).

    Args:
        tournament_url (str) : espn tournament leaderboard url ending in
            ``?tournamentId=<id>``
        h_data_path (str) : key of the target data directory (default "base")

    Returns:
        msg (str) : completion message containing the tournament id
    """
    # Scorecard rows for every player in the tournament (None on scrape failure)
    tourn_data = players_scorecard_from_tournament(tournament_url)

    # csv header: identifiers, then hole-by-hole scores for rounds 1-4
    # (round_<rd>_<hole>), then the matching point columns (round_<rd>_<hole>_pts)
    fields = ["player_id", "tournament_id"]
    rd_ids = ["round_" + rd_num + str(i)
              for rd_num in ["1_", "2_", "3_", "4_"]
              for i in range(1, 19)]
    rd_pt_ids = [ids + "_pts" for ids in rd_ids]
    fields.extend(rd_ids)
    fields.extend(rd_pt_ids)

    # Unique file name from the tournament id embedded in the url
    t_id = tournament_url[tournament_url.rfind("=") + 1:]
    fn = t_id + ".csv"

    # Map directory keys to configured paths instead of a long if/elif chain
    dir_map = {
        "base": config.RAW_HISTORICAL_DIR,
        "pga_season_2011": config.PGA_SEASON_2011,
        "pga_season_2012": config.PGA_SEASON_2012,
        "pga_season_2013": config.PGA_SEASON_2013,
        "pga_season_2014": config.PGA_SEASON_2014,
        "pga_season_2015": config.PGA_SEASON_2015,
        "pga_season_2016": config.PGA_SEASON_2016,
    }
    if h_data_path in dir_map:
        f_path = str(Path(dir_map[h_data_path], fn))
    else:
        print(f"No directory called {h_data_path}. Used historical_player_data directory")
        f_path = str(Path(config.RAW_HISTORICAL_DIR, fn))

    with open(f_path, "w", newline="") as csvfile:
        writer = DictWriter(csvfile, fieldnames=fields)
        writer.writeheader()
        if tourn_data is not None:
            writer.writerows(tourn_data)
        else:
            print(f"The tourn data is None: {tourn_data}")

    msg = f"Finished {t_id}"
    return msg


def csv_tournament_data(tournament_urls):
    """Write all tournament data to csv files using a thread pool.

    Args:
        tournament_urls (list) : espn tournament urls

    Returns:
        results (list) : completion message per url (None where the
            scrape/write raised), in the same order as ``tournament_urls``
    """
    futures_list = []
    results = []

    with ThreadPoolExecutor(max_workers=10) as executor:
        # Submit in url order so results line up with the input list
        for url in tournament_urls:
            futures_list.append(executor.submit(write_tournament_data, url))

        for future in futures_list:
            try:
                # Generous timeout: each task scrapes many player scorecards
                results.append(future.result(timeout=300))
            except Exception:
                # Record the failure; callers map None back to the url
                results.append(None)
    return results


def p_csv_tournament_data(tournament_urls, f_path="base"):
    """Write all tournament data to csv files using a process pool.

    Args:
        tournament_urls (list) : espn tournament urls
        f_path (str) : data directory key forwarded to write_tournament_data

    Returns:
        results (list) : completion message per url (None on failure),
            in the same order as ``tournament_urls``
    """
    futures_list = []
    results = []

    with ProcessPoolExecutor(max_workers=8) as executor:
        for url in tournament_urls:
            futures_list.append(executor.submit(write_tournament_data, url, f_path))

        for future in futures_list:
            try:
                results.append(future.result())
            except Exception:
                results.append(None)
    return results


@timeit
def historical_data_runner(start, end=None, f_path=None):
    """Get historical data over the given pga season(s).

    Args:
        start (int) : beginning pga season
        end (int) : ending pga season, optional
        f_path (str) : historical data directory key to store data under

    Returns:
        missed_tourns (list) : urls whose scrape failed (too many server
            requests / failed connections) so they can be retried
    """
    if end is not None:
        tournaments_df = get_espn_tournaments(start, end)
    else:
        tournaments_df = get_espn_tournaments(start)

    print(f"Number of tournaments: {tournaments_df.shape[0]}")

    base_url = "https://www.espn.com/golf/leaderboard?tournamentId="
    tournaments_df["url"] = tournaments_df["tournament_id"].apply(lambda x: base_url + str(x))
    urls = tournaments_df["url"].tolist()

    if f_path is not None:
        results = p_csv_tournament_data(urls, f_path)
    else:
        results = p_csv_tournament_data(urls)

    # results come back in url order, so pair them directly instead of
    # maintaining a manual counter
    missed_tourns = []
    for url, result in zip(urls, results):
        if result is None:
            missed_tourns.append(url)
            print(f"URL:{url} TYPE: {type(url)}")
            print(f"Length of URL : {len(url)}")
        else:
            print(result)

    return missed_tourns


def st_historical_data_runner(start, end=None):
    """Get historical data over given pga season(s) from espn through a
    single thread process.

    Note:
        Process writes each tournament to disk as a csv file using the
        tournament id as a unique identifier.

    Args:
        start (int) : beginning pga season
        end (int) : ending pga season, optional arg
    """
    if end is not None:
        tournaments_df = get_espn_tournaments(start, end)
    else:
        tournaments_df = get_espn_tournaments(start)

    base_url = "https://www.espn.com/golf/leaderboard?tournamentId="
    tournaments_df["url"] = tournaments_df["tournament_id"].apply(lambda x: base_url + str(x))
    urls = tournaments_df["url"].tolist()

    results = [write_tournament_data(url) for url in urls]
    print(results)


def tournament_date_col(df, tournament_df):
    """Insert a date column into df through tournament id mapping.

    Parameters:
        df (pd.DataFrame) : data with a tournament_id column (modified in place)
        tournament_df (pd.DataFrame) : tournaments with tournament_id and date
    """
    # Build the id -> date lookup once (first occurrence wins, matching the
    # previous "first filtered row" behavior) instead of filtering
    # tournament_df once per row.  Unknown ids map to NaN.
    date_lookup = tournament_df.drop_duplicates("tournament_id").set_index("tournament_id")["date"]
    date_col = df["tournament_id"].map(date_lookup)
    df.insert(loc=2, column="date", value=date_col)


def combine_files(root, pattern=None):
    """Combine all files in the root path directory.

    Parameters:
        root (str) : file path to directory of files
        pattern (str) : optional fnmatch-style file pattern to search for

    Returns:
        combined files as a single DataFrame, sorted by date
    """
    # Single walk; apply the pattern filter only when one was given
    files = [PurePath(dirpath, name)
             for dirpath, subdirs, names in os.walk(root)
             for name in names
             if pattern is None or fnmatch(name, pattern)]

    combined_files = pd.concat([pd.read_csv(f) for f in files])
    run_date_transformation(combined_files)
    return combined_files.sort_values(by="date")


def merge_tournaments(f_pattern, f_name,
                      f_path="raw_historical"):
    """Merge espn tournaments into one processed csv file.

    Appends to the output file when it already exists (without repeating
    the header row).

    Args:
        f_pattern (str) : pattern criteria to match for files
        f_name (str) : file name for merged tournaments
        f_path (str) : key of the source directory; unknown keys raise KeyError
    """
    file_p_dict = {"raw_historical": str(config.RAW_HISTORICAL),
                   "seasons_2011_2016": str(config.PGA_SEASON_2011_2016),
                   "raw_historical_dir": str(config.RAW_HISTORICAL_DIR)}
    full_path = file_p_dict[f_path]

    historical_data = combine_files(full_path, f_pattern)

    w_f_path = str(Path(config.PROCESSED_HISTORICAL, f_name))
    # Only write a header when creating the file for the first time
    write_header = not os.path.isfile(w_f_path)
    historical_data.to_csv(w_f_path, mode="a", header=write_header,
                           index=False, date_format="%Y-%m-%d")


def run_date_transformation(df):
    """Run and save date transformations for historical player data.

    Args:
        df (pd.DataFrame) : historical player data (modified in place)
    """
    espn_tourn_path = str(Path(config.RAW_DATA_DIR, "espn_tournaments_2011_2016.csv"))
    espn_tourns_df = pd.read_csv(espn_tourn_path, parse_dates=["date"])
    tournament_date_col(df, espn_tourns_df)


if __name__ == "__main__":
    merge_tournaments("*.csv", "hpd.csv")
{"hexsha": "3d3ab40a01fac9dd909986742122546a28b82820", "size": 41199, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/historical_data.py", "max_stars_repo_name": "kevin-persaud-davis/PGA-portfolio-optimizer", "max_stars_repo_head_hexsha": "e37a7a760c2ba1747a835a09da91733ca1bb8cdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/historical_data.py", "max_issues_repo_name": "kevin-persaud-davis/PGA-portfolio-optimizer", "max_issues_repo_head_hexsha": "e37a7a760c2ba1747a835a09da91733ca1bb8cdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/historical_data.py", "max_forks_repo_name": "kevin-persaud-davis/PGA-portfolio-optimizer", "max_forks_repo_head_hexsha": "e37a7a760c2ba1747a835a09da91733ca1bb8cdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9365733114, "max_line_length": 124, "alphanum_fraction": 0.5188718173, "include": true, "reason": "import numpy", "num_tokens": 10289}
import argparse import sys import tensorflow as tf import numpy as np import skimage.io as io from skimage.transform import rescale # Prepare image to network input format def prep(im): if len(im.shape)==3: return np.transpose(im,[2,0,1]).reshape((1,3,112,112))*2-1 elif len(im.shape)==4: return np.transpose(im,[0,3,1,2]).reshape((im.shape[0],3,112,112))*2-1 def main(args): print(args) sess = tf.Session() # Embedding model with tf.gfile.GFile(args.model, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def, input_map=None, return_elements=None, name="") image_input = tf.get_default_graph().get_tensor_by_name('image_input:0') keep_prob = tf.get_default_graph().get_tensor_by_name('keep_prob:0') is_train = tf.get_default_graph().get_tensor_by_name('training_mode:0') embedding = tf.get_default_graph().get_tensor_by_name('embedding:0') tfdict = {keep_prob:1.0, is_train:False} # Embedding calculation im1 = prep(rescale(io.imread(args.face1)/255.,112./600.,order=5)) im2 = prep(rescale(io.imread(args.face2)/255.,112./600.,order=5)) tfdict[image_input] = im1 emb1 = sess.run(embedding,feed_dict=tfdict) tfdict[image_input] = im2 emb2 = sess.run(embedding,feed_dict=tfdict) # Result cos_sim = np.sum(emb1 * emb2) print('Cos_sim(face1, face2) =', cos_sim) def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('face1', type=str, help='Path to the preprocessed face1.') parser.add_argument('face2', type=str, help='Path to the preprocessed face2.') parser.add_argument('model', type=str, help='Path to the model.') return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
{"hexsha": "8ae686f85448a9c16818b74e02681e53fa5b7151", "size": 2120, "ext": "py", "lang": "Python", "max_stars_repo_path": "Attack/cos_tf.py", "max_stars_repo_name": "cclauss/advhat", "max_stars_repo_head_hexsha": "ae996265a2d8481ecf7cec02d641af71668cec17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 315, "max_stars_repo_stars_event_min_datetime": "2019-08-23T09:37:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T01:22:51.000Z", "max_issues_repo_path": "Attack/cos_tf.py", "max_issues_repo_name": "cclauss/advhat", "max_issues_repo_head_hexsha": "ae996265a2d8481ecf7cec02d641af71668cec17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-08-29T03:53:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-11T13:14:55.000Z", "max_forks_repo_path": "Attack/cos_tf.py", "max_forks_repo_name": "cclauss/advhat", "max_forks_repo_head_hexsha": "ae996265a2d8481ecf7cec02d641af71668cec17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 96, "max_forks_repo_forks_event_min_datetime": "2019-08-23T10:17:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T11:03:24.000Z", "avg_line_length": 36.5517241379, "max_line_length": 82, "alphanum_fraction": 0.6037735849, "include": true, "reason": "import numpy", "num_tokens": 515}
""" Read an audio as watermark signal (hidden voice), and another as carrier signal, then the voices are composited, and finally the watermark signal is extracted from the composite signal """ import numpy as np from scipy.io import wavfile from scipy.signal import resample, hilbert, firwin from tools import MaxMinNorm, save, play import time import warnings import sys import os warnings.filterwarnings("ignore") # ignore warnings os.close(sys.stderr.fileno()) # ignore ALSA errors ## Parameters L = 16 # compression ratio of hidden voice fc = 2000 # carrier frequency fs = 8000 # standard sampling rate TS = 0.02 # unit voice duration of per processing N = round(TS*fs) # unit voice length of per processing p = 1 # embedding strength ## Watermark audio print('Read the watermark audio') Fs, Sig = wavfile.read('wav_file/watermark.wav') Sig = MaxMinNorm(Sig.astype(np.int32)) K = Sig.shape[0] // (TS*Fs) # number of unit voice T = K * TS # total time voice = Sig[:int(T*Fs), 0].T # left channel len_watermark = int(len(voice)/Fs*L*fs) watermark = resample(voice, len_watermark) ## Carrier audio print("Read the carrier audio") Fs, Sig = wavfile.read('wav_file/carrier.wav') # your carrier audio path Sig = MaxMinNorm(Sig.astype(np.int32)) if Sig.shape[0] < L*T*Fs: raise IOError('The carrier audio is not long enough') music = Sig[:int(L*T*Fs), 0].T len_carrier = int(len(music)/Fs*fs) carrier = resample(music, len_carrier) # resample of carrier ## Voice composite print("Voice composite") ## Watermark signal modulation t = np.arange(len_watermark) / fs f = np.arange(len_watermark) / len_watermark * fs hsig = hilbert(watermark) # hilbert transform msig = np.multiply(hsig, np.exp(np.pi * 2j * fc * t)) rsig = msig.real ## Carrier signal filtering B = fs / 2 / L fl = fc / (fs / 2) fh = (fc + B) / (fs / 2) coefstop = firwin(N+1, [fl,fh], pass_zero=True) # band elimination filter buf = np.zeros(2*N) fsig = np.zeros(len_carrier) for k in range(int(len_carrier//N)): buf[0:N] = buf[N:2*N] 
buf[N:2*N] = carrier[k*N:(k+1)*N] for n in range(N): fsig[k*N+n] = np.multiply(buf[n:n+N+1], coefstop[::-1]).sum(axis=0) ## Embed the watermark signal into the carrier signal sendsig = (fsig + p * rsig) / (1 + p) # composite signal sendsig = MaxMinNorm(sendsig).astype(np.float32) ## Extract watermark signal from composite signal print("Extract watermark signal") coefpass = firwin(N+1, [fl, fh], pass_zero=False) # band-pass filter coeflow = firwin(N+1, B/(fs/2)) # low pass filter buf1 = np.zeros(2*N) buf2 = np.zeros(2*N) wsig = np.zeros(len(sendsig)) dsig = np.zeros(len(sendsig)) for k in range(int(len(sendsig)//N)): buf1[0:N] = buf1[N:2*N] buf1[N:2*N] = sendsig[k*N:(k+1)*N] for n in range(N): wsig[k*N+n] = np.multiply(buf1[n:n+N+1], coefpass[::-1]).sum(axis=0) * np.cos(np.pi*2*fc*t[k*N+n]) buf2[0:N] = buf2[N:2*N] buf2[N:2*N] = wsig[k*N:(k+1)*N] for n in range(N): dsig[k*N+n] = np.multiply(buf2[n:n+N+1], coeflow[::-1]).sum(axis=0) len_watermark_rec = int(len(dsig)/(L*fs)*fs) watermark_rec = resample(dsig, len_watermark_rec) watermark_rec = MaxMinNorm(watermark_rec).astype(np.float32) ## Saving & playing the extracted watermark signal save_path = 'wav_file/watermark_rec.wav' save(watermark_rec, save_path) print("Play the extracted watermark signal") t0 = time.time() play(watermark_rec, fs) print('Total time: %.2fs' % (time.time() - t0)) ## Saving & playing the composite signal save_path = 'wav_file/sendsig.wav' save(sendsig, save_path, sample_rate=fs) print("Play the composite signal") t0 = time.time() play(sendsig, fs) print('Total time: %.2fs' % (time.time() - t0))
{"hexsha": "b60bf564efb969438c2bff0038a5cc2191362493", "size": 3684, "ext": "py", "lang": "Python", "max_stars_repo_path": "hide_with_file.py", "max_stars_repo_name": "lsccccc/SSB-voice-hidden", "max_stars_repo_head_hexsha": "0abe2084c0b46dea5029930a6d15e48fde0cfede", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hide_with_file.py", "max_issues_repo_name": "lsccccc/SSB-voice-hidden", "max_issues_repo_head_hexsha": "0abe2084c0b46dea5029930a6d15e48fde0cfede", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hide_with_file.py", "max_forks_repo_name": "lsccccc/SSB-voice-hidden", "max_forks_repo_head_hexsha": "0abe2084c0b46dea5029930a6d15e48fde0cfede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5581395349, "max_line_length": 106, "alphanum_fraction": 0.6894679696, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1136}
import sys import pydart import numpy as np print('Example: rigidChain') class DampingController: """ Add damping force to the skeleton """ def __init__(self, skel): self.skel = skel def compute(self): damping = -0.01 * self.skel.qdot for i in range(1, self.skel.ndofs, 3): damping[i] *= 0.1 return damping pydart.init() print('pydart initialization OK') data_dir = pydart.misc.example_data_dir(__file__) print('data_dir = ' + data_dir) world = pydart.create_world(1.0 / 2000.0, data_dir + '/skel/chain.skel') print('pydart create_world OK') skel = world.skels[0] skel.q = (np.random.rand(skel.ndofs) - 0.5) print('init pose = %s' % skel.q) skel.controller = DampingController(skel) if 'qt' in sys.argv: tb = pydart.qtgui.Trackball(phi=-0.0, theta=0.0, zoom=1.0, rot=[-0.02, -0.71, -0.02, 0.71], trans=[0.02, 0.09, -1.0]) pydart.qtgui.run(title='rigidChain', simulation=world, trackball=tb) else: pydart.glutgui.run(title='rigidChain', simulation=world)
{"hexsha": "fa8c22d676fde570d69767f7d6eed83fe4c5b8c3", "size": 1095, "ext": "py", "lang": "Python", "max_stars_repo_path": "DartFootProject/rigidChain/main.py", "max_stars_repo_name": "hpgit/HumanFoot", "max_stars_repo_head_hexsha": "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DartFootProject/rigidChain/main.py", "max_issues_repo_name": "hpgit/HumanFoot", "max_issues_repo_head_hexsha": "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DartFootProject/rigidChain/main.py", "max_forks_repo_name": "hpgit/HumanFoot", "max_forks_repo_head_hexsha": "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.375, "max_line_length": 72, "alphanum_fraction": 0.6237442922, "include": true, "reason": "import numpy", "num_tokens": 336}
/* * Copyright (c) 2018 Ryan Berryhill, University of Toronto * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "pme/util/clause_database.h" #include "pme/engine/variable_manager.h" #define BOOST_TEST_MODULE ClauseDatabaseTest #define BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> using namespace PME; BOOST_AUTO_TEST_CASE(test_clause_database) { VariableManager varman; ClauseDatabase db; ID a = varman.getNewID(); ID b = varman.getNewID(); Clause c0 = {a, b}; Clause c1 = {a, negate(b)}; ID act_c0 = varman.getNewID(); ID act_c1 = varman.getNewID(); db.addClause(0, act_c0, c0); BOOST_CHECK_EQUAL(db.activationOfID(0), act_c0); BOOST_CHECK_EQUAL(db.IDOfActivation(act_c0), 0); Clause c0_actual = db.clauseOf(0); std::sort(c0.begin(), c0.end()); std::sort(c0_actual.begin(), c0_actual.end()); BOOST_CHECK(c0 == c0_actual); db.addClause(1, act_c1, c1); // Clause 0 should still be the same BOOST_CHECK_EQUAL(db.activationOfID(0), act_c0); BOOST_CHECK_EQUAL(db.IDOfActivation(act_c0), 0); c0_actual = db.clauseOf(0); std::sort(c0.begin(), c0.end()); std::sort(c0_actual.begin(), c0_actual.end()); BOOST_CHECK(c0 == c0_actual); // Clause 1 should be there too BOOST_CHECK_EQUAL(db.activationOfID(1), act_c1); BOOST_CHECK_EQUAL(db.IDOfActivation(act_c1), 1); Clause c1_actual = db.clauseOf(1); std::sort(c1.begin(), c1.end()); std::sort(c1_actual.begin(), c1_actual.end()); BOOST_CHECK(c1 == c1_actual); } BOOST_AUTO_TEST_CASE(test_extra_data) { VariableManager varman; DualActivationClauseDatabase db; ID a = varman.getNewID(); ID b = varman.getNewID(); Clause c0 = {a, b}; Clause c1 = {a, negate(b)}; ID a_c0 = varman.getNewID(); ID a_c1 = varman.getNewID(); ID b_c0 = varman.getNewID(); ID b_c1 = varman.getNewID(); db.addClause(0, a_c0, c0, b_c0); db.addClause(1, a_c1, c1, b_c1); BOOST_CHECK_EQUAL(db.activationOfID(0), a_c0); BOOST_CHECK_EQUAL(db.getData(0), b_c0); BOOST_CHECK_EQUAL(db.IDOfActivation(a_c0), 0); BOOST_CHECK_EQUAL(db.activationOfID(1), a_c1); BOOST_CHECK_EQUAL(db.getData(1), b_c1); BOOST_CHECK_EQUAL(db.IDOfActivation(a_c1), 1); }
{"hexsha": "e1186a153dea4f056fe79d67219eee04d67a5059", "size": 3313, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_clause_database.cpp", "max_stars_repo_name": "ryanberryhill/pme", "max_stars_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-01-25T16:07:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-14T17:34:22.000Z", "max_issues_repo_path": "tests/test_clause_database.cpp", "max_issues_repo_name": "ryanberryhill/pme", "max_issues_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2018-08-21T22:46:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-14T17:36:31.000Z", "max_forks_repo_path": "tests/test_clause_database.cpp", "max_forks_repo_name": "ryanberryhill/pme", "max_forks_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8557692308, "max_line_length": 79, "alphanum_fraction": 0.7047992756, "num_tokens": 893}
# __all__ = ['growing_degree_day', 'root_zone_water', 'check_groundwater_table', 'root_development', 'pre_irrigation', # 'drainage', 'rainfall_partition', 'irrigation', 'infiltration', 'capillary_rise', 'germination', # 'growth_stage', 'water_stress', 'cc_development', 'cc_required_time', 'adjust_CCx', 'update_CCx_CDC', # 'canopy_cover', 'evap_layer_water_content', 'soil_evaporation', 'aeration_stress', 'transpiration', # 'groundwater_inflow', 'HIref_current_day', 'biomass_accumulation', 'temperature_stress', # 'HIadj_pre_anthesis', 'HIadj_pollination', 'HIadj_post_anthesis', 'harvest_index'] # remove functions from __all__ as they become replace by compiled equivalent __all__ = [ "pre_irrigation", "capillary_rise", "irrigation", "germination", "growth_stage", "adjust_CCx", "canopy_cover", "transpiration", "groundwater_inflow", "harvest_index", ] # Cell if __name__ == "__main__": from classes import * else: from .classes import * import numpy as np import pandas as pd from numba import njit, types, typed, float64, int64, f8, i8, b1 # from aquacrop.classes import InitCondStructType # InitCond_type_sig = InitCondStructType(fields=InitCond_spec) from numba.pycc import CC # temporary name for compiled module cc = CC("solution_aot") # This compiled function is called a few times inside other functions if __name__ != "__main__": from .solution_aot import ( _water_stress, _evap_layer_water_content, _root_zone_water, _cc_development, _update_CCx_CDC, _cc_required_time, _aeration_stress, _temperature_stress, _HIadj_pre_anthesis, _HIadj_post_anthesis, _HIadj_pollination ) # Cell # @njit() @cc.export("_growing_degree_day", "f8(i4,f8,f8,f8,f8)") def growing_degree_day(GDDmethod, Tupp, Tbase, Tmax, Tmin): """ Function to calculate number of growing degree days on current day <a href="../pdfs/ac_ref_man_3.pdf#page=28" target="_blank">Reference manual: growing degree day calculations</a> (pg. 
19-20) *Arguments:* `GDDmethod`: `int` : GDD calculation method `Tupp`: `float` : Upper temperature (degC) above which crop development no longer increases `Tbase`: `float` : Base temperature (degC) below which growth does not progress `Tmax`: `float` : Maximum tempature on current day (celcius) `Tmin`: `float` : Minimum tempature on current day (celcius) *Returns:* `GDD`: `float` : Growing degree days for current day """ ## Calculate GDDs ## if GDDmethod == 1: # Method 1 Tmean = (Tmax + Tmin) / 2 Tmean = min(Tmean, Tupp) Tmean = max(Tmean, Tbase) GDD = Tmean - Tbase elif GDDmethod == 2: # Method 2 Tmax = min(Tmax, Tupp) Tmax = max(Tmax, Tbase) Tmin = min(Tmin, Tupp) Tmin = max(Tmin, Tbase) Tmean = (Tmax + Tmin) / 2 GDD = Tmean - Tbase elif GDDmethod == 3: # Method 3 Tmax = min(Tmax, Tupp) Tmax = max(Tmax, Tbase) Tmin = min(Tmin, Tupp) Tmean = (Tmax + Tmin) / 2 Tmean = max(Tmean, Tbase) GDD = Tmean - Tbase return GDD # Cell @njit @cc.export("_root_zone_water", (SoilProfileNT_typ_sig,f8,f8[:],f8,f8,f8)) def root_zone_water( prof, InitCond_Zroot, InitCond_th, Soil_zTop, Crop_Zmin, Crop_Aer, ): """ Function to calculate actual and total available water in the rootzone at current time step <a href="../pdfs/ac_ref_man_3.pdf#page=14" target="_blank">Reference Manual: root-zone water calculations</a> (pg. 
5-8) *Arguments:* `prof`: `SoilProfileClass` : jit class Object containing soil paramaters `InitCond_Zroot`: `float` : Initial rooting depth `InitCond_th`: `np.array` : Initial water content `Soil_zTop`: `float` : Top soil depth `Crop_Zmin`: `float` : crop minimum rooting depth `Crop_Aer`: `int` : number of aeration stress days *Returns:* `WrAct`: `float` : Actual rootzone water content `Dr`: `DrClass` : Depletion objection containing rootzone and topsoil depletion `TAW`: `TAWClass` : `TAWClass` containing rootzone and topsoil total avalable water `thRZ`: `thRZClass` : thRZ object conaining rootzone water content paramaters """ ## Calculate root zone water content and available water ## # Compartments covered by the root zone rootdepth = round(np.maximum(InitCond_Zroot, Crop_Zmin), 2) comp_sto = np.argwhere(prof.dzsum >= rootdepth).flatten()[0] # Initialise counters WrAct = 0 WrS = 0 WrFC = 0 WrWP = 0 WrDry = 0 WrAer = 0 for ii in range(comp_sto + 1): # Fraction of compartment covered by root zone if prof.dzsum[ii] > rootdepth: factor = 1 - ((prof.dzsum[ii] - rootdepth) / prof.dz[ii]) else: factor = 1 # Actual water storage in root zone (mm) WrAct = WrAct + round(factor * 1000 * InitCond_th[ii] * prof.dz[ii], 2) # Water storage in root zone at saturation (mm) WrS = WrS + round(factor * 1000 * prof.th_s[ii] * prof.dz[ii], 2) # Water storage in root zone at field capacity (mm) WrFC = WrFC + round(factor * 1000 * prof.th_fc[ii] * prof.dz[ii], 2) # Water storage in root zone at permanent wilting point (mm) WrWP = WrWP + round(factor * 1000 * prof.th_wp[ii] * prof.dz[ii], 2) # Water storage in root zone at air dry (mm) WrDry = WrDry + round(factor * 1000 * prof.th_dry[ii] * prof.dz[ii], 2) # Water storage in root zone at aeration stress threshold (mm) WrAer = WrAer + round(factor * 1000 * (prof.th_s[ii] - (Crop_Aer / 100)) * prof.dz[ii], 2) if WrAct < 0: WrAct = 0 # define total available water, depletion, root zone water content # TAW = TAWClass() # Dr = DrClass() # 
thRZ = thRZClass() # Calculate total available water (m3/m3) TAW_Rz = max(WrFC - WrWP, 0.0) # Calculate soil water depletion (mm) Dr_Rz = min(WrFC - WrAct, TAW_Rz) # Actual root zone water content (m3/m3) thRZ_Act = WrAct / (rootdepth * 1000) # Root zone water content at saturation (m3/m3) thRZ_S = WrS / (rootdepth * 1000) # Root zone water content at field capacity (m3/m3) thRZ_FC = WrFC / (rootdepth * 1000) # Root zone water content at permanent wilting point (m3/m3) thRZ_WP = WrWP / (rootdepth * 1000) # Root zone water content at air dry (m3/m3) thRZ_Dry = WrDry / (rootdepth * 1000) # Root zone water content at aeration stress threshold (m3/m3) thRZ_Aer = WrAer / (rootdepth * 1000) # print('inside') # thRZ = thRZNT( # Act=thRZ_Act, # S=thRZ_S, # FC=thRZ_FC, # WP=thRZ_WP, # Dry=thRZ_Dry, # Aer=thRZ_Aer, # ) # print(thRZ) ## Calculate top soil water content and available water ## if rootdepth > Soil_zTop: # Determine compartments covered by the top soil ztopdepth = round(Soil_zTop, 2) comp_sto = np.sum(prof.dzsum <= ztopdepth) # Initialise counters WrAct_Zt = 0 WrFC_Zt = 0 WrWP_Zt = 0 # Calculate water storage in top soil assert comp_sto > 0 for ii in range(comp_sto): # Fraction of compartment covered by root zone if prof.dzsum[ii] > ztopdepth: factor = 1 - ((prof.dzsum[ii] - ztopdepth) / prof.dz[ii]) else: factor = 1 # Actual water storage in top soil (mm) WrAct_Zt = WrAct_Zt + (factor * 1000 * InitCond_th[ii] * prof.dz[ii]) # Water storage in top soil at field capacity (mm) WrFC_Zt = WrFC_Zt + (factor * 1000 * prof.th_fc[ii] * prof.dz[ii]) # Water storage in top soil at permanent wilting point (mm) WrWP_Zt = WrWP_Zt + (factor * 1000 * prof.th_wp[ii] * prof.dz[ii]) # Ensure available water in top soil is not less than zero if WrAct_Zt < 0: WrAct_Zt = 0 # Calculate total available water in top soil (m3/m3) TAW_Zt = max(WrFC_Zt - WrWP_Zt, 0) # Calculate depletion in top soil (mm) Dr_Zt = min(WrFC_Zt - WrAct_Zt, TAW_Zt) else: # Set top soil depletions and TAW to 
root zone values Dr_Zt = Dr_Rz TAW_Zt = TAW_Rz return ( WrAct, Dr_Zt, Dr_Rz, TAW_Zt, TAW_Rz, thRZ_Act, thRZ_S, thRZ_FC, thRZ_WP, thRZ_Dry, thRZ_Aer, ) # Cell @cc.export("_check_groundwater_table", (SoilProfileNT_typ_sig,f8,f8[:],f8[:],i8,f8)) def check_groundwater_table( prof, NewCond_zGW, NewCond_th, NewCond_th_fc_Adj, water_table_presence, zGW, ): """ Function to check for presence of a groundwater table, and, if present, to adjust compartment water contents and field capacities where necessary <a href="../pdfs/ac_ref_man_3.pdf#page=61" target="_blank">Reference manual: water table adjustment equations</a> (pg. 52-57) *Arguments:* `Soil`: `SoilClass` : Soil object containing soil paramaters `InitCond`: `InitCondClass` : InitCond object containing model paramaters `water_table_presence`: int : indicates if water table is present or not *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters `Soil`: `SoilClass` : Soil object containing updated soil paramaters """ ## Perform calculations (if variable water table is present) ## if water_table_presence == 1: # Update groundwater conditions for current day NewCond_zGW = zGW # Find compartment mid-points zMid = prof.zMid # Check if water table is within modelled soil profile if NewCond_zGW >= 0: if len(zMid[zMid >= NewCond_zGW]) == 0: NewCond_WTinSoil = False else: NewCond_WTinSoil = True # If water table is in soil profile, adjust water contents if NewCond_WTinSoil == True: idx = np.argwhere(zMid >= NewCond_zGW).flatten()[0] for ii in range(idx, len(prof.Comp)): NewCond_th[ii] = prof.th_s[ii] # Adjust compartment field capacity compi = len(prof.Comp) - 1 thfcAdj = np.zeros(compi + 1) # Find thFCadj for all compartments while compi >= 0: if prof.th_fc[compi] <= 0.1: Xmax = 1 else: if prof.th_fc[compi] >= 0.3: Xmax = 2 else: pF = 2 + 0.3 * (prof.th_fc[compi] - 0.1) / 0.2 Xmax = (np.exp(pF * np.log(10))) / 100 if (NewCond_zGW < 0) or ((NewCond_zGW - zMid[compi]) >= Xmax): for ii in 
range(compi): thfcAdj[ii] = prof.th_fc[ii] compi = -1 else: if prof.th_fc[compi] >= prof.th_s[compi]: thfcAdj[compi] = prof.th_fc[compi] else: if zMid[compi] >= NewCond_zGW: thfcAdj[compi] = prof.th_s[compi] else: dV = prof.th_s[compi] - prof.th_fc[compi] dFC = (dV / (Xmax * Xmax)) * ((zMid[compi] - (NewCond_zGW - Xmax)) ** 2) thfcAdj[compi] = prof.th_fc[compi] + dFC compi = compi - 1 # Store adjusted field capacity values NewCond_th_fc_Adj = thfcAdj # prof.th_fc_Adj = thfcAdj return (NewCond_th_fc_Adj, thfcAdj) # Cell # @njit() @cc.export("_root_development", (CropStructNT_type_sig,SoilProfileNT_typ_sig,f8,f8,f8,f8,f8,f8,f8[:],f8,f8,b1,f8,f8,f8,f8,b1,i8)) def root_development(Crop, prof, NewCond_DAP, NewCond_Zroot, NewCond_DelayedCDs, NewCond_GDDcum, NewCond_DelayedGDDs, NewCond_TrRatio, NewCond_th, NewCond_CC, NewCond_CC_NS, NewCond_Germination, NewCond_rCor, NewCond_Tpot, NewCond_zGW, GDD, GrowingSeason, water_table_presence): """ Function to calculate root zone expansion <a href="../pdfs/ac_ref_man_3.pdf#page=46" target="_blank">Reference Manual: root developement equations</a> (pg. 
37-41)


    *Arguments:*

    `Crop`: `CropStruct` : jit class object containing Crop paramaters

    `prof`: `SoilProfileClass` : jit class object containing soil paramaters

    `InitCond`: `InitCondClass` : InitCond object containing model paramaters

    `GDD`: `float` : Growing degree days on current day

    `GrowingSeason`: `bool` : is growing season (True or Flase)

    `water_table_presence`: `int` : water table present (True=1 or Flase=0)


    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters
    """

    # Store initial conditions for updating
    # NewCond = InitCond

    # save initial zroot
    Zroot_init = float(NewCond_Zroot) * 1.0
    Soil_nLayer = np.unique(prof.Layer).shape[0]

    # Calculate root expansion (if in growing season)
    if GrowingSeason == True:
        # If today is first day of season, root depth is equal to minimum depth
        if NewCond_DAP == 1:
            NewCond_Zroot = float(Crop.Zmin) * 1.0
            Zroot_init = float(Crop.Zmin) * 1.0

        # Adjust time for any delayed development
        if Crop.CalendarType == 1:
            # Calendar-day clock
            tAdj = NewCond_DAP - NewCond_DelayedCDs
        elif Crop.CalendarType == 2:
            # Growing-degree-day clock
            tAdj = NewCond_GDDcum - NewCond_DelayedGDDs

        # Calculate root expansion #
        Zini = Crop.Zmin * (Crop.PctZmin / 100)
        t0 = round((Crop.Emergence / 2))
        tmax = Crop.MaxRooting
        if Crop.CalendarType == 1:
            tOld = tAdj - 1
        elif Crop.CalendarType == 2:
            tOld = tAdj - GDD

        # Potential root depth on previous day
        if tOld >= tmax:
            ZrOld = Crop.Zmax
        elif tOld <= t0:
            ZrOld = Zini
        else:
            # Power-law interpolation between Zini and Zmax
            X = (tOld - t0) / (tmax - t0)
            ZrOld = Zini + (Crop.Zmax - Zini) * np.power(X, 1 / Crop.fshape_r)

        if ZrOld < Crop.Zmin:
            ZrOld = Crop.Zmin

        # Potential root depth on current day
        if tAdj >= tmax:
            Zr = Crop.Zmax
        elif tAdj <= t0:
            Zr = Zini
        else:
            X = (tAdj - t0) / (tmax - t0)
            Zr = Zini + (Crop.Zmax - Zini) * np.power(X, 1 / Crop.fshape_r)

        if Zr < Crop.Zmin:
            Zr = Crop.Zmin

        # Store Zr as potential value
        ZrPot = Zr

        # Determine rate of change
        dZr = Zr - ZrOld

        # Adjust expansion rate for presence of restrictive soil horizons
        if Zr > Crop.Zmin:
            # Find the soil layer that contains depth Zmin
            layeri = 1
            l_idx = np.argwhere(prof.Layer == layeri).flatten()
            Zsoil = prof.dz[l_idx].sum()
            while (round(Zsoil, 2) <= Crop.Zmin) and (layeri < Soil_nLayer):
                layeri = layeri + 1
                l_idx = np.argwhere(prof.Layer == layeri).flatten()
                Zsoil = Zsoil + prof.dz[l_idx].sum()

            soil_layer_dz = prof.dz[l_idx].sum()
            layer_comp = l_idx[0]
            # soil_layer = prof.Layer[layeri]
            ZrAdj = Crop.Zmin
            ZrRemain = Zr - Crop.Zmin
            deltaZ = Zsoil - Crop.Zmin
            EndProf = False
            # Walk down through layers, scaling the remaining expansion by
            # each layer's penetrability (%) until the profile bottom or an
            # impenetrable layer is reached
            while EndProf == False:
                ZrTest = ZrAdj + (ZrRemain * (prof.Penetrability[layer_comp] / 100))
                if (
                    (layeri == Soil_nLayer)
                    or (prof.Penetrability[layer_comp] == 0)
                    or (ZrTest <= Zsoil)
                ):
                    ZrOUT = ZrTest
                    EndProf = True
                else:
                    ZrAdj = Zsoil
                    ZrRemain = ZrRemain - (deltaZ / (prof.Penetrability[layer_comp] / 100))
                    layeri = layeri + 1
                    l_idx = np.argwhere(prof.Layer == layeri).flatten()
                    layer_comp = l_idx[0]
                    soil_layer_dz = prof.dz[l_idx].sum()
                    Zsoil = Zsoil + soil_layer_dz
                    deltaZ = soil_layer_dz

            # Correct Zr and dZr for effects of restrictive horizons
            Zr = ZrOUT
            dZr = Zr - ZrOld

        # Adjust rate of expansion for any stomatal water stress
        if NewCond_TrRatio < 0.9999:
            if Crop.fshape_ex >= 0:
                dZr = dZr * NewCond_TrRatio
            else:
                # Exponential stress-response shape
                fAdj = (np.exp(NewCond_TrRatio * Crop.fshape_ex) - 1) / (np.exp(Crop.fshape_ex) - 1)
                dZr = dZr * fAdj

        # print(NewCond.DAP,NewCond.th)

        # Adjust rate of root expansion for dry soil at expansion front
        if dZr > 0.001:
            # Define water stress threshold for inhibition of root expansion
            pZexp = Crop.p_up[1] + ((1 - Crop.p_up[1]) / 2)
            # Define potential new root depth
            ZiTmp = float(Zroot_init + dZr)
            # Find compartment that root zone will expand in to
            # compi_index = prof.dzsum[prof.dzsum>=ZiTmp].index[0] # have changed to index
            idx = np.argwhere(prof.dzsum >= ZiTmp).flatten()[0]
            prof = prof
            # Get TAW in compartment
            layeri = prof.Layer[idx]
            TAWprof = prof.th_fc[idx] - prof.th_wp[idx]
            # Define stress threshold
            thThr = prof.th_fc[idx] - (pZexp * TAWprof)
            # Check for stress conditions
            if NewCond_th[idx] < thThr:
                # Root expansion limited by water content at expansion front
                if NewCond_th[idx] <= prof.th_wp[idx]:
                    # Expansion fully inhibited
                    dZr = 0
                else:
                    # Expansion partially inhibited
                    Wrel = (prof.th_fc[idx] - NewCond_th[idx]) / TAWprof
                    Drel = 1 - ((1 - Wrel) / (1 - pZexp))
                    Ks = 1 - (
                        (np.exp(Drel * Crop.fshape_w[1]) - 1) / (np.exp(Crop.fshape_w[1]) - 1)
                    )
                    dZr = dZr * Ks

        # Adjust for early senescence
        if (NewCond_CC <= 0) and (NewCond_CC_NS > 0.5):
            dZr = 0

        # Adjust root expansion for failure to germinate (roots cannot expand
        # if crop has not germinated)
        if NewCond_Germination == False:
            dZr = 0

        # Get new rooting depth
        NewCond_Zroot = float(Zroot_init + dZr)

        # Adjust root density if deepening is restricted due to dry subsoil
        # and/or restrictive layers
        if NewCond_Zroot < ZrPot:
            NewCond_rCor = (
                2 * (ZrPot / NewCond_Zroot) * ((Crop.SxTop + Crop.SxBot) / 2) - Crop.SxTop
            ) / Crop.SxBot

            if NewCond_Tpot > 0:
                NewCond_rCor = NewCond_rCor * NewCond_TrRatio
                if NewCond_rCor < 1:
                    NewCond_rCor = 1

        else:
            NewCond_rCor = 1

        # Limit rooting depth if groundwater table is present (roots cannot
        # develop below the water table)
        if (water_table_presence == 1) and (NewCond_zGW > 0):
            if NewCond_Zroot > NewCond_zGW:
                NewCond_Zroot = float(NewCond_zGW)
                if NewCond_Zroot < Crop.Zmin:
                    NewCond_Zroot = float(Crop.Zmin)

    else:
        # No root system outside of the growing season
        NewCond_Zroot = 0

    return NewCond_Zroot

# Cell
# @njit()
def pre_irrigation(prof, Crop, InitCond, GrowingSeason, IrrMngt):
    """
    Function to calculate pre-irrigation when in net irrigation mode

    <a href="../pdfs/ac_ref_man_1.pdf#page=40" target="_blank">Reference Manual: Net irrigation description</a> (pg.
31) *Arguments:* `prof`: `SoilProfileClass` : Soil object containing soil paramaters `Crop`: `CropStruct` : Crop object containing Crop paramaters `InitCond`: `InitCondClass` : InitCond object containing model paramaters `GrowingSeason`: `bool` : is growing season (True or Flase) `IrrMngt`: ``IrrMngtStruct` object containing irrigation management paramaters *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters `PreIrr`: `float` : Pre-Irrigaiton applied on current day mm """ # Store initial conditions for updating ## NewCond = InitCond ## Calculate pre-irrigation needs ## if GrowingSeason == True: if (IrrMngt.IrrMethod != 4) or (NewCond.DAP != 1): # No pre-irrigation as not in net irrigation mode or not on first day # of the growing season PreIrr = 0 else: # Determine compartments covered by the root zone rootdepth = round(max(NewCond.Zroot, Crop.Zmin), 2) compRz = np.argwhere(prof.dzsum >= rootdepth).flatten()[0] PreIrr = 0 for ii in range(int(compRz)): # Determine critical water content threshold thCrit = prof.th_wp[ii] + ( (IrrMngt.NetIrrSMT / 100) * (prof.th_fc[ii] - prof.th_wp[ii]) ) # Check if pre-irrigation is required if NewCond.th[ii] < thCrit: PreIrr = PreIrr + ((thCrit - NewCond.th[ii]) * 1000 * prof.dz[ii]) NewCond.th[ii] = thCrit else: PreIrr = 0 return NewCond, PreIrr # Cell # @njit() @cc.export("_drainage", (SoilProfileNT_typ_sig,f8[:],f8[:])) def drainage( prof, th_init, th_fc_Adj_init ): """ Function to redistribute stored soil water <a href="../pdfs/ac_ref_man_3.pdf#page=51" target="_blank">Reference Manual: drainage calculations</a> (pg. 
42-65)


    *Arguments:*

    `prof`: `SoilProfileClass` : jit class object containing soil paramaters

    `th_init`: `np.array` : initial water content

    `th_fc_Adj_init`: `np.array` : adjusted water content at field capacity


    *Returns:*

    `thnew`: `np.array` : updated water content in each compartment

    `DeepPerc`:: `float` : Total Deep Percolation

    `FluxOut`:: `array-like` : Flux of water out of each compartment

    """

    # Store initial conditions in new structure for updating %%
    # NewCond = InitCond

    # th_init = InitCond.th
    # th_fc_Adj_init = InitCond.th_fc_Adj

    #  Preallocate arrays %%
    thnew = np.zeros(th_init.shape[0])
    FluxOut = np.zeros(th_init.shape[0])

    # Initialise counters and states %%
    drainsum = 0

    # Calculate drainage and updated water contents %%
    # Sweep from the top compartment downwards, carrying the cumulative
    # drainage (drainsum, mm) from the compartments above.
    for ii in range(th_init.shape[0]):
        # Specify layer for compartment
        cth_fc = prof.th_fc[ii]
        cth_s = prof.th_s[ii]
        ctau = prof.tau[ii]
        cdz = prof.dz[ii]
        cdzsum = prof.dzsum[ii]
        cKsat = prof.Ksat[ii]

        # Calculate drainage ability of compartment ii
        if th_init[ii] <= th_fc_Adj_init[ii]:
            # At or below (adjusted) field capacity: no drainage
            dthdt = 0

        elif th_init[ii] >= cth_s:
            # At or above saturation: drain at the maximum rate tau*(th_s-th_fc)
            dthdt = ctau * (cth_s - cth_fc)

            if (th_init[ii] - dthdt) < th_fc_Adj_init[ii]:
                dthdt = th_init[ii] - th_fc_Adj_init[ii]

        else:
            # Between field capacity and saturation: exponential drainage curve
            dthdt = (
                ctau
                * (cth_s - cth_fc)
                * ((np.exp(th_init[ii] - cth_fc) - 1) / (np.exp(cth_s - cth_fc) - 1))
            )

            if (th_init[ii] - dthdt) < th_fc_Adj_init[ii]:
                dthdt = th_init[ii] - th_fc_Adj_init[ii]

        # Drainage from compartment ii (mm)
        draincomp = dthdt * cdz * 1000

        # Check drainage ability of compartment ii against cumulative drainage
        # from compartments above
        excess = 0
        prethick = cdzsum - cdz
        drainmax = dthdt * 1000 * prethick
        if drainsum <= drainmax:
            drainability = True
        else:
            drainability = False

        # Drain compartment ii
        if drainability == True:
            # No storage needed. Update water content in compartment ii
            thnew[ii] = th_init[ii] - dthdt

            # Update cumulative drainage (mm)
            drainsum = drainsum + draincomp

            # Restrict cumulative drainage to saturated hydraulic
            # conductivity and adjust excess drainage flow
            if drainsum > cKsat:
                excess = excess + drainsum - cKsat
                drainsum = cKsat

        elif drainability == False:
            # Storage is needed
            dthdt = drainsum / (1000 * prethick)

            # Calculate value of theta (thX) needed to provide a
            # drainage ability equal to cumulative drainage
            if dthdt <= 0:
                thX = th_fc_Adj_init[ii]
            elif ctau > 0:
                # Invert the exponential drainage curve for thX
                A = 1 + ((dthdt * (np.exp(cth_s - cth_fc) - 1)) / (ctau * (cth_s - cth_fc)))
                thX = cth_fc + np.log(A)
                if thX < th_fc_Adj_init[ii]:
                    thX = th_fc_Adj_init[ii]
            else:
                thX = cth_s + 0.01

            # Check thX against hydraulic properties of current soil layer
            if thX <= cth_s:
                # Increase compartment ii water content with cumulative
                # drainage
                thnew[ii] = th_init[ii] + (drainsum / (1000 * cdz))

                # Check updated water content against thX
                if thnew[ii] > thX:
                    # Cumulative drainage is the drainage difference
                    # between theta_x and new theta plus drainage ability
                    # at theta_x.
                    drainsum = (thnew[ii] - thX) * 1000 * cdz
                    # Calculate drainage ability for thX
                    if thX <= th_fc_Adj_init[ii]:
                        dthdt = 0
                    elif thX >= cth_s:
                        dthdt = ctau * (cth_s - cth_fc)
                        if (thX - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thX - th_fc_Adj_init[ii]
                    else:
                        dthdt = (
                            ctau
                            * (cth_s - cth_fc)
                            * ((np.exp(thX - cth_fc) - 1) / (np.exp(cth_s - cth_fc) - 1))
                        )
                        if (thX - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thX - th_fc_Adj_init[ii]

                    # Update drainage total
                    drainsum = drainsum + (dthdt * 1000 * cdz)
                    # Restrict cumulative drainage to saturated hydraulic
                    # conductivity and adjust excess drainage flow
                    if drainsum > cKsat:
                        excess = excess + drainsum - cKsat
                        drainsum = cKsat

                    # Update water content
                    thnew[ii] = thX - dthdt

                elif thnew[ii] > th_fc_Adj_init[ii]:
                    # Calculate drainage ability for updated water content
                    if thnew[ii] <= th_fc_Adj_init[ii]:
                        dthdt = 0
                    elif thnew[ii] >= cth_s:
                        dthdt = ctau * (cth_s - cth_fc)
                        if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thnew[ii] - th_fc_Adj_init[ii]
                    else:
                        dthdt = (
                            ctau
                            * (cth_s - cth_fc)
                            * ((np.exp(thnew[ii] - cth_fc) - 1) / (np.exp(cth_s - cth_fc) - 1))
                        )
                        if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thnew[ii] - th_fc_Adj_init[ii]

                    # Update water content in compartment ii
                    thnew[ii] = thnew[ii] - dthdt

                    # Update cumulative drainage
                    drainsum = dthdt * 1000 * cdz

                    # Restrict cumulative drainage to saturated hydraulic
                    # conductivity and adjust excess drainage flow
                    if drainsum > cKsat:
                        excess = excess + drainsum - cKsat
                        drainsum = cKsat

                else:
                    # Drainage and cumulative drainage are zero as water
                    # content has not risen above field capacity in
                    # compartment ii.
                    drainsum = 0

            elif thX > cth_s:
                # Increase water content in compartment ii with cumulative
                # drainage from above
                thnew[ii] = th_init[ii] + (drainsum / (1000 * cdz))

                # Check new water content against hydraulic properties of soil
                # layer
                if thnew[ii] <= cth_s:

                    if thnew[ii] > th_fc_Adj_init[ii]:
                        # Calculate new drainage ability
                        if thnew[ii] <= th_fc_Adj_init[ii]:
                            dthdt = 0
                        elif thnew[ii] >= cth_s:
                            dthdt = ctau * (cth_s - cth_fc)
                            if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                                dthdt = thnew[ii] - th_fc_Adj_init[ii]
                        else:
                            dthdt = (
                                ctau
                                * (cth_s - cth_fc)
                                * ((np.exp(thnew[ii] - cth_fc) - 1) / (np.exp(cth_s - cth_fc) - 1))
                            )
                            if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                                dthdt = thnew[ii] - th_fc_Adj_init[ii]

                        # Update water content in compartment ii
                        thnew[ii] = thnew[ii] - dthdt

                        # Update cumulative drainage
                        drainsum = dthdt * 1000 * cdz

                        # Restrict cumulative drainage to saturated hydraulic
                        # conductivity and adjust excess drainage flow
                        if drainsum > cKsat:
                            excess = excess + drainsum - cKsat
                            drainsum = cKsat

                    else:
                        drainsum = 0

                elif thnew[ii] > cth_s:
                    # Calculate excess drainage above saturation
                    excess = (thnew[ii] - cth_s) * 1000 * cdz
                    # Calculate drainage ability for updated water content
                    if thnew[ii] <= th_fc_Adj_init[ii]:
                        dthdt = 0
                    elif thnew[ii] >= cth_s:
                        dthdt = ctau * (cth_s - cth_fc)
                        if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thnew[ii] - th_fc_Adj_init[ii]
                    else:
                        dthdt = (
                            ctau
                            * (cth_s - cth_fc)
                            * ((np.exp(thnew[ii] - cth_fc) - 1) / (np.exp(cth_s - cth_fc) - 1))
                        )
                        if (thnew[ii] - dthdt) < th_fc_Adj_init[ii]:
                            dthdt = thnew[ii] - th_fc_Adj_init[ii]

                    # Update water content in compartment ii
                    thnew[ii] = cth_s - dthdt

                    # Update drainage from compartment ii
                    draincomp = dthdt * 1000 * cdz
                    # Update maximum drainage
                    drainmax = dthdt * 1000 * prethick

                    # Update excess drainage
                    if drainmax > excess:
                        drainmax = excess

                    excess = excess - drainmax

                    # Update drainsum and restrict to saturated hydraulic
                    # conductivity of soil layer
                    drainsum = draincomp + drainmax
                    if drainsum > cKsat:
                        excess = excess + drainsum - cKsat
                        drainsum = cKsat

        # Store output flux from compartment ii
        FluxOut[ii] = drainsum

        # Redistribute excess in compartment above
        if excess > 0:
            precomp = ii + 1
            while (excess > 0) and (precomp != 0):
                # Update compartment counter
                precomp = precomp - 1
                # Update layer counter
                # precompdf = Soil.Profile.Comp[precomp]

                # Update flux from compartment
                if precomp < ii:
                    FluxOut[precomp] = FluxOut[precomp] - excess

                # Increase water content to store excess
                thnew[precomp] = thnew[precomp] + (excess / (1000 * prof.dz[precomp]))

                # Limit water content to saturation and adjust excess counter
                if thnew[precomp] > prof.th_s[precomp]:
                    excess = (thnew[precomp] - prof.th_s[precomp]) * 1000 * prof.dz[precomp]
                    thnew[precomp] = prof.th_s[precomp]
                else:
                    excess = 0

    ## Update conditions and outputs ##
    # Total deep percolation (mm): whatever drained past the bottom compartment
    DeepPerc = drainsum
    # Water contents
    # NewCond.th = thnew

    return thnew, DeepPerc, FluxOut

# Cell
# @njit()
@cc.export("_rainfall_partition", (f8,f8[:],i8,f8,f8,f8,f8,f8,f8,f8,f8,SoilProfileNT_typ_sig))
def rainfall_partition(
    P,
    InitCond_th,
    NewCond_DaySubmerged,
    FieldMngt_SRinhb,
    FieldMngt_Bunds,
    FieldMngt_zBund,
    FieldMngt_CNadjPct,
    Soil_CN,
    Soil_AdjCN,
    Soil_zCN,
    Soil_nComp,
    prof,
):
    """
    Function to partition rainfall into surface runoff and infiltration using the curve number approach

    <a href="../pdfs/ac_ref_man_3.pdf#page=57" target="_blank">Reference Manual: rainfall partition calculations</a> (pg.
48-51)


    *Arguments:*

    `P`: `float` : Percipitation on current day

    `InitCond`: `InitCondClass` : InitCond object containing model paramaters

    `FieldMngt`: `FieldMngtStruct` : field management params

    `Soil_CN`: `float` : curve number

    `Soil_AdjCN`: `float` : adjusted curve number

    `Soil_zCN`: `float` :

    `Soil_nComp`: `float` : number of compartments

    `prof`: `SoilProfileClass` : Soil object


    *Returns:*

    `Runoff`: `float` : Total Suface Runoff

    `Infl`: `float` : Total Infiltration

    `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters

    """

    # can probs make this faster by doing a if P=0 loop

    ## Store initial conditions for updating ##
    # NewCond = InitCond

    ## Calculate runoff ##
    if (FieldMngt_SRinhb == False) and ((FieldMngt_Bunds == False) or (FieldMngt_zBund < 0.001)):
        # Surface runoff is not inhibited and no soil bunds are on field
        # Reset submerged days
        NewCond_DaySubmerged = 0
        # Adjust curve number for field management practices
        CN = Soil_CN * (1 + (FieldMngt_CNadjPct / 100))
        if Soil_AdjCN == 1:
            # Adjust CN for antecedent moisture
            # Calculate upper and lower curve number bounds
            CNbot = round(
                1.4 * (np.exp(-14 * np.log(10)))
                + (0.507 * CN)
                - (0.00374 * CN ** 2)
                + (0.0000867 * CN ** 3)
            )
            CNtop = round(
                5.6 * (np.exp(-14 * np.log(10)))
                + (2.33 * CN)
                - (0.0209 * CN ** 2)
                + (0.000076 * CN ** 3)
            )
            # Check which compartment cover depth of top soil used to adjust
            # curve number
            comp_sto_array = prof.dzsum[prof.dzsum >= Soil_zCN]
            if comp_sto_array.shape[0] == 0:
                comp_sto = int(Soil_nComp)
            else:
                comp_sto = int(Soil_nComp - comp_sto_array.shape[0])

            # Calculate weighting factors by compartment
            xx = 0
            wrel = np.zeros(comp_sto)
            for ii in range(comp_sto):
                if prof.dzsum[ii] > Soil_zCN:
                    # NOTE(review): this mutates the shared soil profile
                    # (prof.dzsum) in place — the clamp persists to later
                    # calls/functions that use prof. Confirm this is intended;
                    # a local variable would avoid the side effect.
                    prof.dzsum[ii] = Soil_zCN

                wx = 1.016 * (1 - np.exp(-4.16 * (prof.dzsum[ii] / Soil_zCN)))
                wrel[ii] = wx - xx
                if wrel[ii] < 0:
                    wrel[ii] = 0
                elif wrel[ii] > 1:
                    wrel[ii] = 1

                xx = wx

            # Calculate relative wetness of top soil
            wet_top = 0
            # prof = prof

            for ii in range(comp_sto):
                th = max(prof.th_wp[ii], InitCond_th[ii])
                wet_top = wet_top + (
                    wrel[ii] * ((th - prof.th_wp[ii]) / (prof.th_fc[ii] - prof.th_wp[ii]))
                )

            # Calculate adjusted curve number
            if wet_top > 1:
                wet_top = 1
            elif wet_top < 0:
                wet_top = 0

            CN = round(CNbot + (CNtop - CNbot) * wet_top)

        # Partition rainfall into runoff and infiltration (mm)
        S = (25400 / CN) - 254
        term = P - ((5 / 100) * S)

        if term <= 0:
            # All precipitation infiltrates; no runoff
            Runoff = 0
            Infl = P
        else:
            # Curve-number runoff equation
            Runoff = (term ** 2) / (P + (1 - (5 / 100)) * S)
            Infl = P - Runoff

    else:
        # Bunds on field, therefore no surface runoff
        Runoff = 0
        Infl = P

    return Runoff, Infl, NewCond_DaySubmerged

# Cell
# @njit()
# @cc.export("_irrigation", (i8,f8[:],f8,f8,i8,f8[:],f8,f8,f8,f8,f8,f8[:],i8,i8,CropStructNT_type_sig,SoilProfileNT_typ_sig,f8,b1,f8,f8))
# @njit
def irrigation(
    IrrMngt_IrrMethod,
    IrrMngt_SMT,
    IrrMngt_AppEff,
    IrrMngt_MaxIrr,
    IrrMngt_IrrInterval,
    IrrMngt_Schedule,
    IrrMngt_depth,
    IrrMngt_MaxIrrSeason,
    NewCond_GrowthStage,
    NewCond_IrrCum,
    NewCond_Epot,
    NewCond_Tpot,
    NewCond_Zroot,
    NewCond_th,
    NewCond_DAP,
    NewCond_TimeStepCounter,
    Crop,
    prof,
    Soil_zTop,
    GrowingSeason,
    Rain,
    Runoff,
):
    """
    Function to get irrigation depth for current day

    <a href="../pdfs/ac_ref_man_1.pdf#page=40" target="_blank">Reference Manual: irrigation description</a> (pg.
31-32)


    *Arguments:*

    `InitCond`: `InitCondClass` : InitCond object containing model paramaters

    `IrrMngt`: `IrrMngtStruct`: jit class object containing irrigation management paramaters

    `Crop`: `CropClass` : Crop object containing Crop paramaters

    `Soil`: `SoilClass` : Soil object containing soil paramaters

    `GrowingSeason`: `bool` : is growing season (True or Flase)

    `Rain`: `float` : daily precipitation mm

    `Runoff`: `float` : surface runoff on current day


    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters

    `Irr`: `float` : Irrigaiton applied on current day mm

    """

    ## Store intial conditions for updating ##
    # NewCond = InitCond

    ## Determine irrigation depth (mm/day) to be applied ##
    if GrowingSeason == True:
        # Calculate root zone water content and depletion
        # TAW_ = TAWClass()
        # Dr_ = DrClass()
        # thRZ = thRZClass()
        (
            WrAct,
            Dr_Zt,
            Dr_Rz,
            TAW_Zt,
            TAW_Rz,
            thRZ_Act,
            thRZ_S,
            thRZ_FC,
            thRZ_WP,
            thRZ_Dry,
            thRZ_Aer,
        ) = _root_zone_water(
            prof,
            float(NewCond_Zroot),
            NewCond_th,
            Soil_zTop,
            float(Crop.Zmin),
            Crop.Aer,
        )

        # WrAct,Dr_,TAW_,thRZ = root_zone_water(prof,float(NewCond.Zroot),NewCond.th,Soil_zTop,float(Crop.Zmin),Crop.Aer)
        # Use root zone depletions and TAW only for triggering irrigation
        Dr = Dr_Rz
        TAW = TAW_Rz

        # Determine adjustment for inflows and outflows on current day #
        if thRZ_Act > thRZ_FC:
            # Water held above field capacity (mm)
            rootdepth = max(NewCond_Zroot, Crop.Zmin)
            AbvFc = (thRZ_Act - thRZ_FC) * 1000 * rootdepth
        else:
            AbvFc = 0

        WCadj = NewCond_Tpot + NewCond_Epot - Rain + Runoff - AbvFc

        NewCond_Depletion = Dr + WCadj
        # NOTE(review): NewCond_TAW can be 0 (e.g. zero root depth /
        # degenerate profile), which makes the method-1 division below raise
        # ZeroDivisionError — confirm callers guarantee TAW > 0.
        NewCond_TAW = TAW

        # Update growth stage if it is first day of a growing season
        if NewCond_DAP == 1:
            NewCond_GrowthStage = 1

        if IrrMngt_IrrMethod == 0:
            # Method 0: rainfed (no irrigation)
            Irr = 0

        elif IrrMngt_IrrMethod == 1:
            # Method 1: soil-moisture-threshold triggering (SMT per growth stage)
            Dr = NewCond_Depletion / NewCond_TAW
            index = int(NewCond_GrowthStage) - 1

            if Dr > 1 - IrrMngt_SMT[index] / 100:
                # Irrigation occurs
                IrrReq = max(0, NewCond_Depletion)
                # Adjust irrigation requirements for application efficiency
                EffAdj = ((100 - IrrMngt_AppEff) + 100) / 100
                IrrReq = IrrReq * EffAdj
                # Limit irrigation to maximum depth
                Irr = min(IrrMngt_MaxIrr, IrrReq)
            else:
                Irr = 0

        elif IrrMngt_IrrMethod == 2:
            # Method 2: irrigation on a fixed interval
            Dr = NewCond_Depletion
            # Get number of days in growing season so far (subtract 1 so that
            # always irrigate first on day 1 of each growing season)
            nDays = NewCond_DAP - 1

            if nDays % IrrMngt_IrrInterval == 0:
                # Irrigation occurs
                IrrReq = max(0, Dr)
                # Adjust irrigation requirements for application efficiency
                EffAdj = ((100 - IrrMngt_AppEff) + 100) / 100
                IrrReq = IrrReq * EffAdj
                # Limit irrigation to maximum depth
                Irr = min(IrrMngt_MaxIrr, IrrReq)
            else:
                # No irrigation
                Irr = 0

        elif IrrMngt_IrrMethod == 3:
            # Method 3: pre-defined irrigation schedule
            # Get current date
            idx = NewCond_TimeStepCounter
            # Find irrigation value corresponding to current date
            Irr = IrrMngt_Schedule[idx]

            assert Irr >= 0

            Irr = min(IrrMngt_MaxIrr, Irr)

        elif IrrMngt_IrrMethod == 4:
            # Method 4: net irrigation
            # Net irrigation calculation performed after transpiration, so
            # irrigation is zero here

            Irr = 0

        elif IrrMngt_IrrMethod == 5:
            # depth applied each day (usually specified outside of model)
            Irr = min(IrrMngt_MaxIrr, IrrMngt_depth)

        # else:
        #     assert 1 ==2, f'somethings gone wrong in irrigation method:{IrrMngt.IrrMethod}'

        Irr = max(0, Irr)

    elif GrowingSeason == False:
        # No irrigation outside growing season; reset seasonal counters
        Irr = 0.
        NewCond_IrrCum = 0.
        NewCond_Depletion = 0.
        NewCond_TAW = 0.

    # Cap today's irrigation so the seasonal total never exceeds MaxIrrSeason
    if NewCond_IrrCum + Irr > IrrMngt_MaxIrrSeason:
        Irr = max(0, IrrMngt_MaxIrrSeason - NewCond_IrrCum)

    # Update cumulative irrigation counter for growing season
    NewCond_IrrCum = NewCond_IrrCum + Irr

    return NewCond_Depletion,NewCond_TAW,NewCond_IrrCum, Irr

# Cell
# @njit()
@cc.export("_infiltration", (SoilProfileNT_typ_sig,f8,f8[:],f8[:],f8,f8,f8,b1,f8,f8[:],f8,f8,b1))
def infiltration(
    prof,
    NewCond_SurfaceStorage,
    NewCond_th_fc_Adj,
    NewCond_th,
    Infl,
    Irr,
    IrrMngt_AppEff,
    FieldMngt_Bunds,
    FieldMngt_zBund,
    FluxOut,
    DeepPerc0,
    Runoff0,
    GrowingSeason,
):
    """
    Function to infiltrate incoming water (rainfall and irrigation)

    <a href="../pdfs/ac_ref_man_3.pdf#page=51" target="_blank">Reference Manual: drainage calculations</a> (pg. 42-65)



    *Arguments:*

    `prof`: `SoilProfileClass` : Soil object containing soil paramaters

    `InitCond`: `InitCondClass` : InitCond object containing model paramaters

    `Infl`: `float` : Infiltration so far

    `Irr`: `float` : Irrigation on current day

    `IrrMngt_AppEff`: `float`: irrigation application efficiency

    `FieldMngt`: `FieldMngtStruct` : field management params

    `FluxOut`: `np.array` : flux of water out of each compartment

    `DeepPerc0`: `float` : initial Deep Percolation

    `Runoff0`: `float` : initial Surface Runoff

    `GrowingSeason`:: `bool` : is growing season (True or Flase)


    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters

    `DeepPerc`:: `float` : Total Deep Percolation

    `RunoffTot`: `float` : Total surface Runoff

    `Infl`: `float` : Infiltration on current day

    `FluxOut`: `np.array` : flux of water out of each compartment

    """

    ## Store initial conditions in new structure for updating ##
    # NewCond = InitCond
    # `* 1` forces copies of the incoming arrays/scalars so the "initial"
    # values survive the in-place updates below
    InitCond_SurfaceStorage = NewCond_SurfaceStorage*1
    InitCond_th_fc_Adj = NewCond_th_fc_Adj*1
    InitCond_th = NewCond_th*1

    thnew = NewCond_th*1.
    Soil_nComp = thnew.shape[0]

    ## Update infiltration rate for irrigation ##
    # Note: irrigation amount adjusted for specified application efficiency
    if GrowingSeason == True:
        Infl = Infl + (Irr * (IrrMngt_AppEff / 100))

    assert Infl >= 0

    ## Determine surface storage (if bunds are present) ##
    if FieldMngt_Bunds:
        # Bunds on field
        # NOTE(review): if FieldMngt_zBund <= 0.001 (or InflTot <= 0 below),
        # ToStore/RunoffIni are never assigned in this branch, and their use
        # further down raises NameError — confirm callers exclude this case.
        if FieldMngt_zBund > 0.001:
            # Bund height large enough to be considered
            InflTot = Infl + NewCond_SurfaceStorage
            if InflTot > 0:
                # Update surface storage and infiltration storage
                if InflTot > prof.Ksat[0]:
                    # Infiltration limited by saturated hydraulic conductivity
                    # of surface soil layer
                    ToStore = prof.Ksat[0]
                    # Additional water ponds on surface
                    NewCond_SurfaceStorage = InflTot - prof.Ksat[0]
                else:
                    # All water infiltrates
                    ToStore = InflTot
                    # Reset surface storage depth to zero
                    NewCond_SurfaceStorage = 0

                # Calculate additional runoff
                if NewCond_SurfaceStorage > (FieldMngt_zBund * 1000):
                    # Water overtops bunds and runs off
                    RunoffIni = NewCond_SurfaceStorage - (FieldMngt_zBund * 1000)
                    # Surface storage equal to bund height
                    NewCond_SurfaceStorage = FieldMngt_zBund * 1000
                else:
                    # No overtopping of bunds
                    RunoffIni = 0

            else:
                # No storage or runoff
                ToStore = 0
                RunoffIni = 0

    elif FieldMngt_Bunds == False:
        # No bunds on field
        if Infl > prof.Ksat[0]:
            # Infiltration limited by saturated hydraulic conductivity of top
            # soil layer
            ToStore = prof.Ksat[0]
            # Additional water runs off
            RunoffIni = Infl - prof.Ksat[0]
        else:
            # All water infiltrates
            ToStore = Infl
            RunoffIni = 0

        # Update surface storage
        NewCond_SurfaceStorage = 0
        # Add any water remaining behind bunds to surface runoff (needed for
        # days when bunds are removed to maintain water balance)
        RunoffIni = RunoffIni + InitCond_SurfaceStorage

    ## Initialise counters
    ii = -1
    Runoff = 0
    ## Infiltrate incoming water ##
    if ToStore > 0:
        # Push ToStore downwards compartment by compartment until it is all
        # stored, becomes runoff, or leaves the bottom as deep percolation
        while (ToStore > 0) and (ii < Soil_nComp - 1):
            # Update compartment counter
            ii = ii + 1
            # Get soil layer

            # Calculate saturated drainage ability
            dthdtS = prof.tau[ii] * (prof.th_s[ii] - prof.th_fc[ii])
            # Calculate drainage factor
            factor = prof.Ksat[ii] / (dthdtS * 1000 * prof.dz[ii])
            # Calculate drainage ability required
            dthdt0 = ToStore / (1000 * prof.dz[ii])

            # Check drainage ability
            if dthdt0 < dthdtS:
                # Calculate water content, thX, needed to meet drainage dthdt0
                if dthdt0 <= 0:
                    theta0 = InitCond_th_fc_Adj[ii]
                else:
                    # Invert the exponential drainage curve
                    A = 1 + (
                        (dthdt0 * (np.exp(prof.th_s[ii] - prof.th_fc[ii]) - 1))
                        / (prof.tau[ii] * (prof.th_s[ii] - prof.th_fc[ii]))
                    )
                    theta0 = prof.th_fc[ii] + np.log(A)

                # Limit thX to between saturation and field capacity
                if theta0 > prof.th_s[ii]:
                    theta0 = prof.th_s[ii]
                elif theta0 <= InitCond_th_fc_Adj[ii]:
                    theta0 = InitCond_th_fc_Adj[ii]
                    dthdt0 = 0

            else:
                # Limit water content and drainage to saturation
                theta0 = prof.th_s[ii]
                dthdt0 = dthdtS

            # Calculate maximum water flow through compartment ii
            drainmax = factor * dthdt0 * 1000 * prof.dz[ii]
            # Calculate total drainage from compartment ii
            drainage = drainmax + FluxOut[ii]
            # Limit drainage to saturated hydraulic conductivity
            if drainage > prof.Ksat[ii]:
                drainmax = prof.Ksat[ii] - FluxOut[ii]

            # Calculate difference between threshold and current water contents
            diff = theta0 - InitCond_th[ii]

            if diff > 0:
                # Increase water content of compartment ii
                thnew[ii] = thnew[ii] + (ToStore / (1000 * prof.dz[ii]))
                if thnew[ii] > theta0:
                    # Water remaining that can infiltrate to compartments below
                    ToStore = (thnew[ii] - theta0) * 1000 * prof.dz[ii]
                    thnew[ii] = theta0
                else:
                    # All infiltrating water has been stored
                    ToStore = 0

            # Update outflow from current compartment (drainage + infiltration
            # flows)
            FluxOut[ii] = FluxOut[ii] + ToStore

            # Calculate back-up of water into compartments above
            excess = ToStore - drainmax
            if excess < 0:
                excess = 0

            # Update water to store
            ToStore = ToStore - excess

            # Redistribute excess to compartments above
            if excess > 0:
                precomp = ii + 1
                while (excess > 0) and (precomp != 0):
                    # Keep storing in compartments above until soil surface is
                    # reached
                    # Update compartment counter
                    precomp = precomp - 1
                    # Update layer number
                    # Update outflow from compartment
                    FluxOut[precomp] = FluxOut[precomp] - excess
                    # Update water content
                    thnew[precomp] = thnew[precomp] + (excess / (prof.dz[precomp] * 1000))
                    # Limit water content to saturation
                    if thnew[precomp] > prof.th_s[precomp]:
                        # Update excess to store
                        excess = (thnew[precomp] - prof.th_s[precomp]) * 1000 * prof.dz[precomp]
                        # Set water content to saturation
                        thnew[precomp] = prof.th_s[precomp]
                    else:
                        # All excess stored
                        excess = 0

        if excess > 0:
            # Any leftover water not stored becomes runoff
            Runoff = Runoff + excess

        # Infiltration left to store after bottom compartment becomes deep
        # percolation (mm)
        DeepPerc = ToStore
    else:
        # No infiltration
        DeepPerc = 0
        Runoff = 0

    ## Update total runoff ##
    Runoff = Runoff + RunoffIni

    ## Update surface storage (if bunds are present) ##
    if Runoff > RunoffIni:
        if FieldMngt_Bunds:
            if FieldMngt_zBund > 0.001:
                # Increase surface storage
                NewCond_SurfaceStorage = NewCond_SurfaceStorage + (Runoff - RunoffIni)
                # Limit surface storage to bund height
                if NewCond_SurfaceStorage > (FieldMngt_zBund * 1000):
                    # Additonal water above top of bunds becomes runoff
                    Runoff = RunoffIni + (NewCond_SurfaceStorage - (FieldMngt_zBund * 1000))
                    # Set surface storage to bund height
                    NewCond_SurfaceStorage = FieldMngt_zBund * 1000
                else:
                    # No additional overtopping of bunds
                    Runoff = RunoffIni

    ## Store updated water contents ##
    NewCond_th = thnew

    ## Update deep percolation, surface runoff, and infiltration values ##
    DeepPerc = DeepPerc + DeepPerc0
    Infl = Infl - Runoff
    RunoffTot = Runoff + Runoff0

    return NewCond_th,NewCond_SurfaceStorage, DeepPerc, RunoffTot, Infl, FluxOut

# Cell
# @njit()
def capillary_rise(prof, Soil_nLayer, Soil_fshape_cr, NewCond, FluxOut, water_table_presence):
    """
    Function to calculate capillary rise from a shallow groundwater table

    <a href="../pdfs/ac_ref_man_3.pdf#page=61" target="_blank">Reference Manual: capillary rise calculations</a> (pg.
52-61) *Arguments:* `Soil`: `SoilClass` : Soil object `NewCond`: `InitCondClass` : InitCond object containing model paramaters `FluxOut`: `np.array` : FLux of water out of each soil compartment `water_table_presence`: `int` : WaterTable present (1:yes, 0:no) *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters `CrTot`: `float` : Total Capillary rise """ ## Get groundwater table elevation on current day ## zGW = NewCond.zGW ## Calculate capillary rise ## if water_table_presence == 0: # No water table present # Capillary rise is zero CrTot = 0 elif water_table_presence == 1: # Water table present # Get maximum capillary rise for bottom compartment zBot = prof.dzsum[-1] zBotMid = prof.zMid[-1] prof = prof if (prof.Ksat[-1] > 0) and (zGW > 0) and ((zGW - zBotMid) < 4): if zBotMid >= zGW: MaxCR = 99 else: MaxCR = np.exp((np.log(zGW - zBotMid) - prof.bCR[-1]) / prof.aCR[-1]) if MaxCR > 99: MaxCR = 99 else: MaxCR = 0 ######################### this needs fixing, will currently break#################### # # Find top of next soil layer that is not within modelled soil profile # zTopLayer = 0 # for layeri in np.sort(np.unique(prof.Layer)): # # Calculate layer thickness # l_idx = np.argwhere(prof.Layer==layeri).flatten() # LayThk = prof.dz[l_idx].sum() # zTopLayer = zTopLayer+LayThk # # Check for restrictions on upward flow caused by properties of # # compartments that are not modelled in the soil water balance # layeri = prof.Layer[-1] # assert layeri == Soil_nLayer # while (zTopLayer < zGW) and (layeri < Soil_nLayer): # # this needs fixing, will currently break # layeri = layeri+1 # compdf = prof.Layer[layeri] # if (compdf.Ksat > 0) and (zGW > 0) and ((zGW-zTopLayer) < 4): # if zTopLayer >= zGW: # LimCR = 99 # else: # LimCR = np.exp((np.log(zGW-zTopLayer)-compdf.bCR)/compdf.aCR) # if LimCR > 99: # LimCR = 99 # else: # LimCR = 0 # if MaxCR > LimCR: # MaxCR = LimCR # zTopLayer = zTopLayer+compdf.dz 
##################################################################################### # Calculate capillary rise compi = len(prof.Comp) - 1 # Start at bottom of root zone WCr = 0 # Capillary rise counter while (round(MaxCR * 1000) > 0) and (compi > -1) and (round(FluxOut[compi] * 1000) == 0): # Proceed upwards until maximum capillary rise occurs, soil surface # is reached, or encounter a compartment where downward # drainage/infiltration has already occurred on current day # Find layer of current compartment # Calculate driving force if (NewCond.th[compi] >= prof.th_wp[compi]) and (Soil_fshape_cr > 0): Df = 1 - ( ( (NewCond.th[compi] - prof.th_wp[compi]) / (NewCond.th_fc_Adj[compi] - prof.th_wp[compi]) ) ** Soil_fshape_cr ) if Df > 1: Df = 1 elif Df < 0: Df = 0 else: Df = 1 # Calculate relative hydraulic conductivity thThr = (prof.th_wp[compi] + prof.th_fc[compi]) / 2 if NewCond.th[compi] < thThr: if (NewCond.th[compi] <= prof.th_wp[compi]) or (thThr <= prof.th_wp[compi]): Krel = 0 else: Krel = (NewCond.th[compi] - prof.th_wp[compi]) / (thThr - prof.th_wp[compi]) else: Krel = 1 # Check if room is available to store water from capillary rise dth = NewCond.th_fc_Adj[compi] - NewCond.th[compi] # Store water if room is available if (dth > 0) and ((zBot - prof.dz[compi] / 2) < zGW): dthMax = Krel * Df * MaxCR / (1000 * prof.dz[compi]) if dth >= dthMax: NewCond.th[compi] = NewCond.th[compi] + dthMax CRcomp = dthMax * 1000 * prof.dz[compi] MaxCR = 0 else: NewCond.th[compi] = NewCond.th_fc_Adj[compi] CRcomp = dth * 1000 * prof.dz[compi] MaxCR = (Krel * MaxCR) - CRcomp WCr = WCr + CRcomp # Update bottom elevation of compartment zBot = zBot - prof.dz[compi] # Update compartment and layer counters compi = compi - 1 # Update restriction on maximum capillary rise if compi > -1: zBotMid = zBot - (prof.dz[compi] / 2) if (prof.Ksat[compi] > 0) and (zGW > 0) and ((zGW - zBotMid) < 4): if zBotMid >= zGW: LimCR = 99 else: LimCR = np.exp((np.log(zGW - zBotMid) - prof.bCR[compi]) / 
# Cell
# @njit()
def germination(InitCond, Soil_zGerm, prof, Crop_GermThr, Crop_PlantMethod, GDD, GrowingSeason):
    """
    Check whether the crop has germinated on the current day.

    Germination occurs once the proportional water content of the top soil
    layer (depth `Soil_zGerm`) rises above `Crop_GermThr`. While germination
    is pending, the delayed-growth counters are advanced instead.

    <a href="../pdfs/ac_ref_man_3.pdf#page=32" target="_blank">Reference Manual: germination condition</a> (pg. 23)

    *Arguments:*

    `InitCond`: `InitCondClass` : InitCond object containing model parameters (mutated in place)

    `Soil_zGerm`: `float` : soil depth affecting germination

    `prof`: `SoilProfileClass` : soil profile parameters

    `Crop_GermThr`: `float` : crop germination threshold

    `Crop_PlantMethod`: `bool` : sown as seedling (True) or transplanted (False)

    `GDD`: `float` : number of Growing Degree Days on current day

    `GrowingSeason`: `bool` : is growing season (True or False)

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    # NewCond aliases InitCond: all updates are applied in place, matching
    # the original calling convention.
    NewCond = InitCond

    if GrowingSeason == True:
        if NewCond.Germination == False:
            # Index of the deepest compartment reached by the germination zone
            comp_sto = np.argwhere(prof.dzsum >= Soil_zGerm).flatten()[0]

            # Accumulate water storage (mm) over the germination zone:
            # actual (Wr), at field capacity (WrFC), at wilting point (WrWP)
            Wr = 0
            WrFC = 0
            WrWP = 0
            for ii in range(comp_sto + 1):
                # Fraction of this compartment lying inside the zone
                if prof.dzsum[ii] > Soil_zGerm:
                    factor = 1 - ((prof.dzsum[ii] - Soil_zGerm) / prof.dz[ii])
                else:
                    factor = 1
                Wr += round(factor * 1000 * InitCond.th[ii] * prof.dz[ii], 3)
                WrFC += round(factor * 1000 * prof.th_fc[ii] * prof.dz[ii], 3)
                WrWP += round(factor * 1000 * prof.th_wp[ii] * prof.dz[ii], 3)

            # Actual storage cannot be negative
            if Wr < 0:
                Wr = 0

            # Proportional water content relative to the FC-WP range
            WcProp = 1 - ((WrFC - Wr) / (WrFC - WrWP))

            if WcProp >= Crop_GermThr:
                # Crop has germinated; seedlings get temporary protection
                # from leaf-expansion water stress
                NewCond.Germination = True
                NewCond.ProtectedSeed = (Crop_PlantMethod == True)
            else:
                # Not yet germinated: advance the delayed-growth counters
                # and make sure seed protection is off
                NewCond.DelayedCDs = InitCond.DelayedCDs + 1
                NewCond.DelayedGDDs = InitCond.DelayedGDDs + GDD
                NewCond.ProtectedSeed = False
    else:
        # Outside the growing season: reset all germination state
        NewCond.Germination = False
        NewCond.ProtectedSeed = False
        NewCond.DelayedCDs = 0
        NewCond.DelayedGDDs = 0

    return NewCond
# Cell
# @njit()
def growth_stage(Crop, InitCond, GrowingSeason):
    """
    Determine the current crop growth stage (1-4), used only for
    irrigation soil-moisture thresholds.

    *Arguments:*

    `Crop`: `CropClass` : Crop object containing crop parameters

    `InitCond`: `InitCondClass` : InitCond object containing model parameters (mutated in place)

    `GrowingSeason`: `bool` : is growing season (True or False)

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    # NewCond aliases InitCond; the stage is written in place.
    NewCond = InitCond

    if GrowingSeason == True:
        # Time since planting, corrected for any delayed development
        if Crop.CalendarType == 1:
            tAdj = NewCond.DAP - NewCond.DelayedCDs  # calendar days
        elif Crop.CalendarType == 2:
            tAdj = NewCond.GDDcum - NewCond.DelayedGDDs  # growing degree days

        # Walk the stage boundaries in development order; first match wins
        for bound, stage in (
            (Crop.Canopy10Pct, 1),
            (Crop.MaxCanopy, 2),
            (Crop.Senescence, 3),
        ):
            if tAdj <= bound:
                NewCond.GrowthStage = stage
                break
        else:
            # Past senescence
            if tAdj > Crop.Senescence:
                NewCond.GrowthStage = 4
    else:
        # Not in growing season so growth stage is set to dummy value
        NewCond.GrowthStage = 0

    return NewCond
# Cell
@njit
@cc.export("_water_stress", "(f8[:],f8[:],f8,f8,f8[:],f8,f8,f8,f8,f8)")
def water_stress(
    Crop_p_up,
    Crop_p_lo,
    Crop_ETadj,
    Crop_beta,
    Crop_fshape_w,
    InitCond_tEarlySen,
    Dr,
    TAW,
    Et0,
    beta,
):
    """
    Function to calculate water stress coefficients.

    Index convention for the stress thresholds (expects at least 4 entries):
    0 = leaf expansion, 1 = stomatal closure, 2 = senescence, 3 = pollination.

    <a href="../pdfs/ac_ref_man_3.pdf#page=18" target="_blank">Reference Manual: water stress equations</a> (pg. 9-13)

    *Arguments:*

    `Crop_p_up`: `np.array` : upper depletion thresholds per stress type

    `Crop_p_lo`: `np.array` : lower depletion thresholds per stress type

    `Crop_ETadj`: `float` : adjust thresholds for daily Et0 (1: yes)

    `Crop_beta`: `float` : senescence threshold reduction (%) under early senescence

    `Crop_fshape_w`: `np.array` : shape factors for the stress response curves

    `InitCond_tEarlySen`: `float` : time since early senescence was triggered

    `Dr`: `float` : root zone (or top soil) depletion

    `TAW`: `float` : root zone (or top soil) total available water

    `Et0`: `float` : reference evapotranspiration

    `beta`: `float` : adjust senescence threshold if early senescence is triggered

    *Returns:*

    `Ksw_Exp`, `Ksw_Sto`, `Ksw_Sen`, `Ksw_Pol`, `Ksw_StoLin`: `float` : water stress coefficients

    """

    ## Calculate relative root zone water depletion for each stress type ##
    # Number of stress variables
    nstress = len(Crop_p_up)

    # Store stress thresholds
    p_up = np.ones(nstress) * Crop_p_up
    p_lo = np.ones(nstress) * Crop_p_lo
    if Crop_ETadj == 1:
        # Adjust stress thresholds for Et0 on current day (don't do this for
        # pollination water stress coefficient)
        for ii in range(3):
            p_up[ii] = p_up[ii] + (0.04 * (5 - Et0)) * (np.log10(10 - 9 * p_up[ii]))
            p_lo[ii] = p_lo[ii] + (0.04 * (5 - Et0)) * (np.log10(10 - 9 * p_lo[ii]))

    # Adjust senescence threshold if early senescence is triggered
    if (beta == True) and (InitCond_tEarlySen > 0):
        p_up[2] = p_up[2] * (1 - Crop_beta / 100)

    # Limit thresholds to [0, 1]. Sized by nstress: previously hard-coded to
    # length 4, which raised a broadcast error whenever len(Crop_p_up) != 4.
    p_up = np.maximum(p_up, np.zeros(nstress))
    p_lo = np.maximum(p_lo, np.zeros(nstress))
    p_up = np.minimum(p_up, np.ones(nstress))
    p_lo = np.minimum(p_lo, np.ones(nstress))

    # Calculate relative depletion
    Drel = np.zeros(nstress)
    for ii in range(nstress):
        if Dr <= (p_up[ii] * TAW):
            # No water stress
            Drel[ii] = 0
        elif (Dr > (p_up[ii] * TAW)) and (Dr < (p_lo[ii] * TAW)):
            # Partial water stress
            Drel[ii] = 1 - ((p_lo[ii] - (Dr / TAW)) / (p_lo[ii] - p_up[ii]))
        elif Dr >= (p_lo[ii] * TAW):
            # Full water stress
            Drel[ii] = 1

    ## Calculate root zone water stress coefficients ##
    Ks = np.ones(3)
    for ii in range(3):
        Ks[ii] = 1 - ((np.exp(Drel[ii] * Crop_fshape_w[ii]) - 1) / (np.exp(Crop_fshape_w[ii]) - 1))

    # Ksw = KswClass()

    # Water stress coefficient for leaf expansion
    Ksw_Exp = Ks[0]
    # Water stress coefficient for stomatal closure
    Ksw_Sto = Ks[1]
    # Water stress coefficient for senescence
    Ksw_Sen = Ks[2]
    # Water stress coefficient for pollination failure
    Ksw_Pol = 1 - Drel[3]
    # Mean water stress coefficient for stomatal closure
    Ksw_StoLin = 1 - Drel[1]

    return Ksw_Exp, Ksw_Sto, Ksw_Sen, Ksw_Pol, Ksw_StoLin
# Cell
# @njit()
@cc.export("_cc_development", "f8(f8,f8,f8,f8,f8,unicode_type,f8)")
def cc_development(CCo, CCx, CGC, CDC, dt, Mode, CCx0):
    """
    Compute canopy cover at the end of the current simulation day.

    <a href="../pdfs/ac_ref_man_3.pdf#page=30" target="_blank">Reference Manual: CC development</a> (pg. 21-24)

    *Arguments:*

    `CCo`: `float` : fractional canopy cover size at emergence

    `CCx`: `float` : maximum canopy cover (fraction of soil cover)

    `CGC`: `float` : canopy growth coefficient (fraction per GDD)

    `CDC`: `float` : canopy decline coefficient (fraction per GDD/calendar day)

    `dt`: `float` : time delta of canopy growth (1 calendar day or ... GDD)

    `Mode`: `str` : stage of canopy development ("Growth" or "Decline")

    `CCx0`: `float` : maximum canopy cover (fraction of soil cover)

    *Returns:*

    `CC`: `float` : canopy cover

    """

    if Mode == "Growth":
        # Exponential growth stage
        CC = CCo * np.exp(CGC * dt)
        if CC > (CCx / 2):
            # Past half cover: switch to the exponential decay form,
            # capped at the maximum canopy size
            decay = CCx - 0.25 * (CCx / CCo) * CCx * np.exp(-CGC * dt)
            CC = decay if decay <= CCx else CCx
    elif Mode == "Decline":
        if CCx < 0.001:
            # Canopy effectively gone
            CC = 0
        else:
            # Late-season decline, scaled by the actual/potential CCx ratio
            rate = dt * CDC * 3.33 * ((CCx + 2.29) / (CCx0 + 2.29)) / (CCx + 2.29)
            CC = CCx * (1 - 0.05 * (np.exp(rate) - 1))

    ## Limit canopy cover to between 0 and 1 ##
    if CC > 1:
        CC = 1
    elif CC < 0:
        CC = 0

    return CC
# Cell
# @njit()
@cc.export("_cc_required_time", "f8(f8,f8,f8,f8,f8,unicode_type)")
def cc_required_time(CCprev, CCo, CCx, CGC, CDC, Mode):
    """
    Find the time required to reach the canopy cover at the end of the
    previous day, given the current CGC or CDC.

    <a href="../pdfs/ac_ref_man_3.pdf#page=30" target="_blank">Reference Manual: CC development</a> (pg. 21-24)

    *Arguments:*

    `CCprev`: `float` : canopy cover at previous timestep

    `CCo`: `float` : fractional canopy cover size at emergence

    `CCx`: `float` : maximum canopy cover (fraction of soil cover)

    `CGC`: `float` : canopy growth coefficient (fraction per GDD)

    `CDC`: `float` : canopy decline coefficient (fraction per GDD/calendar day)

    `Mode`: `str` : which coefficient to invert ("CGC" or "CDC")

    *Returns:*

    `tReq`: `float` : time required to reach CC at end of previous day

    """

    if Mode == "CGC":
        # Invert the growth equations: exponential growth below CCx/2,
        # exponential decay above it
        half_cover = CCx / 2
        if CCprev <= half_cover:
            growth_log = np.log(CCprev / CCo)
        else:
            growth_log = np.log((0.25 * CCx * CCx / CCo) / (CCx - CCprev))
        tReq = growth_log / CGC
    elif Mode == "CDC":
        # Invert the decline equation
        depletion = (1 - CCprev / CCx) / 0.05
        tReq = np.log(1 + depletion) / (CDC / CCx)

    return tReq
# Cell
# @njit()
def adjust_CCx(CCprev, CCo, CCx, CGC, CDC, dt, tSum, Crop_CanopyDevEnd, Crop_CCx):
    """
    Adjust the CCx value for changes in CGC caused by water stress during the
    growing season.

    <a href="../pdfs/ac_ref_man_3.pdf#page=36" target="_blank">Reference Manual: CC stress response</a> (pg. 27-33)

    *Arguments:*

    `CCprev`: `float` : canopy cover at previous timestep

    `CCo`: `float` : fractional canopy cover size at emergence

    `CCx`: `float` : maximum canopy cover (fraction of soil cover)

    `CGC`: `float` : canopy growth coefficient (fraction per GDD)

    `CDC`: `float` : canopy decline coefficient (fraction per GDD/calendar day)

    `dt`: `float` : time delta of canopy growth (1 calendar day or ... GDD)

    `tSum`: `float` : time since germination (CD or GDD)

    `Crop_CanopyDevEnd`: `float` : time at which canopy development ends

    `Crop_CCx`: `float` : maximum canopy cover (fraction of soil cover)

    *Returns:*

    `CCxAdj`: `float` : adjusted CCx

    """

    ## Get time required to reach CC on previous day ##
    t_reach = _cc_required_time(CCprev, CCo, CCx, CGC, CDC, "CGC")

    ## Determine CCx adjusted ##
    if t_reach > 0:
        # Project growth forward to the end of canopy development
        t_total = t_reach + (Crop_CanopyDevEnd - tSum) + dt
        return _cc_development(CCo, CCx, CGC, CDC, t_total, "Growth", Crop_CCx)

    # Previous CC unreachable under the current CGC: no adjustment possible
    return 0
# Cell
# @njit()
@cc.export("_update_CCx_CDC", "(f8,f8,f8,f8)")
def update_CCx_CDC(CCprev, CDC, CCx, dt):
    """
    Update CCx and CDC parameter values for rewatering in the late season of
    an early declining canopy.

    <a href="../pdfs/ac_ref_man_3.pdf#page=36" target="_blank">Reference Manual: CC stress response</a> (pg. 27-33)

    *Arguments:*

    `CCprev`: `float` : canopy cover at previous timestep

    `CDC`: `float` : canopy decline coefficient (fraction per GDD/calendar day)

    `CCx`: `float` : maximum canopy cover (fraction of soil cover)

    `dt`: `float` : time delta of canopy growth (1 calendar day or ... GDD)

    *Returns:*

    `CCXadj`: `float` : updated CCx

    `CDCadj`: `float` : updated CDC

    """

    # Shape term shared by both adjustments
    shape = CCx + 2.29

    ## Get adjusted CCx ##
    # Decline factor accumulated over dt at the original CDC
    decline = 1 - 0.05 * (np.exp(dt * ((CDC * 3.33) / shape)) - 1)
    CCXadj = CCprev / decline

    ## Get adjusted CDC ##
    CDCadj = CDC * ((CCXadj + 2.29) / shape)

    return CCXadj, CDCadj
# Cell
# @njit()
def canopy_cover(Crop, prof, Soil_zTop, InitCond, GDD, Et0, GrowingSeason):
    # def canopy_cover(Crop,Soil_Profile,Soil_zTop,InitCond,GDD,Et0,GrowingSeason):
    """
    Function to simulate canopy growth/decline.

    Computes both potential (no water stress, *_NS fields) and actual canopy
    cover for the current day, handles water-stress-induced early senescence,
    and applies the micro-advective adjustment. `InitCond` is mutated in
    place (NewCond aliases it) and returned.

    <a href="../pdfs/ac_ref_man_3.pdf#page=30" target="_blank">Reference Manual: CC equations</a> (pg. 21-33)

    *Arguments:*

    `Crop`: `CropClass` : Crop object

    `prof`: `SoilProfileClass` : Soil object

    `Soil_zTop`: `float` : top soil depth

    `InitCond`: `InitCondClass` : InitCond object (mutated in place)

    `GDD`: `float` : Growing Degree Days

    `Et0`: `float` : reference evapotranspiration

    `GrowingSeason`: `bool` : is it currently within the growing season (True, False)

    *Returns:*

    `NewCond`: `InitCondClass` : updated InitCond object

    """

    # Function to simulate canopy growth/decline
    # Snapshot yesterday's values before NewCond (an alias of InitCond) is
    # updated below
    InitCond_CC_NS = InitCond.CC_NS
    InitCond_CC = InitCond.CC
    InitCond_ProtectedSeed = InitCond.ProtectedSeed
    InitCond_CCxAct = InitCond.CCxAct
    InitCond_CropDead = InitCond.CropDead
    InitCond_tEarlySen = InitCond.tEarlySen
    InitCond_CCxW = InitCond.CCxW

    ## Store initial conditions in a new structure for updating ##
    NewCond = InitCond
    NewCond.CCprev = InitCond.CC

    ## Calculate canopy development (if in growing season) ##
    if GrowingSeason == True:
        # Calculate root zone water content
        TAW = TAWClass()
        Dr = DrClass()
        # thRZ = thRZClass()
        _, Dr.Zt, Dr.Rz, TAW.Zt, TAW.Rz, _, _, _, _, _, _ = _root_zone_water(
            prof,
            float(NewCond.Zroot),
            NewCond.th,
            Soil_zTop,
            float(Crop.Zmin),
            Crop.Aer,
        )

        # _,Dr,TAW,_ = root_zone_water(Soil_Profile,float(NewCond.Zroot),NewCond.th,Soil_zTop,float(Crop.Zmin),Crop.Aer)

        # Check whether to use root zone or top soil depletions for calculating
        # water stress
        if (Dr.Rz / TAW.Rz) <= (Dr.Zt / TAW.Zt):
            # Root zone is wetter than top soil, so use root zone value
            Dr = Dr.Rz
            TAW = TAW.Rz
        else:
            # Top soil is wetter than root zone, so use top soil values
            Dr = Dr.Zt
            TAW = TAW.Zt

        # Determine if water stress is occurring
        beta = True
        Ksw = KswClass()
        Ksw.Exp, Ksw.Sto, Ksw.Sen, Ksw.Pol, Ksw.StoLin = _water_stress(
            Crop.p_up,
            Crop.p_lo,
            Crop.ETadj,
            Crop.beta,
            Crop.fshape_w,
            NewCond.tEarlySen,
            Dr,
            TAW,
            Et0,
            beta,
        )
        # water_stress(Crop, NewCond, Dr, TAW, Et0, beta)

        # Get canopy cover growth time
        if Crop.CalendarType == 1:
            # Calendar days
            dtCC = 1
            tCCadj = NewCond.DAP - NewCond.DelayedCDs
        elif Crop.CalendarType == 2:
            # Growing degree days
            dtCC = GDD
            tCCadj = NewCond.GDDcum - NewCond.DelayedGDDs

        ## Canopy development (potential) ##
        if (tCCadj < Crop.Emergence) or (round(tCCadj) > Crop.Maturity):
            # No canopy development before emergence/germination or after
            # maturity
            NewCond.CC_NS = 0
        elif tCCadj < Crop.CanopyDevEnd:
            # Canopy growth can occur
            if InitCond_CC_NS <= Crop.CC0:
                # Very small initial CC.
                NewCond.CC_NS = Crop.CC0 * np.exp(Crop.CGC * dtCC)
                # print(Crop.CC0,np.exp(Crop.CGC*dtCC))
            else:
                # Canopy growing
                tmp_tCC = tCCadj - Crop.Emergence
                NewCond.CC_NS = _cc_development(
                    Crop.CC0, 0.98 * Crop.CCx, Crop.CGC, Crop.CDC, tmp_tCC, "Growth", Crop.CCx
                )

            # Update maximum canopy cover size in growing season
            NewCond.CCxAct_NS = NewCond.CC_NS
        elif tCCadj > Crop.CanopyDevEnd:
            # No more canopy growth is possible or canopy in decline
            # Set CCx for calculation of withered canopy effects
            NewCond.CCxW_NS = NewCond.CCxAct_NS
            if tCCadj < Crop.Senescence:
                # Mid-season stage - no canopy growth
                NewCond.CC_NS = InitCond_CC_NS
                # Update maximum canopy cover size in growing season
                NewCond.CCxAct_NS = NewCond.CC_NS
            else:
                # Late-season stage - canopy decline
                tmp_tCC = tCCadj - Crop.Senescence
                NewCond.CC_NS = _cc_development(
                    Crop.CC0,
                    NewCond.CCxAct_NS,
                    Crop.CGC,
                    Crop.CDC,
                    tmp_tCC,
                    "Decline",
                    NewCond.CCxAct_NS,
                )

        ## Canopy development (actual) ##
        if (tCCadj < Crop.Emergence) or (round(tCCadj) > Crop.Maturity):
            # No canopy development before emergence/germination or after
            # maturity
            NewCond.CC = 0
            NewCond.CC0adj = Crop.CC0
        elif tCCadj < Crop.CanopyDevEnd:
            # Canopy growth can occur
            if InitCond_CC <= NewCond.CC0adj or (
                (InitCond_ProtectedSeed == True) and (InitCond_CC <= (1.25 * NewCond.CC0adj))
            ):
                # Very small initial CC or seedling in protected phase of
                # growth. In this case, assume no leaf water expansion stress
                if InitCond_ProtectedSeed == True:
                    tmp_tCC = tCCadj - Crop.Emergence
                    NewCond.CC = _cc_development(
                        Crop.CC0, Crop.CCx, Crop.CGC, Crop.CDC, tmp_tCC, "Growth", Crop.CCx
                    )
                    # Check if seed protection should be turned off
                    if NewCond.CC > (1.25 * NewCond.CC0adj):
                        # Turn off seed protection - lead expansion stress can
                        # occur on future time steps.
                        NewCond.ProtectedSeed = False
                else:
                    NewCond.CC = NewCond.CC0adj * np.exp(Crop.CGC * dtCC)

            else:
                # Canopy growing
                if InitCond_CC < (0.9799 * Crop.CCx):
                    # Adjust canopy growth coefficient for leaf expansion water
                    # stress effects
                    CGCadj = Crop.CGC * Ksw.Exp
                    if CGCadj > 0:
                        # Adjust CCx for change in CGC
                        CCXadj = adjust_CCx(
                            InitCond_CC,
                            NewCond.CC0adj,
                            Crop.CCx,
                            CGCadj,
                            Crop.CDC,
                            dtCC,
                            tCCadj,
                            Crop.CanopyDevEnd,
                            Crop.CCx,
                        )
                        if CCXadj < 0:
                            NewCond.CC = InitCond_CC
                        elif abs(InitCond_CC - (0.9799 * Crop.CCx)) < 0.001:
                            # Approaching maximum canopy cover size
                            tmp_tCC = tCCadj - Crop.Emergence
                            NewCond.CC = _cc_development(
                                Crop.CC0, Crop.CCx, Crop.CGC, Crop.CDC, tmp_tCC, "Growth", Crop.CCx
                            )
                        else:
                            # Determine time required to reach CC on previous,
                            # day, given CGCAdj value
                            tReq = _cc_required_time(
                                InitCond_CC, NewCond.CC0adj, CCXadj, CGCadj, Crop.CDC, "CGC"
                            )
                            if tReq > 0:
                                # Calclate GDD's for canopy growth
                                tmp_tCC = tReq + dtCC
                                # Determine new canopy size
                                NewCond.CC = _cc_development(
                                    NewCond.CC0adj,
                                    CCXadj,
                                    CGCadj,
                                    Crop.CDC,
                                    tmp_tCC,
                                    "Growth",
                                    Crop.CCx,
                                )
                                # print(NewCond.DAP,CCXadj,tReq)
                            else:
                                # No canopy growth
                                NewCond.CC = InitCond_CC
                    else:
                        # No canopy growth
                        NewCond.CC = InitCond_CC
                        # Update CC0
                        # NOTE(review): placement inside this branch follows the
                        # upstream AquaCrop code; the collapsed source is
                        # ambiguous here — confirm against the original file.
                        if NewCond.CC > NewCond.CC0adj:
                            NewCond.CC0adj = Crop.CC0
                        else:
                            NewCond.CC0adj = NewCond.CC
                else:
                    # Canopy approaching maximum size
                    tmp_tCC = tCCadj - Crop.Emergence
                    NewCond.CC = _cc_development(
                        Crop.CC0, Crop.CCx, Crop.CGC, Crop.CDC, tmp_tCC, "Growth", Crop.CCx
                    )
                    NewCond.CC0adj = Crop.CC0

            if NewCond.CC > InitCond_CCxAct:
                # Update actual maximum canopy cover size during growing season
                NewCond.CCxAct = NewCond.CC

        elif tCCadj > Crop.CanopyDevEnd:
            # No more canopy growth is possible or canopy is in decline
            if tCCadj < Crop.Senescence:
                # Mid-season stage - no canopy growth
                NewCond.CC = InitCond_CC
                if NewCond.CC > InitCond_CCxAct:
                    # Update actual maximum canopy cover size during growing
                    # season
                    NewCond.CCxAct = NewCond.CC
            else:
                # Late-season stage - canopy decline
                # Adjust canopy decline coefficient for difference between actual
                # and potential CCx
                CDCadj = Crop.CDC * ((NewCond.CCxAct + 2.29) / (Crop.CCx + 2.29))
                # Determine new canopy size
                tmp_tCC = tCCadj - Crop.Senescence
                NewCond.CC = _cc_development(
                    NewCond.CC0adj,
                    NewCond.CCxAct,
                    Crop.CGC,
                    CDCadj,
                    tmp_tCC,
                    "Decline",
                    NewCond.CCxAct,
                )

            # Check for crop growth termination
            if (NewCond.CC < 0.001) and (InitCond_CropDead == False):
                # Crop has died
                NewCond.CC = 0
                NewCond.CropDead = True

        ## Canopy senescence due to water stress (actual) ##
        if tCCadj >= Crop.Emergence:
            if (tCCadj < Crop.Senescence) or (InitCond_tEarlySen > 0):
                # Check for early canopy senescence due to severe water
                # stress.
                if (Ksw.Sen < 1) and (InitCond_ProtectedSeed == False):
                    # Early canopy senescence
                    NewCond.PrematSenes = True
                    if InitCond_tEarlySen == 0:
                        # No prior early senescence
                        NewCond.CCxEarlySen = InitCond_CC

                    # Increment early senescence GDD counter
                    NewCond.tEarlySen = InitCond_tEarlySen + dtCC
                    # Adjust canopy decline coefficient for water stress
                    # (beta=False: do not re-reduce the senescence threshold)
                    beta = False
                    Ksw = KswClass()
                    Ksw.Exp, Ksw.Sto, Ksw.Sen, Ksw.Pol, Ksw.StoLin = _water_stress(
                        Crop.p_up,
                        Crop.p_lo,
                        Crop.ETadj,
                        Crop.beta,
                        Crop.fshape_w,
                        NewCond.tEarlySen,
                        Dr,
                        TAW,
                        Et0,
                        beta,
                    )
                    # Ksw = water_stress(Crop, NewCond, Dr, TAW, Et0, beta)
                    if Ksw.Sen > 0.99999:
                        CDCadj = 0.0001
                    else:
                        CDCadj = (1 - (Ksw.Sen ** 8)) * Crop.CDC

                    # Get new canpy cover size after senescence
                    if NewCond.CCxEarlySen < 0.001:
                        CCsen = 0
                    else:
                        # Get time required to reach CC at end of previous day, given
                        # CDCadj
                        tReq = (np.log(1 + (1 - InitCond_CC / NewCond.CCxEarlySen) / 0.05)) / (
                            (CDCadj * 3.33) / (NewCond.CCxEarlySen + 2.29)
                        )
                        # Calculate GDD's for canopy decline
                        tmp_tCC = tReq + dtCC
                        # Determine new canopy size
                        CCsen = NewCond.CCxEarlySen * (
                            1
                            - 0.05
                            * (
                                np.exp(tmp_tCC * ((CDCadj * 3.33) / (NewCond.CCxEarlySen + 2.29)))
                                - 1
                            )
                        )

                        if CCsen < 0:
                            CCsen = 0

                    # Update canopy cover size
                    if tCCadj < Crop.Senescence:
                        # Limit CC to CCx
                        if CCsen > Crop.CCx:
                            CCsen = Crop.CCx

                        # CC cannot be greater than value on previous day
                        NewCond.CC = CCsen
                        if NewCond.CC > InitCond_CC:
                            NewCond.CC = InitCond_CC

                        # Update maximum canopy cover size during growing
                        # season
                        NewCond.CCxAct = NewCond.CC
                        # Update CC0 if current CC is less than initial canopy
                        # cover size at planting
                        if NewCond.CC < Crop.CC0:
                            NewCond.CC0adj = NewCond.CC
                        else:
                            NewCond.CC0adj = Crop.CC0
                    else:
                        # Update CC to account for canopy cover senescence due
                        # to water stress
                        if CCsen < NewCond.CC:
                            NewCond.CC = CCsen

                    # Check for crop growth termination
                    if (NewCond.CC < 0.001) and (InitCond_CropDead == False):
                        # Crop has died
                        NewCond.CC = 0
                        NewCond.CropDead = True
                else:
                    # No water stress
                    NewCond.PrematSenes = False
                    if (tCCadj > Crop.Senescence) and (InitCond_tEarlySen > 0):
                        # Rewatering of canopy in late season
                        # Get new values for CCx and CDC
                        tmp_tCC = tCCadj - dtCC - Crop.Senescence
                        CCXadj, CDCadj = _update_CCx_CDC(InitCond_CC, Crop.CDC, Crop.CCx, tmp_tCC)
                        NewCond.CCxAct = CCXadj
                        # Get new CC value for end of current day
                        tmp_tCC = tCCadj - Crop.Senescence
                        NewCond.CC = _cc_development(
                            NewCond.CC0adj, CCXadj, Crop.CGC, CDCadj, tmp_tCC, "Decline", CCXadj
                        )

                        # Check for crop growth termination
                        if (NewCond.CC < 0.001) and (InitCond_CropDead == False):
                            NewCond.CC = 0
                            NewCond.CropDead = True

                    # Reset early senescence counter
                    NewCond.tEarlySen = 0

                # Adjust CCx for effects of withered canopy
                if NewCond.CC > InitCond_CCxW:
                    NewCond.CCxW = NewCond.CC

        ## Calculate canopy size adjusted for micro-advective effects ##
        # Check to ensure potential CC is not slightly lower than actual
        if NewCond.CC_NS < NewCond.CC:
            NewCond.CC_NS = NewCond.CC
            if tCCadj < Crop.CanopyDevEnd:
                NewCond.CCxAct_NS = NewCond.CC_NS

        # Actual (with water stress)
        NewCond.CCadj = (1.72 * NewCond.CC) - (NewCond.CC ** 2) + (0.3 * (NewCond.CC ** 3))
        # Potential (without water stress)
        NewCond.CCadj_NS = (
            (1.72 * NewCond.CC_NS) - (NewCond.CC_NS ** 2) + (0.3 * (NewCond.CC_NS ** 3))
        )
    else:
        # No canopy outside growing season - set various values to zero
        NewCond.CC = 0
        NewCond.CCadj = 0
        NewCond.CC_NS = 0
        NewCond.CCadj_NS = 0
        NewCond.CCxW = 0
        NewCond.CCxAct = 0
        NewCond.CCxW_NS = 0
        NewCond.CCxAct_NS = 0

    return NewCond
# Cell
@njit
@cc.export("_evap_layer_water_content", (f8[:],f8,SoilProfileNT_typ_sig))
def _evap_layer_water_content(
    InitCond_th,
    InitCond_EvapZ,
    prof,
):
    """
    Get water storage totals (mm) within the soil evaporation layer.

    <a href="../pdfs/ac_ref_man_3.pdf#page=82" target="_blank">Reference Manual: evaporation equations</a> (pg. 73-81)

    *Arguments:*

    `InitCond_th`: `np.array` : current water content per compartment

    `InitCond_EvapZ`: `float` : evaporation layer depth

    `prof`: `SoilProfileClass` : soil profile parameters

    *Returns:*

    `Wevap_Sat`: `float` : water storage in evaporation layer at saturation (mm)

    `Wevap_Fc`: `float` : water storage in evaporation layer at field capacity (mm)

    `Wevap_Wp`: `float` : water storage in evaporation layer at permanent wilting point (mm)

    `Wevap_Dry`: `float` : water storage in evaporation layer at air dry (mm)

    `Wevap_Act`: `float` : actual water storage in evaporation layer (mm)

    """

    # Number of compartments at least partially inside the evaporation layer
    comp_sto = np.sum(prof.dzsum < InitCond_EvapZ) + 1

    Wevap_Sat = 0
    Wevap_Fc = 0
    Wevap_Wp = 0
    Wevap_Dry = 0
    Wevap_Act = 0
    for comp in range(int(comp_sto)):
        # Fraction of this compartment lying inside the evaporation layer
        if prof.dzsum[comp] > InitCond_EvapZ:
            frac = 1 - ((prof.dzsum[comp] - InitCond_EvapZ) / prof.dz[comp])
        else:
            frac = 1

        # storage (mm) = fraction * 1000 * water content * compartment depth
        Wevap_Act += frac * 1000 * InitCond_th[comp] * prof.dz[comp]  # actual
        Wevap_Sat += frac * 1000 * prof.th_s[comp] * prof.dz[comp]  # saturation
        Wevap_Fc += frac * 1000 * prof.th_fc[comp] * prof.dz[comp]  # field capacity
        Wevap_Wp += frac * 1000 * prof.th_wp[comp] * prof.dz[comp]  # wilting point
        Wevap_Dry += frac * 1000 * prof.th_dry[comp] * prof.dz[comp]  # air dry

    # Actual storage cannot be negative
    if Wevap_Act < 0:
        Wevap_Act = 0

    return Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act
73-81) *Arguments:* `InitCond_th`: `np.array` : Initial water content `InitCond_EvapZ`: `float` : evaporation depth `prof`: `SoilProfileClass` : Soil object containing soil paramaters *Returns:* `Wevap_Sat`: `float` : Water storage in evaporation layer at saturation (mm) `Wevap_Fc`: `float` : Water storage in evaporation layer at field capacity (mm) `Wevap_Wp`: `float` : Water storage in evaporation layer at permanent wilting point (mm) `Wevap_Dry`: `float` : Water storage in evaporation layer at air dry (mm) `Wevap_Act`: `float` : Actual water storage in evaporation layer (mm) """ # Find soil compartments covered by evaporation layer comp_sto = np.sum(prof.dzsum < InitCond_EvapZ) + 1 Wevap_Sat = 0 Wevap_Fc = 0 Wevap_Wp = 0 Wevap_Dry = 0 Wevap_Act = 0 for ii in range(int(comp_sto)): # Determine fraction of soil compartment covered by evaporation layer if prof.dzsum[ii] > InitCond_EvapZ: factor = 1 - ((prof.dzsum[ii] - InitCond_EvapZ) / prof.dz[ii]) else: factor = 1 # Actual water storage in evaporation layer (mm) Wevap_Act += factor * 1000 * InitCond_th[ii] * prof.dz[ii] # Water storage in evaporation layer at saturation (mm) Wevap_Sat += factor * 1000 * prof.th_s[ii] * prof.dz[ii] # Water storage in evaporation layer at field capacity (mm) Wevap_Fc += factor * 1000 * prof.th_fc[ii] * prof.dz[ii] # Water storage in evaporation layer at permanent wilting point (mm) Wevap_Wp += factor * 1000 * prof.th_wp[ii] * prof.dz[ii] # Water storage in evaporation layer at air dry (mm) Wevap_Dry += factor * 1000 * prof.th_dry[ii] * prof.dz[ii] if Wevap_Act < 0: Wevap_Act = 0 return Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act # Cell # @njit() @cc.export( "_soil_evaporation", (i8,i8,i8,SoilProfileNT_typ_sig, f8,f8,f8,f8,f8,f8,f8,i8,f8,i8,f8,b1,f8,f8,i8,f8,f8,f8,f8[:],f8,f8,f8,f8,f8,f8, f8,b1,f8,f8,f8,f8,f8,f8,f8,b1), ) def soil_evaporation( ClockStruct_EvapTimeSteps, ClockStruct_SimOffSeason, ClockStruct_TimeStepCounter, prof, Soil_EvapZmin, Soil_EvapZmax, Soil_REW, 
Soil_Kex, Soil_fwcc, Soil_fWrelExp, Soil_fevap, Crop_CalendarType, Crop_Senescence, IrrMngt_IrrMethod, IrrMngt_WetSurf, FieldMngt_Mulches, FieldMngt_fMulch, FieldMngt_MulchPct, NewCond_DAP, NewCond_Wsurf, NewCond_EvapZ, NewCond_Stage2, NewCond_th, NewCond_DelayedCDs, NewCond_GDDcum, NewCond_DelayedGDDs, NewCond_CCxW, NewCond_CCadj, NewCond_CCxAct, NewCond_CC, NewCond_PrematSenes, NewCond_SurfaceStorage, NewCond_Wstage2, NewCond_Epot, Et0, Infl, Rain, Irr, GrowingSeason, ): """ Function to calculate daily soil evaporation <a href="../pdfs/ac_ref_man_3.pdf#page=82" target="_blank">Reference Manual: evaporation equations</a> (pg. 73-81) *Arguments:* `Clock params`: `bool, int` : clock params `Soil parameters`: `float` : soil parameters `Crop params`: `float` : Crop paramaters `IrrMngt params`: `int, float`: irrigation management paramaters `FieldMngt`: `FieldMngtStruct` : Field management paramaters `InitCond`: `InitCondClass` : InitCond object containing model paramaters `Et0`: `float` : daily reference evapotranspiration `Infl`: `float` : Infiltration on current day `Rain`: `float` : daily precipitation mm `Irr`: `float` : Irrigation applied on current day `GrowingSeason`:: `bool` : is growing season (True or Flase) *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters `EsAct`: `float` : Actual surface evaporation current day `EsPot`: `float` : Potential surface evaporation current day """ # Wevap = WevapClass() ## Store initial conditions in new structure that will be updated ## # NewCond = InitCond ## Prepare stage 2 evaporation (REW gone) ## # Only do this if it is first day of simulation, or if it is first day of # growing season and not simulating off-season if (ClockStruct_TimeStepCounter == 0) or ( (NewCond_DAP == 1) and (ClockStruct_SimOffSeason == False) ): # Reset storage in surface soil layer to zero NewCond_Wsurf = 0 # Set evaporation depth to minimum NewCond_EvapZ = Soil_EvapZmin # Trigger stage 2 evaporation 
NewCond_Stage2 = True # Get relative water content for start of stage 2 evaporation Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act = _evap_layer_water_content( NewCond_th, NewCond_EvapZ, prof, ) NewCond_Wstage2 = round( (Wevap_Act - (Wevap_Fc - Soil_REW)) / (Wevap_Sat - (Wevap_Fc - Soil_REW)), 2 ) if NewCond_Wstage2 < 0: NewCond_Wstage2 = 0 ## Prepare soil evaporation stage 1 ## # Adjust water in surface evaporation layer for any infiltration if (Rain > 0) or ((Irr > 0) and (IrrMngt_IrrMethod != 4)): # Only prepare stage one when rainfall occurs, or when irrigation is # trigerred (not in net irrigation mode) if Infl > 0: # Update storage in surface evaporation layer for incoming # infiltration NewCond_Wsurf = Infl # Water stored in surface evaporation layer cannot exceed REW if NewCond_Wsurf > Soil_REW: NewCond_Wsurf = Soil_REW # Reset variables NewCond_Wstage2 = 0 NewCond_EvapZ = Soil_EvapZmin NewCond_Stage2 = False ## Calculate potential soil evaporation rate (mm/day) ## if GrowingSeason == True: # Adjust time for any delayed development if Crop_CalendarType == 1: tAdj = NewCond_DAP - NewCond_DelayedCDs elif Crop_CalendarType == 2: tAdj = NewCond_GDDcum - NewCond_DelayedGDDs # Calculate maximum potential soil evaporation EsPotMax = Soil_Kex * Et0 * (1 - NewCond_CCxW * (Soil_fwcc / 100)) # Calculate potential soil evaporation (given current canopy cover # size) EsPot = Soil_Kex * (1 - NewCond_CCadj) * Et0 # Adjust potential soil evaporation for effects of withered canopy if (tAdj > Crop_Senescence) and (NewCond_CCxAct > 0): if NewCond_CC > (NewCond_CCxAct / 2): if NewCond_CC > NewCond_CCxAct: mult = 0 else: mult = (NewCond_CCxAct - NewCond_CC) / (NewCond_CCxAct / 2) else: mult = 1 EsPot = EsPot * (1 - NewCond_CCxAct * (Soil_fwcc / 100) * mult) CCxActAdj = ( (1.72 * NewCond_CCxAct) - (NewCond_CCxAct ** 2) + 0.3 * (NewCond_CCxAct ** 3) ) EsPotMin = Soil_Kex * (1 - CCxActAdj) * Et0 if EsPotMin < 0: EsPotMin = 0 if EsPot < EsPotMin: EsPot = EsPotMin elif EsPot > 
EsPotMax:  # NOTE(review): completes an `if EsPot >` test whose header line lies in the previous chunk
                EsPot = EsPotMax

            # Cap potential soil evaporation once canopy senescence has started
            if NewCond_PrematSenes == True:
                if EsPot > EsPotMax:
                    EsPot = EsPotMax

    else:
        # No canopy cover outside of growing season so potential soil
        # evaporation only depends on reference evapotranspiration
        EsPot = Soil_Kex * Et0

    ## Adjust potential soil evaporation for mulches and/or partial wetting ##
    # Mulches
    if NewCond_SurfaceStorage < 0.000001:
        if not FieldMngt_Mulches:
            # No mulches present
            EsPotMul = EsPot
        elif FieldMngt_Mulches:
            # Mulches present
            EsPotMul = EsPot * (1 - FieldMngt_fMulch * (FieldMngt_MulchPct / 100))
    else:
        # Surface is flooded - no adjustment of potential soil evaporation for
        # mulches
        EsPotMul = EsPot

    # Partial surface wetting by irrigation
    if (Irr > 0) and (IrrMngt_IrrMethod != 4):
        # Only apply adjustment if irrigation occurs and not in net irrigation
        # mode
        if (Rain > 1) or (NewCond_SurfaceStorage > 0):
            # No adjustment for partial wetting - assume surface is fully wet
            EsPotIrr = EsPot
        else:
            # Adjust for proportion of surface area wetted by irrigation
            EsPotIrr = EsPot * (IrrMngt_WetSurf / 100)
    else:
        # No adjustment for partial surface wetting
        EsPotIrr = EsPot

    # Assign minimum value (mulches and partial wetting don't combine)
    EsPot = min(EsPotIrr, EsPotMul)

    ## Surface evaporation ##
    # Initialise actual evaporation counter
    EsAct = 0
    # Evaporate surface storage
    if NewCond_SurfaceStorage > 0:
        if NewCond_SurfaceStorage > EsPot:
            # All potential soil evaporation can be supplied by surface storage
            EsAct = EsPot
            # Update surface storage
            NewCond_SurfaceStorage = NewCond_SurfaceStorage - EsAct
        else:
            # Surface storage is not sufficient to meet all potential soil
            # evaporation
            EsAct = NewCond_SurfaceStorage
            # Update surface storage, evaporation layer depth, stage
            NewCond_SurfaceStorage = 0
            NewCond_Wsurf = Soil_REW
            NewCond_Wstage2 = 0
            NewCond_EvapZ = Soil_EvapZmin
            NewCond_Stage2 = False

    ## Stage 1 evaporation ##
    # Determine total water to be extracted
    ToExtract = EsPot - EsAct
    # Determine total water to be extracted in stage one (limited by surface
    # layer water storage)
    ExtractPotStg1 = min(ToExtract, NewCond_Wsurf)

    # Extract water
    if ExtractPotStg1 > 0:
        # Find soil compartments covered by evaporation layer
        comp_sto = np.sum(prof.dzsum < Soil_EvapZmin) + 1
        comp = -1
        # prof = Soil_Profile
        while (ExtractPotStg1 > 0) and (comp < comp_sto):
            # Increment compartment counter
            comp = comp + 1
            # Specify layer number
            # Determine proportion of compartment in evaporation layer
            if prof.dzsum[comp] > Soil_EvapZmin:
                factor = 1 - ((prof.dzsum[comp] - Soil_EvapZmin) / prof.dz[comp])
            else:
                factor = 1

            # Water storage (mm) at air dry
            Wdry = 1000 * prof.th_dry[comp] * prof.dz[comp]
            # Available water (mm)
            W = 1000 * NewCond_th[comp] * prof.dz[comp]
            # Water available in compartment for extraction (mm)
            AvW = (W - Wdry) * factor
            if AvW < 0:
                AvW = 0

            if AvW >= ExtractPotStg1:
                # Update actual evaporation
                EsAct = EsAct + ExtractPotStg1
                # Update depth of water in current compartment
                W = W - ExtractPotStg1
                # Update total water to be extracted
                ToExtract = ToExtract - ExtractPotStg1
                # Update water to be extracted from surface layer (stage 1)
                ExtractPotStg1 = 0
            else:
                # Update actual evaporation
                EsAct = EsAct + AvW
                # Update water to be extracted from surface layer (stage 1)
                ExtractPotStg1 = ExtractPotStg1 - AvW
                # Update total water to be extracted
                ToExtract = ToExtract - AvW
                # Update depth of water in current compartment
                W = W - AvW

            # Update water content
            NewCond_th[comp] = W / (1000 * prof.dz[comp])

        # Update surface evaporation layer water balance
        NewCond_Wsurf = NewCond_Wsurf - EsAct
        if (NewCond_Wsurf < 0) or (ExtractPotStg1 > 0.0001):
            NewCond_Wsurf = 0

        # If surface storage completely depleted, prepare stage 2
        if NewCond_Wsurf < 0.0001:
            # Get water contents (mm)
            Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act = _evap_layer_water_content(
                NewCond_th,
                NewCond_EvapZ,
                prof,
            )
            # Proportional water storage for start of stage two evaporation
            NewCond_Wstage2 = round(
                (Wevap_Act - (Wevap_Fc - Soil_REW)) / (Wevap_Sat - (Wevap_Fc - Soil_REW)), 2
            )
            if NewCond_Wstage2 < 0:
                NewCond_Wstage2 = 0

    ## Stage 2 evaporation ##
    # Extract water
    if ToExtract > 0:
        # Start stage 2
        NewCond_Stage2 = True
        # Get sub-daily evaporative demand
        Edt = ToExtract / ClockStruct_EvapTimeSteps
        # Loop sub-daily steps
        for jj in range(int(ClockStruct_EvapTimeSteps)):
            # Get current water storage (mm)
            Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act = _evap_layer_water_content(
                NewCond_th,
                NewCond_EvapZ,
                prof,
            )
            # Get water storage (mm) at start of stage 2 evaporation
            Wupper = NewCond_Wstage2 * (Wevap_Sat - (Wevap_Fc - Soil_REW)) + (Wevap_Fc - Soil_REW)
            # Get water storage (mm) when there is no evaporation
            Wlower = Wevap_Dry
            # Get relative depletion of evaporation storage in stage 2
            Wrel = (Wevap_Act - Wlower) / (Wupper - Wlower)
            # Check if need to expand evaporation layer
            if Soil_EvapZmax > Soil_EvapZmin:
                Wcheck = Soil_fWrelExp * (
                    (Soil_EvapZmax - NewCond_EvapZ) / (Soil_EvapZmax - Soil_EvapZmin)
                )
                while (Wrel < Wcheck) and (NewCond_EvapZ < Soil_EvapZmax):
                    # Expand evaporation layer by 1 mm
                    NewCond_EvapZ = NewCond_EvapZ + 0.001
                    # Update water storage (mm) in evaporation layer
                    Wevap_Sat, Wevap_Fc, Wevap_Wp, Wevap_Dry, Wevap_Act = _evap_layer_water_content(
                        NewCond_th,
                        NewCond_EvapZ,
                        prof,
                    )
                    Wupper = NewCond_Wstage2 * (Wevap_Sat - (Wevap_Fc - Soil_REW)) + (
                        Wevap_Fc - Soil_REW
                    )
                    Wlower = Wevap_Dry
                    # Update relative depletion of evaporation storage
                    Wrel = (Wevap_Act - Wlower) / (Wupper - Wlower)
                    Wcheck = Soil_fWrelExp * (
                        (Soil_EvapZmax - NewCond_EvapZ) / (Soil_EvapZmax - Soil_EvapZmin)
                    )

            # Get stage 2 evaporation reduction coefficient
            Kr = (np.exp(Soil_fevap * Wrel) - 1) / (np.exp(Soil_fevap) - 1)
            if Kr > 1:
                Kr = 1

            # Get water to extract (mm)
            ToExtractStg2 = Kr * Edt

            # Extract water from compartments
            comp_sto = np.sum(prof.dzsum < NewCond_EvapZ) + 1
            comp = -1
            # prof = Soil_Profile
            while (ToExtractStg2 > 0) and (comp < comp_sto):
                # Increment compartment counter
                comp = comp + 1
                # Specify layer number
                # Determine proportion of compartment in evaporation layer
                if prof.dzsum[comp] > NewCond_EvapZ:
                    factor = 1 - ((prof.dzsum[comp] - NewCond_EvapZ) / prof.dz[comp])
                else:
                    factor = 1

                # Water storage (mm) at air dry
                Wdry = 1000 * prof.th_dry[comp] * prof.dz[comp]
                # Available water (mm)
                W = 1000 * NewCond_th[comp] * prof.dz[comp]
                # Water available in compartment for extraction (mm)
                AvW = (W - Wdry) * factor
                if AvW >= ToExtractStg2:
                    # Update actual evaporation
                    EsAct = EsAct + ToExtractStg2
                    # Update depth of water in current compartment
                    W = W - ToExtractStg2
                    # Update total water to be extracted
                    ToExtract = ToExtract - ToExtractStg2
                    # Update water to be extracted from surface layer (stage 1)
                    ToExtractStg2 = 0
                else:
                    # Update actual evaporation
                    EsAct = EsAct + AvW
                    # Update depth of water in current compartment
                    W = W - AvW
                    # Update water to be extracted from surface layer (stage 1)
                    ToExtractStg2 = ToExtractStg2 - AvW
                    # Update total water to be extracted
                    ToExtract = ToExtract - AvW

                # Update water content
                NewCond_th[comp] = W / (1000 * prof.dz[comp])

    ## Store potential evaporation for irrigation calculations on next day ##
    NewCond_Epot = EsPot

    return (
        NewCond_Epot,
        NewCond_th,
        NewCond_Stage2,
        NewCond_Wstage2,
        NewCond_Wsurf,
        NewCond_SurfaceStorage,
        NewCond_EvapZ,
        EsAct,
        EsPot,
    )


# Cell
# @njit()
@cc.export("_aeration_stress", (f8,f8,thRZNT_type_sig))
def aeration_stress(NewCond_AerDays, Crop_LagAer, thRZ):
    """
    Function to calculate aeration stress coefficient

    <a href="../pdfs/ac_ref_man_3.pdf#page=90" target="_blank">Reference Manual: aeration stress</a> (pg. 89-90)

    *Arguments:*

    `NewCond_AerDays`: `int` : number of aeration stress days

    `Crop_LagAer`: `int` : lag days before aeration stress

    `thRZ`: `thRZClass` : object that contains information on the total water in the root zone

    *Returns:*

    `Ksa_Aer`: `float` : aeration stress coefficient

    `NewCond_AerDays`: `float` : updated aeration stress days counter

    """

    ## Determine aeration stress (root zone) ##
    if thRZ.Act > thRZ.Aer:
        # Calculate aeration stress coefficient
        if NewCond_AerDays < Crop_LagAer:
            # Stress ramps in over the first Crop_LagAer days
            stress = 1 - ((thRZ.S - thRZ.Act) / (thRZ.S - thRZ.Aer))
            Ksa_Aer = 1 - ((NewCond_AerDays / 3) * stress)
        elif NewCond_AerDays >= Crop_LagAer:
            Ksa_Aer = (thRZ.S - thRZ.Act) / (thRZ.S - thRZ.Aer)

        # Increment aeration days counter
        NewCond_AerDays = NewCond_AerDays + 1
        if NewCond_AerDays > Crop_LagAer:
            NewCond_AerDays = Crop_LagAer
    else:
        # Set aeration stress coefficient to one (no stress value)
        Ksa_Aer = 1
        # Reset aeration days counter
        NewCond_AerDays = 0

    return Ksa_Aer, NewCond_AerDays


# Cell
# @njit()
def transpiration(
    Soil_Profile,
    Soil_nComp,
    Soil_zTop,
    Crop,
    IrrMngt_IrrMethod,
    IrrMngt_NetIrrSMT,
    InitCond,
    Et0,
    CO2,
    GrowingSeason,
    GDD,
):
    """
    Function to calculate crop transpiration on current day

    <a href="../pdfs/ac_ref_man_3.pdf#page=91" target="_blank">Reference Manual: transpiration equations</a> (pg.
82-91)

    *Arguments:*

    `Soil`: `SoilClass` : Soil object

    `Crop`: `CropClass` : Crop object

    `IrrMngt`: `IrrMngt`: object containing irrigation management params

    `InitCond`: `InitCondClass` : InitCond object

    `Et0`: `float` : reference evapotranspiration

    `CO2`: `CO2class` : CO2

    `GDD`: `float` : Growing Degree Days

    `GrowingSeason`:: `bool` : is it currently within the growing season (True, False)

    *Returns:*

    `TrAct`: `float` : Actual Transpiration on current day

    `TrPot_NS`: `float` : Potential Transpiration on current day with no water stress

    `TrPot0`: `float` : Potential Transpiration on current day

    `NewCond`: `InitCondClass` : updated InitCond object

    `IrrNet`: `float` : Net Irrigation (if required)

    """

    ## Store initial conditions ##
    # NOTE(review): NewCond aliases InitCond (no copy), so mutations below
    # also modify the caller's InitCond object — presumably intentional.
    NewCond = InitCond
    InitCond_th = InitCond.th

    prof = Soil_Profile

    ## Calculate transpiration (if in growing season) ##
    if GrowingSeason == True:
        ## Calculate potential transpiration ##
        # 1. No prior water stress
        # Update ageing days counter
        DAPadj = NewCond.DAP - NewCond.DelayedCDs
        if DAPadj > Crop.MaxCanopyCD:
            NewCond.AgeDays_NS = DAPadj - Crop.MaxCanopyCD

        # Update crop coefficient for ageing of canopy
        if NewCond.AgeDays_NS > 5:
            Kcb_NS = Crop.Kcb - ((NewCond.AgeDays_NS - 5) * (Crop.fage / 100)) * NewCond.CCxW_NS
        else:
            Kcb_NS = Crop.Kcb

        # Update crop coefficient for CO2 concentration
        CO2CurrentConc = CO2.CurrentConc
        CO2RefConc = CO2.RefConc
        if CO2CurrentConc > CO2RefConc:
            Kcb_NS = Kcb_NS * (1 - 0.05 * ((CO2CurrentConc - CO2RefConc) / (550 - CO2RefConc)))

        # Determine potential transpiration rate (no water stress)
        TrPot_NS = Kcb_NS * (NewCond.CCadj_NS) * Et0

        # Correct potential transpiration for dying green canopy effects
        if NewCond.CC_NS < NewCond.CCxW_NS:
            if (NewCond.CCxW_NS > 0.001) and (NewCond.CC_NS > 0.001):
                TrPot_NS = TrPot_NS * ((NewCond.CC_NS / NewCond.CCxW_NS) ** Crop.a_Tr)

        # 2. Potential prior water stress and/or delayed development
        # Update ageing days counter
        DAPadj = NewCond.DAP - NewCond.DelayedCDs
        if DAPadj > Crop.MaxCanopyCD:
            NewCond.AgeDays = DAPadj - Crop.MaxCanopyCD

        # Update crop coefficient for ageing of canopy
        if NewCond.AgeDays > 5:
            Kcb = Crop.Kcb - ((NewCond.AgeDays - 5) * (Crop.fage / 100)) * NewCond.CCxW
        else:
            Kcb = Crop.Kcb

        # Update crop coefficient for CO2 concentration
        if CO2CurrentConc > CO2RefConc:
            Kcb = Kcb * (1 - 0.05 * ((CO2CurrentConc - CO2RefConc) / (550 - CO2RefConc)))

        # Determine potential transpiration rate
        TrPot0 = Kcb * (NewCond.CCadj) * Et0
        # Correct potential transpiration for dying green canopy effects
        if NewCond.CC < NewCond.CCxW:
            if (NewCond.CCxW > 0.001) and (NewCond.CC > 0.001):
                TrPot0 = TrPot0 * ((NewCond.CC / NewCond.CCxW) ** Crop.a_Tr)

        # 3. Adjust potential transpiration for cold stress effects
        # Check if cold stress occurs on current day
        if Crop.TrColdStress == 0:
            # Cold temperature stress does not affect transpiration
            KsCold = 1
        elif Crop.TrColdStress == 1:
            # Transpiration can be affected by cold temperature stress
            if GDD >= Crop.GDD_up:
                # No cold temperature stress
                KsCold = 1
            elif GDD <= Crop.GDD_lo:
                # Transpiration fully inhibited by cold temperature stress
                KsCold = 0
            else:
                # Transpiration partially inhibited by cold temperature stress
                # Get parameters for logistic curve
                KsTr_up = 1
                KsTr_lo = 0.02
                fshapeb = (-1) * (
                    np.log(((KsTr_lo * KsTr_up) - 0.98 * KsTr_lo) / (0.98 * (KsTr_up - KsTr_lo)))
                )
                # Calculate cold stress level
                GDDrel = (GDD - Crop.GDD_lo) / (Crop.GDD_up - Crop.GDD_lo)
                KsCold = (KsTr_up * KsTr_lo) / (
                    KsTr_lo + (KsTr_up - KsTr_lo) * np.exp(-fshapeb * GDDrel)
                )
                KsCold = KsCold - KsTr_lo * (1 - GDDrel)

        # Correct potential transpiration rate (mm/day)
        TrPot0 = TrPot0 * KsCold
        TrPot_NS = TrPot_NS * KsCold

        # print(TrPot0,NewCond.DAP)

        ## Calculate surface layer transpiration ##
        if (NewCond.SurfaceStorage > 0) and (NewCond.DaySubmerged < Crop.LagAer):

            # Update submergence days counter
            NewCond.DaySubmerged = NewCond.DaySubmerged + 1
            # Update anaerobic conditions counter for each compartment
            for ii in range(int(Soil_nComp)):
                # Increment aeration days counter for compartment ii
                NewCond.AerDaysComp[ii] = NewCond.AerDaysComp[ii] + 1
                if NewCond.AerDaysComp[ii] > Crop.LagAer:
                    NewCond.AerDaysComp[ii] = Crop.LagAer

            # Reduce actual transpiration that is possible to account for
            # aeration stress due to extended submergence
            fSub = 1 - (NewCond.DaySubmerged / Crop.LagAer)
            if NewCond.SurfaceStorage > (fSub * TrPot0):
                # Transpiration occurs from surface storage
                NewCond.SurfaceStorage = NewCond.SurfaceStorage - (fSub * TrPot0)
                TrAct0 = fSub * TrPot0
            else:
                # No transpiration from surface storage
                TrAct0 = 0

            if TrAct0 < (fSub * TrPot0):
                # More water can be extracted from soil profile for transpiration
                TrPot = (fSub * TrPot0) - TrAct0
                # print('now')
            else:
                # No more transpiration possible on current day
                TrPot = 0
                # print('here')

        else:
            # No surface transpiration occurs
            TrPot = TrPot0
            TrAct0 = 0

        # print(TrPot,NewCond.DAP)

        ## Update potential root zone transpiration for water stress ##
        # Determine root zone and top soil depletion, and root zone water
        # content
        TAW = TAWClass()
        Dr = DrClass()
        thRZ = thRZClass()
        (
            _,
            Dr.Zt,
            Dr.Rz,
            TAW.Zt,
            TAW.Rz,
            thRZ.Act,
            thRZ.S,
            thRZ.FC,
            thRZ.WP,
            thRZ.Dry,
            thRZ.Aer,
        ) = _root_zone_water(
            prof,
            float(NewCond.Zroot),
            NewCond.th,
            Soil_zTop,
            float(Crop.Zmin),
            Crop.Aer,
        )

        # Convert the mutable class instance into the numba-friendly namedtuple
        # NOTE(review): `callable(key)` tests the *string* key (always False);
        # presumably `callable(value)` was intended — confirm upstream.
        class_args = {key:value for key, value in thRZ.__dict__.items() if not key.startswith('__') and not callable(key)}
        thRZ = thRZNT(**class_args)

        # _,Dr,TAW,thRZ = root_zone_water(Soil_Profile,float(NewCond.Zroot),NewCond.th,Soil_zTop,float(Crop.Zmin),Crop.Aer)

        # Check whether to use root zone or top soil depletions for calculating
        # water stress
        if (Dr.Rz / TAW.Rz) <= (Dr.Zt / TAW.Zt):
            # Root zone is wetter than top soil, so use root zone value
            Dr = Dr.Rz
            TAW = TAW.Rz
        else:
            # Top soil is wetter than root zone, so use top soil values
            Dr = Dr.Zt
            TAW = TAW.Zt

        # Calculate water stress coefficients
        beta = True
        Ksw = KswClass()
        Ksw.Exp, Ksw.Sto, Ksw.Sen, Ksw.Pol, Ksw.StoLin = _water_stress(
            Crop.p_up,
            Crop.p_lo,
            Crop.ETadj,
            Crop.beta,
            Crop.fshape_w,
            NewCond.tEarlySen,
            Dr,
            TAW,
            Et0,
            beta,
        )
        # Ksw = water_stress(Crop, NewCond, Dr, TAW, Et0, beta)

        # Calculate aeration stress coefficients
        Ksa_Aer, NewCond.AerDays = _aeration_stress(NewCond.AerDays, Crop.LagAer, thRZ)
        # Maximum stress effect
        Ks = min(Ksw.StoLin, Ksa_Aer)
        # Update potential transpiration in root zone
        if IrrMngt_IrrMethod != 4:
            # No adjustment to TrPot for water stress when in net irrigation mode
            TrPot = TrPot * Ks

        ## Determine compartments covered by root zone ##
        # Compartments covered by the root zone
        rootdepth = round(max(float(NewCond.Zroot), float(Crop.Zmin)), 2)
        comp_sto = min(np.sum(Soil_Profile.dzsum < rootdepth) + 1, int(Soil_nComp))
        RootFact = np.zeros(int(Soil_nComp))
        # Determine fraction of each compartment covered by root zone
        for ii in range(comp_sto):
            if Soil_Profile.dzsum[ii] > rootdepth:
                RootFact[ii] = 1 - ((Soil_Profile.dzsum[ii] - rootdepth) / Soil_Profile.dz[ii])
            else:
                RootFact[ii] = 1

        ## Determine maximum sink term for each compartment ##
        SxComp = np.zeros(int(Soil_nComp))
        if IrrMngt_IrrMethod == 4:
            # Net irrigation mode
            for ii in range(comp_sto):
                SxComp[ii] = (Crop.SxTop + Crop.SxBot) / 2
        else:
            # Maximum sink term declines linearly with depth
            SxCompBot = Crop.SxTop
            for ii in range(comp_sto):
                SxCompTop = SxCompBot
                if Soil_Profile.dzsum[ii] <= rootdepth:
                    SxCompBot = Crop.SxBot * NewCond.rCor + (
                        (Crop.SxTop - Crop.SxBot * NewCond.rCor)
                        * ((rootdepth - Soil_Profile.dzsum[ii]) / rootdepth)
                    )
                else:
                    SxCompBot = Crop.SxBot * NewCond.rCor

                SxComp[ii] = (SxCompTop + SxCompBot) / 2

        # print(TrPot,NewCond.DAP)
        ## Extract water ##
        ToExtract = TrPot
        comp = -1
        TrAct = 0
        while (ToExtract > 0) and (comp < comp_sto - 1):
            # Increment compartment
            comp = comp + 1
            # Specify layer number

            # Determine TAW (m3/m3) for compartment
            thTAW = prof.th_fc[comp] - prof.th_wp[comp]
            if Crop.ETadj == 1:
                # Adjust stomatal stress threshold for Et0 on current day
                # NOTE(review): p_up_sto is only assigned here; if
                # Crop.ETadj != 1 the next statement raises NameError —
                # presumably ETadj is always 1 in practice; confirm.
                p_up_sto = Crop.p_up[1] + (0.04 * (5 - Et0)) * (np.log10(10 - 9 * Crop.p_up[1]))

            # Determine critical water content at which stomatal closure will
            # occur in compartment
            thCrit = prof.th_fc[comp] - (thTAW * p_up_sto)

            # Check for soil water stress
            if NewCond.th[comp] >= thCrit:
                # No water stress effects on transpiration
                KsComp = 1
            elif NewCond.th[comp] > prof.th_wp[comp]:
                # Transpiration from compartment is affected by water stress
                Wrel = (prof.th_fc[comp] - NewCond.th[comp]) / (prof.th_fc[comp] - prof.th_wp[comp])
                pRel = (Wrel - Crop.p_up[1]) / (Crop.p_lo[1] - Crop.p_up[1])
                if pRel <= 0:
                    KsComp = 1
                elif pRel >= 1:
                    KsComp = 0
                else:
                    KsComp = 1 - (
                        (np.exp(pRel * Crop.fshape_w[1]) - 1) / (np.exp(Crop.fshape_w[1]) - 1)
                    )

                if KsComp > 1:
                    KsComp = 1
                elif KsComp < 0:
                    KsComp = 0
            else:
                # No transpiration is possible from compartment as water
                # content does not exceed wilting point
                KsComp = 0

            # Adjust compartment stress factor for aeration stress
            if NewCond.DaySubmerged >= Crop.LagAer:
                # Full aeration stress - no transpiration possible from
                # compartment
                AerComp = 0
            elif NewCond.th[comp] > (prof.th_s[comp] - (Crop.Aer / 100)):
                # Increment aeration stress days counter
                NewCond.AerDaysComp[comp] = NewCond.AerDaysComp[comp] + 1
                if NewCond.AerDaysComp[comp] >= Crop.LagAer:
                    NewCond.AerDaysComp[comp] = Crop.LagAer
                    fAer = 0
                else:
                    fAer = 1

                # Calculate aeration stress factor
                AerComp = (prof.th_s[comp] - NewCond.th[comp]) / (
                    prof.th_s[comp] - (prof.th_s[comp] - (Crop.Aer / 100))
                )
                if AerComp < 0:
                    AerComp = 0

                AerComp = (fAer + (NewCond.AerDaysComp[comp] - 1) * AerComp) / (
                    fAer + NewCond.AerDaysComp[comp] - 1
                )
            else:
                # No aeration stress as number of submerged days does not
                # exceed threshold for initiation of aeration stress
                AerComp = 1
                NewCond.AerDaysComp[comp] = 0

            # Extract water
            ThToExtract = (ToExtract / 1000) / Soil_Profile.dz[comp]
            if IrrMngt_IrrMethod == 4:
                # Don't reduce compartment sink for stomatal water stress if in
                # net irrigation mode. Stress only occurs due to deficient
                # aeration conditions
                Sink = AerComp * SxComp[comp] * RootFact[comp]
            else:
                # Reduce compartment sink for greatest of stomatal and aeration
                # stress
                if KsComp == AerComp:
                    Sink = KsComp * SxComp[comp] * RootFact[comp]
                else:
                    Sink = min(KsComp, AerComp) * SxComp[comp] * RootFact[comp]

            # Limit extraction to demand
            if ThToExtract < Sink:
                Sink = ThToExtract

            # Limit extraction to avoid compartment water content dropping
            # below air dry
            if (InitCond_th[comp] - Sink) < prof.th_dry[comp]:
                Sink = InitCond_th[comp] - prof.th_dry[comp]
                if Sink < 0:
                    Sink = 0

            # Update water content in compartment
            NewCond.th[comp] = InitCond_th[comp] - Sink
            # Update amount of water to extract
            ToExtract = ToExtract - (Sink * 1000 * prof.dz[comp])
            # Update actual transpiration
            TrAct = TrAct + (Sink * 1000 * prof.dz[comp])

        ## Add net irrigation water requirement (if this mode is specified) ##
        if (IrrMngt_IrrMethod == 4) and (TrPot > 0):
            # Initialise net irrigation counter
            IrrNet = 0
            # Get root zone water content
            TAW = TAWClass()
            Dr = DrClass()
            thRZ = thRZClass()
            (
                _,
                Dr.Zt,
                Dr.Rz,
                TAW.Zt,
                TAW.Rz,
                thRZ.Act,
                thRZ.S,
                thRZ.FC,
                thRZ.WP,
                thRZ.Dry,
                thRZ.Aer,
            ) = _root_zone_water(
                prof,
                float(NewCond.Zroot),
                NewCond.th,
                Soil_zTop,
                float(Crop.Zmin),
                Crop.Aer,
            )

            # _,_Dr,_TAW,thRZ = root_zone_water(Soil_Profile,float(NewCond.Zroot),NewCond.th,Soil_zTop,float(Crop.Zmin),Crop.Aer)
            NewCond.Depletion = Dr.Rz
            NewCond.TAW = TAW.Rz
            # Determine critical water content for net irrigation
            thCrit = thRZ.WP + ((IrrMngt_NetIrrSMT / 100) * (thRZ.FC - thRZ.WP))
            # Check if root zone water content is below net irrigation trigger
            if thRZ.Act < thCrit:
                # Initialise layer counter
                prelayer = 0
                for ii in range(comp_sto):
                    # Get soil layer
                    layeri = Soil_Profile.Layer[ii]
                    if layeri > prelayer:
                        # If in new layer, update critical water content for
                        # net irrigation
                        thCrit = prof.th_wp[ii] + (
                            (IrrMngt_NetIrrSMT / 100) * (prof.th_fc[ii] - prof.th_wp[ii])
                        )
                        # Update layer counter
                        prelayer = layeri

                    # Determine necessary change in water content in
                    # compartments to reach critical water content
                    dWC = RootFact[ii] * (thCrit - NewCond.th[ii]) * 1000 * prof.dz[ii]
                    # Update water content
                    NewCond.th[ii] = NewCond.th[ii] + (dWC / (1000 * prof.dz[ii]))
                    # Update net irrigation counter
                    IrrNet = IrrNet + dWC

            # Update net irrigation counter for the growing season
            NewCond.IrrNetCum = NewCond.IrrNetCum + IrrNet
        elif (IrrMngt_IrrMethod == 4) and (TrPot <= 0):
            # No net irrigation as potential transpiration is zero
            IrrNet = 0
        else:
            # No net irrigation as not in net irrigation mode
            IrrNet = 0
            NewCond.IrrNetCum = 0

        ## Add any surface transpiration to root zone total ##
        TrAct = TrAct + TrAct0

        ## Feedback with canopy cover development ##
        # If actual transpiration is zero then no canopy cover growth can occur
        if ((NewCond.CC - NewCond.CCprev) > 0.005) and (TrAct == 0):
            NewCond.CC = NewCond.CCprev

        ## Update transpiration ratio ##
        if TrPot0 > 0:
            if TrAct < TrPot0:
                NewCond.TrRatio = TrAct / TrPot0
            else:
                NewCond.TrRatio = 1
        else:
            NewCond.TrRatio = 1

        if NewCond.TrRatio < 0:
            NewCond.TrRatio = 0
        elif NewCond.TrRatio > 1:
            NewCond.TrRatio = 1
    else:
        # No transpiration if not in growing season
        TrAct = 0
        TrPot0 = 0
        TrPot_NS = 0
        # No irrigation if not in growing season
        IrrNet = 0
        NewCond.IrrNetCum = 0

    ## Store potential transpiration for irrigation calculations on next day ##
    NewCond.Tpot = TrPot0

    return TrAct, TrPot_NS, TrPot0, NewCond, IrrNet


# Cell
# @njit()
def groundwater_inflow(prof, NewCond):
    """
    Function to calculate capillary rise in the presence of a shallow
    groundwater table

    <a href="../pdfs/ac_ref_man_3.pdf#page=61" target="_blank">Reference Manual: capillary rise calculations</a> (pg.
52-61)

    *Arguments:*

    `Soil`: `SoilClass` : Soil object containing soil parameters

    `InitCond`: `InitCondClass` : InitCond object containing model parameters

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    `GwIn`: `float` : Groundwater inflow

    """

    ## Store initial conditions for updating ##
    GwIn = 0

    ## Perform calculations ##
    if NewCond.WTinSoil == True:
        # Water table in soil profile. Calculate horizontal inflow.
        # Get groundwater table elevation on current day
        zGW = NewCond.zGW

        # Find compartment mid-points
        zMid = prof.zMid
        # For compartments below water table, set to saturation #
        # NOTE(review): assumes at least one compartment mid-point lies at or
        # below the water table whenever WTinSoil is True; otherwise the [0]
        # index raises — confirm invariant upstream.
        idx = np.argwhere(zMid >= zGW).flatten()[0]

        for ii in range(idx, len(prof.Comp)):
            # Get soil layer
            if NewCond.th[ii] < prof.th_s[ii]:
                # Update water content
                dth = prof.th_s[ii] - NewCond.th[ii]
                NewCond.th[ii] = prof.th_s[ii]
                # Update groundwater inflow (mm)
                GwIn = GwIn + (dth * 1000 * prof.dz[ii])

    return NewCond, GwIn


# Cell
# @njit()
@cc.export("_HIref_current_day", (f8,i8,i8,b1,f8,f8,CropStructNT_type_sig,b1))
def HIref_current_day(
    NewCond_HIref,
    NewCond_DAP,
    NewCond_DelayedCDs,
    NewCond_YieldForm,
    NewCond_PctLagPhase,
    NewCond_CCprev,
    Crop,
    GrowingSeason):
    """
    Function to calculate reference (no adjustment for stress effects)
    harvest index on current day

    <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126)

    *Arguments:*

    `InitCond`: `InitCondClass` : InitCond object containing model parameters

    `Crop`: `CropClass` : Crop object containing Crop parameters

    `GrowingSeason`: `bool` : is growing season (True or False)

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    ## Store initial conditions for updating ##
    # NewCond = InitCond
    InitCond_HIref = NewCond_HIref*1

    # NewCond.HIref = 0.

    ## Calculate reference harvest index (if in growing season) ##
    if GrowingSeason == True:
        # Check if in yield formation period
        tAdj = NewCond_DAP - NewCond_DelayedCDs
        if tAdj > Crop.HIstartCD:
            NewCond_YieldForm = True
        else:
            NewCond_YieldForm = False

        # Get time for harvest index calculation
        HIt = NewCond_DAP - NewCond_DelayedCDs - Crop.HIstartCD - 1

        if HIt <= 0:
            # Yet to reach time for HI build-up
            NewCond_HIref = 0
            NewCond_PctLagPhase = 0
        else:
            if NewCond_CCprev <= (Crop.CCmin * Crop.CCx):
                # HI cannot develop further as canopy cover is too small
                NewCond_HIref = InitCond_HIref
            else:
                # Check crop type
                if (Crop.CropType == 1) or (Crop.CropType == 2):
                    # If crop type is leafy vegetable or root/tuber, then proceed with
                    # logistic growth (i.e. no linear switch)
                    NewCond_PctLagPhase = 100  # No lag phase
                    # Calculate reference harvest index for current day
                    NewCond_HIref = (Crop.HIini * Crop.HI0) / (
                        Crop.HIini + (Crop.HI0 - Crop.HIini) * np.exp(-Crop.HIGC * HIt)
                    )
                    # Harvest index approaching maximum limit
                    if NewCond_HIref >= (0.9799 * Crop.HI0):
                        NewCond_HIref = Crop.HI0

                elif Crop.CropType == 3:
                    # If crop type is fruit/grain producing, check for linear switch
                    if HIt < Crop.tLinSwitch:
                        # Not yet reached linear switch point, therefore proceed with
                        # logistic build-up
                        NewCond_PctLagPhase = 100 * (HIt / Crop.tLinSwitch)
                        # Calculate reference harvest index for current day
                        # (logistic build-up)
                        NewCond_HIref = (Crop.HIini * Crop.HI0) / (
                            Crop.HIini + (Crop.HI0 - Crop.HIini) * np.exp(-Crop.HIGC * HIt)
                        )
                    else:
                        # Linear switch point has been reached
                        NewCond_PctLagPhase = 100
                        # Calculate reference harvest index for current day
                        # (logistic portion)
                        NewCond_HIref = (Crop.HIini * Crop.HI0) / (
                            Crop.HIini
                            + (Crop.HI0 - Crop.HIini) * np.exp(-Crop.HIGC * Crop.tLinSwitch)
                        )
                        # Calculate reference harvest index for current day
                        # (total - logistic portion + linear portion)
                        NewCond_HIref = NewCond_HIref + (Crop.dHILinear * (HIt - Crop.tLinSwitch))

                # Limit HIref and round off computed value
                if NewCond_HIref > Crop.HI0:
                    NewCond_HIref = Crop.HI0
                elif NewCond_HIref <= (Crop.HIini + 0.004):
                    NewCond_HIref = 0
                elif (Crop.HI0 - NewCond_HIref) < 0.004:
                    NewCond_HIref = Crop.HI0

    else:
        # Reference harvest index is zero outside of growing season
        NewCond_HIref = 0

    return (NewCond_HIref,
            NewCond_YieldForm,
            NewCond_PctLagPhase,
            )


# Cell
# @njit()
@cc.export("_biomass_accumulation", (CropStructNT_type_sig,i8,i8,f8,f8,f8,f8,f8,f8,f8,b1))
def biomass_accumulation(
    Crop,
    NewCond_DAP,
    NewCond_DelayedCDs,
    NewCond_HIref,
    NewCond_PctLagPhase,
    NewCond_B,
    NewCond_B_NS,
    Tr,
    TrPot,
    Et0,
    GrowingSeason):
    """
    Function to calculate biomass accumulation

    <a href="../pdfs/ac_ref_man_3.pdf#page=107" target="_blank">Reference Manual: biomass accumulation</a> (pg. 98-108)

    *Arguments:*

    `Crop`: `CropClass` : Crop object

    `InitCond`: `InitCondClass` : InitCond object containing model parameters

    `Tr`: `float` : Daily transpiration

    `TrPot`: `float` : Daily potential transpiration

    `Et0`: `float` : Daily reference evapotranspiration

    `GrowingSeason`:: `bool` : is Growing season?
(True, False)

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    ## Store initial conditions in a new structure for updating ##
    # NewCond = InitCond

    ## Calculate biomass accumulation (if in growing season) ##
    if GrowingSeason == True:
        # Get time for harvest index build-up
        HIt = NewCond_DAP - NewCond_DelayedCDs - Crop.HIstartCD - 1

        if ((Crop.CropType == 2) or (Crop.CropType == 3)) and (NewCond_HIref > 0):
            # Adjust WP for reproductive stage
            if Crop.Determinant == 1:
                fswitch = NewCond_PctLagPhase / 100
            else:
                if HIt < (Crop.YldFormCD / 3):
                    fswitch = HIt / (Crop.YldFormCD / 3)
                else:
                    fswitch = 1

            WPadj = Crop.WP * (1 - (1 - Crop.WPy / 100) * fswitch)
        else:
            WPadj = Crop.WP

        # print(WPadj)

        # Adjust WP for CO2 effects
        WPadj = WPadj * Crop.fCO2

        # print(WPadj)
        # Calculate biomass accumulation on current day
        # No water stress
        dB_NS = WPadj * (TrPot / Et0)
        # With water stress
        dB = WPadj * (Tr / Et0)
        # Guard against NaN (e.g. 0/0 when Et0 is zero)
        if np.isnan(dB) == True:
            dB = 0

        # Update biomass accumulation
        NewCond_B = NewCond_B + dB
        NewCond_B_NS = NewCond_B_NS + dB_NS
    else:
        # No biomass accumulation outside of growing season
        NewCond_B = 0
        NewCond_B_NS = 0

    return (NewCond_B, NewCond_B_NS)


# Cell
# @njit()
@cc.export("_temperature_stress", (CropStructNT_type_sig,f8,f8))
def temperature_stress(Crop, Tmax, Tmin):
    # Function to calculate temperature stress coefficients
    """
    Function to calculate temperature stress coefficients affecting pollination

    <a href="../pdfs/ac_ref_man_3.pdf#page=23" target="_blank">Reference Manual: temperature stress</a> (pg. 14)

    *Arguments:*

    `Crop`: `CropClass` : Crop object containing Crop parameters

    `Tmax`: `float` : max temperature on current day (Celsius)

    `Tmin`: `float` : min temperature on current day (Celsius)

    *Returns:*

    `Kst`: `KstClass` : Kst object containing temperature stress parameters

    """

    ## Calculate temperature stress coefficients affecting crop pollination ##
    # Get parameters for logistic curve
    KsPol_up = 1
    KsPol_lo = 0.001

    # Kst = KstClass()

    # Calculate effects of heat stress on pollination
    if Crop.PolHeatStress == 0:
        # No heat stress effects on pollination
        Kst_PolH = 1
    elif Crop.PolHeatStress == 1:
        # Pollination affected by heat stress
        if Tmax <= Crop.Tmax_lo:
            Kst_PolH = 1
        elif Tmax >= Crop.Tmax_up:
            Kst_PolH = 0
        else:
            Trel = (Tmax - Crop.Tmax_lo) / (Crop.Tmax_up - Crop.Tmax_lo)
            Kst_PolH = (KsPol_up * KsPol_lo) / (
                KsPol_lo + (KsPol_up - KsPol_lo) * np.exp(-Crop.fshape_b * (1 - Trel))
            )

    # Calculate effects of cold stress on pollination
    if Crop.PolColdStress == 0:
        # No cold stress effects on pollination
        Kst_PolC = 1
    elif Crop.PolColdStress == 1:
        # Pollination affected by cold stress
        if Tmin >= Crop.Tmin_up:
            Kst_PolC = 1
        elif Tmin <= Crop.Tmin_lo:
            Kst_PolC = 0
        else:
            Trel = (Crop.Tmin_up - Tmin) / (Crop.Tmin_up - Crop.Tmin_lo)
            Kst_PolC = (KsPol_up * KsPol_lo) / (
                KsPol_lo + (KsPol_up - KsPol_lo) * np.exp(-Crop.fshape_b * (1 - Trel))
            )

    return (Kst_PolH,Kst_PolC)


# Cell
# @njit()
@cc.export("_HIadj_pre_anthesis", (f8,f8,f8,f8))
def HIadj_pre_anthesis(
    NewCond_B,
    NewCond_B_NS,
    NewCond_CC,
    Crop_dHI_pre):
    """
    Function to calculate adjustment to harvest index for pre-anthesis water
    stress

    <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg.
110-126)

    *Arguments:*

    `InitCond`: `InitCondClass` : InitCond object containing model parameters

    `Crop`: `CropClass` : Crop object containing Crop parameters

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    ## Store initial conditions in structure for updating ##
    # NewCond = InitCond

    # check that there is an adjustment to be made
    if Crop_dHI_pre > 0:
        ## Calculate adjustment ##
        # Get parameters
        # Br = ratio of stressed to unstressed biomass accumulated to date
        Br = NewCond_B / NewCond_B_NS
        Br_range = np.log(Crop_dHI_pre) / 5.62
        Br_upp = 1
        Br_low = 1 - Br_range
        Br_top = Br_upp - (Br_range / 3)

        # Get biomass ratios
        ratio_low = (Br - Br_low) / (Br_top - Br_low)
        ratio_upp = (Br - Br_top) / (Br_upp - Br_top)

        # Calculate adjustment factor
        if (Br >= Br_low) and (Br < Br_top):
            NewCond_Fpre = 1 + (
                ((1 + np.sin((1.5 - ratio_low) * np.pi)) / 2) * (Crop_dHI_pre / 100)
            )
        elif (Br > Br_top) and (Br <= Br_upp):
            NewCond_Fpre = 1 + (
                ((1 + np.sin((0.5 + ratio_upp) * np.pi)) / 2) * (Crop_dHI_pre / 100)
            )
        else:
            NewCond_Fpre = 1
    else:
        NewCond_Fpre = 1

    if NewCond_CC <= 0.01:
        # No green canopy cover left at start of flowering so no harvestable
        # crop will develop
        NewCond_Fpre = 0

    return NewCond_Fpre


# Cell
# @njit()
@cc.export("_HIadj_pollination", (f8,f8,f8,f8,f8,KswNT_type_sig,KstNT_type_sig,f8))
def HIadj_pollination(
    NewCond_CC,
    NewCond_Fpol,
    Crop_FloweringCD,
    Crop_CCmin,
    Crop_exc,
    Ksw,
    Kst,
    HIt,
):
    """
    Function to calculate adjustment to harvest index for failure of
    pollination due to water or temperature stress

    <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126)

    *Arguments:*

    `InitCond`: `InitCondClass` : InitCond object containing model parameters

    `Crop`: `CropClass` : Crop object containing Crop parameters

    `Ksw`: `KswClass` : Ksw object containing water stress parameters

    `Kst`: `KstClass` : Kst object containing temperature stress parameters

    `HIt`: `float` : time for harvest index build-up (calendar days)

    *Returns:*

    `NewCond`: `InitCondClass` : InitCond object containing updated model parameters

    """

    ## Calculate harvest index adjustment for pollination ##
    # Get fractional flowering
    if HIt == 0:
        # No flowering yet
        FracFlow = 0
    elif HIt > 0:
        # Fractional flowering on previous day
        t1 = HIt - 1
        if t1 == 0:
            F1 = 0
        else:
            t1Pct = 100 * (t1 / Crop_FloweringCD)
            if t1Pct > 100:
                t1Pct = 100
            F1 = 0.00558 * np.exp(0.63 * np.log(t1Pct)) - (0.000969 * t1Pct) - 0.00383

        if F1 < 0:
            F1 = 0

        # Fractional flowering on current day
        t2 = HIt
        if t2 == 0:
            F2 = 0
        else:
            t2Pct = 100 * (t2 / Crop_FloweringCD)
            if t2Pct > 100:
                t2Pct = 100
            F2 = 0.00558 * np.exp(0.63 * np.log(t2Pct)) - (0.000969 * t2Pct) - 0.00383

        if F2 < 0:
            F2 = 0

        # Weight values
        if abs(F1 - F2) < 0.0000001:
            F = 0
        else:
            F = 100 * ((F1 + F2) / 2) / Crop_FloweringCD

        FracFlow = F

    # Calculate pollination adjustment for current day
    if NewCond_CC < Crop_CCmin:
        # No pollination can occur as canopy cover is smaller than minimum
        # threshold
        dFpol = 0
    else:
        Ks = min([Ksw.Pol, Kst.PolC, Kst.PolH])
        dFpol = Ks * FracFlow * (1 + (Crop_exc / 100))

    # Calculate pollination adjustment to date
    NewCond_Fpol = NewCond_Fpol + dFpol
    if NewCond_Fpol > 1:
        # Crop has fully pollinated
        NewCond_Fpol = 1

    return NewCond_Fpol


# Cell
# @njit()
@cc.export("_HIadj_post_anthesis", (i8,f8,f8,i8,f8,f8,f8,f8,CropStructNT_type_sig,KswNT_type_sig,))
def HIadj_post_anthesis(
    NewCond_DelayedCDs,
    NewCond_sCor1,
    NewCond_sCor2,
    NewCond_DAP,
    NewCond_Fpre,
    NewCond_CC,
    NewCond_fpost_upp,
    NewCond_fpost_dwn,
    Crop,
    Ksw):
    """
    Function to calculate adjustment to harvest index for post-anthesis water
    stress

    <a
href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126) *Arguments:* `InitCond`: `InitCondClass` : InitCond object containing model paramaters `Crop`: `CropClass` : Crop object containing Crop paramaters `Ksw`: `KswClass` : Ksw object containing water stress paramaters *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters """ ## Store initial conditions in a structure for updating ## # NewCond = InitCond InitCond_DelayedCDs = NewCond_DelayedCDs*1 InitCond_sCor1 = NewCond_sCor1*1 InitCond_sCor2 = NewCond_sCor2*1 ## Calculate harvest index adjustment ## # 1. Adjustment for leaf expansion tmax1 = Crop.CanopyDevEndCD - Crop.HIstartCD DAP = NewCond_DAP - InitCond_DelayedCDs if ( (DAP <= (Crop.CanopyDevEndCD + 1)) and (tmax1 > 0) and (NewCond_Fpre > 0.99) and (NewCond_CC > 0.001) and (Crop.a_HI > 0) ): dCor = 1 + (1 - Ksw.Exp) / Crop.a_HI NewCond_sCor1 = InitCond_sCor1 + (dCor / tmax1) DayCor = DAP - 1 - Crop.HIstartCD NewCond_fpost_upp = (tmax1 / DayCor) * NewCond_sCor1 # 2. 
Adjustment for stomatal closure tmax2 = Crop.YldFormCD DAP = NewCond_DAP - InitCond_DelayedCDs if ( (DAP <= (Crop.HIendCD + 1)) and (tmax2 > 0) and (NewCond_Fpre > 0.99) and (NewCond_CC > 0.001) and (Crop.b_HI > 0) ): # print(Ksw.Sto) dCor = np.power(Ksw.Sto, 0.1) * (1 - (1 - Ksw.Sto) / Crop.b_HI) NewCond_sCor2 = InitCond_sCor2 + (dCor / tmax2) DayCor = DAP - 1 - Crop.HIstartCD NewCond_fpost_dwn = (tmax2 / DayCor) * NewCond_sCor2 # Determine total multiplier if (tmax1 == 0) and (tmax2 == 0): NewCond_Fpost = 1 else: if tmax2 == 0: NewCond_Fpost = NewCond_fpost_upp else: if tmax1 == 0: NewCond_Fpost = NewCond_fpost_dwn elif tmax1 <= tmax2: NewCond_Fpost = NewCond_fpost_dwn * ( ((tmax1 * NewCond_fpost_upp) + (tmax2 - tmax1)) / tmax2 ) else: NewCond_Fpost = NewCond_fpost_upp * ( ((tmax2 * NewCond_fpost_dwn) + (tmax1 - tmax2)) / tmax1 ) return ( NewCond_sCor1, NewCond_sCor2, NewCond_fpost_upp, NewCond_fpost_dwn, NewCond_Fpost) # Cell # @njit() def harvest_index(prof, Soil_zTop, Crop, InitCond, Et0, Tmax, Tmin, GrowingSeason): """ Function to simulate build up of harvest index <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 
110-126) *Arguments:* `Soil`: `SoilClass` : Soil object containing soil paramaters `Crop`: `CropClass` : Crop object containing Crop paramaters `InitCond`: `InitCondClass` : InitCond object containing model paramaters `Et0`: `float` : reference evapotranspiration on current day `Tmax`: `float` : maximum tempature on current day (celcius) `Tmin`: `float` : minimum tempature on current day (celcius) `GrowingSeason`:: `bool` : is growing season (True or Flase) *Returns:* `NewCond`: `InitCondClass` : InitCond object containing updated model paramaters """ ## Store initial conditions for updating ## NewCond = InitCond InitCond_HI = InitCond.HI InitCond_HIadj = InitCond.HIadj InitCond_PreAdj = InitCond.PreAdj ## Calculate harvest index build up (if in growing season) ## if GrowingSeason == True: # Calculate root zone water content TAW = TAWClass() Dr = DrClass() # thRZ = thRZClass() _, Dr.Zt, Dr.Rz, TAW.Zt, TAW.Rz, _,_,_,_,_,_, = _root_zone_water( prof, float(NewCond.Zroot), NewCond.th, Soil_zTop, float(Crop.Zmin), Crop.Aer, ) # _,Dr,TAW,_ = root_zone_water(Soil_Profile,float(NewCond.Zroot),NewCond.th,Soil_zTop,float(Crop.Zmin),Crop.Aer) # Check whether to use root zone or top soil depletions for calculating # water stress if (Dr.Rz / TAW.Rz) <= (Dr.Zt / TAW.Zt): # Root zone is wetter than top soil, so use root zone value Dr = Dr.Rz TAW = TAW.Rz else: # Top soil is wetter than root zone, so use top soil values Dr = Dr.Zt TAW = TAW.Zt # Calculate water stress beta = True # Ksw = water_stress(Crop, NewCond, Dr, TAW, Et0, beta) # Ksw = KswClass() Ksw_Exp, Ksw_Sto, Ksw_Sen, Ksw_Pol, Ksw_StoLin = _water_stress( Crop.p_up, Crop.p_lo, Crop.ETadj, Crop.beta, Crop.fshape_w, NewCond.tEarlySen, Dr, TAW, Et0, beta, ) Ksw = KswNT(Exp=Ksw_Exp, Sto=Ksw_Sto, Sen=Ksw_Sen, Pol=Ksw_Pol, StoLin=Ksw_StoLin ) # Calculate temperature stress (Kst_PolH,Kst_PolC) = _temperature_stress(Crop, Tmax, Tmin) Kst = KstNT(PolH=Kst_PolH,PolC=Kst_PolC) # Get reference harvest index on current day HIi = 
NewCond.HIref # Get time for harvest index build-up HIt = NewCond.DAP - NewCond.DelayedCDs - Crop.HIstartCD - 1 # Calculate harvest index if (NewCond.YieldForm == True) and (HIt >= 0): # print(NewCond.DAP) # Root/tuber or fruit/grain crops if (Crop.CropType == 2) or (Crop.CropType == 3): # Detemine adjustment for water stress before anthesis if InitCond_PreAdj == False: InitCond.PreAdj = True NewCond.Fpre = _HIadj_pre_anthesis(NewCond.B, NewCond.B_NS, NewCond.CC, Crop.dHI_pre) # Determine adjustment for crop pollination failure if Crop.CropType == 3: # Adjustment only for fruit/grain crops if (HIt > 0) and (HIt <= Crop.FloweringCD): NewCond.Fpol = _HIadj_pollination( NewCond.CC, NewCond.Fpol, Crop.FloweringCD, Crop.CCmin, Crop.exc, Ksw, Kst, HIt, ) HImax = NewCond.Fpol * Crop.HI0 else: # No pollination adjustment for root/tuber crops HImax = Crop.HI0 # Determine adjustments for post-anthesis water stress if HIt > 0: (NewCond.sCor1, NewCond.sCor2, NewCond.fpost_upp, NewCond.fpost_dwn, NewCond.Fpost) = _HIadj_post_anthesis(NewCond.DelayedCDs, NewCond.sCor1, NewCond.sCor2, NewCond.DAP, NewCond.Fpre, NewCond.CC, NewCond.fpost_upp, NewCond.fpost_dwn, Crop, Ksw) # Limit HI to maximum allowable increase due to pre- and # post-anthesis water stress combinations HImult = NewCond.Fpre * NewCond.Fpost if HImult > 1 + (Crop.dHI0 / 100): HImult = 1 + (Crop.dHI0 / 100) # Determine harvest index on current day, adjusted for stress # effects if HImax >= HIi: HIadj = HImult * HIi else: HIadj = HImult * HImax elif Crop.CropType == 1: # Leafy vegetable crops - no adjustment, harvest index equal to # reference value for current day HIadj = HIi else: # No build-up of harvest index if outside yield formation period HIi = InitCond_HI HIadj = InitCond_HIadj # Store final values for current time step NewCond.HI = HIi NewCond.HIadj = HIadj else: # No harvestable crop outside of a growing season NewCond.HI = 0 NewCond.HIadj = 0 # print([NewCond.DAP , Crop.YldFormCD]) return NewCond if 
__name__ == "__main__": cc.compile()
{"hexsha": "a3b5f124926ff337162b40c8f915ebb870ee872f", "size": 153103, "ext": "py", "lang": "Python", "max_stars_repo_path": "aquacrop/solution.py", "max_stars_repo_name": "arongergely/aquacrop", "max_stars_repo_head_hexsha": "a6799ad81f46ebe9b48721f9cb6c55143663fe60", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aquacrop/solution.py", "max_issues_repo_name": "arongergely/aquacrop", "max_issues_repo_head_hexsha": "a6799ad81f46ebe9b48721f9cb6c55143663fe60", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aquacrop/solution.py", "max_forks_repo_name": "arongergely/aquacrop", "max_forks_repo_head_hexsha": "a6799ad81f46ebe9b48721f9cb6c55143663fe60", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1750812568, "max_line_length": 137, "alphanum_fraction": 0.5412761344, "include": true, "reason": "import numpy,from numba", "num_tokens": 40285}
# Remove nans from textfile output of dmstack and only extract few columns
# Author: Bhishan Poudel
#
# Filtering:
# 1. flag calib_psfCandidate==False
# 2. column deblend_nChild==0
# 3. ellipticity e = sqrt(e1^2 + e2^2) < 1.5
# 4. choose only few columns given below
# 5. remove nans from all these columns
# 6. change delimiter to tab.
#
# columns:
# id (90)
# base_SdssCentroid_x, base_SdssCentroid_y (102, 103)
# base_SdssCentroid_xSigma, base_SdssCentroid_ySigma (104,105)
# ext_shapeHSM_HsmShapeRegauss_e1, ext_shapeHSM_HsmShapeRegauss_e2 (127, 128)
# base_SdssShape_flux (114)
#
# In total there are 8 columns
# id
# x1,x2 xerr1 xerr2
# e1 e2
# flux
#
import os
import pandas as pd
import numpy as np
import sys
import glob


def remove_nans(ifile):
    """
    Remove nans and filter data from a dmstack output csv file.

    Parameters
    ----------
    ifile : str
        Path to the input ``.csv`` catalog produced by dmstack.

    Side effects
    ------------
    Writes a tab-delimited ``.txt`` file next to the input (same stem)
    containing the eight selected columns, with a commented header line
    and all rows containing NaNs dropped.

    Notes
    -----
    Column indices in the dmstack schema, for reference (the first 90
    columns, col0 to col89, are flags):

        1   : calib_psfCandidate               (used for filtering only)
        94  : deblend_nChild                   (used for filtering only)
        90  : id
        102 : base_SdssCentroid_x
        103 : base_SdssCentroid_y
        104 : base_SdssCentroid_xSigma
        105 : base_SdssCentroid_ySigma
        127 : ext_shapeHSM_HsmShapeRegauss_e1
        128 : ext_shapeHSM_HsmShapeRegauss_e2
        114 : base_SdssShape_flux
    """
    # NOTE: the original code built a ``usecols`` index list here but never
    # passed it to read_csv; the dead variable has been removed and the
    # indices preserved in the docstring above.  Columns are addressed by
    # name throughout, which is robust to schema reordering.
    df = pd.read_csv(ifile, sep=",", low_memory=False)

    # Coerce every column to numeric; unparseable entries become NaN and
    # are removed by the final dropna().
    for c in df.columns:
        df[c] = pd.to_numeric(df[c], errors='coerce')

    # filter the flag calib_psfCandidate==False
    # not a star candidate
    df = df.query('calib_psfCandidate == 0.0')

    # filter the column deblend_nChild==0
    # no child source after deblending
    df = df.query('deblend_nChild == 0.0')

    df = df.copy()

    # clean out unphysical results
    # e1^2 + e2^2 < 1.5^2
    df['e'] = (df['ext_shapeHSM_HsmShapeRegauss_e1'] ** 2
               + df['ext_shapeHSM_HsmShapeRegauss_e2'] ** 2) ** 0.5
    df = df.query('e < 1.5')

    # take only required columns
    cols_select = ['id',
                   'base_SdssCentroid_x', 'base_SdssCentroid_y',
                   'base_SdssCentroid_xSigma', 'base_SdssCentroid_ySigma',
                   'ext_shapeHSM_HsmShapeRegauss_e1',
                   'ext_shapeHSM_HsmShapeRegauss_e2',
                   'base_SdssShape_flux']
    df = df[cols_select]

    # drop all nans
    df = df.dropna()

    # write txt file with commented header (np.savetxt prefixes '# ')
    prefix = ' ' * 11
    header_line = prefix.join(cols_select)
    # splitext is safer than slicing off the last 4 characters.
    ofile = os.path.splitext(ifile)[0] + '.txt'
    np.savetxt(ofile, df.values, header=header_line, delimiter='\t')


if __name__ == '__main__':
    for ifile in glob.glob("*.csv"):
        print("Reading: ", ifile)
        remove_nans(ifile)
{"hexsha": "a0ea37960887bd93e6c81e3d450baff05430b52b", "size": 2790, "ext": "py", "lang": "Python", "max_stars_repo_path": "IMPORTANT_scripts/remove_nans_dmstack.py", "max_stars_repo_name": "bpRsh/shear_analysis_after_dmstack", "max_stars_repo_head_hexsha": "bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "IMPORTANT_scripts/remove_nans_dmstack.py", "max_issues_repo_name": "bpRsh/shear_analysis_after_dmstack", "max_issues_repo_head_hexsha": "bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IMPORTANT_scripts/remove_nans_dmstack.py", "max_forks_repo_name": "bpRsh/shear_analysis_after_dmstack", "max_forks_repo_head_hexsha": "bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-20T18:18:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T18:40:31.000Z", "avg_line_length": 29.0625, "max_line_length": 108, "alphanum_fraction": 0.6756272401, "include": true, "reason": "import numpy", "num_tokens": 932}
# # Copyright 2013 Y12Studio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import threading,time from scipy import stats import m_thread as mt import logging def testLinregress(): xi = np.arange(0,9) y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24] slope, intercept, r_value, p_value, std_err = stats.linregress(xi,y) print 'slope',slope print 'intercept', intercept print 'r value', r_value print 'p_value', p_value print 'standard deviation', std_err def testMthread1(): w = mt.MyBaseWorker() for i in range(5): w.sendData(i) time.sleep(1) w.stop() time.sleep(3) print 'END' def testMthread2(): wcls = mt.BaseWorker() time.sleep(2) wcls.sendData(12) print('Event is Send 1') time.sleep(5) wcls.sendData(99) print('Event is Send 2') time.sleep(2) wcls.stop() class LedCircle(mt.BaseWorker): def __init__(self): mt.BaseWorker.__init__(self) def handleEvent(self): targetValue = self.data print 'EVT Value=',targetValue def testMthread3(): wcls = LedCircle() time.sleep(2) wcls.sendData(12) print('Event is Send 1') time.sleep(5) wcls.sendData(99) print('Event is Send 2') time.sleep(2) wcls.stop() testMthread3()
{"hexsha": "291e6a93edbae015a8d0af0c96ce36da19ce9eac", "size": 1835, "ext": "py", "lang": "Python", "max_stars_repo_path": "testonly/mpush2/t_foo.py", "max_stars_repo_name": "y12studio/pi", "max_stars_repo_head_hexsha": "c815b0b2a2421036ec99e085ffa92b1e3b5145f9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-10-28T07:30:34.000Z", "max_stars_repo_stars_event_max_datetime": "2016-10-28T07:30:34.000Z", "max_issues_repo_path": "testonly/mpush2/t_foo.py", "max_issues_repo_name": "y12studio/pi", "max_issues_repo_head_hexsha": "c815b0b2a2421036ec99e085ffa92b1e3b5145f9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testonly/mpush2/t_foo.py", "max_forks_repo_name": "y12studio/pi", "max_forks_repo_head_hexsha": "c815b0b2a2421036ec99e085ffa92b1e3b5145f9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4861111111, "max_line_length": 74, "alphanum_fraction": 0.6610354223, "include": true, "reason": "import numpy,from scipy", "num_tokens": 513}
[STATEMENT] lemma admS_POR_lf [intro, simp]: "POR_lf_rep r \<in> admS" [PROOF STATE] proof (prove) goal (1 subgoal): 1. POR_lf_rep r \<in> admS [PROOF STEP] proof [PROOF STATE] proof (state) goal (2 subgoals): 1. \<bottom> \<in> POR_lf_rep r 2. adm (\<lambda>x. x \<in> POR_lf_rep r) [PROOF STEP] show "\<bottom> \<in> POR_lf_rep r" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<bottom> \<in> POR_lf_rep r [PROOF STEP] unfolding POR_lf_rep_def POR_base_lf_rep_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<bottom> \<in> (case r of (mR, pR) \<Rightarrow> {\<lambda>i. ValTT} \<union> {\<lambda>i. ValFF} \<union> (\<Union>n. {\<lambda>i. ValN\<cdot>n}) \<union> {f. f One = \<bottom>} \<union> {f. f Two = \<bottom>}) \<union> fn_lf_rep r [PROOF STEP] by simp [PROOF STATE] proof (state) this: \<bottom> \<in> POR_lf_rep r goal (1 subgoal): 1. adm (\<lambda>x. x \<in> POR_lf_rep r) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. adm (\<lambda>x. x \<in> POR_lf_rep r) [PROOF STEP] show "adm (\<lambda>x. x \<in> POR_lf_rep r)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. adm (\<lambda>x. x \<in> POR_lf_rep r) [PROOF STEP] unfolding POR_lf_rep_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. adm (\<lambda>x. x \<in> POR_base_lf_rep r \<union> fn_lf_rep r) [PROOF STEP] using adm_POR_base_lf_rep[of r] adm_fn[of r] [PROOF STATE] proof (prove) using this: adm (\<lambda>x. x \<in> POR_base_lf_rep r) adm (\<lambda>x. x \<in> fn_lf_rep r) goal (1 subgoal): 1. adm (\<lambda>x. x \<in> POR_base_lf_rep r \<union> fn_lf_rep r) [PROOF STEP] by simp [PROOF STATE] proof (state) this: adm (\<lambda>x. x \<in> POR_lf_rep r) goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 838, "file": "PCF_PCF", "length": 10}
""" OpenMDAO Wrapper for Flops Automatically generated from flops.scriptWrapper with parse_phoenixwrapper. This wrapper is based on the ModelCenter Java wrapper, version 2.00 Beta """ # pylint: disable-msg=E0611,F0401,E1101 from numpy import int64 as numpy_int64 from numpy import float64 as numpy_float64 from numpy import str as numpy_str from numpy import zeros, array from openmdao.util.filewrap import FileParser from openmdao.util.namelist_util import Namelist from openmdao.main.api import VariableTree, FileMetadata from openmdao.lib.datatypes.api import Str, Bool, Int, Array, Enum, Float, \ File, List, VarTree from openmdao.lib.components.api import ExternalCode # pylint: disable-msg=C0301,C0324,C0103,R0903 class FlopsWrapper_output_Weight_Wing(VariableTree): """Container for output.Weight.Wing""" # OpenMDAO Public Variables w = Float(0.0, desc='Bending material factor. For detailed wing definition, this factor is calculated by numerical integration along the specified load path to determine the amount of bending material required to support an elliptical load distribution. The wing is treated as an idealized beam with dimensions proportional to the wing local chord and thickness. The bending factor is modified for aeroelastic penalties (flutter, divergence, and aeroelastic loads) depending on wing sweep (including forward), aspect ratio, degree of aeroelastic tailoring, and strut bracing, if any. These modifications are based on a curve fit of the results of a study performed using the Aeroelastic Tailoring and Structural Optimization (ATSO) code to structurally optimize a large matrix of wings.\n\nIf the detailed wing definition is not used, an equivalent bending factor is computed assuming a trapezoidal wing with constant t/c.') ew = Float(0.0, desc='Engine inertia relief factor.') w1 = Float(0.0, desc='The first term in the wing weight is the bending factor. 
It is adjusted for inertia relief for the wing itself and for any engines on the wing.') w2 = Float(0.0, desc='The second term represents control surfaces and shear material. According to structural and statistical studies conducted during weight module development, the weight of spars and ribs depends almost entirely on control surfaces. The amount of shear material required to carry structural loads is not critical.') w3 = Float(0.0, desc='The third term depends entirely on wing area and covers multitude of miscellaneous items.') class FlopsWrapper_output_Weight_Inertia(VariableTree): """Container for output.Weight.Inertia""" # OpenMDAO Public Variables cgx = Array(dtype=numpy_float64) cgy = Array(dtype=numpy_float64) cgz = Array(dtype=numpy_float64) ixxroll = Array(dtype=numpy_float64) ixxptch = Array(dtype=numpy_float64) ixxyaw = Array(dtype=numpy_float64) ixz = Array(dtype=numpy_float64) class FlopsWrapper_output_Weight(VariableTree): """Container for output.Weight""" # OpenMDAO Public Variables dowe = Float(0.0) paylod = Float(0.0) fuel = Float(0.0) rampwt = Float(0.0) wsr = Float(0.0) thrso = Float(0.0) esf = Float(0.0) twr = Float(0.0) wldg = Float(0.0) fultot = Float(0.0) exsful = Float(0.0) frwi = Float(0.0) frht = Float(0.0) frvt = Float(0.0) frfin = Float(0.0) frcan = Float(0.0) frfu = Float(0.0) wlg = Float(0.0) frna = Float(0.0) wengt = Float(0.0) wthr = Float(0.0) wpmisc = Float(0.0) wfsys = Float(0.0) frsc = Float(0.0) wapu = Float(0.0) win = Float(0.0) whyd = Float(0.0) welec = Float(0.0) wavonc = Float(0.0) wfurn = Float(0.0) wac = Float(0.0) wai = Float(0.0) wempty = Float(0.0) wflcrbw = Float(0.0) wwstuab = Float(0.0) wuf = Float(0.0) woil = Float(0.0) wsrv = Float(0.0) zfw = Float(0.0) wbomb = Float(0.0) # VariableTrees Inertia = VarTree(FlopsWrapper_output_Weight_Inertia()) Wing = VarTree(FlopsWrapper_output_Weight_Wing()) class FlopsWrapper_output_Plot_Files(VariableTree): """Container for output.Plot_Files""" # OpenMDAO Public Variables # 
TODO - Do we really need to read these in every time? Let's not for now. #cnfile = File(iotype='out', desc='Contour or thumbprint plot data file') #msfile = File(iotype='out', desc='Mission summary data file') #crfile = File(iotype='out', desc='Cruise schedule summary data file') #tofile = File(iotype='out', desc='Takeoff and landing aerodynamic and thrust data file') #nofile = File(iotype='out', desc='Takeoff and climb profile data file') #apfile = File(iotype='out', desc='Drag polar plot data file') #thfile = File(iotype='out', desc='Engine plot data file name') #hsfile = File(iotype='out', desc='Design history plot file') #psfile = File(iotype='out', desc='Excess power and load factor plot data file') class FlopsWrapper_output_Performance_Segments(VariableTree): """Container for output.Performance.Segments""" # OpenMDAO Public Variables segment = Array(dtype=numpy_str) weights = Array(dtype=numpy_float64) alts = Array(dtype=numpy_float64) machs = Array(dtype=numpy_float64) thrusts = Array(dtype=numpy_float64) totmaxs = Array(dtype=numpy_float64) lods = Array(dtype=numpy_float64) sfcs = Array(dtype=numpy_float64) engparms = Array(dtype=numpy_float64) weighte = Array(dtype=numpy_float64) alte = Array(dtype=numpy_float64) mache = Array(dtype=numpy_float64) thruste = Array(dtype=numpy_float64) totmaxe = Array(dtype=numpy_float64) lode = Array(dtype=numpy_float64) sfce = Array(dtype=numpy_float64) engparme = Array(dtype=numpy_float64) class FlopsWrapper_output_Performance_Constraints(VariableTree): """Container for output.Performance.Constraints""" # OpenMDAO Public Variables constraint = Array(dtype=numpy_str) value = Array(dtype=numpy_float64) units = Array(dtype=numpy_str) limit = Array(dtype=numpy_float64) weight = Array(dtype=numpy_float64) mach = Array(dtype=numpy_float64) alt = Array(dtype=numpy_float64) g = Array(dtype=numpy_float64) location = Array(dtype=numpy_str) class FlopsWrapper_output_Performance(VariableTree): """Container for output.Performance""" # 
OpenMDAO Public Variables fuel = Float(0.0) range = Float(0.0) vapp = Float(0.0) taxofl = Float(0.0) faroff = Float(0.0) farldg = Float(0.0) amfor = Float(0.0) ssfor = Float(0.0) esf = Float(0.0) thrso = Float(0.0) vmmo = Float(0.0) # VariableTrees Constraints = VarTree(FlopsWrapper_output_Performance_Constraints()) Segments = VarTree(FlopsWrapper_output_Performance_Segments()) class FlopsWrapper_output_Payload(VariableTree): """Container for output.Payload""" # OpenMDAO Public Variables npf = Int(0) npb = Int(0) npt = Int(0) nstu = Int(0) ngalc = Int(0) nflcr = Int(0) nstuag = Int(0) wppass = Float(0.0) bpp = Float(0.0) cargow = Float(0.0) cargof = Float(0.0) wcon = Float(0.0) class FlopsWrapper_output_Noise(VariableTree): """Container for output.Noise""" # OpenMDAO Public Variables nsplot = Str('', msg='Noise output filename') class FlopsWrapper_output_Geometry_BWB(VariableTree): """Container for output.Geometry.BWB""" # OpenMDAO Public Variables xlp = Float(0.0, units='ft', desc='Length of centerline') xlw = Float(0.0, units='ft', desc='Length of side wall') wf = Float(0.0, units='ft', desc='Width of cabin') acabin = Float(0.0, units='ft*ft', desc='Cabin area') nbaw = Int(0, desc='Number of bays') bayw = Float(0.0, units='ft', desc='Width of bay') nlava = Int(0, desc='NUMBER OF LAVATORIES') ngally = Int(0, desc='Number of galleys') nclset = Int(0, desc='Number of closets') xl = Float(0.0, units='ft', desc='Total fuselage length') df = Float(0.0, units='ft', desc='Fuselage maximum depth') class FlopsWrapper_output_Geometry(VariableTree): """Container for output.Geometry""" # OpenMDAO Public Variables xl = Float(0.0) wf = Float(0.0) df = Float(0.0) xlp = Float(0.0) ar = Float(0.0) sw = Float(0.0) tr = Float(0.0) sweep = Float(0.0) tca = Float(0.0) span = Float(0.0) glov = Float(0.0) sht = Float(0.0) svt = Float(0.0) xnac = Float(0.0) dnac = Float(0.0) xmlg = Float(0.0) xnlg = Float(0.0) # VariableTrees BWB = VarTree(FlopsWrapper_output_Geometry_BWB()) class 
FlopsWrapper_output_Engine(VariableTree): """Container for output.Engine""" # OpenMDAO Public Variables ofile = Str('') eofile = Str('') anopp = Str('') footpr = Str('') pltfil = Str('') class FlopsWrapper_output_Econ(VariableTree): """Container for output.Econ""" # OpenMDAO Public Variables sl = Array(dtype=numpy_float64) blockt = Array(dtype=numpy_float64) blockf = Array(dtype=numpy_float64) blockNx = Array(dtype=numpy_float64) wpayl = Array(dtype=numpy_float64) wgross = Array(dtype=numpy_float64) range = Array(dtype=numpy_float64) vapp = Array(dtype=numpy_float64) faroff = Array(dtype=numpy_float64) farldg = Array(dtype=numpy_float64) amfor = Array(dtype=numpy_float64) ssfor = Array(dtype=numpy_float64) class FlopsWrapper_output(VariableTree): """Container for output""" # VariableTrees Econ = VarTree(FlopsWrapper_output_Econ()) Engine = VarTree(FlopsWrapper_output_Engine()) Geometry = VarTree(FlopsWrapper_output_Geometry()) Noise = VarTree(FlopsWrapper_output_Noise()) Payload = VarTree(FlopsWrapper_output_Payload()) Performance = VarTree(FlopsWrapper_output_Performance()) Plot_Files = VarTree(FlopsWrapper_output_Plot_Files()) Weight = VarTree(FlopsWrapper_output_Weight()) class FlopsWrapper_input_wtin_Wing_Data(VariableTree): """Container for input.wtin.Wing_Data""" # OpenMDAO Public Variables span = Float(0.0, units='ft', desc='Wing span (optional, see &CONFIN - SW and AR)') dih = Float(0.0, units='deg', desc='Wing dihedral (positive) or anhedral (negative) angle') flapr = Float(0.3330, desc='Flap ratio -- ratio of total movable wing surface area (flaps, elevators, spoilers, etc.) 
to wing area') glov = Float(0.0, units='ft*ft', desc='Total glove and bat area beyond theoretical wing') varswp = Float(0.0, desc='Fraction of wing variable sweep weight penalty = 0., Fixed-geometry wing = 1., Full variable-sweep wing') fcomp = Float(0.0, desc='Decimal fraction of amount of composites used in wing structure = 0., No composites = 1., Maximum use of composites, approximately equivalent to FRWI1=.6, FRWI2=.83, FRWI3=.7 (Not necessarily all composite) This only applies to the wing. Use override parameters for other components such as FRHT=.75, FRVT=.75, FRFU=.82, FRLGN=.85, FRLGM=.85, FRNA=.8') faert = Float(0.0, desc='Decimal fraction of amount of aeroelastic tailoring used in design of wing = 0., No aeroelastic tailoring = 1., Maximum aeroelastic tailoring') fstrt = Float(0.0, desc='Wing strut-bracing factor = 0., No wing strut = 1., Full benefit from strut bracing') class FlopsWrapper_input_wtin_Tails_Fins(VariableTree): """Container for input.wtin.Tails_Fins""" # OpenMDAO Public Variables sht = Float(0.0, units='ft*ft', desc='Horizontal tail theoretical area') swpht = Float(-100.0, units='deg', desc='Horizontal tail 25% chord sweep angle (Default = SWEEP, Namelist &CONFIN)') arht = Float(-100.0, desc='Horizontal tail theoretical aspect ratio (Default = AR/2, Namelist &CONFIN)') trht = Float(-100.0, desc='Horizontal tail theoretical taper ratio (Default = TR, Namelist &CONFIN)') tcht = Float(0.0, desc='Thickness-chord ratio for the horizontal tail (Default = TCA, Namelist &CONFIN)') hht = Float(-100.0, desc='Decimal fraction of vertical tail span where horizontal tail is mounted = 0. for body mounted (Default for transports with all engines on the wing and for fighters) = 1. 
for T tail (Default for transports with multiple engines on the fuselage)') nvert = Int(1, desc='Number of vertical tails') svt = Float(0.0, units='ft*ft', desc='Vertical tail theoretical area (per tail)') swpvt = Float(-100.0, units='deg', desc='Vertical tail sweep angle at 25% chord (Default = SWPHT)') arvt = Float(-100.0, desc='Vertical tail theoretical aspect ratio (Default = ARHT/2)') trvt = Float(-100.0, desc='Vertical tail theoretical taper ratio (Default = TRHT)') tcvt = Float(0.0, desc='Thickness-chord ratio for the vertical tail (Default = TCHT)') nfin = Int(0, desc='Number of fins') sfin = Float(0.0, units='ft*ft', desc='Vertical fin theoretical area') arfin = Float(-100.0, desc='Vertical fin theoretical aspect ratio') trfin = Float(-100.0, desc='Vertical fin theoretical taper ratio') swpfin = Float(-100.0, units='deg', desc='Vertical fin sweep angle at 25% chord') tcfin = Float(0.0, desc='Vertical fin thickness - chord ratio') scan = Float(0.0, units='ft*ft', desc='Canard theoretical area') swpcan = Float(-100.0, units='deg', desc='Canard sweep angle at 25% chord') arcan = Float(-100.0, desc='Canard theoretical aspect ratio') trcan = Float(-100.0, desc='Canard theoretical taper ratio') tccan = Float(0.0, desc='Canard thickness-chord ratio (Default = TCHT)') class FlopsWrapper_input_wtin_Propulsion(VariableTree): """Container for input.wtin.Propulsion""" # OpenMDAO Public Variables new = Int(0, desc='Number of wing mounted engines') nef = Int(0, desc='Number of fuselage mounted engines') thrso = Float(0.0, units='lb', desc='Rated thrust of baseline engine as described in Engine Deck (Default = THRUST, see &CONFIN)') weng = Float(0.0, units='lb', desc='Weight of each baseline engine or bare engine if WINL and WNOZ (below) are supplied (Default = THRSO/5.5 for transports and THRSO/8 for fighters)') eexp = Float(1.15, desc='Engine weight scaling parameter\nW(Engine) = WENG*(THRUST/THRSO)**EEXP\nIf EEXP is less than 0.3,\nW(Engine) = WENG + 
(THRUST-THRSO)*EEXP')
    # --- Tail of the Propulsion variable tree (class header is above this chunk):
    # inlet / nozzle / nacelle scaling inputs for the baseline engine.
    winl = Float(0.0, units='lb', desc='Inlet weight for baseline engine if not included in WENG above')
    einl = Float(1.0, desc='Inlet weight scaling exponent\nW(Inlet) = WINL*(THRUST/THRSO)**EINL')
    wnoz = Float(0.0, units='lb', desc='Nozzle weight for baseline engine if not included in WENG above')
    enoz = Float(1.0, desc='Nozzle weight scaling exponent\nW(Nozzle) = WNOZ*(THRUST/THRSO)**ENOZ')
    xnac = Float(0.0, units='ft', desc='Average length of baseline engine nacelles. Scaled by SQRT(THRUST/THRSO)')
    dnac = Float(0.0, units='ft', desc='Average diameter of baseline engine nacelles. Scaled by SQRT(THRUST/THRSO)')
    wpmisc = Float(0.0, desc='Additional miscellaneous propulsion system weight or fraction of engine weight if < 1. This is added to the engine control and starter weight and may be overridden if WPMSC is input.')


# Component-weight overrides. Encoding convention (stated in each desc):
#   value < 0  -> negative of a starting weight, modified during optimization
#   value = 0  -> no weight for that component
#   0 < v < 5  -> scale factor applied to the internally computed weight
#   value > 5  -> actual fixed weight for the component
class FlopsWrapper_input_wtin_Override(VariableTree):
    """Container for input.wtin.Override"""

    # OpenMDAO Public Variables
    frwi = Float(1.0, desc='Total wing weight - fixed weight overrides FRWI1, FRWI2, FRWI3, FRWI4 below, scale factor is cumulative \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n \n')
    frwi1 = Float(1.0, desc='First term in wing weight equation - loosely corresponds to bending material weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n')
    frwi2 = Float(1.0, desc='Second term in wing weight equation - loosely corresponds to control surfaces, spars and ribs \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component\n')
    # NOTE(review): the next two descs end mid-sentence ("just because it") --
    # looks like generator truncation; text left as-is to preserve runtime strings.
    frwi3 = Float(1.0, desc='Third term in wing weight equation - miscellaneous, just because it')
    frwi4 = Float(1.0, desc='Fourth term in wing weight equation - miscellaneous, just because it')
    frht = Float(1.0, desc='Horizontal tail weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frvt = Float(1.0, desc='Vertical tail weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frfin = Float(1.0, desc='Wing vertical fin weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frcan = Float(1.0, desc='Canard weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frfu = Float(1.0, desc='Fuselage weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frlgn = Float(1.0, desc='Landing gear weight, nose \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frlgm = Float(1.0, desc='Landing gear weight, main \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frna = Float(1.0, desc='Total weight of nacelles and/or air induction system \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wthr = Float(0.0, desc='Total weight of thrust reversers\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wpmsc = Float(1.0, desc='Weight of miscellaneous propulsion systems such as engine controls, starter and wiring \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wfsys = Float(1.0, desc='Weight of fuel system \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    frsc = Float(1.0, desc='Surface controls weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wapu = Float(1.0, desc='Auxiliary power unit weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    win = Float(1.0, desc='Instrument Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    whyd = Float(1.0, desc='Hydraulics Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    welec = Float(1.0, desc='Electrical Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wavonc = Float(1.0, desc='Avionics Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    warm = Float(0.0, desc='Armament Group weight - includes thermal protection system or armor and fixed weapons\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wfurn = Float(1.0, desc='Furnishings Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wac = Float(1.0, desc='Air Conditioning Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wai = Float(1.0, desc='Transports: Anti-icing Group weight\n Fighters: Auxiliary gear \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wuf = Float(1.0, desc='Weight of unusable fuel \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    woil = Float(1.0, desc='Engine oil weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wsrv = Float(1.0, desc='Transports: Passenger service weight\n Fighters: Ammunition and nonfixed weapons weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wcon = Float(1.0, desc='Transports: Cargo and baggage container weight Fighters: Miscellaneous operating items weight If < 0.5, as a fraction of Gross Weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wauxt = Float(1.0, desc='Auxiliary fuel tank weight (Fighters only) \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wflcrb = Float(1.0, desc='Total weight of flight crew and baggage\n (Defaults: Transports - 225.*NFLCR\n Fighters - 215.*NFLCR\n Carrier-based - 180.*NFLCR)\n \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    wstuab = Float(1.0, desc='Total weight of cabin crew and baggage (Default = 155.*NSTU + 200.*NGALC) \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')
    ewmarg = Float(0.0, desc='Empty weight margin (Special Option) - delta weight added to Weight Empty. If abs(EWMARG) < 5., it is interpreted as a fraction of calculated Weight Empty. May be positive or negative\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component')


# Special Operating-Weight-Empty equation controls (ISPOWE selects the mode).
class FlopsWrapper_input_wtin_OEW_Calculations(VariableTree):
    """Container for input.wtin.OEW_Calculations."""

    # OpenMDAO Public Variables
    ispowe = Enum(0, (0,1), desc='= 0, Normal FLOPS weight equations will be used\n= 1, Special equation for Operating Weight Empty will be used:\n \n OWE = SPWTH*THRUST + SPWSW*SW + SPWGW*GW + SPWCON\n \n Structures group weights will be scaled to meet the calculated OWE.\n \n = 2, Use response surface for weights - available only in DOSS version', aliases=('Normal FLOPS', 'Special eqn for OEW'))
    spwth = Float(2.2344, units='lb/lb', desc='Multiplier for thrust/engine in special equation for Operating Weight Empty\nSPWTH = \n AIRFLOWref\n(PODsclr + dOEWsclr) * ------------\n SLSTHRUSTref\n ')
    spwsw = Float(9.5, units='psf', desc='Multiplier for wing area in special equation for Operating Weight Empty')
    spwgw = Float(0.104087, units='lb/lb', desc='Multiplier for gross weight in special equation for Operating Weight Empty\nSPWGW = \n MTOWsclr+OEWgrwth*MTOWgrwth\n -----------------------------------\n 1. + MTOWgrowth\n\n')
    spwcon = Float(38584.0, units='lb', desc='Constant weight term in special equation for Operating Weight Empty\n \nSPWCON = OEWuncycled\n - MTOWscalar*MTOWuncycled\n - WINGscalar*SWref\n - (PODscalar + dOEWscalar)\n *AIRFLOWref\n')


# Landing-gear geometry and design landing weight inputs.
class FlopsWrapper_input_wtin_Landing_Gear(VariableTree):
    """Container for input.wtin.Landing_Gear"""

    # OpenMDAO Public Variables
    xmlg = Float(0.0, units='inch', desc='Length of extended main landing gear oleo (Default is computed internally)')
    xnlg = Float(0.0, units='inch', desc='Length of extended nose landing gear oleo (Default is computed internally)')
    wldg = Float(0.0, units='lb', desc='Design landing weight (if WRATIO is input in Namelist &AERIN, WLDG = GW*WRATIO) See Namelist &AERIN for WRATIO defaults.')
    mldwt = Enum(0, (1,0), desc='= 1, The design landing weight is set to the end of descent weight for the main mission plus DLDWT. Use only if IRW = 1 in Namelist &MISSIN. = 0, The design landing weight is determined by WLDG above or WRATIO in Namelist &AERIN')
    dldwt = Float(0.0, units='lb', desc='Delta landing weight for MLDWT = 1')
    carbas = Float(0.0, desc='Carrier based aircraft switch, affects weight of flight crew, avionics and nose gear = 1., Carrier based = 0., Land based')


# Inertia-calculation inputs: component C.G. locations plus per-tank
# centroid/size/fuel-load arrays (the T* arrays are indexed by tank I,
# and TF additionally by fuel condition J).
class FlopsWrapper_input_wtin_Inertia(VariableTree):
    """Container for input.wtin.Inertia"""

    # OpenMDAO Public Variables
    inrtia = Enum(0, (1,0), desc='= 1, Aircraft inertias will be calculated = 0, Otherwise', aliases=('Calculate', 'Do not calculate'))
    zht = Float(0.0, units='inch', desc='Vertical C.G. of the horizontal tail (optional)')
    zvt = Float(0.0, units='inch', desc='Vertical C.G. of the vertical tail (optional)')
    zfin = Float(0.0, units='inch', desc='Vertical C.G. of the vertical fin (optional)')
    yfin = Float(0.0, units='inch', desc='Lateral C.G. of the vertical fin (optional)')
    zef = Float(0.0, units='inch', desc='Vertical C.G. of two forward mounted engines (optional)')
    yef = Float(0.0, units='inch', desc='Lateral C.G. of two forward mounted engines (optional, may be input as a fraction of the semispan)')
    zea = Float(0.0, units='inch', desc='Vertical C.G. of one or two aft mounted engines (optional)')
    yea = Float(0.0, units='inch', desc='Lateral C.G. of one or two aft mounted engines (optional, may be input as a fraction of the semispan)')
    zbw = Float(0.0, units='inch', desc='Lowermost point of wing root airfoil section')
    zap = Float(0.0, units='inch', desc='Vertical C.G. of Auxiliary Power Unit (optional)')
    zrvt = Float(0.0, units='inch', desc='Vertical datum line (Water Line) of vertical tail theoretical root chord (optional, if blank assumes at maximum height of fuselage)')
    ymlg = Float(0.0, units='inch', desc='Lateral C.G. of extended main landing gear')
    yfuse = Float(0.0, units='inch', desc='Lateral C.G. of outboard fuselage if there is more than one fuselage')
    yvert = Float(0.0, units='inch', desc='Lateral C.G. of outboard vertical tail if there is more than one vertical tail')
    swtff = Float(0.0, desc='Gross fuselage wetted area (Default = internally computed)')
    tcr = Float(0.0, desc='Wing root thickness-chord ratio (Default = TOC(0) or TCA in &CONFIN)')
    tct = Float(0.0, desc='Wing tip thickness-chord ratio (Default = TOC(NETAW) or TCA in &CONFIN)')
    incpay = Enum(0, (1,0), desc='For inertia calculations, all mission fuel is placed in "tanks." \n \n = 1, Include passengers, passenger baggage, and cargo in the fuselage and contents for inertia calculations. \n \n = 0, For inertia calculations, all payload (passengers, passenger baggage, and cargo) are placed in "tanks" like the fuel', aliases=('Passengers-etc in fuse', 'All payload in tanks'))
    tx = Array(dtype=numpy_float64, units='inch', desc='x coordinates of the centroid of the Ith tank')
    ty = Array(dtype=numpy_float64, units='inch', desc='y coordinates of the centroid of the Ith tank')
    tz = Array(dtype=numpy_float64, units='inch', desc='z coordinates of the centroid of the Ith tank')
    tl = Array(dtype=numpy_float64, desc='Length of the Ith tank (optional, used only in calculating I0')
    tw = Array(dtype=numpy_float64, desc='Width of the Ith tank (optional, used only in calculating I0')
    td = Array(dtype=numpy_float64, desc='Depth of the Ith tank (optional, used only in calculating I0')
    tf = Array(dtype=numpy_float64, units='lb', desc='Weight of fuel (or payload) in Ith tank for the Jth fuel condition NOTE: Dimensions are [J,I]')


# Overall fuselage geometry.
class FlopsWrapper_input_wtin_Fuselage(VariableTree):
    """Container for input.wtin.Fuselage"""

    # OpenMDAO Public Variables
    nfuse = Int(1, desc='Number of fuselages')
    xl = Float(0.0, units='ft', desc='Fuselage total length (See Fuselage Design Data)')
    wf = Float(0.0, units='ft', desc='Maximum fuselage width')
    df = Float(0.0, units='ft', desc='Maximum fuselage depth')
    xlp = Float(0.0, units='ft', desc='Length of passenger compartment (Default is internally computed)')


# Fuel tank capacities and fuel-capacity scaling options.
class FlopsWrapper_input_wtin_Fuel_System(VariableTree):
    """Container for input.wtin.Fuel_System"""

    # OpenMDAO Public Variables
    ntank = Int(7, desc='Number of fuel tanks')
    fulwmx = Float(-1.0, units='lb', desc='Total fuel capacity of wing. The default is internally calculated from:\n \n TCA * SW**2 TR\n FULWMX = FWMAX * ---------- * ( 1 - -------- )\n SPAN (1+TR)**2\n \n Where the default value of FWMAX is 23. If FULWMX is input < 50, it is interpreted as FWMAX and the above equation is used. This equation is also used for scaling when the wing area, t/c, aspect ratio, or taper ratio is varied or optimized.\n \n Alternatively, FULWMX = FUELRF + FUSCLA*(SW**1.5 - FSWREF**1.5)\n + FUSCLB*(SW - FSWREF)\n')
    fulden = Float(1.0, desc='Fuel density ratio for alternate fuels compared to jet fuel (typical density of 6.7 lb/gal), used in the calculation of FULWMX (if FULWMX is not input) and in the calculation of fuel system weight.')
    fuelrf = Float(0.0, units='lb', desc='Fuel capacity at FSWREF for alternate method')
    fswref = Float(-1.0, units='ft*ft', desc='Reference wing area for alternate method (Default = SW in Namelist &CONFIN)')
    fuscla = Float(0.0, desc='Alternate fuel capacity scaling method - Factor A')
    fusclb = Float(0.0, desc='Alternate fuel capacity scaling method - Factor B')
    # NOTE(review): the next two descs appear truncated by the wrapper generator;
    # left verbatim to avoid changing runtime strings.
    fulfmx = Float(0.0, desc='Total fuel capacity of fuselage (wing ')
    ifufu = Int(0, desc='= 1, Fuselage fuel capacity is adjusted to meet the required fuel capacity for the primary mission. Use only if IRW = 1 in Namelist &MISSIN, and use with care - some passengers can')
    fulaux = Float(0.0, units='lb', desc='Auxiliary (external) fuel tank capacity (Fighters only)')


# Detailed wing definition: spanwise station arrays plus load/pressure
# distribution controls for the wing weight integration.
class FlopsWrapper_input_wtin_Detailed_Wing(VariableTree):
    """Container for input.wtin.Detailed_Wing"""

    # OpenMDAO Public Variables
    etaw = Array(dtype=numpy_float64, desc='Wing station location - fraction of semispan or distance from fuselage centerline. Typically, goes from 0. to 1. Input fixed distances (>1.1) are not scaled with changes in span.')
    chd = Array(dtype=numpy_float64, desc='Chord length - fraction of semispan or actual chord. Actual chord lengths (>5.) are not scaled.')
    toc = Array(dtype=numpy_float64, desc='Thickness - chord ratio')
    swl = Array(dtype=numpy_float64, units='deg', desc='Sweep of load path. Typically parallel to rear spar tending toward max t/c of airfoil. The Ith value is used between wing stations I and I+1.')
    etae = Array(array([0.3, 0.6, 0.0, 0.0]), dtype=numpy_float64, desc='Engine locations - fraction of semispan or distance from fuselage centerline. Actual distances are not scaled with changes in span. NEW/2 values are input')
    pctl = Float(1.0, desc='Fraction of load carried by defined wing')
    arref = Float(0.0, desc='Reference aspect ratio (Default = AR in &CONFIN)')
    tcref = Float(0.0, desc='Reference thickness-chord ratio (Default = TCA in &CONFIN)')
    nstd = Int(50, desc='Number of integration stations')
    pdist = Float(2.0, desc='Pressure distribution indicator\n= 0., Input distribution - see below\n= 1., Triangular distribution\n= 2., Elliptical distribution\n= 3., Rectangular distribution PDIST is a continuous variable, i.e., a value of 1.5 would be half way between triangular and elliptical.\nCAUTION - the constants in the wing weight calculations were correlated with existing aircraft assuming an elliptical distribution. Use the default value unless you have a good reason not to.')
    etap = Array(dtype=numpy_float64, desc='Fraction of wing semispan')
    pval = Array(dtype=numpy_float64, desc='Relative spanwise pressure at ETAP(J)')


# Passenger / crew counts and cargo weights.
class FlopsWrapper_input_wtin_Crew_Payload(VariableTree):
    """Container for input.wtin.Crew_Payload"""

    # OpenMDAO Public Variables
    npf = Int(0, desc='Number of first class passengers')
    npb = Int(0, desc='Number of business class passengers')
    npt = Int(0, desc='Number of tourist passengers')
    nstu = Int(-1, desc='Number of flight attendants (optional)')
    ngalc = Int(-1, desc='Number of galley crew (optional)')
    nflcr = Int(-1, desc='Number of flight crew (optional)')
    wppass = Float(165.0, units='lb', desc='Weight per passenger')
    bpp = Float(-1.0, units='lb', desc='Weight of baggage per passenger (Default = 35., or 40. if DESRNG in Namelist &CONFIN > 900., or 44. if DESRNG > 2900.)')
    cargf = Float(0.0, desc='Military cargo aircraft floor factor = 0., Passenger transport\n= 1., Military cargo transport floor')
    cargow = Float(0.0, units='lb', desc='Cargo carried in wing (Weight of wing-mounted external stores for fighters)')
    cargof = Float(0.0, units='lb', desc='Cargo (other than passenger baggage) carried in fuselage (Fuselage external stores for fighters)')


# Longitudinal C.G. locations of the individual weight groups.
class FlopsWrapper_input_wtin_Center_of_Gravity(VariableTree):
    """Container for input.wtin.Center_of_Gravity"""

    # OpenMDAO Public Variables
    cgw = Float(0.0, units='inch', desc='Longitudinal C.G. of wing')
    cght = Float(0.0, units='inch', desc='Longitudinal C.G. of horizontal tail')
    cgvt = Float(0.0, units='inch', desc='Longitudinal C.G. of vertical tail')
    cgfin = Float(0.0, units='inch', desc='Longitudinal C.G. of wing vertical fins')
    cgcan = Float(0.0, units='inch', desc='Longitudinal C.G. of canard')
    cgf = Float(0.0, units='inch', desc='Longitudinal C.G. of fuselage')
    cglgn = Float(0.0, units='inch', desc='Longitudinal C.G. of nose landing gear')
    cglgm = Float(0.0, units='inch', desc='Longitudinal C.G. of main landing gear')
    cgef = Float(0.0, units='inch', desc='Longitudinal C.G. of two forward mounted engines')
    cgea = Float(0.0, units='inch', desc='Longitudinal C.G. of one or two aft mounted engines')
    cgap = Float(0.0, units='inch', desc='Longitudinal C.G. of auxiliary power unit')
    cgav = Float(0.0, units='inch', desc='Longitudinal C.G. of avionics group (optional)')
    cgarm = Float(0.0, units='inch', desc='Longitudinal C.G. of armament group - includes thermal protection system or armor and fixed weapons (Default = CGF)')
    cgcr = Float(0.0, units='inch', desc='Longitudinal C.G. of flight crew')
    cgp = Float(0.0, units='inch', desc='Longitudinal C.G. of passengers')
    cgcw = Float(0.0, units='inch', desc='Longitudinal C.G. of wing cargo or external stores')
    cgcf = Float(0.0, units='inch', desc='Longitudinal C.G. of fuselage cargo or external stores')
    cgzwf = Float(0.0, units='inch', desc='Longitudinal C.G. of fuselage fuel')
    cgfwf = Float(0.0, units='inch', desc='Longitudinal C.G. of wing fuel in full condition')
    cgais = Float(0.0, units='inch', desc='Longitudinal C.G. of air induction system')
    cgacon = Float(0.0, units='inch', desc='Longitudinal C.G. of air conditioning system')
    cgaxg = Float(0.0, units='inch', desc='Longitudinal C.G. of auxiliary gear')
    cgaxt = Float(0.0, units='inch', desc='Longitudinal C.G. of auxiliary tanks')
    cgammo = Float(0.0, units='inch', desc='Longitudinal C.G. of ammunition and nonfixed weapons')
    cgmis = Float(0.0, units='inch', desc='Longitudinal C.G. of miscellaneous operating items')


# Basic weight-input switches: load factor, design gross weight, output options.
class FlopsWrapper_input_wtin_Basic(VariableTree):
    """Container for input.wtin.Basic"""

    # OpenMDAO Public Variables
    ulf = Float(3.75, desc='Structural ultimate load factor')
    dgw = Float(1.0, units='lb', desc='Design gross weight - fraction of GW (see &CONFIN) or weight')
    vmmo = Float(0.0, desc='Maximum operating Mach number (Default = VCMN, Namelist &CONFIN)')
    nwref = Enum(39, (39,37,33,26), desc='The number of the reference weight for percentage weight output.', aliases=('Ramp weight', 'Zero fuel weight', 'Operating weight empty', 'Weight empty'))
    cgrefl = Float(0.0, units='inch', desc='Reference length for percentage C.G. location output (Default = XL*12., fuselage length)')
    cgrefx = Float(0.0, units='inch', desc='X - location of start of reference length')
    mywts = Enum(0, (0,1), desc='= 0, Weights will be computed\n = 1, Otherwise (See User-Specified Weights, Namelist &MISSIN)', aliases=('Compute weight', 'User-specified'))
    hydpr = Float(3000.0, units='psi', desc='Hydraulic system pressure')
    wpaint = Float(0.0, units='psf', desc='Weight of paint for all wetted areas')
    ialtwt = Enum(0, (0,1), desc='= 1, Alternate weight equations for some components will be used (Special option)\n= 0, Normal FLOPS weight equations will be used', aliases=('Normal', 'Alternate'))


# Aggregate container: groups all input.wtin.* sub-trees defined above
# (Propulsion, Tails_Fins and Wing_Data are defined earlier in this file).
class FlopsWrapper_input_wtin(VariableTree):
    """Container for input.wtin"""

    # OpenMDAO Public Variables

    # VariableTrees
    Basic = VarTree(FlopsWrapper_input_wtin_Basic())
    Center_of_Gravity = VarTree(FlopsWrapper_input_wtin_Center_of_Gravity())
    Crew_Payload = VarTree(FlopsWrapper_input_wtin_Crew_Payload())
    Detailed_Wing = VarTree(FlopsWrapper_input_wtin_Detailed_Wing())
    Fuel_System = VarTree(FlopsWrapper_input_wtin_Fuel_System())
    Fuselage = VarTree(FlopsWrapper_input_wtin_Fuselage())
    Inertia = VarTree(FlopsWrapper_input_wtin_Inertia())
    Landing_Gear = VarTree(FlopsWrapper_input_wtin_Landing_Gear())
    OEW_Calculations = VarTree(FlopsWrapper_input_wtin_OEW_Calculations())
    Override = VarTree(FlopsWrapper_input_wtin_Override())
    Propulsion = VarTree(FlopsWrapper_input_wtin_Propulsion())
    Tails_Fins = VarTree(FlopsWrapper_input_wtin_Tails_Fins())
    Wing_Data = VarTree(FlopsWrapper_input_wtin_Wing_Data())


# Thrust-reverser model inputs for the takeoff/landing analysis.
class FlopsWrapper_input_tolin_Thrust_Reverser(VariableTree):
    """Container for input.tolin.Thrust_Reverser"""

    # OpenMDAO Public Variables
    inthrv = Int(-1, desc='= -1, Use takeoff thrust\n= 0, Input thrust values will be used\n= 1, Input values will be scaled\n> 1, Scaled engine deck for the (INTHRV-1)th power setting will be used')
    rvfact = Float(0.0, desc='Fraction of thrust reversed - net (Real values should be negative)')
    velrv = Array(dtype=numpy_float64, units='ft/s', desc='Velocities for reverse thrust')
    thrrv = Array(dtype=numpy_float64, units='lb', desc='Thrust values')
    tirvrs = Float(5.0, units='s', desc='Time after touchdown to reverse thrust')
    # NOTE(review): 'nmi' units on a cutoff *velocity* look suspect -- confirm
    # against the FLOPS &TOLIN documentation before relying on them.
    revcut = Float(-1000.0, units='nmi', desc='Cutoff velocity for thrust reverser')
    clrev = Float(0.0, desc='Change in lift coefficient due to thrust reverser')
    cdrev = Float(0.0, desc='Change in drag coefficient due to thrust reverser')


# Takeoff performance inputs: polar, thrust schedule, rotation and
# balanced-field-length options.
class FlopsWrapper_input_tolin_Takeoff(VariableTree):
    """Container for input.tolin.Takeoff"""

    # OpenMDAO Public Variables
    cltom = Float(-1.0, desc='Maximum CL for takeoff (Default, see &AERIN)')
    cdmto = Float(0.0, desc='Minimum CD for takeoff, typically, this is the drag coefficient at zero lift')
    fcdmto = Float(0.3, desc='Fraction of CDMTO due to wing')
    almxto = Float(25.0, units='deg', desc='Maximum angle of attack during takeoff')
    obsto = Float(-1.0, units='ft', desc='Takeoff obstacle height (Defaults, Transport = 35., Fighter = 50.)')
    alpto = Array(array([-100.0]), dtype=numpy_float64, units='deg', desc='Angles of attack for takeoff polar')
    clto = Array(array([-100.0]), dtype=numpy_float64, desc='Lift coefficients for takeoff polar. These are not generated internally')
    cdto = Array(array([-100.0]), dtype=numpy_float64, desc='Drag coefficients for takeoff polar. These are not generated internally')
    inthto = Int(0, desc='= 0, Input thrust values will be used\n= 1, The input values will be scaled\n> 1, Scaled engine data deck for the (INTHTO-1)th power setting will be used')
    velto = Array(dtype=numpy_float64, units='ft/s', desc='Velocities for takeoff thrust')
    thrto = Array(dtype=numpy_float64, units='lb', desc='Thrust values')
    alprot = Float(-100.0, desc='Maximum angle of attack during rotation phase of takeoff (Default = ALMXTO)')
    vrotat = Float(1.05, desc='Minimum rotation start speed, knots or fraction of Vstall')
    vangl = Float(2.0, units='deg/s', desc='Rotation rate')
    thfact = Float(1.0, desc='Thrust multiplier for input or extracted thrust data')
    ftocl = Float(1.0, desc='Factor for takeoff lift. Also applied to drag polars input in &PROIN')
    ftocd = Float(1.0, desc='Factor for takeoff drag. Also applied to drag polars input in &PROIN')
    igobs = Enum(0, (0,1), desc='Gear retraction switch', aliases=('Liftoff + TDELG', 'Obstacle + TDELG'))
    tdelg = Float(0.0, units='s', desc='Time delay after liftoff/obstacle before start of landing gear retraction')
    tigear = Float(2.0, units='s', desc='Time required to retract landing gear. Landing gear drag is reduced using a cosine function.')
    ibal = Enum(1, (1,2,0), desc='Option to compute balanced field length', aliases=('pre-1998 FAA rules', 'post-1998 FAA rules', 'Do not compute'))
    itxout = Enum(0, (1,0), desc='Weight to use for takeoff field length calculations', aliases=('Ramp weight - taxi out fuel', 'Ramp weight'))
    pilott = Float(1.0, units='s', desc='Actual pilot reaction time from engine failure to brake application. Spoilers, brakes, and thrust reversal are assumed to become effective and engine cutback occurs at PILOTT + 2 seconds after engine failure.')
    tispa = Float(0.0, units='s', desc='Not currently used')
    tibra = Float(0.0, units='s', desc='Not currently used')
    tirva = Float(0.0, units='s', desc='Not currently used')
    ispol = Enum(1, (0,1), desc='Option for spoiler use during aborted takeoff', aliases=('Not used', 'Used'))
    irev = Enum(1, (0,1,2), desc='Option for thrust reversal during aborted takeoff', aliases=('Not used', 'Only if all engines operational', 'Always used'))


# Landing performance inputs: polar, approach geometry and rollout timing.
class FlopsWrapper_input_tolin_Landing(VariableTree):
    """Container for input.tolin.Landing"""

    # OpenMDAO Public Variables
    clldm = Float(-1.0, desc='Maximum CL for landing (Default, see &AERIN)')
    cdmld = Float(0.0, desc='Minimum CD for landing')
    fcdmld = Float(-1.0, desc='Fraction of CDMLD due to wing (Default = FCDMTO)')
    almxld = Float(25.0, units='deg', desc='Maximum angle of attack during landing')
    obsld = Float(50.0, units='ft', desc='Landing obstacle height')
    alpld = Array(dtype=numpy_float64, units='deg', desc='Angles of attack for landing polar')
    clld = Array(dtype=numpy_float64, desc='Lift coefficients for landing polar. These are not generated internally')
    cdld = Array(dtype=numpy_float64, desc='Drag coefficients for landing polar. These are not generated internally')
    inthld = Int(0, desc='= 0, Input thrust values will be used\n= 1, The input values will be scaled\n> 1, Scaled engine data deck will be used')
    velld = Array(dtype=numpy_float64, units='ft/s', desc='Velocities for landing')
    thrld = Array(dtype=numpy_float64, units='lb', desc='Thrust values')
    thdry = Float(-1.0, units='lb', desc='Maximum dry thrust at missed appproach for fighters (Default = takeoff thrust)')
    aprhgt = Float(100.0, units='ft', desc='Height above ground for start of approach')
    aprang = Float(-3.0, units='deg', desc='Approach flight path angle')
    fldcl = Float(1.0, desc='Factor for landing lift')
    fldcd = Float(1.0, desc='Factor for landing drag')
    tdsink = Float(0.0, units='ft/s', desc='Sink rate at touchdown (Must be positive if input)')
    vangld = Float(0.0, units='deg/s', desc='Flare rate (Default = VANGL)')
    noflar = Enum(0, (1,0), desc='Option for flare during landing. If no flare, sink rate at touchdown is the approach sink rate with ground effects.', aliases=('No flare', 'Flare'))
    tispol = Float(2.0, units='s', desc='Time after touchdown to spoiler actuation')
    ticut = Float(3.0, units='s', desc='Time after touchdown to cut back of engines to zero thrust')
    tibrak = Float(4.0, units='s', desc='Time after touchdown to brake application')
    acclim = Float(16.0, units='ft/(s*s)', desc='Deceleration limit')
    magrup = Enum(-1, (1,0,-1), desc='Missed approach landing gear switch', aliases=('Gear up during missed approach', 'Gear down during missed approach', 'Use default'))


# Numerical integration step sizes for each takeoff/landing phase.
class FlopsWrapper_input_tolin_Integration_Intervals(VariableTree):
    """Container for input.tolin.Integration_Intervals"""

    # OpenMDAO Public Variables
    delvto = Float(4.0, units='ft/s', desc='Velocity step during ground run')
    deltro = Float(0.2, units='s', desc='Time step during rotation')
    deltcl = Float(0.2, units='s', desc='Time step during climbout')
    delhap = Float(10.0, units='ft', desc='Altitude step during approach')
    deldfl = Float(10.0, units='ft', desc='Distance step during flare')
    deltrn = Float(0.25, units='s', desc='Time step during runout')


# Basic takeoff/landing environment and ground-run coefficients.
class FlopsWrapper_input_tolin_Basic(VariableTree):
    """Container for input.tolin.Basic"""

    # OpenMDAO Public Variables
    apa = Float(0.0, units='ft', desc='Airport altitude')
    dtct = Float(0.0, units='degC', desc='Delta temperature from standard day. (This parameter is independent from the DTC in Namelist &MISSIN and DTCE in Namelist &ENGINE.)')
    swref = Float(-1.0, units='ft*ft', desc='Wing area on which takeoff and landing drag polars are based (Default = SW, Namelist &CONFIN). If different from SW, polars will be scaled.')
    arret = Float(-1.0, desc='Wing aspect ratio on which takeoff and landing drag polars are based (Default = AR, Namelist &CONFIN). If different from AR, polars will be modified.')
    whgt = Float(8.0, units='ft', desc='Wing height above ground')
    alprun = Float(0.0, units='deg', desc='Angle of attack on ground')
    tinc = Float(0.0, units='deg', desc='Thrust incidence on ground')
    rollmu = Float(0.025, desc='Coefficient of rolling friction')
    brakmu = Float(0.3, desc='Coefficient of friction, brakes on')
    cdgear = Float(0.0, desc='Landing gear drag coefficient')
    cdeout = Float(0.0, desc='Delta drag coefficient due to engine out condition. Includes effect of stopped or windmilling engine and the trim drag associated with compensating for asymmetric thrust.')
    clspol = Float(0.0, desc='Spoiler delta lift coefficient (Should be negative)')
    cdspol = Float(0.0, desc='Spoiler delta drag coefficient')
    incgef = Enum(1, (1,0), desc='Ground effects switch', aliases=('Ground effects', 'No ground effects'))
    argef = Float(1.0, desc='Aspect ratio factor for ground effects')
    itime = Enum(0, (1,0), desc='Detailed takeoff and landing profiles print option', aliases=('Print', 'No print'))


# Aggregate container: groups all input.tolin.* sub-trees defined above.
class FlopsWrapper_input_tolin(VariableTree):
    """Container for input.tolin"""

    # VariableTrees
    Basic = VarTree(FlopsWrapper_input_tolin_Basic())
    Integration_Intervals = VarTree(FlopsWrapper_input_tolin_Integration_Intervals())
    Landing = VarTree(FlopsWrapper_input_tolin_Landing())
    Takeoff = VarTree(FlopsWrapper_input_tolin_Takeoff())
    Thrust_Reverser = VarTree(FlopsWrapper_input_tolin_Thrust_Reverser())


# Synthesis/optimization variables: design requirements, behavioral
# constraints and the optimizer-algorithm selector.
class FlopsWrapper_input_syntin_Variables(VariableTree):
    """Container for input.syntin.Variables"""

    # OpenMDAO Public Variables
    desrng = Float(-1.0, desc='Design range, n.mi. (or endurance, min.). See INDR in Namelist &MISSIN (Overrides input in Namelist &CONFIN).')
    vappr = Float(-1.0, units='nmi', desc='Maximum allowable landing approach velocity (Overrides input in Namelist &AERIN)')
    flto = Float(-1.0, units='ft', desc='Maximum allowable takeoff field length (Overrides input in Namelist &AERIN)')
    flldg = Float(-1.0, units='ft', desc='Maximum allowable landing field length (Overrides input in Namelist &AERIN)')
    exfcap = Float(0.0, units='lb', desc='Minimum allowable excess fuel capacity')
    cdtmax = Float(-1.0, units='degR', desc='Maximum allowable compressor discharge temperature (Overrides input in Namelist &ENGINE')
    cdpmax = Float(-1.0, units='psi', desc='Maximum allowable compressor discharge pressure (Overrides input in Namelist &ENGINE')
    vjmax = Float(-1.0, units='ft/s', desc='Maximum allowable jet velocity (Overrides input in Namelist &ENGINE')
    stmin = Float(-1.0, units='lb/lb/s', desc='Minimum allowable specific thrust (Overrides input in Namelist &ENGINE')
    armax = Float(-1.0, desc='Maximum allowable ratio of the bypass area to the core area of a mixed flow turbofan (Overrides input in Namelist &ENGINE')
    gnox = Float(0.0, units='lb', desc='Maximum allowable NOx emissions')
    roclim = Float(100.0, units='ft/min', desc='Minimum allowable potential rate of climb during climb segments')
    dhdtlm = Float(100.0, units='ft/min', desc='Minimum allowable actual rate of climb during climb segments')
    tmglim = Float(0.1, desc='Minimum allowable thrust margin, (Thrust-Drag)/Drag, during climb segments')
    ig = Array(dtype=numpy_int64, desc='= 1, Ith behavioral constraint is used in optimization\n= 0, Otherwise')
    ibfgs = Enum(1, (0,1,2,3,4,5), desc='Search algorithm for optimization', aliases=('Davidon-Fletcher-Powell', 'Broyden-Fletcher-Goldfarb-Shano', 'Conjugate Gradient (Polak-Ribiere)', 'Steepest Descent', 'Univariate Search', 'Kreisselmeier-Steinhauser with DFP'))
    itfine = Enum(0, (1,0), desc='Option to set IRW = 1 for final analysis', aliases=('Yes', 'No'))


class 
FlopsWrapper_input_syntin_Optimization_Control(VariableTree): """Container for input.syntin.Optimization_Control""" # OpenMDAO Public Variables ndd = Int(0, desc='Number of drawdowns (Defaults to analysis only - no optimization is performed. Suggested value = 3 or 4)') rk = Float(0.0, desc='Initial value of RK (Default internally computed)') fdd = Float(0.2, desc='RK multiplier for successive drawdowns') nlin = Int(-1, desc='Maximum number of gradients per drawdown (Default = number of active design variables times 2)') nstep = Int(20, desc='Maximum number of steps per one-dimensional minimization (Default = 20)') ef = Float(3.0, desc='Limits one-dimensional minimization step size to EF times previous step') eps = Float(0.001, desc='Fraction of initial design variable value used as a finite difference delta') amult = Float(10.0, desc='The initial step in a one-dimensional search is controlled by the design variable value times EPS times AMULT') dep = Float(0.001, desc='One-dimensional search convergence criterion on step size as a fraction of move distance') accux = Float(3.0e-4, desc='One-dimensional search convergence criterion on step size as a fraction of initial design variable value') glm = Float(0.0, desc='Value of G at which constraint switches to quadratic extended form, a value of .002 is recommended') gfact = Array(dtype=numpy_float64, desc='Scaling factor for each behavioral constraint') autscl = Float(1.0, desc='Design variable scale factor exponent. Scale factors for design variables default to VALUE ** AUTSCL') icent = Enum(0, (0,1), desc='Type of differencing to be used in gradient calculations', aliases=('Forward', 'Central')) rhomin = Float(0.0, desc='Starting value for RHO, a scalar multiplying factor used in the KS function. 
(Default is computed internally)') rhomax = Float(300.0, desc='Maximum value for RHO') rhodel = Float(0.0, desc='RHO increment (Default is computed internally)') itmax = Int(30, desc='Maximum number of iterations') jprnt = Int(2, desc='KS module print control\n= 0, No output from the KS module\n= 999, Maximum output') rdfun = Float(0.01, desc='If the relative change in the KS function is less than RDFUN for three consecutive iterations, optimization is terminated.') adfun = Float(0.001, desc='If the absolute change in the KS function is less than ADFUN for three consecutive iterations, optimization is terminated.') class FlopsWrapper_input_syntin(VariableTree): """Container for input.syntin""" # VariableTrees Optimization_Control = VarTree(FlopsWrapper_input_syntin_Optimization_Control()) Variables = VarTree(FlopsWrapper_input_syntin_Variables()) class FlopsWrapper_input_rfhin(VariableTree): """Container for input.rfhin""" # OpenMDAO Public Variables tmach = Array(dtype=numpy_float64, desc='Mach numbers in increasing order') cdmin = Array(dtype=numpy_float64, desc='Minimum drag for each Mach number.\nThe lift dependent drag coefficient for the Ith Mach number is computed from:\n\nCD = CDMIN(I) + CK(I) * [CL - CLB(I)] ** 2\n+ C1SW(I) * (SW/REFAS - REFBS) ** EXPS\n+ C1TH(I) * (THRUST/REFAT - REFBT) ** EXPT\n\nwhere SW and THRUST are the current values for the wing area and for the thrust per engine, and CL is the lift coefficient.') ck = Array(dtype=numpy_float64, desc='Drag-due-to-lift factors for each Mach number') clb = Array(dtype=numpy_float64, desc='Lift coefficients corresponding to each CDMIN') c1sw = Array(dtype=numpy_float64, desc='Coefficient for wing area term for each Mach number. May be a drag coefficient or D/Q depending on the values of REFAS, REFBS and EXPS.') c1th = Array(dtype=numpy_float64, desc='Coefficient for thrust term for each Mach number. 
May be a drag coefficient or D/Q depending on the values of REFAT, REFBT and EXPT.') refas = Float(1.0, desc='Wing area reference value') refbs = Float(0.0, desc='Wing area base value') exps = Float(1.0, desc='Wing area term exponent') refat = Float(1.0, desc='Thrust reference value') refbt = Float(0.0, desc='Thrust base value') expt = Float(1.0, desc='Thrust term exponent') class FlopsWrapper_input_proin(VariableTree): """Container for input.proin""" # OpenMDAO Public Variables npol = Int(0, desc='Number of drag polars to be printed out (Default = size of dflap)') alpro = Array(dtype=numpy_float64, units='deg', desc='Angles of attack for each drag polar') clpro = Array(dtype=numpy_float64, desc='Lift coefficients for each drag polar') cdpro = Array(dtype=numpy_float64, desc='Drag coefficients for each drag polar') dflap = Array(array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), dtype=numpy_float64, units='deg', desc='Flap deflection corresponding to each drag polar. Used only for output') ntime = Enum(0, (1,0), desc='Option for printing detailed takeoff and climb profiles for noise', aliases=('Print', 'No print')) ipcmax = Int(1, desc='Maximum engine power code (This variable could be used, for example, to limit takeoff and climb to dry power settings on an afterburning engine.)') keas = Enum(0, (1,0), desc='Type of velocity given by VFIX in namelist &SEGIN', aliases=('Knots equivalent airspeed (keas)', 'True airspeed')) txf = Float(-1.0, units='lb', desc='Fuel used in taxiing out to runway (Default is computed in mission analysis)') alpmin = Float(0.0, units='deg', desc='Minimum angle of attack during climb segment') gamlim = Float(0.0, units='deg', desc='Minimum flight path angle during fixed angle of attack segments') inm = Enum(0, (1,0), desc='Option to generate data files necessary for transporting FLOPS takeoff and climb profile data to the FAA Integrated Noise Model (INM) program', aliases=('Generate', 'Do not generate')) iatr = Enum(0, (1,0), desc='Automatic thrust 
restoration indicator option (INM=1, has no effect of takeoff and climb profile)', aliases=('ATR', 'No ATR')) fzf = Float(1.25, desc='Maneuver speed factor (INM=1)') thclmb = Float(-1.0, desc='Climb throttle setting (INM=1)') flapid = Array(dtype=numpy_str, desc='Six character label for each of the NPOL input drag polars, for example, "gearup"') class FlopsWrapper_input_option_Program_Control(VariableTree): """Container for input.option.Program_Control""" # OpenMDAO Public Variables mprint = Enum(1, (0,1), desc='Print control \n = 0, Print only 3-5 line summary for each analysis. Usually used only for contour plots (IOPT = 4) \n = 1, Normal output for all analyses', aliases=('Short Summary', 'Normal')) iopt = Enum(1, (1,2,3,4), desc='Execution Type', aliases=('Analysis', 'Parametric Variation', 'Optimization', 'Contour or Thumbprint plot')) ianal = Enum(3, (1,2,3,4), desc='Analysis Type', aliases=('Weights', 'Weights and Aerodynamics', 'Full Analysis', 'Propulsion')) ineng = Enum(0, (0,1), desc='Force engine Data Read', aliases=('If necessary', 'Yes')) itakof = Enum(0, (0,1), desc='Detailed takeoff', aliases=('No', 'Yes (Namelist &TOLIN required)')) iland = Enum(0, (0,1), desc='Detailed landing', aliases=('No', 'Yes (Namelist &TOLIN required)')) nopro = Enum(0, (0,1), desc='Generate takeoff and climb profiles (Namelists &TOLIN &PROIN and &SEGIN required)', aliases=('No', 'Yes')) noise = Enum(0, (0,1,2), desc='Calculate noise', aliases=('No', 'Yes (Namelist &COSTIN required)', 'Yes for final analysis only')) icost = Enum(0, (0,1), desc='Calculate costs', aliases=('No', 'Yes (Namelist &COSTIN required)')) ifite = Enum(0, (0,1,2,3), desc='Weight equations', aliases=('Transports', 'Fighter/attack', 'General aviation', 'Blended wing body')) class FlopsWrapper_input_option_Plot_Files(VariableTree): """Container for input.option.Plot_Files""" # OpenMDAO Public Variables ixfl = Enum(0, (0,1), desc='Generate mission summary plot files', aliases=('No', 'Yes')) npfile = 
Enum(0, (0,1,2), desc='Output takeoff and climb profiles for use with ANOPP preprocessor (andin)', aliases=('No', 'Yes', 'XFlops')) ipolp = Enum(0, (0,1,2), desc='Drag polar plot data', aliases=('None', 'Drag polars at existing Mach numbers', 'User specified Mach numbers')) polalt = Float(0.0, units='ft', desc='Altitude for drag polar plots') pmach = Array(dtype=numpy_float64, desc='Mach numbers for drag polar plot data') ipltth = Enum(0, (0,1,2), desc='Generate engine plot data', aliases=('None', 'Initial engine', 'Final scaled engine')) iplths = Enum(0, (0,1), desc='Design history plot data', aliases=('No', 'Yes')) cnfile = Str(desc='Contour or thumbprint plot data filename') msfile = Str(desc='Mission summary data filename') crfile = Str(desc='Cruise schedule summary data filename') tofile = Str(desc='Takeoff and landing aerodynamic and thrust data filename') nofile = Str(desc='Takeoff and climb profile data filename') apfile = Str(desc='Drag polar plot data filename') thfile = Str(desc='Engine plot data filename') hsfile = Str(desc='Design history plot filename') psfile = Str(desc='Excess power and load factor plot data filename') class FlopsWrapper_input_option_Excess_Power_Plot(VariableTree): """Container for input.option.Excess_Power_Plot""" # OpenMDAO Public Variables xmax = Float(0.9, desc='Maximum Mach number for plots') xmin = Float(0.3, desc='Minimum Mach number for plots') xinc = Float(0.2, desc='Mach number increment for plots') ymax = Float(40000.0, units='ft', desc='Maximum altitude for plots') ymin = Float(0.0, units='ft', desc='Minimum altitude for plots') yinc = Float(10000.0, units='ft', desc='Altitude increment for plots') pltnz = Array(dtype=numpy_float64, desc='Nz at which Ps contours are plotted (or Nz)') pltpc = Array(dtype=numpy_float64, desc='Engine power (fraction if =< 1; else setting)') ipstdg = Array(dtype=numpy_int64, desc='Store drag schedule (see Namelist &MISSIN)') pltwt = Array(dtype=numpy_float64, units='lb', desc='Fixed 
weight') ipltsg = Array(dtype=numpy_int64, desc='Weight at start of mission segment IPLTSG is used') pltfm = Array(dtype=numpy_float64, desc='Fraction of fuel burned') pltwta = Array(dtype=numpy_float64, units='lb', desc='Delta weight') class FlopsWrapper_input_option(VariableTree): """Container for input.option""" # VariableTrees Excess_Power_Plot = VarTree(FlopsWrapper_input_option_Excess_Power_Plot()) Plot_Files = VarTree(FlopsWrapper_input_option_Plot_Files()) Program_Control = VarTree(FlopsWrapper_input_option_Program_Control()) class FlopsWrapper_input_noisin_Turbine(VariableTree): """Container for input.noisin.Turbine""" # OpenMDAO Public Variables tsupp = Array(dtype=numpy_float64, desc='Turbine suppression spectrum') tbndia = Float(-1.0, units='ft', desc='Diameter of last-stage turbine') gear = Float(1.0, desc='Gear ratio: turbine RPM/fan RPM') cs = Float(0.0, desc='Stator chord to rotor spacing ratio') nblr = Int(-1, desc='Number of last stage rotor blades') ityptb = Enum(0, (1,0), desc='Type of exit plane', aliases=('Turbofans', 'Turbojets or coplanar exits')) etdop = Float(4.0, desc='Exponent on source motion (Doppler) amplification on turbine noise') class FlopsWrapper_input_noisin_Shielding(VariableTree): """Container for input.noisin.Shielding""" # OpenMDAO Public Variables iuotw = Enum(0, (1,0), desc='Engine location relative to wing', aliases=('Over the wing', 'Under the wing')) sfuse = Float(10.0, desc='Maximum fuselage shielding') swide = Float(60.0, units='deg', desc='Degrees of arc where fuselage shielding is greater than SFUSE/e') swing = Float(10.0, desc='Maximum wing shielding for over-the-wing engine') smx = Float(90.0, units='deg', desc='Angle in flyover plane of maximum over-the-wing shielding') cfuse = Float(10.0, units='ft', desc='Characteristic fuselage dimension (such as diameter)') cwing = Float(10.0, units='ft', desc='Characteristic wing dimension (such as chord)') class FlopsWrapper_input_noisin_Propeller(VariableTree): 
"""Container for input.noisin.Propeller""" # OpenMDAO Public Variables nb = Int(0, desc='Number of blades per propeller') bldia = Float(0.0, units='ft', desc='Diameter of propeller') blarea = Float(0.0, units='ft*ft', desc='Total blade area for one side of propeller') gearp = Float(1.0, desc='Ratio of propeller rpm / engine rpm') epdop = Float(1.0, desc='Exponent on source motion (Doppler) amplification on propeller noise') blth = Float(0.0, units='ft', desc='Blade thickness at 70% span') blch = Float(0.0, units='ft', desc='Blade chord at 70% span') blattk = Float(0.0, units='deg', desc='Blade angle of attack at 70% span') dharm = Float(0.5, desc='Rate of decrease in harmonic level beyond tenth, dB/harmonic') nph = Int(10, desc='Number of harmonics of BDF desired') ivor = Enum(1, (1,0), desc='Calculate vortex noise component', aliases=('Vortex noise', 'No vortex noise')) irot = Enum(1, (1,0), desc='Calculate rotational noise component', aliases=('Rotational noise', 'No rotational noise')) ipdir = Enum(0, (1,0), desc='Apply Boeing directivity correction', aliases=('Yes', 'No')) psupp = Array(dtype=numpy_float64, desc='Propeller noise suppression spectrum') class FlopsWrapper_input_noisin_Propagation(VariableTree): """Container for input.noisin.Propagation""" # OpenMDAO Public Variables isupp = Enum(0, (1,0), desc='Apply suppression spectra to each source for which they are supplied', aliases=('Yes', 'No')) idop = Enum(0, (1,0), desc='Apply Doppler frequency and intensity correction to total noise', aliases=('Yes', 'No')) ignd = Enum(0, (0,1,2), desc='Ground reflection option', aliases=('None', 'Perfect reflection', 'Putnam method')) iatm = Enum(0, (0,1,2), desc='Atmospheric absorption correction', aliases=('None', 'SAE ARP 866', 'Bass & Shields')) iega = Enum(0, (1,0), desc='Extra ground attenuation', aliases=('Yes', 'No')) ishld = Enum(0, (1,0), desc='Shielding of fan, jet, core, turbine and propeller sources', aliases=('Yes', 'No')) deldb = Float(20.0, 
desc='Number of dB down from the peak noise level to cut off printing of noise time histories') heng = Float(0.0, units='ft', desc='Height of engine above ground during taxi') filbw = Float(1.0, desc='Fraction of filter bandwidth with a gain of 1') tdi = Float(1.0, units='s', desc='Reception time increment') rh = Float(70.0, desc='Ambient relative humidity') class FlopsWrapper_input_noisin_Observers(VariableTree): """Container for input.noisin.Observers""" # OpenMDAO Public Variables xo = Array(dtype=numpy_float64, units='ft', desc='X-coordinates of observers') yo = Array(dtype=numpy_float64, units='ft', desc='Y-coordinates of observers') zo = Float(0.0, units='ft', desc='Height of all observers above the ground') ndprt = Enum(1, (1,0), desc='Print observer noise histories') ifoot = Enum(0, (1,0), desc='Print noise levels of input observers in countour format to file NSPLOT for subsequent plotting of the noise footprint', aliases=('Print', 'No print')) igeom = Enum(0, (1,0), desc='Print geometric relations of aircraft/observer at each time point', aliases=('Print', 'No print')) thrn = Float(-1.0, units='lb', desc='Thrust of baseline engine. 
Geometry data and engine parameter arrays will be scaled accordingly (Default=THRSO, Namelist &WTIN)') icorr = Enum(0, (1,0), desc='Apply corrections to engine parameters to correct for ambient conditions', aliases=('Yes', 'No')) tcorxp = Float(1.0, desc='Exponent for core temperature correction in engine parameter arrays') class FlopsWrapper_input_noisin_MSJet(VariableTree): """Container for input.noisin.MSJet""" # OpenMDAO Public Variables iy9 = Enum(1, (1,2,3,4,5,6), desc='Type of nozzle', aliases=('Convergent conical', 'Single multitube', 'Single multichute', 'Dual convergent conical', 'Dual, multitube on outer', 'Dual, multichute/spoke on outer')) n = Int(1, desc='Number of tubes (IY9=2,5) or elements (IY9=3,6)') rp = Float(0.0, units='ft', desc='Centerbody plug radius (IY9=2,3,5,6)') b9 = Float(0.0, units='deg', desc='Tube centerline cant angle (IY9-2,5)\nChute/spoke exit cant angle (IY9=3,6)') dt = Float(0.0, units='inch', desc='Tube diameter (IY9=2,5)') z5 = Float(0.0, desc='Number of rows of tubes, counting center tube (if present) as zero (IY9=2,5)') s1j = Float(0.0, desc='Tube centerline spacing to tube diameter ratio (IY9=2,5)') a6 = Float(0.0, desc='Ratio of ejector inlet area to nozzle (total or annulus) area (input zero for no ejector) (IY9=2,3,5,6)') zl9 = Float(0.0, desc='Ratio of ejector length to suppressor nozzle equivalent diameter (IY9=2,3,5,6)') a = Array(dtype=numpy_float64, desc='A(0): Ejector treatment faceplate thickness, in\nA(1): Ejector treatment hole diameter, in\nA(2): Ejector treatment cavity depth, in\nA(3): Ejector treatment open area ratio\n(IY9=2,3,5,6)') # TODO - rr and rx are units of 'Rayl' (rayleigh) rr = Array(dtype=numpy_float64, desc='Ejector treatment specific resistance (59 values required) (IY9=2,3,5,6)') rx = Array(dtype=numpy_float64, desc='Ejector treatment specific reactance (59 values required) (IY9=2,3,5,6)') r4 = Float(0.0, units='inch', desc='Outer circumferential flow dimension (IY9=3,6)') r6 = Float(0.0, 
units='inch', desc='Inner circumferential flow dimension (IY9=3,6)') ss = Float(0.0, units='inch', desc='Outer circumferential element dimension (IY9=3,6)') dn = Float(0.0, units='ft', desc='Nozzle outer diameter') aa = Float(0.0, desc='Unknown variable') nflt = Int(1, desc='Unknown variable') htr = Float(0.0, desc='Unknown variable') nst = Int(1, desc='Unknown variable') class FlopsWrapper_input_noisin_Jet(VariableTree): """Container for input.noisin.Jet""" # OpenMDAO Public Variables inoz = Enum(0, (1,0), desc='Type of nozzle', aliases=('Coaxial', 'Circular')) iplug = Enum(0, (1,0), desc='Plug nozzle on primary', aliases=('Plug', 'No plug')) islot = Enum(0, (1,0), desc='Slot nozzle on primary', aliases=('Slot nozzle', 'No slot')) iaz = Enum(0, (1,0), desc='Azimuthal correction for nozzle geometry', aliases=('Yes', 'No')) dbaz = Float(0.0, desc='Noise reduction due to nozzle geometry at phi = 75 degrees, used only if IAZ = 1') ejdop = Float(1.0, desc='Exponent on source motion (Doppler) amplification on shock noise only. Used for IJET=1,2') zmdc = Float(1.0, desc='Core (primary) jet design Mach number. Used for application of non-ideally expanded shock noise. Used for IJET=1,2') gammac = Float(-1.0, desc='Core (primary) jet exhaust gamma Used for IJET=1,2,6 (Default = 1.4)') gasrc = Float(-1.0, units='(ft*lb)/(lb*degR)', desc='Core exhaust gas constant, Used for IJET=1,2 (Default = 53.35)') annht = Float(0.0, units='ft', desc='Core nozzle annulus height. Used for IJET=1,2') zmdf = Float(1.0, desc='Fan (secondary) jet design Mach number. Used for application of non-ideally expanded shock noise. Used for IJET=1,2') gammap = Float(-1.0, desc='Fan (secondary) jet exhaust gamma Used for IJET=1,2 (Default = GAMMAF)') gasrf = Float(53.35, units='(ft*lb)/(lb*degR)', desc='Fan exhaust gas constant. Used for IJET=1,2') annhtf = Float(0.0, units='ft', desc='Fan nozzle annulus height. Used for IJET=1,2') dhc = Float(-1.0, units='ft', desc='Core nozzle hydraulic diameter. 
Used for IJET=3,4') dhf = Float(0.0, units='ft', desc='Fan nozzle hydraulic diameter. Used for IJET=3,4') zl2 = Float(0.0, units='ft', desc='Axial distance from the outer exit plane to the exit plane of the inner nozzle. Used for IJET=3,4') ifwd = Enum(0, (1,0), desc='Forward velocity effects on source. Used for IJET=1,2,3,4,5', aliases=('Yes', 'No')) ishock = Enum(1, (1,0), desc='Calculate shock noise. Used for IJET=1,2,3,4,5', aliases=('Shock noise', 'No shock')) zjsupp = Array(dtype=numpy_float64, desc='Jet suppression spectrum. Used for IJET=1,2,3,4,5') class FlopsWrapper_input_noisin_Ground_Effects(VariableTree): """Container for input.noisin.Ground_Effects""" # OpenMDAO Public Variables itone = Enum(0, (1,0), desc='1/3-octave bands exceeding adjacent bands by 3 dB or more are approximated as tones', aliases=('Yes', 'No')) #nht = Int(0, desc='Number of heights to be used to approximate a distributed source by multiple sources') dk = Array(dtype=numpy_float64, units='ft', desc='Heights of multiple sources from source center') class FlopsWrapper_input_noisin_Flap_Noise(VariableTree): """Container for input.noisin.Flap_Noise""" # OpenMDAO Public Variables ilnoz = Enum(0, (2,1,0), desc='Nozzle type', aliases=('Coaxial, mixed flow', 'Coaxial, separate flow', 'Circular')) insens = Enum(0, (1,0), desc='Configuration with noise levels insensitive to flap angle', aliases=('Yes', 'No')) ac1 = Float(0.0, units='ft*ft', desc='Core (primary) nozzle area') af1 = Float(0.0, units='ft*ft', desc='Fan (secondary) nozzle area') bpr = Float(0.0, desc='Bypass ratio, for mixed flow coaxial nozzle') wingd = Float(0.0, desc='Ratio of wing chord to total nozzle diameter, used for large BPR designs when WINGD < 3') flsupp = Array(dtype=numpy_float64, desc='Flap noise suppression spectrum') eldop = Float(0.0, desc='Exponent on source motion (Doppler) amplification on flap noise') class FlopsWrapper_input_noisin_Fan(VariableTree): """Container for input.noisin.Fan""" # OpenMDAO Public 
Variables igv = Enum(0, (1,0), desc='Inlet guide vane option', aliases=('Inlet guide vane', 'No IGV')) ifd = Enum(0, (1,0), desc='Inlet flow distortion option during ground run', aliases=('Inlet flow distortion', 'No distortion')) iexh = Enum(2, (0,1,2), desc='Fan inlet, exhaust noise options', aliases=('Inlet only', 'Exhaust only', 'Both inlet & exhaust')) nfh = Int(10, desc='Number of harmonics to be considered in blade-passing tone') nstg = Int(-1, desc='Number of fan stages') suppin = Array(dtype=numpy_float64, desc='Fan inlet suppression spectrum') suppex = Array(dtype=numpy_float64, desc='Fan exhaust suppression spectrum') methtip = Enum(1, (1,2,3), desc='Method for calculation of relative tip Mach number', aliases=('ANOPP method', 'Clark', 'Use ATIPM')) icomb = Enum(1, (1,0), desc='Option to include combination tones if relative tip Mach number is supersonic', aliases=('Combination tones', 'No combination tones')) decmpt = Float(0.0, desc='Decrement to apply to combination tones') gammaf = Float(1.4, desc='Gamma of fan air') nbl = Int(-1, desc='Number of fan blades') nvan = Int(-1, desc='Number of stator vanes') fandia = Float(-1.0, units='ft', desc='Fan diameter') fanhub = Float(-1.0, units='ft', desc='Fan hub diameter') tipmd = Float(-1.0, desc='Design relative tip Mach number') rss = Float(100.0, desc='Rotor-stator spacing in percent') efdop = Float(4.0, desc='Exponent on source motion (Doppler) amplification on fan noise') faneff = Float(0.88, desc='Constant first stage fan efficiency, < 1.0. 
Overridden by AFANEF') nbl2 = Int(-1, desc='Number of fan blades for second stage (Default = NBL)') nvan2 = Int(-1, desc='Number of stator vanes for second stage (Default = NVAN)') fand2 = Float(-1.0, units='ft', desc='Fan diameter for second stage (Default = FANDIA)') tipmd2 = Float(-1.0, desc='Design relative tip Mach number for second stage (Default = TIPMD)') rss2 = Float(-1.0, desc='Rotor-stator spacing in percent for second stage (Default = RSS)') efdop2 = Float(-1.0, desc='Exponent on source motion (Doppler) amplification on second stage fan noise (Default = EFDOP)') fanef2 = Float(0.88, desc='Constant second stage fan efficiency, < 1.0. Overridden by AFANF2') trat = Float(-1.0, desc='Ratio of second stage temperature rise (DELT2) to that of first stage. Either TRAT or PRAT is used to calculate DELT2.') prat = Float(1.0, desc='Ratio of second stage fan pressure ratio to that of first stage') class FlopsWrapper_input_noisin_Engine_Parameters(VariableTree): """Container for input.noisin.Engine_Parameters""" # OpenMDAO Public Variables aepp = Array(dtype=numpy_float64, desc='Throttle settings as a fraction of net thrust') avc = Array(dtype=numpy_float64, units='ft/s', desc='Core/primary exhaust jet velocity (ideally expanded velocity; exclude friction and expansion alterations). Used for IJET=1,2,3,4,6') avf = Array(dtype=numpy_float64, units='ft/s', desc='Fan/secondary exhaust jet velocity (ideally expanded velocity; exclude friction and expansion alterations). Used for IJET=1,2,3,4') atc = Array(dtype=numpy_float64, units='degR', desc='Core/primary jet exhaust total temperature. Used for IJET=1,2,3,4,6') atf = Array(dtype=numpy_float64, units='degR', desc='Fan/secondary jet exhaust total temperature. Used for IJET=1,2,3,4') aac = Array(dtype=numpy_float64, units='ft*ft', desc='Core jet nozzle exhaust area. 
For IJET=1,2,6, AAC represents exit area; for IJET=3,4, AAC represents throat area.') aaf = Array(dtype=numpy_float64, units='ft*ft', desc='Fan jet nozzle exhaust area. For IJET=1 or IJET=2, AAF represents exit area; for IJET=3,4, AAF represents throat area.') adj = Array(dtype=numpy_float64, units='ft', desc='Core outer diameter; at the equivalent throat if the nozzle is C-D. Used only for IJET=3,4') adj2 = Array(dtype=numpy_float64, units='ft', desc='Fan outer diameter; at the equivalent throat if the nozzle is C-D. Used only for IJET=3,4') ahj = Array(dtype=numpy_float64, units='ft', desc='Core annulus height; at the equivalent throat if the nozzle is C-D. Used only for IJET=3,4') ahj2 = Array(dtype=numpy_float64, units='ft', desc='Fan annulus height; at the equivalent throat if the nozzle is C-D. Used only for IJET=3,4') afuel = Array(dtype=numpy_float64, units='lb/s', desc='Fuel flow. Used if ICORE, ITURB=1; and IJET=1,2 and only if calculating GAMMAC and GASRC.') atipm = Array(dtype=numpy_float64, desc='Fan first-stage relative tip Mach number. These are approximated if not input. Used if IFAN=1') atipm2 = Array(dtype=numpy_float64, desc='Fan second-stage relative tip Mach number. These are approximated if not input. Used if IFAN=1') awafan = Array(dtype=numpy_float64, units='lb/s', desc='Total engine airflow. Used if IFAN=1') adelt = Array(dtype=numpy_float64, units='degR', desc='Fan temperature rise. Used if IFAN=1') afpr = Array(dtype=numpy_float64, desc='Fan pressure ratio. This is not needed if ADELT is input. Otherwise, values for ADELT will be calculated using AFANEF and AFANF2 values.') afanef = Array(dtype=numpy_float64, desc='Fan first-stage efficiency. These are required if AFPR is supplied rather than ADELT.') afanf2 = Array(dtype=numpy_float64, desc='Fan second-stage efficiency. These are required if AFPR is supplied rather than ADELT.') arpm = Array(dtype=numpy_float64, units='rpm', desc='Fan or turbine speed. 
Used if IFAN, ITURB=1') awcore = Array(dtype=numpy_float64, units='lb/s', desc='Burner and turbine airflow. Used if ICORE or ITURB=1 and IJET=1,2 and only if calculating GAMMAC and GASRC.') ap3 = Array(dtype=numpy_float64, units='psf', desc='Burner inlet pressure. Used if ICORE=1') at3 = Array(dtype=numpy_float64, units='degR', desc='Burner inlet temperature. Used if ICORE=1') at4 = Array(dtype=numpy_float64, units='degR', desc='Burner exit static temperature. These are approximated from the fuel/air ratio if not input. Used if ICORE=1') aturts = Array(dtype=numpy_float64, units='ft/s', desc='Turbine last stage rotor relative tip speed. These are approximated if not input. Used if ITURB=1') atctur = Array(dtype=numpy_float64, units='degR', desc='Turbine exit temperature. These are assumed the same as ATC if not supplied. Used if ITURB=1') aepwr = Array(dtype=numpy_float64, units='hp', desc='Horsepower supplied to propeller. Used if IPROP=1') athrst = Array(dtype=numpy_float64, units='lb', desc='Propeller thrust. Used if IPROP=1') amsp9 = Array(dtype=numpy_float64, desc='Nozzle pressure ratio: entance total to ambient static. Used for M*S code jet predictions, IJET=5') amstt3 = Array(dtype=numpy_float64, units='degR', desc='Nozzle exit total temperature. Used for M*S code jet predictions, IJET=5') amsa9 = Array(dtype=numpy_float64, units='ft*ft', desc='Nozzle exit area. Used for M*S code jet predictions, IJET=5') amsa7 = Array(dtype=numpy_float64, desc='Nozzle ejector chute area ratio. Used for M*S code jet predictions, IJET=5') amsaa8 = Array(dtype=numpy_float64, units='ft*ft', desc='Inner nozzle flow area. Used for M*S code jet predictions, IJET=5') amstt4 = Array(dtype=numpy_float64, units='degR', desc='Inner nozzle exit total temperature. Used for M*S code jet predictions, IJET=5') amsp4 = Array(dtype=numpy_float64, desc='Inner nozzle pressure ratio: entrance total to ambient static. 
Used for M*S code jet predictions, IJET=5') amstt5 = Array(dtype=numpy_float64, units='degR', desc='Outer nozzle exit total temperature. Used for M*S code jet predictions, IJET=5') amsp5 = Array(dtype=numpy_float64, desc='Outer nozzle pressure ratio: entrance total to ambient static. Used for M*S code jet predictions, IJET=5') class FlopsWrapper_input_noisin_Core(VariableTree): """Container for input.noisin.Core""" # OpenMDAO Public Variables csupp = Array(dtype=numpy_float64, desc='Core suppression spectrum') gamma = Float(1.4, desc='Specific heat ratio; required if using AP3 rather than AT3') imod = Enum(0, (1,0), desc='Use modified core level prediction', aliases=('Yes', 'No')) dtemd = Float(-1.0, units='degR', desc='Design turbine temperature drop') ecdop = Float(2.0, desc='Exponent on source motion (Doppler) amplification on core noise') class FlopsWrapper_input_noisin_Basic(VariableTree): """Container for input.noisin.Basic""" # OpenMDAO Public Variables iepn = Enum(0, (0,1,2), desc='= 0, Stage III\n= 1, Stage III - Delta dB (see DEPNT, DEPNS and DEPNL)\n=2, Find the X-coordinate where the maximum EPNL occurs. NOB, XO and YO must be input. YO should be constant. IEPN=2 is usually used to get a sideline (YO) noise for GA aircraft.', aliases=('Stage III', 'Stage III - Delta', 'Find max. 
EPNL')) depnt = Float(0.0, desc='Increment below Stage III for takeoff (see IEPN)') depns = Float(0.0, desc='Increment below Stage III for sideline (see IEPN).\nIf IEPN=2, DEPNS is the upper limit for sideline noise.') depnl = Float(0.0, desc='Increment below Stage III for landing (see IEPN)') itrade = Enum(0, (1,0), desc='Option to trade 2 dB between sideline and flyover noise', aliases=('Trade', 'No trade')) ijet = Enum(0, (0,1,2,3,4,5,6), desc='Jet noise option', aliases=('None', 'Stone/Clark', 'Kresja', 'Stone ALLJET', 'Stone JET181', 'GE M*S', 'SAE A-21 (ANOPP)')) ifan = Enum(0, (0,1,2), desc='Fan noise option', aliases=('None', 'Heidmann', 'Gliebe')) icore = Enum(0, (0,1), desc='Core noise option', aliases=('None', 'Core noise')) iturb = Enum(0, (0,1), desc='Turbine noise option', aliases=('None', 'Turbine noise')) iprop = Enum(0, (0,1,2), desc='Propeller noise option', aliases=('None', 'SAE', 'Gutin')) iflap = Enum(0, (0,1), desc='Flap noise/Jet-flap impingement noise option', aliases=('None', 'Flap & jet/flap noise')) iairf = Enum(0, (0,1), desc='Airframe noise option', aliases=('None', 'Airframe noise')) igear = Enum(0, (0,1), desc='Gear box noise option', aliases=('None', 'Approx. gear box noise')) class FlopsWrapper_input_noisin_Airframe(VariableTree): """Container for input.noisin.Airframe""" # OpenMDAO Public Variables ifl = Enum(0, (1,0), desc='Include slotted flap noise', aliases=('Slotted flap noise', 'No slotted flap noise')) nf = Int(2, desc='Number of trailing edge flap slots for IFL = 1') pfchd = Float(0.25, desc='Average chord for slotted flap, ft or fraction of wing chord. Used only if IFL = 1') itypw = Enum(1, (1,2), desc='Type of wing', aliases=('Conventional', 'Delta')) iclean = Enum(0, (1,0), desc='Aerodynamically clean aircraft', aliases=('Aerodynamically clean', 'Conventional')) iwing = Enum(0, (1,0), desc='Wing, horizontal and vertical tail noise', aliases=('Wing, horiz., vert. 
tail noise', 'No wing, tail noise')) islat = Enum(0, (1,0), desc='Slatted leading edge noise', aliases=('Slatted l.e. noise', 'No slatted l.e. noise')) ilg = Enum(0, (1,0), desc='Nose and main landing gear noise', aliases=('Landing gear noise', 'No landing gear noise')) ng = Array(dtype=numpy_int64, desc='NG(0): Number of nose gear trucks\nNG(1): Number of main gear trucks') nw = Array(dtype=numpy_int64, desc='NW(0): Number of wheels per nose gear truck\nNW(1): Number of wheels per main gear truck') dw = Array(dtype=numpy_float64, units='ft', desc='DW(0): Diameter of nose gear tires\nDW(1): Diameter of main gear tires') cg = Array(dtype=numpy_float64, desc='CG(0): Ratio of nose strut length to DW(0)\nCG(1): Ratio of main strut length to DW(1)') class FlopsWrapper_input_noisin(VariableTree): """Container for input.noisin""" # VariableTrees Airframe = VarTree(FlopsWrapper_input_noisin_Airframe()) Basic = VarTree(FlopsWrapper_input_noisin_Basic()) Core = VarTree(FlopsWrapper_input_noisin_Core()) Engine_Parameters = VarTree(FlopsWrapper_input_noisin_Engine_Parameters()) Fan = VarTree(FlopsWrapper_input_noisin_Fan()) Flap_Noise = VarTree(FlopsWrapper_input_noisin_Flap_Noise()) Ground_Effects = VarTree(FlopsWrapper_input_noisin_Ground_Effects()) Jet = VarTree(FlopsWrapper_input_noisin_Jet()) MSJet = VarTree(FlopsWrapper_input_noisin_MSJet()) Observers = VarTree(FlopsWrapper_input_noisin_Observers()) Propagation = VarTree(FlopsWrapper_input_noisin_Propagation()) Propeller = VarTree(FlopsWrapper_input_noisin_Propeller()) Shielding = VarTree(FlopsWrapper_input_noisin_Shielding()) Turbine = VarTree(FlopsWrapper_input_noisin_Turbine()) class FlopsWrapper_input_nacell(VariableTree): """Container for input.nacell""" # OpenMDAO Public Variables x1r = Float(2.06, desc='X1 / R. 
If IVAR = -1, X1R is the cowl length divided by the inlet capture radius.') x2r = Float(1.58, desc='X2 / R') r1r = Float(0.354, desc='R1 / R') r2r = Float(0.585, desc='R2 / R') angle = Float(7.0, units='deg', desc='Average angle of the subsonic diffuser portion of the inlet between the throat and the engine face') clang = Float(0.0, units='deg', desc='Cowl lip angle') mixed = Enum(-1, (-1,0,1), desc='Inlet compression type indicator\n= -1, Inlet geometry is based solely on the geometry variables described above.\n= 0, Inlet geometry is based in the internal geometry data base for external compression inlets and the given inlet design Mach number.\n= 1, Inlet geometry is based in the internal geometry data base for mixed compression inlets and the given inlet design Mach number', aliases=('Use geometry variables', 'External compression inlet', 'Mixed compression inlet')) radd = Float(3.0, units='inch', desc='Distance from the engine compressor tip to the exterior of the nacelle. If RADD < 1. 
the added radial distance is RADD times the compressor tip radius.') xnlod = Float(-10.0, desc='Nozzle length / diameter (Default is computed') xnld2 = Float(-10.0, desc='Fan nozzle length / diameter (Default is computed') inac = Enum(0, (-5,-4,-3,-2,-1,0,1,2,3,4,5), desc='Nacelle type indicator', aliases=('2-D Bifurcated inlet + axisymmetric nozzle + podded together', '2-D Bifurcated inlet + 2-D nozzle + podded together', '2-D inlet + axisymmetric nozzle + podded together', '2-D + podded together', 'Axisymmetric + podded together', 'None', 'Axisymmetric', '2-D', '2-D inlet + Axisymmetric nozzle', '2-D Bifurcated inlet + 2-D nozzle', '2-D Bifurcated inlet + axisymmetric nozzle')) ivar = Enum(1, (-1,0,1,2,3), desc='Inlet variable geometry switch used to estimate weight factor WTCB1', aliases=('Fixed no centerbody', 'Fixed centerbody', 'Translating centerbody', 'Collapsing centerbody', 'Translating & collapsing centerbody')) nvar = Enum(0, (0,1,2,3,4), desc='Nozzle variable geometry switch used to estimate weight factor WTNOZ', aliases=('Fixed geometry', 'Variable area throat', 'Variable area exit', 'Variable throat & exit', 'Fixed plug core & fixed fan nozzle')) wtcb1 = Float(-10.0, desc='Weighting factor for the inlet centerbody up to the throat. Multiplied by the surface area of the applicable inlet section to predict inlet weight. The default is based on the internal materials data base and the maximum cruise Mach number.') wtcb2 = Float(-10.0, desc='Weighting factor for the inlet centerbody from the throat to the engine face. Multiplied by the surface area of the applicable inlet section to predict inlet weight. The default is based on the internal materials data base and the maximum cruise Mach number.') wtint = Float(-10.0, desc='Weighting factor for the internal cowl up to the engine face. Multiplied by the surface area of the applicable inlet section to predict inlet weight. 
The default is based on the internal materials data base and the maximum cruise Mach number.') wtext = Float(-10.0, desc='Weighting factor for the external nacelle. Multiplied by the surface area of the applicable inlet section to predict inlet weight. The default is based on the internal materials data base and the maximum cruise Mach number.') wtnoz = Float(-10.0, desc='Weighting factor for the nozzle. Multiplied by the surface area of the applicable inlet section to predict inlet weight. The default is based on the internal materials data base and the maximum cruise Mach number.') h2w = Float(1.0, desc='Inlet height to width ratio for 2-D inlets') class FlopsWrapper_input_mission_definition(VariableTree): """Container for input.mission_definition""" # OpenMDAO Public Variables mission = List(iotype='in') class FlopsWrapper_input_missin_User_Weights(VariableTree): """Container for input.missin.User_Weights""" # OpenMDAO Public Variables mywts = Enum(0, (0,1), desc='Weight input switch, overrides value input in Namelist &WTIN.', aliases=('Compute weight', 'User-specified')) rampwt = Float(0.0, units='lb', desc='Gross weight before taxi out (Default = DOWE + PAYLOD + FUEMAX)') dowe = Float(0.0, units='lb', desc='Fixed operating weight empty') paylod = Float(0.0, units='lb', desc='Fixed payload weight') fuemax = Float(0.0, units='lb', desc='Total usable fuel weight\nFUEMAX = RAMPWT - DOWE - PAYLOD.\nRequired only if RAMPWT is not input') class FlopsWrapper_input_missin_Turn_Segments(VariableTree): """Container for input.missin.Turn_Segments""" # OpenMDAO Public Variables xnz = Array(dtype=numpy_float64, units='g', desc='Maximum turn load factor at each Mach number') xcl = Array(dtype=numpy_float64, desc='Maximum turn lift coefficient at each Mach number') xmach = Array(dtype=numpy_float64, desc='Mach number array corresponding to both XNZ and XCL') class FlopsWrapper_input_missin_Store_Drag(VariableTree): """Container for input.missin.Store_Drag""" # OpenMDAO Public 
Variables stma = Array(dtype=numpy_float64, desc='Mach number schedule for store drags. Store drags can also be assessed in ACCEL and TURN segments of the mission as covered in the Segment Definition Cards section, in PS and NZ plots (see Namelist &OPTION), and in performance constraints (see Namelist &PCONIN)') cdst = Array(dtype=numpy_float64, desc='Corresponding drag coefficients or D/q') istcl = Array(dtype=numpy_int64, desc='Store drag condition applied to climb schedule K\n= 0, No store drag for climb schedule K') istcr = Array(dtype=numpy_int64, desc='Store drag condition applied to cruise schedule K\n= 0, No store drag for cruise schedule K') istde = Int(0, desc='Store drag condition applied to descent schedule\n= 0, No store drag for descent schedule') class FlopsWrapper_input_missin_Reserve(VariableTree): """Container for input.missin.Reserve""" # OpenMDAO Public Variables irs = Enum(2, (1,2,3), desc='Reserve fuel calculation switch', aliases=('Calculated for trip to alternate airport plus RESRFU and/or RESTRP', 'Constant values (RESRFU and/or RESTRP) only', 'Reserve fuel is what is left over after primary mission')) resrfu = Float(0.0, desc='> 1., Fixed reserve fuel, lb\n< 1., Reserve fuel as a fraction of total usable fuel weight') restrp = Float(0.0, desc='Reserve fuel as a fraction of total trip fuel weight') timmap = Float(0.0, units='min', desc='Missed approach time') altran = Float(0.0, units='nmi', desc='Range to alternate airport') nclres = Int(1, desc='Climb schedule number used in reserve mission') ncrres = Int(1, desc='Cruise schedule number used in reserve mission') sremch = Float(-1.0, desc='Start reserve Mach number (Default = CLMMIN[NCLRES])') eremch = Float(-1.0, desc='End reserve Mach number (Default = DEMMIN)') srealt = Float(-1.0, units='ft', desc='Start reserve altitude (Default = CLAMIN[NCLRES])') erealt = Float(-1.0, units='ft', desc='End reserve altitude (Default = DEAMIN)') holdtm = Float(0.0, units='min', desc='Reserve holding 
time') ncrhol = Int(0, desc='Cruise schedule number for hold (Default = NCRRES)') ihopos = Enum(1, (0,1,2), desc='Hold position switch', aliases=('Between main descent and missed approach', 'End of reserve cruise', 'End of reserve descent')) icron = Enum(0, (0,1,2), desc='Type of flight to alternate airport', aliases=('Climb-cruise-descend', 'Climb-cruise-beam down to airport', 'Cruise only')) thold = Float(0.0, desc='Used to define a hold segment between main mission descent and missed approach.\n> 1., Reserve holding time, min\n< 1., Fraction of flight time to be used as reserve holding time. (Effective only if IRW = 1)\n= 0., This option is ignored') ncrth = Int(1, desc='Cruise schedule number for THOLD') class FlopsWrapper_input_missin_Ground_Operations(VariableTree): """Container for input.missin.Ground_Operations""" # OpenMDAO Public Variables takotm = Float(0.0, units='min', desc='Takeoff time') taxotm = Float(0.0, units='min', desc='Taxi out time') apprtm = Float(0.0, units='min', desc='Approach time') appfff = Float(2.0, desc='Approach fuel flow factor applied to sea level static idle fuel flow') taxitm = Float(0.0, units='min', desc='Taxi in time') ittff = Int(0, desc='> 0, Engine deck power setting for takeoff (Usually = 1 if specified). Taxi fuel flow is sea level static idle.\n= 0, Use TAKOFF and TXFUFL.') takoff = Float(0.0, units='lb/h', desc='Takeoff fuel flow') txfufl = Float(0.0, units='lb/h', desc='Taxi fuel flow') ftkofl = Float(0.0, units='lb', desc='Fixed takeoff fuel. This ovverides the calculated value and is not scaled with engine thrust') ftxofl = Float(0.0, units='lb', desc='Fixed taxi out fuel. This ovverides the calculated value and is not scaled with engine thrust') ftxifl = Float(0.0, units='lb', desc='Fixed taxi in fuel. This ovverides the calculated value and is not scaled with engine thrust') faprfl = Float(0.0, units='lb', desc='Fixed approach fuel. 
This ovverides the calculated value and is not scaled with engine thrust') class FlopsWrapper_input_missin_Descent(VariableTree): """Container for input.missin.Descent""" # OpenMDAO Public Variables ivs = Enum(1, (0,1,2), desc='Descent option switch', aliases=('No descent time or distance or fuel', 'Descend at optimum L/D', 'Descend at constance lift coefficient')) decl = Float(0.8, desc='Descent lift coefficient for IVS = 2') demmin = Float(0.3, desc='Minimum Mach number') demmax = Float(0.0, desc='Max Mach number (Default = VCMN, Namelist &CONFIN)') deamin = Float(0.0, units='ft', desc='Minimum altitude') deamax = Float(0.0, units='ft', desc='Max altitude (Default = CH, Namelist &CONFIN)') ninde = Int(31, desc='Number of descent steps') dedcd = Float(0.0, desc='Drag coefficient increment applied to descent') rdlim = Float(-99999.0, units='ft/min', desc='Limiting or constant rate of descent. Must be negative') ns = Int(0, desc='Number of altitudes for q limit schedule (Default = 0 - QLIM is used, Maximum = 20 )') keasvd = Enum(0, (0,1), desc='= 1, VDTAB is in knots equivalent airspeed (keas)\n\n= 0, VDTAB is true airspeed or Mach number (Default)', aliases=('VDTAB is Mach number', 'VDTAB in knots')) adtab = Array(dtype=numpy_float64, units='ft', desc='Descent altitude schedule. If only part of the descent profile is specified, the portion of the profile outside the energy range defined by values of ADTAB and VDTAB will be optimized for the descent schedule.') vdtab = Array(dtype=numpy_float64, desc='Descent speed schedule, kts or Mach number') class FlopsWrapper_input_missin_Cruise(VariableTree): """Container for input.missin.Cruise""" # OpenMDAO Public Variables ncruse = Int(1, desc='Number of cruise schedules to be defined (Default = 1, Maximum = 6, Include reserve cruise)') ioc = List([1], Enum(1, (0,1,2,3,4,5,6,7,8,9,10), aliases=('Opt. alt. and Mach for specific range', 'Fixed Mach + opt. alt. for specific range', 'Fixed Mach at input max. alt. 
or cruise ceiling', 'Fixed alt. + opt. Mach for specific range', 'Fixed alt. + opt. Mach for endurance (min. fuel flow)', 'Fixed alt. + constant lift coefficient (CRCLMX)', 'Fixed Mach + opt. alt. for endurance', 'Opt. Mach and alt. for endurance', 'Max. Mach at input fixed alt.', 'Max. Mach at opt. alt.', 'Fixed Mach + constant lift coefficient (CRCLMX')), desc='Cruise option switch') crmach = Array(array([0.0]), dtype=numpy_float64, desc='Maximum or fixed Mach number (or velocity, kts) (Default = VCMN, Namelist &CONFIN)') cralt = Array(array([-1.0]), dtype=numpy_float64, units='ft', desc='Maximum or fixed altitude (Default = CH, Namelist &CONFIN)') crdcd = Array(array([0.0]), dtype=numpy_float64, desc='Drag coefficient increment') flrcr = Array(array([1.0]), dtype=numpy_float64, desc='Specific range factor for long range cruise Mach number - used if IOC = 3') crmmin = Array(array([0.0]), dtype=numpy_float64, desc='Minimum Mach number') crclmx = Array(array([0.0]), dtype=numpy_float64, desc='Maximum or fixed lift coefficient') hpmin = Array(array([1000.0]), dtype=numpy_float64, units='ft', desc='Minimum cruise altitude.\nFor fixed Mach number cruise schedules, HPMIN can be used to enforce a dynamic pressure (Q) limit.') ffuel = Array(array([1.0]), dtype=numpy_float64, desc='Fuel factor in cruise profile optimization') fnox = Array(array([0.0]), dtype=numpy_float64, desc='NOx emissions factor in cruise profile optimization.\nSince for supersonic engines the NOx emissions are on the order of 1 - 3 percent of fuel, FNOX should be relatively large (30. - 100.) 
to get comparable weighting.') ifeath = List([0], Enum(0, (1,0,-1)), desc='Cruise feathering option', aliases=('Engines may be feathered', 'No feathering', 'Engines must be feathered')) feathf = Array(array([0.5]), dtype=numpy_float64, desc='Fraction of engines remaining after feathering') cdfeth = Array(array([0.0]), dtype=numpy_float64, desc='Drag coefficient increase due to feathered engines') dcwt = Float(1.0, units='lb', desc='Weight increment used to compute cruise tables (Default = the greater of 1. or DWT/20)') rcin = Float(100.0, units='ft/min', desc='Instantaneous rate of climb for ceiling calculation') wtbm = Array(dtype=numpy_float64, desc='Array of weights for specification of max. allowable altitude for low sonic boom configurations (must be in ascending order) Since linear interpolation/extrapolation is used, data should cover the entire expected weight range.') altbm = Array(dtype=numpy_float64, units='ft', desc='Corresponding array of maximum altitudes') class FlopsWrapper_input_missin_Climb(VariableTree): """Container for input.missin.Climb""" # OpenMDAO Public Variables nclimb = Int(1, desc='Number of climb schedules to be defined (Default = 1, Maximum = 4, Include reserve climb)') clmmin = Array(array([0.3]), dtype=numpy_float64, desc='Minimum Mach number for each climb schedule.\nNote: Separate climb schedules are not required if the only changes are in the minimum or maximum Mach number or altitude. Just make sure all climbs are bracketed.') clmmax = Array(array([0.0]), dtype=numpy_float64, desc='Maximum Mach number (Default = VCMN, Namelist &CONFIN).\nNote: Separate climb schedules are not required if the only changes are in the minimum or maximum Mach number or altitude. 
Just make sure all climbs are bracketed.') clamin = Array(array([0.0]), dtype=numpy_float64, units='ft', desc='Minimum altitude') clamax = Array(array([0.0]), dtype=numpy_float64, units='ft', desc='Maximum altitude (Default = CH, Namelist &CONFIN)') nincl = Array(array([31]), dtype=numpy_int64, desc='Number of climb steps') fwf = Array(array([-0.0010]), dtype=numpy_float64, desc='Climb profile optimization function control parameter. Recommended aircraft in parentheses.\n= 1., minimum fuel-to-distance profile (Subsonic transports, do NOT use for supersonic transports)\n= 0., minimum time-to-distance profile (Interceptors only)\n1. > FWF > 0., combination of the above\n= -.001, minimum time-to-climb profile (Fighters)\n= -1., minimum fuel-to-climb profile (Supersonic transports, Subsonic transports)\n-1. < FWF < -.001, combination of the above') ncrcl = Array(array([1]), dtype=numpy_int64, desc='Number of the cruise schedule to be used in fuel- or time-to-distance profile climb optimization comparisons') cldcd = Array(array([0.0]), dtype=numpy_float64, desc='Drag coefficient increment applied to each climb schedule. If coefficient varies with Mach number, see ISTCL above.') ippcl = Array(array([1]), dtype=numpy_int64, desc='Number of power settings to be considered for climb. Program will select the most efficient. Should be used only with afterburning engines for minimum fuel climb profiles.') maxcl = Array(array([1]), dtype=numpy_int64, desc='Maximum power setting used for climb') actab = Array(zeros(shape=(0,0)), dtype=numpy_float64, units='ft', desc='Altitude schedule. If not input, climb profile will be optimized') vctab = Array(zeros(shape=(0,0)), dtype=numpy_float64, units='nmi', desc='Climb speed schedule. 
If not input, climb profile will be optimized') keasvc = Enum(0, (1,0), desc='Type of velocity input in VCTAB', aliases=('Knots equivalent airspeed (keas)', 'True airspeed or Mach no.')) ifaacl = Enum(1, (0,1,2), desc='Climb speed limit option', aliases=('Optimum speed', 'Max. 250 knots CAS below 10,000 ft', 'Climb to 250 kcas at 1500 ft then SPDLIM at 10,000 ft')) ifaade = Enum(-1, (-1,0,1), desc='Descent speed limit option', aliases=('Use default', 'Optimum speed', 'Max. 250 knots CAS below 10,000 ft')) nodive = Enum(0, (0,1), desc='Rate of climb limit option', aliases=('Optimum altitude at each energy level', 'Min. rate of climb limit enfored')) divlim = Float(0.0, units='ft/min', desc='Minimum allowable rate of climb or descent.\nEnforced only if NODIVE = 1, may be negative to allow a shallow dive during climb.') qlim = Float(0.0, units='psf', desc='Constant dynamic pressure limit. Applied at all climb and descent points not covered by the variable dynamic pressure limit below.') spdlim = Float(0.0, desc='Maximum speed at 10,000 ft, used only for IFAACL = 2, kts or Mach number (Default is computed from\n a) the variable dynamic pressure limit below, if applicable,\n b) QLIM above, if QLIM > 0., or\n c) a dynamic pressure of 450 psf, in that order)') nql = Int(0, desc='Number of altitudes for q limit schedule (Default = 0 - QLIM is used, Maximum = 20 )') qlalt = Array(dtype=numpy_float64, units='ft', desc='Altitudes, in increasing order, for variable dynamic pressure limit schedule') vqlm = Array(dtype=numpy_float64, units='psf', desc='Corresponding dynamic pressure limits') class FlopsWrapper_input_missin_Basic(VariableTree): """Container for input.missin.Basic""" # OpenMDAO Public Variables indr = Enum(0, (0,1), desc='= 0, DESRNG is design range in n.mi.\n= 1, DESRNG is endurance in minutes', aliases=('Range', 'Endurance')) fact = Float(1.0, desc='Factor to increase or decrease fuel flows. 
Cumulative with FFFSUB and FFFSUP in Namelist &ENGDIN.')
    # Global mission scale factors, print switches and radius-mission controls.
    fleak = Float(0.0, units='lb/h', desc='Constant delta fuel flow')
    fcdo = Float(1.0, desc='Factor to increase or decrease lift-independent drag coefficients')
    fcdi = Float(1.0, desc='Factor to increase or decrease lift-dependent drag coefficients')
    fcdsub = Float(1.0, desc='Factor to increase or decrease all subsonic drag coefficients. Cumulative with FCDO and FCDI.')
    fcdsup = Float(1.0, desc='Factor to increase or decrease all supersonic drag coefficients. Cumulative with FCDO and FCDI.')
    iskal = Enum(1, (1,0), desc='Special option used to turn off engine scaling using THRUST/THRSO', aliases=('Scale engine', 'No scaling'))
    owfact = Float(1.0, desc='Factor for increasing or decreasing OWE')
    iflag = Enum(0, (0,1,2,3), desc='Mission print option', aliases=('Mission summary only', 'Plus cruise', 'Plus climb & descent', 'Plus scaled engine'))
    msumpt = Enum(0, (1,0), desc='Option to calculate and print detailed mission summary', aliases=('Yes', 'No'))
    dtc = Float(0.0, units='degC', desc='Deviation from standard day temperature (See also DTCT in Namelist &TOLIN and DTCE in Namelist &ENGINE. These temperature deviations are independent.)')
    irw = Enum(2, (1,2), desc='Range/weight calculation option', aliases=('Range fixed-calculate ramp weight', 'Ramp weight fixed-calculate range'))
    rtol = Float(0.001, units='nmi', desc='Tolerance in range calculation for IRW = 1')
    nhold = Int(0, desc='Special option - Time for segment NHOLD (which must be a Hold Segment) is adjusted until the specified range is met for the input ramp weight. Note - IRW must be 1')
    iata = Enum(1, (1,0), desc='Option to adjust range for ATA Traffic Allowance', aliases=('Yes', 'No'))
    tlwind = Float(0.0, units='nmi', desc='Velocity of tail wind (Input negative value for head wind)')
    dwt = Float(1.0, units='lb', desc='Gross weight increment for performance tables (Default is internally computed)')
    offdr = Array(dtype=numpy_float64, units='nmi', desc='Off design range. Note: This simply performs the defined mission with the sized airplane with a different design range. If more changes are desired or if additional analyses are required (e.g., cost analysis), use Namelist &RERUN. If OFFDR is used with a cost analysis, costs will be computed for the last design range.')
    idoq = Enum(0, (1,0), desc='Form for drag increments', aliases=('D/q', 'Drag coefficients'))
    nsout = Int(0, desc='Last segment number in outbound leg (Combat Radius Mission - Iterates until outbound leg and inbound leg are equal. IRW must be equal to 2, and there must be at least two cruise segments). If NSOUT = 0, radius is not calculated')
    nsadj = Int(0, desc='Cruise segment in outbound leg to be adjusted for radius calculation (Default = NSOUT). Note: Make sure that the NSADJ Cruise segment is terminated on total rather than segment distance in the Mission Definition Data.')
    mirror = Int(0, desc='Cruise segment in inbound leg to be set equal to segment NSADJ (if MIRROR = 0, only total leg lengths are forced to be equal). This option would be used for a high-low-low-high mission where the dash in and dash out are unknown but must be equal to each other. NSADJ would be the dash in segment number, and MIRROR would be the dash out segment number.')


class FlopsWrapper_input_missin(VariableTree):
    """Container for input.missin"""

    # Aggregates every &MISSIN sub-tree defined above into one container.
    # VariableTrees
    Basic = VarTree(FlopsWrapper_input_missin_Basic())
    Climb = VarTree(FlopsWrapper_input_missin_Climb())
    Cruise = VarTree(FlopsWrapper_input_missin_Cruise())
    Descent = VarTree(FlopsWrapper_input_missin_Descent())
    Ground_Operations = VarTree(FlopsWrapper_input_missin_Ground_Operations())
    Reserve = VarTree(FlopsWrapper_input_missin_Reserve())
    Store_Drag = VarTree(FlopsWrapper_input_missin_Store_Drag())
    Turn_Segments = VarTree(FlopsWrapper_input_missin_Turn_Segments())
    User_Weights = VarTree(FlopsWrapper_input_missin_User_Weights())


class FlopsWrapper_input_fusein_Basic(VariableTree):
    """Container for input.fusein.Basic"""

    # Cabin seating layout inputs (seat pitch and seats abreast per class).
    # OpenMDAO Public Variables
    fpitch = Float(0.0, units='inch', desc='Seat pitch for the first class passengers')
    nfabr = Int(0, desc='Number of first class passengers abreast')
    bpitch = Float(0.0, units='inch', desc='Seat pitch for business class passengers')
    nbabr = Int(0, desc='Number of business class passengers abreast')
    tpitch = Float(0.0, units='inch', desc='Seat pitch for tourist class passengers')
    ntabr = Int(0, desc='Number of tourist class passengers abreast')


class FlopsWrapper_input_fusein_BWB(VariableTree):
    """Container for input.fusein.BWB"""

    # Blended-wing-body fuselage/cabin geometry inputs.
    # OpenMDAO Public Variables
    osspan = Float(0.0, units='ft', desc='Outboard semispan (Default = ETAW(NETAW), required if ETAW(NETAW) is less than or equal to 1.0 and IFITE = 3 and NETAW > 1)\nThis variable is used if a detailed wing outboard panel (See Detailed Wing Data in Namelist $WTIN) is being added to a BWB fuselage.')
    tipchd = Float(0.0, units='ft', desc='Wing tip chord (Default = 0.06*Wing span)\nThis variable is used if the wing outer panel is defined as a trapezoid attached to the BWB cabin.')
    nesob = Int(0, desc='Wing eta station number for outboard side of body. 
If this variable is greater than 1, the detailed wing definition is assumed to include the cabin. Weight calculations for the outboard wing start at this eta station. (If = 0, the detailed outboard wing is added to the cabin as indicated above.)')
    acabin = Float(0.0, units='ft*ft', desc='Fixed area of passenger cabin for blended wing body transports (Default is internally computed based on passenger data)')
    xlw = Float(0.0, units='ft', desc='Fixed length of side wall.\nThis is the outboard wall of the passenger cabin and is used to define the outboard wing root chord.')
    xlwmin = Float(0.0, units='ft', desc='Minimum side wall length. The typical value of 38.5 ft is based on a required maximum depth at the side wall of 8.25 ft divided by a fuselage thickness/chord ratio of 0.15 and 70 percent of the resulting wing root chord of 55 ft.')
    nbay = Int(0, desc='Fixed number of bays')
    nbaymx = Int(0, desc='Maximum number of bays')
    bayw = Float(0.0, units='ft', desc='Fixed bay width')
    baywmx = Float(0.0, units='ft', desc='Maximum bay width')
    swple = Float(45.0, units='deg', desc='Sweep angle of the leading edge of the passenger cabin')
    cratio = Float(0.0, desc='Fixed ratio of the centerline length to the cabin width (XLP/WF)')
    tcf = Float(0.0, desc='Fuselage thickness/chord ratio (Default = TCA, Namelist &CONFIN)')
    tcsob = Float(0.0, desc='Fuselage thickness/chord ratio at side of body (Default = TCF)')
    rspchd = Float(0.0, desc='Rear spar percent chord for BWB fuselage and wing (Default = 70 percent)')
    rspsob = Float(0.0, desc='Rear spar percent chord for BWB fuselage at side of body (Default = 70 percent)')


class FlopsWrapper_input_fusein(VariableTree):
    """Container for input.fusein"""

    # Aggregates the fuselage input sub-trees.
    # VariableTrees
    BWB = VarTree(FlopsWrapper_input_fusein_BWB())
    Basic = VarTree(FlopsWrapper_input_fusein_Basic())


class FlopsWrapper_input_engine_deck(VariableTree):
    """Container for input.engine_deck"""

    # Raw engine deck text passed through to FLOPS.
    # OpenMDAO Public Variables
    engdek = Str('')


class FlopsWrapper_input_engine_Other(VariableTree):
    """Container for input.engine.Other"""

    # Miscellaneous engine-cycle inputs (bleeds, limits, inlet/nozzle drag
    # bookkeeping, propeller efficiency, technology year).
    # OpenMDAO Public Variables
    hpcpr = Float(5.0, desc='Pressure ratio of the high pressure (third) compressor (Only used if there are three compressor components)')
    aburn = Bool(False, desc='True if there is an afterburner')
    dburn = Bool(False, desc='True if there is a duct burner (Separate flow turbofans only). ABURN and DBURN cannot both be true.')
    effab = Float(0.85, desc='Afterburner/duct burner efficiency')
    tabmax = Float(3500.0, units='degR', desc='Maximum afterburner/duct burner temperature')
    ven = Bool(False, desc='True if the exhaust nozzle has a variable flow area. The nozzle flow area is automatically allowed to vary for cases when the afterburner or duct burner is on.')
    costbl = Float(1.0, units='lb/s', desc='Customer high pressure compressor bleed')
    fanbl = Float(0.0, desc='Fan bleed fraction, only used for bypass engines')
    hpext = Float(200.0, units='hp', desc='Customer power extraction')
    wcool = Float(-1.0e-4, desc='Turbine cooling flow as a fraction of high pressure compressor mass flow. The cooling flow defaults to the value in the engine cycle definition file. If WCOOL is input greater than or equal to zero the default will be overridden.\nIf WCOOL > 1., the turbine cooling flow fraction required to bring the turbine inlet temperature down to WCOOL will be computed.')
    fhv = Float(18500.0, units='Btu/lb', desc='Fuel heating value')
    dtce = Float(0.0, units='degC', desc='Deviation from standard day temperature. The deviation, as used in the cycle analysis module, is DTCE at sea level and varies to zero at ALC (see below). The design point is at standard temperature.')
    alc = Float(10000.0, units='ft', desc='The altitude at which DTCE (see above) becomes zero.')
    year = Float(1985.0, desc='Technology availability date used to estimate compressor polytropic efficiency')
    boat = Bool(False, desc='True to include boattail drag')
    ajmax = Float(0.0, units='ft*ft', desc='Nozzle reference area for boattail drag. Used only if BOAT = true. Default is the largest of\n1) 1.1 times the inlet capture area\n2) Nozzle exit area at the inlet design point\n3) Estimated engine frontal area\n4) Estimated nozzle entrance area\nor\nIf nacelle weight and geometry calculations are\nperformed (see NGINWT below) AJMAX is set to the\nnacelle cross-sectional area at the customer connect. \nor\nIf AJMAX is less than zero, the cruise design point\nnozzle exit area multiplied by the absolute value\nof AJMAX is used as the reference.')
    spill = Bool(False, desc='True to include spillage and lip drag in engine performance data')
    lip = Bool(False, desc='Compute inlet cowl lip drag. Used only if SPILL = true')
    blmax = Float(-1.0, desc='Inlet bleed flow fraction of total flow at the inlet design point (Default = .016 * AMINDS**1.5). Used only if SPILL = true')
    spldes = Float(0.01, desc='Inlet design spillage fraction. Used only if SPILL = true')
    aminds = Float(0.0, desc='Inlet design Mach number (Default = XMMAX). Used only if SPILL = true')
    alinds = Float(0.0, units='ft', desc='Inlet design altitude (Default = AMAX). Used only if SPILL = true')
    etaprp = Float(0.84, desc='Maximum propeller efficiency (Turboprops only). The actual propeller efficiency is based on an internal schedule of efficiency versus Mach number with the maximum efficiency (ETAPRP) occurring at a Mach number of 0.80. To use the Hamilton Standard Method set ETAPRP=1 and input the propeller characteristics as defined under ')
    shpowa = Float(60.0, units='hp/(lb/s)', desc='Design point shaft horsepower divided by the design point core airflow')
    cdtmax = Float(99999.0, units='degR', desc='Maximum allowable compressor discharge temperature')
    cdpmax = Float(99999.0, units='psi', desc='Maximum allowable compressor discharge pressure')
    vjmax = Float(99999.0, units='ft/s', desc='(IENG < 100) Maximum allowable jet velocity\n(IENG > 100) Propeller tip speed')
    stmin = Float(1.0, units='lb/lb/s', desc='Minimum allowable specific thrust')
    armax = Float(99999.0, desc='Maximum allowable ratio of the bypass area to the core area of a mixed flow turbofan')
    limcd = Enum(1, (0,1,2), desc='Switch to use the compressor discharge temperature and pressure limits only for optimization.', aliases=('Limit at cruise design Mach and altitude only for optimization', 'Limit at all points in envelope', 'Limit max. compressor discharge temp. everywhere'))


class FlopsWrapper_input_engine_Noise_Data(VariableTree):
    """Container for input.engine.Noise_Data"""

    # Controls export of engine-state data for external noise codes.
    # OpenMDAO Public Variables
    nprint = Enum(0, (-1,0,1,2), desc='Noise data print control', aliases=('Print compressor operating line', 'No print', 'Print to ANOPP', 'Print to FOOTPR'))
    #ivat = Enum(0, (0,1), desc='Flag for variable exit area low pressure turbine. 
Used only for estimating LPT exit area when NPRINT=1', aliases=('Fixed', 'Variable'))
    jet = Enum(-1, (-1,0,1,2,3,4,5,6), desc='FOOTPR input data generation control', aliases=('No noise data', 'No jet noise', 'Stone/Clark', 'Kresja', 'Stone ALLJET', 'Stone JET181', 'GE M*S', 'SAE A-21'))
    ftmach = Float(0.0, desc='Mach number to calculate FOOTPR input data')
    ftalt = Float(0.0, desc='Altitude to calculate FOOTPR input data')


class FlopsWrapper_input_engine_IC_Engine(VariableTree):
    """Container for input.engine.IC_Engine"""

    # Internal-combustion (piston) engine and propeller inputs.
    # OpenMDAO Public Variables
    ncyl = Int(4, desc='Number of cylinders')
    deshp = Float(180.0, units='hp', desc='Baseline engine power')
    alcrit = Float(0.0, units='ft', desc='Critical turbocharger altitude. The altitude to which turbocharged IC engines are able to maintain DESHP')
    sfcmax = Float(0.52, units='lb/h/hp', desc='Brake specific fuel consumption at maximum power')
    sfcmin = Float(0.4164, units='lb/h/hp', desc='Minimum brake specific fuel consumption or SFC')
    pwrmin = Float(0.65, desc='Fraction of maximum power where SFCMIN occurs. If NRPM > 0 and PWRMIN > 1 then PWRMIN is the rotational speed where SFCMIN occurs (recommend PWRMIN > 1 if SFCMIN is less than about 0.4')
    engspd = Float(2700.0, units='1/min', desc='Maximum engine crankshaft speed')
    prpspd = Float(2700.0, units='1/min', desc='Maximum propeller shaft speed')
    iwc = Enum(0, (0,1), desc='Cooling system', aliases=('Air cooled', 'Water cooled'))
    ecid = Float(361.0, units='inch*inch*inch', desc='Engine displacement')
    ecr = Float(8.5, desc='Engine compression ratio')
    eht = Float(19.96, units='inch', desc='Engine envelope height')
    ewid = Float(33.37, units='inch', desc='Engine envelope width')
    elen = Float(31.83, units='inch', desc='Engine envelope length')
    ntyp = Enum(2, (1,2,3,4,5,6), desc='Propeller type indicator', aliases=('Fixed pitch', 'Variable pitch', 'Variable pitch + full feathering', 'Variable pitch + full feathering + deicing', 'Variable pitch + full feathering + deicing w/reverse', 'Ducted fan'))
    af = Float(87.6, desc='Activity factor')
    cli = Float(0.569, desc='Integrated design lift coefficient')
    blang = Float(20.0, units='deg', desc='Blade angle for fixed pitch propeller')
    dprop = Float(6.375, units='ft', desc='Propeller diameter')
    nblade = Int(0, desc='Number of blades')
    gbloss = Float(0.02, desc='Gearbox losses, fraction. If PRPSPD = ENGSPD, there are no losses.')
    arrpm = Array(dtype=numpy_float64, units='rpm', desc='Rotational speed (descending order)')
    arpwr = Array(dtype=numpy_float64, units='hp', desc='Engine shaft power at ARRPM(I)')
    arful = Array(dtype=numpy_float64, desc='Engine fuel requirements at ARRPM(I) (Required only if LFUUN is not equal to zero)')
    lfuun = Enum(0, (0,1,2,3), desc='Fuel input type indicator', aliases=('Fuel flows are computed from SFCMAX SFCMIN and PWRMIN', 'Brake specific fuel consumption values are input in ARFUL', 'Actual fuel flows are input in ARFUL (lb/hr)', 'Actual fuel flows are input in ARFUL (gal/hr)'))
    feng = Float(1.0, desc='Scale factor on engine weight')
    fprop = Float(1.0, desc='Scale factor on propeller weight')
    fgbox = Float(1.0, desc='Scale factor on gear box weight')


class FlopsWrapper_input_engine_Engine_Weight(VariableTree):
    """Container for input.engine.Engine_Weight"""

    # Engine/nacelle weight calculation switches (class continues past this
    # chunk; only the visible head is reproduced here).
    # OpenMDAO Public Variables
    nginwt = Enum(0, (-4,-3,-2,-1,0,1,2,3,4,5), desc='Switch for engine weight calculations. Use the negative value to calculate the weight for the initial design and then scale engine weights and dimensions with airflow. Zero or a negative value should always be used during optimization with engine cycle design variables. (IENG > 100 options in parentheses)', aliases=('-Engine + inlet + nacelle + nozzle', '-Engine + inlet + nacelle', '-Engine and inlet', '-Engine only', 'None', 'Engine only (Total prop. system)', 'Engine and inlet (Propeller)', 'Engine + inlet + nacelle (Propeller + cowl + mounts)', 'Engine + inlet + nacelle + nozzle ( Propeller + cowl + mounts + exhaust)', '(Propeller + cowl + mounts + exhaust + alternator)'))
    iwtprt = Enum(1, (0,1,2,3,4), desc='Printout control for engine weight calculations. 
Printout is on file OFILE.', aliases=('No output', 'Print component weights and dimensions', 'Print component design details', 'Plus initial and final optimization data', 'Print component details at each iteration')) iwtplt = Enum(0, (-4,-3,-2,-1,0,1,2,3,4), desc='PostScript plot control for engine (and nacelle) schematics on file PLTFIL. If the negative value is input, only the final design will be plotted.') gratio = Float(1.0, desc='Ratio of the RPM of the low pressure compressor to the RPM of the connected fan') utip1 = Float(0.0, units='ft/s', desc='Tip speed of the first compressor (or fan) in the flow. Default is based on YEAR, engine type, and other design considerations.') rh2t1 = Float(0.0, desc='Hub to tip radius ratio of the first compressor (or fan) in the flow. Default is based on YEAR, engine type, and other design considerations.') igvw = Enum(0, (-2,-1,0,1,2), desc='Flag for compressor inlet guide vanes', aliases=('Variable-no fan IGV', 'Fixed-no fan IGV', 'None', 'Fixed', 'Variable')) trbrpm = Float(0.0, units='rpm', desc='The rotational speed of any free turbine. TRBAN2 is used to set the free turbine rotational speed if TRBRPM is not input. TRBRPM overrides TRBAN2.') trban2 = Float(0.0, units='(inch*inch)/(min*min)', desc='Maximum allowable AN**2 for turbine components. The input value is the actual maximum divided by 10**10. AN**2 is the flow area multiplied by the rotational speed squared. The default is based on year.') trbstr = Float(15000.0, units='psi', desc='Turbine usable stress lower limit. Normally when component weights are predicted, the usable stress is a function of operating conditions. For turbine components, this can be unusually low because cooling effects are not accounted for.') cmpan2 = Float(0.0, units='(inch*inch)/(min*min)', desc='Maximum allowable AN**2 for compressor components. The input value is the actual maximum divided by 10**10. AN**2 is the flow area multiplied by the rotational speed squared. 
The default is based on year.') cmpstr = Float(25000.0, units='psi', desc='Requested compressor usable stress. This forces a change in compressor material when the current (lower temperature) material starts to run out of strength as temperature increases.') vjpnlt = Float(0.0, units='lb', desc='Weight penalty factor for a suppressor to reduce the core jet velocity to 1500 ft/sec') wtebu = Float(0.2, desc='Fraction for weight of engine build up unit (pylon, mounting hardware, etc)') wtcon = Float(0.05, desc='Fraction for weight of engine controls') class FlopsWrapper_input_engine_Design_Point(VariableTree): """Container for input.engine.Design_Point""" # OpenMDAO Public Variables desfn = Float(0.0, units='lb', desc='Engine design point net dry thrust (Default = THRUST, Namelist &CONFIN). Do not use the default for afterburning engines since THRUST is the maximum wet thrust rating. The maximum wet (afterburning) thrust for the generated engine is transferred back to THRSO for scaling with THRUST.') xmdes = Float(-9999.0, desc='Engine optimization point Mach number (Default = VCMN, Namelist &CONFIN). XMDES and XADES are used for propulsion only analyses (IANAL = 4).') xades = Float(-9999.0, units='ft', desc='Engine optimization point altitude (Default = CH, Namelist &CONFIN). If XADES < 0., it is interpreted as the negative of the design point dynamic pressure (psf), and the altitude is back-calculated with a minimum of 0.') oprdes = Float(25.0, desc='Overall pressure ratio') fprdes = Float(1.5, desc='Fan pressure ratio (turbofans only)') bprdes = Float(0.0, desc='Bypass ratio (Turbofans only, Default is computed based on OPRDES, FPRDES, TTRDES, XMDES and ALDES). If BPRDES < -1, then the bypass ratio is computed such that the ratio of the fan to core jet velocities equals the absolute value of BPRDES. For turbine bypass engines, BPRDES must be input and is defined as the fraction of compressor exit airflow that is bypassed around the main burner and the turbine. 
If both EBPR and BPRDES are zero, the optimum bypass ratio is computed at the design Mach number and altitude (XMDES, XADES).') tetdes = Float(2500.0, units='degR', desc='Engine design point turbine entry temperature') ttrdes = Float(1.0, desc='Engine throttle ratio defined as the ratio of the maximum allowable turbine inlet temperature divided by the design point turbine inlet temperature. If TTRDES is greater than TETDES, it is assumed to be the maximum allowable turbine inlet temperature.') class FlopsWrapper_input_engine_Basic(VariableTree): """Container for input.engine.Basic""" # OpenMDAO Public Variables ieng = Enum(1, (0,1,2,3,4,5,6,7,8,9,101), desc='Engine cycle definition input file indicator', aliases=('User-defined', 'Turbojet', 'Separate flow turbofan w/ 2 compressors', 'Mixed flow turbofan w/ 2 compressors', 'Turboprop', 'Turbine bypass', 'Separate flow turofan w/ 3 compressors', 'Mixed flow turbofan w/ 3 compressors', '3-spool separate flow turbofan w/ 3 compressors', '2-spool turbojet', 'IC engine')) iprint = Int(1, desc='Engine cycle analysis printout control. Printout is on file OFILE') gendek = Bool(False, desc='Engine data will be saved on the file designated by EOFILE as an Engine Deck for future use') ithrot = Enum(1, (0,1,2), desc='Controls frequency of part power data generation', aliases=('All Mach-altitude combos', 'Max. altitude for each Mach', 'Max. altitude for max. 
Mach')) npab = Int(0, desc='Maximum number of afterburning throttle settings for each Mach-altitude combination') npdry = Int(15, desc='Maximum number of dry (non-afterburning) throttle settings') xidle = Float(0.05, desc='Fraction of maximum dry thrust used as a cutoff for part power throttle settings') nitmax = Int(50, desc='Maximum iterations per point') xmmax = Float(-1.0, desc='Max Mach number (Default = VCMN, Namelist &CONFIN)') amax = Float(-1.0, units='ft', desc='Max altitude (Default = CH, Namelist &CONFIN)') xminc = Float(0.2, desc='Mach number increment (Default = .2)') ainc = Float(5000.0, units='ft', desc='Altitude increment (Default = 5000.)') qmin = Float(150.0, units='psf', desc='Minimum dynamic pressure') qmax = Float(1200.0, units='psf', desc='Maximum dynamic pressure') class FlopsWrapper_input_engine(VariableTree): """Container for input.engine""" # OpenMDAO Public Variables ifile = Str(desc='Name of cycle definition input file. Used only if IENG = 0.') tfile = Str('ENGTAB', desc='Name of the file containing component map tables.') # VariableTrees Basic = VarTree(FlopsWrapper_input_engine_Basic()) Design_Point = VarTree(FlopsWrapper_input_engine_Design_Point()) Engine_Weight = VarTree(FlopsWrapper_input_engine_Engine_Weight()) IC_Engine = VarTree(FlopsWrapper_input_engine_IC_Engine()) Noise_Data = VarTree(FlopsWrapper_input_engine_Noise_Data()) Other = VarTree(FlopsWrapper_input_engine_Other()) class FlopsWrapper_input_engdin_Special_Options(VariableTree): """Container for input.engdin.Special_Options""" # OpenMDAO Public Variables dffac = Float(0.0, desc='Fuel flow scaling constant term.\nThe engine fuel flow scale factor for ENGSKAL = THRUST/THRSO is\nENGSKAL*[1. + DFFAC + FFFAC*(1. - ENGSKAL)]') fffac = Float(0.0, desc='Fuel flow scaling linear term.\nThe engine fuel flow scale factor for ENGSKAL = THRUST/THRSO is\nENGSKAL*[1. + DFFAC + FFFAC*(1. 
- ENGSKAL)]') emach = Array(dtype=numpy_float64, desc='Array of Mach numbers in descending order at which engine data are to be generated (Default computed internally, Do not zero fill)') alt = Array(zeros(shape=(0,0)), dtype=numpy_float64, units='ft', desc='Arrays of altitudes in descending order, one set for each Mach number, at which engine data are to be generated (Default computed internally, do not zero fill). Altitudes and numbers of altitudes do not have to be consistent between Mach numbers.') insdrg = Enum(0, (0,1,2,3), desc='Nozzle installation drag scaling switch', aliases=('No drag scaling', 'Scale with A10', 'Calculate using A10', 'Calculate for Cd=0 at A9=A9ref')) nab = Int(6969, desc='Table number in CDFILE to be used for afterbody drag') nabref = Int(6969, desc='Table number in CDFILE to be used for reference afterbody drag') a10 = Float(0.0, units='inch*inch', desc='Maximum nozzle area (Required if INSDRG > 0)') a10ref = Float(0.0, units='inch*inch', desc='Reference maximum nozzle area (Required if INSDRG > 0)') a9ref = Float(0.0, units='inch*inch', desc='Reference nozzle exit area (Required if INSDRG = 3)') xnoz = Float(0.0, units='inch', desc='Nozzle length (Required if INSDRG > 0)') xnref = Float(0.0, units='inch', desc='Reference nozzle length (Required if INSDRG > 0)') rcrv = Float(-1.0, desc='Nozzle radius of curvature parameter (Triggers special nozzle drag option)') class FlopsWrapper_input_engdin_Basic(VariableTree): """Container for input.engdin.Basic""" # OpenMDAO Public Variables ngprt = Enum(1, (0,1,2), desc='Print engine data tables', aliases=('No printout', 'Print tables', 'Print sorted tables')) igenen = Enum(0, (-3,-2,-1,0,1), desc='Switch indicating source of Engine Deck', aliases=('Response surfaces', 'External file (horsepower/rpm/fuel flow', 'External file (thrust/fuel flow)', 'Follows namelist &ENGDIN', 'Engine deck to be generated')) extfac = Float(1.0, desc='Slope factor for extrapolating engine fuel flows for thrust levels 
above the maximum for that Mach number and altitude') fffsub = Float(1.0, desc='Fuel flow factor for all subsonic engine points') fffsup = Float(1.0, desc='Fuel flow factor for all supersonic engine points') idle = Int(0, desc='> 0, Flight idle data will be internally generated with zero thrust and an extrapolated fuel flow. The fuel flow must be at least FIDMIN times the fuel flow at power setting number IDLE and no more than FIDMAX times the fuel flow at power setting number IDLE. If NONEG (below) = 0 and negative thrusts exist, an idle power setting is not generated.\n= 0, The lowest input power setting is assumed to be flight idle (Not recommended. Results will be more consistent with IDLE > 0)') noneg = Enum(0, (1,0), desc='Option for using points in the Engine Deck with negative thrust', aliases=('Ignore', 'Use all points')) fidmin = Float(0.08, desc='Minimum fraction of the fuel flow at power setting number IDLE for generated flight idle fuel flows') fidmax = Float(1.0, desc='Maximum fraction of the fuel flow at power setting number IDLE for generated flight idle fuel flows') ixtrap = Int(1, desc='Option for extrapolation of engine data beyond altitudes provided in input data, which may result in radically improved SFC') ifill = Int(2, desc='Option for filling in part power data\n=0, No part power data will be generated\n> 0, Part power cruise data will be filled in for Mach-altitude points for which IFILL (or fewer) thrust levels have been input\nFor NPCODE > 1, data will be filled in for each specified power code that is not input for each Mach-altitude point.') maxcr = Int(2, desc='Maximum power setting used for cruise') nox = Enum(0, (0,1,2,3), desc='Option for NOx emissions data. 
If IGENEN=-2, NOx emissions data are replaced with engine shaft speed, rpm', aliases=('Do not use', 'Indices in engine deck or generated', 'Emissions lb/hr in engine deck', 'Another parameter in engine deck')) pcode = Array(dtype=numpy_float64, desc='Power codes to be used in sorting the Engine Deck. Values correspond to thrust levels in descending order, i.e., climb, maximum continuous, part power cruise settings, and flight idle. Actual values are arbitrary (they are just used as labels), but only points in the Engine Deck with corresponding values for PC will be used.') boost = Float(0.0, desc='> 0., Scale factor for boost engine to be added to baseline engine for takeoff and climb. Climb thrust of the boost engine in the Engine Deck must be artificially increased by 100,000.\n= 0., No boost engine') igeo = Enum(0, (0,1), desc='Engine deck altitude type', aliases=('Geometric', 'Geopotential-will be converted')) class FlopsWrapper_input_engdin(VariableTree): """Container for input.engdin""" # OpenMDAO Public Variables cdfile = Str('') # Special addition for analysis runs where we aren't connected to NPSS. eifile = Str('', desc="Engine deck filename") # VariableTrees Basic = VarTree(FlopsWrapper_input_engdin_Basic()) Special_Options = VarTree(FlopsWrapper_input_engdin_Special_Options()) class FlopsWrapper_input_costin_Mission_Performance(VariableTree): """Container for input.costin.Mission_Performance""" # OpenMDAO Public Variables desmch = Float(0.0, desc='Design Mach number (Default = VCMN, Namelist &CONFIN)') dprsmx = Float(0.0, units='psf', desc='Maximum dynamic pressure (Default = 460. * DESMCH)') veloc = Float(0.0, units='mi/h', desc='Cruise velocity (Default = 660. 
* DESMCH)') blockf = Float(0.9, units='lb', desc='Block fuel, or fraction of aircraft fuel capacity (Default = 0.90 * (FULWMX+FULFMX), Namelist &WTIN)') blockt = Float(0.0, units='h', desc='Block time (Default = DESRNG/VELOC + 0.65)') class FlopsWrapper_input_costin_Cost_Technology(VariableTree): """Container for input.costin.Cost_Technology""" # OpenMDAO Public Variables fafrd = Float(1.0, desc='Technology factor on Airframe R&D') fenrd = Float(1.0, desc='Technology factor on Engine R&D') fmac = Float(1.0, desc='Technology factor on Air conditioning') fmai = Float(1.0, desc='Technology factor on Anti-icing') fmapu = Float(1.0, desc='Technology factor on Auxiliary power unit') fmav = Float(1.0, desc='Technology factor on Avionics') fmbody = Float(1.0, desc='Technology factor on Fuselage') fmcomp = Float(1.0, desc='Technology factor on Composite materials (applied to the wing, tails, fuselage, and nacelles)') fmel = Float(1.0, desc='Technology factor on Electrical systems') fmeng = Float(1.0, desc='Technology factor on Engine') fmensy = Float(1.0, desc='Technology factor on Engine systems') fmfcs = Float(1.0, desc='Technology factor on Surface controls') fmfeq = Float(1.0, desc='Technology factor on Furnishings and equipment') fmfusy = Float(1.0, desc='Technology factor on Fuel systems') fmgear = Float(1.0, desc='Technology factor on Landing gear') fmhyd = Float(1.0, desc='Technology factor on Hydraulic systems') fmins = Float(1.0, desc='Technology factor on Instruments') fmnac = Float(1.0, desc='Technology factor on Nacelles') fmpnm = Float(1.0, desc='Technology factor on Pneumatics') fmtail = Float(1.0, desc='Technology factor on Tail') fmtrv = Float(1.0, desc='Technology factor on Thrust reversers') fmwing = Float(1.0, desc='Technology factor on Wing') foac = Float(1.0, desc='Technology factor on Air conditioning') foai = Float(1.0, desc='Technology factor on Anti-icing') foapu = Float(1.0, desc='Technology factor on Auxiliary power unit') foav = Float(1.0, 
desc='Technology factor on Avionics') fobody = Float(1.0, desc='Technology factor on Fuselage') focomp = Float(1.0, desc='Technology factor on Composite materials') foel = Float(1.0, desc='Technology factor on Electrical systems') fofcs = Float(1.0, desc='Technology factor on Flight control system') fofeq = Float(1.0, desc='Technology factor on Furnishings and equipment') fofusy = Float(1.0, desc='Technology factor on Fuel systems') fogear = Float(1.0, desc='Technology factor on Landing gear') fohyd = Float(1.0, desc='Technology factor on Hydraulic systems') foins = Float(1.0, desc='Technology factor on Instruments') fonac = Float(1.0, desc='Technology factor on Nacelles') fopnm = Float(1.0, desc='Technology factor on Pneumatics') foprop = Float(1.0, desc='Technology factor on Propulsion system') fowing = Float(1.0, desc='Technology factor on Wing') feacsr = Float(1.0, desc='Technology factor on Aircraft servicing') fecfee = Float(1.0, desc='Technology factor on Aircraft control fee') fecrw = Float(1.0, desc='Technology factor on Flight crew') fedep = Float(1.0, desc='Technology factor on Depreciation') feflta = Float(1.0, desc='Technology factor on Flight attendants') feins = Float(1.0, desc='Technology factor on Insurance') felabr = Float(1.0, desc='Technology factor on R&D labor rate') feldfe = Float(1.0, desc='Technology factor on Landing fee') femain = Float(1.0, desc='Technology factor on Maintenance hours') class FlopsWrapper_input_costin_Basic(VariableTree): """Container for input.costin.Basic""" # OpenMDAO Public Variables ac = Float(350.0, units='lb/min', desc='Airconditioning total pack air flow') apuflw = Float(400.0, units='lb/min', desc='Auxiliary power unit flow rate') apushp = Float(170.0, units='hp', desc='Auxiliary power unit shaft horsepower') depper = Float(14.0, units='year', desc='Depreciation period') devst = Float(1980.0, units='year', desc='Development start time') dlbur = Float(2.0, desc='Direct labor burden factor') dyear = Int(1986, 
desc='Desired year for dollar calculations') epr = Float(20.0, desc='Engine pressure ratio at sea level static') fafmsp = Float(0.1, desc='Spares factor for production airframes') fare = Float(0.0, units='USD/pax/mi', desc='Fare (Triggers calculation of return on investment)') fengsp = Float(0.3, desc='Spares factor for production engines') fppft = Float(0.5, desc='Spares factor for prototype and flight test engines') fuelpr = Float(0.5, units='USD/galUS', desc='Fuel price') hydgpm = Float(150.0, desc='Gallon per minute flow of hydraulic pumps') iacous = Enum(0, (0,1), desc='Acoustic treatment in nacelle', aliases=('No', 'Yes')) ibody = Enum(0, (0,1), desc='Body type indicator', aliases=('Narrow', 'Wide')) icirc = Enum(1, (1,2), desc='Circuit indicator - fire detection', aliases=('Single', 'Dual')) icorev = Enum(1, (0,1), desc='Thrust reverser', aliases=('No core reverser', 'Core reverser')) icostp = Enum(1, (1,2,3,4,5), desc='Type of cost calculation desired', aliases=('Life cycle cost (LCC)', 'Acquisition cost', 'Direct operating cost (DOC)', 'Indirect operating cost (IOC)', 'Operating cost only (DOC + IOC - Depreciation)')) idom = Enum(1, (1,2), desc='Operation type indicator', aliases=('Domestic', 'International')) imux = Enum(0, (0,1), desc='Multiplex indicator', aliases=('No multiplex', 'Multiplex')) inozz = Enum(1, (1,2,3,4,5), desc='Nozzle type indicator', aliases=('Translating sleeve', 'Simple target w/ separate flow nozzle', 'Simple target w/ mixed flow nozzle', 'Separate flow exhaust w/o thrust reverser', 'Short duct w/o thrust reverser')) ipflag = Enum(1, (0,1), desc='Print controller for Cost Module', aliases=('Print major elements', 'Print details')) irad = Enum(1, (0,1), desc='Indicator to include research and development', aliases=('Ignore R&D costs', 'Include R&D costs')) irange = Enum(1, (0,1,2), desc='Range indicator', aliases=('Short', 'Medium', 'Long')) ispool = Enum(0, (0,1), desc='Auxiliary power unit complexity indicator', aliases=('Single 
spool fixed vane', 'Double spool variable vane APU')) itran = Enum(0, (0,1), desc='Cargo/baggage transfer operation indicator', aliases=('No transfer', 'Transfer')) iwind = Enum(0, (0,1), desc='Windshield type indicator', aliases=('Flat', 'Curved')) kva = Float(200.0, desc='KVA rating of full-time generators') lf = Float(55.0, desc='Passenger load factor') life = Float(14.0, desc='Number of years for Life Cycle Cost calculation') napu = Int(1, desc='Number of auxiliary power units') nchan = Enum(1, (1,2,3), desc='Number of autopilot channels') nfltst = Int(2, desc='Number of flight test aircraft') ngen = Enum(3, (3,4), desc='Number of inflight operated generators') nins = Int(0, desc='Number of inertial navigation systems') npod = Int(4, desc='Number of podded engines') nprotp = Int(2, desc='Number of prototype aircraft') pctfc = Float(10.0, desc='Percent of seats for first class') plmqt = Float(1984.0, units='year', desc='Planned MQT (150-hour Model Qualification Test or FAA certification)') prorat = Float(15.0, desc='Manufacturers') prproc = Float(0.0, desc='Prior number of engines procured') q = Float(100.0, desc='Airframe production quantities') resid = Float(2.0, desc='Residual value at end of lifetime') roi = Float(10.0, desc='Return on investment (Triggers calculation of required fare)') sfc = Float(0.6, units='lb/h/lb', desc='Engine specific fuel consumption') taxrat = Float(0.33, desc='Corporate tax rate for ROI calculations') temp = Float(1800.0, units='degF', desc='Maximum turbine inlet temperature') class FlopsWrapper_input_costin(VariableTree): """Container for input.costin""" # VariableTrees Basic = VarTree(FlopsWrapper_input_costin_Basic()) Cost_Technology = VarTree(FlopsWrapper_input_costin_Cost_Technology()) Mission_Performance = VarTree(FlopsWrapper_input_costin_Mission_Performance()) class FlopsWrapper_input_confin_Objective(VariableTree): """Container for input.confin.Objective""" # OpenMDAO Public Variables ofg = Float(0.0, desc='Objective 
function weighting factor for gross weight \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') off = Float(1.0, desc='Objective function weighting factor for mission fuel \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofm = Float(0.0, desc='Objective function weighting factor for Mach*(L/D), should be negative to maximize \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofr = Float(0.0, desc='Objective function weighting factor for Range, should be negative to maximize. \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofc = Float(0.0, desc='Objective function weighting factor for Cost \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') osfc = Float(0.0, desc='Objective function weighting factor for Specific Fuel Consumption at the engine design point. Generally used only for engine design cases (IANAL = 4). 
\nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofnox = Float(0.0, desc='Objective function weighting factor for NOx emissions \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofnf = Float(0.0, desc='Objective function weighting factor for flyover noise (used primarily for contour plots) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofns = Float(0.0, desc='Objective function weighting factor for sideline noise (used primarily for contour plots) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofnfom = Float(0.0, desc='Objective function weighting factor for noise figure of merit \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') oarea = Float(0.0, desc='Objective function weighting factor for area of noise footprint (not implemented) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + 
OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') ofh = Float(0.0, desc='Objective function weighting factor for hold time for segment NHOLD (See Namelist &MISSIN) \nThe function that is minimized is\n \n OBJ = OFG*GW \n + OFF*Fuel \n + OFM*VCMN*(Lift/Drag) \n + OFR*Range + OFC*Cost \n + OSFC*SFC \n + OFNOX*NOx \n + OFNF*(Flyover Noise) \n + OFNS*(Sideline Noise) \n + OFNFOM*(Noise Figure of Merit) \n + OFH*(Hold Time for Segment NHOLD)') class FlopsWrapper_input_confin_Design_Variables(VariableTree): """Container for input.confin.Design_Variables""" # OpenMDAO Public Variables gw = Array(dtype=numpy_float64, units='lb', desc='GW(0)=Ramp weight (Required. If IRW = 1, a good initial guess must be input.)\nGW(1)=Activity status, active if > 0\nGW(2)=Lower bound\nGW(3)=Upper bound\nGW(4)=Optimization scale factor') ar = Array(dtype=numpy_float64, desc='AR(0)=Wing aspect ratio\nAR(1)=Activity status, active if > 0\nAR(2)=Lower bound\nAR(3)=Upper bound\nAR(4)=Optimization scale factor') thrust = Array(dtype=numpy_float64, units='lb', desc='THRUST(0)=Maximum rated thrust per engine, or thrust-weight ratio if TWR = -1.\nTHRUST(1)=Activity status, active if > 0\nTHRUST(2)=Lower bound\nTHRUST(3)=Upper bound\nTHRUST(4)=Optimization scale factor') sw = Array(dtype=numpy_float64, units='ft*ft', desc='SW(0)=Reference wing area, or wing loading if WSR = -1.\nSW(1)=Activity status, active if > 0\nSW(2)=Lower bound\nSW(3)=Upper bound\nSW(4)=Optimization scale factor') tr = Array(dtype=numpy_float64, desc='TR(0)=Taper ratio of the wing (Required)\nTR(1)=Activity status, active if > 0\nTR(2)=Lower bound\nTR(3)=Upper bound\nTR(4)=Optimization scale factor') sweep = Array(dtype=numpy_float64, units='deg', desc='SWEEP(0)=Quarter-chord sweep angle of the wing (Required)\nSWEEP(1)=Activity status, active if > 0\nSWEEP(2)=Lower bound\nSWEEP(3)=Upper 
bound\nSWEEP(4)=Optimization scale factor') tca = Array(dtype=numpy_float64, desc='TCA(0)=Wing thickness-chord ratio (weighted average) (Required)\nTCA(1)=Activity status, active if > 0\nTCA(2)=Lower bound\nTCA(3)=Upper bound\nTCA(4)=Optimization scale factor') vcmn = Array(dtype=numpy_float64, desc='VCMN(0)=Cruise Mach number (Required)\nVCMN(1)=Activity status, active if > 0\nVCMN(2)=Lower bound\nVCMN(3)=Upper bound\nVCMN(4)=Optimization scale factor') ch = Array(dtype=numpy_float64, units='ft', desc='CH(0)=Maximum cruise altitude (Required)\nCH(1)=Activity status, active if > 0\nCH(2)=Lower bound\nCH(3)=Upper bound\nCH(4)=Optimization scale factor') varth = Array(dtype=numpy_float64, desc='VARTH(0)=Thrust derating factor for takeoff noise Fraction of full thrust used in takeoff\nVARTH(1)=Activity status, active if > 0\nVARTH(2)=Lower bound\nVARTH(3)=Upper bound\nVARTH(4)=Optimization scale factor') rotvel = Array(dtype=numpy_float64, desc='ROTVEL(0)=Rotation velocity for takeoff noise abatement (default is minimum required to meet takeoff performance constraints)\nROTVEL(1)=Activity status, active if > 0\nROTVEL(2)=Lower bound\nROTVEL(3)=Upper bound\nROTVEL(4)=Optimization scale factor') plr = Array(dtype=numpy_float64, desc='PLR(0)=Thrust fraction after programmed lapse rate (default thrust is specified in each segment)\nPLR(1)=Activity status, active if > 0\nPLR(2)=Lower bound\nPLR(3)=Upper bound\nPLR(4)=Optimization scale factor') etit = Array(dtype=numpy_float64, units='degR', desc='ETIT(0)=Engine design point turbine entry temperature\nETIT(1)=Activity status, active if > 0\nETIT(2)=Lower bound\nETIT(3)=Upper bound\nETIT(4)=Optimization scale factor') eopr = Array(dtype=numpy_float64, desc='EOPR(0)=Overall pressure ratio\nEOPR(1)=Activity status, active if > 0\nEOPR(2)=Lower bound\nEOPR(3)=Upper bound\nEOPR(4)=Optimization scale factor') efpr = Array(dtype=numpy_float64, desc='EFPR(0)=Fan pressure ratio (turbofans only)\nEFPR(1)=Activity status, active if > 
0\nEFPR(2)=Lower bound\nEFPR(3)=Upper bound\nEFPR(4)=Optimization scale factor')
    # NOTE(review): the line above is the verbatim tail of the efpr desc string,
    # split mid-statement by the extraction; left byte-identical.
    # Remaining engine design variables of &CONFIN (each Array packs:
    # [0]=value, [1]=active flag, [2]=lower bound, [3]=upper bound, [4]=scale).
    ebpr = Array(dtype=numpy_float64, desc='EBPR(0)=Bypass ratio (turbofans only)\nEBPR(1)=Activity status, active if > 0\nEBPR(2)=Lower bound\nEBPR(3)=Upper bound\nEBPR(4)=Optimization scale factor')
    ettr = Array(dtype=numpy_float64, desc='ETTR(0)=Engine throttle ratio defined as the ratio of the maximum allowable turbine inlet temperature divided by the design point turbine inlet temperature.\nIf ETTR is greater than ETIT, it is assumed to be the maximum allowable turbine inlet temperature.\nETTR(1)=Activity status, active if > 0\nETTR(2)=Lower bound\nETTR(3)=Upper bound\nETTR(4)=Optimization scale factor')
    ebla = Array(dtype=numpy_float64, units='deg', desc='EBLA(0)=Blade angle for fixed pitch propeller\nEBLA(1)=Activity status, active if > 0\nEBLA(2)=Lower bound\nEBLA(3)=Upper bound\nEBLA(4)=Optimization scale factor')


class FlopsWrapper_input_confin_Basic(VariableTree):
    """Container for input.confin.Basic

    Scalar inputs of the FLOPS &CONFIN namelist (design range, wing
    loading, thrust-weight ratio, tail volume coefficients, etc.).
    """

    # OpenMDAO Public Variables
    desrng = Float(0.0, desc='Design range (or endurance). See INDR in Namelist &MISSIN)\nRequired - if IRW = 2 in Namelist &MISSIN, the range is computed, but a reasonable guess must still be input')
    wsr = Float(0.0, desc='Required wing loading if > 0.\nDo not set WSR > 0 during optimization or if wing area is being varied.\nInterpret SW as wing loading for parametric variation if = -1.\nDo not use for optimization.')
    twr = Float(0.0, desc='Required total thrust-weight ratio if > 0.\nDo not set TWR > 0 during optimization or if thrust is being varied.\nInterpret THRUST as thrust-weight ratio for parametric variation if = -1.\nDo not use for optimization.')
    htvc = Float(0.0, desc='Modified horizontal tail volume coefficient.\nIf HTVC > 0., SHT = HTVC * SW * Sqrt(SW/AR) / XL (This overrides any input value for SHT)\nIf HTVC = 1., the horizontal tail volume coefficient calculated from the input values of SHT, SW, AR and XL will be maintained.')
    vtvc = Float(0.0, desc='Modified vertical tail volume coefficient.\nIf VTVC > 0., SVT = VTVC * SW * Sqrt(SW*AR) / XL (This overrides any input value for SVT)\nIf VTVC = 1., the vertical tail volume coefficient calculated from the input values of SVT, SW, AR and XL will be maintained.')
    pglov = Float(0.0, desc='Fixed ratio of glove area to wing area (GLOV/SW).\nIf PGLOV > 0., GLOV will change if SW changes.')
    fixspn = Float(0.0, units='ft', desc='Special Option - Fixed wing span. If the wing area is being varied or optimized, the wing aspect ratio will be adjusted to maintain a constant span.')
    # NOTE(review): the fixful desc string is split by the extraction at the end
    # of this block; the continuation is on the following source line.
    fixful = Float(0.0, units='lb', desc='Special Option - Fixed mission fuel. Allows specification of mission fuel.\nSince this fuel is normally a fall out (what is left over after OWE and payload are subtracted from the gross weight), this option requires iterating on the gross weight until the mission fuel = FIXFUL. 
Gross weight cannot be an active design variable or used in a parametric variation, and IRW must be 2 in Namelist &MISSIN.')
# NOTE(review): the line above is the verbatim tail of the fixful desc string
# from the preceding source line, split by the extraction; left byte-identical.


class FlopsWrapper_input_confin(VariableTree):
    """Container for input.confin

    Aggregates the three sub-trees of the FLOPS &CONFIN namelist.
    """

    # VariableTrees
    Basic = VarTree(FlopsWrapper_input_confin_Basic())
    Design_Variables = VarTree(FlopsWrapper_input_confin_Design_Variables())
    Objective = VarTree(FlopsWrapper_input_confin_Objective())


class FlopsWrapper_input_asclin(VariableTree):
    """Container for input.asclin

    Scalable aerodynamic data inputs (FLOPS &ASCLIN namelist), used when
    MYAERO = 2 in &AERIN.
    """

    # OpenMDAO Public Variables
    sref = Float(0.0, units='ft*ft', desc='Wing area on which aerodynamic input is based (Default = SW, Namelist &CONFIN). If different from SW, aerodynamics will be scaled.')
    tref = Float(0.0, units='lb', desc='Engine thrust corresponding to nacelle size used in generating aerodynamic input data (Default = THRUST, Namelist &CONFIN). If different from THRUST, aerodynamic data will be modified.')
    awetn = Float(0.0, desc='Nacelle wetted area/SREF')
    eltot = Float(0.0, units='ft', desc='Total configuration length (Default = fuselage length)')
    voltot = Float(0.0, units='ft*ft*ft', desc='Total configuration volume')
    awett = Array(dtype=numpy_float64, desc='Total wetted area/SREF. For variable geometry aircraft, up to NMP values may be input')
    awetw = Array(dtype=numpy_float64, desc='Wing wetted area/SREF')
    elw = Array(dtype=numpy_float64, units='ft', desc='Total length of exposed wing')
    volw = Array(dtype=numpy_float64, units='ft*ft*ft', desc='Total volume of exposed wing')
    form = Array(dtype=numpy_float64, desc='Subsonic form factor for total configuration')
    eql = Array(dtype=numpy_float64, units='ft', desc='Equivalent friction length for total baseline configuration. If EQL is omitted, skin friction drag is computed from component data')
    cdwav = Array(dtype=numpy_float64, desc='Wave drag coefficients (NMP values)')
    dcdnac = Array(dtype=numpy_float64, desc='Delta wave drag coefficients, nacelles on - nacelles off')


class FlopsWrapper_input_aero_data(VariableTree):
    """Container for input.aero_data

    Raw externally supplied aerodynamic data (free-form text block).
    """

    # OpenMDAO Public Variables
    aerodat = Str('')


class FlopsWrapper_input_aerin_Takeoff_Landing(VariableTree):
    """Container for input.aerin.Takeoff_Landing

    Takeoff and landing performance inputs of the FLOPS &AERIN namelist.
    """

    # OpenMDAO Public Variables
    wratio = Float(0.0, desc='Ratio of maximum landing weight to maximum takeoff weight (Default = WLDG/GW if WLDG is input, otherwise for supersonic aircraft Default = 1. - .00009*DESRNG, for subsonic aircraft Default = 1. - .00004*DESRNG)')
    # NOTE(review): units='nmi' on a velocity looks wrong (knots expected for an
    # approach speed) — confirm against the FLOPS user's guide before changing.
    vappr = Float(150.0, units='nmi', desc='Maximum allowable landing approach velocity')
    flto = Float(12000.0, units='ft', desc='Maximum allowable takeoff field length')
    flldg = Float(0.0, units='ft', desc='Maximum allowable landing field length')
    cltom = Float(2.0, desc='Maximum CL in takeoff configuration')
    clldm = Float(3.0, desc='Maximum CL in landing configuration')
    clapp = Float(0.0, desc='Approach CL')
    dratio = Float(1.0, desc='Takeoff and landing air density ratio')
    elodss = Float(0.0, desc='Lift-Drag ratio for second segment climb (Default is internally computed)')
    elodma = Float(0.0, desc='Lift-Drag ratio for missed approach climb (Default is internally computed)')
    thrss = Float(0.0, units='lb', desc='Thrust per baseline engine for second segment climb (Default = THRUST, Namelist &CONFIN)')
    thrma = Float(0.0, units='lb', desc='Thrust per baseline engine for missed approach climb (Default = THRSS)')
    throff = Float(0.0, units='lb', desc='Thrust per baseline engine for takeoff (Default = THRSS)')


class FlopsWrapper_input_aerin_Internal_Aero(VariableTree):
    """Container for input.aerin.Internal_Aero

    Inputs for internally computed (EDET) aerodynamics of the FLOPS
    &AERIN namelist.
    """

    # OpenMDAO Public Variables
    cam = Float(0.0, desc='Maximum camber at 70% semispan, percent of local chord')
    # NOTE(review): the sbase statement is split by the extraction at the end of
    # this block; its right-hand side continues on the following source line.
    sbase = 
Float(0.0, units='ft*ft', desc='Aircraft base area (total exit cross-section area minus inlet capture areas for internally mounted engines)')
    # NOTE(review): the line above is the verbatim right-hand side of the sbase
    # assignment begun on the preceding source line; left byte-identical.
    aitek = Float(1.0, desc='Airfoil technology parameter. Use 1 for conventional wing and 2 for advanced technology wing')
    modaro = Enum(0, (0,1), desc='Data tables in EDET are to be modified, Namelist &ARIDE will be read in', aliases=('No', 'Yes'))
    fcldes = Float(-1.0, desc='Fixed design lift coefficient. If input, overrides design CL computed by EDET.')
    fmdes = Float(-1.0, desc='Fixed design Mach number. If input, overrides design Mach number computed by EDET.')
    xllam = Enum(0, (0,1), desc='Use 0 for Turbulent flow and 1 for Laminar Flow', aliases=('Turbulent', 'Laminar'))
    # Percent laminar flow (LF) by component, upper/lower surfaces.
    truw = Float(0.0, desc='Percent LF wing upper surface')
    trlw = Float(0.0, desc='Percent LF wing low surface')
    truh = Float(0.0, desc='Percent LF horizontal tail upper surface')
    trlh = Float(0.0, desc='Percent LF horizontal tail lower surface')
    truv = Float(0.0, desc='Percent LF vertical tail upper surface')
    trlv = Float(0.0, desc='Percent LF vertical tail lower surface')
    trub = Float(0.0, desc='Percent LF fuselage upper surface')
    trlb = Float(0.0, desc='Percent LF fuselage lower surface')
    trun = Float(0.0, desc='Percent LF nacelle upper surface')
    trln = Float(0.0, desc='Percent LF nacelle lower surface')
    truc = Float(0.0, desc='Percent LF canard upper surface')
    trlc = Float(0.0, desc='Percent LF canard lower surface')
    e = Float(1.0, desc='Aerodynamic efficiency factor: use 1 for normal wing efficiency; normal wing efficiency modified for taper ratio and aspect ratio plus E if < 0; Otherwise, normal wing efficiency multiplied by E')
    # Component wetted areas.
    swetw = Float(1.0, units='ft*ft', desc='Wing wetted area')
    sweth = Float(1.0, units='ft*ft', desc='Horizontal tail wetted area')
    swetv = Float(1.0, units='ft*ft', desc='Vertical tail wetted area')
    swetf = Float(1.0, units='ft*ft', desc='Fuselage wetted area')
    swetn = Float(1.0, units='ft*ft', desc='Nacelle wetted area')
    swetc = Float(1.0, units='ft*ft', desc='Canard wetted area')


class FlopsWrapper_input_aerin_Basic(VariableTree):
    """Container for input.aerin.Basic

    Aerodynamic data source/format switches of the FLOPS &AERIN namelist.
    """

    # OpenMDAO Public Variables
    myaero = Enum(0, (0,1,2,3,4), desc='Controls type of user-supplied aerodynamic data\n= 0, Drag polars are computed internally\n= 1, Aerodynamic Data will be read in\n= 2, Scalable Aerodynamic Data will be input (Namelist &ASCLIN required)\n= 3, Special parabolic Aerodynamic Data format (Namelist &RFHIN required)\n= 4, Use aerodynamic response surface - available only in DOSS version', aliases=('Internal', 'Fixed input', 'Scalable input', 'Parabolic', 'Response surface'))
    iwave = Enum(0, (0,1), desc='Controls Wave Drag Data input type\n= 1, Input Wave Drag Data will be formatted\n= 0, Otherwise', aliases=('No', 'Yes'))
    fwave = Float(1.0, desc='Wave drag factor - multiplies input values of wave drag from formatted aerodynamic data or Namelist &ASCLIN')
    itpaer = Enum(2, (1,2,3), desc='Aerodynamic data interpolation switch\n= 1, Linear - Use if aerodynamic data is irregular. This is usually indicated by strange climb, descent or cruise profiles.\n= 2, Parabolic\n= 3, Parabolic interpolation for CL, linear interpolation for Mach number and altitude.', aliases=('Linear', 'Parabolic', 'Combination'))
    ibo = Enum(0, (0,1), desc='Format indicator for input aerodynamic matrices\n= 1, A new line is started for each Mach number for Cards 4 and for each altitude for Cards 8\n= 0, Data is continuous, 10 to a line', aliases=('Continuous', '1 Mach/line'))


class FlopsWrapper_input_aerin(VariableTree):
    """Container for input.aerin

    Aggregates the three sub-trees of the FLOPS &AERIN namelist.
    """

    # VariableTrees
    Basic = VarTree(FlopsWrapper_input_aerin_Basic())
    Internal_Aero = VarTree(FlopsWrapper_input_aerin_Internal_Aero())
    Takeoff_Landing = VarTree(FlopsWrapper_input_aerin_Takeoff_Landing())


class FlopsWrapper_input(VariableTree):
    """Container for input

    Top-level input tree: one sub-tree per FLOPS namelist/data block.
    """

    # OpenMDAO Public Variables
    title = Str('', desc='Any alphanumeric title')
    aerin = VarTree(FlopsWrapper_input_aerin())
    aero_data = VarTree(FlopsWrapper_input_aero_data())
    asclin = VarTree(FlopsWrapper_input_asclin())
    confin = VarTree(FlopsWrapper_input_confin())
    costin = VarTree(FlopsWrapper_input_costin())
    engdin = VarTree(FlopsWrapper_input_engdin())
    engine = VarTree(FlopsWrapper_input_engine())
    engine_deck = VarTree(FlopsWrapper_input_engine_deck())
    fusein = VarTree(FlopsWrapper_input_fusein())
    missin = VarTree(FlopsWrapper_input_missin())
    mission_definition = VarTree(FlopsWrapper_input_mission_definition())
    nacell = VarTree(FlopsWrapper_input_nacell())
    noisin = VarTree(FlopsWrapper_input_noisin())
    option = VarTree(FlopsWrapper_input_option())
    proin = VarTree(FlopsWrapper_input_proin())
    rfhin = VarTree(FlopsWrapper_input_rfhin())
    syntin = VarTree(FlopsWrapper_input_syntin())
    tolin = VarTree(FlopsWrapper_input_tolin())
    wtin = VarTree(FlopsWrapper_input_wtin())

# pylint: enable-msg=C0301,C0324,R0903


class FlopsWrapper(ExternalCode):
    """Wrapper for FlopsWrapper"""

    # OpenMDAO Public Variables
    # NOTE(review): the ERROR desc string is split by the extraction at the end
    # of this block; the continuation is on the following source line.
    ERROR = Str('none', iotype='out', desc='Error message for FLOPS 
failures')
    # NOTE(review): the fragment above is the verbatim tail of the ERROR desc
    # string from the preceding source line, split by the extraction.
    HINT = Str('none', iotype='out', desc='Hint for resolving error')
    npcon = Int(0, iotype='in', desc='Number of PCONIN namelists to be created')
    nseg = Int(0, iotype='in', desc='Number of SEGIN namelists to be created')
    nrerun = Int(0, iotype='in', desc='Number of RERUN namelists to be created')
    npcons = Array(iotype='in', dtype=numpy_int64, desc='Number of PCONIN ' + 'namelists to be created with each RERUN namelist')

    # Variable Trees
    input = VarTree(FlopsWrapper_input(), iotype='in')
    output = VarTree(FlopsWrapper_output(), iotype='out')

    # This stuff is defined in ExternalCode. I'm preserving it to keep a record
    # of the var names that were used in the MC Java wrapper.
    # ----
    #execute_cmd = Str('flops', iotype='in', desc='Command for executing FLOPS')

    def __init__(self):
        """Constructor for the FlopsWrapper component

        Sets the ExternalCode file plumbing (stdin/stdout/stderr and the
        'flops' command) and initializes the segment bookkeeping counters.
        """
        super(FlopsWrapper, self).__init__()

        # External Code public variables
        self.stdin = 'flops.inp'
        self.stdout = 'flops.out'
        self.stderr = 'flops.err'
        self.command = ['flops']
        self.external_files = [
            FileMetadata(path=self.stdin, input=True),
            FileMetadata(path=self.stdout),
            FileMetadata(path=self.stderr),
        ]

        # This stuff is global in the Java wrap.
        # These are used when adding and removing certain segments.
        self.nseg0 = 0
        self.npcon0 = 0
        self.nrern0 = 0
        self.npcons0 = []
        self.npcons0.append(0)
        self.nmseg = 0

    def execute(self):
        """Run Flops."""
        #Prepare the input files for Flops
        self.generate_input()
        #Run Flops via ExternalCode's execute function
        super(FlopsWrapper, self).execute()
        #Parse the output files from Flops
        self.parse_output()

    def generate_input(self):
        """Creates the FLOPS input file(s) namelists."""
        sb = Namelist(self)
        sb.set_filename(self.stdin)

        # Write the Title Card
        sb.set_title(self.input.title)

        #-------------------
        # Namelist &OPTION
        #-------------------
        sb.add_group('OPTION')
        # NOTE(review): the add_comment string below is split by the extraction
        # at the end of this block; it continues on the following source line.
        sb.add_comment("\n ! 
Program Control, Execution, Analysis and Plot Option Data") iopt = self.input.option.Program_Control.iopt ianal = self.input.option.Program_Control.ianal ineng = self.input.option.Program_Control.ineng itakof = self.input.option.Program_Control.itakof iland = self.input.option.Program_Control.iland nopro = self.input.option.Program_Control.nopro noise = self.input.option.Program_Control.noise icost = self.input.option.Program_Control.icost ifite = self.input.option.Program_Control.ifite mywts = self.input.wtin.Basic.mywts sb.add_container("input.option.Program_Control") sb.add_comment("\n ! Plot files for XFLOPS Graphical Interface Postprocessor (MSMPLOT)") sb.add_var("input.option.Plot_Files.ixfl") sb.add_comment("\n ! Takeoff and Climb Profile File for Noise Calculations (NPROF)") sb.add_var("input.option.Plot_Files.npfile") sb.add_comment("\n ! Drag Polar Plot File (POLPLOT)") sb.add_var("input.option.Plot_Files.ipolp") sb.add_var("input.option.Plot_Files.polalt") nmach = len(self.input.option.Plot_Files.pmach) if nmach > 0: sb.add_newvar("nmach", nmach) sb.add_var("input.option.Plot_Files.pmach") sb.add_comment("\n ! Engine Performance Data Plot File (THRPLOT)") sb.add_var("input.option.Plot_Files.ipltth") sb.add_comment("\n ! Design History Plot File (HISPLOT)") sb.add_var("input.option.Plot_Files.iplths") ipltps = len(self.input.option.Excess_Power_Plot.pltnz) if ipltps > 0: sb.add_comment("\n ! Excess Power Plot File (PSPLOT)") sb.add_newvar("ipltps", ipltps) sb.add_container("input.option.Excess_Power_Plot") # Plotfile names sb.add_comment("\n ! 
Plotfile Names") if self.input.option.Plot_Files.cnfile: sb.add_var("input.option.Plot_Files.cnfile") if self.input.option.Plot_Files.msfile: sb.add_var("input.option.Plot_Files.msfile") if self.input.option.Plot_Files.crfile: sb.add_var("input.option.Plot_Files.crfile") if self.input.option.Plot_Files.tofile : sb.add_var("input.option.Plot_Files.tofile ") if self.input.option.Plot_Files.nofile : sb.add_var("input.option.Plot_Files.nofile ") if self.input.option.Plot_Files.apfile : sb.add_var("input.option.Plot_Files.apfile ") if self.input.option.Plot_Files.thfile : sb.add_var("input.option.Plot_Files.thfile ") if self.input.option.Plot_Files.hsfile : sb.add_var("input.option.Plot_Files.hsfile ") if self.input.option.Plot_Files.psfile : sb.add_var("input.option.Plot_Files.psfile ") #------------------- # Namelist &WTIN #------------------- sb.add_group('WTIN') sb.add_comment("\n ! Geometric, Weight, Balance and Inertia Data") sb.add_container("input.wtin.Basic") sb.add_comment("\n ! Special Option for Operating Weight Empty Calculations") sb.add_container("input.wtin.OEW_Calculations") sb.add_comment("\n ! Wing Data") sb.add_container("input.wtin.Wing_Data") netaw = len(self.input.wtin.Detailed_Wing.etaw) if netaw > 0: sb.add_comment("\n ! Detailed Wing Data") sb.add_newvar("netaw", netaw) sb.add_var("input.wtin.Detailed_Wing.etaw") sb.add_var("input.wtin.Detailed_Wing.chd") sb.add_var("input.wtin.Detailed_Wing.toc") sb.add_var("input.wtin.Detailed_Wing.swl") sb.add_var("input.wtin.Detailed_Wing.etae") sb.add_var("input.wtin.Detailed_Wing.pctl") sb.add_var("input.wtin.Detailed_Wing.arref") sb.add_var("input.wtin.Detailed_Wing.tcref") sb.add_var("input.wtin.Detailed_Wing.nstd") pdist = self.input.wtin.Detailed_Wing.pdist sb.add_var("input.wtin.Detailed_Wing.pdist") if pdist < 0.0001: sb.add_var("input.wtin.Detailed_Wing.etap") sb.add_var("input.wtin.Detailed_Wing.pval") sb.add_comment("\n ! Tails, Fins, Canards") sb.add_comment("\n ! 
Horizontal Tail Data") sb.add_var("input.wtin.Tails_Fins.sht") sb.add_var("input.wtin.Tails_Fins.swpht") sb.add_var("input.wtin.Tails_Fins.arht") sb.add_var("input.wtin.Tails_Fins.trht") sb.add_var("input.wtin.Tails_Fins.tcht") sb.add_var("input.wtin.Tails_Fins.hht") nvert = self.input.wtin.Tails_Fins.nvert if nvert != 0: sb.add_comment("\n ! Vertical Tail Data") sb.add_var("input.wtin.Tails_Fins.nvert") sb.add_var("input.wtin.Tails_Fins.svt") sb.add_var("input.wtin.Tails_Fins.swpvt") sb.add_var("input.wtin.Tails_Fins.arvt") sb.add_var("input.wtin.Tails_Fins.trvt") sb.add_var("input.wtin.Tails_Fins.tcvt") nfin = self.input.wtin.Tails_Fins.nfin if nfin != 0: sb.add_comment("\n ! Fin Data") sb.add_var("input.wtin.Tails_Fins.nfin") sb.add_var("input.wtin.Tails_Fins.sfin") sb.add_var("input.wtin.Tails_Fins.arfin") sb.add_var("input.wtin.Tails_Fins.trfin") sb.add_var("input.wtin.Tails_Fins.swpfin") sb.add_var("input.wtin.Tails_Fins.tcfin") scan = self.input.wtin.Tails_Fins.scan if scan != 0: sb.add_comment("\n ! Canard Data") sb.add_var("input.wtin.Tails_Fins.scan") sb.add_var("input.wtin.Tails_Fins.swpcan") sb.add_var("input.wtin.Tails_Fins.arcan") sb.add_var("input.wtin.Tails_Fins.trcan") sb.add_var("input.wtin.Tails_Fins.tccan") sb.add_comment("\n ! Fuselage Data") sb.add_container("input.wtin.Fuselage") sb.add_comment("\n ! Landing Gear Data") sb.add_container("input.wtin.Landing_Gear") sb.add_comment("\n ! Propulsion System Data") sb.add_container("input.wtin.Propulsion") sb.add_comment("\n ! Fuel System Data") sb.add_var("input.wtin.Fuel_System.ntank") sb.add_var("input.wtin.Fuel_System.fulwmx") sb.add_var("input.wtin.Fuel_System.fulden") sb.add_var("input.wtin.Fuel_System.fulfmx") sb.add_var("input.wtin.Fuel_System.ifufu") sb.add_var("input.wtin.Fuel_System.fulaux") fuscla = self.input.wtin.Fuel_System.fuscla if fuscla > 0.000001: sb.add_comment("\n ! 
Special method for scaling wing fuel capacity") sb.add_var("input.wtin.Fuel_System.fuelrf") sb.add_var("input.wtin.Fuel_System.fswref") sb.add_var("input.wtin.Fuel_System.fuscla") sb.add_var("input.wtin.Fuel_System.fusclb") sb.add_comment("\n ! Crew and Payload Data") sb.add_container("input.wtin.Crew_Payload") sb.add_comment("\n ! Override Parameters") sb.add_container("input.wtin.Override") sb.add_comment("\n ! Center of Gravity (C.G.) Data") sb.add_container("input.wtin.Center_of_Gravity") inrtia = self.input.wtin.Inertia.inrtia if inrtia != 0: sb.add_comment("\n ! Inertia Data") sb.add_newvar("inrtia", inrtia) sb.add_var("input.wtin.Inertia.zht") sb.add_var("input.wtin.Inertia.zvt") sb.add_var("input.wtin.Inertia.zfin") sb.add_var("input.wtin.Inertia.yfin") sb.add_var("input.wtin.Inertia.zef") sb.add_var("input.wtin.Inertia.yef") sb.add_var("input.wtin.Inertia.zea") sb.add_var("input.wtin.Inertia.yea") sb.add_var("input.wtin.Inertia.zbw") sb.add_var("input.wtin.Inertia.zap") sb.add_var("input.wtin.Inertia.zrvt") sb.add_var("input.wtin.Inertia.ymlg") sb.add_var("input.wtin.Inertia.yfuse") sb.add_var("input.wtin.Inertia.yvert") sb.add_var("input.wtin.Inertia.swtff") sb.add_var("input.wtin.Inertia.tcr") sb.add_var("input.wtin.Inertia.tct") sb.add_var("input.wtin.Inertia.incpay") l = len(self.input.wtin.Inertia.tx) sb.add_newvar("itank", l) if l > 0: sb.add_var("input.wtin.Inertia.tx") sb.add_var("input.wtin.Inertia.ty") sb.add_var("input.wtin.Inertia.tz") j = len(self.input.wtin.Inertia.tl) if j > 0: sb.add_var("input.wtin.Inertia.tl") sb.add_var("input.wtin.Inertia.tw") sb.add_var("input.wtin.Inertia.td") j = self.input.wtin.Inertia.tf.shape[0] sb.add_newvar("nfcon", j) if l*j > 0: sb.add_var("input.wtin.Inertia.tf") #------------------- # Namelist &FUSEIN #------------------- # Namelist &FUSEIN is only required if XL=0 or IFITE=3. xl = self.input.wtin.Fuselage.xl if xl < 0.0000001 or ifite == 3: sb.add_group('FUSEIN') sb.add_comment("\n ! 
Fuselage Design Data") sb.add_container("input.fusein.Basic") sb.add_container("input.fusein.BWB") #------------------- # Namelist &CONFIN #------------------- sb.add_group('CONFIN') sb.add_container("input.confin.Basic") # MC Flops wrapper didn't write these out if iopt was less than 3 # I changed it to match expected behavior when comparing manual FLOPS # if iopt >= 3: sb.add_comment("\n ! Objective Function Definition") sb.add_container("input.confin.Objective") sb.add_comment("\n ! Design Variables") sb.add_var("input.confin.Design_Variables.gw") sb.add_var("input.confin.Design_Variables.ar") sb.add_var("input.confin.Design_Variables.thrust") sb.add_var("input.confin.Design_Variables.sw") sb.add_var("input.confin.Design_Variables.tr") sb.add_var("input.confin.Design_Variables.sweep") sb.add_var("input.confin.Design_Variables.tca") sb.add_var("input.confin.Design_Variables.vcmn") sb.add_var("input.confin.Design_Variables.ch") sb.add_var("input.confin.Design_Variables.varth") sb.add_var("input.confin.Design_Variables.rotvel") sb.add_var("input.confin.Design_Variables.plr") igenen = self.input.engdin.Basic.igenen if igenen in (1, -2): sb.add_comment("\n ! Engine Design Variables") sb.add_var("input.confin.Design_Variables.etit") sb.add_var("input.confin.Design_Variables.eopr") sb.add_var("input.confin.Design_Variables.efpr") sb.add_var("input.confin.Design_Variables.ebpr") sb.add_var("input.confin.Design_Variables.ettr") sb.add_var("input.confin.Design_Variables.ebla") #------------------- # Namelist &AERIN #------------------- sb.add_group('AERIN') myaero = self.input.aerin.Basic.myaero iwave = self.input.aerin.Basic.iwave if myaero != 0: sb.add_comment("\n ! Externally Computed Aerodynamics") sb.add_var("input.aerin.Basic.myaero") sb.add_var("input.aerin.Basic.iwave") if iwave != 0: sb.add_var("input.aerin.Basic.fwave") sb.add_var("input.aerin.Basic.itpaer") sb.add_var("input.aerin.Basic.ibo") else: sb.add_comment("\n ! 
Internally Computed Aerodynamics") sb.add_container("input.aerin.Internal_Aero") sb.add_container("input.aerin.Takeoff_Landing") #------------------- # Namelist &COSTIN #------------------- # Namelist &COSTIN is only required if ICOST=1. if icost != 0: sb.add_group('COSTIN') sb.add_comment("\n ! Cost Calculation Data") sb.add_container("input.costin.Basic") sb.add_comment("\n ! Mission Performance Data") sb.add_container("input.costin.Mission_Performance") sb.add_comment("\n ! Cost Technology Parameters") sb.add_container("input.costin.Cost_Technology") #------------------- # Namelist &ENGDIN #------------------- # Namelist &ENGDIN is only required in IANAL=3 or 4 or INENG=1. if ianal in (3, 4) or ineng == 1: sb.add_group('ENGDIN') sb.add_comment("\n ! Engine Deck Control, Scaling and Usage Data") sb.add_var("input.engdin.Basic.ngprt") sb.add_var("input.engdin.Basic.igenen") sb.add_var("input.engdin.Basic.extfac") sb.add_var("input.engdin.Basic.fffsub") sb.add_var("input.engdin.Basic.fffsup") sb.add_var("input.engdin.Basic.idle") sb.add_var("input.engdin.Basic.noneg") sb.add_var("input.engdin.Basic.fidmin") sb.add_var("input.engdin.Basic.fidmax") sb.add_var("input.engdin.Basic.ixtrap") sb.add_var("input.engdin.Basic.ifill") sb.add_var("input.engdin.Basic.maxcr") sb.add_var("input.engdin.Basic.nox") npcode = len(self.input.engdin.Basic.pcode) if npcode > 0: sb.add_newvar("npcode", npcode) sb.add_var("input.engdin.Basic.pcode") sb.add_var("input.engdin.Basic.boost") sb.add_var("input.engdin.Basic.igeo") sb.add_var("input.engdin.Special_Options.dffac") sb.add_var("input.engdin.Special_Options.fffac") if igenen in (1, -2): j = len(self.input.engdin.Special_Options.emach) l = self.input.engdin.Special_Options.alt.shape[0] if j > 0: # TODO - Find out about fake 2d for new FLOPS double prop # capability. sb.add_var("input.engdin.Special_Options.emach") if l*j > 0: # TODO - Find out about fake 3d for new FLOPS double prop # capability. 
sb.add_var("input.engdin.Special_Options.alt") insdrg = self.input.engdin.Special_Options.insdrg if insdrg != 0: sb.add_comment("\n ! Nozzle installation drag using table look-up") sb.add_newvar("insdrg", insdrg) sb.add_var("input.engdin.Special_Options.nab") sb.add_var("input.engdin.Special_Options.nabref") sb.add_var("input.engdin.Special_Options.a10") sb.add_var("input.engdin.Special_Options.a10ref") sb.add_var("input.engdin.Special_Options.a9ref") sb.add_var("input.engdin.Special_Options.xnoz") sb.add_var("input.engdin.Special_Options.xnref") sb.add_var("input.engdin.Special_Options.rcrv") # TODO - rawInputFile( cdfile, "ENDRAG" ); #cdfile.open # Write out the eifile. This is a new addition. if self.input.engdin.eifile: sb.add_var("input.engdin.eifile") #---------------------- # Namelist Engine deck #---------------------- # Insert the engine deck into the flops input file # If IGENEN=0 the engine deck is part of the input file, otherwise it is an # external file. engine_deck = self.input.engine_deck.engdek if igenen in (0, -2): # engine_deck contains the raw engine deck sb.add_group(engine_deck) else: # engine_deck contains the name of the engine deck file if engine_deck: sb.add_var("input.engine_deck.engdek") #------------------- # Namelist &ENGINE #------------------- # Namelist &ENGINE is only required if IGENEN=-2 or 1. 
if igenen in (-2, 1): sb.add_group('ENGINE') nginwt = self.input.engine.Engine_Weight.nginwt ieng = self.input.engine.Basic.ieng sb.add_var("input.engine.Basic.ieng") sb.add_var("input.engine.Basic.iprint") sb.add_var("input.engine.Basic.gendek") sb.add_var("input.engine.Basic.ithrot") sb.add_var("input.engine.Basic.npab") sb.add_var("input.engine.Basic.npdry") sb.add_var("input.engine.Basic.xidle") sb.add_var("input.engine.Basic.nitmax") if self.input.engine.Basic.xmmax > 0: sb.add_var("input.engine.Basic.xmmax") if self.input.engine.Basic.amax > 0: sb.add_var("input.engine.Basic.amax") if self.input.engine.Basic.xminc > 0: sb.add_var("input.engine.Basic.xminc") if self.input.engine.Basic.ainc > 0: sb.add_var("input.engine.Basic.ainc") if self.input.engine.Basic.qmin > 0: sb.add_var("input.engine.Basic.qmin") if self.input.engine.Basic.qmax > 0: sb.add_var("input.engine.Basic.qmax") sb.add_newvar("nginwt", nginwt) sb.add_container("input.engine.Noise_Data") if self.input.engine.Design_Point.desfn > 0: sb.add_var("input.engine.Design_Point.desfn") if self.input.engine.Design_Point.xmdes > 0: sb.add_var("input.engine.Design_Point.xmdes") if self.input.engine.Design_Point.xades > 0: sb.add_var("input.engine.Design_Point.xades") sb.add_var("input.engine.Design_Point.oprdes") sb.add_var("input.engine.Design_Point.fprdes") sb.add_var("input.engine.Design_Point.bprdes") sb.add_var("input.engine.Design_Point.tetdes") sb.add_var("input.engine.Design_Point.ttrdes") sb.add_var("input.engine.Other.hpcpr") sb.add_var("input.engine.Other.aburn") sb.add_var("input.engine.Other.dburn") sb.add_var("input.engine.Other.effab") sb.add_var("input.engine.Other.tabmax") sb.add_var("input.engine.Other.ven") sb.add_var("input.engine.Other.costbl") sb.add_var("input.engine.Other.fanbl") sb.add_var("input.engine.Other.hpext") sb.add_var("input.engine.Other.wcool") sb.add_var("input.engine.Other.fhv") sb.add_var("input.engine.Other.dtce") sb.add_var("input.engine.Other.alc") 
sb.add_var("input.engine.Other.year") sb.add_comment("\n ! Installation effects") sb.add_var("input.engine.Other.boat") sb.add_var("input.engine.Other.ajmax") if self.input.engine.Other.spill: sb.add_comment("\n ! Installation effects") sb.add_var("input.engine.Other.spill") sb.add_var("input.engine.Other.lip") sb.add_var("input.engine.Other.blmax") sb.add_var("input.engine.Other.spldes") sb.add_var("input.engine.Other.aminds") sb.add_var("input.engine.Other.alinds") sb.add_var("input.engine.Other.etaprp") sb.add_var("input.engine.Other.shpowa") sb.add_comment("\n ! Engine operating constraints") sb.add_var("input.engine.Other.cdtmax") sb.add_var("input.engine.Other.cdpmax") sb.add_var("input.engine.Other.vjmax") sb.add_var("input.engine.Other.stmin") sb.add_var("input.engine.Other.armax") sb.add_var("input.engine.Other.limcd") if nginwt != 0: sb.add_comment("\n ! Engine Weight Calculation Data") sb.add_var("input.engine.Engine_Weight.iwtprt") sb.add_var("input.engine.Engine_Weight.iwtplt") sb.add_var("input.engine.Engine_Weight.gratio") sb.add_var("input.engine.Engine_Weight.utip1") sb.add_var("input.engine.Engine_Weight.rh2t1") sb.add_var("input.engine.Engine_Weight.igvw") sb.add_var("input.engine.Engine_Weight.trbrpm") sb.add_var("input.engine.Engine_Weight.trban2") sb.add_var("input.engine.Engine_Weight.trbstr") sb.add_var("input.engine.Engine_Weight.cmpan2") sb.add_var("input.engine.Engine_Weight.cmpstr") sb.add_var("input.engine.Engine_Weight.vjpnlt") sb.add_var("input.engine.Engine_Weight.wtebu") sb.add_var("input.engine.Engine_Weight.wtcon") if ieng == 101: sb.add_var("input.engine.IC_Engine.ncyl") sb.add_var("input.engine.IC_Engine.deshp") sb.add_var("input.engine.IC_Engine.alcrit") sb.add_var("input.engine.IC_Engine.sfcmax") sb.add_var("input.engine.IC_Engine.sfcmin") sb.add_var("input.engine.IC_Engine.pwrmin") sb.add_var("input.engine.IC_Engine.engspd") sb.add_var("input.engine.IC_Engine.prpspd") if ieng == 101 or igenen == -2 and nginwt > 0: 
sb.add_var("input.engine.IC_Engine.iwc") sb.add_var("input.engine.IC_Engine.ecid") sb.add_var("input.engine.IC_Engine.ecr") if ieng == 101 or igenen == -2: sb.add_var("input.engine.IC_Engine.eht") sb.add_var("input.engine.IC_Engine.ewid") sb.add_var("input.engine.IC_Engine.elen") sb.add_var("input.engine.IC_Engine.ntyp") sb.add_var("input.engine.IC_Engine.af") sb.add_var("input.engine.IC_Engine.cli") sb.add_var("input.engine.IC_Engine.blang") sb.add_var("input.engine.IC_Engine.dprop") sb.add_var("input.engine.IC_Engine.nblade") sb.add_var("input.engine.IC_Engine.gbloss") nrpm = len(self.input.engine.IC_Engine.arrpm) if nrpm > 0: sb.add_comment(" ! power curve input data") sb.add_newvar("nrpm", nrpm) sb.add_var("input.engine.IC_Engine.arrpm") sb.add_var("input.engine.IC_Engine.arpwr") sb.add_var("input.engine.IC_Engine.arful") if self.input.engine.IC_Engine.lfuun != 0: sb.add_var("input.engine.IC_Engine.lfuun") sb.add_var("input.engine.IC_Engine.feng") sb.add_var("input.engine.IC_Engine.fprop") sb.add_var("input.engine.IC_Engine.fgbox") ifile = self.input.engine.ifile tfile = self.input.engine.tfile # The name of the engine cycle definition file to be read in is # set by the value of if IENG. filenames = { 0: "MYCYCL", 1: "TURJET", 2: "TFNSEP", 3: "TFNMIX", 4: "TURPRP", 5: "TBYPAS", 6: "TFNSP3", 7: "TFNMX3", 8: "TFN3SH", 9: "TURJT2", 101: "MYCYCL" } try: ifilNam = filenames[ieng] except KeyError: msg = "Illegal value %s for input.engine.Basic.IENG" % ieng raise KeyError(msg) # TODO - rawInputFile( ifile, ifilNam ) # TODO - rawInputFile( tfile, "ENGTAB" ) sb.add_newvar("tfile", tfile) sb.add_newvar("ifile", ifilNam) #------------------- # Namelist &NACELL #------------------- # Namelist &NACELL is only required if NGINWT != 0 # (note:, still in IGENEN=-2 or 1.) if nginwt != 0: sb.add_group('NACELL') sb.add_comment("\n ! 
Data for Computation of Nacelle Weight.") sb.add_container("input.nacell") #------------------- # Namelist &MISSIN #------------------- # Namelist &MISSIN is only required if IANAL=3 npcon = self.npcon if ianal == 3: sb.add_group('MISSIN') sb.add_comment("\n ! Performance Controls and Factors and Mission Segment Definition") sb.add_var("input.missin.Basic.indr") sb.add_var("input.missin.Basic.fact") sb.add_var("input.missin.Basic.fleak") sb.add_var("input.missin.Basic.fcdo") sb.add_var("input.missin.Basic.fcdi") sb.add_var("input.missin.Basic.fcdsub") sb.add_var("input.missin.Basic.fcdsup") sb.add_var("input.missin.Basic.iskal") sb.add_var("input.missin.Basic.owfact") sb.add_var("input.missin.Basic.iflag") sb.add_var("input.missin.Basic.msumpt") sb.add_var("input.missin.Basic.dtc") sb.add_var("input.missin.Basic.irw") sb.add_var("input.missin.Basic.rtol") sb.add_var("input.missin.Basic.nhold") sb.add_var("input.missin.Basic.iata") sb.add_var("input.missin.Basic.tlwind") sb.add_var("input.missin.Basic.dwt") if len(self.input.missin.Basic.offdr) > 0: sb.add_var("input.missin.Basic.offdr") sb.add_var("input.missin.Basic.idoq") sb.add_newvar("npcon", npcon) nsout = self.input.missin.Basic.nsout if nsout > 0: sb.add_comment("\n ! Combat Radius Mission\n") sb.add_newvar("nsout", nsout) sb.add_var("input.missin.Basic.nsadj") sb.add_var("input.missin.Basic.mirror") i = len(self.input.missin.Store_Drag.stma) if i > 0: sb.add_comment("\n ! Store Drags") sb.add_container("input.missin.Store_Drag") sb.add_var("input.missin.User_Weights.mywts") if mywts == 1: sb.add_comment("\n ! User-Specified Weights") sb.add_var("input.missin.User_Weights.rampwt") sb.add_var("input.missin.User_Weights.dowe") sb.add_var("input.missin.User_Weights.paylod") sb.add_var("input.missin.User_Weights.fuemax") sb.add_comment("\n ! 
Ground Operations and Takeoff and Approach Allowances") sb.add_container("input.missin.Ground_Operations") if len(self.input.missin.Turn_Segments.xnz) > 0: sb.add_var("input.missin.Turn_Segments.xnz") if len(self.input.missin.Turn_Segments.xcl) > 0: sb.add_var("input.missin.Turn_Segments.xcl") if len(self.input.missin.Turn_Segments.xmach) > 0: sb.add_var("input.missin.Turn_Segments.xmach") nclimb = max( len(self.input.missin.Climb.clmmin), len(self.input.missin.Climb.clmmax), len(self.input.missin.Climb.clamax), len(self.input.missin.Climb.nincl), len(self.input.missin.Climb.fwf), len(self.input.missin.Climb.ncrcl), len(self.input.missin.Climb.cldcd), len(self.input.missin.Climb.ippcl), len(self.input.missin.Climb.maxcl) ) # TODO - Ask Karl or Jeff about this # I've removed ioc and ifeath from this. These are parameters, so # their "length" should have nothing to do with how many Cruise # Schedules are in the model. ncruse = max( len(self.input.missin.Cruise.crmach), len(self.input.missin.Cruise.cralt), len(self.input.missin.Cruise.crdcd), len(self.input.missin.Cruise.flrcr), len(self.input.missin.Cruise.crmmin), len(self.input.missin.Cruise.crclmx), len(self.input.missin.Cruise.hpmin), len(self.input.missin.Cruise.ffuel), len(self.input.missin.Cruise.fnox), len(self.input.missin.Cruise.feathf), len(self.input.missin.Cruise.cdfeth) ) nql = len(self.input.missin.Climb.qlalt) ns = len(self.input.missin.Descent.adtab) sb.add_comment("\n ! 
Climb Schedule Definition") sb.add_newvar("nclimb", nclimb) sb.add_var("input.missin.Climb.clmmin") sb.add_var("input.missin.Climb.clmmax") sb.add_var("input.missin.Climb.clamin") sb.add_var("input.missin.Climb.clamax") sb.add_var("input.missin.Climb.nincl") sb.add_var("input.missin.Climb.fwf") sb.add_var("input.missin.Climb.ncrcl") sb.add_var("input.missin.Climb.cldcd") sb.add_var("input.missin.Climb.ippcl") sb.add_var("input.missin.Climb.maxcl") sb.add_var("input.missin.Climb.keasvc") actab = self.input.missin.Climb.actab no = actab.shape[1] if no == 0: no = actab.shape[0] elif no > 0: noval = "" for i in range(0, nclimb): if actab.shape[1] > 0: for j in range(0, actab.shape[1]): if actab[i, j] >= 0.0: n = j+1 noval += n + ", " else: break else: noval += "0, " sb.add_newvar("no", noval) sb.add_var("input.missin.Climb.actab") sb.add_var("input.missin.Climb.vctab") sb.add_var("input.missin.Climb.ifaacl") sb.add_var("input.missin.Climb.ifaade") sb.add_var("input.missin.Climb.nodive") sb.add_var("input.missin.Climb.divlim") sb.add_var("input.missin.Climb.qlim") sb.add_var("input.missin.Climb.spdlim") if nql > 0: sb.add_var("input.missin.Climb.qlalt") sb.add_var("input.missin.Climb.vqlm") sb.add_comment("\n ! 
Cruise Schedule Definition\n") sb.add_newvar("ncruse", ncruse) sb.add_var("input.missin.Cruise.ioc") sb.add_var("input.missin.Cruise.crmach") sb.add_var("input.missin.Cruise.cralt") sb.add_var("input.missin.Cruise.crdcd") sb.add_var("input.missin.Cruise.flrcr") sb.add_var("input.missin.Cruise.crmmin") sb.add_var("input.missin.Cruise.crclmx") sb.add_var("input.missin.Cruise.hpmin") sb.add_var("input.missin.Cruise.ffuel") sb.add_var("input.missin.Cruise.fnox") sb.add_var("input.missin.Cruise.ifeath") sb.add_var("input.missin.Cruise.feathf") sb.add_var("input.missin.Cruise.cdfeth") sb.add_var("input.missin.Cruise.dcwt") sb.add_var("input.missin.Cruise.rcin") if len(self.input.missin.Cruise.wtbm) > 0: sb.add_var("input.missin.Cruise.wtbm") if len(self.input.missin.Cruise.altbm) > 0: sb.add_var("input.missin.Cruise.altbm") sb.add_comment("\n ! Descent Schedule Definition") sb.add_var("input.missin.Descent.ivs") sb.add_var("input.missin.Descent.decl") sb.add_var("input.missin.Descent.demmin") sb.add_var("input.missin.Descent.demmax") sb.add_var("input.missin.Descent.deamin") sb.add_var("input.missin.Descent.deamax") sb.add_var("input.missin.Descent.ninde") sb.add_var("input.missin.Descent.dedcd") sb.add_var("input.missin.Descent.rdlim") sb.add_var("input.missin.Descent.keasvd") if ns > 0: sb.add_newvar("ns", ns) sb.add_var("input.missin.Descent.adtab") sb.add_var("input.missin.Descent.vdtab") sb.add_container("input.missin.Reserve") #---------------------- # Mission definition #---------------------- mission = self.input.mission_definition.mission for seg in mission: sb.add_group(seg) self.nmseg = mission.count('CLIMB') + mission.count('CRUISE') + \ mission.count('REFUEL') + mission.count('RELEASE') + \ mission.count('ACCEL') + mission.count('TURN') + \ mission.count('COMBAT') + mission.count('HOLD') + \ mission.count('DESCENT') #------------------- # Namelist &PCONIN #------------------- # One or more &PCONIN namelists may have been created by the user. 
if npcon > 0 and ianal == 3: for i in range(0, npcon): sb.add_group('PCONIN') sb.add_comment("\n ! Performance Constraint") if self.get("input.pconin%s.conalt" % (i)) >= 0.: sb.add_var("input.pconin%s.conalt" % (i)) if self.get("input.pconin%s.conmch" % (i)) >= 0.: sb.add_var("input.pconin%s.conmch" % (i)) if self.get("input.pconin%s.connz" % (i)) >= 0.: sb.add_var("input.pconin%s.connz" % (i)) if self.get("input.pconin%s.conpc" % (i)) > -10.: sb.add_var("input.pconin%s.conpc" % (i)) if self.get("input.pconin%s.conlim" % (i)) != -999.: sb.add_var("input.pconin%s.conlim" % (i)) if self.get("input.pconin%s.conaux" % (i)) > -1.: sb.add_var("input.pconin%s.conaux" % (i)) if self.get("input.pconin%s.neo" % (i)) >= 0: sb.add_var("input.pconin%s.neo" % (i)) if self.get("input.pconin%s.icstdg" % (i)) >= 0: sb.add_var("input.pconin%s.icstdg" % (i)) if self.get("input.pconin%s.conwt" % (i)) >= 0.: sb.add_var("input.pconin%s.conwt" % (i)) if self.get("input.pconin%s.iconsg" % (i)) >= 0: sb.add_var("input.pconin%s.iconsg" % (i)) if self.get("input.pconin%s.confm" % (i)) >= 0.: sb.add_var("input.pconin%s.confm" % (i)) if self.get("input.pconin%s.conwta" % (i)) != -999.: sb.add_var("input.pconin%s.conwta" % (i)) if self.get("input.pconin%s.icontp" % (i)) >= 0: sb.add_var("input.pconin%s.icontp" % (i)) #-------------------- # Aerodynamic data #-------------------- # Aerodynamic data are placed in the input file if MYAERO > 0. If MYAERO=3, # insert the aerodynamic data after namelist &RFHIN (below), otherwise insert # them here. if myaero > 0 and myaero != 3 and ianal == 3: # aerodat contains the raw aero data sb.add_group(self.input.aero_data.aerodat) #------------------- # Namelist &RFHIN #------------------- # Namelist &RFHIN is only required if MYAERO=3. elif myaero == 3: sb.add_group('RFHIN') mmach = len(self.input.rfhin.tmach) sb.add_comment(" ! 
Aerodynamic Data for Parabolic Drag Polars") sb.add_newvar("mmach", mmach) sb.add_container("input.rfhin") # If MYAERO=3, insert the aerodynamic data here. Otherwise it may have already # been inserted above. # aerodat contains the raw aero data sb.add_group(self.input.aero_data.aerodat) #------------------- # Namelist &ASCLIN #------------------- # Namelist &ASCLIN is only required if MYAERO=2. if myaero == 2: sb.add_group('ASCLIN') sb.add_comment(" ! Scaling Data for Lift Independent Drag") sb.add_var("input.asclin.sref") sb.add_var("input.asclin.tref") sb.add_var("input.asclin.awetn") sb.add_var("input.asclin.eltot") sb.add_var("input.asclin.voltot") if len(self.input.asclin.awett) > 0: sb.add_var("input.asclin.awett") if len(self.input.asclin.awetw) > 0: sb.add_var("input.asclin.awetw") if len(self.input.asclin.elw) > 0: sb.add_var("input.asclin.elw") if len(self.input.asclin.volw) > 0: sb.add_var("input.asclin.volw") if len(self.input.asclin.form) > 0: sb.add_var("input.asclin.form") if len(self.input.asclin.eql) > 0: sb.add_var("input.asclin.eql") ncdwav = len(self.input.asclin.cdwav) if ncdwav > 0: sb.add_var("input.asclin.cdwav") sb.add_var("input.asclin.dcdnac") #------------------- # Namelist &TOLIN #------------------- if itakof == 1 or iland == 1 or nopro == 1: sb.add_group('TOLIN') sb.add_var("input.tolin.Basic.apa") sb.add_var("input.tolin.Basic.dtct") if self.input.tolin.Basic.swref > 0: sb.add_var("input.tolin.Basic.swref") if self.input.tolin.Basic.arret > 0: sb.add_var("input.tolin.Basic.arret") sb.add_var("input.tolin.Basic.whgt") sb.add_var("input.tolin.Basic.alprun") sb.add_var("input.tolin.Basic.tinc") sb.add_var("input.tolin.Basic.rollmu") sb.add_var("input.tolin.Basic.brakmu") sb.add_var("input.tolin.Basic.cdgear") sb.add_var("input.tolin.Basic.cdeout") sb.add_var("input.tolin.Basic.clspol") sb.add_var("input.tolin.Basic.cdspol") sb.add_var("input.tolin.Basic.incgef") sb.add_var("input.tolin.Basic.argef") 
sb.add_var("input.tolin.Basic.itime") sb.add_comment("\n ! Thrust Reverser") sb.add_var("input.tolin.Thrust_Reverser.inthrv") sb.add_var("input.tolin.Thrust_Reverser.rvfact") if len(self.input.tolin.Thrust_Reverser.velrv) > 0: sb.add_var("input.tolin.Thrust_Reverser.velrv") sb.add_var("input.tolin.Thrust_Reverser.thrrv") sb.add_var("input.tolin.Thrust_Reverser.tirvrs") sb.add_var("input.tolin.Thrust_Reverser.revcut") sb.add_var("input.tolin.Thrust_Reverser.clrev") sb.add_var("input.tolin.Thrust_Reverser.cdrev") sb.add_comment("\n ! Integration Intervals (Default values will provide a precision of +/-.25 ft)") sb.add_container("input.tolin.Integration_Intervals") sb.add_comment("\n ! Takeoff Data") if self.input.tolin.Takeoff.cltom > 0: sb.add_var("input.tolin.Takeoff.cltom") sb.add_var("input.tolin.Takeoff.cdmto") sb.add_var("input.tolin.Takeoff.fcdmto") sb.add_var("input.tolin.Takeoff.almxto") if self.input.tolin.Takeoff.obsto > 0: sb.add_var("input.tolin.Takeoff.obsto") sb.add_var("input.tolin.Takeoff.alpto") sb.add_var("input.tolin.Takeoff.clto") sb.add_var("input.tolin.Takeoff.cdto") sb.add_var("input.tolin.Takeoff.inthto") if len(self.input.tolin.Takeoff.velto) > 0: sb.add_var("input.tolin.Takeoff.velto") sb.add_var("input.tolin.Takeoff.thrto") if self.input.tolin.Takeoff.alprot > -99: sb.add_var("input.tolin.Takeoff.alprot") sb.add_var("input.tolin.Takeoff.vrotat") sb.add_var("input.tolin.Takeoff.vangl") sb.add_var("input.tolin.Takeoff.thfact") sb.add_var("input.tolin.Takeoff.ftocl") sb.add_var("input.tolin.Takeoff.ftocd") sb.add_var("input.tolin.Takeoff.igobs") sb.add_var("input.tolin.Takeoff.tdelg") sb.add_var("input.tolin.Takeoff.tigear") sb.add_var("input.tolin.Takeoff.ibal") sb.add_var("input.tolin.Takeoff.itxout") sb.add_comment("\n ! 
Aborted Takeoff Data") sb.add_var("input.tolin.Takeoff.pilott") sb.add_var("input.tolin.Takeoff.tispa") sb.add_var("input.tolin.Takeoff.tibra") sb.add_var("input.tolin.Takeoff.tirva") sb.add_var("input.tolin.Takeoff.ispol") sb.add_var("input.tolin.Takeoff.irev") sb.add_comment("\n ! Landing Data") if self.input.tolin.Landing.clldm > 0: sb.add_var("input.tolin.Landing.clldm") sb.add_var("input.tolin.Landing.cdmld") if self.input.tolin.Landing.fcdmld > 0: sb.add_var("input.tolin.Landing.fcdmld") sb.add_var("input.tolin.Landing.almxld") sb.add_var("input.tolin.Landing.obsld") sb.add_var("input.tolin.Landing.alpld") sb.add_var("input.tolin.Landing.clld") sb.add_var("input.tolin.Landing.cdld") sb.add_var("input.tolin.Landing.inthld") if len(self.input.tolin.Landing.velld) > 0: sb.add_var("input.tolin.Landing.velld") sb.add_var("input.tolin.Landing.thrld") sb.add_var("input.tolin.Landing.thrld") if self.input.tolin.Landing.thdry > 0: sb.add_var("input.tolin.Landing.thdry") sb.add_var("input.tolin.Landing.aprhgt") sb.add_var("input.tolin.Landing.aprang") sb.add_var("input.tolin.Landing.fldcl") sb.add_var("input.tolin.Landing.fldcd") sb.add_var("input.tolin.Landing.tdsink") if self.input.tolin.Landing.vangld > 0: sb.add_var("input.tolin.Landing.vangld") sb.add_var("input.tolin.Landing.noflar") sb.add_var("input.tolin.Landing.tispol") sb.add_var("input.tolin.Landing.ticut") sb.add_var("input.tolin.Landing.tibrak") sb.add_var("input.tolin.Landing.acclim") if self.input.tolin.Landing.magrup > 0: sb.add_var("input.tolin.Landing.magrup") #------------------- # Namelist &PROIN #------------------- # Namelist &PROIN is only required if NOPRO=1. 
if nopro > 0: npol = len(self.input.proin.dflap) sb.add_group('PROIN') sb.add_var("input.proin.npol") if npol > 0: sb.add_var("input.proin.alpro") sb.add_var("input.proin.clpro") sb.add_var("input.proin.cdpro") sb.add_var("input.proin.dflap") sb.add_var("input.proin.ntime") sb.add_var("input.proin.ipcmax") sb.add_var("input.proin.txf") sb.add_var("input.proin.alpmin") sb.add_var("input.proin.gamlim") inm = self.input.proin.inm if inm == 1: sb.add_var("input.proin.inm") sb.add_var("input.proin.iatr") sb.add_var("input.proin.fzf") sb.add_var("input.proin.thclmb") sb.add_var("input.proin.flapid") #------------------- # Namelist &SEGIN #------------------- # One or more &SEGIN namelists may have been created by the user. #nseg = self.nseg if nopro > 0 and self.nseg0 > 0: for i in range(0, self.nseg0): key = self.get("input.segin%s.key" % (i)) nflap = self.get("input.segin%s.nflap" % (i)) ifix = self.get("input.segin%s.ifix" % (i)) engscl = self.get("input.segin%s.engscl" % (i)) afix = self.get("input.segin%s.afix" % (i)) gfix = self.get("input.segin%s.gfix" % (i)) vfix = self.get("input.segin%s.vfix" % (i)) hstop = self.get("input.segin%s.hstop" % (i)) dstop = self.get("input.segin%s.dstop" % (i)) tstop = self.get("input.segin%s.tstop" % (i)) vstop = self.get("input.segin%s.vstop" % (i)) hmin = self.get("input.segin%s.hmin" % (i)) sprate = self.get("input.segin%s.sprate" % (i)) iplr = self.get("input.segin%s.iplr" % (i)) delt = self.get("input.segin%s.delt" % (i)) grdaeo = self.get("input.segin%s.grdaeo" % (i)) grdoeo = self.get("input.segin%s.grdoeo" % (i)) sb.add_group('SEGIN') sb.add_newvar("key", key) if nflap > 0: sb.add_newvar("nflap", nflap) if ifix > 0: sb.add_newvar("ifix", ifix) if engscl >= 0.: sb.add_newvar("engscl", engscl) if afix > -10.: sb.add_newvar("afix", afix) if gfix > -10.: sb.add_newvar("gfix", gfix) if vfix > 0.: sb.add_newvar("vfix", vfix) if hstop > 0.: sb.add_newvar("hstop", hstop) if dstop > 0.: sb.add_newvar("dstop", dstop) if tstop > 0.: 
sb.add_newvar("tstop", tstop) if vstop > 0.: sb.add_newvar("vstop", vstop) if hmin > 0.: sb.add_newvar("hmin", hmin) if sprate >= 0.: sb.add_newvar("sprate", sprate) if iplr >= 0.: sb.add_newvar("iplr", iplr) if delt > 0.: sb.add_newvar("delt", delt) if grdaeo > -1.: sb.add_newvar("grdaeo", grdaeo) if grdoeo > -1.: sb.add_newvar("grdoeo", grdoeo) #------------------- # Namelist &NOISIN #------------------- # Namelist &NOISIN is only required if NOISIN=1. if noise == 1: sb.add_group('NOISIN') sb.add_comment("\n ! Data for Noise Calculations\n ! Noise regulation control") sb.add_var("input.noisin.Basic.iepn") sb.add_var("input.noisin.Basic.depnt") sb.add_var("input.noisin.Basic.depns") sb.add_var("input.noisin.Basic.depnl") sb.add_var("input.noisin.Basic.itrade") sb.add_comment("\n ! Noise sources to be included") ijet = self.input.noisin.Basic.ijet ifan = self.input.noisin.Basic.ifan icore = self.input.noisin.Basic.icore iturb = self.input.noisin.Basic.iturb iprop = self.input.noisin.Basic.iprop iflap = self.input.noisin.Basic.iflap iairf = self.input.noisin.Basic.iairf igear = self.input.noisin.Basic.igear ishld = self.input.noisin.Propagation.ishld ignd = self.input.noisin.Propagation.ignd if ijet > 0: sb.add_newvar("ijet", ijet) if ifan > 0: sb.add_newvar("ifan", ifan) if icore > 0: sb.add_newvar("icore", icore) if iturb > 0: sb.add_newvar("iturb", iturb) if iprop > 0: sb.add_newvar("iprop", iprop) if iflap > 0: sb.add_newvar("iflap", iflap) if iairf > 0: sb.add_newvar("iairf", iairf) if igear > 0: sb.add_newvar("igear", igear) sb.add_comment("\n ! 
Noise Propagation Corrections") sb.add_var("input.noisin.Propagation.isupp") sb.add_var("input.noisin.Propagation.idop") sb.add_newvar("ignd", ignd) sb.add_var("input.noisin.Propagation.iatm") sb.add_var("input.noisin.Propagation.iega") sb.add_newvar("ishld", ishld) sb.add_var("input.noisin.Propagation.deldb") sb.add_var("input.noisin.Propagation.heng") sb.add_var("input.noisin.Propagation.filbw") sb.add_var("input.noisin.Propagation.tdi") sb.add_var("input.noisin.Propagation.rh") sb.add_comment("\n ! Observer Locations") nob = len(self.input.noisin.Observers.xo) if nob > 0: sb.add_newvar("nob", nob) sb.add_var("input.noisin.Observers.xo") sb.add_var("input.noisin.Observers.yo") sb.add_var("input.noisin.Observers.zo") sb.add_var("input.noisin.Observers.ndprt") sb.add_var("input.noisin.Observers.ifoot") sb.add_var("input.noisin.Observers.igeom") if self.input.noisin.Observers.thrn > 0: sb.add_var("input.noisin.Observers.thrn") sb.add_var("input.noisin.Observers.icorr") sb.add_var("input.noisin.Observers.tcorxp") nparam = len(self.input.noisin.Engine_Parameters.aepp) if nparam > 0: sb.add_comment("\n ! Engine Noise Parameters") sb.add_newvar("nparam", nparam) sb.add_container("input.noisin.Engine_Parameters") if ijet != 0: sb.add_comment("\n ! 
Jet Noise Input Data") sb.add_var("input.noisin.Jet.inoz") sb.add_var("input.noisin.Jet.iplug") sb.add_var("input.noisin.Jet.islot") sb.add_var("input.noisin.Jet.iaz") sb.add_var("input.noisin.Jet.dbaz") sb.add_var("input.noisin.Jet.ejdop") sb.add_var("input.noisin.Jet.zmdc") sb.add_var("input.noisin.Jet.gammac") sb.add_var("input.noisin.Jet.gasrc") sb.add_var("input.noisin.Jet.annht") sb.add_var("input.noisin.Jet.zmdf") sb.add_var("input.noisin.Jet.gammap") sb.add_var("input.noisin.Jet.gasrf") sb.add_var("input.noisin.Jet.annhtf") if self.input.noisin.Jet.dhc > 0: sb.add_var("input.noisin.Jet.dhc") sb.add_var("input.noisin.Jet.dhf") sb.add_var("input.noisin.Jet.zl2") sb.add_var("input.noisin.Jet.ifwd") sb.add_var("input.noisin.Jet.ishock") sb.add_var("input.noisin.Jet.zjsupp") if ijet == 5: sb.add_comment("\n ! Jet Noise Input Data for MSjet") sb.add_container("input.noisin.MSJet") if ifan > 0: sb.add_comment("\n ! Fan Noise Data") sb.add_var("input.noisin.Fan.igv") sb.add_var("input.noisin.Fan.ifd") sb.add_var("input.noisin.Fan.iexh") sb.add_var("input.noisin.Fan.nfh") if self.input.noisin.Fan.nstg > 0: sb.add_var("input.noisin.Fan.nstg") sb.add_var("input.noisin.Fan.suppin") sb.add_var("input.noisin.Fan.suppex") sb.add_var("input.noisin.Fan.methtip") sb.add_var("input.noisin.Fan.icomb") sb.add_var("input.noisin.Fan.decmpt") sb.add_var("input.noisin.Fan.gammaf") if self.input.noisin.Fan.nbl > 0: sb.add_var("input.noisin.Fan.nbl") if self.input.noisin.Fan.nvan > 0: sb.add_var("input.noisin.Fan.nvan") if self.input.noisin.Fan.fandia > 0: sb.add_var("input.noisin.Fan.fandia") if self.input.noisin.Fan.fanhub > 0: sb.add_var("input.noisin.Fan.fanhub") if self.input.noisin.Fan.tipmd > 0: sb.add_var("input.noisin.Fan.tipmd") sb.add_var("input.noisin.Fan.rss") sb.add_var("input.noisin.Fan.efdop") sb.add_var("input.noisin.Fan.faneff") if self.input.noisin.Fan.nbl2 > 0: sb.add_var("input.noisin.Fan.nbl2") if self.input.noisin.Fan.nvan2 > 0: 
sb.add_var("input.noisin.Fan.nvan2") if self.input.noisin.Fan.fand2 > 0: sb.add_var("input.noisin.Fan.fand2") if self.input.noisin.Fan.tipmd2 > 0: sb.add_var("input.noisin.Fan.tipmd2") sb.add_var("input.noisin.Fan.rss2") sb.add_var("input.noisin.Fan.efdop2") sb.add_var("input.noisin.Fan.fanef2") if self.input.noisin.Fan.trat > 0: sb.add_var("input.noisin.Fan.trat") if igenen not in [1, -2] and self.input.noisin.Fan.prat > 0: sb.add_var("input.noisin.Fan.prat") if icore > 0: sb.add_comment("\n ! Core Noise Data") sb.add_var("input.noisin.Core.csupp") sb.add_var("input.noisin.Core.gamma") sb.add_var("input.noisin.Core.imod") if self.input.noisin.Core.dtemd > 0: sb.add_var("input.noisin.Core.dtemd") sb.add_var("input.noisin.Core.ecdop") if iturb > 0: sb.add_comment("\n ! Core Noise Data") sb.add_var("input.noisin.Turbine.tsupp") if self.input.noisin.Turbine.tbndia > 0: sb.add_var("input.noisin.Turbine.tbndia") sb.add_var("input.noisin.Turbine.gear") sb.add_var("input.noisin.Turbine.cs") if self.input.noisin.Turbine.nblr > 0: sb.add_var("input.noisin.Turbine.nblr") sb.add_var("input.noisin.Turbine.ityptb") sb.add_var("input.noisin.Turbine.etdop") if iprop > 0: sb.add_comment("\n ! Propeller Noise Data") sb.add_container("input.noisin.Propeller") if ishld > 0: sb.add_comment("\n ! Shielding Effects Data") sb.add_container("input.noisin.Shielding") if iflap > 0: sb.add_comment("\n ! Flap Noise Data") sb.add_container("input.noisin.Flap_Noise") if iairf > 0: sb.add_comment("\n ! Flap Noise Data") sb.add_container("input.noisin.Airframe") if ignd > 0: sb.add_comment("\n ! Ground Reflection Effects Data") sb.add_var("input.noisin.Ground_Effects.itone") nht = len(self.input.noisin.Ground_Effects.dk) if nht > 0: sb.add_newvar("nht", nht) sb.add_var("input.noisin.Ground_Effects.dk") #------------------- # Namelist &SYNTIN #------------------- # Namelist &SYNTIN is only required if IOPT=3. 
if iopt == 3: sb.add_group('SYNTIN') if self.input.syntin.Variables.desrng > 0: sb.add_var("input.syntin.Variables.desrng") if self.input.syntin.Variables.vappr > 0: sb.add_var("input.syntin.Variables.vappr") if self.input.syntin.Variables.flto > 0: sb.add_var("input.syntin.Variables.flto") if self.input.syntin.Variables.flldg > 0: sb.add_var("input.syntin.Variables.flldg") sb.add_var("input.syntin.Variables.exfcap") if igenen == 1: if self.input.syntin.Variables.cdtmax > 0: sb.add_var("input.syntin.Variables.cdtmax") if self.input.syntin.Variables.cdpmax > 0: sb.add_var("input.syntin.Variables.cdpmax") if self.input.syntin.Variables.vjmax > 0: sb.add_var("input.syntin.Variables.vjmax") if self.input.syntin.Variables.stmin > 0: sb.add_var("input.syntin.Variables.stmin") if self.input.syntin.Variables.armax > 0: sb.add_var("input.syntin.Variables.armax") sb.add_var("input.syntin.Variables.gnox") sb.add_var("input.syntin.Variables.roclim") sb.add_var("input.syntin.Variables.dhdtlm") sb.add_var("input.syntin.Variables.tmglim") sb.add_var("input.syntin.Variables.ig") sb.add_var("input.syntin.Variables.ibfgs") sb.add_var("input.syntin.Variables.itfine") sb.add_comment("\n ! 
Optimization Control") sb.add_var("input.syntin.Optimization_Control.ndd") sb.add_var("input.syntin.Optimization_Control.rk") sb.add_var("input.syntin.Optimization_Control.fdd") if self.input.syntin.Optimization_Control.nlin > 0: sb.add_var("input.syntin.Optimization_Control.nlin") sb.add_var("input.syntin.Optimization_Control.nstep") sb.add_var("input.syntin.Optimization_Control.ef") sb.add_var("input.syntin.Optimization_Control.eps") sb.add_var("input.syntin.Optimization_Control.amult") sb.add_var("input.syntin.Optimization_Control.dep") sb.add_var("input.syntin.Optimization_Control.accux") sb.add_var("input.syntin.Optimization_Control.glm") if len(self.input.syntin.Optimization_Control.gfact) > 0: sb.add_var("input.syntin.Optimization_Control.gfact") sb.add_var("input.syntin.Optimization_Control.autscl") sb.add_var("input.syntin.Optimization_Control.icent") sb.add_var("input.syntin.Optimization_Control.rhomin") sb.add_var("input.syntin.Optimization_Control.rhomax") sb.add_var("input.syntin.Optimization_Control.rhodel") sb.add_var("input.syntin.Optimization_Control.itmax") sb.add_var("input.syntin.Optimization_Control.jprnt") sb.add_var("input.syntin.Optimization_Control.rdfun") sb.add_var("input.syntin.Optimization_Control.adfun") #------------------- # Namelist &RERUN #------------------- # One or more &RERUN namelists may have been created by the user. 
#nrerun = self.nrerun if self.nrern0 > 0: for i in range(0, self.nrern0): sb.add_group('RERUN') re_desrng = self.get("input.rerun%s.desrng" % (i)) re_mywts = self.get("input.rerun%s.mywts" % (i)) re_rampwt = self.get("input.rerun%s.rampwt" % (i)) re_dowe = self.get("input.rerun%s.dowe" % (i)) re_paylod = self.get("input.rerun%s.paylod" % (i)) re_fuemax = self.get("input.rerun%s.fuemax" % (i)) re_itakof = self.get("input.rerun%s.itakof" % (i)) re_iland = self.get("input.rerun%s.iland" % (i)) re_nopro = self.get("input.rerun%s.nopro" % (i)) re_noise = self.get("input.rerun%s.noise" % (i)) re_icost = self.get("input.rerun%s.icost" % (i)) re_wsr = self.get("input.rerun%s.wsr" % (i)) re_twr = self.get("input.rerun%s.twr" % (i)) if re_desrng > 0.: sb.add_var("input.rerun%s.desrng" % (i)) if re_mywts >= 0: sb.add_var("input.rerun%s.mywts" % (i)) if re_rampwt >= 0.: sb.add_var("input.rerun%s.rampwt" % (i)) if re_dowe > 0.: sb.add_var("input.rerun%s.dowe" % (i)) if re_paylod > 0.: sb.add_var("input.rerun%s.paylod" % (i)) if re_fuemax > 0.: sb.add_var("input.rerun%s.fuemax" % (i)) if re_itakof == 0: sb.add_var("input.rerun%s.itakof" % (i)) if re_iland == 0: sb.add_var("input.rerun%s.iland" % (i)) if re_nopro == 0: sb.add_var("input.rerun%s.nopro" % (i)) if re_noise == 0: sb.add_var("input.rerun%s.noise" % (i)) if re_icost == 0: sb.add_var("input.rerun%s.icost" % (i)) if re_wsr == 0.: sb.add_var("input.rerun%s.wsr" % (i)) if re_twr == 0.: sb.add_var("input.rerun%s.twr" % (i)) re_indr = self.get("input.rerun%s.missin.Basic.indr" % (i)) re_fact = self.get("input.rerun%s.missin.Basic.fact" % (i)) re_fleak = self.get("input.rerun%s.missin.Basic.fleak" % (i)) re_fcdo = self.get("input.rerun%s.missin.Basic.fcdo" % (i)) re_fcdi = self.get("input.rerun%s.missin.Basic.fcdi" % (i)) re_fcdsub = self.get("input.rerun%s.missin.Basic.fcdsub" % (i)) re_fcdsup = self.get("input.rerun%s.missin.Basic.fcdsup" % (i)) re_iskal = self.get("input.rerun%s.missin.Basic.iskal" % (i)) re_owfact = 
self.get("input.rerun%s.missin.Basic.owfact" % (i)) re_iflag = self.get("input.rerun%s.missin.Basic.iflag" % (i)) re_msumpt = self.get("input.rerun%s.missin.Basic.msumpt" % (i)) re_dtc = self.get("input.rerun%s.missin.Basic.dtc" % (i)) re_irw = self.get("input.rerun%s.missin.Basic.irw" % (i)) re_rtol = self.get("input.rerun%s.missin.Basic.rtol" % (i)) re_nhold = self.get("input.rerun%s.missin.Basic.nhold" % (i)) re_iata = self.get("input.rerun%s.missin.Basic.iata" % (i)) re_tlwind = self.get("input.rerun%s.missin.Basic.tlwind" % (i)) sb.add_group('MISSIN') if re_indr != -999: sb.add_var("input.rerun%s.missin.Basic.indr" % (i)) if re_fact != -999.: sb.add_var("input.rerun%s.missin.Basic.fact" % (i)) if re_fleak != -999.: sb.add_var("input.rerun%s.missin.Basic.fleak" % (i)) if re_fcdo != -999.: sb.add_var("input.rerun%s.missin.Basic.fcdo" % (i)) if re_fcdi != -999.: sb.add_var("input.rerun%s.missin.Basic.fcdi" % (i)) if re_fcdsub != -999.: sb.add_var("input.rerun%s.missin.Basic.fcdsub" % (i)) if re_fcdsup != -999.: sb.add_var("input.rerun%s.missin.Basic.fcdsup" % (i)) if re_iskal != -999: sb.add_var("input.rerun%s.missin.Basic.iskal" % (i)) if re_owfact != -999.: sb.add_var("input.rerun%s.missin.Basic.owfact" % (i)) if re_iflag != -999: sb.add_var("input.rerun%s.missin.Basic.iflag" % (i)) if re_msumpt != -999: sb.add_var("input.rerun%s.missin.Basic.msumpt" % (i)) if re_dtc != -999.: sb.add_var("input.rerun%s.missin.Basic.dtc" % (i)) if re_irw != -999: sb.add_var("input.rerun%s.missin.Basic.irw" % (i)) if re_rtol != -999.: sb.add_var("input.rerun%s.missin.Basic.rtol" % (i)) if re_nhold != -999: sb.add_var("input.rerun%s.missin.Basic.nhold" % (i)) if re_iata != -999: sb.add_var("input.rerun%s.missin.Basic.iata" % (i)) if re_tlwind != -999.: sb.add_var("input.rerun%s.missin.Basic.tlwind" % (i)) re_dwt = self.get("input.rerun%s.missin.Basic.dwt" % (i)) re_offdr = self.get("input.rerun%s.missin.Basic.offdr" % (i)) re_idoq = self.get("input.rerun%s.missin.Basic.idoq" % 
(i)) re_nsout = self.get("input.rerun%s.missin.Basic.nsout" % (i)) re_nsadj = self.get("input.rerun%s.missin.Basic.nsadj" % (i)) re_mirror = self.get("input.rerun%s.missin.Basic.mirror" % (i)) re_stma = self.get("input.rerun%s.missin.Store_Drag.stma" % (i)) re_cdst = self.get("input.rerun%s.missin.Store_Drag.cdst" % (i)) re_istcl = self.get("input.rerun%s.missin.Store_Drag.istcl" % (i)) re_istcr = self.get("input.rerun%s.missin.Store_Drag.istcr" % (i)) re_istde = self.get("input.rerun%s.missin.Store_Drag.istde" % (i)) re_mywts = self.get("input.rerun%s.missin.User_Weights.mywts" % (i)) re_rampwt = self.get("input.rerun%s.missin.User_Weights.rampwt" % (i)) re_dowe = self.get("input.rerun%s.missin.User_Weights.dowe" % (i)) re_paylod = self.get("input.rerun%s.missin.User_Weights.paylod" % (i)) re_fuemax = self.get("input.rerun%s.missin.User_Weights.fuemax" % (i)) re_takotm = self.get("input.rerun%s.missin.Ground_Operations.takotm" % (i)) re_taxotm = self.get("input.rerun%s.missin.Ground_Operations.taxotm" % (i)) re_apprtm = self.get("input.rerun%s.missin.Ground_Operations.apprtm" % (i)) re_appfff = self.get("input.rerun%s.missin.Ground_Operations.appfff" % (i)) re_taxitm = self.get("input.rerun%s.missin.Ground_Operations.taxitm" % (i)) re_ittff = self.get("input.rerun%s.missin.Ground_Operations.ittff" % (i)) re_takoff = self.get("input.rerun%s.missin.Ground_Operations.takoff" % (i)) re_txfufl = self.get("input.rerun%s.missin.Ground_Operations.txfufl" % (i)) re_ftkofl = self.get("input.rerun%s.missin.Ground_Operations.ftkofl" % (i)) re_ftxofl = self.get("input.rerun%s.missin.Ground_Operations.ftxofl" % (i)) re_ftxifl = self.get("input.rerun%s.missin.Ground_Operations.ftxifl" % (i)) re_faprfl = self.get("input.rerun%s.missin.Ground_Operations.faprfl" % (i)) re_xnz = self.get("input.rerun%s.missin.Turn_Segments.xnz" % (i)) re_xcl = self.get("input.rerun%s.missin.Turn_Segments.xcl" % (i)) re_xmach = self.get("input.rerun%s.missin.Turn_Segments.xmach" % (i)) re_nclimb = 
self.get("input.rerun%s.missin.Climb.nclimb" % (i)) re_clmmin = self.get("input.rerun%s.missin.Climb.clmmin" % (i)) re_clmmax = self.get("input.rerun%s.missin.Climb.clmmax" % (i)) re_clamin = self.get("input.rerun%s.missin.Climb.clamin" % (i)) re_clamax = self.get("input.rerun%s.missin.Climb.clamax" % (i)) re_nincl = self.get("input.rerun%s.missin.Climb.nincl" % (i)) re_fwf = self.get("input.rerun%s.missin.Climb.fwf" % (i)) re_ncrcl = self.get("input.rerun%s.missin.Climb.ncrcl" % (i)) re_cldcd = self.get("input.rerun%s.missin.Climb.cldcd" % (i)) re_ippcl = self.get("input.rerun%s.missin.Climb.ippcl" % (i)) re_maxcl = self.get("input.rerun%s.missin.Climb.maxcl" % (i)) re_no = self.get("input.rerun%s.missin.Climb.no" % (i)) re_keasvc = self.get("input.rerun%s.missin.Climb.keasvc" % (i)) re_actab = self.get("input.rerun%s.missin.Climb.actab" % (i)) re_vctab = self.get("input.rerun%s.missin.Climb.vctab" % (i)) re_ifaacl = self.get("input.rerun%s.missin.Climb.ifaacl" % (i)) re_ifaade = self.get("input.rerun%s.missin.Climb.ifaade" % (i)) re_nodive = self.get("input.rerun%s.missin.Climb.nodive" % (i)) re_divlim = self.get("input.rerun%s.missin.Climb.divlim" % (i)) re_qlim = self.get("input.rerun%s.missin.Climb.qlim" % (i)) re_spdlim = self.get("input.rerun%s.missin.Climb.spdlim" % (i)) re_qlalt = self.get("input.rerun%s.missin.Climb.qlalt" % (i)) re_vqlm = self.get("input.rerun%s.missin.Climb.vqlm" % (i)) re_ioc = self.get("input.rerun%s.missin.Cruise.ioc" % (i)) re_crmach = self.get("input.rerun%s.missin.Cruise.crmach" % (i)) re_cralt = self.get("input.rerun%s.missin.Cruise.cralt" % (i)) re_crdcd = self.get("input.rerun%s.missin.Cruise.crdcd" % (i)) re_flrcr = self.get("input.rerun%s.missin.Cruise.flrcr" % (i)) re_crmmin = self.get("input.rerun%s.missin.Cruise.crmmin" % (i)) re_crclmx = self.get("input.rerun%s.missin.Cruise.crclmx" % (i)) re_hpmin = self.get("input.rerun%s.missin.Cruise.hpmin" % (i)) re_ffuel = self.get("input.rerun%s.missin.Cruise.ffuel" % (i)) re_fnox 
= self.get("input.rerun%s.missin.Cruise.fnox" % (i)) re_ifeath = self.get("input.rerun%s.missin.Cruise.ifeath" % (i)) re_feathf = self.get("input.rerun%s.missin.Cruise.feathf" % (i)) re_cdfeth = self.get("input.rerun%s.missin.Cruise.cdfeth" % (i)) re_dcwt = self.get("input.rerun%s.missin.Cruise.dcwt" % (i)) re_rcin = self.get("input.rerun%s.missin.Cruise.rcin" % (i)) re_wtbm = self.get("input.rerun%s.missin.Cruise.wtbm" % (i)) re_altbm = self.get("input.rerun%s.missin.Cruise.altbm" % (i)) re_ivs = self.get("input.rerun%s.missin.Descent.ivs" % (i)) re_decl = self.get("input.rerun%s.missin.Descent.decl" % (i)) re_demmin = self.get("input.rerun%s.missin.Descent.demmin" % (i)) re_demmax = self.get("input.rerun%s.missin.Descent.demmax" % (i)) re_deamin = self.get("input.rerun%s.missin.Descent.deamin" % (i)) re_deamax = self.get("input.rerun%s.missin.Descent.deamax" % (i)) re_ninde = self.get("input.rerun%s.missin.Descent.ninde" % (i)) re_dedcd = self.get("input.rerun%s.missin.Descent.dedcd" % (i)) re_rdlim = self.get("input.rerun%s.missin.Descent.rdlim" % (i)) re_ns = self.get("input.rerun%s.missin.Descent.ns" % (i)) re_irs = self.get("input.rerun%s.missin.Reserve.irs" % (i)) re_resrfu = self.get("input.rerun%s.missin.Reserve.resrfu" % (i)) re_restrp = self.get("input.rerun%s.missin.Reserve.restrp" % (i)) re_timmap = self.get("input.rerun%s.missin.Reserve.timmap" % (i)) re_altran = self.get("input.rerun%s.missin.Reserve.altran" % (i)) re_nclres = self.get("input.rerun%s.missin.Reserve.nclres" % (i)) re_ncrres = self.get("input.rerun%s.missin.Reserve.ncrres" % (i)) re_sremch = self.get("input.rerun%s.missin.Reserve.sremch" % (i)) re_eremch = self.get("input.rerun%s.missin.Reserve.eremch" % (i)) re_srealt = self.get("input.rerun%s.missin.Reserve.srealt" % (i)) re_erealt = self.get("input.rerun%s.missin.Reserve.erealt" % (i)) re_holdtm = self.get("input.rerun%s.missin.Reserve.holdtm" % (i)) re_ncrhol = self.get("input.rerun%s.missin.Reserve.ncrhol" % (i)) re_ihopos = 
self.get("input.rerun%s.missin.Reserve.ihopos" % (i)) re_icron = self.get("input.rerun%s.missin.Reserve.icron" % (i)) re_thold = self.get("input.rerun%s.missin.Reserve.thold" % (i)) re_ncrth = self.get("input.rerun%s.missin.Reserve.ncrth" % (i)) if re_dwt != -999.: sb.add_var("input.rerun%s.missin.Basic.dwt" % (i)) if len(re_offdr) > 0: sb.add_var("input.rerun%s.missin.Basic.offdr" % (i)) if re_idoq != -999: sb.add_var("input.rerun%s.missin.Basic.idoq" % (i)) if re_nsout != -999: sb.add_var("input.rerun%s.missin.Basic.nsout" % (i)) if re_nsadj != -999: sb.add_var("input.rerun%s.missin.Basic.nsadj" % (i)) if re_mirror != -999: sb.add_var("input.rerun%s.missin.Basic.mirror" % (i)) if len(re_stma) > 0: sb.add_var("input.rerun%s.missin.Store_Drag.stma" % (i)) if len(re_cdst) > 0: sb.add_var("input.rerun%s.missin.Store_Drag.cdst" % (i)) if len(re_istcl) > 0: sb.add_var("input.rerun%s.missin.Store_Drag.istcl" % (i)) if len(re_istcr) > 0: sb.add_var("input.rerun%s.missin.Store_Drag.istcr" % (i)) if re_istde != -999: sb.add_var("input.rerun%s.missin.Store_Drag.istde" % (i)) if re_mywts != -999: sb.add_var("input.rerun%s.missin.User_Weights.mywts" % (i)) if re_rampwt != -999.: sb.add_var("input.rerun%s.missin.User_Weights.rampwt" % (i)) if re_dowe != -999.: sb.add_var("input.rerun%s.missin.User_Weights.dowe" % (i)) if re_paylod != -999.: sb.add_var("input.rerun%s.missin.User_Weights.paylod" % (i)) if re_fuemax != -999.: sb.add_var("input.rerun%s.missin.User_Weights.fuemax" % (i)) if re_takotm != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.takotm" % (i)) if re_taxotm != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.taxotm" % (i)) if re_apprtm != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.apprtm" % (i)) if re_appfff != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.appfff" % (i)) if re_taxitm != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.taxitm" % (i)) if re_ittff != -999: 
sb.add_var("input.rerun%s.missin.Ground_Operations.ittff" % (i)) if re_takoff != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.takoff" % (i)) if re_txfufl != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.txfufl" % (i)) if re_ftkofl != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.ftkofl" % (i)) if re_ftxofl != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.ftxofl" % (i)) if re_ftxifl != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.ftxifl" % (i)) if re_faprfl != -999.: sb.add_var("input.rerun%s.missin.Ground_Operations.faprfl" % (i)) if len(re_xnz) > 0: sb.add_var("input.rerun%s.missin.Turn_Segments.xnz" % (i)) if len(re_xcl) > 0: sb.add_var("input.rerun%s.missin.Turn_Segments.xcl" % (i)) if len(re_xmach) > 0: sb.add_var("input.rerun%s.missin.Turn_Segments.xmach" % (i)) if re_nclimb > 0: sb.add_var("input.rerun%s.missin.Climb.nclimb" % (i)) if len(re_clmmin) > 0: sb.add_var("input.rerun%s.missin.Climb.clmmin" % (i)) if len(re_clmmax) > 0: sb.add_var("input.rerun%s.missin.Climb.clmmax" % (i)) if len(re_clamin) > 0: sb.add_var("input.rerun%s.missin.Climb.clamin" % (i)) if len(re_clamax) > 0: sb.add_var("input.rerun%s.missin.Climb.clamax" % (i)) if len(re_nincl) > 0: sb.add_var("input.rerun%s.missin.Climb.nincl" % (i)) if len(re_fwf) > 0: sb.add_var("input.rerun%s.missin.Climb.fwf" % (i)) if len(re_ncrcl) > 0: sb.add_var("input.rerun%s.missin.Climb.ncrcl" % (i)) if len(re_cldcd) > 0: sb.add_var("input.rerun%s.missin.Climb.cldcd" % (i)) if len(re_ippcl) > 0: sb.add_var("input.rerun%s.missin.Climb.ippcl" % (i)) if len(re_maxcl) > 0: sb.add_var("input.rerun%s.missin.Climb.maxcl" % (i)) if len(re_no) > 0: sb.add_var("input.rerun%s.missin.Climb.no" % (i)) if re_keasvc != -999: sb.add_var("input.rerun%s.missin.Climb.keasvc" % (i)) if len(re_actab) > 0: sb.add_var2d("input.rerun%s.missin.Climb.actab" % (i)) if len(re_vctab) > 0: sb.add_var2d("input.rerun%s.missin.Climb.vctab" % (i)) if re_ifaacl != -999: 
sb.add_var("input.rerun%s.missin.Climb.ifaacl" % (i)) if re_ifaade != -999: sb.add_var("input.rerun%s.missin.Climb.ifaade" % (i)) if re_nodive != -999: sb.add_var("input.rerun%s.missin.Climb.nodive" % (i)) if re_divlim != -999.: sb.add_var("input.rerun%s.missin.Climb.divlim" % (i)) if re_qlim != -999.: sb.add_var("input.rerun%s.missin.Climb.qlim" % (i)) if re_spdlim != -999.: sb.add_var("input.rerun%s.missin.Climb.spdlim" % (i)) if len(re_qlalt) > 0: sb.add_var("input.rerun%s.missin.Climb.qlalt" % (i)) if len(re_vqlm) > 0: sb.add_var("input.rerun%s.missin.Climb.vqlm" % (i)) if len(re_ioc) > 0: sb.add_var("input.rerun%s.missin.Cruise.ioc" % (i)) if len(re_crmach) > 0: sb.add_var("input.rerun%s.missin.Cruise.crmach" % (i)) if len(re_cralt) > 0: sb.add_var("input.rerun%s.missin.Cruise.cralt" % (i)) if len(re_crdcd) > 0: sb.add_var("input.rerun%s.missin.Cruise.crdcd" % (i)) if len(re_flrcr) > 0: sb.add_var("input.rerun%s.missin.Cruise.flrcr" % (i)) if len(re_crmmin) > 0: sb.add_var("input.rerun%s.missin.Cruise.crmmin" % (i)) if len(re_crclmx) > 0: sb.add_var("input.rerun%s.missin.Cruise.crclmx" % (i)) if len(re_hpmin) > 0: sb.add_var("input.rerun%s.missin.Cruise.hpmin" % (i)) if len(re_ffuel) > 0: sb.add_var("input.rerun%s.missin.Cruise.ffuel" % (i)) if len(re_fnox) > 0: sb.add_var("input.rerun%s.missin.Cruise.fnox" % (i)) if len(re_ifeath) > 0: sb.add_var("input.rerun%s.missin.Cruise.ifeath" % (i)) if len(re_feathf) > 0: sb.add_var("input.rerun%s.missin.Cruise.feathf" % (i)) if len(re_cdfeth) > 0: sb.add_var("input.rerun%s.missin.Cruise.cdfeth" % (i)) if re_dcwt != -999.: sb.add_var("input.rerun%s.missin.Cruise.dcwt" % (i)) if re_rcin != -999.: sb.add_var("input.rerun%s.missin.Cruise.rcin" % (i)) if len(re_wtbm) > 0: sb.add_var("input.rerun%s.missin.Cruise.wtbm" % (i)) if len(re_altbm) > 0: sb.add_var("input.rerun%s.missin.Cruise.altbm" % (i)) if re_ivs != -999: sb.add_var("input.rerun%s.missin.Descent.ivs" % (i)) if re_decl != -999.: 
sb.add_var("input.rerun%s.missin.Descent.decl" % (i)) if re_demmin != -999.: sb.add_var("input.rerun%s.missin.Descent.demmin" % (i)) if re_demmax != -999.: sb.add_var("input.rerun%s.missin.Descent.demmax" % (i)) if re_deamin != -999.: sb.add_var("input.rerun%s.missin.Descent.deamin" % (i)) if re_deamax != -999.: sb.add_var("input.rerun%s.missin.Descent.deamax" % (i)) if re_ninde != -999: sb.add_var("input.rerun%s.missin.Descent.ninde" % (i)) if re_dedcd != -999.: sb.add_var("input.rerun%s.missin.Descent.dedcd" % (i)) if re_rdlim != -999.: sb.add_var("input.rerun%s.missin.Descent.rdlim" % (i)) ns = len(self.get("input.rerun%s.missin.Descent.adtab" % (i))) if ns > 0: sb.add_comment("\n ! Input Descent Schedule\n") sb.add_newvar('ns', ns) sb.add_var("input.rerun%s.missin.Descent.keasvd" % (i)) sb.add_var("input.rerun%s.missin.Descent.adtab" % (i)) sb.add_var("input.rerun%s.missin.Descent.vdtab" % (i)) if re_irs != -999: sb.add_var("input.rerun%s.missin.Reserve.irs" % (i)) if re_resrfu != -999.: sb.add_var("input.rerun%s.missin.Reserve.resrfu" % (i)) if re_restrp != -999.: sb.add_var("input.rerun%s.missin.Reserve.restrp" % (i)) if re_timmap != -999.: sb.add_var("input.rerun%s.missin.Reserve.timmap" % (i)) if re_altran != -999.: sb.add_var("input.rerun%s.missin.Reserve.altran" % (i)) if re_nclres != -999: sb.add_var("input.rerun%s.missin.Reserve.nclres" % (i)) if re_ncrres != -999: sb.add_var("input.rerun%s.missin.Reserve.ncrres" % (i)) if re_sremch != -999.: sb.add_var("input.rerun%s.missin.Reserve.sremch" % (i)) if re_eremch != -999.: sb.add_var("input.rerun%s.missin.Reserve.eremch" % (i)) if re_srealt != -999.: sb.add_var("input.rerun%s.missin.Reserve.srealt" % (i)) if re_erealt != -999.: sb.add_var("input.rerun%s.missin.Reserve.erealt" % (i)) if re_holdtm != -999.: sb.add_var("input.rerun%s.missin.Reserve.holdtm" % (i)) if re_ncrhol != -999: sb.add_var("input.rerun%s.missin.Reserve.ncrhol" % (i)) if re_ihopos != -999: sb.add_var("input.rerun%s.missin.Reserve.ihopos" 
% (i)) if re_icron != -999: sb.add_var("input.rerun%s.missin.Reserve.icron" % (i)) if re_thold != -999.: sb.add_var("input.rerun%s.missin.Reserve.thold" % (i)) if re_ncrth != -999: sb.add_var("input.rerun%s.missin.Reserve.ncrth" % (i)) sb.add_newvar("NPCON", self.npcons0[i]) # Insert the new mission definition. #infile = self.get("input.rerun%s.mission" % (i)).open() #mission = infile.read() #infile.close() #sb.add_comment(mission) # Get the mission definition mission = self.get("input.rerun%s.mission_definition" % i) for seg in mission: sb.add_group(seg) # Insert the &PCONIN namelists for j in range(0, self.npcons0[i]): re_conalt = self.get("input.rerun%s.pconin%s.conalt" % (i, j)) re_conmch = self.get("input.rerun%s.pconin%s.conmch" % (i, j)) re_connz = self.get("input.rerun%s.pconin%s.connz" % (i, j)) re_conpc = self.get("input.rerun%s.pconin%s.conpc" % (i, j)) re_conlim = self.get("input.rerun%s.pconin%s.conlim" % (i, j)) re_conaux = self.get("input.rerun%s.pconin%s.conaux" % (i, j)) re_neo = self.get("input.rerun%s.pconin%s.neo" % (i, j)) re_icstdg = self.get("input.rerun%s.pconin%s.icstdg" % (i, j)) re_conwt = self.get("input.rerun%s.pconin%s.conwt" % (i, j)) re_iconsg = self.get("input.rerun%s.pconin%s.iconsg" % (i, j)) re_confm = self.get("input.rerun%s.pconin%s.confm" % (i, j)) re_conwta = self.get("input.rerun%s.pconin%s.conwta" % (i, j)) re_icontp = self.get("input.rerun%s.pconin%s.icontp" % (i, j)) sb.add_group('PCONIN') if re_conalt >= 0.: sb.add_newvar("CONALT", re_conalt) if re_conmch >= 0.: sb.add_newvar("CONMCH", re_conmch) if re_connz >= 0.: sb.add_newvar("CONNZ", re_connz) if re_conpc > -10.: sb.add_newvar("CONPC", re_conpc) if re_conlim != -999.: sb.add_newvar("CONLIM", re_conlim) if re_conaux > -1.: sb.add_newvar("CONAUX", re_conaux) if re_neo >= 0: sb.append("NEO", re_neo) if re_icstdg >= 0: sb.add_newvar("ICSTDG", re_icstdg) if re_conwt >= 0.: sb.add_newvar("CONWT", re_conwt) if re_iconsg >= 0: sb.add_newvar("ICONSG", re_iconsg) if re_confm 
>= 0.: sb.add_newvar("CONFM", re_confm) if re_conwta != -999.: sb.add_newvar("CONWTA", re_conwta) if re_icontp >= 0: sb.add_newvar("ICONTP", re_icontp) # Generate the input file for FLOPS sb.generate() def parse_output(self): """Parses the FLOPS output file(s) and populates the component outputs with the data. """ out = FileParser() out.set_file(self.stdout) # added error check Thu Nov 15 2007 ERROR = self.ERROR HINT = self.HINT # Check for namelist read error # Throw new Exception for fatal errors # Continue processing for FLOPS failures (may want to return error # codes to optimizers sometime in the future) out.set_delimiters(" ") try: out.mark_anchor("ERROR READING NAMELIST") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR) out.reset_anchor() try: out.mark_anchor("ERROR READING AERODYNAMIC") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR) out.reset_anchor() try: out.mark_anchor("* * * ENGINE DECK MISSING * * *") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR + \ '\n\nCheck links from "Engine" to "Flops". Make sure EIFILE' + \ 'points to an existing file (default is "ENGDECK.txt" in UserDir.\n\n*****************') out.reset_anchor() try: out.mark_anchor("* * * ONLY ONE ALTITUDE FOR MACH NUMBER") except RuntimeError: pass else: ERROR = out.transfer_line(0) # TODO - Why does MC wrapper do this? # commented out for now #self.output.Performance.range = 0. #self.output.Performance.rampwt = 0. #self.output.Performance.fuel = 0. 
raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR) out.reset_anchor() try: out.mark_anchor("* * * ILLEGAL DATA IN ENGINE DECK * * *") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR) out.reset_anchor() try: #out.mark_anchor("ERROR READING MISSION DEFINITION DATA FROM UNIT") # Loosened this up to find any read error; i've found others out.mark_anchor("ERROR READING") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error reading a file during FLOPS execution.\n %s' % ERROR) out.reset_anchor() try: out.mark_anchor("ERROR IN SEGMENT INPUT DATA") except RuntimeError: pass else: ERROR = out.transfer_line(0) raise RuntimeError('Error during FLOPS execution.\n %s' % ERROR) # Modified this section Fri Mar 5 15:05:09 EST 2010 # there could be failures that recover during optimization iopt = self.input.option.Program_Control.iopt out.reset_anchor() try: if iopt != 3: out.mark_anchor("TITLE, BEGIN OUTPUT OF RESULTS") else: out.mark_anchor("FINAL ANALYSIS") except RuntimeError: # Check invalid results errorArray = [ "* * * ENGINE DECK MISSING * * *", "NO WEIGHT AVAILABLE FOR FUEL", "FAILURE FOR CLIMB SEGMENT", "FAILURE FOR CRUISE CONDITION", "FAILURE FOR DESCENT SEGMENT", "ANALYSIS COULD NOT RECOVER", "INITIAL DESIGN UNACCEPTABLE" ] descArray = [ "Check links from \"Engine\" to \"Flops\". Make sure EIFILE points to an existing file (default is \"ENGDECK.txt\" in UserDir", "Try increasing gross weight (confin.variables.GW1)", "Try increasing thrust and/or wing area and see flops.man", "Try increasing thrust and/or wing area and see flops.man", "Check thrust at flight idle. May need to set IDLE to 1 and see flops.man", "Try tweaking SYNTIN inputs to resolve this (AnalysisControl.syntin.control). 
Also check for other nonfatal failures like failed missed approach climb criterion.", "Make sure any initial design variable are within the upper and lower bounds" ] for i in range(0, len(errorArray)): try: out.reset_anchor() out.mark_anchor(errorArray[i]) ERROR = out.transfer_line(0) HINT = descArray[i] self.output.Performance.range = 0. self.output.Performance.rampwt = 0. self.output.Performance.fuel = 0. break except RuntimeError: ERROR = "None" HINT = "n/a" iopt = self.input.option.Program_Control.iopt ianal = self.input.option.Program_Control.ianal ifite = self.input.option.Program_Control.ifite mywts = self.input.wtin.Basic.mywts inrtia = self.input.wtin.Inertia.inrtia msumpt = self.input.missin.Basic.msumpt noffdr = len(self.input.missin.Basic.offdr) out.reset_anchor() if ifite == 3: out.mark_anchor("PRESSURIZED CABIN DIMENSIONS FOR A") self.output.Geometry.BWB.xlp = out.transfer_var(1, 5) self.output.Geometry.BWB.xlw = out.transfer_var(2, 6) self.output.Geometry.BWB.wf = out.transfer_var(3, 5) self.output.Geometry.BWB.acabin = out.transfer_var(4, 4) self.output.Geometry.BWB.nbaw = out.transfer_var(5, 5) self.output.Geometry.BWB.bayw = out.transfer_var(6, 5) self.output.Geometry.BWB.nlava = out.transfer_var(7, 5) self.output.Geometry.BWB.ngally = out.transfer_var(8, 5) self.output.Geometry.BWB.nclset = out.transfer_var(9, 5) self.output.Geometry.BWB.xl = out.transfer_var(10, 5) self.output.Geometry.BWB.df = out.transfer_var(11, 5) out.reset_anchor() out.mark_anchor("FUSELAGE DATA") self.output.Geometry.xl = out.transfer_var(2, 4) self.output.Geometry.wf = out.transfer_var(3, 4) self.output.Geometry.df = out.transfer_var(4, 4) self.output.Geometry.xlp = out.transfer_var(6, 5) out.reset_anchor() out.mark_anchor( "CREW AND PAYLOAD DATA" ) if ifite != 1: self.output.Payload.npf = out.transfer_var(1, 5) self.output.Payload.npb = out.transfer_var(2, 4) self.output.Payload.npt = out.transfer_var(3, 4) self.output.Payload.nstu = out.transfer_var(4, 3) 
self.output.Payload.ngalc = out.transfer_var(5, 4) self.output.Payload.wppass = out.transfer_var(7, 5) self.output.Payload.bpp = out.transfer_var(8, 5) self.output.Payload.cargow = out.transfer_var(9, 5) self.output.Payload.cargof = out.transfer_var(10, 5) else: self.output.Payload.cargow = out.transfer_var(2, 6) self.output.Payload.cargof = out.transfer_var(3, 6) out.reset_anchor() out.mark_anchor( "CARGO AND BAGGAGE CONTAIN." ) self.output.Payload.wcon = out.transfer_var(0, 6) if mywts == 0: out.reset_anchor() out.mark_anchor( "CREW AND BAGGAGE-FLIGHT" ) self.output.Payload.nflcr = out.transfer_var(0, 4) if ifite != 1: self.output.Payload.nstuag = out.transfer_var(1, 2) if iopt == 3: # In optimization mode, find the last design mission nos = 0 while True: try: out.reset_anchor() out.mark_anchor( "#OBJ/VAR/CONSTR SUMMARY", noffdr+nos+1+self.nrern0 ) except RuntimeError: break else: nos += 1 nit = noffdr + nos else: nit = nos = 1 if nit > 0: # Read output from the weights module if mywts == 0: out.reset_anchor() try: out.mark_anchor( "WING SPAN ", nos) except RuntimeError: ndd = self.input.syntin.Optimization_Control.ndd if ndd == 0: msg = "\n\n***************** \n\n" msg += "There was only one iteration in optimization mode \n\n" msg += "and we happen to be looking for the final solution, which isn't there. \n\n" msg += "ndd = %" % ndd + "\n\n" msg += "Try setting flops.input.syntin.Optimization_Control.ndd to 3 or 4.\n\n" msg += "*****************" raise RuntimeError(msg) else: msg = "\n\n***************** \n\n" msg += "There was only one iteration in optimization mode \n\n" msg += "and we happen to be looking for the final solution, which isn't there. 
\n\n" msg += "Something is wrong here and someone needs to figure it out before we can proceed.\n\n" msg += "*****************" raise RuntimeError(msg) self.output.Geometry.span = out.transfer_var(0, 3) self.output.Geometry.glov = out.transfer_var(1, 4) self.output.Geometry.sht = out.transfer_var(3, 4) self.output.Geometry.svt = out.transfer_var(5, 4) self.output.Geometry.xnac = out.transfer_var(8, 3) self.output.Geometry.dnac = out.transfer_var(9, 3) self.output.Geometry.xmlg = out.transfer_var(11, 5) self.output.Geometry.xnlg = out.transfer_var(12, 5) self.output.Weight.wldg = out.transfer_var(14, 4) self.output.Weight.fultot = out.transfer_var(19, 4) self.output.Weight.exsful = out.transfer_var(20, 4) out.reset_anchor() out.mark_anchor( "WING BENDING FACTOR", nos) self.output.Weight.Wing.w = out.transfer_var(0, 4) self.output.Weight.Wing.ew = out.transfer_var(1, 5) self.output.Weight.Wing.w1 = out.transfer_var(4, 3) self.output.Weight.Wing.w2 = out.transfer_var(5, 3) self.output.Weight.Wing.w3 = out.transfer_var(6, 3) # Read mass and balance summary data out.reset_anchor() out.mark_anchor( "MASS AND BALANCE SUMMARY", nos) if ifite == 1: self.output.Weight.frwi = out.transfer_keyvar("WING ", 2) self.output.Weight.frht = out.transfer_keyvar("HORIZONTAL TAIL ", 2) self.output.Weight.frvt = out.transfer_keyvar("VERTICAL TAIL ", 2) self.output.Weight.frfin = out.transfer_keyvar("VERTICAL FIN ", 2) self.output.Weight.frcan = out.transfer_keyvar("CANARD ", 2) self.output.Weight.frfu = out.transfer_keyvar("FUSELAGE ", 2) self.output.Weight.wlg = out.transfer_keyvar("LANDING GEAR ", 2) self.output.Weight.frna = out.transfer_keyvar("NACELLE (AIR INDUCTION) ", 2) self.output.Weight.wengt = out.transfer_keyvar("ENGINES ", 2) self.output.Weight.wthr = out.transfer_keyvar("THRUST REVERSERS ", 2) self.output.Weight.wpmisc = out.transfer_keyvar("MISCELLANEOUS SYSTEMS ", 2) self.output.Weight.wfsys = out.transfer_keyvar("FUEL SYSTEM-TANKS AND PLUMBING ", 2) 
self.output.Weight.frsc = out.transfer_keyvar("SURFACE CONTROLS ", 2) self.output.Weight.wapu = out.transfer_keyvar("AUXILIARY POWER ", 2) self.output.Weight.win = out.transfer_keyvar("INSTRUMENTS ", 2) self.output.Weight.whyd = out.transfer_keyvar("HYDRAULICS ", 2) self.output.Weight.welec = out.transfer_keyvar("ELECTRICAL ", 2) self.output.Weight.wavonc = out.transfer_keyvar("AVIONICS ", 2) self.output.Weight.wfurn = out.transfer_keyvar("FURNISHINGS AND EQUIPMENT ", 2) self.output.Weight.wac = out.transfer_keyvar("AIR CONDITIONING ", 2) self.output.Weight.wai = out.transfer_keyvar("AUXILIARY GEAR ", 2) self.output.Weight.wempty = out.transfer_keyvar(" WEIGHT EMPTY ", 2) self.output.Weight.wflcrbw = out.transfer_keyvar("CREW AND BAGGAGE-FLIGHT,", 3) self.output.Weight.wuf = out.transfer_keyvar("UNUSABLE FUEL ", 2) self.output.Weight.woil = out.transfer_keyvar("ENGINE OIL ", 2) self.output.Weight.wsrv = out.transfer_keyvar("AMMUNITION, ETC. ", 2) self.output.Weight.wbomb = out.transfer_keyvar("AUXILIARY TANKS ", 2) self.output.Weight.dowe = out.transfer_keyvar("OPERATING WEIGHT ", 2) self.output.Weight.zfw = out.transfer_keyvar("ZERO FUEL WEIGHT ", 2) else: self.output.Weight.frwi = out.transfer_keyvar("WING ", 2) self.output.Weight.frht = out.transfer_keyvar("HORIZONTAL TAIL ", 2) self.output.Weight.frvt = out.transfer_keyvar("VERTICAL TAIL ", 2) self.output.Weight.frfin = out.transfer_keyvar("VERTICAL FIN ", 2) self.output.Weight.frcan = out.transfer_keyvar("CANARD ", 2) self.output.Weight.frfu = out.transfer_keyvar("FUSELAGE ", 2) self.output.Weight.wlg = out.transfer_keyvar("LANDING GEAR ", 2) self.output.Weight.frna = out.transfer_keyvar("NACELLE (AIR INDUCTION) ", 2) self.output.Weight.wengt = out.transfer_keyvar("ENGINES ", 2) self.output.Weight.wthr = out.transfer_keyvar("THRUST REVERSERS ", 2) self.output.Weight.wpmisc = out.transfer_keyvar("MISCELLANEOUS SYSTEMS ", 2) self.output.Weight.wfsys = out.transfer_keyvar("FUEL SYSTEM-TANKS AND PLUMBING ", 2) 
self.output.Weight.frsc = out.transfer_keyvar("SURFACE CONTROLS ", 2) self.output.Weight.wapu = out.transfer_keyvar("AUXILIARY POWER ", 2) self.output.Weight.win = out.transfer_keyvar("INSTRUMENTS ", 2) self.output.Weight.whyd = out.transfer_keyvar("HYDRAULICS ", 2) self.output.Weight.welec = out.transfer_keyvar("ELECTRICAL ", 2) self.output.Weight.wavonc = out.transfer_keyvar("AVIONICS ", 2) self.output.Weight.wfurn = out.transfer_keyvar("FURNISHINGS AND EQUIPMENT ", 2) self.output.Weight.wac = out.transfer_keyvar("AIR CONDITIONING ", 2) self.output.Weight.wai = out.transfer_keyvar("ANTI-ICING ", 2) self.output.Weight.wempty = out.transfer_keyvar(" WEIGHT EMPTY ", 2) self.output.Weight.wflcrbw = out.transfer_keyvar("CREW AND BAGGAGE-FLIGHT,", 3) self.output.Weight.wwstuab = out.transfer_keyvar("-CABIN, ", 3) self.output.Weight.wuf = out.transfer_keyvar("UNUSABLE FUEL ", 2) self.output.Weight.woil = out.transfer_keyvar("ENGINE OIL ", 2) self.output.Weight.wsrv = out.transfer_keyvar("PASSENGER SERVICE ", 2) self.output.Weight.dowe = out.transfer_keyvar("OPERATING WEIGHT ", 2) self.output.Weight.zfw = out.transfer_keyvar("ZERO FUEL WEIGHT ", 2) # Read inertia data if inrtia > 0: out.reset_anchor() out.mark_anchor( "# INERTIA DATA FOR AIRCRAFT", nos) nfcon = self.input.wtin.Inertia.tf.shape[0] self.output.Weight.Inertia.cgx = zeros(1+nfcon) self.output.Weight.Inertia.cgy = zeros(1+nfcon) self.output.Weight.Inertia.cgz = zeros(1+nfcon) self.output.Weight.Inertia.ixxroll = zeros(1+nfcon) self.output.Weight.Inertia.ixxptch = zeros(1+nfcon) self.output.Weight.Inertia.ixxyaw = zeros(1+nfcon) self.output.Weight.Inertia.ixz = zeros(1+nfcon) out.reset_anchor() out.mark_anchor( " AIRCRAFT OWE OR ZFW", 1) self.output.Weight.Inertia.cgx[0] = out.transfer_var(0, 6) self.output.Weight.Inertia.cgy[0] = out.transfer_var(0, 7) self.output.Weight.Inertia.cgz[0] = out.transfer_var(0, 8) out.reset_anchor() out.mark_anchor( " AIRCRAFT OWE OR ZFW", 2) self.output.Weight.Inertia.ixxroll[0] 
= out.transfer_var(0, 5) self.output.Weight.Inertia.ixxptch[0] = out.transfer_var(0, 6) self.output.Weight.Inertia.ixxyaw[0] = out.transfer_var(0, 7) self.output.Weight.Inertia.ixz[0] = out.transfer_var(0, 8) out.reset_anchor() if nfcon > 0: for i in range(1, nfcon+1): out.mark_anchor( "INERTIA DATA FOR FUEL CONDITION" ) out.mark_anchor( " TOTAL WEIGHT " ) self.output.Weight.Inertia.cgx[i] = out.transfer_var(0, 4) self.output.Weight.Inertia.cgy[i] = out.transfer_var(0, 5) self.output.Weight.Inertia.cgz[i] = out.transfer_var(0, 6) out.mark_anchor( " TOTAL AIRCRAFT " ) self.output.Weight.Inertia.ixxroll[i] = out.transfer_var(0, 3) self.output.Weight.Inertia.ixxptch[i] = out.transfer_var(0, 4) self.output.Weight.Inertia.ixxyaw[i] = out.transfer_var(0, 5) self.output.Weight.Inertia.ixz[i] = out.transfer_var(0, 6) else: # set weights to zero self.output.Geometry.span = 0.0 self.output.Geometry.glov = 0.0 self.output.Geometry.sht = 0.0 self.output.Geometry.svt = 0.0 self.output.Geometry.xnac = 0.0 self.output.Geometry.dnac = 0.0 self.output.Geometry.xmlg = 0.0 self.output.Geometry.xnlg = 0.0 self.output.Weight.wldg = 0.0 self.output.Weight.fultot = 0.0 self.output.Weight.exsful = 0.0 self.output.Weight.frwi = 0.0 self.output.Weight.frht = 0.0 self.output.Weight.frvt = 0.0 self.output.Weight.frfin = 0.0 self.output.Weight.frcan = 0.0 self.output.Weight.frfu = 0.0 self.output.Weight.wlg = 0.0 self.output.Weight.frna = 0.0 self.output.Weight.wengt = 0.0 self.output.Weight.wthr = 0.0 self.output.Weight.wpmisc = 0.0 self.output.Weight.wfsys = 0.0 self.output.Weight.frsc = 0.0 self.output.Weight.wapu = 0.0 self.output.Weight.win = 0.0 self.output.Weight.whyd = 0.0 self.output.Weight.welec = 0.0 self.output.Weight.wavonc = 0.0 self.output.Weight.wfurn = 0.0 self.output.Weight.wac = 0.0 self.output.Weight.wai = 0.0 self.output.Weight.wempty = 0.0 self.output.Weight.wflcrbw = 0.0 self.output.Weight.wwstuab = 0.0 self.output.Weight.wuf = 0.0 self.output.Weight.woil = 0.0 
self.output.Weight.wsrv = 0.0 self.output.Weight.dowe = 0.0 self.output.Weight.zfw = 0.0 self.output.Weight.wbomb = 0.0 # inertia data self.output.Weight.Inertia.cgx = zeros(0) self.output.Weight.Inertia.cgy = zeros(0) self.output.Weight.Inertia.cgz = zeros(0) self.output.Weight.Inertia.ixxroll = zeros(0) self.output.Weight.Inertia.ixxptch = zeros(0) self.output.Weight.Inertia.ixxyaw = zeros(0) self.output.Weight.Inertia.ixz = zeros(0) # Read performance contraints summary if self.npcon0 > 0 and ianal == 3: out.reset_anchor() out.mark_anchor( "PERFORMANCE CONSTRAINT SUMMARY", nos) out.set_delimiters("columns") self.output.Performance.Constraints.constraint = out.transfer_array(4, 16, 3+self.npcon0, 29) self.output.Performance.Constraints.value = out.transfer_array(4, 32, 3+self.npcon0, 40) self.output.Performance.Constraints.units = out.transfer_array(4, 41, 3+self.npcon0, 47) self.output.Performance.Constraints.limit = out.transfer_array(4, 48, 3+self.npcon0, 56) weight = out.transfer_array(4, 56, 3+self.npcon0, 65) if isinstance(weight[0], str): self.output.Performance.Constraints.location = out.transfer_array(4, 58, 3+self.npcon0, 87) else: self.output.Performance.Constraints.weight = weight self.output.Performance.Constraints.mach = out.transfer_array(4, 66, 3+self.npcon0, 74) self.output.Performance.Constraints.alt = out.transfer_array(4, 75, 3+self.npcon0, 85) self.output.Performance.Constraints.g = out.transfer_array(4, 86, 3+self.npcon0, 98) out.set_delimiters(" ") # Read sizing and performance results if ianal == 3: out.reset_anchor() out.mark_anchor( "CONFIGURATION DATA AFTER RESIZING (IF ANY)", nit) self.output.Weight.dowe = out.transfer_var(2, 4) self.output.Weight.paylod = out.transfer_var(3, 2) self.output.Weight.fuel = out.transfer_var(4, 3) self.output.Weight.rampwt = out.transfer_var(5, 3) self.output.Weight.wsr = out.transfer_var(8, 3) self.output.Weight.thrso = out.transfer_var(10, 4) self.output.Weight.esf = out.transfer_var(11, 4) 
self.output.Weight.twr = out.transfer_var(12, 3) self.output.Performance.thrso = self.output.Weight.thrso self.output.Performance.esf = self.output.Weight.esf # Read detailed flight segment summary if ianal == 3 and msumpt > 0: out.reset_anchor() out.mark_anchor( "DETAILED FLIGHT SEGMENT SUMMARY") self.output.Performance.Segments.segment = zeros(self.nmseg) self.output.Performance.Segments.weights = zeros(self.nmseg) self.output.Performance.Segments.alts = zeros(self.nmseg) self.output.Performance.Segments.machs = zeros(self.nmseg) self.output.Performance.Segments.thrusts = zeros(self.nmseg) self.output.Performance.Segments.totmaxs = zeros(self.nmseg) self.output.Performance.Segments.lods = zeros(self.nmseg) self.output.Performance.Segments.sfcs = zeros(self.nmseg) self.output.Performance.Segments.engparms = zeros(self.nmseg) self.output.Performance.Segments.weighte = zeros(self.nmseg) self.output.Performance.Segments.alte = zeros(self.nmseg) self.output.Performance.Segments.mache = zeros(self.nmseg) self.output.Performance.Segments.thruste = zeros(self.nmseg) self.output.Performance.Segments.totmaxe = zeros(self.nmseg) self.output.Performance.Segments.lode = zeros(self.nmseg) self.output.Performance.Segments.sfce = zeros(self.nmseg) self.output.Performance.Segments.engparme = zeros(self.nmseg) for i in range(0, self.nmseg): if i < 9: out.mark_anchor( "SEGMENT " + str(i+1) + " ") else: out.mark_anchor( "SEGMENT " + str(i+1) + " " ) self.output.Performance.Segments.segment[i] = out.transfer_var(0, 3) self.output.Performance.Segments.weights[i] = out.transfer_var(5, 1) self.output.Performance.Segments.alts[i] = out.transfer_var(5, 2) self.output.Performance.Segments.machs[i] = out.transfer_var(5, 3) self.output.Performance.Segments.thrusts[i] = out.transfer_var(5, 7) self.output.Performance.Segments.lods[i] = out.transfer_var(5, 12) self.output.Performance.Segments.totmaxs[i] = out.transfer_var(6, 6) self.output.Performance.Segments.sfcs[i] = out.transfer_var(6, 7) 
self.output.Performance.Segments.engparms[i] = out.transfer_var(6, 13) # This seems a bit klugey, but it actually works. j = 0 while True: try: self.output.Performance.Segments.weighte[i] = out.transfer_var(j+5, 1) self.output.Performance.Segments.alte[i] = out.transfer_var(j+5, 2) self.output.Performance.Segments.mache[i] = out.transfer_var(j+5, 3) self.output.Performance.Segments.thruste[i] = out.transfer_var(j+5, 7) self.output.Performance.Segments.lode[i] = out.transfer_var(j+5, 12) self.output.Performance.Segments.totmaxe[i] = out.transfer_var(j+6, 6) self.output.Performance.Segments.sfce[i] = out.transfer_var(j+6, 7) self.output.Performance.Segments.engparme[i] = out.transfer_var(j+6, 13) except ValueError: break j += 3 # Read the mission summary out.reset_anchor() out.mark_anchor( "M I S S I O N S U M M A R Y", nos) self.output.Performance.taxofl = out.transfer_var(5, 4) # Read the objective, variable and constraint summary out.reset_anchor() out.mark_anchor( "#OBJ/VAR/CONSTR SUMMARY", nos) out.set_delimiters("columns") # Changed based on Karl's fix to bug I reported if ianal == 3: self.output.Performance.fuel = out.transfer_var(3, 1, 10) self.output.Performance.range = out.transfer_var(3, 11, 17) self.output.Performance.vapp = out.transfer_var(3, 18, 23) # TODO - Again, there's got to be a better way try: self.output.Performance.faroff = out.transfer_var(3, 24, 30) except RuntimeError, IndexError: self.output.Performance.faroff = 1.0e10 self.output.Performance.farldg = out.transfer_var(3, 31, 37) self.output.Performance.amfor = out.transfer_var(3, 38, 45) self.output.Performance.ssfor = out.transfer_var(3, 46, 53) self.output.Geometry.ar = out.transfer_var(3, 65, 70) self.output.Geometry.sw = out.transfer_var(3, 80, 87) self.output.Geometry.tr = out.transfer_var(3, 88, 93) self.output.Geometry.sweep = out.transfer_var(3, 94, 99) self.output.Geometry.tca = out.transfer_var(3, 100, 106) if self.input.wtin.Basic.vmmo > 0.: self.output.Performance.vmmo = 
self.input.wtin.Basic.vmmo else: self.output.Performance.vmmo = out.transfer_var(3, 107, 112) if self.output.Weight.fuel == 0.: self.output.Weight.fuel = out.transfer_var(3, 1, 10) if self.output.Weight.rampwt == 0.: self.output.Weight.rampwt = out.transfer_var(3, 54, 64) if self.output.Weight.thrso == 0.: self.output.Weight.thrso = out.transfer_var(3, 72, 78) self.output.Weight.thrsop = self.output.Performance.thrso if self.output.Weight.wsr == 0.: self.output.Weight.wsr = out.transfer_var(3, 121, 126) if self.output.Weight.twr == 0.: self.output.Weight.twr = out.transfer_var(3, 127, 132) out.set_delimiters(" ") # Read off-design mission data if ianal == 3: ndim = 1 + noffdr + self.nrern0 self.output.Econ.sl = zeros(ndim) self.output.Econ.blockt = zeros(ndim) self.output.Econ.blockf = zeros(ndim) self.output.Econ.blockNx = zeros(ndim) self.output.Econ.wpayl = zeros(ndim) self.output.Econ.wgross = zeros(ndim) self.output.Econ.range = zeros(ndim) self.output.Econ.vapp = zeros(ndim) self.output.Econ.faroff = zeros(ndim) self.output.Econ.farldg = zeros(ndim) self.output.Econ.amfor = zeros(ndim) self.output.Econ.ssfor = zeros(ndim) for i in range(0, ndim): out.reset_anchor() out.mark_anchor( "CONFIGURATION DATA AFTER RESIZING", (nos-1)*(1 + noffdr) + 1 + i) self.output.Econ.wpayl[i] = out.transfer_var(3, 2) self.output.Econ.wgross[i] = out.transfer_var(5, 3) out.mark_anchor( "DESIGN RANGE" ) self.output.Econ.sl[i] = out.transfer_var(0, 3) out.mark_anchor( "BLOCK TIME =" ) self.output.Econ.blockt[i] = out.transfer_var(0, 4) self.output.Econ.blockf[i] = out.transfer_var(1, 4) self.output.Econ.blockNx[i] = out.transfer_var(2, 6) out.mark_anchor( "#OBJ/VAR/CONSTR SUMMARY" ); out.set_delimiters("columns") self.output.Econ.range[i] = out.transfer_var(3, 11, 17) self.output.Econ.vapp[i] = out.transfer_var(3, 18, 23) try: self.output.Econ.faroff[i] = out.transfer_var(3, 24, 30) except RuntimeError, IndexError: self.output.Econ.faroff[i] = 1.0e10 self.output.Econ.farldg[i] = 
out.transfer_var(3, 31, 37) self.output.Econ.amfor[i] = out.transfer_var(3, 38, 45) self.output.Econ.ssfor[i] = out.transfer_var(3, 46, 53) out.set_delimiters(" ") def add_segin(self): """Adds a new SEGIN namelist.""" name = "segin" + str(self.nseg0) self.nseg0 += 1 comp = VariableTree() comp.add('key', Str('CHAN', desc="Key word specifying reason for end of segment")) comp.add('nflap', Int(-1, desc="Number of drag polar to use\nIf NFLAP = -1, default value or previous value is used")) comp.add('ifix', Int(-1, desc="Constraints for climb segments after OBSTACLE\nIf IFIX = 0, default value or previous value is used" )) comp.add('engscl', Float(-1., desc="Engine setting as a fraction of thrust at IPCMAX\nIf ENGSCL = -1., default value or previous value is used" )) comp.add('afix', Float(-10., units='deg', desc="Fixed angle of attack for IFIX = 3 or 6\nIf AFIX = -10., final value from previous segment is used" )) comp.add('gfix', Float(-10., units='deg', desc="Fixed flight path angle for IFIX = 2 or 4, or fixed cabin floor angle for IFIX = 5\nIf GFIX = -10., final value from previous segment is used" )) comp.add('vfix', Float(-1., units='nmi/h', desc="Fixed velocity for IFIX = 1, 4 or 6\nIf VFIX = -1., final value from previous segment is used" )) comp.add('hstop', Float(-1., units='ft', desc="Segment termination altitude\nIf HSTOP = -1., default value is used" )) comp.add('dstop', Float(-1., units='ft', desc="Segment termination distance\nIf DSTOP = -1., value from following segment is used" )) comp.add('tstop', Float(-1., units='s', desc="Segment termination time\nIf TSTOP = -1., value from following segment is used" )) comp.add('vstop', Float(-1., units='nmi/h', desc="Segment termination velocity\nIf VSTOP = -1., default value is used" )) comp.add('hmin', Float(-1., units='ft', desc="Minimum altitude for segment termination; overrides STOP variables above\nIf HMIN = -1., value is not used" )) comp.add('sprate', Float(-1., desc="Thrust reduction rate during segments 
where the power setting is reduced\nIf SPRATE = -1., default value or previous value is used" )) comp.add('iplr', Int(-1, desc="Programmed lapse rate switch for this segment\nIf IPLR = -1, default value is used" )) comp.add('noycal', Int(-1, desc="Noise calculation switch - available only for simplified noise calculations in DOSS version\nIf NOYCAL = -1, default value is used" )) comp.add('delt', Float(-1., units='s', desc="Time step for post OBSTACLE segments\nIf DELT = -1., default value is used" )) comp.add('grdaeo', Float(-1., units='deg', desc="Flight path angle for CUTBACK with all engines operating\nIf GRDAEO = -1., default value is used" )) comp.add('grdoeo', Float(-1., units='deg', desc="Flight path angle for CUTBACK with one engine out\nIf GRDOEO = -1., default value is used" )) self.input.add(name, VarTree(comp)) def remove_segin(self): """Removes a SEGIN namelist. Actually, it removes the most recently added SEGIN, as per the MC wrapper.""" if self.nseg0 == 0: raise RuntimeError('No &SEGIN namelists to remove!') self.nseg0 += -1 name = "segin" + str(self.nseg0) self.input.remove_container(name) def add_pconin(self): """Method to add a pconin* group to the list of input variables. This method can be invoked multiple times to add as many pconin* groups as desired. The first group added is input.pconin0, the second is input.pconin1, etc. Local var self.npcon0 keeps track of the number of groups added.""" if self.npcon0 == 30: raise RuntimeError('Maximum of 30 performance constraints') name = "pconin" + str(self.npcon0) self.npcon0 += 1 comp = VariableTree() comp.add('conalt', Float(-1., units='ft', desc="Altitude at which constraint is to be evaluated (Default = value from preceding constraint)" )) comp.add('conmch', Float(-1., units='nmi/h', desc="Velocity at which constraint is to be evaluated, kts. 
If less than or equal to 5., assumed to be Mach number (Default = value from preceding constraint)" )) if self.npcon0 == 1: comp.add('connz', Float(1., desc="Load factor (Nz) at which constraint is to be evaluated, G's (Default = value from preceding constraint or 1.)" )) comp.add('conpc', Float(1., desc="Engine power setting parameter\n< 1., Fraction of maximum available thrust\n= 1., Maximum thrust at this Mach number and altitude\n> 1., Power setting for engine deck (3. would indicate the third highest thrust)\n(Default = value from preceding constraint or 1.)" )) comp.add('icstdg', Int(0, desc="Number of store drag schedule (see Namelist $MISSIN) to be applied to this constraint (Default = value from preceding constraint or 0)" )) else: comp.add('connz', Float(-1., desc="Load factor (Nz) at which constraint is to be evaluated, G's (Default = value from preceding constraint or 1.)" )) comp.add('conpc', Float(-10., desc="Engine power setting parameter\n< 1., Fraction of maximum available thrust\n= 1., Maximum thrust at this Mach number and altitude\n> 1., Power setting for engine deck (3. 
would indicate the third highest thrust)\n(Default = value from preceding constraint or 1.)" )) comp.add('icstdg', Int(-1, desc="Number of store drag schedule (see Namelist $MISSIN) to be applied to this constraint (Default = value from preceding constraint or 0)" )) comp.add('conlim', Float(-999., desc="Constraint minimum or maximum value" )) comp.add('conaux', Float(-1., desc="Additional constraint parameter" )) comp.add('neo', Int(-1, desc="Number of engines operating (Default = value from preceding constraint or all)" )) comp.add('conwt', Float(-1., units='lb', desc="Fixed weight (Default = value from preceding constraint)" )) comp.add('iconsg', Int(-1, desc="Weight at start of mission segment ICONSG is used (Default = value from preceding constraint)" )) comp.add('confm', Float(-1., desc="Fuel multiplier or fraction of fuel burned (Default = value from preceding constraint)" )) comp.add('conwta', Float(-999., units='lb', desc="Delta weight (Default = value from preceding constraint)" )) comp.add('icontp', Enum(-1, (-1,5,6,7,8,9,10,11,12,13,16,17,20,30), desc="Type of constraint (Default = value from preceding constraint)", \ aliases=("Previous","Min. climb rate","Max. time-to-climb","Max. time-to-distance","Min. sustained load factor","Min. instant. load factor","Min. turn rate","Max. turn radius","Min. excess energy","Min. climb ceiling","Max. accel./decel. time","Min. max. speed","Min. energy bleed rate","Min. thrust margin"))) self.input.add(name, VarTree(comp)) def remove_pconin(self): """Removes a PCONIN namelist. Actually, it removes the most recently added PCONIN, as per the MC wrapper.""" if self.npcon0 == 0: raise RuntimeError('No &PCONIN namelists to remove!') self.npcon0 += -1 name = "pconin" + str(self.npcon0) self.input.remove_container(name) def add_rerunpconin(self, i): """Method to add a pconin* group to the list of input variables, within an existing rerun* group . 
This method can be invoked multiple times to add as many pconin* groups as desired. Local array self.npcons keeps track of the number of groups added to each rerun*.""" if self.npcons0[i] == 30: raise RuntimeError('Maximum of 30 performance constraints') rerun_name = "rerun" + str(i) if not hasattr(self.input,rerun_name): raise RuntimeError('Attempted to add a PCONIN group to a nonexistant RERUN group') name = "pconin" + str(self.npcons0[i]) self.npcons0[i] += 1 comp = VariableTree() comp.add('conalt', Float(-1., units='ft', desc="Altitude at which constraint is to be evaluated (Default = value from preceding constraint)" )) comp.add('conmch', Float(-1., units='nmi/h', desc="Velocity at which constraint is to be evaluated, kts. If less than or equal to 5., assumed to be Mach number (Default = value from preceding constraint)" )) comp.add('connz', Float(-1., desc="Load factor (Nz) at which constraint is to be evaluated, G's (Default = value from preceding constraint or 1.)" )) comp.add('conpc', Float(-10., desc="Engine power setting parameter\n< 1., Fraction of maximum available thrust\n= 1., Maximum thrust at this Mach number and altitude\n> 1., Power setting for engine deck (3. 
would indicate the third highest thrust)\n(Default = value from preceding constraint or 1.)" )) comp.add('icstdg', Int(-1, desc="Number of store drag schedule (see Namelist $MISSIN) to be applied to this constraint (Default = value from preceding constraint or 0)" )) comp.add('conlim', Float(-999., desc="Constraint minimum or maximum value" )) comp.add('conaux', Float(-1., desc="Additional constraint parameter" )) comp.add('neo', Int(-1, desc="Number of engines operating (Default = value from preceding constraint or all)" )) comp.add('conwt', Float(-1., units='lb', desc="Fixed weight (Default = value from preceding constraint)" )) comp.add('iconsg', Int(-1, desc="Weight at start of mission segment ICONSG is used (Default = value from preceding constraint)" )) comp.add('confm', Float(-1., desc="Fuel multiplier or fraction of fuel burned (Default = value from preceding constraint)" )) comp.add('conwta', Float(-999., units='lb', desc="Delta weight (Default = value from preceding constraint)" )) comp.add('icontp', Enum(-1, (-1,5,6,7,8,9,10,11,12,13,16,17,20,30), desc="Type of constraint (Default = value from preceding constraint)", \ aliases=("Previous","Min. climb rate","Max. time-to-climb","Max. time-to-distance","Min. sustained load factor","Min. instant. load factor","Min. turn rate","Max. turn radius","Min. excess energy","Min. climb ceiling","Max. accel./decel. time","Min. max. speed","Min. energy bleed rate","Min. thrust margin"))) temp = getattr(self.input, rerun_name) temp.add(name, comp) def remove_rerunpconin(self, i): """Removes a PCONIN from an existing RERUN group. 
Actually, it removes the most recently added PCONIN, as per the MC wrapper.""" if self.npcons0[i] == 0: raise RuntimeError('No &PCONIN namelists to remove!') self.npcons0[i] += -1 rerun_name = "rerun" + str(i) if not hasattr(self.input,rerun_name): raise RuntimeError('Attempted to remove a PCONIN group to a nonexistant RERUN group') name = "pconin" + str(self.npcons0[i]) temp = getattr(self.input, rerun_name) temp.remove_container(name) def add_rerun(self): """ Method to add a rerun* group to the list of input variables. This method can be invoked multiple times to add as many rerun* groups as desired. The first group added is input.rerun0, the second is input.rerun1, etc. An additional missin group and mission definition file are also created within the new group. Local var self.nrern0 keeps track of the number of groups added.""" name = "rerun" + str(self.nrern0) self.nrern0 += 1 self.npcons0.append(0) comp = VariableTree() comp.add('desrng', Float(-1., units="nmi/s" )) comp.add('mywts', Int(-1 )) comp.add('rampwt', Float(-1., units="lb" )) comp.add('dowe', Float(-1., units="lb" )) comp.add('paylod', Float(-1., units="lb" )) comp.add('fuemax', Float(-1., units="lb" )) comp.add('itakof', Int(-1 )) comp.add('iland', Int(-1 )) comp.add('nopro', Int(-1 )) comp.add('noise', Int(-1 )) comp.add('icost', Int(-1 )) comp.add('wsr', Float(-1. )) comp.add('twr', Float(-1. )) comp.add('missin', VarTree(VariableTree())) comp.missin.add('Basic', VarTree(VariableTree())) comp.missin.Basic.add('indr', Int(-999 )) comp.missin.Basic.add('fact', Float(-999. )) comp.missin.Basic.add('fleak', Float(-999. )) comp.missin.Basic.add('fcdo', Float(-999. )) comp.missin.Basic.add('fcdi', Float(-999. )) comp.missin.Basic.add('fcdsub', Float(-999. )) comp.missin.Basic.add('fcdsup', Float(-999. )) comp.missin.Basic.add('iskal', Int(-999 )) comp.missin.Basic.add('owfact', Float(-999. 
)) comp.missin.Basic.add('iflag', Int(-999 )) comp.missin.Basic.add('msumpt', Int(-999 )) comp.missin.Basic.add('dtc', Float(-999. )) comp.missin.Basic.add('irw', Int(-999 )) comp.missin.Basic.add('rtol', Float(-999. )) comp.missin.Basic.add('nhold', Int(-999 )) comp.missin.Basic.add('iata', Int(-999 )) comp.missin.Basic.add('tlwind', Float(-999. )) comp.missin.Basic.add('dwt', Float(-999. )) comp.missin.Basic.add('offdr', Array(dtype=numpy_float64 )) comp.missin.Basic.add('idoq', Int(-999 )) comp.missin.Basic.add('nsout', Int(-999 )) comp.missin.Basic.add('nsadj', Int(-999 )) comp.missin.Basic.add('mirror', Int(-999 )) comp.missin.add('Store_Drag', VarTree(VariableTree())) comp.missin.Store_Drag.add('stma', Array(dtype=numpy_float64 )) comp.missin.Store_Drag.add('cdst', Array(dtype=numpy_float64 )) comp.missin.Store_Drag.add('istcl', Array(dtype=numpy_float64 )) comp.missin.Store_Drag.add('istcr', Array(dtype=numpy_float64 )) comp.missin.Store_Drag.add('istde', Int(-999 )) comp.missin.add('User_Weights', VarTree(VariableTree())) comp.missin.User_Weights.add('mywts', Int(-999 )) comp.missin.User_Weights.add('rampwt', Float(-999. )) comp.missin.User_Weights.add('dowe', Float(-999. )) comp.missin.User_Weights.add('paylod', Float(-999. )) comp.missin.User_Weights.add('fuemax', Float(-999. )) comp.missin.add('Ground_Operations', VarTree(VariableTree())) comp.missin.Ground_Operations.add('takotm', Float(-999. )) comp.missin.Ground_Operations.add('taxotm', Float(-999. )) comp.missin.Ground_Operations.add('apprtm', Float(-999. )) comp.missin.Ground_Operations.add('appfff', Float(-999. )) comp.missin.Ground_Operations.add('taxitm', Float(-999. )) comp.missin.Ground_Operations.add('ittff', Int(-999 )) comp.missin.Ground_Operations.add('takoff', Float(-999. )) comp.missin.Ground_Operations.add('txfufl', Float(-999. )) comp.missin.Ground_Operations.add('ftkofl', Float(-999. )) comp.missin.Ground_Operations.add('ftxofl', Float(-999. 
)) comp.missin.Ground_Operations.add('ftxifl', Float(-999. )) comp.missin.Ground_Operations.add('faprfl', Float(-999. )) comp.missin.add('Turn_Segments', VarTree(VariableTree())) comp.missin.Turn_Segments.add('xnz', Array(dtype=numpy_float64 )) comp.missin.Turn_Segments.add('xcl', Array(dtype=numpy_float64 )) comp.missin.Turn_Segments.add('xmach', Array(dtype=numpy_float64 )) comp.missin.add('Climb', VarTree(VariableTree())) comp.missin.Climb.add('nclimb', Int(-999)) comp.missin.Climb.add('clmmin', Array(dtype=numpy_float64 )) comp.missin.Climb.add('clmmax', Array(dtype=numpy_float64 )) comp.missin.Climb.add('clamin', Array(dtype=numpy_float64 )) comp.missin.Climb.add('clamax', Array(dtype=numpy_float64 )) comp.missin.Climb.add('nincl', Array(dtype=numpy_int64 )) comp.missin.Climb.add('fwf', Array(dtype=numpy_float64 )) comp.missin.Climb.add('ncrcl', Array(dtype=numpy_int64 )) comp.missin.Climb.add('cldcd', Array(dtype=numpy_float64 )) comp.missin.Climb.add('ippcl', Array(dtype=numpy_int64 )) comp.missin.Climb.add('maxcl', Array(dtype=numpy_int64 )) comp.missin.Climb.add('no', Array(dtype=numpy_int64 )) comp.missin.Climb.add('keasvc', Int(-999 )) comp.missin.Climb.add('actab', Array(dtype=numpy_float64 )) comp.missin.Climb.add('vctab', Array(dtype=numpy_float64 )) comp.missin.Climb.add('ifaacl', Int(-999 )) comp.missin.Climb.add('ifaade', Int(-999 )) comp.missin.Climb.add('nodive', Int(-999 )) comp.missin.Climb.add('divlim', Float(-999. )) comp.missin.Climb.add('qlim', Float(-999. )) comp.missin.Climb.add('spdlim', Float(-999. 
)) comp.missin.Climb.add('nql', Int(-999 )) comp.missin.Climb.add('qlalt', Array(dtype=numpy_float64 )) comp.missin.Climb.add('vqlm', Array(dtype=numpy_float64 )) comp.missin.add('Cruise', VarTree(VariableTree())) comp.missin.Cruise.add('ncruse', Int(-999 )) comp.missin.Cruise.add('ioc', Array(dtype=numpy_int64 )) comp.missin.Cruise.add('crmach', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('cralt', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('crdcd', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('flrcr', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('crmmin', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('crclmx', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('hpmin', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('ffuel', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('fnox', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('ifeath', Array(dtype=numpy_int64 )) comp.missin.Cruise.add('feathf', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('cdfeth', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('dcwt', Float(-999. )) comp.missin.Cruise.add('rcin', Float(-999. )) comp.missin.Cruise.add('wtbm', Array(dtype=numpy_float64 )) comp.missin.Cruise.add('altbm', Array(dtype=numpy_float64 )) comp.missin.add('Descent', VarTree(VariableTree())) comp.missin.Descent.add('ivs', Int(-999 )) comp.missin.Descent.add('decl', Float(-999. )) comp.missin.Descent.add('demmin', Float(-999. )) comp.missin.Descent.add('demmax', Float(-999. )) comp.missin.Descent.add('deamin', Float(-999. )) comp.missin.Descent.add('deamax', Float(-999. )) comp.missin.Descent.add('ninde', Int(-999 )) comp.missin.Descent.add('dedcd', Float(-999. )) comp.missin.Descent.add('rdlim', Float(-999. 
)) comp.missin.Descent.add('ns', Int(-999 )) comp.missin.Descent.add('keasvd', Int(-999 )) comp.missin.Descent.add('adtab', Array(dtype=numpy_float64 )) comp.missin.Descent.add('vdtab', Array(dtype=numpy_float64 )) comp.missin.add('Reserve', VarTree(VariableTree())) comp.missin.Reserve.add('irs', Int(-999 )) comp.missin.Reserve.add('resrfu', Float(-999. )) comp.missin.Reserve.add('restrp', Float(-999. )) comp.missin.Reserve.add('timmap', Float(-999. )) comp.missin.Reserve.add('altran', Float(-999. )) comp.missin.Reserve.add('nclres', Int(-999 )) comp.missin.Reserve.add('ncrres', Int(-999 )) comp.missin.Reserve.add('sremch', Float(-999. )) comp.missin.Reserve.add('eremch', Float(-999. )) comp.missin.Reserve.add('srealt', Float(-999. )) comp.missin.Reserve.add('erealt', Float(-999. )) comp.missin.Reserve.add('holdtm', Float(-999. )) comp.missin.Reserve.add('ncrhol', Int(-999 )) comp.missin.Reserve.add('ihopos', Int(-999 )) comp.missin.Reserve.add('icron', Int(-999 )) comp.missin.Reserve.add('thold', Float(-999. )) comp.missin.Reserve.add('ncrth', Int(-999 )) # New mission definition defaults to the original one comp.add('mission_definition', List(iotype='in')) comp.mission_definition = self.input.mission_definition.mission self.input.add(name, VarTree(comp)) def remove_rerun(self): """Removes a Rerun namelist. Actually, it removes the most recently added Rerun, as per the MC wrapper.""" if self.nrern0 == 0: raise RuntimeError('No &PCONIN namelists to remove!') self.nrern0 += -1 name = "rerun" + str(self.nrern0) self.input.remove_container(name) self.npcons0 = self.npcons0[:-1] def reinitialize(self): """Method to add pconin*, segin* and rerun* groups to the list of input variables. This method can be invoked by the user to add the appropriate number of groups based on input variables npcon, nseg, nrerun and npcons[].""" # Add or remove an appropriate number of pconin* groups to the input variable # list. 
n0 = self.npcon0 n = self.npcon if n > n0: for i in range(0,n-n0): self.add_pconin() elif n < n0: for i in range(0,n0-n): self.remove_pconin() # Add or remove an appropriate number of segin* groups to the input variable # list. n0 = self.nseg0 n = self.nseg if n > n0: for i in range(0,n-n0): self.add_segin() elif n < n0: for i in range(0,n0-n): self.remove_segin() # Add or remove an appropriate number of rerun* groups to the input variable # list. n0 = self.nrern0 n = self.nrerun if n > n0: for i in range(0,n-n0): self.add_rerun() elif n < n0: for i in range(0,n0-n): self.remove_rerun() # Add or remove an appropriate number of rerun*.pconin* groups to the input # variable list. for i in range(0,self.nrern0): n0 = self.npcons0[i] n = self.npcons[i] if n > n0: for j in range(0,n-n0): self.add_rerunpconin(i) elif n < n0: for j in range(0,n0-n): self.remove_rerunpconin(i) def load_model(self, filename): """ Loads a FLOPS model from an existing input file.""" sb = Namelist(self) sb.set_filename(filename) # Where each namelist goes in the component rule_dict = { "OPTION" : ["input.option.Program_Control", \ "input.option.Plot_Files", \ "input.option.Excess_Power_Plot"], "WTIN" : [ "input.wtin.Basic", \ "input.wtin.Center_of_Gravity", \ "input.wtin.Crew_Payload", \ "input.wtin.Detailed_Wing", \ "input.wtin.Fuel_System", \ "input.wtin.Fuselage", \ "input.wtin.Inertia", \ "input.wtin.Landing_Gear", \ "input.wtin.OEW_Calculations", \ "input.wtin.Override", \ "input.wtin.Propulsion", \ "input.wtin.Tails_Fins", \ "input.wtin.Wing_Data"], "CONFIN" : ["input.confin.Basic", \ "input.confin.Design_Variables", \ "input.confin.Objective"], "AERIN" : ["input.aerin.Basic", \ "input.aerin.Internal_Aero", \ "input.aerin.Takeoff_Landing"], "ENGDIN" : ["input.engdin", \ "input.engdin.Basic", \ "input.engdin.Special_Options"], "MISSIN" : ["input.missin.Basic", \ "input.missin.Climb", \ "input.missin.Cruise", \ "input.missin.Descent", \ "input.missin.Ground_Operations", \ 
"input.missin.Reserve", \ "input.missin.Store_Drag", \ "input.missin.Turn_Segments", \ "input.missin.User_Weights", \ "input.parent"], "TOLIN" : ["input.tolin.Basic", \ "input.tolin.Integration_Intervals", \ "input.tolin.Landing", \ "input.tolin.Takeoff", \ "input.tolin.Thrust_Reverser"], "NOISIN" : ["input.noisin.Airframe", \ "input.noisin.Basic", \ "input.noisin.Core", \ "input.noisin.Engine_Parameters", \ "input.noisin.Fan", \ "input.noisin.Flap_Noise", \ "input.noisin.Ground_Effects", \ "input.noisin.Jet", \ "input.noisin.MSJet", \ "input.noisin.Observers", \ "input.noisin.Propagation", \ "input.noisin.Propeller", \ "input.noisin.Shielding", \ "input.noisin.Turbine"], "COSTIN" : ["input.costin.Basic", \ "input.costin.Cost_Technology", \ "input.costin.Mission_Performance"], "FUSEIN" : ["input.fusein.Basic", \ "input.fusein.BWB"], "ENGINE" : ["input.engine", \ "input.engine.Basic", \ "input.engine.Design_Point", \ "input.engine.Engine_Weight", \ "input.engine.IC_Engine", \ "input.engine.Noise_Data", \ "input.engine.Other"], "SYNTIN" : ["input.syntin", \ "input.syntin.Variables", \ "input.syntin.Optimization_Control"], "ASCLIN" : ["input.asclin"], "NACELL" : ["input.nacell"], "PROIN" : ["input.proin"] } # Some variables aren't exposed in the OpenMDAO wrapper (e.g., array # sizes which aren't needed explicitly.) ignore = ["netaw", "itank", "nob", "nparam", "nfcon", "npcon"] sb.parse_file() self.input.title = sb.title empty_groups, unlisted_groups, unlinked_vars = \ sb.load_model(rule_dict, ignore) # The pconin groups are problematic, and have not been filled because # they aren't created yet. We can parse the unlisted_groups to see # which ones are in the input-file, and then add them to the component. # Rerun, Segin, and Pconin groups also do not have unique names. We give # them unique names in OpenMDAO. 
num_mission = 0 if len(unlisted_groups) > 0: for i, group in unlisted_groups.iteritems(): if group.lower().count('pconin'): self.add_pconin() rule_dict = { "PCONIN" : ["input.pconin"+str(self.npcon0-1)] } ne, nu, nv = sb.load_model(rule_dict, ignore, i) for var in nv: unlinked_vars.append(var) elif group.lower().count('rerun'): self.add_rerun() stem = "input.rerun"+str(self.nrern0-1) rule_dict = { "RERUN" : [stem] } ne, nu, nv = sb.load_model(rule_dict, ignore, i) for var in nv: unlinked_vars.append(var) elif group.lower().count('segin'): self.add_segin() stem = "input.segin"+str(self.nseg0-1) rule_dict = { "SEGIN" : [stem] } ne, nu, nv = sb.load_model(rule_dict, ignore, i) for var in nv: unlinked_vars.append(var) # Hopefully the missin namelist always follows its associated # rerun group. elif group.lower().count('missin'): rule_dict = { "MISSIN" : [stem+".missin.Basic", stem+".missin.Store_Drag", stem+".missin.User_Weights", stem+".missin.Ground_Operations", stem+".missin.Turn_Segments", stem+".missin.Climb", stem+".missin.Cruise", stem+".missin.Descent", stem+".missin.Reserve",] } ne, nu, nv = sb.load_model(rule_dict, ignore, i) for var in nv: unlinked_vars.append(var) num_mission += 1 # Mission segments are also a challenge. # The remaining empty groups should be mission segments or comments. 
missions = [] if len(empty_groups) > 0: in_mission = False for group in empty_groups.values(): group_name = group.strip().split(" ")[0] if group_name.lower() == 'start': missions.append('START') in_mission = True elif group_name.lower() == 'end': missions.append('END') in_mission = False elif in_mission == True: groups = ['climb', 'cruise', 'refuel', 'release', 'accel', \ 'turn', 'combat', 'hold', 'descent'] if group_name.lower() in groups: missions.append(group.upper()) else: print "Warning: Ignoring unknonwn mission %s" % group # Fist, handle the standard run missions mission_count = 0 mission_start = 0 mission_end = 0 for i, mission in enumerate(missions): if mission == 'END': mission_end = i mission_count += i+1 break self.input.mission_definition.mission = missions[:mission_end+1] # Next, handle the missions in the Rerun groups for j in range(0,self.nrern0): name = "rerun" + str(j) mission_start = mission_end+1 for i, mission in enumerate(missions[mission_start:]): if mission == 'END': mission_end = i+mission_start mission_count += i+1 break self.set("input.%s.mission_definition" % name, \ missions[mission_start:mission_end+1]) # Certain data files are sometimes jammed into the input file. We have # to jump through some hoops to detect and import this information. 
ndecks = 0 if self.input.engdin.Basic.igenen in (0, -2): found = False engine_deck = "" for i, group in enumerate(sb.groups): if group.lower().strip() == 'engdin': found = True elif found == True: if len(sb.cards[i]) > 0: break engine_deck += '%s\n' % group ndecks += 1 self.input.engine_deck.engdek = engine_deck # Aero deck seems to fall after the mission segements if self.input.aerin.Basic.myaero > 0 and \ self.input.aerin.Basic.myaero != 3 and \ self.input.option.Program_Control.ianal == 3: found = False aerodat = "" for i, group in enumerate(sb.groups): if group.lower().strip() == 'end': found = True elif found == True: if len(sb.cards[i]) > 0: break aerodat += '%s\n' % group ndecks += 1 self.input.aero_data.aerodat = aerodat # Post process some stuff, mostly arrays 2D arrays that come over as 1D tf = self.input.wtin.Inertia.tf # TODO: tf can be input with 1st dim greater than one. Need to find out # how that is written / parsed. if tf.shape[0] > 0: self.set('input.wtin.Inertia.tf', array([tf])) # Report diagnostics and raise any exceptions. print "Empty Groups: %d, Unhandled Groups: %d, Unlinked Vars: %d" % \ (len(empty_groups)-len(missions)-ndecks, \ len(unlisted_groups)-self.npcon-self.nrern0-self.nseg0-num_mission, \ len(unlinked_vars)) #print empty_groups #print unlisted_groups if __name__ == "__main__": # pragma: no cover from openmdao.main.api import set_as_top from numpy import array flops_comp = set_as_top(FlopsWrapper()) flops_comp.input.option.Program_Control.mprint = 1 flops_comp.input.title = "Testing" #flops_comp.npcon = 1 #flops_comp.nseg = 3 #flops_comp.nrerun = 2 #flops_comp.npcons = [3, 4] #flops_comp.reinitialize() flops_comp.run()
{"hexsha": "7cb1cc7995d8f7890757e20eecfb73987fe59034", "size": 322272, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/flops_wrapper/flops_wrapper.py", "max_stars_repo_name": "OpenMDAO-Plugins/flops_wrapper", "max_stars_repo_head_hexsha": "da03b981b88682426038dce14e4fa992ec27d158", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-10-08T17:38:47.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-08T17:38:47.000Z", "max_issues_repo_path": "src/flops_wrapper/flops_wrapper.py", "max_issues_repo_name": "OpenMDAO-Plugins/flops_wrapper", "max_issues_repo_head_hexsha": "da03b981b88682426038dce14e4fa992ec27d158", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/flops_wrapper/flops_wrapper.py", "max_forks_repo_name": "OpenMDAO-Plugins/flops_wrapper", "max_forks_repo_head_hexsha": "da03b981b88682426038dce14e4fa992ec27d158", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-10-08T17:38:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T14:16:56.000Z", "avg_line_length": 62.3109048724, "max_line_length": 930, "alphanum_fraction": 0.6294093188, "include": true, "reason": "from numpy", "num_tokens": 86802}
[STATEMENT] lemma JF_cind: "sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id" [PROOF STATE] proof (prove) goal (1 subgoal): 1. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule rev_mp) [PROOF STATE] proof (prove) goal (2 subgoals): 1. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> ?P 2. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> ?P \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (tactic \<open>forward_tac @{context} @{thms bis_def[THEN iffD1]} 1\<close>) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; (R1 \<subseteq> UNIV \<times> UNIV \<and> R2 \<subseteq> UNIV \<times> UNIV) \<and> (\<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1')) \<and> (\<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2'))\<rbrakk> \<Longrightarrow> ?P 2. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> ?P \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (erule conjE)+ [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?P 2. 
sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> ?P \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_cong) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1.10 ?R2.10 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = ?R1.10 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = ?R2.10 4. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Comp) [PROOF STATE] proof (prove) goal (5 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?P1.14 ?P2.14 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.14 ?Q2.14 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = ?P1.14 O ?Q1.14 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. 
F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = ?P2.14 O ?Q2.14 5. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_converse) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?R1.17 ?R2.17 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.14 ?Q2.14 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = ?R1.17\<inverse> O ?Q1.14 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = ?R2.17\<inverse> O ?Q2.14 5. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Gr) [PROOF STATE] proof (prove) goal (6 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> coalg ?B1'14 ?B2'14 ?s1'14 ?s2'14 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> mor ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?f1.19 ?f2.19 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. 
F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1'14 ?B2'14 ?s1'14 ?s2'14 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.14 ?Q2.14 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr ?B1'14 ?f1.19)\<inverse> O ?Q1.14 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr ?B2'14 ?f2.19)\<inverse> O ?Q2.14 6. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule tcoalg) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. 
F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> mor UNIV UNIV ?s1'14 ?s2'14 ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?f1.19 ?f2.19 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis UNIV UNIV ?s1'14 ?s2'14 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.14 ?Q2.14 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV ?f1.19)\<inverse> O ?Q1.14 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV ?f2.19)\<inverse> O ?Q2.14 5. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis ?B1.10 ?B2.10 ?s1.10 ?s2.10 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule mor_Rep_JF) [PROOF STATE] proof (prove) goal (4 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis UNIV UNIV dtor1 dtor2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.14 ?Q2.14 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O ?Q1.14 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O ?Q2.14 4. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Comp) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. 
(b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis UNIV UNIV dtor1 dtor2 ?B1'24 ?B2'24 ?s1'24 ?s2'24 ?P1.24 ?P2.24 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis ?B1'24 ?B2'24 ?s1'24 ?s2'24 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.24 ?Q2.24 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O ?P1.24 O ?Q1.24 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O ?P2.24 O ?Q2.24 5. 
sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply assumption [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> bis UNIV UNIV dtor1 dtor2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?Q1.24 ?Q2.24 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O R1 O ?Q1.24 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O R2 O ?Q2.24 4. 
sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Gr) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> tcoalg dtor1 dtor2 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> mor UNIV UNIV dtor1 dtor2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?f1.27 ?f2.27 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O R1 O BNF_Def.Gr UNIV ?f1.27 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. 
(b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O R2 O BNF_Def.Gr UNIV ?f2.27 5. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule tcoalg) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> mor UNIV UNIV dtor1 dtor2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?f1.27 ?f2.27 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O R1 O BNF_Def.Gr UNIV ?f1.27 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. 
F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O R2 O BNF_Def.Gr UNIV ?f2.27 4. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'10 ?B2'10 ?s1'10 ?s2'10 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule mor_Rep_JF) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R1'10 = (BNF_Def.Gr UNIV Rep_JF1)\<inverse> O R1 O BNF_Def.Gr UNIV Rep_JF1 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O R2 O BNF_Def.Gr UNIV Rep_JF2 3. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> sbis car_final1 car_final2 str_final1 str_final2 ?R1'10 ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (erule relImage_Gr) [PROOF STATE] proof (prove) goal (2 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; R1 \<subseteq> UNIV \<times> UNIV; R2 \<subseteq> UNIV \<times> UNIV; \<forall>b1 b1'. (b1, b1') \<in> R1 \<longrightarrow> (\<exists>z\<in>F1in UNIV R1 R2. F1map id fst fst z = dtor1 b1 \<and> F1map id snd snd z = dtor1 b1'); \<forall>b2 b2'. (b2, b2') \<in> R2 \<longrightarrow> (\<exists>z\<in>F2in UNIV R1 R2. F2map id fst fst z = dtor2 b2 \<and> F2map id snd snd z = dtor2 b2')\<rbrakk> \<Longrightarrow> ?R2'10 = (BNF_Def.Gr UNIV Rep_JF2)\<inverse> O R2 O BNF_Def.Gr UNIV Rep_JF2 2. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) ?R2'10 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (erule relImage_Gr) [PROOF STATE] proof (prove) goal (1 subgoal): 1. sbis UNIV UNIV dtor1 dtor2 R1 R2 \<Longrightarrow> sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2) \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule impI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule rev_mp) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?P36 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?P36 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_cong) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1.39 ?R2.39 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = ?R1.39 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = ?R2.39 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Comp) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?P1.43 ?P2.43 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.43 ?Q2.43 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = ?P1.43 O ?Q1.43 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = ?P2.43 O ?Q2.43 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Gr) [PROOF STATE] proof (prove) goal (6 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> coalg ?B1.39 ?B2.39 ?s1.39 ?s2.39 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> mor ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?f1.46 ?f2.46 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.43 ?Q2.43 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr ?B1.39 ?f1.46 O ?Q1.43 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr ?B2.39 ?f2.46 O ?Q2.43 6. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1.39 ?B2.39 ?s1.39 ?s2.39 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule coalg_T) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> mor carT1 carT2 strT1 strT2 ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?f1.46 ?f2.46 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1'43 ?B2'43 ?s1'43 ?s2'43 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.43 ?Q2.43 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 ?f1.46 O ?Q1.43 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 ?f2.46 O ?Q2.43 5. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule mor_T_final) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.43 ?Q2.43 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O ?Q1.43 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O ?Q2.43 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Comp) [PROOF STATE] proof (prove) goal (5 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'51 ?B2'51 ?s1'51 ?s2'51 ?P1.51 ?P2.51 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1'51 ?B2'51 ?s1'51 ?s2'51 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.51 ?Q2.51 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O ?P1.51 O ?Q1.51 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O ?P2.51 O ?Q2.51 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule sbis_lsbis) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis car_final1 car_final2 str_final1 str_final2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?Q1.51 ?Q2.51 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O lsbis1 car_final1 car_final2 str_final1 str_final2 O ?Q1.51 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O ?Q2.51 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_converse) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis ?B1'39 ?B2'39 ?s1'39 ?s2'39 car_final1 car_final2 str_final1 str_final2 ?R1.55 ?R2.55 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O lsbis1 car_final1 car_final2 str_final1 str_final2 O ?R1.55\<inverse> 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O ?R2.55\<inverse> 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule bis_Gr) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> coalg ?B1'39 ?B2'39 ?s1'39 ?s2'39 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> mor ?B1'39 ?B2'39 ?s1'39 ?s2'39 car_final1 car_final2 str_final1 str_final2 ?f1.57 ?f2.57 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O lsbis1 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr ?B1'39 ?f1.57)\<inverse> 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr ?B2'39 ?f2.57)\<inverse> 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> bis carT1 carT2 strT1 strT2 ?B1'39 ?B2'39 ?s1'39 ?s2'39 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule coalg_T) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> mor carT1 carT2 strT1 strT2 car_final1 car_final2 str_final1 str_final2 ?f1.57 ?f2.57 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O lsbis1 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr carT1 ?f1.57)\<inverse> 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr carT2 ?f2.57)\<inverse> 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> sbis carT1 carT2 strT1 strT2 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule mor_T_final) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R1'39 = BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) O lsbis1 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr carT1 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)))\<inverse> 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<inverse> 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> sbis carT1 carT2 strT1 strT2 ?R1'39 ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_Gr[OF lsbis1_incl]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> ?R2'39 = BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) O lsbis2 car_final1 car_final2 str_final1 str_final2 O (BNF_Def.Gr carT2 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<inverse> 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) ?R2'39 \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_Gr[OF lsbis2_incl]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2)\<rbrakk> \<Longrightarrow> sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))) \<longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule impI) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R1 \<subseteq> Id \<and> R2 \<subseteq> Id [PROOF STEP] apply (rule conjI) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R1 \<subseteq> Id 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R1 \<subseteq> ?B71 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B71 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_UNIV_relImage) [PROOF STATE] proof (prove) goal (2 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (BNF_Greatest_Fixpoint.relImage R1 ?f74) ?f74 \<subseteq> Id 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (BNF_Greatest_Fixpoint.relImage R1 ?f74) ?f74 \<subseteq> ?B75 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B75 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_mono) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage R1 ?f74 \<subseteq> ?R2.78 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 ?f74 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage R1 ?f74 \<subseteq> ?B80 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B80 \<subseteq> ?R2.78 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 ?f74 \<subseteq> Id 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (erule incl_lsbis1) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?R2.78 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule ord_eq_le_trans) [PROOF STATE] proof (prove) goal (4 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 = ?b84 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?b84 \<subseteq> ?R2.78 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule sym[OF relImage_relInvImage]) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?f88 ` ?A88 \<times> ?f88 ` ?A88 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A88 (lsbis1 car_final1 car_final2 str_final1 str_final2) ?f88) ?f88 \<subseteq> ?R2.78 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule xt1(3)) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?f88 ` ?A88 \<times> ?f88 ` ?A88 = ?b90 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?b90 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A88 (lsbis1 car_final1 car_final2 str_final1 str_final2) ?f88) ?f88 \<subseteq> ?R2.78 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 5. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule Sigma_cong) [PROOF STATE] proof (prove) goal (6 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?f88 ` ?A88 = ?B93 2. \<And>uu_. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))); uu_ \<in> ?B93\<rbrakk> \<Longrightarrow> ?f88 ` ?A88 = ?D93 uu_ 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> Sigma ?B93 ?D93 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A88 (lsbis1 car_final1 car_final2 str_final1 str_final2) ?f88) ?f88 \<subseteq> ?R2.78 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 6. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule proj_image) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<And>uu_. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))); uu_ \<in> ?A88 // ?r96\<rbrakk> \<Longrightarrow> Equiv_Relations.proj ?r96 ` ?A88 = ?D93 uu_ 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> Sigma (?A88 // ?r96) ?D93 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A88 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj ?r96)) (Equiv_Relations.proj ?r96) \<subseteq> ?R2.78 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 5. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule proj_image) [PROOF STATE] proof (prove) goal (4 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis1 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?A88 // ?r96 \<times> ?A88 // ?r96 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A88 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj ?r96)) (Equiv_Relations.proj ?r96) \<subseteq> ?R2.78 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule lsbis1_incl) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.78 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (4 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?B99 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B99 \<subseteq> ?R2.78 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule relImage_mono) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.102 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage ?R2.102 (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.78 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule incl_lsbis1) [PROOF STATE] proof (prove) goal (4 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> sbis ?B1.104 ?B2.104 ?s1.104 ?s2.104 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) ?R2.104 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (lsbis1 ?B1.104 ?B2.104 ?s1.104 ?s2.104) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.78 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply assumption [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (lsbis1 carT1 carT2 strT1 strT2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.78 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.78 Rep_JF1 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule relImage_proj) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> equiv ?A106 (lsbis1 carT1 carT2 strT1 strT2) 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (Id_on (?A106 // lsbis1 carT1 carT2 strT1 strT2)) Rep_JF1 \<subseteq> Id 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule equiv_lsbis1[OF coalg_T]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (Id_on car_final1) Rep_JF1 \<subseteq> Id 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_Id_on) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>a1 a2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> (Rep_JF1 a1 = Rep_JF1 a2) = (a1 = a2) 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule Rep_JF1_inject) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> R2 \<subseteq> ?B113 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B113 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_UNIV_relImage) [PROOF STATE] proof (prove) goal (1 subgoal): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (BNF_Greatest_Fixpoint.relImage R2 ?f116) ?f116 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (BNF_Greatest_Fixpoint.relImage R2 ?f116) ?f116 \<subseteq> ?B117 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B117 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_mono) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage R2 ?f116 \<subseteq> ?R2.120 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 ?f116 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage R2 ?f116 \<subseteq> ?B122 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B122 \<subseteq> ?R2.120 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 ?f116 \<subseteq> Id [PROOF STEP] apply (erule incl_lsbis2) [PROOF STATE] proof (prove) goal (2 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?R2.120 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule ord_eq_le_trans) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 = ?b126 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?b126 \<subseteq> ?R2.120 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule sym[OF relImage_relInvImage]) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?f130 ` ?A130 \<times> ?f130 ` ?A130 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A130 (lsbis2 car_final1 car_final2 str_final1 str_final2) ?f130) ?f130 \<subseteq> ?R2.120 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule xt1(3)) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?f130 ` ?A130 \<times> ?f130 ` ?A130 = ?b132 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?b132 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A130 (lsbis2 car_final1 car_final2 str_final1 str_final2) ?f130) ?f130 \<subseteq> ?R2.120 4. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule Sigma_cong) [PROOF STATE] proof (prove) goal (5 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?f130 ` ?A130 = ?B135 2. \<And>uu_. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))); uu_ \<in> ?B135\<rbrakk> \<Longrightarrow> ?f130 ` ?A130 = ?D135 uu_ 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> Sigma ?B135 ?D135 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A130 (lsbis2 car_final1 car_final2 str_final1 str_final2) ?f130) ?f130 \<subseteq> ?R2.120 5. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule proj_image) [PROOF STATE] proof (prove) goal (4 subgoals): 1. \<And>uu_. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))); uu_ \<in> ?A130 // ?r138\<rbrakk> \<Longrightarrow> Equiv_Relations.proj ?r138 ` ?A130 = ?D135 uu_ 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> Sigma (?A130 // ?r138) ?D135 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A130 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj ?r138)) (Equiv_Relations.proj ?r138) \<subseteq> ?R2.120 4. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule proj_image) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> lsbis2 car_final1 car_final2 str_final1 str_final2 \<subseteq> ?A130 // ?r138 \<times> ?A130 // ?r138 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage ?A130 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj ?r138)) (Equiv_Relations.proj ?r138) \<subseteq> ?R2.120 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule lsbis2_incl) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.120 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule subset_trans) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?B141 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> ?B141 \<subseteq> ?R2.120 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule relImage_mono) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.144 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage ?R2.144 (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.120 3. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule incl_lsbis2) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> sbis ?B1.146 ?B2.146 ?s1.146 ?s2.146 ?R1.146 (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2))) 2. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (lsbis2 ?B1.146 ?B2.146 ?s1.146 ?s2.146) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.120 3. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply assumption [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relImage (lsbis2 carT1 carT2 strT1 strT2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)) \<subseteq> ?R2.120 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV ?R2.120 Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule relImage_proj) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> equiv ?A148 (lsbis2 carT1 carT2 strT1 strT2) 2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (Id_on (?A148 // lsbis2 carT1 carT2 strT1 strT2)) Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule equiv_lsbis2[OF coalg_T]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> BNF_Greatest_Fixpoint.relInvImage UNIV (Id_on car_final2) Rep_JF2 \<subseteq> Id [PROOF STEP] apply (rule relInvImage_Id_on) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>a1 a2. 
\<lbrakk>sbis UNIV UNIV dtor1 dtor2 R1 R2; sbis car_final1 car_final2 str_final1 str_final2 (BNF_Greatest_Fixpoint.relImage R1 Rep_JF1) (BNF_Greatest_Fixpoint.relImage R2 Rep_JF2); sbis carT1 carT2 strT1 strT2 (BNF_Greatest_Fixpoint.relInvImage carT1 (lsbis1 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis1 carT1 carT2 strT1 strT2))) (BNF_Greatest_Fixpoint.relInvImage carT2 (lsbis2 car_final1 car_final2 str_final1 str_final2) (Equiv_Relations.proj (lsbis2 carT1 carT2 strT1 strT2)))\<rbrakk> \<Longrightarrow> (Rep_JF2 a1 = Rep_JF2 a2) = (a1 = a2) [PROOF STEP] apply (rule Rep_JF2_inject) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 57879, "file": "BNF_Operations_GFP", "length": 76}
[STATEMENT] lemma even_bit_succ_iff: \<open>bit (1 + a) n \<longleftrightarrow> bit a n \<or> n = 0\<close> if \<open>even a\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. bit ((1::'a) + a) n = (bit a n \<or> n = 0) [PROOF STEP] using that [PROOF STATE] proof (prove) using this: even a goal (1 subgoal): 1. bit ((1::'a) + a) n = (bit a n \<or> n = 0) [PROOF STEP] by (cases \<open>n = 0\<close>) (simp_all add: bit_iff_odd)
{"llama_tokens": 203, "file": null, "length": 2}
""" ============== Rotating a Map ============== How to rotate a map. """ import matplotlib.pyplot as plt import astropy.units as u import sunpy.data.sample import sunpy.map ############################################################################### # We start with the sample data aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) ############################################################################## # `~sunpy.map.GenericMap` provides the `~sunpy.map.GenericMap.rotate` method # which accepts an angle. This returns a rotated map and does not rotate in # place. The data array size is expanded so that none of the original data is # lost due to clipping. Note that subsequent rotations are not compounded. # The map is only rotated by the specified amount from the original maps # orientation. aia_rotated = aia_map.rotate(angle=30 * u.deg) ############################################################################### # Let's now plot the results. fig = plt.figure() ax = plt.subplot(projection=aia_rotated) aia_rotated.plot() aia_rotated.draw_limb() aia_rotated.draw_grid() plt.show()
{"hexsha": "5a543acae103964fe3d0c5a7578a3d18f615ef65", "size": 1117, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/map/map_rotation.py", "max_stars_repo_name": "johan12345/sunpy", "max_stars_repo_head_hexsha": "56e1ab0c2c992f99e0fe3e6bff468b731a51228c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-02T13:01:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-27T20:05:31.000Z", "max_issues_repo_path": "examples/map/map_rotation.py", "max_issues_repo_name": "johan12345/sunpy", "max_issues_repo_head_hexsha": "56e1ab0c2c992f99e0fe3e6bff468b731a51228c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-11T13:38:56.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-11T13:38:56.000Z", "max_forks_repo_path": "examples/map/map_rotation.py", "max_forks_repo_name": "johan12345/sunpy", "max_forks_repo_head_hexsha": "56e1ab0c2c992f99e0fe3e6bff468b731a51228c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0277777778, "max_line_length": 79, "alphanum_fraction": 0.581915846, "include": true, "reason": "import astropy", "num_tokens": 224}
""" Mattsson2014 Coefficients of the SBP operators given in Mattsson (2014) Diagonal-norm summation by parts operators for fiite difference approximations of third and fourth derivatives. Journal of Computational Physics 274, pp. 432-454. """ struct Mattsson2014 <: SourceOfCoefficients end function Base.show(io::IO, ::Mattsson2014) print(io, " Mattsson (2014) \n", " Diagonal-norm summation by parts operators for fiite difference approximations\n", " of third and fourth derivatives. \n", " Journal of Computational Physics 274, pp. 432-454. \n") end function first_derivative_coefficients(source::Mattsson2014, order::Int, T=Float64, parallel=Val{:serial}()) if order == 2 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,2}(SVector(T(-1), T(1) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(1//2)) central_coef = zero(T) lower_coef = -upper_coef left_weights = SVector(T(1//2)) right_weights = left_weights left_boundary_derivatives = Tuple{}() right_boundary_derivatives = left_boundary_derivatives DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 1, order, source) elseif order == 4 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,6}(SVector(T(-50400//35809), T(526249//322281), T(-75733//1933686), T(-50767//322281), T(-4417//71618), T(31850//966843) )), # d2 DerivativeCoefficientRow{T,1,6}(SVector(T(-526249//1077057), T(0), T(1421209//3231171), T(16657//239346), T(-16934//1077057), T(-33059//6462342) )), # d3 DerivativeCoefficientRow{T,1,6}(SVector(T(75733//5541372), T(-1421209//2770686), T(0), T(631187//1385343), T(400139//5541372), T(-8789//307854) )), # d4 DerivativeCoefficientRow{T,1,6}(SVector(T(50767//811962), T(-16657//180436), T(-631187//1217943), T(0), T(496403//811962), T(-308533//4871772) )), # d5 DerivativeCoefficientRow{T,1,7}(SVector(T(4417//211146), T(16934//950157), T(-400139//5700942), 
T(-496403//950157), T(0), T(1805647//2850471), T(-2800//35191) )), # d6 DerivativeCoefficientRow{T,1,8}(SVector(T(-31850//2713743), T(33059//5427486), T(8789//301527), T(308533//5427486), T(-1805647//2713743), T(0), T(22400//33503), T(-2800//33503) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(2//3), T(-1//12)) central_coef = zero(T) lower_coef = -upper_coef left_weights = SVector( T(35809//100800), T(13297//11200), T(5701//5600), T(45109//50400), T(35191//33600), T(33503//33600) ) right_weights = left_weights left_boundary_derivatives = Tuple{}() right_boundary_derivatives = left_boundary_derivatives DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 1, order, source) elseif order == 6 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,8}(SVector(T(-508032//318365), T(113221347//55519750), T(-3338172//27759875), T(-1002751721//2731571700), T(-46815789//455261950), T(1228638//9105239), T(7120579//166559250), T(-10874619//350201500) )), # d2 DerivativeCoefficientRow{T,1,8}(SVector(T(-4642075227//10228748530), T(0), T(69095487//265681780), T(32352081//146124979), T(28592150//438374937), T(-137946309//1461249790), T(-720387//44961532), T(538846039//30686245590) )), # d3 DerivativeCoefficientRow{T,1,8}(SVector(T(45621684//696580885), T(-23031829//36186020), T(0), T(28368209//39804622), T(-9693137//39804622), T(3868089//30618940), T(-2468403//199023110), T(-1686470//139316177) )), # d4 DerivativeCoefficientRow{T,1,8}(SVector(T(1002751721//11591207628), T(-32352081//137990567), T(-85104627//275981134), T(0), T(17499453//42458636), T(13059537//275981134), T(4924918//413971701), T(-29088585//1931867938) )), # d5 DerivativeCoefficientRow{T,1,8}(SVector(T(46815789//1188140954), T(-28592150//254601633), T(29079411//169734422), T(-17499453//26112988), T(0), T(112822635//169734422), T(-9119079//84867211), 
T(103152839//7128845724) )), # d6 DerivativeCoefficientRow{T,1,9}(SVector(T(-2047730//52061009), T(45982103//371864350), T(-3868089//57209900), T(-4353179//74372870), T(-7521509//14874574), T(0), T(474569879//743728700), T(-138109864//1301525225), T(4032//260045) )), # d7 DerivativeCoefficientRow{T,1,10}(SVector(T(-291943739//21305233950), T(720387//31216460), T(7405209//1014534950), T(-4924918//304360485), T(9119079//101453495), T(-1423709637//2029069900), T(0), T(5309800707//7101744650), T(-108864//709465), T(12096//709465) )), # d8 DerivativeCoefficientRow{T,1,11}(SVector(T(10874619//1121684300), T(-538846039//21872843850), T(1011882//145818959), T(5817717//291637918), T(-103152839//8749137540), T(414329592//3645473975), T(-5309800707//7290947950), T(0), T(762048//1019713), T(-762048//5098565), T(84672//5098565) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(3//4), T(-3//20), T(1//60)) central_coef = zero(T) lower_coef = -upper_coef left_weights = SVector( T(318365//1016064), T(145979//103680), T(139177//241920), T(964969//725760), T(593477//725760), T(52009//48384), T(141893//145152), T(1019713//1016064) ) right_weights = left_weights left_boundary_derivatives = Tuple{}() right_boundary_derivatives = left_boundary_derivatives DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 1, order, source) else throw(ArgumentError("Order $order not implemented/derived.")) end end function second_derivative_coefficients(source::Mattsson2014, order::Int, T=Float64, parallel=Val{:serial}()) if order == 2 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,3}(SVector(T(1), T(-2), T(1) )), ) right_boundary = left_boundary upper_coef = SVector(T(1)) central_coef = T(-2) lower_coef = upper_coef left_weights = SVector(T(1//2)) right_weights = left_weights left_boundary_derivatives = ( # first derivative 
DerivativeCoefficientRow{T,1,3}(SVector(T(-3//2), T(2), T(-1//2) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 2, order, source) elseif order == 4 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,6}(SVector(T(8027765//3867372), T(-1690702//322281), T(8240267//1933686), T(-1030334//966843), T(-6817//143236), T(21380//966843) )), # d2 DerivativeCoefficientRow{T,1,6}(SVector(T(1030898//1077057), T(-23780195//12924684), T(2540018//3231171), T(26257//239346), T(-12268//3231171), T(-119459//12924684) )), # d3 DerivativeCoefficientRow{T,1,6}(SVector(T(75467//5541372), T(1270009//1385343), T(-558115//307854), T(1111174//1385343), T(551339//5541372), T(-8789//461781) )), # d4 DerivativeCoefficientRow{T,1,6}(SVector(T(-61567//1217943), T(26257//180436), T(1111174//1217943), T(-5126635//2435886), T(464003//405981), T(-222133//4871772) )), # d5 DerivativeCoefficientRow{T,1,7}(SVector(T(-6817//422292), T(-12268//2850471), T(551339//5700942), T(928006//950157), T(-25370795//11401884), T(3568094//2850471), T(-2800//35191) )), # d6 DerivativeCoefficientRow{T,1,8}(SVector(T(21380//2713743), T(-119459//10854972), T(-17578//904581), T(-222133//5427486), T(3568094//2713743), T(-9063745//3618324), T(44800//33503), T(-2800//33503) )), ) right_boundary = left_boundary upper_coef = SVector(T(4//3), T(-1//12)) central_coef = T(-5//2) lower_coef = upper_coef left_weights = SVector( T(35809//100800), T(13297//11200), T(5701//5600), T(45109//50400), T(35191//33600), T(33503//33600) ) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,4}(SVector(T(-11/6), T(3), T(-3//2), T(1//3) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, 
right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 2, order, source) elseif order == 6 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,8}(SVector(T(24055498439//8194715100), T(-9925742373//1138154875), T(983223468//103468625), T(-18528585641//4097357550), T(16391451//22207900), T(13725804//227630975), T(355447739//20486787750), T(-24731721//2276309750) )), # d2 DerivativeCoefficientRow{T,1,8}(SVector(T(270821931//300845545), T(-42416226217//26302496220), T(685962357//1461249790), T(2160993//8595587), T(59905900//1315124811), T(-158509509//2922499580), T(-9667431//1461249790), T(634102039//92058736770) )), # d3 DerivativeCoefficientRow{T,1,8}(SVector(T(-2479644//63325535), T(228654119//199023110), T(-16197861//7237204), T(24739409//19902311), T(-7878737//39804622), T(1917829//18093010), T(-7508403//398046220), T(-41036//10716629) )), # d4 DerivativeCoefficientRow{T,1,8}(SVector(T(-1092927401//17386811442), T(36736881//137990567), T(74218227//137990567), T(-7780367599//4967660412), T(210256089//275981134), T(1500627//25089194), T(1246172//95531931), T(-37555785//3863735876) )), # d5 DerivativeCoefficientRow{T,1,8}(SVector(T(-54436269//2376281908), T(59905900//763804899), T(-23636211//169734422), T(210256089//169734422), T(-7116321131//3055219596), T(8176215//6528247), T(-7304679//84867211), T(84101639//10693268586) )), # d6 DerivativeCoefficientRow{T,1,9}(SVector(T(4575268//260305045), T(-52836503//743728700), T(1917829//33805850), T(500209//6761170), T(545081//572099), T(-324760747//148745740), T(461969879//371864350), T(-11753624//118320475), T(2688//260045) )), # d7 DerivativeCoefficientRow{T,1,10}(SVector(T(355447739//63915701850), T(-9667431//1014534950), T(-22525209//2029069900), T(1246172//70237035), T(-7304679//101453495), T(1385909637//1014534950), T(-48284442317//18261629100), T(5288632707//3550872325), T(-108864//709465), T(8064//709465) )), # d8 
DerivativeCoefficientRow{T,1,11}(SVector(T(-24731721//7290947950), T(634102039//65618531550), T(-123108//56084215), T(-7511157//583275836), T(84101639//13123706310), T(-35260872//331406725), T(5288632707//3645473975), T(-70820489957//26247412620), T(1524096//1019713), T(-762048//5098565), T(56448//5098565) )), ) right_boundary = left_boundary upper_coef = SVector(T(3//2), T(-3//20), T(1//90)) central_coef = T(-49//18) lower_coef = upper_coef left_weights = SVector( T(318365//1016064), T(145979//103680), T(139177//241920), T(964969//725760), T(593477//725760), T(52009//48384), T(141893//145152), T(1019713//1016064) ) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,5}(SVector(T(-25/12), T(4), T(-3), T(4//3), T(-1//4) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 2, order, source) else throw(ArgumentError("Order $order not implemented/derived.")) end end function third_derivative_coefficients(source::Mattsson2014, order::Int, T=Float64, parallel=Val{:serial}()) if order == 2 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,4}(SVector(T(1//4), T(-5//8), T(1//2), T(-1//8) )), # d2 DerivativeCoefficientRow{T,1,4}(SVector(T(-11//16), T(2), T(-31//16), T(5//8) )), # d3 DerivativeCoefficientRow{T,1,5}(SVector(T(-1//2), T(15//16), T(1//8), T(-17//16), T(1//2) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(-1), T(1//2)) central_coef = T(0) lower_coef = -upper_coef left_weights = SVector(T(1//2)) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,3}(SVector(T(-3//2), T(2), T(-1//2) )), # second derivative DerivativeCoefficientRow{T,1,3}(SVector(T(1), T(-2), T(1) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], 
left_boundary_derivatives[2] ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 3, order, source) elseif order == 4 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,6}(SVector(T(-32200//35809), T(188187//71618), T(-90183//35809), T(27988//35809), T(-801//35809), T(2205//71618) )), # d2 DerivativeCoefficientRow{T,1,6}(SVector(T(-96329//79782), T(50400//13297), T(-163046//39891), T(63583//39891), T(-1337//26594), T(-1567//39891) )), # d3 DerivativeCoefficientRow{T,1,6}(SVector(T(-11939//34206), T(5923//17103), T(6300//5701), T(-32543//17103), T(29083//34206), T(-284//5701) )), # d4 DerivativeCoefficientRow{T,1,7}(SVector(T(5606//45109), T(-89949//90218), T(72429//45109), T(2800//45109), T(-77319//45109), T(95517//90218), T(-6300//45109) )), # d5 DerivativeCoefficientRow{T,1,8}(SVector(T(267//35191), T(4011//70382), T(-29083//35191), T(51546//35191), T(0), T(-108271//70382), T(33600//35191), T(-4200//35191) )), # d6 DerivativeCoefficientRow{T,1,9}(SVector(T(-735//67006), T(1567//33503), T(1704//33503), T(-31839//33503), T(108271//67006), T(0), T(-54600//33503), T(33600//33503), T(-4200//33503) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(-13//8), T(1), T(-1//8)) central_coef = T(0) lower_coef = -upper_coef left_weights = SVector( T(35809//100800), T(13297//11200), T(5701//5600), T(45109//50400), T(35191//33600), T(33503//33600) ) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,4}(SVector(T(-11/6), T(3), T(-3//2), T(1//3) )), # second derivative DerivativeCoefficientRow{T,1,4}(SVector(T(2), T(-5), T(4), T(-1) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], left_boundary_derivatives[2] ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, 
left_weights, right_weights, parallel, 3, order, source) elseif order == 6 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,8}(SVector(T(-151704//63673), T(15270676769//1821047800), T(-443349971//41387450), T(2063356637//364209560), T(-39300617//45526195), T(-11473393//364209560), T(-38062741//455261950), T(40315779//1821047800) )), # d2 DerivativeCoefficientRow{T,1,8}(SVector(T(-13333381409//8182998824), T(829440//145979), T(-8702160983//1168999832), T(1321219979//292249958), T(-1463113021//1168999832), T(1240729//20874997), T(102110955//1168999832), T(-50022767//2045749706) )), # d3 DerivativeCoefficientRow{T,1,8}(SVector(T(14062931//75990642), T(-1261072297//477655464), T(1088640//139177), T(-4530616889//477655464), T(602572103//119413866), T(-116503713//159218488), T(-17846623//59706933), T(343537955//3343588248) )), # d4 DerivativeCoefficientRow{T,1,8}(SVector(T(661223855//7727471752), T(-214194059//275981134), T(1209539129//1103924536), T(645120//964969), T(-2321979501//1103924536), T(327603877//275981134), T(-175223717//1103924536), T(1353613//965933969) )), # d5 DerivativeCoefficientRow{T,1,9}(SVector(T(-91064195//594070477), T(632843581//678937688), T(-446896583//169734422), T(2045223021//678937688), T(22680//593477), T(-1804641793//678937688), T(311038417//169734422), T(-1932566239//4752563816), T(21168//593477) )), # d6 DerivativeCoefficientRow{T,1,10}(SVector(T(11473393//1249464216), T(-8685103//111559305), T(116503713//297491480), T(-327603877//223118610), T(1804641793//892474440), T(0), T(-1760949511//892474440), T(2105883973//1561830270), T(-72576//260045), T(7056//260045) )), # d7 DerivativeCoefficientRow{T,1,11}(SVector(T(38062741//1420348930), T(-20422191//162325592), T(17846623//101453495), T(175223717//811627960), T(-311038417//202906990), T(1760949511//811627960), T(0), T(-1081094773//516490520), T(1022112//709465), T(-217728//709465), T(21168//709465) )), # d8 DerivativeCoefficientRow{T,1,12}(SVector(T(-40315779//5832758360), 
T(50022767//1458189590), T(-68707591//1166551672), T(-1353613//729094795), T(1932566239//5832758360), T(-2105883973//1458189590), T(1081094773//530250760), T(0), T(-10329984//5098565), T(7154784//5098565), T(-1524096//5098565), T(148176//5098565) )), ) right_boundary = .- left_boundary upper_coef = SVector(T(-61//30), T(169//120), T(-3//10), T(7//240)) central_coef = T(0) lower_coef = -upper_coef left_weights = SVector( T(318365//1016064), T(145979//103680), T(139177//241920), T(964969//725760), T(593477//725760), T(52009//48384), T(141893//145152), T(1019713//1016064) ) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,5}(SVector(T(-25//12), T(4), T(-3), T(4//3), T(-1//4) )), # second derivative DerivativeCoefficientRow{T,1,5}(SVector(T(35//12), T(-26//3), T(19//2), T(-14//3), T(11//12) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], left_boundary_derivatives[2] ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 3, order, source) else throw(ArgumentError("Order $order not implemented/derived.")) end end function fourth_derivative_coefficients(source::Mattsson2014, order::Int, T=Float64, parallel=Val{:serial}()) if order == 2 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,4}(SVector(T(8//5), T(-24//5), T(24//5), T(-8//5) )), # d2 DerivativeCoefficientRow{T,1,4}(SVector(T(-2//5), T(6//5), T(-6//5), T(2//5) )), # d3 DerivativeCoefficientRow{T,1,5}(SVector(T(2//5), T(-11//5), T(21//5), T(-17//5), T(1) )), # d4 DerivativeCoefficientRow{T,1,6}(SVector(T(1//5), T(2//5), T(-17//5), T(-29//5), T(-4), T(1) )), ) right_boundary = left_boundary upper_coef = SVector(T(-4), T(1)) central_coef = T(6) lower_coef = upper_coef left_weights = SVector(T(1//2)) right_weights = left_weights left_boundary_derivatives = ( # first derivative 
DerivativeCoefficientRow{T,1,3}(SVector(T(-3//2), T(2), T(-1//2) )), # second derivative DerivativeCoefficientRow{T,1,3}(SVector(T(1), T(-2), T(1) )), # third derivative DerivativeCoefficientRow{T,1,4}(SVector(T(-1), T(3), T(-3), T(1) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], left_boundary_derivatives[2], -left_boundary_derivatives[3], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 4, order, source) elseif order == 4 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,6}(SVector(T(-242219//644562), T(881057//644562), T(-183673//107427), T(220981//322281), T(109057//644562), T(-29273//214854) )), # d2 DerivativeCoefficientRow{T,1,6}(SVector(T(578657//2154114), T(-703457//718038), T(1327457//1077057), T(-544543//1077057), T(-79457//718038), T(204257//2154114) )), # d3 DerivativeCoefficientRow{T,1,6}(SVector(T(219527//307854), T(-2754943//923562), T(2216981//461781), T(-559673//153927), T(1141057//923562), T(-120619//923562) )), # d4 DerivativeCoefficientRow{T,1,7}(SVector(T(69781//811962), T(665057//811962), T(-584873//135327), T(2995381//405981), T(-4614143//811962), T(172109//90218), T(-8400//45109) )), # d5 DerivativeCoefficientRow{T,1,8}(SVector(T(8389//146178), T(-79457//633438), T(1141057//950157), T(-4614143//950157), T(557127//70382), T(-11293343//1900314), T(67200//35191), T(-5600//35191) )), # d6 DerivativeCoefficientRow{T,1,9}(SVector(T(-29273//603054), T(204257//1809162), T(-120619//904581), T(172109//100509), T(-11293343//1809162), T(16787381//1809162), T(-218400//33503), T(67200//33503), T(-5600//33503) )), ) right_boundary = left_boundary upper_coef = SVector(T(-13//2), T(2), T(-1//6)) central_coef = T(28//3) lower_coef = upper_coef left_weights = SVector( T(35809//100800), T(13297//11200), T(5701//5600), T(45109//50400), T(35191//33600), T(33503//33600) ) right_weights = left_weights 
left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,4}(SVector(T(-11/6), T(3), T(-3//2), T(1//3) )), # second derivative DerivativeCoefficientRow{T,1,4}(SVector(T(2), T(-5), T(4), T(-1) )), # third derivative DerivativeCoefficientRow{T,1,4}(SVector(T(-1), T(3), T(-3), T(1) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], left_boundary_derivatives[2], -left_boundary_derivatives[3], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 4, order, source) elseif order == 6 left_boundary = ( # d1 DerivativeCoefficientRow{T,1,8}(SVector(T(37567391168//53948541075), T(-95834307667//35965694050), T(252350074//65392171), T(-58232913019//21579416430), T(4040770588//3596569405), T(-15248255797//35965694050), T(4832196698//53948541075), T(134156001//7193138810) )), # d2 DerivativeCoefficientRow{T,1,8}(SVector(T(29125918379//23087746682), T(-1255810938848//242421340161), T(1289206067431//161614226774), T(-431078362378//80807113387), T(494586219497//484842680322), T(31446420748//80807113387), T(-21701585799//161614226774), T(334788562//242421340161) )), # d3 DerivativeCoefficientRow{T,1,8}(SVector(T(1308658570//3001630359), T(-88210933529//66035867898), T(13622370452//11005977983), T(-27138341627//66035867898), T(23881355534//33017933949), T(-26412188989//22011955966), T(21399717536//33017933949), T(-928716467//9433695414) )), # d4 DerivativeCoefficientRow{T,1,8}(SVector(T(110582060185//457852701306), T(-22954806538//76308783551), T(-180184675067//152617567102), T(678091654628//228926350653), T(-378329435643//152617567102), T(69519106966//76308783551), T(-98928859751//457852701306), T(4720003312//76308783551) )), # d5 DerivativeCoefficientRow{T,1,9}(SVector(T(1870177580//46931567683), T(-21945155863//281589406098), T(45403496174//46931567683), T(-384706366203//93863135366), 
T(974238057544//140794703049), T(-520477408939//93863135366), T(99162460006//46931567683), T(-99640101991//281589406098), T(21168//593477) )), # d6 DerivativeCoefficientRow{T,1,10}(SVector(T(-15248255797//123384591330), T(31446420748//61692295665), T(-26412188989//41128197110), T(69519106966//61692295665), T(-520477408939//123384591330), T(155376599432//20564098555), T(-772894368601//123384591330), T(21159425698//8813185095), T(-96768//260045), T(7056//260045) )), # d7 DerivativeCoefficientRow{T,1,11}(SVector(T(690313814//24044478315), T(-21701585799//112207565470), T(21399717536//56103782735), T(-98928859751//336622696410), T(99162460006//56103782735), T(-772894368601//112207565470), T(1826861184956//168311348205), T(-915425403107//112207565470), T(2044224//709465), T(-290304//709465), T(21168//709465) )), # d8 DerivativeCoefficientRow{T,1,12}(SVector(T(134156001//23039395522), T(334788562//172795466415), T(-6501015269//115196977610), T(4720003312//57598488805), T(-99640101991//345590932830), T(148115979886//57598488805), T(-915425403107//115196977610), T(1952118169516//172795466415), T(-41319936//5098565), T(14309568//5098565), T(-2032128//5098565), T(148176//5098565) )), ) right_boundary = left_boundary upper_coef = SVector(T(-122//15), T(169//60), T(-2//5), T(7//240)) central_coef = T(91//8) lower_coef = upper_coef left_weights = SVector( T(318365//1016064), T(145979//103680), T(139177//241920), T(964969//725760), T(593477//725760), T(52009//48384), T(141893//145152), T(1019713//1016064) ) right_weights = left_weights left_boundary_derivatives = ( # first derivative DerivativeCoefficientRow{T,1,5}(SVector(T(-25//12), T(4), T(-3), T(4//3), T(-1//4) )), # second derivative DerivativeCoefficientRow{T,1,5}(SVector(T(35//12), T(-26//3), T(19//2), T(-14//3), T(11//12) )), # third derivative DerivativeCoefficientRow{T,1,5}(SVector(T(-5//2), T(9), T(-12), T(7), T(-3//2) )), ) right_boundary_derivatives = ( -left_boundary_derivatives[1], left_boundary_derivatives[2], 
-left_boundary_derivatives[3], ) DerivativeCoefficients(left_boundary, right_boundary, left_boundary_derivatives, right_boundary_derivatives, lower_coef, central_coef, upper_coef, left_weights, right_weights, parallel, 4, order, source) else throw(ArgumentError("Order $order not implemented/derived.")) end end
{"hexsha": "7144f9eac7ebf0f143ad1867b4e35203675b496a", "size": 55101, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SBP_coefficients/Mattsson2014.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SummationByPartsOperators.jl-9f78cca6-572e-554e-b819-917d2f1cf240", "max_stars_repo_head_hexsha": "9d665594279c4131d3132c8d3fc9db2ea17b912d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-02T10:17:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-02T10:17:34.000Z", "max_issues_repo_path": "src/SBP_coefficients/Mattsson2014.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SummationByPartsOperators.jl-9f78cca6-572e-554e-b819-917d2f1cf240", "max_issues_repo_head_hexsha": "99379add278e0463145289703273681b2c291da7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/SBP_coefficients/Mattsson2014.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SummationByPartsOperators.jl-9f78cca6-572e-554e-b819-917d2f1cf240", "max_forks_repo_head_hexsha": "99379add278e0463145289703273681b2c291da7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9813857291, "max_line_length": 109, "alphanum_fraction": 0.3207745776, "num_tokens": 9673}
import torch
import numpy as np
import random


def stackmix(x, y, alpha, prob, nframes=64):
    """StackMix augmentation: splice two clips along the temporal axis.

    With probability ``prob``, keep the first ``cut_idx`` frames of each clip
    and take the remaining frames from a randomly permuted batch element.
    ``cut_idx`` is drawn from Beta(alpha, alpha) scaled to ``nframes``.

    :param x: video batch, indexed as (B, C, T, H, W) with T == nframes
    :param y: per-frame target, indexed as (B, K, T)
    :param alpha: Beta-distribution concentration parameter
    :param prob: probability of applying the augmentation (must be >= 0)
    :param nframes: number of frames along the temporal axis
    :return: (mixed_x, mixed_y, cls_y) when applied, otherwise (x, y, y)
    :raises ValueError: if prob is negative
    """
    if prob < 0:
        raise ValueError('prob must be a positive value')
    k = random.random()
    if k > 1 - prob:
        batch_size = x.size()[0]
        batch_idx = torch.randperm(batch_size)
        lam = np.random.beta(alpha, alpha)
        cut_idx = int(lam * nframes)
        shuffled_x = torch.cat((x[:, :, :cut_idx, :, :], x[batch_idx][:, :, cut_idx:, :, :]), dim=2)
        shuffled_y = torch.cat((y[:, :, :cut_idx], y[batch_idx][:, :, cut_idx:]), dim=2)
        # Classification target: each source clip's labels are weighted by its
        # share of the temporal extent.
        cls_y = torch.cat((y[:, :, :cut_idx] * (cut_idx / nframes),
                           y[batch_idx][:, :, cut_idx:] * (1 - cut_idx / nframes)), dim=2)
        return shuffled_x, shuffled_y, cls_y
    else:
        return x, y, y


def tubemix(x, y, alpha, prob):
    """TubeMix augmentation: paste a spatial tube (same box in every frame)
    from a permuted batch element into ``x`` (in place) and mix labels by the
    actual (clipped) box-area ratio.

    :param x: video batch, indexed as (B, C, T, H, W); modified in place
    :param y: clip-level target of shape (B, ...)
    :param alpha: Beta-distribution concentration parameter
    :param prob: probability of applying the augmentation (must be >= 0)
    :return: (x, mixed_y) when applied, otherwise (x, y)
    :raises ValueError: if prob is negative
    """
    if prob < 0:
        raise ValueError('prob must be a positive value')
    k = random.random()
    if k > 1 - prob:
        batch_size = x.size()[0]
        batch_idx = torch.randperm(batch_size)
        lam = np.random.beta(alpha, alpha)
        bbx1, bby1, bbx2, bby2 = rand_bbox(x[:, :, 0, :, :].size(), lam)
        x[:, :, :, bbx1:bbx2, bby1:bby2] = x[batch_idx, :, :, bbx1:bbx2, bby1:bby2]
        # Recompute lam from the clipped box so the label mix matches the
        # pixels that were actually replaced.
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
        tube_y = y * lam + y[batch_idx] * (1 - lam)
        return x, tube_y
    else:
        return x, y


def speed_jitter(frames, alpha, speed_mode, mode='rgb', max_speed=5, prob=0.5):
    """Locally slow down a clip around a random key frame by inserting
    frame-averaged (interpolated) frames on both sides of it.

    The key index is drawn so that 2*speed+1 <= key_idx <= N-(2*speed+1);
    the buffer allocation below assumes 64 frames at 224x224 resolution.

    :param frames: video batch of shape (B, C, 64, 224, 224)
    :param alpha: Beta-distribution parameter for the key-frame position
    :param speed_mode: 'variation' for a random speed in [0, max_speed],
        otherwise a value convertible to int giving a fixed speed
    :param mode: 'rgb' allocates 3 channels, anything else 2 (e.g. flow)
    :param max_speed: upper bound for the random speed in 'variation' mode
    :param prob: probability of applying the jitter
    :return: jittered frames (64 frames, same as input length), or the
        input unchanged
    """
    k = random.random()
    with torch.no_grad():
        # BUGFIX: the original gated on `k > 0`, which fires (almost) always
        # and silently ignored `prob`; gate on `prob` like stackmix/tubemix.
        if k > 1 - prob:
            if speed_mode == 'variation':
                speed = np.random.choice([i for i in range(max_speed + 1)])
            else:
                speed = int(speed_mode)
            lam = np.random.beta(alpha, alpha)
            key_idx = int((64 - 2 * (2 * speed + 1)) * lam + 2 * speed + 1)
            batch_size = frames.size(0)
            n_channels = 3 if mode == 'rgb' else 2
            # Allocate on the input's device instead of hard-coding .cuda();
            # identical behavior for GPU inputs, and also works on CPU.
            new_group = torch.randn(batch_size, n_channels, (2 * speed + 1) * 2, 224, 224,
                                    device=frames.device)
            # 'b' fills the interpolated frames before the key frame,
            # 'a' the ones after it.
            for side in ['b', 'a']:
                for i in range(speed):
                    if side == 'b':
                        new_group[:, :, i * 2, :, :] = frames[:, :, key_idx - speed - speed - 1 + i, :, :]
                        new_group[:, :, i * 2 + 1, :, :] = \
                            frames[:, :, key_idx - speed - speed - 1 + i, :, :] * 0.5 + \
                            frames[:, :, key_idx - speed - speed - 1 + i + 1, :, :] * 0.5
                    if side == 'a':
                        new_group[:, :, 2 * speed + 1 + i * 2, :, :] = frames[:, :, key_idx + speed + 1 + i, :, :]
                        new_group[:, :, 2 * speed + 2 + i * 2, :, :] = \
                            frames[:, :, key_idx + speed + 1 + i, :, :] * 0.5 + \
                            frames[:, :, key_idx + speed + 1 + i + 1, :, :] * 0.5
            # Boundary frames of the slowed-down group are copied verbatim.
            new_group[:, :, 2 * speed, :, :] = frames[:, :, key_idx - speed - 1, :, :]
            new_group[:, :, 4 * speed + 1, :, :] = frames[:, :, key_idx + 2 * speed + 1, :, :]
            # Reassemble: prefix + slowed 'before' group + key frame +
            # slowed 'after' group + suffix (total length stays 64).
            new_frames = torch.cat((frames[:, :, :key_idx - 2 * speed - 1, :, :],
                                    new_group[:, :, :2 * speed + 1, :, :],
                                    frames[:, :, key_idx, :, :].unsqueeze(2),
                                    new_group[:, :, 2 * speed + 1:, :, :],
                                    frames[:, :, key_idx + 2 * speed + 2:, :, :]), dim=2)
        else:
            return frames
    return new_frames


def rand_bbox(size, lam):
    """Sample a random box whose area is (1 - lam) of H*W, clipped to bounds.

    :param size: tensor size; W = size[2], H = size[3]
    :param lam: mixing coefficient; box side scale is sqrt(1 - lam)
    :return: (bbx1, bby1, bbx2, bby2) box corners
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2
{"hexsha": "38e63ebae576344c83d3c02c5cbc4c03a73bd192", "size": 3928, "ext": "py", "lang": "Python", "max_stars_repo_path": "videomix.py", "max_stars_repo_name": "jayChung0302/videomix", "max_stars_repo_head_hexsha": "528052cce1ee5a21a755e5d963e3fe58a308a0ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-06-26T07:09:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T02:43:35.000Z", "max_issues_repo_path": "videomix.py", "max_issues_repo_name": "jayChung0302/videomix", "max_issues_repo_head_hexsha": "528052cce1ee5a21a755e5d963e3fe58a308a0ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "videomix.py", "max_forks_repo_name": "jayChung0302/videomix", "max_forks_repo_head_hexsha": "528052cce1ee5a21a755e5d963e3fe58a308a0ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7872340426, "max_line_length": 131, "alphanum_fraction": 0.4653767821, "include": true, "reason": "import numpy", "num_tokens": 1207}
# Minimal smoke-test script: load the Laikago foot URDF onto a plane in
# PyBullet and display one rendered camera frame with matplotlib.
import pybullet as p
import matplotlib.pyplot as plt
import numpy as np

# Connect using the PhysX backend (requires a PyBullet build with PhysX support).
p.connect(p.PhysX)
# Drop the robot foot slightly above the ground plane at z = 0.47.
p.loadURDF('urdf/laikago_description/laikago_foot.urdf', [0,0,0.47])
p.loadURDF('urdf/plane/plane.urdf')
p.setGravity(0,0, -9.81)
# Hardware-accelerated off-screen renderer used by getCameraImage.
p.loadPlugin('eglRendererPlugin')
# getCameraImage returns (width, height, rgbPixels, depth, segmentation);
# only the RGB pixel buffer is used here.
_,_, img, _,_ = p.getCameraImage(1920, 1080)
img = img[:,:,:3]  # drop the alpha channel; assumes img is an ndarray -- TODO confirm
plt.imshow(img)
plt.show()
{"hexsha": "82ec98c23ce34a89bc1cfd525f80ae658ff881b7", "size": 350, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/simple_load_urdf.py", "max_stars_repo_name": "Yunaik/drl_env", "max_stars_repo_head_hexsha": "d284e79847c59daa6ccb222f30fc7e2a86375546", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tools/simple_load_urdf.py", "max_issues_repo_name": "Yunaik/drl_env", "max_issues_repo_head_hexsha": "d284e79847c59daa6ccb222f30fc7e2a86375546", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tools/simple_load_urdf.py", "max_forks_repo_name": "Yunaik/drl_env", "max_forks_repo_head_hexsha": "d284e79847c59daa6ccb222f30fc7e2a86375546", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.4444444444, "max_line_length": 68, "alphanum_fraction": 0.7228571429, "include": true, "reason": "import numpy", "num_tokens": 121}
"""Tests for nanotune.data.export_data: label export, dataset export to .npy,
normalization correction, and per-dataset preparation (prep_data).

The tests rely on pytest fixtures (`experiment_doubledots`, `nt_dataset_pinchoff`,
`tmp_path`) supplied by the project's conftest.
"""
import os

import numpy as np
import pytest
import qcodes as qc
import scipy.fft as fp
from qcodes.dataset.experiment_container import (experiments,
                                                 load_last_experiment)
from qcodes.dataset.sqlite.database import (conn_from_dbpath_or_conn, connect,
                                            get_DB_debug, get_DB_location)
from qcodes.dataset.sqlite.queries import (finish_experiment, get_experiments,
                                           get_last_experiment, get_last_run,
                                           get_run_counter, get_runs)
from qcodes.dataset.sqlite.queries import new_experiment as ne
from scipy.ndimage import generic_gradient_magnitude, sobel
from skimage.transform import resize

import nanotune as nt
from nanotune.data.export_data import (  # subsample_2Ddata,
    correct_normalizations, export_data, export_label, prep_data)


def test_export_label():
    """export_label maps (categories, quality, ml_label) to integer labels.

    For 'dotregime' the label encodes both dot type and quality
    (singledot: 0/1, doubledot: 2/3); for matching single categories the
    label is simply the quality.
    """
    new_label = export_label(["singledot"], 0, "dotregime")
    assert new_label == 0
    new_label = export_label(["singledot"], 1, "dotregime")
    assert new_label == 1
    new_label = export_label(["doubledot"], 0, "dotregime")
    assert new_label == 2
    new_label = export_label(["doubledot"], 1, "dotregime")
    assert new_label == 3
    for category in ["outerbarriers", "pinchoff", "singledot", "doubledot"]:
        for quality in [0, 1]:
            new_label = export_label([category], quality, category)
            assert new_label == quality
    # NOTE: `quality` below deliberately reuses the last loop value (1).
    with pytest.raises(ValueError):
        new_label = export_label(["pinchoff"], quality, "just work")
    with pytest.raises(ValueError):
        new_label = export_label(["outerbarriers"], quality, "pinchoff")


def test_export_data(experiment_doubledots, tmp_path):
    """export_data writes a (data_types, n_datasets, n_points+1) array to .npy.

    The fixture provides 10 double-dot datasets of 50x50 points, hence the
    expected shape (4, 10, 2501): 4 data types, 2500 points plus one label slot.
    """
    export_data(
        "doubledot", ["temp.db"], ["doubledot"], db_folder=tmp_path, filename="temp.npy"
    )
    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    assert data_w_labels.shape == (4, 10, 2501)

    # Skipping two dataset ids removes two slices from the middle axis.
    export_data(
        "doubledot",
        ["temp.db"],
        ["doubledot"],
        skip_ids={"temp.db": [1, 10]},
        db_folder=tmp_path,
        filename="temp.npy",
    )
    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    assert data_w_labels.shape == (4, 8, 2501)

    # No dataset in the fixture is labelled 'singledot', so nothing is exported.
    export_data(
        "doubledot", ["temp.db"], ["singledot"], db_folder=tmp_path, filename="temp.npy"
    )
    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    assert data_w_labels.shape == (4, 0, 2501)

    # All fixture datasets have quality 1, so the quality filter keeps all 10.
    export_data(
        "doubledot",
        ["temp.db"],
        ["doubledot"],
        quality=1,
        db_folder=tmp_path,
        filename="temp.npy",
    )
    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    assert data_w_labels.shape == (4, 10, 2501)


def test_correct_normalizations(experiment_doubledots, tmp_path):
    """correct_normalizations rescales signals to [0, 1] and recomputes the
    derived data types (frequencies, gradient) from the corrected signal."""
    export_data(
        "doubledot", ["temp.db"], ["doubledot"], db_folder=tmp_path, filename="temp.npy"
    )
    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    sg_indx = nt.config["core"]["data_types"]["signal"]
    # Deliberately break the normalization (shift signal values above 1);
    # the last column is the label and is left untouched.
    data_w_labels[sg_indx, :, :-1] += 1
    assert np.max(data_w_labels[sg_indx, :, :-1].flatten()) >= 1
    path = os.path.join(tmp_path, "temp.npy")
    np.save(path, data_w_labels)

    correct_normalizations("temp.npy", tmp_path)

    data_w_labels = np.load(os.path.join(tmp_path, "temp.npy"))
    assert data_w_labels.shape == (4, 10, 2501)
    assert np.max(data_w_labels[sg_indx, :, :-1].flatten()) <= 1
    assert np.min(data_w_labels[sg_indx, :, :-1].flatten()) >= 0

    # Frequencies and gradient must match recomputation from the corrected
    # 50x50 signal (FFT magnitude with centered spectrum; Sobel gradient).
    data = data_w_labels[:, :, :-1]
    for did, signal in enumerate(data[sg_indx]):
        freq_spect = fp.fft2(signal.reshape(50, 50))
        freq_spect = np.abs(fp.fftshift(freq_spect))
        grad = generic_gradient_magnitude(signal.reshape(50, 50), sobel)
        index = nt.config["core"]["data_types"]["frequencies"]
        assert np.allclose(data[index, did, :], freq_spect.flatten())
        index = nt.config["core"]["data_types"]["gradient"]
        assert np.allclose(data[index, did, :], grad.flatten())


def test_prep_data_return_shape(nt_dataset_pinchoff, tmp_path):
    """prep_data flattens every data type to the standard 1D shape and
    populates the dataset's power spectrum as a side effect."""
    ds = nt.Dataset(1, db_name="temp.db", db_folder=str(tmp_path))
    shape = tuple(nt.config["core"]["standard_shapes"]["1"])

    condensed_data = prep_data(ds, "pinchoff")[0]
    assert len(ds.power_spectrum) > 0

    index = nt.config["core"]["data_types"]["signal"]
    assert len(condensed_data[index, 0, :]) == np.prod(shape)
    index = nt.config["core"]["data_types"]["frequencies"]
    assert len(condensed_data[index, 0, :]) == np.prod(shape)
    index = nt.config["core"]["data_types"]["gradient"]
    assert len(condensed_data[index, 0, :]) == np.prod(shape)
    index = nt.config["core"]["data_types"]["features"]
    assert len(condensed_data[index, 0, :]) == np.prod(shape)

    # A 1D pinch-off dataset cannot be prepared as a 2D 'doubledot' category.
    with pytest.raises(KeyError):
        condensed_data = prep_data(ds, "doubledot")[0]


def test_prep_data_normalization(nt_dataset_pinchoff, tmp_path):
    """prep_data renormalizes the raw signal in place to lie within [0, 1]."""
    ds = nt.Dataset(1, db_name="temp.db", db_folder=str(tmp_path))
    # Push values outside the normalized range before preparation.
    ds.data["dc_current"].values *= 1.4
    ds.data["dc_current"].values += 0.5
    assert np.max(ds.data["dc_current"].values) > 1
    assert np.min(ds.data["dc_current"].values) >= 0.5

    _ = prep_data(ds, "pinchoff")[0]
    assert np.max(ds.data["dc_current"].values) <= 1
    assert np.min(ds.data["dc_current"].values) <= 0.5


def test_prep_data_return_data(nt_dataset_pinchoff, tmp_path):
    """prep_data's output must equal an independent recomputation of each
    data type: resized signal, resized power spectrum, resized Sobel
    gradient, and the padded feature vector."""
    ds = nt.Dataset(1, db_name="temp.db", db_folder=str(tmp_path))
    condensed_data = prep_data(ds, "pinchoff")[0]

    shape = tuple(nt.config["core"]["standard_shapes"]["1"])
    ds_curr = ds.data["dc_current"].values
    ds_freq = ds.power_spectrum["dc_current"].values

    data_resized = resize(ds_curr, shape, anti_aliasing=True, mode="constant").flatten()
    frq = resize(ds_freq, shape, anti_aliasing=True, mode="constant").flatten()
    grad = generic_gradient_magnitude(ds_curr, sobel)
    gradient_resized = resize(
        grad, shape, anti_aliasing=True, mode="constant"
    ).flatten()
    # Feature vector is padded with the configured fill value up to the
    # standard flattened length.
    relevant_features = nt.config["core"]["features"]["pinchoff"]
    features = []
    for feat in relevant_features:
        features.append(ds.features["dc_current"][feat])
    pad_width = len(data_resized.flatten()) - len(features)
    features = np.pad(
        features,
        (0, pad_width),
        "constant",
        constant_values=nt.config["core"]["fill_value"],
    )

    index = nt.config["core"]["data_types"]["signal"]
    assert np.allclose(condensed_data[index, 0, :], data_resized)
    index = nt.config["core"]["data_types"]["frequencies"]
    assert np.allclose(condensed_data[index, 0, :], frq)
    index = nt.config["core"]["data_types"]["gradient"]
    assert np.allclose(condensed_data[index, 0, :], gradient_resized)
    index = nt.config["core"]["data_types"]["features"]
    assert np.allclose(condensed_data[index, 0, :], features)
{"hexsha": "866b483729589663db88c8ab25316ea257107857", "size": 6963, "ext": "py", "lang": "Python", "max_stars_repo_path": "nanotune/tests/data/test_data_export.py", "max_stars_repo_name": "theatlasroom/nanotune", "max_stars_repo_head_hexsha": "444edb47b34739db82e1c58a6c963cb14b223398", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-03T11:58:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-03T11:58:52.000Z", "max_issues_repo_path": "nanotune/tests/data/test_data_export.py", "max_issues_repo_name": "Ayushparikh-code/nanotune", "max_issues_repo_head_hexsha": "6d63adc64c89aa38592cf732345d38f7c18f05e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nanotune/tests/data/test_data_export.py", "max_forks_repo_name": "Ayushparikh-code/nanotune", "max_forks_repo_head_hexsha": "6d63adc64c89aa38592cf732345d38f7c18f05e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.815, "max_line_length": 88, "alphanum_fraction": 0.6494327158, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1792}
import json
import logging
from datetime import datetime, timedelta
from enum import Enum

import numpy as np
import pandas as pd

logger = logging.getLogger(__name__)


class JsonSerializable(object):
    """ Interface for serializable classes."""

    def toJson(self):
        # Enum members serialize by their name; everything else falls back
        # to the instance __dict__.
        return json.dumps(self,
                          default=lambda o: o.name if isinstance(o, Enum) else o.__dict__,
                          sort_keys=True, indent=4)

    def __repr__(self):
        return self.toJson()


def find_value_by_key_with_condition(items, condition_key, condition_value, lookup_key):
    """ Find the value of lookup key where the dictionary contains condition key = condition value.

    :param items: list of dictionaries
    :type  items: list
    :param condition_key: condition key
    :type  condition_key: str
    :param condition_value: a value for the condition key
    :param lookup_key: lookup key or key you want to find the value for
    :type  lookup_key: str
    :return: lookup value or found value for the lookup key
    :raises IndexError: if no item satisfies the condition
    """
    return [item[lookup_key] for item in items if item[condition_key] == condition_value][0]


def is_nan(obj):
    """ Checks whether the input is NaN.

    It uses the trick that NaN is the only value not equal to itself, so it
    works for plain floats as well as numpy scalars.
    """
    return obj != obj


def drop_nan(array):
    """ Drop NaN values from the given numpy array.

    :param array: 1- or 2-dimensional input array
    :type  array: np.ndarray
    :return: a new array without NaN values; for a 2-d input, any row that
        contains at least one NaN is dropped
    :rtype: np.ndarray
    :raises ValueError: if the array has more than two dimensions
    """
    if array.ndim == 1:
        return array[~np.isnan(array)]
    if array.ndim == 2:
        return array[~np.isnan(array).any(axis=1)]
    # Previously, arrays with ndim > 2 silently fell through and the function
    # returned None; fail loudly instead.
    raise ValueError('drop_nan supports only 1- or 2-dimensional arrays, '
                     'got %d dimensions' % array.ndim)


def generate_random_data():
    """ Generate random data for two variants. It can be used in unit tests or demo.

    :return: tuple of (data frame with simulated KPIs for variants A/B, metadata dict)
    :rtype: tuple(pd.DataFrame, dict)
    """
    np.random.seed(42)
    size = 10000

    data = pd.DataFrame()
    data['entity'] = list(range(size))
    data['variant'] = np.random.choice(['A', 'B'], size=size, p=[0.6, 0.4])

    # KPI with identical distribution in both variants.
    data['normal_same'] = np.random.normal(size=size)
    # KPI whose mean is shifted by +1.0 for variant B.
    data['normal_shifted'] = np.random.normal(size=size)
    size_shifted_B = data['normal_shifted'][data['variant'] == 'B'].shape[0]
    data.loc[data['variant'] == 'B', 'normal_shifted'] = np.random.normal(loc=1.0, size=size_shifted_B)

    data['feature'] = np.random.choice(['has', 'non'], size=size)
    data.loc[0, 'feature'] = 'feature that only has one data point'

    # KPI shifted only for the (variant B, feature == 'has') subgroup.
    data['normal_shifted_by_feature'] = np.random.normal(size=size)
    ii = (data['variant'] == 'B') & (data['feature'] == 'has')
    randdata_shifted_mean = np.random.normal(loc=1.0, size=ii.sum())
    data.loc[ii, 'normal_shifted_by_feature'] = randdata_shifted_mean

    data['treatment_start_time'] = np.random.choice(list(range(10)), size=size)

    # KPI with a much larger variance in variant B.
    data['normal_unequal_variance'] = np.random.normal(size=size)
    size_unequalvar_B = data['normal_unequal_variance'][data['variant'] == 'B'].shape[0]
    data.loc[data['variant'] == 'B', 'normal_unequal_variance'] = np.random.normal(scale=10, size=size_unequalvar_B)

    # Add date column: hourly timestamps rendered as dates, truncated to size.
    d1 = datetime.strptime('2015-01-01', '%Y-%m-%d')
    d2 = datetime.strptime('2016-03-01', '%Y-%m-%d')
    date_col = []
    delta = d2 - d1
    for i in range(delta.days * 24 + 1):
        date_col.append((d1 + timedelta(hours=i)).strftime('%Y-%m-%d'))
    data['date'] = date_col[:size]

    metadata = {
        'primary_KPI': 'normal_shifted',
        'source': 'simulated',
        'experiment': 'random_data_generation'
    }

    return data, metadata
{"hexsha": "2c9f00350cd12ccaa68a4de92a735c833c58104a", "size": 3437, "ext": "py", "lang": "Python", "max_stars_repo_path": "expan/core/util.py", "max_stars_repo_name": "andompesta/expan", "max_stars_repo_head_hexsha": "705081b28bb5cbc5bf93b9c1689aa45308e595f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 320, "max_stars_repo_stars_event_min_datetime": "2016-05-02T16:38:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T17:06:35.000Z", "max_issues_repo_path": "expan/core/util.py", "max_issues_repo_name": "andompesta/expan", "max_issues_repo_head_hexsha": "705081b28bb5cbc5bf93b9c1689aa45308e595f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 229, "max_issues_repo_issues_event_min_datetime": "2016-05-02T16:30:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T20:44:14.000Z", "max_forks_repo_path": "expan/core/util.py", "max_forks_repo_name": "andompesta/expan", "max_forks_repo_head_hexsha": "705081b28bb5cbc5bf93b9c1689aa45308e595f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 62, "max_forks_repo_forks_event_min_datetime": "2016-05-02T16:39:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T09:29:57.000Z", "avg_line_length": 34.0297029703, "max_line_length": 122, "alphanum_fraction": 0.6613325575, "include": true, "reason": "import numpy", "num_tokens": 902}
push!(LOAD_PATH,"../src/") using Documenter, Quiqbox makedocs( sitename="Quiqbox.jl", modules = [Quiqbox], authors="Weishi Wang", pages=[ "Home"=>"index.md" "Manual"=>[ "basis.md" "SCF.md" "optimization.md" ] "Base"=>[ "coreFunction.md" "coreType.md" "toolFunction.md" ] "Submodule"=>[ "molden.md" ] "Index"=>"list.md" ] ) deploydocs(repo="github.com/frankwswang/Quiqbox.jl.git", devbranch = "main", target = "build", push_preview = true)
{"hexsha": "a4bb32ede575ab9533aa6c57a454c3e15991f018", "size": 642, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "frankwswang/Quiqbox.jl", "max_stars_repo_head_hexsha": "e3c137d1017235c68db6389ff4a902e789cfa376", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-08-28T02:39:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T23:56:17.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "frankwswang/Quiqbox.jl", "max_issues_repo_head_hexsha": "e3c137d1017235c68db6389ff4a902e789cfa376", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-09-02T03:45:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T03:07:35.000Z", "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "frankwswang/Quiqbox.jl", "max_forks_repo_head_hexsha": "e3c137d1017235c68db6389ff4a902e789cfa376", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4, "max_line_length": 57, "alphanum_fraction": 0.4641744548, "num_tokens": 168}
import numpy as np

from pyfibre.model.tools.filters import (
    gaussian, tubeness, hysteresis,
    derivatives, form_structure_tensor,
    form_nematic_tensor
)
from pyfibre.tests.pyfibre_test_case import PyFibreTestCase


class TestFilters(PyFibreTestCase):
    """Numeric regression tests for the image filter helpers.

    All expected values are hard-coded oracles for the 5x5 fixture image
    built in ``setUp``; they pin the current filter outputs to 6 decimals.
    """

    def setUp(self):
        # 5x5 image of ones with three bright pixels along the diagonal.
        self.image = np.ones((5, 5))
        self.image[1, 1] = 5
        self.image[2, 2] = 10
        self.image[3, 3] = 8
        self.sigma = 1.0

    def test_gaussian(self):
        smoothed_image = gaussian(self.image)
        # NOTE(review): this compares the result with itself, so it is
        # trivially true — presumably meant to compare against `self.image`
        # (no-op smoothing when sigma is omitted). TODO confirm intent.
        self.assertArrayAlmostEqual(smoothed_image, smoothed_image)

        # With sigma=1.0 the peak values are blurred into the neighbourhood.
        smoothed_image = gaussian(self.image, self.sigma)
        self.assertAlmostEqual(1.80, smoothed_image.mean(), 6)
        self.assertAlmostEqual(3.0771676, smoothed_image.max(), 6)
        self.assertAlmostEqual(1.0455832, smoothed_image.min(), 6)

    def test_tubeness(self):
        # Default multi-scale tubeness response.
        tubeness_image = tubeness(self.image)
        self.assertAlmostEqual(0.53899511, tubeness_image.mean(), 6)
        self.assertAlmostEqual(1.1045664, tubeness_image.max(), 6)
        self.assertAlmostEqual(0.3038492, tubeness_image.min(), 6)

        # Restricting sigma_max changes mean/min but not the peak response.
        tubeness_image = tubeness(self.image, sigma_max=1)
        self.assertAlmostEqual(0.52744720, tubeness_image.mean(), 6)
        self.assertAlmostEqual(1.1045664, tubeness_image.max(), 6)
        self.assertAlmostEqual(0.1719257, tubeness_image.min(), 6)

    def test_hysteresis(self):
        # High threshold keeps only the three bright diagonal pixels.
        hysteresis_image = hysteresis(self.image, alpha=2.0)
        answer = np.array(
            [[0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0],
             [0, 0, 1, 0, 0],
             [0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0]]
        )
        self.assertArrayAlmostEqual(answer, hysteresis_image)

        # Low threshold accepts every pixel.
        hysteresis_image = hysteresis(self.image, alpha=0.1)
        self.assertArrayAlmostEqual(
            np.ones((5, 5)), hysteresis_image)

    def test_derivatives(self):
        # First derivatives: stacked (d/dx, d/dy) with shape (2, 5, 5).
        first_derivatives = derivatives(self.image)

        dx = np.array([[0., 4., 0., 0., 0.],
                       [0., 0., 4.5, 0., 0.],
                       [0., -2., 0., 3.5, 0.],
                       [0., 0., -4.5, 0., 0.],
                       [0., 0., 0., -7., 0.]])

        dy = np.array([[0., 0., 0., 0., 0.],
                       [4., 0., -2, 0., 0.],
                       [0., 4.5, 0., -4.5, 0.],
                       [0., 0., 3.5, 0., -7.],
                       [0., 0., 0., 0., 0.]])

        self.assertEqual((2, 5, 5), first_derivatives.shape)
        self.assertArrayAlmostEqual(dx, first_derivatives[0])
        self.assertArrayAlmostEqual(dy, first_derivatives[1])

        # Second derivatives: (ddx, dxdy, dydx, ddy) with shape (4, 5, 5);
        # the mixed partials are equal, so dxdy is asserted for both.
        second_derivatives = derivatives(self.image, rank=2)

        ddx = np.array([[0., -4., 4.5, 0., 0.],
                        [0., -3., 0, 1.75, 0.],
                        [0., 0., -4.5, 0., 0.],
                        [0., 1., 0., -5.25, 0.],
                        [0., 0., 4.5, -7., 0.]])

        dxdy = np.array([[4., 0., -2., 0., 0.],
                         [0., 2.25, 0., -2.25, 0.],
                         [-2., 0., 2.75, 0., -3.5],
                         [0., -2.25, 0., 2.25, 0.],
                         [0., 0., -3.5, 0., 7.]])

        ddy = np.array([[0., 0., 0., 0., 0.],
                        [-4., -3., 0, 1., 0.],
                        [4.5, 0, -4.5, 0, 4.5],
                        [0., 1.75, 0., -5.25, -7.],
                        [0., 0., 0., 0., 0.]])

        self.assertEqual((4, 5, 5), second_derivatives.shape)
        self.assertArrayAlmostEqual(ddx, second_derivatives[0])
        self.assertArrayAlmostEqual(dxdy, second_derivatives[1])
        self.assertArrayAlmostEqual(dxdy, second_derivatives[2])
        self.assertArrayAlmostEqual(ddy, second_derivatives[3])

    def test_form_nematic_tensor(self):
        # Single image -> per-pixel 2x2 tensor field.
        n_tensor = form_nematic_tensor(
            self.image)
        self.assertEqual((5, 5, 2, 2), n_tensor.shape)

        n_tensor = form_nematic_tensor(
            self.image, sigma=self.sigma)
        self.assertEqual((5, 5, 2, 2), n_tensor.shape)

        # Image stack -> leading batch axis is preserved.
        n_tensor = form_nematic_tensor(
            np.array([self.image, self.image]), sigma=self.sigma)
        self.assertEqual((2, 5, 5, 2, 2), n_tensor.shape)

    def test_form_structure_tensor(self):
        # Single image -> per-pixel 2x2 structure tensor field.
        j_tensor = form_structure_tensor(
            self.image, sigma=self.sigma)
        self.assertEqual((5, 5, 2, 2), j_tensor.shape)

        # Image stack -> leading batch axis is preserved.
        j_tensor = form_structure_tensor(
            np.array([self.image, self.image]), sigma=self.sigma)
        self.assertEqual((2, 5, 5, 2, 2), j_tensor.shape)
{"hexsha": "80682add72803564eb3683dc5fcbf93506123870", "size": 4563, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyfibre/model/tools/tests/test_filters.py", "max_stars_repo_name": "franklongford/ImageCol", "max_stars_repo_head_hexsha": "96f0db337a203c5634bebcbae10a6d85789dff2c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-01T14:28:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-26T05:53:01.000Z", "max_issues_repo_path": "pyfibre/model/tools/tests/test_filters.py", "max_issues_repo_name": "franklongford/ImageCol", "max_issues_repo_head_hexsha": "96f0db337a203c5634bebcbae10a6d85789dff2c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2019-12-20T12:00:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-24T11:27:16.000Z", "max_forks_repo_path": "pyfibre/model/tools/tests/test_filters.py", "max_forks_repo_name": "franklongford/PyFibre", "max_forks_repo_head_hexsha": "255a3c659931f1be0356eb49b878e3b741201955", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1, "max_line_length": 68, "alphanum_fraction": 0.5180802104, "include": true, "reason": "import numpy", "num_tokens": 1379}
import logging
import math

import numpy
import matplotlib.pyplot as plt

from dalesdata import dataslice

# Matplotlib plotting backend for dalesview.

log = logging.getLogger(__name__)


class Mpl4Dales(object):
    """Matplotlib plotting backend for DALES data slices.

    Dispatches `Profile` objects to (stacked) vertical-profile plots or a
    2-D time/height contour plot, and `TimeSeries` objects to a line plot.
    Missing values (as declared in each slice's ``miss_vals``) are masked out.
    """

    def __init__(self):
        pass

    # Plotting interface method, entry point of the class
    @staticmethod
    def plot(data, **kwargs):
        """Plot a Profile or TimeSeries.

        :param data: a dataslice.Profile or dataslice.TimeSeries
        :param kwargs: ``threshold2d`` (int, default 10): max number of time
            steps shown as stacked 1-D profiles before switching to a 2-D
            contour plot; ``show`` (bool, default True): call plt.show().
        """
        if isinstance(data, dataslice.Profile):
            num_times = len(data.times)
            threshold2d = kwargs.get("threshold2d", 10)
            if num_times <= threshold2d:
                return Mpl4Dales.plot_profiles(data, **kwargs)
            else:
                return Mpl4Dales.plot_profiles_2d(data, **kwargs)
        elif isinstance(data, dataslice.TimeSeries):
            return Mpl4Dales.plot_series(data, **kwargs)

    # Plots 1D profiles, vertically. Can handle multiple (up to 10) time steps
    # of a variable profile.
    @staticmethod
    def plot_profiles(profile, **kwargs):
        """Plot one vertical profile, or a fading stack of them over time.

        :raises Exception: for profiles with more than two dimensions.
        """
        if len(profile.shape) == 1:
            vals, heights = numpy.array(profile[:]), numpy.array(profile.heights)
            # Mask out missing values in both the variable and the height axis.
            vmask = numpy.not_equal(vals, profile.miss_vals.get(profile.variable, None))
            hmask = numpy.not_equal(heights, profile.miss_vals.get("height", None))
            mask = numpy.logical_and(vmask, hmask)
            plt.plot(vals[mask], heights[mask], color='b', linewidth=2.)
        elif len(profile.shape) == 2:
            num_plots = len(profile.times)
            lines = []
            heights = numpy.array(profile.heights)
            hmask = numpy.not_equal(heights, profile.miss_vals.get("height", None))
            for i in range(num_plots):
                time = profile.times[i]
                if time == profile.miss_vals["time"]:
                    continue
                vals = numpy.array(profile[i, :])
                vmask = numpy.not_equal(vals, profile.miss_vals.get(profile.variable, None))
                mask = numpy.logical_and(vmask, hmask)
                time_label = "time " + str(profile.times[i]) + " s"
                # Later time steps fade out via a decaying alpha.
                # Bug fix: heights must be masked with the same mask as the
                # values; plotting the full `profile.heights` against
                # `vals[mask]` raised a length mismatch whenever any value
                # was missing.
                lines.append(plt.plot(vals[mask], heights[mask], color='b',
                                      alpha=math.exp(-2 * i / num_plots),
                                      label=time_label, linewidth=2.)[0])
            if num_plots < 10:
                plt.legend(handles=lines)
        else:
            raise Exception("Plotting profiles with shape %s is not supported" % str(profile.shape))
        plt.ylabel("height [m]")
        x_label = profile.variable
        if profile.unit:
            x_label += " [" + profile.unit + "]"
        plt.xlabel(x_label)
        if kwargs.get("show", True):
            plt.show()

    # Plots 2D profile fields.
    @staticmethod
    def plot_profiles_2d(profile, **kwargs):
        """Contour-plot a profile as a function of time (hours) and height."""
        contours = plt.contourf(numpy.array(profile.times) / 3600., profile.heights,
                                profile[:, :].transpose(), 20, cmap=plt.cm.jet)
        plt.ylabel("height [m]")
        plt.xlabel("time [h]")
        c_label = profile.variable
        cb = plt.colorbar(contours)
        if profile.unit:
            c_label += " [" + profile.unit + "]"
        cb.set_label(c_label)
        if kwargs.get("show", True):
            plt.show()

    # Plots time series
    @staticmethod
    def plot_series(timeseries, **kwargs):
        """Plot a 1-D time series against time in hours.

        :raises Exception: for series with more than one dimension.
        """
        if len(timeseries.shape) == 1:
            vals, times = numpy.array(timeseries[:]), numpy.array(timeseries.times)
            vmask = numpy.not_equal(vals, timeseries.miss_vals.get(timeseries.variable, None))
            tmask = numpy.not_equal(times, timeseries.miss_vals.get("time", None))
            mask = numpy.logical_and(vmask, tmask)
            plt.plot(times[mask] / 3600., vals[mask], color='r', linewidth=2)
            plt.xlabel("time [h]")
            y_label = timeseries.variable
            if timeseries.unit:
                y_label += " [" + timeseries.unit + "]"
            plt.ylabel(y_label)
        else:
            raise Exception("Plotting time series with shape %s is not supported" %
                            str(timeseries.shape))
        if kwargs.get("show", True):
            plt.show()

    # Plots multiple data sets in subplots.
    @staticmethod
    def multiplot(data_list, **kwargs):
        """Lay out the given data sets in a roughly 3:2 grid of subplots."""
        num_data = float(len(data_list))
        ncols = int(math.ceil(math.sqrt(3 * num_data / 2)))
        nrows = int(math.ceil(math.sqrt(2 * num_data / 3)))
        k = 0
        for i in range(nrows):
            for j in range(ncols):
                if k < len(data_list):
                    plt.subplot(nrows, ncols, k + 1)
                    # Suppress per-plot show(); one show() at the end.
                    kwargs["show"] = False
                    Mpl4Dales.plot(data_list[k], **kwargs)
                    k += 1
        plt.subplots_adjust(left=0.02, right=0.98, bottom=0.02, top=0.98,
                            hspace=0.5, wspace=0.4)
        plt.show()
        return None
{"hexsha": "d48134f1436ff15c99e1ebb7f7725a9ada972624", "size": 4807, "ext": "py", "lang": "Python", "max_stars_repo_path": "dalesview/mpl4dales.py", "max_stars_repo_name": "CloudResolvingClimateModeling/dalesview", "max_stars_repo_head_hexsha": "9e1d7a96bcdadde5d5681d0478a2af9d93a8cc69", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dalesview/mpl4dales.py", "max_issues_repo_name": "CloudResolvingClimateModeling/dalesview", "max_issues_repo_head_hexsha": "9e1d7a96bcdadde5d5681d0478a2af9d93a8cc69", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-09-09T16:30:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-22T10:14:29.000Z", "max_forks_repo_path": "dalesview/mpl4dales.py", "max_forks_repo_name": "CloudResolvingClimateModeling/dalesview", "max_forks_repo_head_hexsha": "9e1d7a96bcdadde5d5681d0478a2af9d93a8cc69", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-10T07:32:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-10T07:32:21.000Z", "avg_line_length": 40.7372881356, "max_line_length": 115, "alphanum_fraction": 0.5720823799, "include": true, "reason": "import numpy", "num_tokens": 1109}
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
import matplotlib.patches as mpatches


def plot_rewards(rewards, file_name, display_interval=10):
    """Plot average reward for each time step

    :param rewards: reward received at each step
    :param file_name: the file where the figure will be saved
    :param display_interval: plot every `display_interval`-th step
    """
    x = range(1, len(rewards) + 1, display_interval)
    plt.plot(x, rewards[::display_interval])
    plt.xlabel('Step', fontsize=12)
    plt.ylabel('Average Reward', fontsize=12)
    plt.savefig(f'figures/{file_name}.png')
    plt.close()


def plot_actions(actions, n_arms, n_games, file_name):
    """Plot the average number of times each action was chosen

    :param actions: (n_steps, n_arms) array of per-step action counts
    :param n_arms: number of bandit arms
    :param n_games: number of games the counts were accumulated over
    :param file_name: the file where the figure will be saved
    """
    for i in range(n_arms):
        # Compute the average number of times each action was chosen in each step
        action_count_avg = 100 * actions[:, i] / n_games
        plt.plot(action_count_avg, linewidth=2, label=f'Arm {i + 1}')
    plt.xlabel('Step', fontsize=12)
    plt.ylabel('% of choosing the action', fontsize=12)
    plt.legend(shadow=True)
    plt.ylim([0, 100])
    plt.savefig(f'figures/{file_name}.png')
    plt.close()


def plot_blackjack_values(V, filename):
    """Plot the blackjack state-value surface, with and without a usable ace.

    :param V: dict mapping (player_sum, dealer_card, usable_ace) -> value
    :param filename: base name of the output file written under output/
    """
    def get_figure(ax, usable_ace):
        x_range = np.arange(11, 22)
        y_range = np.arange(1, 11)
        X, Y = np.meshgrid(x_range, y_range)

        # States never visited get value 0.
        Z = [V[(x, y, usable_ace)] if (x, y, usable_ace) in V else 0
             for x, y in zip(np.ravel(X), np.ravel(Y))]
        Z = np.array(Z).reshape(X.shape)

        ax.plot_surface(X, Y, Z, cmap=plt.cm.coolwarm)
        ax.set_xlabel("Player's current sum")
        ax.set_ylabel("Dealer's showing card")
        ax.set_zlabel('State value')
        ax.view_init(ax.elev, -120)

    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(211, projection='3d')
    ax.set_title('Usable ace')
    get_figure(ax, True)
    ax = fig.add_subplot(212, projection='3d')
    ax.set_title('No usable ace')
    get_figure(ax, False)

    # Bug fix: the `filename` parameter was previously unused and the path
    # contained a scrubbed placeholder literal.
    output_file = f'output/{filename}'
    plt.savefig(output_file)
    plt.close()


def plot_policy(Q, filename):
    """Plot the greedy blackjack policy (STICK/HIT) derived from Q.

    :param Q: dict mapping (player_sum, dealer_card, usable_ace) -> action values
    :param filename: base name of the output file written under output/
    """
    def get_figure(usable_ace, ax):
        x_range = np.arange(11, 22)
        y_range = np.arange(10, 0, -1)
        # Greedy action (0 = STICK, 1 = HIT) per state.
        Z = [[np.argmax(Q[(x, y, usable_ace)]) for x in x_range]
             for y in y_range]
        Z = np.array(Z)

        im = ax.imshow(Z, extent=[10.5, 21.5, 0.5, 10.5], cmap=plt.cm.coolwarm)
        ax.grid()
        ax.set_xticks(x_range)
        ax.set_yticks(y_range)
        ax.set_xlabel("Player's current sum")
        ax.set_ylabel("Dealer's showing card")

        # Create a legend for the colors used by imshow
        values = [0, 1]
        labels = ['STICK', 'HIT']
        colors = [im.cmap(im.norm(value)) for value in values]
        patches = [mpatches.Patch(color=colors[i], label=labels[i])
                   for i in range(len(values))]
        ax.legend(handles=patches)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    ax1.set_title('Usable ace')
    get_figure(True, ax1)
    ax2.set_title('No usable ace')
    get_figure(False, ax2)

    # Bug fix: use the `filename` parameter (was a scrubbed placeholder),
    # and drop the leftover debug print of the policy grid.
    output_file = f'output/{filename}'
    plt.savefig(output_file)
    plt.close()
{"hexsha": "700992f14b780e3367472f2118950bdca7cf4a4c", "size": 3136, "ext": "py", "lang": "Python", "max_stars_repo_path": "chapter06/plot_utils.py", "max_stars_repo_name": "roiyeho/drl-book", "max_stars_repo_head_hexsha": "1db635fd508e5b17ef8bfecbe49a79f55503a1f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapter06/plot_utils.py", "max_issues_repo_name": "roiyeho/drl-book", "max_issues_repo_head_hexsha": "1db635fd508e5b17ef8bfecbe49a79f55503a1f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter06/plot_utils.py", "max_forks_repo_name": "roiyeho/drl-book", "max_forks_repo_head_hexsha": "1db635fd508e5b17ef8bfecbe49a79f55503a1f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2359550562, "max_line_length": 96, "alphanum_fraction": 0.6345663265, "include": true, "reason": "import numpy", "num_tokens": 871}
#ifndef ASLAM_GRID_CALIBRATION_TARGET_DESIGN_VARIABLE_CONTAINER_HPP #define ASLAM_GRID_CALIBRATION_TARGET_DESIGN_VARIABLE_CONTAINER_HPP #include <boost/shared_ptr.hpp> #include <aslam/targets.hpp> #include <aslam/backend/MappedEuclideanPoint.hpp> namespace aslam { class GridCalibrationTargetDesignVariableContainer { public: GridCalibrationTargetDesignVariableContainer( boost::shared_ptr<cameras::GridCalibrationTargetBase> target, bool active); virtual ~GridCalibrationTargetDesignVariableContainer(); /// \brief get all underlying design variables. void getDesignVariables( backend::DesignVariable::set_t & designVariables) const; /// \brief estimate the ith point. void setPointActive(size_t i, bool active); /// \brief is point i being estimated? bool isPointActive(size_t i); /// \brief get the expression for point i backend::EuclideanExpression getPoint(size_t i); /// \brief get the target. boost::shared_ptr<cameras::GridCalibrationTargetBase> getTarget(); private: boost::shared_ptr<cameras::GridCalibrationTargetBase> _target; std::vector<boost::shared_ptr<backend::MappedEuclideanPoint> > _points; std::vector<backend::EuclideanExpression> _pointExpressions; }; } // namespace aslam #endif /* ASLAM_GRID_CALIBRATION_TARGET_DESIGN_VARIABLE_CONTAINER_HPP */
{"hexsha": "402eb4815bd9be2aea403b3c349c268f925af12a", "size": 1334, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "aslam_cv/aslam_cv_backend/include/aslam/GridCalibrationTargetDesignVariableContainer.hpp", "max_stars_repo_name": "PushyamiKaveti/kalibr", "max_stars_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": 2690.0, "max_stars_repo_stars_event_min_datetime": "2015-01-07T03:50:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:27:01.000Z", "max_issues_repo_path": "aslam_cv/aslam_cv_backend/include/aslam/GridCalibrationTargetDesignVariableContainer.hpp", "max_issues_repo_name": "PushyamiKaveti/kalibr", "max_issues_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": 481.0, "max_issues_repo_issues_event_min_datetime": "2015-01-27T10:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:02:41.000Z", "max_forks_repo_path": "aslam_cv/aslam_cv_backend/include/aslam/GridCalibrationTargetDesignVariableContainer.hpp", "max_forks_repo_name": "PushyamiKaveti/kalibr", "max_forks_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": 1091.0, "max_forks_repo_forks_event_min_datetime": "2015-01-26T21:21:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T01:55:33.000Z", "avg_line_length": 30.3181818182, "max_line_length": 73, "alphanum_fraction": 0.7893553223, "num_tokens": 311}
import os
import logging
import pickle
import copy

import numpy as np
from tqdm import tqdm
import gensim
import torch
import torch.nn.functional as F
import torch.optim as optim

# NOTE(review): removed an accidental, unused `from turtle import update`
# (an IDE auto-import) -- it pulls in tkinter and fails on headless machines.
from .lm import MWMLNetLMFineGrind, MWMLNetLMClassifier
from ..data import Dictionary

logger = logging.getLogger()


class DocReader:
    """Wrapper around the MWMLNet models.

    Bundles the network, its dictionaries and the optimizer, and provides
    checkpointing, pretrained-embedding loading, and train/predict steps.
    """

    def __init__(self, args_dict, chars_dict, features_dict, labels_dict=None, state_dict=None):
        """Build the network described by ``args_dict``.

        :param args_dict: model/config dict; mutated in place with vocabulary
            sizes and the ``updates`` counter.
        :param chars_dict: character Dictionary
        :param features_dict: feature Dictionary
        :param labels_dict: label Dictionary (required by the classifier model)
        :param state_dict: optional network weights to restore
        :raises RuntimeError: if (model_type, dataset) is unsupported
        """
        self.chars_dict = chars_dict
        self.features_dict = features_dict
        self.labels_dict = labels_dict
        self.bmes_dict = Dictionary(['B', 'M', 'E', 'S'])
        self.config = args_dict
        self.config["vocab_size"] = len(chars_dict)
        self.config["features_size"] = len(features_dict)
        self.config["chars_max_length"] = args_dict["chars_max_length"]
        # Select the network class from the (model_type, dataset) pair.
        if self.config["model_type"].lower() == 'mwmlnetlmfinegrind' and self.config["dataset"].lower() == "aichallenger2018":
            self.config["n_class"] = 4
            self.config["type_class"] = 20
            self.network = MWMLNetLMFineGrind(**self.config)
            logger.info("Model name:MWMLNetLMFineGrind")
        elif self.config["model_type"].lower() == 'mwmlnetlmclassifier' and self.config["dataset"].lower() == "cluemotionanalysis2020":
            self.config["n_class"] = len(self.labels_dict)
            self.network = MWMLNetLMClassifier(**self.config)
            # Bug fix: this branch previously logged "MWMLNetLMFineGrind".
            logger.info("Model name:MWMLNetLMClassifier")
        else:
            raise RuntimeError('Unsupported model: %s' % self.config["model_type"])
        self.config["updates"] = 0
        # Load saved state
        if state_dict:
            self.network.load_state_dict(state_dict)

    def init_optimizer(self, optimizer=None):
        """Create (or adopt) the optimizer for all trainable parameters.

        :param optimizer: optional pre-built optimizer (used when resuming
            from a checkpoint); otherwise one is built from the config.
        :raises RuntimeError: if ``optim_method`` is unsupported
        """
        if self.config["fix_embeddings"]:
            # NOTE(review): this freezes *every* parameter, not only the
            # embedding layer -- TODO confirm this is intended.
            for p in self.network.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if optimizer is not None:
            self.optimizer = optimizer
        else:
            if self.config["optim_method"].lower() == 'sgd':
                self.optimizer = optim.SGD(parameters, lr=self.config["learning_rate"],
                                           momentum=self.config["momentum"],
                                           weight_decay=self.config["weight_decay"])
            elif self.config["optim_method"].lower() == 'adamax':
                self.optimizer = optim.Adamax(parameters, lr=self.config["learning_rate"],
                                              weight_decay=self.config["weight_decay"])
            elif self.config["optim_method"].lower() == 'adam':
                self.optimizer = optim.Adam(parameters, lr=self.config["learning_rate"],
                                            weight_decay=self.config["weight_decay"])
            elif self.config["optim_method"].lower() == 'adadelta':
                self.optimizer = optim.Adadelta(parameters, lr=self.config["learning_rate"],
                                                rho=self.config["rho"], eps=self.config["eps"],
                                                weight_decay=self.config["weight_decay"])
            else:
                raise RuntimeError('Unsupported optimizer: %s' % self.config["optim_method"])

    def save(self, save_file_name, epoch=0):
        """Persist weights, dictionaries and config to ``save_file_name``.

        Note: ``self.network`` is moved to the CPU as a side effect. The
        ``epoch`` argument is accepted for API compatibility but not stored
        (use :meth:`checkpoint` to keep the epoch and optimizer).
        """
        state_dict = copy.copy(self.network.cpu().state_dict())
        if 'fixed_embedding' in state_dict:
            state_dict.pop('fixed_embedding')
        params = {
            'state_dict': state_dict,
            # Bug fix: these keys were 'char_dict'/'feature_dict', which made
            # DocReader.load() fail with KeyError; load()/load_checkpoint()
            # read 'chars_dict'/'features_dict'.
            'chars_dict': self.chars_dict,
            'features_dict': self.features_dict,
            'labels_dict': self.labels_dict,
            'config': self.config
        }
        try:
            torch.save(params, save_file_name)
        except BaseException:
            logger.warning('WARN: Saving failed... continuing anyway.')

    @staticmethod
    def load(loaded_file_name):
        """Load a model saved by :meth:`save` and return a ready DocReader."""
        logger.info('Loading model from file %s' % loaded_file_name)
        saved_params = torch.load(loaded_file_name, map_location=lambda storage, loc: storage)
        chars_dict = saved_params["chars_dict"]
        features_dict = saved_params["features_dict"]
        state_dict = saved_params["state_dict"]
        config = saved_params["config"]
        labels_dict = saved_params['labels_dict']
        model = DocReader(config, chars_dict, features_dict, labels_dict, state_dict)
        model.init_optimizer()
        return model

    def checkpoint(self, save_file_name, epoch):
        """Persist the full training state (including optimizer and epoch)."""
        params = {
            'state_dict': self.network.cpu().state_dict(),
            'chars_dict': self.chars_dict,
            'features_dict': self.features_dict,
            'labels_dict': self.labels_dict,
            'config': self.config,
            'optimizer': self.optimizer,
            'epoch': epoch
        }
        try:
            torch.save(params, save_file_name)
        except BaseException:
            logger.warning('WARN: Saving failed... continuing anyway.')

    @staticmethod
    def load_checkpoint(checkpoint_file_name):
        """Restore a training checkpoint.

        :return: tuple of (DocReader with optimizer restored, epoch)
        """
        logger.info('Loading model %s' % checkpoint_file_name)
        saved_params = torch.load(
            checkpoint_file_name, map_location=lambda storage, loc: storage
        )
        chars_dict = saved_params['chars_dict']
        features_dict = saved_params['features_dict']
        state_dict = saved_params['state_dict']
        epoch = saved_params['epoch']
        optimizer = saved_params['optimizer']
        config = saved_params['config']
        labels_dict = saved_params['labels_dict']
        model = DocReader(config, chars_dict, features_dict, labels_dict, state_dict)
        model.init_optimizer(optimizer)
        return model, epoch

    def load_embeddings(self, embedding_file, save_embedding_file):
        """Load pretrained embeddings for a given list of words, if they exist.

        Words missing from the pretrained file keep a random initialization;
        duplicated words are averaged. The resolved matrix (plus the
        dictionary) is cached in ``save_embedding_file`` for later runs.

        :param embedding_file: word2vec-format text file of embeddings
        :param save_embedding_file: pickle cache path for the resolved matrix
        """
        if os.path.exists(save_embedding_file):
            # Fast path: reuse the previously resolved embedding matrix.
            with open(save_embedding_file, 'rb') as rfp:
                data_dict = pickle.load(rfp)
            self.chars_dict = data_dict["chars_dict"]
            embedding = data_dict["embedding"]
            logger.info('Loading pre-trained embeddings for %d words from %s'
                        % (len(self.chars_dict), save_embedding_file))
        else:
            logger.info('Loading pre-trained embeddings for %d words from %s'
                        % (len(self.chars_dict), embedding_file))
            # When normalized, some words are duplicated. (Average the embeddings).
            vec_counts = {}
            model = gensim.models.KeyedVectors.load_word2vec_format(embedding_file)
            loaded_dim = model.vector_size
            words_list = model.index_to_key
            length = len(words_list)
            vocab_size = len(self.chars_dict)
            # Words not found in the pretrained file keep this random init.
            embedding = np.random.rand(vocab_size, loaded_dim)
            for index in tqdm(range(length), desc='Loading vectors'):
                word = words_list[index]
                if word in self.chars_dict:
                    vec = model.get_vector(word)
                    if word not in vec_counts:
                        vec_counts[word] = 1
                        embedding[self.chars_dict[word]] = vec
                    else:
                        logger.warning('WARN: Duplicate embedding found for %s' % word)
                        vec_counts[word] = vec_counts[word] + 1
                        embedding[self.chars_dict[word]] += vec
            del model
            for w, c in vec_counts.items():
                embedding[self.chars_dict[w]] /= c
            with open(save_embedding_file, 'wb') as wfp:
                data_dict = {
                    "embedding": embedding,
                    "chars_dict": self.chars_dict
                }
                pickle.dump(data_dict, wfp)
        embedding = torch.tensor(embedding, dtype=torch.float)
        self.network.from_pretrained(embedding)
        logger.info('Loaded %d embeddings dimension(%d)'
                    % (embedding.shape[0], embedding.shape[1]))

    def expand_dictionary(self, external_dict=None):
        """Add words to the dictionary (the embedding layer is not resized here)."""
        if external_dict is not None and len(external_dict) > 0:
            logger.info('Adding %d new words to dictionary...' % len(external_dict))
            for w in external_dict:
                self.chars_dict.add(w)
            self.vocabs_size = len(self.chars_dict)
            # Also keep the config in sync: the constructor and network read
            # "vocab_size" from it.
            self.config["vocab_size"] = len(self.chars_dict)
            logger.info('New vocab size: %d' % len(self.chars_dict))

    def to_device(self, device):
        """Move the network to the given torch device."""
        self.network.to(device)

    def update(self, ex, device):
        """
        Forward a batch of examples; step the optimizer to update weights.

        :param ex: batch tuple (inputs..., labels); labels are the last item
        :param device: torch device to run on
        :return: scalar loss value for the batch
        :raises RuntimeError: if init_optimizer() has not been called
        """
        if not self.optimizer:
            raise RuntimeError('No optimizer set.')
        # Train mode
        self.network.train()
        self.network.to(device)
        # Transfer to GPU
        # ex : words_segs,chars_segs,features_segs,labels
        inputs = [e.to(device) for e in ex[:-1]]
        targets = ex[-1].to(device)
        # Run forward
        score = self.network(*inputs)
        # Compute loss and accuracies
        loss = F.cross_entropy(score, targets)
        # Clear gradients and run backward
        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.config['grad_clipping'])
        # Update parameters
        self.optimizer.step()
        self.config["updates"] += 1
        return loss.cpu().detach().item()

    def predict(self, ex, device):
        """Run a forward pass in eval mode.

        :param ex: batch tuple (inputs..., labels); labels are the last item
        :param device: torch device to run on
        :return: tuple of (scores on device, targets on CPU)
        :raises RuntimeError: if init_optimizer() has not been called
            (NOTE(review): prediction doesn't need an optimizer; the guard is
            kept for behavioral compatibility -- TODO confirm it can go)
        """
        if not self.optimizer:
            raise RuntimeError('No optimizer set.')
        # eval mode
        self.network.eval()
        self.network.to(device)
        # Transfer to GPU
        # ex : words_segs,chars_segs,features_segs,labels
        inputs = [e.to(device) for e in ex[:-1]]
        targets = ex[-1]
        # Run forward
        scores = self.network(*inputs)
        return scores, targets
{"hexsha": "2ea0c412f48b8f8dda4acbaaf960ae93fb3d7d69", "size": 10241, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model/base.py", "max_stars_repo_name": "MobtgZhang/MWMLNet", "max_stars_repo_head_hexsha": "125bb39935916b6b4be505c51cb6a04eb49b96d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-07T08:53:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T08:53:09.000Z", "max_issues_repo_path": "src/model/base.py", "max_issues_repo_name": "MobtgZhang/MWMLNet", "max_issues_repo_head_hexsha": "125bb39935916b6b4be505c51cb6a04eb49b96d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/base.py", "max_forks_repo_name": "MobtgZhang/MWMLNet", "max_forks_repo_head_hexsha": "125bb39935916b6b4be505c51cb6a04eb49b96d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1145374449, "max_line_length": 133, "alphanum_fraction": 0.6057025681, "include": true, "reason": "import numpy", "num_tokens": 2086}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % GEANT manual in LaTeX form % % % % Michel Goossens (for translation into LaTeX) % % Version 1.00 % % Last Mod. Jan 24 1991 1300 MG + IB % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \Documentation {M.Maire} \Submitted{01.10.84} \Revised{20.03.94} \Version{Geant 3.11}\Routid{BASE020} \Makehead{The data structures and their relationship} \section{Dynamic memory} The {\tt GEANT} data structures are stored in the common \FCind{/GCBANK/} accessible through the following Patchy sequence: The \FCind{/GCLINK/} variables are pointers to the {\tt GEANT} data structures in the \FCind{/GCBANK/} common. They belong to a permanent area declared in \Rind{GZINIT}. \FComm{GCBANK}{Dynamic core for the GEANT data structures} \begin{verbatim} PARAMETER (KWBANK=69000,KWWORK=5200) COMMON/GCBANK/NZEBRA,GVERSN,ZVERSN,IXSTOR,IXDIV,IXCONS,FENDQ(16) + ,LMAIN,LR1,WS(KWBANK) DIMENSION IQ(2),Q(2),LQ(8000),IWS(2) EQUIVALENCE (Q(1),IQ(1),LQ(9)),(LQ(1),LMAIN),(IWS(1),WS(1)) EQUIVALENCE (JCG,JGSTAT) COMMON/GCLINK/JDIGI ,JDRAW ,JHEAD ,JHITS ,JKINE ,JMATE ,JPART + ,JROTM ,JRUNG ,JSET ,JSTAK ,JGSTAT,JTMED ,JTRACK,JVERTX + ,JVOLUM,JXYZ ,JGPAR ,JGPAR2,JSKLT C \end{verbatim} The figure on the next page indicates the ralations between the {\tt GEANT} data structures. 
Detailed description of the data structure parts can be found in the following sections: \begin{center}\tt\begin{tabular}{lllrrr} JRUNG &[BASE299] \\ JPART &[CONS399] &JMATE &[CONS199] \\ JROTM &[GEOM299] &JTMED &[CONS299] \\ JVOLUM &[GEOM199] \\ JSET &[HITS199] &JDRAW &[DRAW399] \\ JHEAD &[BASE299] &JKINE &[KINE199] & JVERTX & [KINE199]\\ JSTAK &[TRAK399] \\ IDIGI &[HITS399] &JHITS &[HITS299] & JXYZ & [TRAK 499] \end{tabular} \end{center} \begin{figure}[hbt] \centering \epsfig{file=eps/base020-1.eps,width=12cm} \caption{Relation between {\tt GEANT} data structures} \label{fg:base020-1} \end{figure} \newpage \section{Common blocks} The communication between program segments of the {\tt GEANT} system is assured by the contents of the data structures and by the definition of {\it long range} variables in several common blocks. In addition, within the program segments, the subroutines communicate with each other through actual arguments and through the common block variables. A detailed list of the user accessed common blocks is given in {\tt [ZZZZ010]}. Their also the variables initialized in \Rind{GINIT} and the possibility in overriding them through data records {\tt [BASE040]} or interactive commands {\tt [XINT]} are specified. In most of the cases there is a correspondence between a given data structure and a given common block where the current contents of the banks are stored. The labelled common blocks are accessible through Patchy/CMZ sequences identified by the name of the {\tt COMMON}. They are defined in the Patch \Rind {GCDES}. {\bf Note:} Unless otherwise specified, the long range variables are initialised in \Rind{GINIT}. When non-zero, default values are quoted between brackets. If the value may be modified the keyword for the data record and for the interactive command is also given in bold characters between brackets.
{"hexsha": "ab201a43d4cc4c56e02a6dfd48355f5048b8d6da", "size": 3782, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "geant/base020.tex", "max_stars_repo_name": "berghaus/cernlib-docs", "max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z", "max_issues_repo_path": "geant/base020.tex", "max_issues_repo_name": "berghaus/cernlib-docs", "max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geant/base020.tex", "max_forks_repo_name": "berghaus/cernlib-docs", "max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9772727273, "max_line_length": 79, "alphanum_fraction": 0.6166049709, "num_tokens": 1022}
import connexion
import six

from swagger_server.models.prediction import PREDICTION  # noqa: E501
from swagger_server import util
from subprocess import Popen, PIPE
from re import split
from sys import stdout
import subprocess
import numpy as np
import pandas as pd
#import seaborn as sns
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric import smoothers_lowess
from pandas import Series, DataFrame
from patsy import dmatrices
from sklearn import datasets, svm
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from azureml import Workspace
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
# FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer is its replacement (the name was imported but never used).
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# FIX: the sklearn.cross_validation and sklearn.grid_search modules were
# removed in scikit-learn 0.20; their contents live in sklearn.model_selection.
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
from flask import jsonify


def prediction_get():  # noqa: E501
    """Train BigMart sales models and return test-set predictions.

    Reads ./train.csv and ./test.csv, imputes and feature-engineers the
    combined data, fits three regressors (scaled linear regression,
    gradient boosting, and grid-searched gradient boosting), and returns
    the grid-searched model's predictions for the test file.

    :rtype: PREDICTION
    """
    # ---- Load raw data and tag each row with its origin ----
    train = pd.read_csv('./train.csv')
    test = pd.read_csv('./test.csv')
    train['source'] = 'train'
    test['source'] = 'test'
    data = pd.concat([train, test], ignore_index=True)
    print(train.describe())
    print(test.describe())
    print(data.describe())

    # ---- Imputation / cleaning ----
    # Numeric weight: fill missing values with the column mean.
    data["Item_Weight"] = data["Item_Weight"].fillna(data["Item_Weight"].mean())
    # Categorical outlet size: fill missing values with the mode.
    data['Outlet_Size'] = data['Outlet_Size'].fillna(data['Outlet_Size'].mode().iloc[0])
    # Zero visibility is physically meaningless -- treat it as missing.
    data['Item_Visibility'] = data['Item_Visibility'].mask(
        data['Item_Visibility'] == 0, data['Item_Visibility'].mean(skipna=True))

    # ---- Feature engineering ----
    # The first two characters of the item id encode a broad category.
    data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
    data['Item_Type_Combined'] = data['Item_Type_Combined'].map(
        {'FD': 'Food', 'NC': 'Non-Consumable', 'DR': 'Drinks'})
    # Outlet age; reference year 2018 kept from the original implementation.
    data['Outlet_Years'] = 2018 - data['Outlet_Establishment_Year']
    # Normalise the inconsistent fat-content labels.
    data['Item_Fat_Content'] = data['Item_Fat_Content'].replace(
        {'LF': 'Low Fat', 'reg': 'Regular', 'low fat': 'Low Fat'})
    data.loc[data['Item_Type_Combined'] == "Non-Consumable", 'Item_Fat_Content'] = "Non-Edible"

    # ---- Encoding ----
    le = LabelEncoder()
    data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
    var_mod = ['Item_Fat_Content', 'Outlet_Location_Type', 'Outlet_Size',
               'Item_Type_Combined', 'Outlet_Type', 'Outlet']
    for col in var_mod:
        data[col] = LabelEncoder().fit_transform(data[col])
    data = pd.get_dummies(data, columns=['Item_Fat_Content', 'Outlet_Location_Type',
                                         'Outlet_Size', 'Outlet_Type',
                                         'Item_Type_Combined', 'Outlet_Identifier'])
    # (Dead code removed: an unused DataFrameSelector class and num_pipeline
    # that were never fitted or applied.)
    data.drop(['Item_Type', 'Outlet_Establishment_Year'], axis=1, inplace=True)

    # ---- Split back into train/test ----
    # FIX: .copy() so the in-place drops below mutate real frames, not views
    # of `data` (avoids SettingWithCopyWarning / silent no-ops).
    trainr = data.loc[data['source'] == "train"].copy()
    testr = data.loc[data['source'] == "test"].copy()
    testr.drop(['Item_Outlet_Sales', 'source'], axis=1, inplace=True)
    trainr.drop(['source'], axis=1, inplace=True)

    Xtrain = trainr.drop(["Item_Outlet_Sales"], axis=1)
    ytrain = trainr["Item_Outlet_Sales"]
    X_train, X_test, y_train, y_test = train_test_split(Xtrain, ytrain)

    # Exclude the free-text identifier from the model inputs.
    predictors = [x for x in Xtrain.columns if x not in ['Item_Identifier']]

    # ---- Model 1: scaled linear regression ----
    r_pipeline = Pipeline([
        ('std_scaler', StandardScaler()),
        ('linear', LinearRegression()),
    ])
    r_pipeline.fit(X_train[predictors], y_train)
    # FIX: the 'mean_squared_error' scorer string was removed; the negated
    # scorer is numerically equivalent here because np.abs() precedes sqrt.
    cv_score = cross_val_score(r_pipeline, X_train[predictors], y_train,
                               cv=20, scoring='neg_mean_squared_error')
    RMSE = np.sqrt(np.abs(cv_score)).mean()

    # ---- Model 2: gradient boosting ----
    # FIX: loss='ls' was renamed to 'squared_error' (identical objective).
    pipedesc = Pipeline([
        ('std_scaler', StandardScaler()),
        ('grboostregmodel', GradientBoostingRegressor(
            n_estimators=100, learning_rate=0.1, max_depth=1,
            random_state=0, loss='squared_error')),
    ])
    pipedesc.fit(X_train[predictors], y_train)
    cv_scoredesc = cross_val_score(pipedesc, X_train[predictors], y_train,
                                   cv=20, scoring='neg_mean_squared_error')
    RMSEdesc = np.sqrt(np.abs(cv_scoredesc)).mean()

    # ---- Model 3: grid-searched gradient boosting ----
    gb_grid_params = {'learning_rate': [0.1, 0.05]}
    gb_gs = GradientBoostingRegressor(n_estimators=60)
    # FIX: grid_search.GridSearchCV -> sklearn.model_selection.GridSearchCV
    # (already imported at the top of this module).
    clfgrd = GridSearchCV(gb_gs, gb_grid_params, cv=20, n_jobs=10)
    clfgrdmof = clfgrd.fit(X_train[predictors], y_train)
    cvgd_scoredesc = cross_val_score(clfgrd, X_train[predictors], y_train,
                                     cv=20, scoring='neg_mean_squared_error')
    RMSEdescgd = np.sqrt(np.abs(cvgd_scoredesc)).mean()

    # Summary table of cross-validated RMSEs (kept for logging/inspection).
    results = pd.DataFrame(columns=["Description", "RMSE"])
    results.loc[len(results)] = ["LinearModel", RMSE]
    results.loc[len(results)] = ["GradientBoost", RMSEdesc]
    results.loc[len(results)] = ["HypertunedGradientBoost", RMSEdescgd]
    print(results)

    # Final predictions on the held-out test file, from the tuned model.
    bigmartpred = [clfgrdmof.predict(testr[predictors]).tolist()]
    return PREDICTION(bigmartpred)
{"hexsha": "9152b14717b1f3d3601beebb4eb949fd72dc5a4d", "size": 8236, "ext": "py", "lang": "Python", "max_stars_repo_path": "PredictBigMartDataset/project/default_controller.py", "max_stars_repo_name": "arijitsinha80/PYworld", "max_stars_repo_head_hexsha": "ece0ddb1233452da2442405d3c40bd24aef77af8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PredictBigMartDataset/project/default_controller.py", "max_issues_repo_name": "arijitsinha80/PYworld", "max_issues_repo_head_hexsha": "ece0ddb1233452da2442405d3c40bd24aef77af8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PredictBigMartDataset/project/default_controller.py", "max_forks_repo_name": "arijitsinha80/PYworld", "max_forks_repo_head_hexsha": "ece0ddb1233452da2442405d3c40bd24aef77af8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8464419476, "max_line_length": 131, "alphanum_fraction": 0.6633074308, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1941}
#!/usr/bin/env python

# Simple example of Wiener deconvolution in Python.
# We use a fixed SNR across all frequencies in this example.
#
# Written 2015 by Dan Stowell. Public domain.

import numpy as np
from numpy.fft import fft, ifft, ifftshift

##########################
# user config
sonlen = 128
irlen = 64

lambd_est = 1e-3  # estimated noise lev

##########################


def gen_son(length):
    "Generate a synthetic un-reverberated 'sound event' template"
    # (whitenoise -> integrate -> envelope -> normalise)
    son = np.cumsum(np.random.randn(length))
    # apply envelope: fast ramp up, slow ramp down
    attacklen = int(length // 8)
    env = np.hstack((np.linspace(0.1, 1, attacklen), np.linspace(1, 0.1, length - attacklen)))
    son *= env
    # normalise to unit energy
    son /= np.sqrt(np.sum(son * son))
    return son


def gen_ir(length):
    "Generate a synthetic impulse response"
    # First we generate a quietish tail
    son = np.random.randn(length)
    attacklen = int(length // 2)
    env = np.hstack((np.linspace(0.1, 1, attacklen), np.linspace(1, 0.1, length - attacklen)))
    son *= env
    son *= 0.05
    # Here we add the "direct" signal
    son[5] = 1
    # Now some early reflection spikes
    for _ in range(10):
        son[int(length * (np.random.rand() ** 2))] += np.random.randn() * 0.5
    # Normalise and return
    son /= np.sqrt(np.sum(son * son))
    return son


def wiener_deconvolution(signal, kernel, lambd):
    "lambd is the SNR"
    kernel = np.hstack(
        (kernel, np.zeros(len(signal) - len(kernel)))
    )  # zero pad the kernel to same length
    H = fft(kernel)
    # Wiener filter: conj(H) / (|H|^2 + lambd^2) regularises near-zero bins.
    deconvolved = np.real(ifft(fft(signal) * np.conj(H) / (H * np.conj(H) + lambd**2)))
    return deconvolved


if __name__ == "__main__":
    "simple test: get one soundtype and one impulse response, convolve them, deconvolve them, and check the result (plot it!)"
    # Plotting imports are deferred to script execution so the numerical
    # helpers above can be imported without a matplotlib installation.
    import matplotlib
    import matplotlib.cm as cm

    # matplotlib.use('PDF') # http://www.astrobetter.com/plotting-to-a-file-in-python/
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages

    plt.rcParams.update({"font.size": 6})

    son = gen_son(sonlen)
    ir = gen_ir(irlen)
    obs = np.convolve(son, ir, mode="full")
    # let's add some noise to the obs
    obs += np.random.randn(*obs.shape) * lambd_est
    son_est = wiener_deconvolution(obs, ir, lambd=lambd_est)[:sonlen]
    ir_est = wiener_deconvolution(obs, son, lambd=lambd_est)[:irlen]

    # calc error
    son_err = np.sqrt(np.mean((son - son_est) ** 2))
    ir_err = np.sqrt(np.mean((ir - ir_est) ** 2))
    print("single_example_test(): RMS errors son %g, IR %g" % (son_err, ir_err))

    # plot
    pdf = PdfPages("wiener_deconvolution_example.pdf")
    plt.figure(frameon=False)
    # FIX: this subplot call was commented out, so the first plot was drawn
    # on full-figure axes and then partially overdrawn by the grid below.
    plt.subplot(3, 2, 1)
    plt.plot(son)
    plt.title("son")
    plt.subplot(3, 2, 3)
    plt.plot(son_est)
    plt.title("son_est")
    plt.subplot(3, 2, 2)
    plt.plot(ir)
    plt.title("ir")
    plt.subplot(3, 2, 4)
    plt.plot(ir_est)
    plt.title("ir_est")
    plt.subplot(3, 1, 3)
    plt.plot(obs)
    plt.title("obs")
    # FIX: savefig was commented out, so the PdfPages file was closed with
    # no pages and the script produced no usable output.
    pdf.savefig()
    plt.close()
    pdf.close()
{"hexsha": "76d78e1ca5491c952c65fd161882d0f168d1b604", "size": 3111, "ext": "py", "lang": "Python", "max_stars_repo_path": "rapidtide/wiener2.py", "max_stars_repo_name": "bbfrederick/delaytools", "max_stars_repo_head_hexsha": "190d79ae4c19317dfce38a528e43fd05459f29a5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-06-15T03:45:52.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-15T03:45:52.000Z", "max_issues_repo_path": "rapidtide/wiener2.py", "max_issues_repo_name": "bbfrederick/delaytools", "max_issues_repo_head_hexsha": "190d79ae4c19317dfce38a528e43fd05459f29a5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rapidtide/wiener2.py", "max_forks_repo_name": "bbfrederick/delaytools", "max_forks_repo_head_hexsha": "190d79ae4c19317dfce38a528e43fd05459f29a5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6285714286, "max_line_length": 126, "alphanum_fraction": 0.6345226615, "include": true, "reason": "import numpy,from numpy", "num_tokens": 922}
''' Finetuning Huggingface's models for question-answering on Natural Questions (NQ) datasety by Google For the full list of options, type python run_nq.py -h ''' from __future__ import absolute_import, division, print_function import argparse import json import logging import os import random import glob import sys import timeit import numpy as np from pathlib import Path import torch import torch.nn.functional as F from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler import warnings warnings.filterwarnings('ignore',category=FutureWarning) # Following import causes annoying warning try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup, get_constant_schedule, get_constant_schedule_with_warmup from nq_model import NQBert import nq_metric from nq_features import load_or_precompute_nq_features from nq_eval import RawResult, read_candidates, compute_pred_dict import google_tokenization import nq_config logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig,)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'albert': (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def get_lrs(optimizer): lrs = [] for param_group in optimizer.param_groups: lrs.append((param_group, param_group['lr'])) return lrs def train(args, train_dataset, model, tokenizer): """ Train the model """ 
if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) # Allow for different learning rate for final layers final_layers = ['span_outputs.weight', 'span_outputs.bias', 'type_output.weight', 'type_output.bias'] if args.final_layers_lr == -1.0: args.final_layers_lr = args.learning_rate if args.final_layers_wd == -1.0: args.final_layers_wd = args.weight_decay final_layer_params = [(n, p) for n, p in model.named_parameters() if n in final_layers] non_final_layer_params = [(n, p) for n, p in model.named_parameters() if n not in final_layers] no_decay = ['bias', 'LayerNorm.weight'] final_layer_decaying_params = [p for n, p in final_layer_params if not any(nd in n for nd in no_decay)] final_layer_nondecaying_params = [p for n, p in final_layer_params if any(nd in n for nd in no_decay)] non_final_layer_decaying_params = [p for n, p in non_final_layer_params if not any(nd in n for nd in no_decay)] non_final_layer_nondecaying_params = [p for n, p in non_final_layer_params if any(nd in n for nd in no_decay)] optimizer_grouped_parameters = [ {'params': final_layer_decaying_params, 'lr':args.final_layers_lr, 'weight_decay':args.final_layers_wd}, {'params': final_layer_nondecaying_params, 'lr':args.final_layers_lr, 'weight_decay':0.0}, {'params': non_final_layer_decaying_params, 'lr':args.learning_rate, 'weight_decay':args.weight_decay}, {'params': non_final_layer_nondecaying_params, 
'lr':args.learning_rate, 'weight_decay':0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) # Allow choice between lr schedules if args.constant_lr and args.warmup_steps == 0: scheduler = get_constant_schedule(optimizer) elif args.constant_lr and args.warmup_steps > 0: scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps) else: scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() inputs = {'input_ids': batch['input_ids'].to(args.device), 'attention_mask': batch['attention_mask'].to(args.device), 'token_type_ids': batch['token_type_ids'].to(args.device), 'start_positions': batch['start_positions'].to(args.device), 'end_positions': batch['end_positions'].to(args.device), 'instance_types': batch['instance_types'].to(args.device)} outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == 
-1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer, dataset_type='dev', prefix=str(global_step)) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('lr_final_layers', scheduler.get_lr()[1], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, dataset_type='test', prefix='', output_preds=False, output_raw=False): '''Predict answer spans using model and tokenizer (Google tokenizer)''' # I believe the enclosing barriers can be deleted. 
dataset_type should be test or train # if args.local_rank not in [-1, 0]: # torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache dataset = load_or_precompute_nq_features(args, tokenizer, dataset_type) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) nq_config.predict_batch_size = args.eval_batch_size # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() with torch.no_grad(): inputs = {'input_ids': batch['input_ids'].to(args.device), 'attention_mask': batch['attention_mask'].to(args.device), 'token_type_ids': batch['token_type_ids'].to(args.device) } outputs = model(**inputs) for b, unique_id in enumerate(batch['unique_id']): all_results.append( RawResult( unique_id=unique_id, start_logits=to_list(outputs[0][b]), end_logits=to_list(outputs[1][b]), answer_type_logits=to_list(outputs[2][b]))) # Computing predictions dataset_jsonl_file = args.test_file if dataset_type == 'test' else args.dev_file if dataset_type == 'dev' else args.train_file logger.info(f"Loading candidates from {dataset_jsonl_file} ...") candidates_dict = read_candidates(dataset_jsonl_file) logger.info(f"Computing predictions for {dataset_jsonl_file} ...") nq_pred_dict = compute_pred_dict(candidates_dict, dataset, [r._asdict() for r in all_results]) evalTime = timeit.default_timer() - start_time 
logger.info("Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) if output_raw: return nq_pred_dict output_prediction_file = os.path.join(args.output_dir, f"predictions_{dataset_type}_{prefix}.json") logger.info("Writing predictions to: %s" % (output_prediction_file)) with open(output_prediction_file, 'w') as f: json.dump(list(nq_pred_dict.values()), f, indent=4) nq_pred_df = nq_metric.get_test_df(nq_pred_dict, args.long_answer_threshold, args.short_answer_threshold) nq_pred_df = nq_pred_df.sort_values(by='example_id').set_index('example_id', drop=False) if not output_preds: labelled_df = nq_metric.get_labelled_df(dataset_jsonl_file).sort_values(by='example_id').set_index('example_id', drop=False) f1_scores = nq_metric.get_f1(nq_pred_df, labelled_df) results = {'micro_f1':f1_scores[0] * 100, 'long_f1':f1_scores[1] * 100, 'short_f1':f1_scores[2] * 100} return results else: submission_df = nq_pred_df[['example_id', 'long_answer', 'short_answer']] submission_df = submission_df.rename(columns={'long_answer':'long', 'short_answer':'short'}) submission_df = submission_df.melt(id_vars=['example_id'], var_name='answer_length') submission_df['example_id'] = submission_df['example_id'].apply(str) + '_' + submission_df['answer_length'] submission_df = submission_df.rename(columns={'value':'PredictionString'}).drop(columns='answer_length') return submission_df def main(args): # One must be chosen: if not args.do_train and not args.do_eval and not args.do_predict: sys.exit("Must use at least one of --do_train, --do_eval, --do_predict") if args.do_train and (not args.train_file or not args.train_features): sys.exit("When training, must specify both --train_file, --train_features") if args.do_eval and (not args.dev_file or not args.dev_features): sys.exit("When evaluating on dev set, must specify both --dev_file, --dev_features") if args.do_predict and (not args.test_file or not args.test_features): sys.exit("When predicting on test set, 
must specify both --test_file, --test_features") if args.evaluate_during_training and (not args.dev_file): sys.exit("When evaluating during training, specify both --dev_file, --dev_features") # Set NQA prep variables nq_config.max_context_length = args.max_seq_length nq_config.max_question_length = args.max_query_length nq_config.max_windows = args.max_windows nq_config.window_stride = args.doc_stride nq_config.include_unknowns = 1.0 / args.undersampling_factor nq_config.n_best_size = args.n_best_size nq_config.max_answer_length = args.max_answer_length if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, 
bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] # Google tokenizer: tokenizer = google_tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) # Resume from checkpoint if args.from_checkpoint: model = NQBert.from_pretrained(args.from_checkpoint) else: config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=2, cache_dir=args.cache_dir if args.cache_dir else None) model = NQBert.from_pretrained(args.model_name_or_path, config=config, cache_dir=args.cache_dir if args.cache_dir else None) # Only tune last layer: Last four parameters (answer type weight & bias, span start/end weight and bias) if args.only_final_layers: for i_param, param in enumerate(model.parameters()): if i_param in range(len(model.parameters())-4): continue else: param.requires_grad = False if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Using Google's feature computation train_dataset = load_or_precompute_nq_features(args, tokenizer, dataset_type='train') if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = NQBert.from_pretrained(args.output_dir) tokenizer = google_tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) model.to(args.device) # When checkpoint is supplied and output_dir does not contain mode, copy it over from checkpoint output_model_path = Path(args.output_dir) / 'pytorch_model.bin' if not args.do_train and (not output_model_path.exists()): if not args.from_checkpoint: sys.exit("When not training, evaluation model bust either resider in output directory or checkpoint mus be provided.") eval_model_dir = args.from_checkpoint else: eval_model_dir = args.output_dir # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [eval_model_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(eval_model_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = NQBert.from_pretrained(checkpoint) tokenizer = google_tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, dataset_type='dev', prefix=global_step) result = dict((k + '_dev' + ('_{}'.format(global_step) if global_step else ''), v) for k, 
v in result.items()) results.update(result) logger.info("Results: {}".format(results)) if args.do_predict and args.local_rank in [-1, 0]: logger.info("Predicting on test data set...") # Reload the model model = NQBert.from_pretrained(eval_model_dir) model.to(args.device) tokenizer = google_tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) submission_df = evaluate(args, model, tokenizer, dataset_type='test', prefix='', output_preds=True) submission_df.to_csv("./submission.csv", index=False) logger.info("submission_df:") logger.info(submission_df) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=False, help="Natural Questions jsonl for training. E.g., simplified-nq-train.jsonl") parser.add_argument("--dev_file", default=None, type=str, required=False, help="Natural Questions jsonl for cross-validation. E.g., simplified-nq-dev.jsonl") parser.add_argument("--test_file", default=None, type=str, required=False, help="Natural Questions jsonl for prediction. 
E.g., simplified-nq-test.jsonl") parser.add_argument("--train_features", default=None, type=str, required=False, help="Train features are computed and saved to, or loaded from this file") parser.add_argument("--dev_features", default=None, type=str, required=False, help="Dev features are computed and saved to, or loaded from this file") parser.add_argument("--test_features", default=None, type=str, required=False, help="Test features are computed and saved to, or loaded from this file") parser.add_argument("--undersampling_factor", default=50, type=int, required=False, help="Undersample null instances by this factor") parser.add_argument("--only_final_layers", action='store_true', help="Whether to only tune layers on top of BERT representation") parser.add_argument("--from_checkpoint", default='', type=str, required=False, help="Start training from the provided checkpoint") parser.add_argument("--vocab_file", default=None, type=str, required=True, help="Vocabulary file for the tokenizer") parser.add_argument('--long_answer_threshold', type=float, default=-1*float('inf'), help="Threshold for confidence score for long answer.") parser.add_argument('--short_answer_threshold', type=float, default=-1*float('inf'), help="Threshold for confidence score for short answer.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--final_layers_wd", default=-1.0, type=float, help="Weight decay parameeters for final layers.") parser.add_argument("--do_predict", action='store_true', help="Whether to perform prediction on test set") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, 
type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--max_windows", default=48, type=int, help="The maximum number of windows / instances to get per question.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--final_layers_lr", default=-1.0, type=float, help="The initial learning rate for the final layers") 
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--constant_lr", action='store_true', help="If true, use a constant learning rate (after a potential warmup phase)") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. 
" "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cached_input_features', action='store_true', help="Overwrite the precomputed input features") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() main(args)
{"hexsha": "63510479caa3549d5f7203f24f9fa3a20917c558", "size": 33348, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_nq.py", "max_stars_repo_name": "valemore/nq_transformers", "max_stars_repo_head_hexsha": "fcc31f06989fc7b83cec10477e56573aebd1a0eb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-22T17:20:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-11T13:45:27.000Z", "max_issues_repo_path": "run_nq.py", "max_issues_repo_name": "valemore/nq_transformers", "max_issues_repo_head_hexsha": "fcc31f06989fc7b83cec10477e56573aebd1a0eb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_nq.py", "max_forks_repo_name": "valemore/nq_transformers", "max_forks_repo_head_hexsha": "fcc31f06989fc7b83cec10477e56573aebd1a0eb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.2695924765, "max_line_length": 154, "alphanum_fraction": 0.6572208228, "include": true, "reason": "import numpy", "num_tokens": 7057}
import numpy as np
from boirlscenarios.irlobject import IRLObject
from boirlscenarios.configurations import Configurations
from tqdm import tqdm
import GPyOpt
import os
import boirlscenarios.constants as constants
import matplotlib.pyplot as plt
from tabulate import tabulate
import time


def exp_moving_average(data, alpha=0.6):
    """Exponentially smooth each row of a 2-D array along its last axis.

    data: 2-D array-like (n_series, n_points); each row is smoothed independently.
    alpha: smoothing factor; larger alpha tracks the raw signal more closely.
    Returns a new ndarray of the same shape; the first point of each row is
    kept as-is and seeds the running average.
    """
    smooth_data = np.zeros(np.shape(data))
    for dind, d in enumerate(data):
        prev_sv = None
        for lind, l in enumerate(d):
            if lind == 0:
                # Seed the running average with the first raw value.
                sv = l
            else:
                sv = alpha * l + (1 - alpha) * prev_sv
            smooth_data[dind, lind] = sv
            prev_sv = sv
    return smooth_data


def get_experts_fetch(O):
    """Split the flat Fetch demonstration buffer stored on *O* into per-episode dicts.

    O: environment wrapper whose ``trajectories`` dict holds flat arrays keyed by
       'observation', 'achieved_goal', 'desired_goal', 'actions', plus an
       'indices' array whose 1.0 entries flag episode starts.
    Returns a list of {'observations', 'actions'} dicts, one per episode, with
    the three observation components concatenated along axis 1.
    """
    import gym  # NOTE(review): appears unused in this function — TODO confirm before removing
    KEY_ORDER = ['observation', 'achieved_goal', 'desired_goal']
    fulltrajectories = O.trajectories
    indices = fulltrajectories["indices"]
    # Episode boundaries: positions flagged 1.0, plus a sentinel at the buffer end.
    inds = np.where(indices == 1.)[0]
    inds = np.append(inds, indices.shape[0]).astype(np.int)  # NOTE(review): np.int was removed in NumPy >= 1.24; should be plain int
    expert_trajs = []
    for n in range(len(inds) - 1):
        observation_dic = None
        for k in KEY_ORDER:
            # Stack the observation components side by side along the feature axis.
            if observation_dic is None:
                observation_dic = fulltrajectories[k][inds[n]:inds[n + 1]]
            else:
                observation_dic = np.append(observation_dic, fulltrajectories[k][inds[n]:inds[n + 1]], axis=1)
        action = fulltrajectories["actions"][inds[n]:inds[n + 1]]
        fin_dic = {"observations": observation_dic, "actions": action}
        expert_trajs.append(fin_dic)
    return expert_trajs


def get_experts(dir, trajectories=None):
    """Convert saved (state, action) trajectories into observation/action paths.

    dir: directory containing ``features.npy`` (per-state feature matrix) and,
         when *trajectories* is None, ``full_opt_trajectories.npy``.
    trajectories: optional (n_traj, l_traj, 2) array of (state index, action) pairs.
    Returns a list of {'observations', 'actions'} dicts, one per trajectory.
    """
    all_obs = np.expand_dims(np.load(os.path.join(dir, "features.npy")), axis=1)
    # Normalise the feature table to 2-D (n_states, d_states).
    if len(np.shape(all_obs)) > 2:
        all_obs = np.squeeze(all_obs)
    elif len(np.shape(all_obs)) == 1:
        all_obs = np.expand_dims(all_obs, axis=1)
    if trajectories is None:
        trajectories = np.load(os.path.join(dir, "full_opt_trajectories.npy"))
    n_traj, l_traj, _ = np.shape(trajectories)
    _, d_states = np.shape(all_obs)
    paths = []
    for i in range(n_traj):
        current_trajectory = trajectories[i]
        current_obs = np.zeros((l_traj, d_states))
        current_actions = np.zeros((l_traj, 1))
        for tind in range(l_traj):
            # Each step stores a (state index, action) pair; look the state's
            # features up in the feature table.
            s, a = current_trajectory[tind]
            current_obs[tind] = all_obs[s]
            current_actions[tind, 0] = a
        current_path = {'observations': np.array(current_obs), 'actions': np.array(current_actions)}
        paths.append(current_path)
    return paths


def algoexecute(algo, env, budget, trials, nInit=1, projections=None):
    """Execute the given IRL algorithm on the specified environment.

    algo: algorithm identifier from ``boirlscenarios.constants`` (AIRL, GCL,
          or a BOIRL kernel variant such as RHORBF).
    env: environment identifier from ``boirlscenarios.constants``.
    budget: number of optimisation iterations per trial.
    trials: number of independent restarts.
    nInit: number of initial samples (used here for the progress-bar total).
    projections: forwarded to ``IRLObject``.
    Results are written as .npy files under the configuration's result directory.
    """
    import os
    import numpy as np
    # Number of initial samples randomly selected
    boirlobj = IRLObject(algo, env, projections=projections)  # Object that stores trajectories, result directories, data directories etc
    # Load the initialization points. They are selected from regions of high NLL making the training challenging.
    initXs = np.load(os.path.join(boirlobj.configurations.getTrajectoryDir(), "myinitpoints.npy"))
    if algo == constants.AIRL or algo == constants.GCL:
        # Code for AIRL and GCL taken from the official repository: https://github.com/justinjfu/inverse_rl
        import tensorflow as tf
        from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
        from sandbox.rocky.tf.envs.base import TfEnv
        from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
        from inverse_rl.envs.env_utils import CustomGymEnv
        from rllab.envs.gym_env import GymEnv
        from inverse_rl.algos.irl_trpo import IRLTRPO
        from inverse_rl.utils.log_utils import rllab_logdir
        import gym
        import numpy as np
        import os
        # Specify the architecture for the reward network.
        # For fair comparison, we used the same model for reward as with BOIRL.
        rewardarch = None
        irl_inits = None
        # Consider each environment and load appropriate reward architecture.
        # Only valid for gridworld3d, vborlange, rborlange and point mass maze
        if env == constants.GRIDWORLD3D:
            import gym_sigmoid
            if algo == constants.AIRL:
                from inverse_rl.models.architectures import sigmoid_airl_net
                rewardarch = sigmoid_airl_net
                # Necessary to load init points for rewards.
                irl_inits = np.load(os.path.join(boirlobj.configurations.getTrajectoryDir(), "irl_weights.npy"), allow_pickle=True).tolist()
                policy_inits = np.load(os.path.join(boirlobj.configurations.getTrajectoryDir(), "policy_weights.npy"), allow_pickle=True).tolist()
            else:
                from inverse_rl.models.architectures import sigmoid_gcl_net
                irl_inits = [0, 0, 0]
                rewardarch = sigmoid_gcl_net
        elif env == constants.VIRTBORLANGE or env == constants.REALBORLANGE:
            import gym_sweden
            if algo == constants.AIRL:
                from inverse_rl.models.architectures import airl_linear_net
                rewardarch = airl_linear_net
                # Necessary to load init points for rewards.
                irl_inits = np.load(os.path.join(boirlobj.configurations.getTrajectoryDir(), "irl_weights.npy"), allow_pickle=True).tolist()
            else:
                from inverse_rl.models.architectures import gcl_linear_net
                rewardarch = gcl_linear_net
                irl_inits = [np.zeros((3, 1))]
        elif env == constants.MAZE:
            import gym
            if algo == constants.AIRL:
                irl_inits = np.load(os.path.join(boirlobj.configurations.getTrajectoryDir(), "irl_weights.npy"), allow_pickle=True).tolist()
                from inverse_rl.models.architectures import maze_net_2dim
                rewardarch = maze_net_2dim
            else:
                irl_inits = [np.zeros((2))]
                from inverse_rl.models.architectures import maze_net_2dim_gcl
                rewardarch = maze_net_2dim_gcl
        else:
            raise (NotImplementedError(
                "AIRL and GCL requires OpenAI Gym version of the environment. Only available for Gridworld3d, Virtual Borlange, Real Borlange and Point Mass Maze"))
        # Import the necessary algorithm
        if algo == constants.AIRL:
            from inverse_rl.models.airl_state import AIRL
        else:
            from inverse_rl.models.imitation_learning import GAN_GCL
        import numpy as np
        import os
        # This code expects environments as OpenAi Gyms.
        # So make sure you follow the Readme.md to install the correct OpenAI Gym version of the environments.
        if env == constants.MAZE:
            mygymenv = TfEnv(GymEnv(constants.get_gym(env), record_video=False, record_log=False))
        else:
            mygymenv = TfEnv(CustomGymEnv(constants.get_gym(env), record_video=False, record_log=False))
        # Load expert data
        if env == constants.FETCH:
            all_experts = get_experts_fetch(boirlobj.env)
        elif env == constants.MAZE:
            all_experts = boirlobj.env.trajectories
        else:
            all_experts = get_experts(boirlobj.configurations.getTrajectoryDir())
        # Iterate over trials
        for tr in np.arange(trials):
            # Pick this trial's initialization point (cycled modulo the pool size)
            # and massage it into the layout each environment's reward net expects.
            if env == constants.VIRTBORLANGE or env == constants.REALBORLANGE:
                irl_inits[0] = np.expand_dims(initXs[tr % initXs.shape[0]], axis=1)
            elif env == constants.MAZE:
                irl_inits[0] = initXs[tr % initXs.shape[0]]
            elif env == constants.GRIDWORLD3D:
                # Saved init points and the reward net use different parameter order,
                # hence the index shuffle below.
                irl_inits[0] = initXs[tr % initXs.shape[0]][1] * np.ones(1)
                irl_inits[1] = initXs[tr % initXs.shape[0]][0] * np.ones(1)
                irl_inits[2] = initXs[tr % initXs.shape[0]][2] * np.ones(1)
                if algo == constants.AIRL:
                    # Remaining slots seed the policy network.
                    for ggg in np.arange(3, 9):
                        irl_inits[ggg] = policy_inits[ggg]
            elif env == constants.MAZE:
                # NOTE(review): unreachable — duplicates the MAZE branch above.
                raise NotImplementedError()
            inds = np.random.permutation(np.arange(len(all_experts)))  # [0:50]
            maxlength = len(all_experts[0]['actions'])
            experts = [all_experts[ind] for ind in inds]
            if algo == constants.AIRL:
                irl_model = AIRL(env=mygymenv, expert_trajs=experts, state_only=True, fusion=False, max_itrs=budget, reward_arch=rewardarch, discount=boirlobj.configurations.getDiscounts())
            else:
                irl_model = GAN_GCL(env_spec=mygymenv.spec, expert_trajs=experts, state_only=True, discount=boirlobj.configurations.getDiscounts(), discrim_arch_args={"ff_arch": rewardarch})
            policy = GaussianMLPPolicy(name='policy', env_spec=mygymenv.spec, hidden_sizes=(32, 32))
            # Training algorithm
            myalgo = IRLTRPO(
                init_irl_params=irl_inits,
                env=mygymenv,
                policy=policy,
                irl_model=irl_model,
                n_itr=budget,
                batch_size=10,
                max_path_length=maxlength,
                discount=boirlobj.configurations.getDiscounts(),
                store_paths=True,
                irl_model_wt=1.0,
                entropy_weight=0.1,
                zero_environment_reward=True,
                baseline=LinearFeatureBaseline(env_spec=mygymenv.spec),
            )
            # Do the actual training and store the weights of the reward network
            mydirname = os.path.join('Data', algo, 'db_' + env + '_' + algo, str(tr))
            with rllab_logdir(algo=myalgo, dirname=mydirname):
                with tf.Session():
                    myalgo.train(os.path.join(boirlobj.configurations.getResultDir(), "weights%d.npy" % tr))
                    tf.reset_default_graph()  # NOTE(review): placement reconstructed from a flattened source; presumed inside the session scope
    else:
        # Actual code BOIRL
        import numpy as np
        import os
        # Get and store the ground truth NLL value
        if env == constants.FETCH:
            gt_lik, _ = boirlobj.env.get_likelihood(None)
        elif env == constants.MAZE:
            boirlobj.env.trial = -1
            boirlobj.env.algo = algo
            gt_lik = boirlobj.env.get_likelihood()
        elif env == constants.VIRTBORLANGE or env == constants.REALBORLANGE or env == constants.GRIDWORLD2D or env == constants.GRIDWORLD3D:
            gt_lik, _ = boirlobj.env.get_likelihood(boirlobj.fullTrajectories)
        else:
            gt_lik = boirlobj.env.get_likelihood(boirlobj.configurations.fullTrajectories)
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "gt_lik.npy"), gt_lik)
        # Start BO
        for trial in np.arange(trials):
            ### Only applicable for taurbf.
            ### Sample artificial trajectories using a uniform policy
            # print("Generating Artificial Trajectories")
            if algo == constants.RHORBF:
                if env == constants.FETCH:
                    subsetTrajectories, subsetStartPos, subsetArtTrajs = boirlobj.env.get_subs(
                        boirlobj.configurations.getNTrajs())
                elif env == constants.MAZE:
                    subsetTrajectories, subsetStartPos, subsetArtTrajs = boirlobj.env.get_subs(
                        boirlobj.configurations.getNTrajs(), boirlobj.configurations.getNArtTrajs())
                else:
                    # Draw a random subset of expert trajectories to anchor the projection.
                    subinds = np.random.permutation(boirlobj.fullTrajectories.shape[0])[0:boirlobj.configurations.projections]
                    subsetTrajectories = boirlobj.fullTrajectories[subinds]
                    subsetStartPos = boirlobj.fullStartpos[subinds]
                    subsetArtTrajs = None
                    for at in range(boirlobj.configurations.getNArtTrajs()):
                        temp_trajectories = boirlobj.env.artificial_trajectories(subsetTrajectories, subsetStartPos)
                        if subsetArtTrajs is None:
                            subsetArtTrajs = np.expand_dims(temp_trajectories, axis=0)
                        else:
                            subsetArtTrajs = np.append(subsetArtTrajs, np.expand_dims(temp_trajectories, axis=0), axis=0)
            else:
                subsetTrajectories = None
                subsetArtTrajs = None
            # Setup kernel
            boirlobj.setupkernel(subsetTrajectories, subsetArtTrajs)
            # Get the current initialization point
            initX = np.expand_dims(initXs[trial % initXs.shape[0]], axis=0)
            # Set a progress bar to observe the training progress
            boirlobj.pbar = tqdm(total=budget + nInit)
            boirlobj.pbar.set_description("Trial: %d" % trial)
            boirlobj.tr = trial
            # define the BO object
            myProblem = GPyOpt.methods.BayesianOptimization(boirlobj.blackboxfunc, boirlobj.bounds, kernel=boirlobj.kernel, model_type="GP", normalize_Y=False, exact_feval=True, X=initX)  # initial_design_numdata=nInit)
            # Starting the optimization
            myProblem.run_optimization(budget)
            boirlobj.pbar.close()
            # Optimization over
            # Save the results; the IRLObject accumulated choices during the BO run.
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "x_choices.npy"), np.array(boirlobj.x_choices))
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "y_choices.npy"), np.array(boirlobj.y_choices))
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "trial_tracks.npy"), np.array(boirlobj.trial_track))
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "length_tracks.npy"), np.array(boirlobj.length_track))
            if len(boirlobj.stoch_pol) > 0:
                np.save(os.path.join(boirlobj.configurations.getResultDir(), "stochpols.npy"), np.array(boirlobj.stoch_pol))
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "kernel_length_" + str(trial) + ".npy"), boirlobj.kernel.lengthscale.values)  # kernel length for debugging purposes
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "best_x" + str(trial) + ".npy"), myProblem.x_opt)  # best x value corresponding to the best reward encountered during training
            if env == constants.FETCH:
                np.save(os.path.join(boirlobj.configurations.getResultDir(), "train_fids.npy"), np.array(boirlobj.train_fids))
            # plot the convergence plot. This also includes euclidean distance between subsequent points
            myProblem.plot_convergence(
                os.path.join(boirlobj.configurations.getResultDir(), "convergence_" + str(trial) + ".png"))
            boirlobj.plotAcquisition(bo=myProblem, trial=trial)
            # Before training, we specify a set of random values for the reward function parameters and store it as allW.
            # We can calculate the correlation between the ground truth reward function and allW
            # We avoid rborlange and vborlange due to the heavy memory requirement
            if not (env == constants.REALBORLANGE or env == constants.VIRTBORLANGE):
                kk = boirlobj.kernel.K(X=boirlobj.gtheta, X2=boirlobj.allW)
                np.save(os.path.join(boirlobj.configurations.getResultDir(), "kk_" + str(trial) + ".npy"), kk)
                # We can also store the mapping of allW to the latent space.
                if algo == constants.RHORBF:
                    np.save(os.path.join(boirlobj.configurations.getResultDir(), "proxies_" + str(trial) + ".npy"),
                            boirlobj.kernel._get_proxy(boirlobj.allW, None)[0])


def algosor(algo, env, projections=None):
    """Compute ESOR and NLL curves for a trained IRL algorithm on an environment.

    For every stored training trial, replays the learned reward functions,
    regenerates trajectories, and records the expected sum of rewards (ESOR,
    evaluated under the ground-truth reward) and the negative log-likelihood of
    the expert demonstrations. Results are saved as ``sor.npy`` and
    ``likelihood.npy`` in the configuration's result directory.
    """
    print("Calculating BOIRL ESOR and NLL")
    algo_sor = None
    algo_lik = None
    boirlobj = IRLObject(algo, env, projections)
    gt_trajs = boirlobj.fullTrajectories
    gt_spos = boirlobj.fullStartpos
    if env == constants.VIRTBORLANGE:
        randomstart = False
        n_testtrajs = 5000
        if env == constants.FETCH:
            # NOTE(review): unreachable — env is VIRTBORLANGE inside this branch.
            spos = gt_trajs["observation"].shape[0]
        else:
            spos = gt_spos[0:5000]
    else:
        randomstart = False
        if env == constants.FETCH:
            n_testtrajs = gt_trajs["observation"].shape[0]
        else:
            n_testtrajs = gt_trajs.shape[0]
        spos = gt_spos
    if algo == constants.BIRL:
        assert ((env == constants.GRIDWORLD2D) or (env == constants.VIRTBORLANGE) or (env == constants.GRIDWORLD3D))
        tr = 0
        while True:
            # Calculate ESOR and NLL for each trial
            # Load the weights stored during IRL training
            if not os.path.exists(
                    os.path.join(boirlobj.configurations.getResultDir(), "rewards%d.npy" % tr)):
                break
            # The %-formatting is applied to the already-joined path string.
            allW = np.load(os.path.join(boirlobj.configurations.getResultDir(), "rewards%d.npy") % tr)
            temp_sor_pw = np.zeros(allW.shape[0])
            temp_lik_pw = np.zeros(allW.shape[0])
            for n in tqdm(np.arange(allW.shape[0]), desc="Trial: " + str(tr)):
                currentW_mode = allW[n]
                # Set the reward function to the learned reward function
                boirlobj.env.set_reward(currentW_mode)
                # Generate trajectories using the learned reward function
                traj, _, stochpolicy = boirlobj.env.generate_trajectories(n_trajectories=n_testtrajs, random_start=randomstart, startpos=spos)
                # Calculate the likelihood of expert demonstration using learned reward function
                lik_mode = boirlobj.env.get_likelihood_from_policy(gt_trajs, stochpolicy)
                # Set the reward back to the ground truth reward function
                boirlobj.env.set_reward(boirlobj.env.gtweights)
                # Calculate the Expected Sum of Rewards (ESOR) for the new set of trajectories using ground truth reward
                sor_mode, _ = boirlobj.env.evaluate_expsor(traj)
                temp_sor_pw[n] = sor_mode
                temp_lik_pw[n] = lik_mode
            if algo_sor is None:
                algo_sor = np.expand_dims(temp_sor_pw, axis=0)
                algo_lik = np.expand_dims(temp_lik_pw, axis=0)
            else:
                # Pad the shorter of (accumulated rows, current row) with its last
                # value so all trials have equal length before stacking.
                ex_length = algo_sor.shape[1]
                curr_length = temp_sor_pw.shape[0]
                if ex_length > curr_length:
                    temp_sor_pw = np.append(temp_sor_pw, temp_sor_pw[-1] * np.ones((ex_length - curr_length)))
                    temp_lik_pw = np.append(temp_lik_pw, temp_lik_pw[-1] * np.ones((ex_length - curr_length)))
                else:
                    algo_sor = np.append(algo_sor, np.expand_dims(algo_sor[:, -1], axis=1) * np.ones(
                        (1, curr_length - ex_length)), axis=1)
                    algo_lik = np.append(algo_lik, np.expand_dims(algo_lik[:, -1], axis=1) * np.ones(
                        (1, curr_length - ex_length)), axis=1)
                algo_sor = np.append(algo_sor, np.expand_dims(temp_sor_pw, axis=0), axis=0)
                algo_lik = np.append(algo_lik, np.expand_dims(temp_lik_pw, axis=0), axis=0)
            tr += 1
        # Save ESOR and NLL
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "sor.npy"), algo_sor)
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "likelihood.npy"), algo_lik)
    elif algo == constants.AIRL or algo == constants.GCL:
        tr = 0
        while True:
            # load weights:
            weight_fname = os.path.join(boirlobj.configurations.getResultDir(), "weights%d.npy") % tr
            if not os.path.exists(weight_fname):
                break
            allweights = np.load(weight_fname).squeeze()
            # Gridworld env has some mismatch in the index between saved weight and the one used in the reward function.
            if env == constants.GRIDWORLD3D:
                tempshift = np.array(allweights)[:, 0]  # shift
                tempsteep = np.array(allweights)[:, 1]  # steep
                allweights[:, 0] = tempsteep
                allweights[:, 1] = tempshift
            nrew = allweights.shape[0]
            temp_sor = np.zeros(nrew)
            temp_lik = np.zeros(nrew)
            for wind in tqdm(np.arange(0, nrew), desc="Trial: %d" % tr):
                w = allweights[wind]
                # Set the current reward function in the environment
                boirlobj.env.set_reward(w)
                if env == constants.MAZE:
                    boirlobj.env.trial = tr
                    boirlobj.env.algo = algo
                # Generate trajectories using the optimal policy learned from current reward function
                traj, _, stochpolicy = boirlobj.env.generate_trajectories(n_trajectories=n_testtrajs, random_start=randomstart, startpos=spos)
                # Calculate the likelihood of the expert demonstrations under current trajectories
                if stochpolicy is not None:
                    lik = boirlobj.env.get_likelihood_from_policy(gt_trajs, stochpolicy)
                else:
                    lik = boirlobj.env.get_likelihood(savefile=False)
                # Reset the reward function of the environment to the expert's true reward function
                boirlobj.env.set_reward(boirlobj.gtheta.squeeze())
                temp_lik[wind] = lik
                if not (env == constants.REALBORLANGE):
                    # Calculate the expected sum of rewards
                    sor, _ = boirlobj.env.evaluate_expsor(traj)
                    temp_sor[wind] = sor
            if algo_sor is None:
                algo_sor = np.expand_dims(temp_sor, axis=0)
                algo_lik = np.expand_dims(temp_lik, axis=0)
            else:
                algo_sor = np.append(algo_sor, np.expand_dims(temp_sor, axis=0), axis=0)
                algo_lik = np.append(algo_lik, np.expand_dims(temp_lik, axis=0), axis=0)
            tr += 1
            # NOTE(review): also saved after the loop; the per-iteration save keeps partial results.
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "sor.npy"), algo_sor)
            np.save(os.path.join(boirlobj.configurations.getResultDir(), "likelihood.npy"), algo_lik)
        # Save ESOR and NLL
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "sor.npy"), algo_sor)
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "likelihood.npy"), algo_lik)
    else:
        # Load all the data from BOIRL training.
        # Single set of file for all trials
        # trial_tracks.npy indicate which indices in x_choices.npy and y_choices.npy correspond to which trial
        trials = np.load(os.path.join(boirlobj.configurations.getResultDir(), "trial_tracks.npy"))
        X = np.load(os.path.join(boirlobj.configurations.getResultDir(), "x_choices.npy"))
        Y = np.load(os.path.join(boirlobj.configurations.getResultDir(), "y_choices.npy"))
        policies = None
        policy_location = os.path.join(boirlobj.configurations.getResultDir(), "stochpols.npy")
        if os.path.exists(policy_location):
            policies = np.load(policy_location)
        unique_trial, trcnts = np.unique(trials, return_counts=True)
        unique_trial = np.sort(unique_trial)
        trcnts = np.amax(trcnts)  # pad every trial's curve to the longest trial's length
        for tr in tqdm(unique_trial):
            # Calculate ESOR and NLL for each trial
            # Find the indices to extract for current trial
            trialInds = np.where(trials == tr)[0]
            """
            if algo_lik is not None:
                if not (len(trialInds) == np.shape(algo_lik)[1]):
                    break
            """
            validX = X[trialInds]
            validY = Y[trialInds]
            validP = None
            if policies is not None:
                validP = policies[trialInds]
            # NLL can be directly read off from validY as opposed to other algorithms
            bestY = np.minimum.accumulate(validY)
            bestY = np.append(bestY, bestY[-1].item() * np.ones(trcnts - len(bestY)))
            if algo_lik is None:
                algo_lik = np.expand_dims(bestY, axis=0)
            else:
                algo_lik = np.append(algo_lik, np.expand_dims(bestY, axis=0), axis=0)
            # Calculate ESOR
            # Not possible to calculate ESOR for Real Borlange as there is no ground truth reward
            if not (env == constants.REALBORLANGE or env == constants.FETCH):
                # You only need to evaluate when the best reward function thus far has changed
                validChangeLoc = bestY[1:] - bestY[0:-1]
                validChangeLoc = np.append(np.ones(1), validChangeLoc)
                temp_sor = np.zeros(len(bestY))
                for n in range(len(bestY)):
                    if not (validChangeLoc[n] == 0):
                        current_x = validX[n]
                        # Set the reward function to the learned reward function
                        boirlobj.env.set_reward(current_x.squeeze())
                        if env == constants.MAZE:
                            boirlobj.env.trial = tr
                            boirlobj.env.algo = algo
                        # get current policy
                        current_policy = None
                        if validP is not None:
                            current_policy = validP[n]
                        # Generate trajectories using the learned reward function or learned policy if available
                        if current_policy is None:
                            traj, _, _ = boirlobj.env.generate_trajectories(n_trajectories=n_testtrajs, random_start=randomstart, startpos=spos)
                        else:
                            traj, _, _ = boirlobj.env.generate_trajectories_from_policy(n_trajectories=n_testtrajs, random_start=randomstart, startpos=spos, stoch_policy=current_policy)
                        # traj, _, _ = boirlobj.env.generate_trajectories(n_trajectories=gt_spos.shape[0],
                        #                                                 random_start=True)
                        # Set the reward back to the ground truth reward function
                        boirlobj.env.set_reward(boirlobj.gtheta.squeeze())
                        # Calculate the Expected Sum of Rewards (ESOR) for the new set of trajectories using ground truth reward
                        sor, _ = boirlobj.env.evaluate_expsor(traj)
                    # Carry the last computed ESOR forward when the incumbent did not change.
                    temp_sor[n] = sor
                if algo_sor is None:
                    algo_sor = np.expand_dims(temp_sor, axis=0)
                else:
                    algo_sor = np.append(algo_sor, np.expand_dims(temp_sor, axis=0), axis=0)
        # Save ESOR and NLL
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "sor.npy"), algo_sor)
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "likelihood.npy"), algo_lik)


def algoplot(algos, env, percent, niter, plotme=True):
    """Plot ESOR and NLL for the given environment.

    Also reports, per algorithm, the mean and std of the number of iterations
    needed to reach *percent* of the expert's ESOR.
    """
    linewd = 3
    plt.rc('xtick', labelsize=25)
    plt.rc('ytick', labelsize=25)
    fontlabs = {'family': 'serif',
                'color': 'black',
                'weight': 'normal',
                'size': 30,
                'verticalalignment': 'center',
                'horizontalalignment': 'center'
                }
    salpha = 0.9
    catchup = []
    fig = plt.figure(10, figsize=(24., 13.5), dpi=80)
    # Ground truth esor and likelihood
    boirlobj = IRLObject(algos[0], env)
    gtsor_path = os.path.join(boirlobj.configurations.getResultDir(), "gt_sor.npy")
    if os.path.exists(gtsor_path):
        gtsor = np.load(gtsor_path)
    else:
        # NOTE(review): save placement reconstructed from a flattened source;
        # presumed inside the else so the value is only written when freshly computed.
        gtsor, _ = boirlobj.env.evaluate_expsor(boirlobj.fullTrajectories)
        np.save(os.path.join(boirlobj.configurations.getResultDir(), "gt_sor.npy"), gtsor)
    # input(gtsor)
    limit = percent * gtsor
    # For VBORLANAGE and MAZE, the esor is always negative.
    if env == constants.REALBORLANGE or env == constants.VIRTBORLANGE or env == constants.MAZE:
        limit = 2 * gtsor - limit  # Because [1 + (1-percent)]*gtsor
    # deb_sor = np.zeros((3,10,21))
    algoind = 0
    max_length = -1
    if not env == constants.REALBORLANGE:
        for algo in algos:
            config = Configurations(algo, env)
            # Plot SOR and calculate the iterations to reach percentage of expert's ESOR
            sor = np.load(os.path.join(config.getResultDir(), "sor.npy"))
            algoind += 1
            max_length = max(max_length, sor.shape[1])
            bval = []
            # Find at which iteration we cross the limit (% of ESOR)
            for b in sor:
                loc = np.where(b > limit)[0]
                if len(loc) > 0:
                    bval.append(loc[0])
            # input(bval)
            bmean = np.mean(bval)
            bstd = np.std(bval)
            blen = "%d out of %d" % (len(bval), len(sor))
            catchup.append([algo, blen, bmean, bstd])
            # plot only niter iterations
            sor = sor[:, 0: niter]
            sormax = exp_moving_average(sor, alpha=salpha)  # np.maximum.accumulate(algosor, axis=1)
            sor_mean = np.mean(sormax, axis=0)
            sor_std = np.std(sormax, axis=0)
            p1 = plt.fill_between(np.arange(len(sor_mean)), sor_mean - sor_std, sor_mean + sor_std, alpha=0.1)
plt_color = np.array(p1.get_facecolor()[0]) plt_color[-1] = 1. plt.plot(np.arange(len(sor_mean)), sor_mean, "-", label=constants.LEGENDS[algo], c=plt_color, linewidth=linewd) if sor_mean.shape[0] < niter: diff = niter - sor_mean.shape[0] extra_x = np.arange(sor_mean.shape[0], niter) extra_y = sor_mean[-1] * np.ones((diff)) plt.plot(extra_x, extra_y, "--", linewidth=linewd, c=plt_color) # save ESOR plt.plot(np.arange(niter), gtsor * np.ones(niter), "--", linewidth=linewd / 2, label="Ground Truth") plt.legend(fontsize=20) plt.xlabel("Number of iterations", fontdict=fontlabs, labelpad=30) plt.ylabel("ESOR", fontdict=fontlabs, labelpad=15) if plotme: plt.savefig("ESOR_%s.png" % env, bbox_inches="tight") plt.close("all") fig = plt.figure(11, figsize=(24., 13.5), dpi=80) # Repeat for NLL for algo in algos: config = Configurations(algo, env) lik = np.load(os.path.join(config.getResultDir(), "likelihood.npy"))[:, 0:niter] likmax = exp_moving_average(lik, alpha=salpha) # np.maximum.accumulate(algosor, axis=1) lik_mean = np.mean(likmax, axis=0) lik_std = np.std(likmax, axis=0) p1 = plt.fill_between(np.arange(len(lik_mean)), lik_mean - lik_std, lik_mean + lik_std, alpha=0.1) plt_color = np.array(p1.get_facecolor()[0]) plt_color[-1] = 1. 
plt.plot(np.arange(len(lik_mean)), lik_mean, "-", label=constants.LEGENDS[algo], c=plt_color, linewidth=linewd) if lik_mean.shape[0] < niter: diff = niter - lik_mean.shape[0] extra_x = np.arange(lik_mean.shape[0], niter) extra_y = lik_mean[-1] * np.ones((diff)) plt.plot(extra_x, extra_y, "--", linewidth=linewd, c=plt_color) plt.legend(fontsize=20) plt.xlabel("Number of iterations", fontdict=fontlabs, labelpad=30) plt.ylabel("NLL", fontdict=fontlabs, labelpad=15) plt.xticks(np.arange(0,max(lik_mean.shape[0],niter),5)) if plotme: plt.savefig("NLL_%s.png" % env, bbox_inches="tight") plt.close("all") # Output the number of iterations required to reach the specified percentage of Expert's ESOR print("Number of iterations required to reach %2.1f%% of Expert's ESOR:%2.1f" % (100 * percent, gtsor)) print(tabulate(catchup, headers=['Algo', 'Success Rate', "Mean no: of iters for successful cases", "Std of no: of iters"])) return catchup
{"hexsha": "bc4da2ef94537d9209255e5e277396633df0e190", "size": 34193, "ext": "py", "lang": "Python", "max_stars_repo_path": "boirlscenarios/boirlmain.py", "max_stars_repo_name": "clear-nus/BOIRL", "max_stars_repo_head_hexsha": "cc872111fda3c7b8118e1a864831013c30f63948", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-26T10:09:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T10:09:15.000Z", "max_issues_repo_path": "boirlscenarios/boirlmain.py", "max_issues_repo_name": "clear-nus/BOIRL", "max_issues_repo_head_hexsha": "cc872111fda3c7b8118e1a864831013c30f63948", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boirlscenarios/boirlmain.py", "max_forks_repo_name": "clear-nus/BOIRL", "max_forks_repo_head_hexsha": "cc872111fda3c7b8118e1a864831013c30f63948", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.958271237, "max_line_length": 165, "alphanum_fraction": 0.572778054, "include": true, "reason": "import numpy", "num_tokens": 7869}
[STATEMENT] lemma locate_locate_pred_unique: assumes "\<And> a. a \<in> set al \<Longrightarrow> (0::nat) < f a" and "locate_pred f al i n_j" shows "n_j = locate f al i" [PROOF STATE] proof (prove) goal (1 subgoal): 1. n_j = locate f al i [PROOF STEP] unfolding locate_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. n_j = (SOME n_j. locate_pred f al i n_j) [PROOF STEP] apply(rule sym, rule some_equality) [PROOF STATE] proof (prove) goal (2 subgoals): 1. locate_pred f al i n_j 2. \<And>n_ja. locate_pred f al i n_ja \<Longrightarrow> n_ja = n_j [PROOF STEP] using assms locate_locate_pred [PROOF STATE] proof (prove) using this: ?a \<in> set al \<Longrightarrow> 0 < f ?a locate_pred f al i n_j \<lbrakk>\<And>a. a \<in> set ?al \<Longrightarrow> 0 < ?f a; ?i < lsum ?f ?al\<rbrakk> \<Longrightarrow> locate_pred ?f ?al ?i (locate ?f ?al ?i) goal (2 subgoals): 1. locate_pred f al i n_j 2. \<And>n_ja. locate_pred f al i n_ja \<Longrightarrow> n_ja = n_j [PROOF STEP] apply force [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>n_ja. locate_pred f al i n_ja \<Longrightarrow> n_ja = n_j [PROOF STEP] using assms locate_pred_unique [PROOF STATE] proof (prove) using this: ?a \<in> set al \<Longrightarrow> 0 < f ?a locate_pred f al i n_j \<lbrakk>\<And>a. a \<in> set ?al \<Longrightarrow> 0 < ?f a; locate_pred ?f ?al ?i ?n1_j1.0; locate_pred ?f ?al ?i ?n2_j2.0\<rbrakk> \<Longrightarrow> ?n1_j1.0 = ?n2_j2.0 goal (1 subgoal): 1. \<And>n_ja. locate_pred f al i n_ja \<Longrightarrow> n_ja = n_j [PROOF STEP] by blast
{"llama_tokens": 681, "file": "Probabilistic_Noninterference_Language_Semantics", "length": 6}
from . import colour_functions as cf
import matplotlib.pyplot as plt
import progressbar
from scipy.interpolate import interp2d
from pathlib import Path
from PIL import Image
import time
import numpy as np
from .backend_functions import backend as bd

# Length unit conversion factors; all lengths are handled in metres internally.
m = 1.
cm = 1e-2
mm = 1e-3
um = 1e-6
nm = 1e-9


class PolychromaticField:
    """Scalar optical field with an arbitrary emission spectrum, propagated
    with the angular spectrum method and rendered to sRGB colours.

    Parameters
    ----------
    spectrum : array_like
        Spectral intensity sampled on 400 points spanning 380-779 nm.
        (Re)interpolated to ``spectrum_size`` samples when that differs
        from 400.
    extent_x, extent_y : float
        Physical size of the simulation window (metres).
    Nx, Ny : int
        Grid resolution along x and y.
    spectrum_size : int
        Number of samples the working spectrum is interpolated to.
    spectrum_divisions : int
        Number of spectral sub-intervals used when accumulating colours;
        must divide ``spectrum_size`` exactly.
    """

    def __init__(self, spectrum, extent_x, extent_y, Nx, Ny,
                 spectrum_size=180, spectrum_divisions=30):
        global bd
        # Re-import so the backend chosen at runtime (numpy or cupy) is used.
        from .backend_functions import backend as bd

        self.extent_x = extent_x
        self.extent_y = extent_y

        self.x = bd.linspace(-extent_x / 2, extent_x / 2, Nx)
        self.y = bd.linspace(-extent_y / 2, extent_y / 2, Ny)
        self.xx, self.yy = bd.meshgrid(self.x, self.y)

        # FIX: the original used bd.int(...), which relied on the deprecated
        # `np.int` alias removed in NumPy 1.24 (and absent from cupy's public
        # API).  The builtin int() is what was always intended.
        self.Nx = int(Nx)
        self.Ny = int(Ny)
        self.E = bd.ones((self.Ny, self.Nx))

        if spectrum_size % spectrum_divisions != 0:
            raise ValueError("spectrum_size/spectrum_divisions must be an integer")

        # By default the input spectrum has 400 samples; if a different
        # working size is requested, interpolate onto that grid.
        if spectrum_size == 400:
            self.spectrum = bd.array(spectrum)
        else:
            self.spectrum = bd.array(
                np.interp(np.linspace(380, 779, spectrum_size),
                          np.linspace(380, 779, 400), spectrum))

        self.spectrum_divisions = spectrum_divisions
        # Width (nm) of each spectral sub-interval and its sample wavelengths.
        self.dλ_partition = (780 - 380) / self.spectrum_divisions
        self.λ_list_samples = bd.arange(380, 780, self.dλ_partition)

        self.spec_partitions = bd.split(self.spectrum, self.spectrum_divisions)

        self.cs = cf.ColourSystem(spectrum_size=spectrum_size,
                                  spec_divisions=spectrum_divisions, clip_method=1)

        self.lens = False   # whether a thin-lens phase factor is applied
        self.lens_f = 0.    # focal length of that lens (metres)
        self.z = 0          # propagation distance of the last computation

    def add_rectangular_slit(self, x0, y0, width, height):
        """
        Creates a slit centered at the point (x0, y0) with width width and height height
        """
        t = bd.select(
            [
                ((self.xx > (x0 - width / 2)) & (self.xx < (x0 + width / 2)))
                & ((self.yy > (y0 - height / 2)) & (self.yy < (y0 + height / 2))),
                True,
            ],
            [bd.ones(self.E.shape), bd.zeros(self.E.shape)],
        )
        self.E = self.E * t

    def add_circular_slit(self, x0, y0, R):
        """
        Creates a circular slit centered at the point (x0,y0) with radius R
        """
        t = bd.select(
            [(self.xx - x0) ** 2 + (self.yy - y0) ** 2 < R ** 2,
             bd.full(self.E.shape, True, dtype=bool)],
            [bd.ones(self.E.shape), bd.zeros(self.E.shape)]
        )
        self.E = self.E * t

    def add_gaussian_beam(self, w0):
        """
        Creates a Gaussian beam with radius equal to w0
        """
        r2 = self.xx ** 2 + self.yy ** 2
        self.E = self.E * bd.exp(-r2 / (w0 ** 2))

    def add_diffraction_grid(self, D, a, Nx, Ny):
        """
        Creates a diffraction_grid with Nx * Ny slits with separation distance D and width a
        """
        t = 0
        b = D - a  # gap between adjacent slits
        width, height = Nx * a + (Nx - 1) * b, Ny * a + (Ny - 1) * b

        # Start at the top-left slit centre and step by the pitch D.
        # (The original also assigned x0, y0 = -width/2, height/2 here, but
        # both values were immediately overwritten — dead code removed.)
        x0 = -width / 2 + a / 2
        for _ in range(Nx):
            y0 = height / 2 - a / 2
            for _ in range(Ny):
                t += bd.select(
                    [
                        ((self.xx > (x0 - a / 2)) & (self.xx < (x0 + a / 2)))
                        & ((self.yy > (y0 - a / 2)) & (self.yy < (y0 + a / 2))),
                        True,
                    ],
                    [bd.ones(self.E.shape), bd.zeros(self.E.shape)],
                )
                y0 -= D
            x0 += D
        self.E = self.E * t

    def add_aperture_from_image(self, path, pad=None, Nx=None, Ny=None):
        """
        Load the image specified at "path" as a numpy graymap array.
        - If Nx and Ny is specified, we interpolate the pattern with interp2d method to the new specified resolution.
        - If pad is specified, we add zeros (black color) padded to the edges of each axis.
        """
        img = Image.open(Path(path))
        img = img.convert("RGB")
        imgRGB = np.asarray(img) / 255.0
        # Luma conversion (ITU-R BT.601 weights) to get a gray transmittance map.
        imgR = imgRGB[:, :, 0]
        imgG = imgRGB[:, :, 1]
        imgB = imgRGB[:, :, 2]
        t = 0.2990 * imgR + 0.5870 * imgG + 0.1140 * imgB
        t = np.flip(t, axis=0)  # image rows run top-down; grid y runs bottom-up

        # NOTE(review): scipy.interpolate.interp2d is deprecated (removed in
        # SciPy 1.14); consider RegularGridInterpolator when upgrading SciPy.
        fun = interp2d(
            np.linspace(0, 1, t.shape[1]),
            np.linspace(0, 1, t.shape[0]),
            t,
            kind="cubic",
        )
        t = fun(np.linspace(0, 1, self.Nx), np.linspace(0, 1, self.Ny))

        # optional: add zeros and interpolate to the new specified resolution
        if pad is not None:
            if bd != np:
                # Padding/interpolation below is done with NumPy; pull the
                # field off the GPU first.
                self.E = self.E.get()

            Nxpad = int(np.round(self.Nx / self.extent_x * pad[0]))
            Nypad = int(np.round(self.Ny / self.extent_y * pad[1]))
            self.E = np.pad(self.E, ((Nypad, Nypad), (Nxpad, Nxpad)), "constant")
            t = np.pad(t, ((Nypad, Nypad), (Nxpad, Nxpad)), "constant")
            self.E = np.array(self.E * t)

            scale_ratio = self.E.shape[1] / self.E.shape[0]
            self.Nx = int(np.round(self.E.shape[0] * scale_ratio)) if Nx is None else Nx
            self.Ny = self.E.shape[0] if Ny is None else Ny
            self.extent_x += 2 * pad[0]
            self.extent_y += 2 * pad[1]

            fun = interp2d(
                np.linspace(0, 1, self.E.shape[1]),
                np.linspace(0, 1, self.E.shape[0]),
                self.E,
                kind="cubic",
            )
            self.E = bd.array(fun(np.linspace(0, 1, self.Nx), np.linspace(0, 1, self.Ny)))

            # new grid units
            self.x = bd.linspace(-self.extent_x / 2, self.extent_x / 2, self.Nx)
            self.y = bd.linspace(-self.extent_y / 2, self.extent_y / 2, self.Ny)
            self.xx, self.yy = bd.meshgrid(self.x, self.y)
        else:
            self.E = self.E * bd.array(t)

    def add_lens(self, f):
        """add a thin lens with a focal length equal to f """
        self.lens = True
        self.lens_f = f

    def compute_colors_at(self, z):
        """Propagate the field a distance z and return the (Ny, Nx, 3) sRGB
        colours of the resulting beam profile."""
        self.z = z

        # Spatial-frequency grid for the angular spectrum method.
        kx = bd.linspace(
            -bd.pi * self.Nx // 2 / (self.extent_x / 2),
            bd.pi * self.Nx // 2 / (self.extent_x / 2),
            self.Nx,
        )
        ky = bd.linspace(
            -bd.pi * self.Ny // 2 / (self.extent_y / 2),
            bd.pi * self.Ny // 2 / (self.extent_y / 2),
            self.Ny,
        )
        kx, ky = bd.meshgrid(kx, ky)

        sRGB_linear = bd.zeros((3, self.Nx * self.Ny))

        if not self.lens:
            # Without a lens the input spectrum is wavelength-independent, so
            # the forward FFT can be hoisted out of the wavelength loop.
            fft_c = bd.fft.fft2(self.E)
            c = bd.fft.fftshift(fft_c)

        bar = progressbar.ProgressBar()

        # We compute the pattern of each wavelength separately, and associate it to small spectrum interval dλ = (780- 380)/spectrum_divisions . We approximately the final colour
        # by summing the contribution of each small spectrum interval converting its intensity distribution to a RGB space.
        t0 = time.time()
        for i in bar(range(self.spectrum_divisions)):
            if self.lens:
                # Thin-lens quadratic phase depends on wavelength, so the FFT
                # must be redone for every spectral sample.
                fft_c = bd.fft.fft2(self.E * bd.exp(-1j * bd.pi / (self.λ_list_samples[i] * nm * self.lens_f) * (self.xx ** 2 + self.yy ** 2)))
                c = bd.fft.fftshift(fft_c)

            kz = bd.sqrt(
                (2 * bd.pi / (self.λ_list_samples[i] * nm)) ** 2 - kx ** 2 - ky ** 2
            )
            # Angular spectrum propagation: multiply by exp(i kz z) in k-space.
            E_λ = bd.fft.ifft2(bd.fft.ifftshift(c * bd.exp(1j * kz * z)))
            Iλ = bd.real(E_λ * bd.conjugate(E_λ))

            XYZ = self.cs.spec_partition_to_XYZ(bd.outer(Iλ, self.spec_partitions[i]), i)
            sRGB_linear += self.cs.XYZ_to_sRGB_linear(XYZ)

        if bd != np:
            # Make sure all queued GPU kernels finish before timing/reshaping.
            bd.cuda.Stream.null.synchronize()
        rgb = self.cs.sRGB_linear_to_sRGB(sRGB_linear)
        rgb = (rgb.T).reshape((self.Ny, self.Nx, 3))
        print("Computation Took", time.time() - t0)
        return rgb

    def plot(self, rgb, figsize=(6, 6), xlim=None, ylim=None):
        """visualize the diffraction pattern with matplotlib"""
        plt.style.use("dark_background")
        if bd != np:
            rgb = rgb.get()  # move GPU array to host for matplotlib

        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(1, 1, 1)
        if xlim is not None:
            ax.set_xlim(xlim)
        if ylim is not None:
            ax.set_ylim(ylim)

        ax.set_xlabel("[mm]")
        ax.set_ylabel("[mm]")
        ax.set_title("Screen distance = " + str(self.z * 100) + " cm")

        ax.set_aspect("equal")
        im = ax.imshow(
            (rgb),
            extent=[
                -self.extent_x / 2 / mm,
                self.extent_x / 2 / mm,
                -self.extent_y / 2 / mm,
                self.extent_y / 2 / mm,
            ],
            interpolation="spline36",
            origin="lower"
        )
        plt.show()

    def propagate(self, z, spectrum_divisions=40, grid_divisions=10):
        # Not supported for polychromatic fields; use compute_colors_at.
        raise NotImplementedError(self.__class__.__name__ + '.propagate')

    def get_colors(self):
        # Not supported; compute_colors_at returns the colours directly.
        raise NotImplementedError(self.__class__.__name__ + '.get_colors')

    def add_spatial_noise(self, noise_radius, f_mean, f_size, N=30, A=1):
        # Only implemented for monochromatic fields.
        raise NotImplementedError(self.__class__.__name__ + '.add_spatial_noise')
{"hexsha": "b2eebe4434b8f18373d9f8eec5f620387f7526d8", "size": 9615, "ext": "py", "lang": "Python", "max_stars_repo_path": "diffractsim/polychromatic_simulator.py", "max_stars_repo_name": "villadsegede/Diffraction-Simulations--Angular-Spectrum-Method", "max_stars_repo_head_hexsha": "35a875711bba8f00ab060cea211aeb3f36c3d8bc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 142, "max_stars_repo_stars_event_min_datetime": "2020-12-30T07:05:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T14:42:18.000Z", "max_issues_repo_path": "diffractsim/polychromatic_simulator.py", "max_issues_repo_name": "villadsegede/Diffraction-Simulations--Angular-Spectrum-Method", "max_issues_repo_head_hexsha": "35a875711bba8f00ab060cea211aeb3f36c3d8bc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-01-04T17:00:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-28T11:27:48.000Z", "max_forks_repo_path": "diffractsim/polychromatic_simulator.py", "max_forks_repo_name": "villadsegede/Diffraction-Simulations--Angular-Spectrum-Method", "max_forks_repo_head_hexsha": "35a875711bba8f00ab060cea211aeb3f36c3d8bc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-12-31T05:59:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T06:03:59.000Z", "avg_line_length": 32.7040816327, "max_line_length": 178, "alphanum_fraction": 0.5316692668, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2625}
# Unit tests for the FixedEffect type: structural equality, display output,
# basic collection interface (size/length/eltype) and indexing/subsetting.
using Test
using FixedEffects
using StatsBase
using PooledArrays, CategoricalArrays

import Base: ==
# Field-by-field equality for FixedEffect so @test can compare subsets
# against freshly constructed instances.
==(x::FixedEffect{R,I}, y::FixedEffect{R,I}) where {R,I} = x.refs == y.refs && x.interaction == y.interaction && x.n == y.n

@testset "FixedEffect" begin
    fe1 = FixedEffect(1:10)
    # Compact (one-line) show.
    @test sprint(show, fe1) == "Fixed Effects"
    # Verbose show: Julia 1.6 changed array printing from `Array{T,1}` to
    # `Vector{T}`, hence the VERSION branch below.
    if VERSION <= v"1.5"
        @test sprint(show, MIME("text/plain"), fe1) == """
Fixed Effects:
refs (10-element Array{Int64,1}): [1, 2, 3, 4, 5, ... ]
interaction (UnitWeights): none"""
    else
        @test sprint(show, MIME("text/plain"), fe1) == """
Fixed Effects:
refs (10-element Vector{Int64}): [1, 2, 3, 4, 5, ... ]
interaction (UnitWeights): none"""
    end
    # Fixed effect with a continuous interaction term.
    fe2 = FixedEffect(1:10, interaction=fill(1.23456789, 10))
    if VERSION <= v"1.5"
        @test sprint(show, MIME("text/plain"), fe2) == """
Fixed Effects:
refs (10-element Array{Int64,1}): [1, 2, 3, 4, 5, ... ]
interaction (10-element Array{Float64,1}): [1.23457, 1.23457, 1.23457, 1.23457, 1.23457, ... ]"""
    else
        @test sprint(show, MIME("text/plain"), fe2) == """
Fixed Effects:
refs (10-element Vector{Int64}): [1, 2, 3, 4, 5, ... ]
interaction (10-element Vector{Float64}): [1.23457, 1.23457, 1.23457, 1.23457, 1.23457, ... ]"""
    end
    # Interaction vector must have the same length as refs.
    @test_throws DimensionMismatch FixedEffect(1:10, interaction=fill(1, 5))

    @test size(fe1) == (10,)
    @test length(fe1) == 10
    @test eltype(fe1) == Int
    @test eltype(fe2) == Float64

    # Indexing with `:` is a no-op returning the same object (identity).
    @test fe1[:] === fe1
    # Integer-vector, range and boolean-mask indexing all agree; matrix
    # indices are rejected.
    subfe1 = fe1[[1,2]]
    @test subfe1.refs == fe1.refs[1:2]
    @test subfe1.interaction == uweights(2)
    @test subfe1 == fe1[1:2]
    @test subfe1 == fe1[fe1.refs.<=2]
    @test_throws MethodError fe1[[1 2]]

    # Same indexing behaviour when an interaction vector is present.
    subfe2 = fe2[[1,2]]
    @test subfe2.refs == fe2.refs[1:2]
    @test subfe2.interaction == fe2.interaction[1:2]
    @test subfe2 == fe2[1:2]
    @test subfe2 == fe2[fe2.refs.<=2]
    @test_throws MethodError fe2[[1 2]]
end
{"hexsha": "666cbac2cf290ecc2ea7e3e31ddee7f14966c874", "size": 2199, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/types.jl", "max_stars_repo_name": "FixedEffects/FixedEffects.jl", "max_stars_repo_head_hexsha": "6a5874c8cccb4c766977c33492ff9f271b97cef6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-12-31T09:41:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T07:49:31.000Z", "max_issues_repo_path": "test/types.jl", "max_issues_repo_name": "matthieugomez/FixedEffects", "max_issues_repo_head_hexsha": "4f7cdc2b9ebcdafa09be6d694b3c2f146b97b34c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-12-10T15:07:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-09T06:52:25.000Z", "max_forks_repo_path": "test/types.jl", "max_forks_repo_name": "matthieugomez/FixedEffects", "max_forks_repo_head_hexsha": "4f7cdc2b9ebcdafa09be6d694b3c2f146b97b34c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-12-28T17:35:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-31T20:18:04.000Z", "avg_line_length": 31.8695652174, "max_line_length": 76, "alphanum_fraction": 0.5302410186, "num_tokens": 726}
[STATEMENT] lemma mod_int_wlog [consumes 1, case_names modulo]: fixes P :: "int \<Rightarrow> bool" assumes "b > 0" assumes "\<And>k. 0 \<le> k \<Longrightarrow> k < b \<Longrightarrow> n mod b = k \<Longrightarrow> P n" shows "P n" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P n [PROOF STEP] using \<open>b > 0\<close> assms(2) [of \<open>n mod b\<close>] [PROOF STATE] proof (prove) using this: 0 < b \<lbrakk>0 \<le> n mod b; n mod b < b; n mod b = n mod b\<rbrakk> \<Longrightarrow> P n goal (1 subgoal): 1. P n [PROOF STEP] by simp
{"llama_tokens": 233, "file": "Probabilistic_Prime_Tests_Jacobi_Symbol", "length": 2}
from __future__ import annotations __copyright__ = "Copyright (C) 2021 Kaushik Kulkarni" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np import islpy as isl import pymbolic.primitives as prim from typing import (Tuple, List, Union, Callable, Any, Sequence, Dict, Optional, Iterable, TypeVar) from pytato.array import (Array, ShapeType, IndexLambda, SizeParam, ShapeComponent, DtypeOrScalar, ArrayOrScalar, BasicIndex, AdvancedIndexInContiguousAxes, AdvancedIndexInNoncontiguousAxes, ConvertibleToIndexExpr, IndexExpr, NormalizedSlice) from pytato.scalar_expr import (ScalarExpression, IntegralScalarExpression, SCALAR_CLASSES) from pytools import UniqueNameGenerator from pytato.transform import Mapper __doc__ = """ Helper routines --------------- .. autofunction:: are_shape_components_equal .. autofunction:: are_shapes_equal .. autofunction:: get_shape_after_broadcasting .. 
autofunction:: dim_to_index_lambda_components """ # {{{ partition Tpart = TypeVar("Tpart") def partition(pred: Callable[[Tpart], bool], iterable: Iterable[Tpart]) -> Tuple[List[Tpart], List[Tpart]]: """ Use a predicate to partition entries into false entries and true entries """ # Inspired from https://docs.python.org/3/library/itertools.html # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 from itertools import tee, filterfalse t1, t2 = tee(iterable) return list(filterfalse(pred, t1)), list(filter(pred, t2)) # }}} def get_shape_after_broadcasting( exprs: Sequence[Union[Array, ScalarExpression]]) -> ShapeType: """ Returns the shape after broadcasting *exprs* in an operation. """ from pytato.diagnostic import CannotBroadcastError shapes = [expr.shape if isinstance(expr, Array) else () for expr in exprs] result_dim = max((len(s) for s in shapes), default=0) # append leading dimensions of all the shapes with 1's to match result_dim. augmented_shapes = [((1,)*(result_dim-len(s)) + s) for s in shapes] def _get_result_axis_length(axis_lengths: List[IntegralScalarExpression] ) -> IntegralScalarExpression: result_axis_len = axis_lengths[0] for axis_len in axis_lengths[1:]: if are_shape_components_equal(axis_len, result_axis_len): pass elif are_shape_components_equal(axis_len, 1): pass elif are_shape_components_equal(result_axis_len, 1): result_axis_len = axis_len else: raise CannotBroadcastError("operands could not be broadcasted " "together with shapes " f"{' '.join(str(s) for s in shapes)}.") return result_axis_len return tuple(_get_result_axis_length([s[i] for s in augmented_shapes]) for i in range(result_dim)) def get_indexing_expression(shape: ShapeType, result_shape: ShapeType) -> Tuple[ScalarExpression, ...]: """ Returns the indices while broadcasting an array of shape *shape* into one of shape *result_shape*. 
""" assert len(shape) <= len(result_shape) i_start = len(result_shape) - len(shape) indices = [] for i, (dim1, dim2) in enumerate(zip(shape, result_shape[i_start:])): if not are_shape_components_equal(dim1, dim2): assert are_shape_components_equal(dim1, 1) indices.append(0) else: indices.append(prim.Variable(f"_{i+i_start}")) return tuple(indices) def with_indices_for_broadcasted_shape(val: prim.Variable, shape: ShapeType, result_shape: ShapeType) -> prim.Expression: if len(shape) == 0: # scalar expr => do not index return val else: return val[get_indexing_expression(shape, result_shape)] def extract_dtypes_or_scalars( exprs: Sequence[ArrayOrScalar]) -> List[DtypeOrScalar]: dtypes: List[DtypeOrScalar] = [] for expr in exprs: if isinstance(expr, Array): dtypes.append(expr.dtype) else: assert isinstance(expr, SCALAR_CLASSES) dtypes.append(expr) return dtypes def update_bindings_and_get_broadcasted_expr(arr: ArrayOrScalar, bnd_name: str, bindings: Dict[str, Array], result_shape: ShapeType ) -> ScalarExpression: """ Returns an instance of :class:`~pytato.scalar_expr.ScalarExpression` to address *arr* in a :class:`pytato.array.IndexLambda` of shape *result_shape*. 
""" if isinstance(arr, SCALAR_CLASSES): return arr assert isinstance(arr, Array) bindings[bnd_name] = arr return with_indices_for_broadcasted_shape(prim.Variable(bnd_name), arr.shape, result_shape) def broadcast_binary_op(a1: ArrayOrScalar, a2: ArrayOrScalar, op: Callable[[ScalarExpression, ScalarExpression], ScalarExpression], # noqa:E501 get_result_type: Callable[[DtypeOrScalar, DtypeOrScalar], np.dtype[Any]], # noqa:E501 ) -> ArrayOrScalar: if np.isscalar(a1) and np.isscalar(a2): from pytato.scalar_expr import evaluate return evaluate(op(a1, a2)) # type: ignore result_shape = get_shape_after_broadcasting([a1, a2]) dtypes = extract_dtypes_or_scalars([a1, a2]) result_dtype = get_result_type(*dtypes) bindings: Dict[str, Array] = {} expr1 = update_bindings_and_get_broadcasted_expr(a1, "_in0", bindings, result_shape) expr2 = update_bindings_and_get_broadcasted_expr(a2, "_in1", bindings, result_shape) return IndexLambda(op(expr1, expr2), shape=result_shape, dtype=result_dtype, bindings=bindings) # {{{ dim_to_index_lambda_components class ShapeExpressionMapper(Mapper): """ Mapper that takes a shape component and returns it as a scalar expression. 
""" def __init__(self, var_name_gen: UniqueNameGenerator): self.cache: Dict[Array, ScalarExpression] = {} self.var_name_gen = var_name_gen self.bindings: Dict[str, SizeParam] = {} def rec(self, expr: Array) -> ScalarExpression: # type: ignore if expr in self.cache: return self.cache[expr] result: Array = super().rec(expr) self.cache[expr] = result return result def map_index_lambda(self, expr: IndexLambda) -> ScalarExpression: from pytato.scalar_expr import substitute return substitute(expr.expr, {name: self.rec(val) for name, val in expr.bindings.items()}) def map_size_param(self, expr: SizeParam) -> ScalarExpression: name = self.var_name_gen("_in") self.bindings[name] = expr return prim.Variable(name) def dim_to_index_lambda_components(expr: ShapeComponent, vng: Optional[UniqueNameGenerator] = None, ) -> Tuple[ScalarExpression, Dict[str, SizeParam]]: """ Returns the scalar expressions and bindings to use the shape component within an index lambda. .. testsetup:: >>> import pytato as pt >>> from pytato.utils import dim_to_index_lambda_components >>> from pytools import UniqueNameGenerator .. doctest:: >>> n = pt.make_size_param("n") >>> expr, bnds = dim_to_index_lambda_components(3*n+8, UniqueNameGenerator()) >>> print(expr) 3*_in + 8 >>> bnds # doctest: +ELLIPSIS {'_in': <pytato.array.SizeParam ...>} """ if isinstance(expr, int): return expr, {} if vng is None: vng = UniqueNameGenerator() assert isinstance(vng, UniqueNameGenerator) assert isinstance(expr, Array) mapper = ShapeExpressionMapper(vng) result = mapper(expr) return result, mapper.bindings # }}} def are_shape_components_equal(dim1: ShapeComponent, dim2: ShapeComponent) -> bool: """ Returns *True* iff *dim1* and *dim2* are have equal :class:`~pytato.array.SizeParam` coefficients in their expressions. 
""" from pytato.scalar_expr import substitute, distribute def to_expr(dim: ShapeComponent) -> ScalarExpression: expr, bnds = dim_to_index_lambda_components(dim, UniqueNameGenerator()) return substitute(expr, {name: prim.Variable(bnd.name) for name, bnd in bnds.items()}) dim1_expr = to_expr(dim1) dim2_expr = to_expr(dim2) # ScalarExpression.__eq__ returns Any return (distribute(dim1_expr-dim2_expr) == 0) # type: ignore def are_shapes_equal(shape1: ShapeType, shape2: ShapeType) -> bool: """ Returns *True* iff *shape1* and *shape2* have the same dimensionality and the correpsonding components are equal as defined by :func:`~pytato.utils.are_shape_components_equal`. """ return ((len(shape1) == len(shape2)) and all(are_shape_components_equal(dim1, dim2) for dim1, dim2 in zip(shape1, shape2))) # {{{ ShapeToISLExpressionMapper class ShapeToISLExpressionMapper(Mapper): """ Mapper that takes a shape component and returns it as :class:`isl.Aff`. """ def __init__(self, space: isl.Space): self.cache: Dict[Array, isl.Aff] = {} self.space = space # type-ignore reason: incompatible return type with super class def rec(self, expr: Array) -> isl.Aff: # type: ignore[override] if expr in self.cache: return self.cache[expr] result: Array = super().rec(expr) self.cache[expr] = result return result def map_index_lambda(self, expr: IndexLambda) -> isl.Aff: from pytato.scalar_expr import evaluate return evaluate(expr.expr, {name: self.rec(val) for name, val in expr.bindings.items()}) def map_size_param(self, expr: SizeParam) -> isl.Aff: dt, pos = self.space.get_var_dict()[expr.name] return isl.Aff.var_on_domain(self.space, dt, pos) # }}} def _create_size_param_space(names: Iterable[str]) -> isl.Space: return isl.Space.create_from_names(isl.DEFAULT_CONTEXT, set=[], params=sorted(names)).params() def _get_size_params_assumptions_bset(space: isl.Space) -> isl.BasicSet: bset = isl.BasicSet.universe(space) for name in bset.get_var_dict(): bset = 
bset.add_constraint(isl.Constraint.ineq_from_names(space, {name: 1})) return bset def _is_non_negative(expr: ShapeComponent) -> bool: """ Returns *True* iff it can be proven that ``expr >= 0``. """ if isinstance(expr, int): return expr >= 0 assert isinstance(expr, Array) and expr.shape == () from pytato.transform import InputGatherer # type-ignore reason: passed Set[Optional[str]]; function expects Set[str] space = _create_size_param_space({expr.name # type: ignore for expr in InputGatherer()(expr)}) aff = ShapeToISLExpressionMapper(space)(expr) # type-ignore reason: mypy doesn't know comparing isl.Sets returns bool return (aff.ge_set(aff * 0) # type: ignore[no-any-return] <= _get_size_params_assumptions_bset(space)) def _is_non_positive(expr: ShapeComponent) -> bool: """ Returns *True* iff it can be proven that ``expr <= 0``. """ return _is_non_negative(-expr) # {{{ _index_into # {{{ normalized slice def _normalize_slice(slice_: slice, axis_len: ShapeComponent) -> NormalizedSlice: start, stop, step = slice_.start, slice_.stop, slice_.step if step is None: step = 1 if not isinstance(step, int): raise ValueError(f"slice step must be an int or 'None' (got a {type(step)})") if step == 0: raise ValueError("slice step cannot be zero") if step > 0: default_start: ShapeComponent = 0 default_stop: ShapeComponent = axis_len else: default_start = axis_len - 1 default_stop = -1 if start is None: start = default_start else: if isinstance(axis_len, int): if -axis_len <= start < axis_len: start = start % axis_len elif start >= axis_len: if step > 0: start = axis_len else: start = axis_len - 1 else: if step > 0: start = 0 else: start = -1 else: raise NotImplementedError if stop is None: stop = default_stop else: if isinstance(axis_len, int): if -axis_len <= stop < axis_len: stop = stop % axis_len elif stop >= axis_len: if step > 0: stop = axis_len else: stop = axis_len - 1 else: if step > 0: stop = 0 else: stop = -1 else: raise NotImplementedError return NormalizedSlice(start, 
stop, step) def _normalized_slice_len(slice_: NormalizedSlice) -> ShapeComponent: start, stop, step = slice_.start, slice_.stop, slice_.step if step > 0: if _is_non_negative(stop - start): return (stop - start + step - 1) // step elif _is_non_positive(stop - start): return 0 else: # ISL could not ascertain the expression's sign raise NotImplementedError("could not ascertain the sign of " f"{stop-start} while computing the axis" " length.") else: if _is_non_negative(start - stop): return (start - stop - step - 1) // (-step) elif _is_non_positive(start - stop): return 0 else: # ISL could not ascertain the expression's sign raise NotImplementedError("could not ascertain the sign of " f"{start-stop} while computing the axis" " length.") # }}} def _index_into(ary: Array, indices: Tuple[ConvertibleToIndexExpr, ...]) -> Array: from pytato.diagnostic import CannotBroadcastError # {{{ handle ellipsis if indices.count(...) > 1: raise IndexError("an index can only have a single ellipsis ('...')") if indices.count(...): ellipsis_pos = indices.index(...) 
indices = (indices[:ellipsis_pos] + (slice(None, None, None),) * (ary.ndim - len(indices) + 1) + indices[ellipsis_pos+1:]) # }}} # {{{ "pad" index with complete slices to match ary's ndim if len(indices) < ary.ndim: indices = indices + (slice(None, None, None),) * (ary.ndim - len(indices)) # }}} if len(indices) != ary.ndim: raise IndexError(f"Too many indices (expected {ary.ndim}" f", got {len(indices)})") if any(idx is None for idx in indices): raise NotImplementedError("newaxis is not supported") # {{{ validate broadcastability of the array indices try: get_shape_after_broadcasting([idx for idx in indices if isinstance(idx, Array)]) except CannotBroadcastError as e: raise IndexError(str(e)) # }}} # {{{ validate index for i, idx in enumerate(indices): if isinstance(idx, slice): pass elif isinstance(idx, int): if not (_is_non_negative(idx + ary.shape[i]) and _is_non_negative(ary.shape[i] - 1 - idx)): raise IndexError(f"{idx} is out of bounds for axis {i}") elif isinstance(idx, Array): if idx.dtype.kind != "i": raise IndexError("only integer arrays are valid array indices") else: raise IndexError("only integers, slices, ellipsis and integer arrays" " are valid indices") # }}} # {{{ normalize slices normalized_indices: List[IndexExpr] = [_normalize_slice(idx, axis_len) if isinstance(idx, slice) else idx for idx, axis_len in zip(indices, ary.shape)] del indices # }}} if any(isinstance(idx, Array) for idx in normalized_indices): # advanced indexing expression i_adv_indices, i_basic_indices = partition( lambda idx: isinstance( normalized_indices[idx], NormalizedSlice), range(len(normalized_indices))) if any(i_adv_indices[0] < i_basic_idx < i_adv_indices[-1] for i_basic_idx in i_basic_indices): # non contiguous advanced indices return AdvancedIndexInNoncontiguousAxes(ary, tuple(normalized_indices)) else: return AdvancedIndexInContiguousAxes(ary, tuple(normalized_indices)) else: # basic indexing expression return BasicIndex(ary, tuple(normalized_indices)) # }}}
{"hexsha": "5df0a13c9d355c673ce926689b65abd3c6b9d015", "size": 19298, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytato/utils.py", "max_stars_repo_name": "alexfikl/pytato", "max_stars_repo_head_hexsha": "6ece6bc8bc35b22fe000a60ded74e8845883d30b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytato/utils.py", "max_issues_repo_name": "alexfikl/pytato", "max_issues_repo_head_hexsha": "6ece6bc8bc35b22fe000a60ded74e8845883d30b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-01T19:31:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-01T19:31:49.000Z", "max_forks_repo_path": "pytato/utils.py", "max_forks_repo_name": "kaushikcfd/pytato", "max_forks_repo_head_hexsha": "0bf3fdfc35aec5911ca8aabd394c1d7207562edb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8033395176, "max_line_length": 110, "alphanum_fraction": 0.5897502332, "include": true, "reason": "import numpy", "num_tokens": 4135}
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.

''' Imports '''
import numpy as np
import tensorflow as tf
import os
import argparse
from Var import Var


class DataLoader:
    """Loads gesture .npz data files plus their text label files and turns
    them into numpy arrays suitable for training.

    Parameters
    ----------
    num_frames : int
        Number of frames per gesture sample; selects the data sub-folder.
    use_arm : bool
        Forwarded to :class:`Var` to pick the joint configuration.
    m_score : bool
        If True, every feature is multiplied by the per-joint detection
        score and the score feature itself is dropped from the inputs.
    """

    def __init__(self, num_frames, use_arm, m_score):
        self.num_frames = num_frames
        self.use_arm = use_arm
        self.m_score = m_score
        self.debug = False
        self.working_dir = os.getcwd() + "/"
        # Var holds the dataset configuration (classes, feature counts, ...).
        self.v = Var(use_arm)
        self.classes = self.v.get_classes()
        self.num_classes = self.v.get_num_classes()
        self.num_features = self.v.get_num_features()
        self.num_joints = self.v.get_size()

    def setDebug(self):
        """Enable debug printing of array shapes while loading."""
        self.debug = True

    def npz_to_npy(self, fn, label_fn):
        """Change one input npz archive to numpy arrays.

        ``fn`` is a loaded npz archive whose ``'data'`` entry wraps a dict
        mapping feature index -> (num_samples, num_joints) array and whose
        ``'isBadData'`` entry flags bad samples.  ``label_fn`` is the path
        of a text file containing the class name for the archive.

        Returns ``(new_data, new_labels)``: per-feature stacked rows of the
        good samples, and matching one-hot label rows.
        """
        data = fn['data'].item()
        new_data = {}
        score_idx = self.num_features - 1
        score = data[score_idx]
        det = fn['isBadData']
        new_labels = np.zeros((0, self.num_classes))

        num_data = data[0].shape[0]
        data_size = data[0].shape[1]
        # Features may have recorded different sample counts; use the
        # smallest so indexing stays in bounds for every feature.
        min_data = num_data * 100  # really big number
        for feature_num in range(self.num_features):
            min_data = min(min_data, data[feature_num].shape[0])
            new_data[feature_num] = np.zeros((0, data_size))
        num_data = min_data

        # Read label (context manager so the file handle is always closed).
        with open(label_fn, 'r') as f:
            s = f.read()

        # Check and set one hot encoded value.
        label = np.zeros(self.num_classes)
        for key, val in self.classes.items():
            if s == val:
                label[key] = 1

        # Stack as many copies of the label as there are samples.
        # np.stack requires a sequence (a generator is rejected by NumPy).
        labels = np.stack([label for _ in range(num_data)])

        for idx in range(num_data):
            isBad = det[idx]
            if not isBad:
                for feature_num in range(self.num_features):
                    if self.m_score:
                        # Weight every feature (except the score itself) by
                        # the per-joint detection score.
                        if feature_num != score_idx:
                            multiplied = (
                                data[feature_num][idx].reshape(1, self.num_joints)
                                * score[idx].reshape(1, self.num_joints))
                            new_data[feature_num] = np.concatenate(
                                (new_data[feature_num], multiplied))
                    else:
                        new_data[feature_num] = np.concatenate(
                            (new_data[feature_num],
                             data[feature_num][idx].reshape(1, self.num_joints)))
                new_labels = np.concatenate(
                    (new_labels, labels[idx].reshape(1, self.num_classes)))

        if self.debug:
            print("SHAPE OF INPUTS: ", new_data[0].shape)
        # Returns array with inputs and data quality of inputs.
        return new_data, new_labels

    def load_npz_data(self):
        """Uses npz_to_npy to take all the npz files in the data folder and
        generate numpy arrays of inputs and good/bad data samples.

        Returns ``(info, labels)`` where ``info`` maps each kept feature
        index to its vertically stacked samples.
        """
        score_idx = self.num_features - 1
        # Set filepaths for data/label folders.
        data_path = self.working_dir + "data/GestureData/%d/gestureData" % self.num_frames
        label_path = self.working_dir + 'data/Labels/%d/label' % self.num_frames
        print(list(os.walk(self.working_dir + 'data/Labels/%d' % self.num_frames)))
        try:
            data_amount = len(
                next(os.walk(self.working_dir + 'data/Labels/%d' % self.num_frames))[2])
        except StopIteration:
            # os.walk yields nothing when the directory does not exist.
            raise Exception("your data cannot be found in %s. If you have data in this folder, the next function (for iterators) is not working properly." % data_path)

        info = {}
        labels = np.zeros((0, self.num_classes))
        for feature_num in range(self.num_features):
            # When scores are multiplied in, the score feature itself is
            # not kept as a separate input.
            if self.m_score and feature_num == score_idx:
                break
            info[feature_num] = np.zeros((0, self.num_joints))

        for i in range(data_amount):
            # NOTE(review): the archives store a pickled object dict, which
            # modern NumPy only loads with allow_pickle=True.  Only load
            # data files you trust.
            datum, label = self.npz_to_npy(
                np.load(data_path + str(i+1) + '.npz', allow_pickle=True),
                label_path + str(i+1) + '.txt')
            for feature_num in range(self.num_features):
                if self.m_score and feature_num == score_idx:
                    break
                info[feature_num] = np.vstack((info[feature_num], datum[feature_num]))
            labels = np.vstack((labels, label))

        if self.debug:
            print("FULL INPUT SHAPE: ", labels.shape)
        return info, labels

    def load_all(self):
        """Loads full npz file and properly converts and prunes it for
        training: concatenates all features side by side and shuffles the
        samples (inputs and labels with the same permutation)."""
        data, out = self.load_npz_data()
        # Concatenate the per-feature arrays along the feature axis, in a
        # deterministic (sorted key) order.
        combined = np.concatenate([data[k] for k in sorted(data)], axis=1)

        # Shuffle inputs and labels with the same permutation.
        p = np.random.permutation(combined.shape[0])
        combined = combined[p]
        out = out[p]

        if self.debug:
            print("IN SIZE: ", combined.shape)
            print("OUT SIZE: ", out.shape)
        return combined, out
{"hexsha": "3028cb127612a4daae4d235a7ae9e8a53974286d", "size": 5111, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataLoader.py", "max_stars_repo_name": "bhardwajRahul/Gesture-Recognition", "max_stars_repo_head_hexsha": "e4722ecb58e5c65f34f92a21058eae81ed3c84e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 102, "max_stars_repo_stars_event_min_datetime": "2018-11-03T03:39:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:11:54.000Z", "max_issues_repo_path": "DataLoader.py", "max_issues_repo_name": "shuren007/Gesture-Recognition", "max_issues_repo_head_hexsha": "e4722ecb58e5c65f34f92a21058eae81ed3c84e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-06-14T14:03:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-20T12:14:27.000Z", "max_forks_repo_path": "DataLoader.py", "max_forks_repo_name": "shuren007/Gesture-Recognition", "max_forks_repo_head_hexsha": "e4722ecb58e5c65f34f92a21058eae81ed3c84e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2018-12-13T23:20:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T16:37:34.000Z", "avg_line_length": 38.7196969697, "max_line_length": 167, "alphanum_fraction": 0.5738603013, "include": true, "reason": "import numpy", "num_tokens": 1168}
import cv2 as cv
import numpy as np

# Detect straight lines in a sudoku image with the standard Hough transform
# and draw them in red on top of the image.

img = cv.imread('sudoku.png')
# imread returns None (instead of raising) when the file cannot be read.
if img is None:
    raise FileNotFoundError("could not read 'sudoku.png'")

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150, apertureSize=3)

# rho resolution: 1 px, theta resolution: 1 degree, accumulator threshold: 200.
lines = cv.HoughLines(edges, 1, np.pi/180, 200)

# HoughLines returns None when no line reaches the threshold.
if lines is not None:
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        # (x0, y0): the point on the line closest to the origin.
        x0 = a * rho
        y0 = b * rho
        # Extend the line 1000 px in both directions for drawing.
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv.imshow('houghLines', img)
cv.imshow('canny edges', edges)
cv.waitKey(0)
cv.destroyAllWindows()
{"hexsha": "06a0224f7c681e662cd69929a13bae578e3b8a69", "size": 606, "ext": "py", "lang": "Python", "max_stars_repo_path": "55_hough_line_basic.py", "max_stars_repo_name": "EnesMercan/Computer-Vision-Python", "max_stars_repo_head_hexsha": "21ba2f5125b56c17fa72e8b032cbe943af1f36fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-01T14:45:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-03T23:13:30.000Z", "max_issues_repo_path": "55_hough_line_basic.py", "max_issues_repo_name": "EnesMercan/Computer-Vision-Python", "max_issues_repo_head_hexsha": "21ba2f5125b56c17fa72e8b032cbe943af1f36fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "55_hough_line_basic.py", "max_forks_repo_name": "EnesMercan/Computer-Vision-Python", "max_forks_repo_head_hexsha": "21ba2f5125b56c17fa72e8b032cbe943af1f36fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4444444444, "max_line_length": 52, "alphanum_fraction": 0.5825082508, "include": true, "reason": "import numpy", "num_tokens": 230}
[STATEMENT] theorem (in itrace_top) alpern_schneider: assumes notempty: "A \<noteq> {}" and Psub: "P \<subseteq> A\<^sup>\<omega>" shows "\<exists> S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] from Psub [PROOF STATE] proof (chain) picking this: P \<subseteq> A\<^sup>\<omega> [PROOF STEP] have "P \<subseteq> carrier" [PROOF STATE] proof (prove) using this: P \<subseteq> A\<^sup>\<omega> goal (1 subgoal): 1. P \<subseteq> carrier [PROOF STEP] by (simp add: itop_carrier [THEN sym]) [PROOF STATE] proof (state) this: P \<subseteq> carrier goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] then [PROOF STATE] proof (chain) picking this: P \<subseteq> carrier [PROOF STEP] obtain L S where Lsub: "L \<subseteq> carrier" and Ssub: "S \<subseteq> carrier" and Sclosed: "S closed" and Ldense: "L dense" and Pinter: "P = S \<inter> L" [PROOF STATE] proof (prove) using this: P \<subseteq> carrier goal (1 subgoal): 1. (\<And>L S. \<lbrakk>L \<subseteq> carrier; S \<subseteq> carrier; S closed; L dense; P = S \<inter> L\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (rule topology.ex_dense_closure_interE [OF topology]) auto [PROOF STATE] proof (state) this: L \<subseteq> carrier S \<subseteq> carrier S closed L dense P = S \<inter> L goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] from Ssub Sclosed [PROOF STATE] proof (chain) picking this: S \<subseteq> carrier S closed [PROOF STEP] have "infsafety A S" [PROOF STATE] proof (prove) using this: S \<subseteq> carrier S closed goal (1 subgoal): 1. 
infsafety A S [PROOF STEP] by (simp add: infsafety_closed_iff itop_carrier) [PROOF STATE] proof (state) this: infsafety A S goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] moreover [PROOF STATE] proof (state) this: infsafety A S goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] from notempty Lsub Ldense [PROOF STATE] proof (chain) picking this: A \<noteq> {} L \<subseteq> carrier L dense [PROOF STEP] have "infliveness A L" [PROOF STATE] proof (prove) using this: A \<noteq> {} L \<subseteq> carrier L dense goal (1 subgoal): 1. infliveness A L [PROOF STEP] by (simp add: infliveness_dense_iff itop_carrier) [PROOF STATE] proof (state) this: infliveness A L goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: infsafety A S infliveness A L [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: infsafety A S infliveness A L goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] using Pinter [PROOF STATE] proof (prove) using this: infsafety A S infliveness A L P = S \<inter> L goal (1 subgoal): 1. \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<exists>S L. infsafety A S \<and> infliveness A L \<and> P = S \<inter> L goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1470, "file": "Topology_LList_Topology", "length": 19}
\documentclass{beamer} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} % \usepackage{amscd, amsfonts, amsmath, amssymb, amstext, amsthm, caption, epsfig, fancyhdr, float, graphicx, latexsym, mathtools, multicol, multirow, algorithm, chngcntr} \usepackage[english]{babel} \usepackage{booktabs} \usepackage{amsmath,amssymb} \usepackage{graphicx} \usepackage{caption} \usepackage{subfig} \usepackage{xspace} \usepackage{fourier} \usepackage{tikz} \usetikzlibrary{shapes,arrows} \usepackage{tkz-graph} \usetikzlibrary{automata,arrows,positioning,calc} \usetikzlibrary{positioning} \usetikzlibrary{fit} \usetikzlibrary{backgrounds} \usetikzlibrary{calc} \usetikzlibrary{shapes} \usetikzlibrary{mindmap} \usetikzlibrary{decorations.text} \usetikzlibrary{snakes} % \theoremstyle{definition} % insert bellow all blocks you want in normal text % \newtheorem{definition}{Definition} % tikzmark command, for shading over items \newcommand{\tikzmark}[1]{\tikz[overlay,remember picture] \node (#1) {};} % Define block styles \tikzstyle{decision} = [diamond, draw, fill=blue!20, text width=4.5em, text badly centered, node distance=3cm, inner sep=0pt] \tikzstyle{block} = [rectangle, draw, fill=blue!20, text width=5em, text centered, rounded corners] \tikzstyle{line} = [draw] \tikzstyle{cloud} = [draw, ellipse,fill=red!20, node distance=3cm, minimum height=2em] \usepackage[most]{tcolorbox} \setbeamertemplate{blocks}[rounded][shadow=true] % use rounded blocks with standard beamer shadow % Distributions. 
\newcommand*{\UnifDist}{\mathsf{Unif}} \newcommand*{\ExpDist}{\mathsf{Exp}} \newcommand*{\DepExpDist}{\mathsf{DepExp}} \newcommand*{\GammaDist}{\mathsf{Gamma}} \newcommand*{\LognormalDist}{\mathsf{LogNorm}} \newcommand*{\WeibullDist}{\mathsf{Weib}} \newcommand*{\ParetoDist}{\mathsf{Par}} \newcommand*{\NormalDist}{\mathsf{Norm}} \newcommand*{\GeometricDist}{\mathsf{Geom}} \newcommand*{\NegBinomialDist}{\mathsf{NegBin}} \newcommand*{\PoissonDist}{\mathsf{Poisson}} \newcommand*{\BivariatePoissonDist}{\mathsf{BPoisson}} \newcommand*{\CyclicalPoissonDist}{\mathsf{CPoisson}} \newcommand*{\iid}{\textbf{iid}\@\xspace} \newcommand*{\pdf}{\textbf{pdf}\@\xspace} \newcommand*{\cdf}{\textbf{cdf}\@\xspace} \newcommand*{\pmf}{\textbf{pmf}\@\xspace} \newcommand*{\abc}{{\textbf{abc}}\@\xspace} \newcommand*{\smc}{\textbf{smc}\@\xspace} \newcommand*{\mcmc}{\textbf{mcmc}\@\xspace} \newcommand*{\ess}{\textbf{ess}\@\xspace} \newcommand*{\mle}{\textbf{mle}\@\xspace} \newcommand*{\bic}{\textbf{bic}\@\xspace} \newcommand*{\kde}{\textbf{kde}\@\xspace} \newcommand*{\glm}{\textbf{glm}\@\xspace} \newcommand*{\xol}{\textbf{xol}\@\xspace} \newcommand*{\cpu}{\textbf{cpu}\@\xspace} \newcommand*{\gpu}{\textbf{gpu}\@\xspace} \newcommand*{\arm}{\textbf{arm}\@\xspace} \def \si {\sigma} \def \la {\lambda} \def \al {\alpha} % \def\e*{\end{eqnarray*}} \def \di{\displaystyle} \def \E{\mathbb E} \def \N{\mathbb N} \def \Z{\mathbb Z} \def \NZ{\mathbb{N}_0} \def \I{\mathbb I} \def \w{\widehat} \def \P {\mathbb P} \def \V{\mathbb V} \newcommand{\CL}{\mathbb{C}} \newcommand{\RL}{\mathbb{R}} \newcommand{\nat}{{\mathbb N}} \newcommand{\Laplace}{\mathscr{L}} \newcommand{\e}{\mathrm{e}} \newcommand{\ve}{\bm{\mathrm{e}}} % vector e \renewcommand{\L}{\mathcal{L}} % e.g. L^2 loss. 
\newcommand{\ih}{\mathrm{i}} \newcommand{\oh}{{\mathrm{o}}} \newcommand{\Oh}{{\mathcal{O}}} \newcommand{\Exp}{\mathbb{E}} \newcommand{\Norm}{\mathcal{N}} \newcommand{\LN}{\mathcal{LN}} \newcommand{\SLN}{\mathcal{SLN}} \renewcommand{\Pr}{\mathbb{P}} \newcommand{\Ind}{\mathbb I} \newcommand\bfsigma{\bm{\sigma}} \newcommand\bfSigma{\bm{\Sigma}} \newcommand\bfLambda{\bm{\Lambda}} \newcommand{\stimes}{{\times}} \def \limsup{\underset{n\rightarrow+\infty}{\overline{\lim}}} \def \liminf{\underset{n\rightarrow+\infty}{\underline{\lim}}} % vertical separator macro \newcommand{\vsep}{ \column{0.0\textwidth} \begin{tikzpicture} \draw[very thick,black!10] (0,0) -- (0,7.3); \end{tikzpicture} } \newcommand\blfootnote[1]{% \begingroup \renewcommand\thefootnote{}\footnote{#1}% \addtocounter{footnote}{-1}% \endgroup } % More space between lines in align % \setlength{\mathindent}{0pt} % Beamer theme \usetheme{ZMBZFMK} \usefonttheme[onlysmall]{structurebold} \mode<presentation> \setbeamercovered{transparent=10} % align spacing \setlength{\jot}{0pt} \setbeamertemplate{navigation symbols}{}%remove navigation symbols \title[BLOCKASTICS]{Blockchain miner's risk management} \author{Pierre-O. Goffard} \institute[ISFA]{Institut de Science Financières et d'Assurances\\ \texttt{pierre-olivier.goffard@univ-lyon1.fr} } \date{\today} % \titlegraphic{\includegraphics[width=2.5cm]{../../Figures/bfs_logo.png}} \begin{document} \begin{frame} \titlepage \end{frame} \begin{frame} \tableofcontents \end{frame} \section{Introduction} \begin{frame}{Blockchain} A decentralized data ledger made of blocks maintained by achieving consensus in a P2P network. 
\begin{columns} \begin{column}{0.5\textwidth} % \small \begin{itemize} \item Decentralized \item Public/private \item Permissionned/permissionless \item Immutable \item Incentive compatible \end{itemize} \end{column} \begin{column}{0.5\textwidth} \begin{center} \begin{tikzpicture}[-, >=stealth', auto, semithick, node distance=01cm] \tikzstyle{every edge}=[snake=expanding waves,segment length=1mm,segment angle=10, draw] \tikzstyle{full node}=[circle, fill=tublue,draw=tublue,thick,text=black,scale=0.8] \tikzstyle{light node}=[circle, fill=white,draw=tublue,thick,text=black,scale=0.8] \node[full node] (1) {}; \node[full node] (2)[above right of=1] {}; \node[full node] (3)[above left of=1] {}; \node[full node] (4)[below of=1] {}; \node[full node] (5)[right of=4] {}; \node[full node] (6)[below of=4] {}; \node[light node] (7)[left of=1] {}; \node[light node] (8)[right of=2] {}; \node[light node] (9)[left of=4] {}; \node[light node] (10)[above right of=5] {}; \node[light node] (11)[ right of=5] {}; \node[light node] (12)[ below right of=5] {}; % \node[light node] (4)[above of=2] {}; \path (1) edge node{} (2) edge node{} (3) edge node{} (7) ; \path (5) edge node{} (10) edge node{} (11) edge node{} (12) ; \path (4) edge node{} (5) edge node{} (1) edge node{} (9) edge node{} (6) ; \path (2) edge node{} (8) ; \end{tikzpicture} \end{center} \end{column} \end{columns} \vspace{0.2cm} \begin{tcolorbox}[enhanced,drop shadow, title=Focus of the talk] Public and permissionless blockchain equipped with the Proof-of-Work protocol. 
\end{tcolorbox} \end{frame} \begin{frame}{Consensus protocols} The mechanism to make all the nodes agree on a common data history.\\ \vspace{0.3cm} The three dimensions of blockchain systems analysis \begin{enumerate} \item Efficiency \begin{itemize} \item Throughputs \item Transaction confirmation time \end{itemize} \item Decentralization \begin{itemize} \item Fair distribution of the accounting right \end{itemize} \item Security \begin{itemize} \item Resistance to attacks \end{itemize} \end{enumerate} \footnotesize \begin{thebibliography}{1} \bibitem{Fu2020} X.~Fu, H.~Wang, and P.~Shi, ``A survey of blockchain consensus algorithms: mechanism, design and applications,'' {\em Science China Information Sciences}, vol.~64, nov 2020. \end{thebibliography} \end{frame} \begin{frame}{Applications of blockchain: Cryptocurrency} \begin{columns} \begin{column}{0.5\textwidth} {\footnotesize \begin{thebibliography}{1} \bibitem{Na08} S.~Nakamoto, ``Bitcoin: A peer-to-peer electronic cash system.'' Available at \href{https://bitcoin.org/bitcoin.pdf}{https://bitcoin.org/bitcoin.pdf}, 2008. 
\end{thebibliography}
}
\end{column}
\begin{column}{0.5\textwidth} %%<--- here
\begin{center}
\includegraphics[width=0.5\textwidth]{../../Figures/bitcoin-6284869_1920.png}
\end{center}
\end{column}
\end{columns}
\begin{itemize}
\item Transaction anonymity
\item Banking and reliable currency in certain regions of the world
\item Money Transfer worldwide (at a low fee)
\item No need for a trusted third party
\end{itemize}
\end{frame}
\begin{frame}{Decentralized finance}
DEFI creates a new financial architecture
\begin{columns}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item[+] Non custodial
\item[+] Anonymous
\item[+] Permissionless
\item[+] openly auditable
\end{itemize}
\end{column}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item[-] Unregulated
\item[-] Tax evasion
\item[-] Fraud
\item[-] Money laundering
\end{itemize}
\end{column}
\end{columns}
\vspace{0.5cm}
Extends the Bitcoin promises to more complex financial operations
\begin{itemize}
\item Collateralized lending
\item Decentralized Exchange Platform
\item Tokenized assets
\item Fundraising vehicle (ICO, STO, ...)
\end{itemize}
\vspace{0.3cm}
\scriptsize
\begin{thebibliography}{1}
\bibitem{werner2021sok}
S.~M. Werner, D.~Perez, L.~Gudgeon, A.~Klages-Mundt, D.~Harz, and W.~J. Knottenbelt, ``Sok: Decentralized finance (defi),'' 2021.
\end{thebibliography}
\end{frame}
\begin{frame}{What's inside a block?}
A block consists of
\begin{itemize}
\item a header
\item a list of "transactions" that represents the information recorded through the blockchain.
\end{itemize}
The header usually includes
\begin{itemize}
\item the date and time of creation of the block,
\item the block height which is the index inside the blockchain,
\item the hash of the block
\item the hash of the previous block.
\end{itemize}
\begin{tcolorbox}[enhanced,drop shadow, title=Question]
What is the hash of a block?
\end{tcolorbox}
\end{frame}
\begin{frame}{Cryptographic Hash function}
\small
A function that maps data of arbitrary size (message) to a bit array of fixed size (hash value)
$$
h:\{0,1\}^\ast\mapsto \{0,1\}^d.
$$
A good hash function is
\begin{itemize}
\item deterministic
\item quick to compute
\item One way
\begin{itemize}
\scriptsize
\item[$\hookrightarrow$] For a given hash value $\overline{h}$ it is hard to find a message $m$ such that
$$
h(m) = \overline{h}
$$
\end{itemize}
\item Collision resistant
\begin{itemize}
\item[$\hookrightarrow$] Impossible to find $m_1$ and $m_2$ such that
$$
h(m_1) = h(m_2)
$$
\end{itemize}
\item Chaotic
$$m_1\approx m_2\Rightarrow h(m_1) \neq h(m_2)$$
\end{itemize}
\end{frame}
\begin{frame}{SHA-256}
The SHA-256 function which converts any message into a hash value of $256$ bits.
\begin{tcolorbox}[enhanced,drop shadow, title=Example]
The hexadecimal digest of the message
$$
\texttt{Moritz Voss is the man}
$$
is
\footnotesize
$$
\texttt{50f3257a3d22a56247a8978fd2505e8cdd64e1cb06e52c941d09e234722dc275}
$$
\end{tcolorbox}
\end{frame}
\begin{frame}{Mining a block}
\begin{figure}[!ht]
\includegraphics[width = \textwidth]{../../Figures/block_not_mined.png}
\captionsetup{width=0.8\textwidth}
\centering
\caption{A block that has not been mined yet.}
\label{fig:block_not_mined}
\end{figure}
\end{frame}
\begin{frame}{Mining a block}
The maximum value for a 256-bit number is
$$
T_\text{max} = 2^{256}-1 \approx 1.16e^{77}.
$$
Mining consists in drawing at random a nonce
$$
\text{Nonce} \sim \text{Unif}(\{0,\ldots, 2^{32}-1\}),
$$
until
$$
h(\text{Nonce}|\text{Block info})<T,
$$
where $T$ is referred to as the target.
\begin{tcolorbox}[enhanced,drop shadow, title=Difficulty of the cryptopuzzle]
$$
D = \frac{T_{\max}}{T}.
$$ \end{tcolorbox} \end{frame} \begin{frame}{Mining a block} If we set the difficulty to $D = 2^4$ then the hexadecimal digest must start with at least $1$ leading $0$ \begin{figure}[!ht] \includegraphics[width = \textwidth]{../../Figures/block_mined.png} \captionsetup{width=0.8\textwidth} \centering \caption{A mined block with a hash value having on leading zero.} \label{fig:block_mined} \end{figure} The number of trial is geometrically distributed \begin{itemize} \item Exponential inter-block times \item Lenght of the blockchain = Poisson process \end{itemize} \end{frame} \begin{frame}{Bitcoin protocol} \begin{itemize} \item One block every 10 minutes on average \item Depends on the hashrate of the network \item Difficulty adjustment every 2,016 blocks ($\approx$ two weeks) \item Reward halving every 210,000 blocks \end{itemize} Check out \url{https://www.bitcoinblockhalf.com/} \begin{tcolorbox}[enhanced,drop shadow, title=Risky business] Steady operational cost VS infrequent capital gains \end{tcolorbox} \end{frame} \section{Insurance risk theory} \begin{frame}{Cramer-Lunberg risk model} \begin{columns} \begin{column}{0.5\textwidth} \scriptsize The financial reserves of a nonlife insurance company is given by \begin{equation*} R_t = u +ct - \sum_{i = 1}^{N_t}U_i\text{, }t\geq0, \end{equation*} où \begin{itemize} \item $u>0$ the initial reserves \item $c$ is the premium rate \item $(N_t)_{t\geq0}$ is the claim frequency up to time $t\geq0$. 
\begin{itemize} \scriptsize \item[$\hookrightarrow$] Poisson process with intensity $\lambda$ \end{itemize} \item The $U_i$'s are the claim amounts \begin{itemize} \scriptsize \item[$\hookrightarrow$] Nonnegative random variables, \textbf{i.i.d.}, and independent from $N_t$ \end{itemize} \end{itemize} \end{column} \begin{column}{0.5\textwidth} \begin{tikzpicture} %Origin and axis \coordinate (O) at (0,0); \draw[->] (-0.5,0) -- (5.5,0) coordinate[label = {below:\scriptsize$t$}] (xmax); \draw[->] (0,-0.5) -- (0,4) coordinate[label = {right:\scriptsize$R_t$}] (ymax); %Initial reserves \draw (0,2) node[black,left] {\scriptsize$u$} node{}; % % %Length of the honest chain \draw[thick, tublue,-] (0,2) -- (2,3) node[pos=0.5, above] {}; \draw[thick, dashed, tublue] (2,3) -- (2,1) node[pos=0.5, left] {\scriptsize\color{black}$U_1$}; \draw[thick, tublue] (2,1) -- (3,1.5) node[pos=0.5, above] {}; \draw[thick, dashed, tublue] (3,1.5) -- (3, 0.5) node[pos=0.5, left] {\scriptsize\color{black}$U_2$}; \draw[thick, tublue] (3,0.5) -- (5, 1.5) node[pos=0.5, above] {}; \draw[thick, dashed, tublue] (5,1.5) -- (5, -0.5) node[pos=0.5,above left] {\scriptsize\color{black}$U_3$}; %Block finding Times \draw (2,0) node[black,below] {\scriptsize$T_1$} node{ \color{black}$\bullet$}; \draw (3,0) node[black,below] {\scriptsize$T_2$} node{ \color{black}$\bullet$}; \draw (5,0) node[black,below left] {\scriptsize$\tau_u$} node{ \color{black}$\bullet$}; \end{tikzpicture} \end{column} \end{columns} \end{frame} \begin{frame}{Ruin probability} \scriptsize Define the ruin time as $$ \tau_u = \inf\{t\geq0\text{ ; }R_t <0\} $$ and the ruin probability as $$ \psi(u,t) = \mathbb{P}(\tau_u < t)\text{ et }\psi(u) = \mathbb{P}(\tau_u < \infty) $$ Find $u$ such that $$ \mathbb{P}(\text{Ruin}) = \alpha\text{ (0.005)}, $$ with $$ c=(1+\eta)\lambda\mathbb{E}(U), $$ where $$\eta>0\text{ (net profit condition)}$$ otherwise $$\psi(u)=1.$$ \tiny \begin{thebibliography}{1} \bibitem{Asmussen_2010} S.~Asmussen and 
H.~Albrecher, {\em Ruin Probabilities}. \newblock {WORLD} {SCIENTIFIC}, sep 2010. \end{thebibliography} \end{frame} \section{Application to blockchain miner risk management} \begin{frame}{Dual risk model} \begin{columns} \begin{column}{0.5\textwidth} \scriptsize Consider a miner \begin{itemize} \item of hashrate $p\in(0,1)$ \item that owns $u\geq0$ at $t = 0$ \item spends $c = \pi_W\cdot W\cdot p$ per time unit \item who finds $p \lambda$ blocks on average per time unit, where $\lambda$ is the average number of blocks found by the network \end{itemize} The wealth of such a miner is given by $$ R_t = u - c\cdot t + N_t\cdot b,\text{ (Dual risk model)} $$ où \begin{itemize} \item $(N_t)_{t\geq0}$ is a Poisson process with intensity $p\cdot\lambda$ \item $b$ is the block finding reward (6.25 BTC) \url{bitcoinhalf.com} \end{itemize} \end{column} \begin{column}{0.5\textwidth} \begin{tikzpicture} %Origin and axis \coordinate (O) at (0,0); \draw[->] (-0.5,0) -- (5.5,0) coordinate[label = {below:\scriptsize$t$}] (xmax); \draw[->] (0,-0.5) -- (0,4) coordinate[label = {right:\scriptsize$R_t$}] (ymax); %Initial reserves \draw (0,3) node[black,left] {\scriptsize$u$} node{}; % % %Length of the honest chain \draw[thick, tublue,-] (0,3) -- (2,1) node[pos=0.5, above] {}; \draw[thick, dashed, tublue] (2,1) -- (2,2) node[pos=0.5, above left] {\scriptsize\color{black}$b$}; \draw[thick, tublue] (2,2) -- (3.5,0.5) node[pos=0.5, above] {}; \draw[thick, dashed, tublue] (3.5,0.5) -- (3.5, 1.5) node[pos=0.5, above left] {\scriptsize\color{black}$b$}; \draw[thick, tublue] (3.5,1.5) -- (5, 0) node[pos=0.5, above] {}; %Block finding Times \draw (2,0) node[black,below] {\scriptsize$T_1$} node{ \color{black}$\bullet$}; \draw (3.5,0) node[black,below] {\scriptsize$T_2$} node{ \color{black}$\bullet$}; \draw (5,0) node[black,below] {\scriptsize$\tau_u$} node{ \color{black}$\bullet$}; \end{tikzpicture} \end{column} \end{columns} \end{frame} \begin{frame}{Expected profit if no failure} \scriptsize 
The ruin time is defined as $$ \tau_u = \inf\{t\geq0\text{ ; }R_t \leq0\} $$ \begin{itemize} \item Risk measure $$ \psi(u,t) = \mathbb{P}(\tau_u \leq t) $$ \item Profitability measure $$ V(u,t) = \mathbb{E}(R_t\mathbb{I}_{\tau_u > t}) $$ \end{itemize} \end{frame} \begin{frame}{A miner's dilemma} \scriptsize Use $\psi$ and $V$ to compare mining solo to \begin{itemize} \item pool mining \tiny \begin{thebibliography}{1} \bibitem{rosenfeld2011analysis} M.~Rosenfeld, ``Analysis of bitcoin pooled mining reward systems,'' 2011. \bibitem[Albrecher et~al.(2022)Albrecher, Finger, and Goffard]{albrecher:hal-03336851} Hansj{\"o}rg Albrecher, Dina Finger, and Pierre-Olivier Goffard. \newblock {Blockchain mining in pools: Analyzing the trade-off between profitability and ruin}. \newblock to appear in Insurance; Mathematics and Economics, April 2022. \newblock URL \url{https://hal.archives-ouvertes.fr/hal-03336851}. \end{thebibliography} \item \scriptsize deviating from the prescribed protocol (selfish mining) \tiny \begin{thebibliography}{1} \bibitem{Eyal2014} I.~Eyal and E.~G. Sirer, ``Majority is not enough: Bitcoin mining is vulnerable,'' in {\em Financial Cryptography and Data Security}, pp.~436--454, Springer Berlin Heidelberg, 2014. \bibitem[Albrecher and Goffard(2022)]{Hansjoerg2022} Hansjoerg Albrecher and Pierre-Olivier Goffard. \newblock On the profitability of selfish blockchain mining under consideration of ruin. \newblock \emph{Operations Research}, 70(1):179--200, jan 2022. \newblock \url{10.1287/opre.2021.2169}. \end{thebibliography} \end{itemize} Analytical expressions for $$ \widehat{\psi}(u,t)= \mathbb{E}[\psi(u,T)]\text{ and }\widehat{V}(u,t)= \mathbb{E}[V(u,T)], $$ where $T\sim\text{Exp}(t)$. 
\end{frame} \begin{frame}{Solo mining} \scriptsize \begin{tcolorbox}[enhanced,drop shadow, title=Theorem (profit and ruin when mining solo)] For $u\geq0$, with \begin{equation*} \widehat{\psi}(u,t) = e^{\rho^\ast u}, \end{equation*} and \begin{equation*} \widehat{V}(u,t) = u+(p\lambda b-c)t\left(1-e^{\rho^\ast u }\right), \end{equation*} where $\rho^\ast$ is the only nonnegative solution of \begin{equation}\label{eq:equation_rho} -c\rho + p\lambda(e^{b\rho}-1) = 1/t. \end{equation} \end{tcolorbox} \begin{tcolorbox}[enhanced,drop shadow, title=Lambert function] The solution $\rho^\ast$ of \eqref{eq:equation_rho} is given by \begin{equation*} \rho^{\ast}=-\frac{p \lambda t+1}{ct} -\frac{1}{b} \,{\rm W} \left[-\frac{p\lambda \,b}{c}\,{e^{-b\,\left(\frac{p \lambda t+1}{ct}\right)}} \right], \end{equation*} where $W(.)$ denotes the Lambert function. \end{tcolorbox} \end{frame} \begin{frame}{Sketch of the proof} \scriptsize The time-horizon is random with $T\sim\text{Exp}(t)$, we condition upon the events occuring in $(0,h)$, with $h<u/c$ so that ruin cannot occur before $h$. Three possibilities \begin{itemize} \item[(i)] $T>h$ and no blocks $(0,h)$ \item[(ii)] $T<h$ and no blocks $(0,T)$ \item[(iii)] One block found before $T$ and $h$ \end{itemize} The expected profit $\widehat{V}(u,t)$ satisfies \begin{eqnarray*} \widehat{V}(u,t)& =&e^{-h(1/t + p\lambda)}\,\widehat{V}(u-ch,t)+\int\limits_0^h\frac1t\, e^{-s(1/t + p\lambda)}\,(u-cs)ds\\ &+&\int\limits_0^h p\lambda\, e^{-s(1/t + p\lambda)}\,\widehat{V}(u-cs+b,t)ds. \end{eqnarray*} \end{frame} \begin{frame}{Sketch of the proof} \scriptsize Differentiating with respect to $h$ and setting $h=0$, we get \begin{equation}\label{eq:ODE} c\widehat{V}'(u,t) + \left(\frac{1}{t} + p\lambda\right)\widehat{V}(u,t) - p\lambda \widehat{V}(u+b,t) - \frac{u}{t} =0, \end{equation} Equation \eqref{eq:ODE} is an advanced differential equation \blfootnote{\tiny H.~L. 
Smith, {\em An introduction to delay differential equations with applications to the life sciences}. \newblock Springer, New York, 2011. } with boundary conditions $$ \widehat{V}(0,t) = 0 \text{ such that } 0\leq \widehat{V}(u,t)\leq u-ct+p\lambda b t \text{ for }u>0. $$ Consider solutions of the form \begin{equation}\label{eq:potential_solution} \widehat{V}(u,t) = Ae^{\rho u }+Bu + C,\text{ }u \ge 0, \end{equation} where $A, B,C$ and $\rho$ are constants to be determined. Substituting \eqref{eq:potential_solution} in \eqref{eq:ODE} together with boundary conditions \begin{equation*} \begin{cases} 0&=ct\rho + \left(1+p\lambda t\right)-p\lambda te^{\rho b}, \\ 0&= B\left(1+tp\lambda\right)-p\lambda tB - 1,\\ 0&=Bct+C(1+tp\lambda) - p\lambda t Bb-p\lambda tC, \\ 0&=A+C. \end{cases} \end{equation*} \end{frame} \begin{frame}{Sketch of the proof} \scriptsize We get $A = -t(p\lambda b - c)$, $B = 1$, $C = t(p\lambda b - c)$ and $\rho$ verifies $$ c\rho + \left(1+p\lambda t\right)-p\lambda te^{\rho b} = 0, $$ The latter has two solutions on the real line, one negative and the other is positive. As $A<0$, we must take $\rho^\ast<0$ to ensure that $\widehat{V}(u,t)>0$. Substituting $A,B,C$ and $\rho^{\ast}$ in \eqref{eq:potential_solution} yields the result.\\ Similarly, the ruin probability satisfies \begin{equation*}\label{psii} c\widehat{\psi}'(u,t)+(p \lambda+1/t)\,\widehat{\psi}(u,t)-p \lambda\,\widehat{\psi}(u+b,t)=0 \end{equation*} with initial condition $\widehat{\psi}(0,t)=1$ and boundary condition $\lim_{u\to\infty}\widehat{\psi}(u,t)=0$. 
\end{frame} \begin{frame}{Mining pool?} \scriptsize Let $I\subset\{1,\ldots, n\}$ be a set of miners with cumulated hashpower $$ p_I = \sum_{i\in I }p_i, $$ \begin{itemize} \item A pool manager coordinates the joint effort \item Miners show their work by submitting partial solutions (\textit{share}) \end{itemize} The pool manager chooses \begin{itemize} \item the participant remuneration system \item the relative difficulty $q\in(0,1)$ of finding a \textit{share} VS finding a proper solution \item the amount of management fees $f$ \end{itemize} \end{frame} \begin{frame}{Remuneration system} \scriptsize Miners must be compensated pro-rata to their contribution to the mining effort. \begin{tcolorbox}[enhanced,drop shadow, title=Proportional scheme] A \text{round} is the time elapsed between two block discovery \begin{itemize} \item $s_i$ is the number of \textit{shares} submitted by $i\in I$ during the \textit{round} \item Each miner receives $$ (1-f)\cdot b\cdot\frac{s_i}{\sum_{i\in I}s_i}, $$ at the end the round, where $f$ is the pool manager's cut. \item The system is deemed fair if $\frac{s_i}{\sum_{i\in I}s_i}\approx\frac{p_i}{\sum_{i\in I}p_i}$ \end{itemize} \end{tcolorbox} \end{frame} \begin{frame}{What's wrong about going proportional} \scriptsize \begin{tcolorbox}[enhanced,drop shadow, title=Remarque] This scheme is not incentive compatible \end{tcolorbox} \tiny \begin{thebibliography}{1} \bibitem{Schrijvers2017} O.~Schrijvers, J.~Bonneau, D.~Boneh, and T.~Roughgarden, ``Incentive compatibility of bitcoin mining pool reward functions,'' in {\em Financial Cryptography and Data Security}, pp.~477--498, Springer Berlin Heidelberg, 2017. 
\end{thebibliography} \scriptsize \begin{itemize} \item The duration of \textit{rounds} is random \begin{itemize} \scriptsize \item[$\hookrightarrow$] A \textit{share} loses value when the \textit{round} last for too long $\Rightarrow$ \textit{pool hoping} \tiny \begin{thebibliography}{1} \bibitem{rosenfeld2011analysis} M.~Rosenfeld, ``Analysis of bitcoin pooled mining reward systems,'' 2011. \end{thebibliography} \scriptsize \item[$\hookrightarrow$] Apply a discount factor to \textit{shares} \tiny \begin{thebibliography}{1} \bibitem{slush} slush pool, ``Reward system specifications,'' 2021. \end{thebibliography} \end{itemize} \item A miner may postpone the communication of a solution \begin{itemize} \scriptsize \item[$\hookrightarrow$] to wait for her proportion of submitted \textit{shares} to improve \end{itemize} \item No risk transfer from miner to pool manager \begin{itemize} \scriptsize \item[$\hookrightarrow$] $f$ must be small \end{itemize} \end{itemize} \end{frame} \begin{frame}{The Pay-per-Share (PPS) system} \scriptsize The manager pays $$ w = (1-f)\cdot q \cdot b $$ for every \textit{share} and keeps the block finding reward. \vspace{1cm} \begin{columns} \begin{column}{0.5\textwidth} Miner's wealth $$ R_t^i = u_i-ct + M_t^i w,\text{ }t\geq0. $$ where \begin{itemize} \item $(M_t^i)_{t\geq0}$ is a Poisson process with intensity $p_i \mu= p_i\lambda / q$ \item $\mu$ is the average number of \textit{shares} submitted by the network \end{itemize} \end{column} \begin{column}{0.5\textwidth} Manager's wealth $$ R_t^I = u_I - M_t^I w+N_t^I b,\text{ }t\geq0. $$ where \begin{itemize} \item $(M_t^I)_{t\geq0}$ is a Poisson process with intensity $p_I\mu =p_I\lambda / q$ \item $(N_t^I)_{t\geq0}$ is a Poisson process with intensity $p_I\lambda$ \end{itemize} \end{column} \end{columns} \end{frame} \begin{frame}{Pool manager's risk} \scriptsize Let us consider randomized rewards $$ R_t= u - \sum_{i=1}^{M_t} W_i +\sum_{j=1}^{N_t} B_j,\text{ }t\geq0. 
$$ where \begin{itemize} \item $(M_t)_{t\geq0}$ and $(N_t)_{t\geq0}$ are Poisson processes with intensity $\mu^\ast=\mu- \lambda$ and $\lambda$ \item $(W_i)_{i\geq0}$ and $(B_j)_{j\geq0}$ are two independent sequence of \iid exponential random variables with mean $w$ and $b^\ast = b-w$. \end{itemize} \begin{tcolorbox}[enhanced,drop shadow, title=Poisson process superposition] A block discovery triggers the payment of a \textit{share} to the miners \begin{itemize} \item The intensity of $M_t$ is given by $\mu^\ast=\mu- \lambda$ \item The block finding reward is then $b^\ast = b-w$ \end{itemize} A distinction is made here between jumps up and down. \end{tcolorbox} \end{frame} \begin{frame}{Pool manager's risk} \scriptsize \begin{tcolorbox}[enhanced,drop shadow, title=Theorem (Profits and loss of a pool manager)] The ruin probability is given by \begin{equation*}\label{psiexpe} \widehat{\psi}(u,t) = (1-Rw) e^{-R u},\;u\ge 0, \end{equation*} and the expected wealth is \begin{equation*}\label{Vcombexpe} \w{V}(u,t) = (1 - Rw)[w-t(\lambda b^\ast-\mu^\ast w)] e^{-R u}+u+t(\lambda b^\ast-\mu^\ast w), \end{equation*} where $R$ is the only solution to \begin{equation*} \label{VLunde} -(t^{-1}+\lambda+\mu^\ast)+\lambda(1+b^\ast r)^{-1}+\mu^\ast(1-wr)^{-1}=0, \end{equation*} with positive real part. \end{tcolorbox} \tiny \begin{thebibliography}{1} \bibitem{albrecher2021blockchain} H.~Albrecher, D.~Finger, and P.-O. Goffard, ``Blockchain mining in pools: Analyzing the trade-off between profitability and ruin,'' 2021. \end{thebibliography} \end{frame} \begin{frame}[allowframebreaks]{Sketch of the proof} \scriptsize Conditionning upon the events that occur during $(0,h)$. 
Four possibilities \begin{itemize} \item[(i)] $T>h$ and no jumps $(0,h)$ \item[(ii)] $T<h$ and no jumps $(0,T)$ \item[(iii)] an upward jump $(0,h)$ \item[(iv)] a downward jump $(0,h)$ \end{itemize} \begin{eqnarray*}\label{neu0} \w{V}(u,t)&=& e^{-(\frac{1}{t}+\lambda+\mu^\ast)h}\w{V}(u,t) + \frac{1}{t}\int_0^h e^{-{s}/{t}}e^{-(\lambda +\mu^\ast) s} u\,ds\\ & +& \lambda\int_0^he^{-\lambda s} e^{-({1}/{t}+\mu^\ast) s} \int_0^\infty\w{V}(u+x,t)\,dF_{B}(x)\,ds\\ & +&\mu^\ast \int_0^he^{-\mu^\ast s} e^{-({1}/{t}+\lambda) s}\int_0^u \w{V}(u-y,t) \,dF_W(y)\,ds. \end{eqnarray*} Differentiating with respect to $h$ and letting $h\rightarrow 0$, yields \begin{equation} \label{inteq} \lambda\int_0^\infty\w{V}(u+x,t)\,dF_{B}(x)-(\lambda+\mu^\ast+{1}/{t})\w{V}(u,t)+\mu^\ast\int_0^u \w{V}(u-y,t) \,dF_W(y)+{u}/{t}=0,\quad u\ge 0, \end{equation} with boundary conditions $\w{V}(u,t)=0$ pour tout $u<0$ et $0\leq\w{V}(u,t)\leq u+(\lambda b^\ast - \mu^\ast w)t$. Consider solutions of the form $$ Ce^{-ru}+d_1u+d_0 $$ \begin{itemize} \item Gathering the terms in factor of $e^{-r u}$ yields an equation for $r$ with $$ -(t^{-1}+\lambda+\mu^\ast)+\lambda(1+b^\ast r)^{-1}+\mu^\ast(1-wr)^{-1}=0 $$ with nonnegative solution $R>0$, negative is impossible because $0\leq\w{V}(u,t)\leq u+(\lambda b^\ast - \mu^\ast w)t$ \item Gathering the terms in factor of $u$, yields $d_1 = 1$ \item Gathering the terms in factor of $1$, yields $$ d_0 = t(\lambda b^\ast-\mu^\ast w) $$ \item Gathering the terms in factor of $e^{-u/w}$, yields $$ C = (1 - Rw)[w-t(\lambda b^\ast-\mu^\ast w)] $$ \end{itemize} \end{frame} \begin{frame}{Problem related to mining pools} \begin{itemize} \item Arm race, ramping electricity consumption and e-waste generation \tiny \begin{thebibliography}{1} \bibitem{bertucci2020mean} C.~Bertucci, L.~Bertucci, J.-M. Lasry, and P.-L. Lions, ``Mean field game approach to bitcoin mining,'' 2020. 
\bibitem{Alsabah2018} H.~Alsabah and A.~Capponi, ``Bitcoin mining arms race: R{\&}D with spillovers,'' {\em {SSRN} Electronic Journal}, 2018. \end{thebibliography} \end{itemize} \begin{itemize} \item \normalsize A threat to decentralization? \tiny \begin{thebibliography}{1} \bibitem{Cong2020} L.~W. Cong, Z.~He, and J.~Li, ``Decentralized mining in centralized pools,'' {\em The Review of Financial Studies}, vol.~34, pp.~1191--1235, apr 2020. \bibitem{li2019mean} Z.~Li, A.~M. Reppen, and R.~Sircar, ``A mean field games model for cryptocurrency mining,'' 2019. \end{thebibliography} \end{itemize} \end{frame} % \begin{frame} % \cite{albrecher:hal-03336851,Hansjoerg2022} % \bibliography{../../blockastics} % \bibliographystyle{plainnat} % \end{frame} \end{document}
{"hexsha": "8063ba9db8121f8f78442f88bc91cb345b3b6ac6", "size": 30809, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Slides/Seminar/seminar.tex", "max_stars_repo_name": "LaGauffre/BLOCKASTICS", "max_stars_repo_head_hexsha": "4087304a4fb6fe55b5e8746315f524eddedc72e8", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Slides/Seminar/seminar.tex", "max_issues_repo_name": "LaGauffre/BLOCKASTICS", "max_issues_repo_head_hexsha": "4087304a4fb6fe55b5e8746315f524eddedc72e8", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Slides/Seminar/seminar.tex", "max_forks_repo_name": "LaGauffre/BLOCKASTICS", "max_forks_repo_head_hexsha": "4087304a4fb6fe55b5e8746315f524eddedc72e8", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-21T08:20:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-21T08:20:38.000Z", "avg_line_length": 31.8603929679, "max_line_length": 254, "alphanum_fraction": 0.6873640819, "num_tokens": 11036}
import pandas as pd
import numpy as np
from pymongo import MongoClient
import json

# Bulk-load MAG paper-citation pairs (citing paper -> referenced paper) from a
# tab-separated dump into the local MongoDB collection mag.ref.

filename = 'PaperCitationContexts.txt'

key_paper_id = 'PaperId'
key_paper_ref = 'PaperReferenceId'
key_citation_context = 'CitationContext'
header = [key_paper_id, key_paper_ref, key_citation_context]

client = MongoClient('localhost', 27017)
db = client['mag']
collection = db['ref']

cnt = 0
chunksize = 10 ** 6  # rows per chunk; keeps peak memory bounded on the large dump
for chunk in pd.read_csv(filename, names=header, sep='\t', chunksize=chunksize):
    # Keep only rows that actually have a reference id; a vectorized filter
    # plus zip is far faster than iterrows' per-row Series construction.
    # NOTE(review): PaperReferenceId is read as float64 because of the NaNs,
    # so documents store float ids -- same as the original behaviour.
    valid = chunk[pd.notna(chunk[key_paper_ref])]
    data = [{key_paper_id: paper_id, key_paper_ref: paper_ref}
            for paper_id, paper_ref in zip(valid[key_paper_id],
                                           valid[key_paper_ref])]
    # insert_many raises InvalidOperation on an empty document list, which
    # happens whenever an entire chunk lacks reference ids -- guard against it.
    if data:
        collection.insert_many(data)
    cnt += 1
    print(cnt)  # chunks processed so far (original printed before incrementing)
{"hexsha": "3b69e864b7aa3f8331cbd262b99040780f746919", "size": 867, "ext": "py", "lang": "Python", "max_stars_repo_path": "extraction/mag/dump_to_mongo/citation_to_mongo.py", "max_stars_repo_name": "dhjournals/code", "max_stars_repo_head_hexsha": "8e85744325f2938786b88a3143a8ed7ae39f1992", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-04T16:21:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T16:21:45.000Z", "max_issues_repo_path": "extraction/mag/dump_to_mongo/citation_to_mongo.py", "max_issues_repo_name": "dhjournals/code", "max_issues_repo_head_hexsha": "8e85744325f2938786b88a3143a8ed7ae39f1992", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extraction/mag/dump_to_mongo/citation_to_mongo.py", "max_forks_repo_name": "dhjournals/code", "max_forks_repo_head_hexsha": "8e85744325f2938786b88a3143a8ed7ae39f1992", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.6428571429, "max_line_length": 80, "alphanum_fraction": 0.6528258362, "include": true, "reason": "import numpy", "num_tokens": 212}
import numpy as np
import numpy.random as npr
import scipy.stats as sps
import sklearn.ensemble
import sklearn.ensemble.forest
from spearmint import util
from sklearn.externals.joblib import Parallel, delayed


def init(expt_dir, arg_string):
    """Spearmint chooser-module entry point.

    Parses the chooser options encoded in ``arg_string`` and returns a
    configured :class:`RandomForestEIChooser`.
    """
    args = util.unpack_args(arg_string)
    return RandomForestEIChooser(**args)


class RandomForestRegressorWithVariance(sklearn.ensemble.RandomForestRegressor):
    """Random forest regressor whose ``predict`` also reports the across-tree
    sample variance, used as a predictive-uncertainty estimate."""

    def predict(self, X):
        # Check data
        X = np.atleast_2d(X)
        # Per-tree predictions: list of arrays, one per estimator.
        all_y_hat = [tree.predict(X) for tree in self.estimators_]
        # Reduce: the mean over trees is the forest prediction; the sample
        # variance over trees (ddof=1) is the uncertainty.
        y_hat = sum(all_y_hat) / self.n_estimators
        y_var = np.var(all_y_hat, axis=0, ddof=1)
        return y_hat, y_var


class RandomForestEIChooser:
    """Chooser that picks the candidate maximizing Expected Improvement (EI)
    under a random-forest surrogate of the (minimized) objective."""

    def __init__(self, n_trees=50,
                 max_depth=None,
                 min_samples_split=1,
                 max_monkeys=7,
                 max_features="auto",
                 n_jobs=1,
                 random_state=None):
        # NOTE(review): max_monkeys is accepted for interface compatibility
        # with existing config strings but is unused.
        self.n_trees = float(n_trees)
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.n_jobs = float(n_jobs)
        self.random_state = random_state

        self.rf = RandomForestRegressorWithVariance(n_estimators=n_trees,
                                                    max_depth=max_depth,
                                                    min_samples_split=min_samples_split,
                                                    max_features=max_features,
                                                    n_jobs=n_jobs,
                                                    random_state=random_state)

    def next(self, grid, values, durations, candidates, pending, complete):
        """Return the index (into ``grid``) of the next point to evaluate."""
        # With fewer than two completed evaluations the forest cannot be fit;
        # just take the first candidate.
        if complete.shape[0] < 2:
            return int(candidates[0])

        # Grab out the relevant sets.
        comp = grid[complete, :]
        cand = grid[candidates, :]
        pend = grid[pending, :]
        vals = values[complete]

        self.rf.fit(comp, vals)

        if pend.shape[0] != 0:
            # Generate fantasy outcomes for the pending evaluations.
            func_m, func_v = self.rf.predict(pend)
            # Draw from N(mean, var): mean + std * eps.
            # (Fixed: the original *added* the std and the noise, biasing
            # every fantasy upward by one standard deviation.)
            vals_pend = func_m + np.sqrt(func_v) * npr.randn(func_m.shape[0])

            # Re-fit on real observations plus fantasies.
            # (Fixed: np.vstack/np.hstack were subscripted instead of called,
            # which raised a TypeError whenever jobs were pending.)
            self.rf.fit(np.vstack((comp, pend)), np.hstack((vals, vals_pend)))

        # Predict the marginal means and variances at the candidates.
        func_m, func_v = self.rf.predict(cand)

        # Current best observed value (minimization).
        best = np.min(vals)

        # Expected improvement; the small epsilon keeps the division stable
        # when the across-tree variance collapses to zero.
        func_s = np.sqrt(func_v) + 0.0001
        u = (best - func_m) / func_s
        ncdf = sps.norm.cdf(u)
        npdf = sps.norm.pdf(u)
        ei = func_s * (u * ncdf + npdf)

        # (Removed a dead ei.sort() that ran after argmax and had no effect
        # on the returned candidate.)
        best_cand = np.argmax(ei)
        return int(candidates[best_cand])
{"hexsha": "2112e02cbca4e16c48bb75b03a1ed463a9dccaf8", "size": 2940, "ext": "py", "lang": "Python", "max_stars_repo_path": "spearmint-win/spearmint/chooser/RandomForestEIChooser.py", "max_stars_repo_name": "TudorParas/Practical-Bayesian-Optimization", "max_stars_repo_head_hexsha": "db13b509e07e1283c9f37a31cae11d9c9940ebfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spearmint-win/spearmint/chooser/RandomForestEIChooser.py", "max_issues_repo_name": "TudorParas/Practical-Bayesian-Optimization", "max_issues_repo_head_hexsha": "db13b509e07e1283c9f37a31cae11d9c9940ebfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spearmint-win/spearmint/chooser/RandomForestEIChooser.py", "max_forks_repo_name": "TudorParas/Practical-Bayesian-Optimization", "max_forks_repo_head_hexsha": "db13b509e07e1283c9f37a31cae11d9c9940ebfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3076923077, "max_line_length": 88, "alphanum_fraction": 0.5639455782, "include": true, "reason": "import numpy,import scipy", "num_tokens": 644}
import time import traceback from io import StringIO from typing import List, Tuple, Type, Dict, Any, Optional, Iterator from pathlib import Path import math from dataclasses import dataclass, field import warnings import sys import os import torch from torch import nn as nn import numpy as np import tqdm from rtg import TranslationExperiment as Experiment from rtg import log, device, my_tensor as tensor, debug_mode from rtg.module.generator import GeneratorFactory from rtg.data.dataset import Field, Batch as TrainerBatch from rtg.registry import MODELS Hypothesis = Tuple[float, List[int]] StrHypothesis = Tuple[float, str] if not sys.warnoptions: warnings.simplefilter("default") # Change the filter in this process os.environ["PYTHONWARNINGS"] = "default" # Also affect subprocesses def load_models(models: List[Path], exp: Experiment): res = [] for i, model_path in enumerate(models): assert model_path.exists() log.info(f"Load Model {i}: {model_path} ") chkpt = torch.load(str(model_path), map_location=device) model = exp.load_model_with_state(chkpt) res.append(model) return res class ReloadEvent(Exception): """An exception to reload model with new path -- Its a kind of hack to pass event back to caller and redo interactive shell-- """ def __init__(self, model_paths, state: Dict[str, Any]): super().__init__() self.model_paths = model_paths self.state = state @dataclass class DecoderBatch: idxs: List[int] = field(default_factory=list) # index in the file, for restoring the order srcs: List[str] = field(default_factory=list) seqs: List[str] = field(default_factory=list) # processed srcs refs: List[str] = field(default_factory=list) # references for logging if they exist ids: List[str] = field(default_factory=list) # original id column; not to be confused with line_count = 0 tok_count = 0 max_len = 0 max_len_buffer = 0 # Some extra buffer for target size; eg: tgt_len = 50 + src_len def add(self, idx, src, ref, seq, id): self.idxs.append(idx) self.srcs.append(src) 
self.refs.append(ref) self.seqs.append(seq) self.ids.append(id) self.line_count += 1 self.tok_count += len(seq) self.max_len = max(self.max_len, len(seq)) @property def padded_tok_count(self): return (self.max_len + self.max_len_buffer) * self.line_count def as_tensors(self, device): seqs = torch.zeros(self.line_count, self.max_len, device=device, dtype=torch.long) lens = torch.zeros(self.line_count, device=device, dtype=torch.long) for i, seq in enumerate(self.seqs): seqs[i, :len(seq)] = torch.tensor(seq, dtype=torch.long) lens[i] = len(seq) return seqs, lens @classmethod def from_lines(cls, lines: Iterator[str], batch_size: int, vocab: Field, sort=True, max_src_len=0, max_len_buffer=0): """ Note: this changes the order based on sequence length if sort=True :param lines: stream of input lines :param batch_size: number of tokens in batch :param vocab: Field to use for mapping word pieces to ids :param sort: sort based on descending order of length :param max_src_len : truncate at length ; 0 disables this :return: stream of DecoderBatches """ log.info("Tokenizing sequences") buffer = [] for i, line in enumerate(lines): line = line.strip() if not line: log.warning(f"line {i + 1} was empty. inserting a dot (.). " f"Empty lines are problematic when you want line-by-line alignment...") line = "." cols = line.split('\t') id, ref = None, None if len(cols) == 1: # SRC src = cols[0] elif len(cols) == 2: # ID \t SRC id, src = cols else: # ID \t SRC \t REF id, src, ref = cols[:3] seq = vocab.encode_as_ids(src, add_eos=True, add_bos=False) if max_src_len > 0 and len(seq) > max_src_len: log.warning(f"Line {i} full length={len(seq)} ; truncated to {max_src_len}") seq = seq[:max_src_len] buffer.append((i, src, ref, seq, id)) # idx, src, ref, seq, id if sort: log.info(f"Sorting based on the length. 
total = {len(buffer)}") buffer = sorted(buffer, reverse=True, key=lambda x: len(x[3])) # sort by length of seq batch = cls() batch.max_len_buffer = max_len_buffer for idx, src, ref, seq, _id in buffer: batch.add(idx=idx, src=src, ref=ref, seq=seq, id=_id) if batch.padded_tok_count >= batch_size: yield batch batch = cls() batch.max_len_buffer = max_len_buffer if batch.line_count > 0: yield batch class Decoder: default_beam_size = 5 def __init__(self, model, gen_factory: Type[GeneratorFactory], exp: Experiment, gen_args=None, debug=debug_mode): self.model = model self.exp = exp self.gen_factory = gen_factory self.debug = debug self.gen_args = gen_args if gen_args is not None else {} self.pad_val = exp.tgt_vocab.pad_idx self.bos_val = exp.tgt_vocab.bos_idx self.eos_val = exp.tgt_vocab.eos_idx self.dec_bos_cut = self.exp.config.get('trainer', {}).get('dec_bos_cut', False) (log.info if self.dec_bos_cut else log.debug)(f"dec_bos_cut={self.dec_bos_cut}") def generator(self, x_seqs, x_lens): return self.gen_factory(self.model, field=self.exp.tgt_vocab, x_seqs=x_seqs, x_lens=x_lens, **self.gen_args) @classmethod def combo_new(cls, exp: Experiment, model_paths: List[str], weights: List[float]): assert len(model_paths) == len(weights), 'one weight per model needed' assert abs(sum(weights) - 1) < 1e-3, 'weights must sum to 1' log.info(f"Combo mode of {len(model_paths)} models :\n {list(zip(model_paths, weights))}") model_paths = [Path(m) for m in model_paths] models = load_models(model_paths, exp) from rtg.syscomb import Combo combo = Combo(models) return cls.new(exp, model=combo, model_type='combo') @classmethod def new(cls, exp: Experiment, model=None, gen_args=None, model_paths: Optional[List[str]] = None, ensemble: int = 1, model_type: Optional[str] = None): """ create a new decoder :param exp: experiment :param model: Optional pre initialized model :param gen_args: any optional args needed for generator :param model_paths: optional model paths :param ensemble: number of 
models to use for ensembling (if model is not specified) :param model_type: model_type ; when not specified, model_type will be read from experiment :return: """ if not model_type: model_type = exp.model_type assert model_type in MODELS, f'{model_type} is invalid; known:{MODELS.keys()}' spec = MODELS[model_type] if model is None: model = spec.Model(exp=exp, **exp.model_args)[0] state = exp.maybe_ensemble_state(model_paths=model_paths, ensemble=ensemble) model.load_state_dict(state) log.info("Successfully restored the model state.") elif isinstance(model, nn.DataParallel): model = model.module model = model.eval().to(device=device) from rtg.registry import CRITERION if exp.config.get(CRITERION, {}).get('name') == 'binary_cross_entropy': log.info("((Going to decode in multi-label mode))") gen_args = gen_args or {} gen_args['multi_label'] = True return cls(model, spec.Generator, exp, gen_args) def greedy_decode(self, x_seqs, x_lens, max_len, **args) -> List[Hypothesis]: """ Implements a simple greedy decoder :param x_seqs: :param x_lens: length of x sequences :param max_len: :return: """ device = x_seqs.device batch_size = x_seqs.size(0) if self.dec_bos_cut: ys = x_seqs[:, :1] x_seqs = x_seqs[:, 1:] x_lens -= 1 else: ys = torch.full(size=(batch_size, 1), fill_value=self.bos_val, dtype=torch.long, device=device) gen = self.generator(x_seqs, x_lens) scores = torch.zeros(batch_size, device=device) actives = ys[:, -1] != self.eos_val max_x_len = x_lens.max().item() for i in range(1, max_x_len + max_len + 1): if actives.sum() == 0: # all sequences Ended break log_prob = gen.generate_next(ys) max_prob, next_word = torch.max(log_prob, dim=1) scores += max_prob ys = torch.cat([ys, next_word.view(batch_size, 1)], dim=1) actives &= ys[:, -1] != self.eos_val result = [] for i in range(batch_size): result.append((scores[i].item(), ys[i, 1:].tolist())) return result @staticmethod def masked_select(x, mask): assert x.shape[0] == mask.shape[0] assert mask.shape[1] == 1 selected = 
x.masked_select(mask) return selected.view(-1, x.size(1)) @staticmethod def repeat_adjacent(x, n, dim=0): """ repeat along a dimension values are adjacent. unlike torch.Tensor.repeat() which repeats at the end instead of adjacent. :param x: input tensor :param n: how many times to repeat :param dim: which dimension :return: repeated tensor """ shape = list(x.shape) # add a new dimension and expand it beam size expand_shape = [-1] * (len(x.shape) + 1) expand_shape[dim + 1] = n x = x.unsqueeze(dim + 1).expand(*expand_shape) shape[dim] *= n # reduce to original num of dims but given dim has n times more return x.contiguous().view(*shape) def beam_decode(self, x_seqs, x_lens, max_len, beam_size=default_beam_size, num_hyp=1, lp_alpha: float = 0., **args) -> List[List[Hypothesis]]: """ Beam decoder :param x_seqs: input x_seqs as a padded tensor :param x_lens: lengths of x_lengths :param max_len: maximum time steps to run :param beam_size: how many beams :param num_hyp: how many hypothesis to return ( must be <= beam_size) :param lp_alpha: length penalty (0.0 means disables) :return: """ args = dict((k, v) for k, v in args.items() if v is not None) if args: warnings.warn(f"Ignored args: {args}. 
To remove this message simply remove the args") assert beam_size >= num_hyp device = x_seqs.device batch_size = x_seqs.size(0) # ys = torch.zeros(batch_size, beam_size, max_len + 1, dtype=torch.long, device=device) if self.dec_bos_cut: # cut first time step from xs and repeat beam_size times, add time dim # [Batch] -> [Batch x 1=Beam] -> [Batch x Beam] -> [Batch x Beam x 1=Time] ys = x_seqs[:, 0].unsqueeze(-1).expand(-1, beam_size).unsqueeze(-1) x_seqs = x_seqs[:, 1:] # cut x_lens -= 1 else: ys = torch.full((batch_size, beam_size, 1), fill_value=self.bos_val, device=device, dtype=torch.long) # repeat x_seqs and x_lens beam times beamed_x_seqs = self.repeat_adjacent(x_seqs, n=beam_size, dim=0) beamed_x_lens = self.repeat_adjacent(x_lens, n=beam_size, dim=0) gen = self.generator(beamed_x_seqs, beamed_x_lens) scores = torch.zeros(batch_size, beam_size, device=device) actives = ys[:, :, 0] != self.eos_val lengths = torch.full((batch_size, beam_size), fill_value=max_len, device=device, dtype=torch.long) max_x_len = x_lens.max().item() for t in range(1, max_x_len + max_len + 1): if actives.sum() == 0: # all sequences Ended break # [Batch x Beams x Vocab] <-- [Batch x Beams x Time] flat_ys = ys.contiguous().view(batch_size * beam_size, -1) log_prob = gen.generate_next(flat_ys) # ys upto current time step log_prob = log_prob.view(batch_size, beam_size, -1) if t == 1: # Note: since sequences are duplicated, to start with # we need to pick the top k beams from a single beam # How? mask out all beams, except the first beam beam_mask = torch.full((batch_size, beam_size, 1), fill_value=1, device=device, dtype=torch.bool) beam_mask[:, 0, :] = 0 log_prob.masked_fill_(mask=beam_mask, value=float('-inf')) inactives = ~actives if inactives.sum() > 0: # Goal: do not let the inactive beams grow. How? 
# NOTE(review): reconstructed conventional formatting of a whitespace-mangled fragment.
# This is the interior of the Decoder class; the fragment opens inside beam_decode's
# per-timestep loop (the method's def line is above this view).
            # this is tricky
            # we set -inf to all next words of inactive beams (so none of them make to topk)
            log_prob.masked_fill_(mask=inactives.unsqueeze(-1), value=float('-inf'))
            # But we need to preserve the inactive beam (just one copy) if it is still in topk. how?
            # just set zero to just one word of inactive beam
            # shouldn't matter which word since an EOS has already appeared --> pick index 0 word
            log_prob[:, :, 0].masked_fill_(mask=inactives, value=0.0)
            # add current beam_scores all possible next_words
            # broadcast scores to each word in vocab [Batch x Beams x Vocab=1]
            next_scores = scores.unsqueeze(-1) + log_prob
            # max_probs and next_words: [Batch x Beams x Beams] --> [Batch x Beams*Beams]
            next_scores, next_words = next_scores.topk(k=beam_size, dim=-1, largest=True)
            next_scores = next_scores.view(batch_size, beam_size * beam_size)
            next_words = next_words.view(batch_size, beam_size * beam_size)
            # Trim beams: [Batch, Beams] <-- [Batch, Beams*Beams]
            scores, next_words_idxs = next_scores.topk(k=beam_size, dim=-1, largest=True)
            next_words = next_words.gather(dim=1, index=next_words_idxs)
            # task: rearrange ys based on the newer ranking of beams
            # ys_idx: [Beams] --> [Beams x 1] --> [Beams x Beams]
            # --> [1 x Beams x Beams] --> [Batch x Beams * Beams]
            ys_idx = torch.arange(beam_size, device=device) \
                .unsqueeze(-1).expand(-1, beam_size) \
                .unsqueeze(0).expand(batch_size, -1, -1).contiguous() \
                .view(batch_size, beam_size * beam_size)
            # [Batch x Beams] <- [Batch x Beams*Beams] as per the topk next_scores of beams
            ys_idx = ys_idx.gather(dim=1, index=next_words_idxs)
            ys_idx = ys_idx.unsqueeze(-1).expand_as(ys)  # expand along time dim
            ys = ys.gather(1, ys_idx)  # re arrange beams
            ys = torch.cat([ys, next_words.unsqueeze(-1)], dim=-1)  # cat along the time dim
            # Task: update lengths and active flag of beam
            ended_beams = actives & (next_words == self.eos_val)  # it was active but saw EOS now @t
            lengths.masked_fill_(mask=ended_beams, value=t)
            actives &= next_words != self.eos_val  # was active and not EOS yet
        ys = ys[:, :, 1:]  # remove BOS
        if lp_alpha > 0:
            # Length-penalty normalisation so long hypotheses are not unfairly punished.
            # Page 12 of Wu et al (2016) Google NMT : https://arxiv.org/pdf/1609.08144.pdf
            # score(y, X) = \frac{ logP(Y | X) }{ lp(Y)}
            # lp(Y) = \frac{ (5 + |Y|)^α }{ (5 + 1)^α }
            penalty = (5 + lengths.float()).pow(lp_alpha) / math.pow(6, lp_alpha)
            scores = scores / penalty
        n_hyp_scores, n_hyp_idxs = scores.topk(k=num_hyp, dim=-1)  # pick num_hyp beams
        # Package per-sentence hypotheses as (score, token-id list) pairs.
        result = []
        for seq_idx in range(batch_size):
            result.append([])
            for hyp_score, beam_idx in zip(n_hyp_scores[seq_idx], n_hyp_idxs[seq_idx]):
                result[-1].append((hyp_score, ys[seq_idx, beam_idx, :].tolist()))
        return result

    @property
    def inp_vocab(self) -> Field:
        """Vocabulary used to encode the *input* side of decoding.

        For a bidirectional (binmt) model the encoder half named in
        gen_args['path'] ('E1' or 'E2') selects which vocabulary applies;
        every other model type reads from the source vocabulary.
        """
        # the choice of vocabulary can be tricky, because of bidirectional model
        if self.exp.model_type == 'binmt':
            return {
                'E1': self.exp.src_vocab,
                'E2': self.exp.tgt_vocab
            }[self.gen_args['path'][:2]]
        else:  # all others go from source as input to target as output
            return self.exp.src_vocab

    @property
    def out_vocab(self) -> Field:
        """Vocabulary used to decode the *output* side.

        For binmt the trailing decoder name in gen_args['path'] ('D1' or 'D2')
        selects the vocabulary; all other model types emit target vocabulary.
        """
        # the choice of vocabulary can be tricky, because of bidirectional model
        if self.exp.model_type == 'binmt':
            return {
                'D1': self.exp.src_vocab,
                'D2': self.exp.tgt_vocab
            }[self.gen_args['path'][-2:]]
        else:  # all others go from source as input to target as output
            return self.exp.tgt_vocab

    def decode_sentence(self, line: str, max_len=20, prepared=False, add_bos=False,
                        **args) -> List[StrHypothesis]:
        """Beam-decode a single sentence and return (score, text) hypotheses.

        :param line: input sentence; when prepared=True it is a space-separated
            list of token ids, otherwise raw text to be encoded by inp_vocab
        :param max_len: maximum output length passed to the decoders
        :param prepared: input is already token ids
        :param add_bos: prepend BOS to the input sequence
        """
        line = line.strip()
        if prepared:
            in_seq = [int(t) for t in line.split()]
            # Normalise the prepared sequence: ensure optional BOS and mandatory EOS.
            if add_bos and in_seq[0] != self.bos_val:
                in_seq.insert(0, self.bos_val)
            if in_seq[-1] != self.eos_val:
                in_seq.append(self.eos_val)
        else:
            in_seq = self.inp_vocab.encode_as_ids(line, add_eos=True, add_bos=add_bos)
        in_seqs = tensor(in_seq, dtype=torch.long).view(1, -1)  # batch of one
        in_lens = tensor([len(in_seq)], dtype=torch.long)
        if self.debug:
            # Also run greedy decoding purely for comparison in the debug log.
            greedy_score, greedy_out = self.greedy_decode(in_seqs, in_lens, max_len, **args)[0]
            greedy_out = self.out_vocab.decode_ids(greedy_out, trunc_eos=True)
            log.debug(f'Greedy : score: {greedy_score:.4f} :: {greedy_out}')
        beams: List[List[Hypothesis]] = self.beam_decode(in_seqs, in_lens, max_len, **args)
        beams = beams[0]  # first sentence, the only one we passed to it as input
        result = []
        for i, (score, beam_toks) in enumerate(beams):
            out = self.out_vocab.decode_ids(beam_toks, trunc_eos=True)
            if self.debug:
                log.debug(f"Beam {i}: score:{score:.4f} :: {out}")
            result.append((score, out))
        return result

    def decode_visualize(self, line: str, target=None, max_len=20, reduction=None, **args):
        """Decode (or force-decode `target`) and return attention tensors for visualization.

        :param line: source sentence
        :param target: optional reference; when given, it is force-decoded and
            its log-prob sum is returned as the score instead of greedy output
        :param reduction: underscore-separated (dim, func) pairs such as
            'layers_mean_heads_max' applied to the attention arrays; 'none'
            disables reduction
        :returns: dict with source/translation text, ids, tokens, score and the
            encoder self-, decoder self- and decoder source-attention arrays
        """
        line = line.strip()
        assert hasattr(self.model, 'cache_attn'), f'{type(self.model)} does not have cache_attn feature'
        if not self.model.cache_attn:
            self.model.cache_attn = True  # turn on attention caching for this run
        # EOS was added to encoder sequence during training
        in_toks = self.inp_vocab.tokenize(line) + [self.inp_vocab.eos_tok]
        in_seq = self.inp_vocab.encode_as_ids(line, add_eos=True, add_bos=False)
        in_seqs = tensor(in_seq, dtype=torch.long).view(1, -1)
        in_lens = tensor([len(in_seq)], dtype=torch.long)
        if target:
            # Force-decoding path: score the given target under the model.
            # tgt_toks = [self.out_vocab.bos_tok] + self.out_vocab.tokenize(target) + [self.out_vocab.eos_tok]
            tgt_seq = self.out_vocab.encode_as_ids(target, add_eos=True, add_bos=True)
            tgt_seqs = tensor(tgt_seq, dtype=torch.long).view(1, -1)
            tgt_in_seqs = tgt_seqs[:, :-1]  # skip EOS
            x_mask = (in_seqs != self.inp_vocab.pad_idx).unsqueeze(1)
            y_mask = TrainerBatch.make_autogres_mask_(tgt_in_seqs, self.out_vocab.pad_idx)
            out_feats = self.model(in_seqs, tgt_in_seqs, x_mask, y_mask)
            # self.model.generator(out_feats)
            x_probs = self.model.generator(out_feats, score='log_probs')  # B=1 x T x V
            out_ids = tgt_seqs[0, 1:]  # Skip BOS  # [T]
            x_probs = x_probs.squeeze(0)  # T x V
            force_score = x_probs.gather(1, out_ids.view(-1, 1))  # log-prob of each ref token
            score = force_score.sum().item()
            out_ids = out_ids.tolist()
        else:
            score, out_ids = self.greedy_decode(in_seqs, in_lens, max_len, **args)[0]  # [0] since Batch=1 sentence
        attns = [self.model.encoder.self_attn, self.model.decoder.self_attn, self.model.decoder.src_attn]
        attns = [a[0].cpu().detach().numpy() for a in attns]  # first (only) batch item
        if reduction and reduction.lower() == 'none':
            reduction = None
        if reduction:
            # reduction is (dimension, function) pairs, e.g. 'layers_mean_heads_max'
            parts = reduction.split('_')
            assert len(parts) % 2 == 0, f'reduction={reduction} is invalid'
            for dim, func in zip(parts[::2], parts[1::2]):
                dim = dict(layers=0, heads=1)[dim]
                func = dict(mean=np.mean, max=np.amax)[func]
                attns = [func(a, axis=dim, keepdims=True) for a in attns]
        xx_attn, yy_attn, yx_attn = attns
        out_line = self.out_vocab.decode_ids(out_ids, trunc_eos=True)
        out_toks = [self.out_vocab.bos_tok] + self.out_vocab.tokenize(out_line) + [self.out_vocab.eos_tok]
        # NOTE(review): 'target_lenth' is a typo, but it is a runtime dict key that
        # downstream consumers may rely on — left unchanged; confirm before renaming.
        result = dict(source=line, translation=out_line, score=score,
                      in_ids=in_seq, in_toks=in_toks,
                      out_ids=out_ids, out_toks=out_toks,
                      source_length=len(in_toks), target_lenth=len(out_toks),
                      xx_attn=xx_attn, yy_attn=yy_attn, yx_attn=yx_attn,
                      reduction=reduction)
        return result

    def next_word_distr(self, past_seq, x_seqs=None, x_lens=None):
        """
        Gets log distribution of next word
        :param past_seq: paste sequence
        :param x_seqs: optional; source sequence,
        :param x_lens: optional; source sequence length
        :return: log probability distribution of next word
        """
        return self.generator(x_seqs=x_seqs, x_lens=x_lens).generate_next(past_seq)

    # noinspection PyUnresolvedReferences
    def decode_interactive(self, **args):
        """Interactive REPL over decode_sentence with ':'-prefixed meta commands.

        Commands adjust decoding args (beam_size, num_hyp, lp_alpha, debug,
        binmt path) or switch models (raises ReloadEvent for the caller to
        rebuild the shell). Any other input line is decoded and printed.
        """
        import sys
        import readline  # imported for its side effect: line editing/history in input()
        helps = [(':quit', 'Exit'),
                 (':help', 'Print this help message'),
                 (':beam_size <n>', 'Set beam size to n'),
                 (':lp_alpha <n>', 'Set length penalty alpha'),
                 (':num_hyp <k>', 'Print top k hypotheses'),
                 (':debug', 'Flip debug flag'),
                 (':models', 'show all available models of this experiment'),
                 (':model <number>', 'reload shell with the model chosen by <number>')
                 ]
        if self.exp.model_type == 'binmt':
            helps.append((':path <path>', 'BiNMT modules: {E1D1, E2D2, E1D2E2D1, E2D2E1D2}'))

        def print_cmds():
            # Render the command help table
            for cmd, msg in helps:
                print(f"\t{cmd:15}\t-\t{msg}")

        global debug_mode
        print("Launching Interactive shell...")
        import rtg.module.generator as gen
        gen.INTERACTIVE = True
        print_cmds()
        print_state = True
        while True:
            if print_state:
                # Echo the current decoder settings once after each change
                state = ' '.join(f'{k}={v}' for k, v in args.items())
                if self.exp.model_type == 'binmt':
                    state += f' path={self.gen_args.get("path")}'
                state += f' debug={debug_mode}'
                print('\t|' + state)
                print_state = False
            line = input('Input: ')
            line = line.strip()
            if not line:
                continue
            try:
                if line == ':quit':
                    break
                elif line == ':help':
                    print_cmds()
                elif line.startswith(":beam_size"):
                    args['beam_size'] = int(line.replace(':beam_size', '').replace('=', '').strip())
                    print_state = True
                elif line.startswith(":num_hyp"):
                    args['num_hyp'] = int(line.replace(':num_hyp', '').replace('=', '').strip())
                    print_state = True
                elif line.startswith(":lp_alpha"):
                    args['lp_alpha'] = float(line.replace(':lp_alpha', '').replace('=', '').strip())
                    print_state = True
                elif line == ":debug":
                    debug_mode = self.debug = not debug_mode
                    print_state = True
                elif line.startswith(":path"):
                    self.gen_args['path'] = line.replace(':path', '').replace('=', '').strip()
                    print_state = True
                elif line.startswith(":models"):
                    for i, mod_path in enumerate(self.exp.list_models()):
                        print(f"\t{i}\t{mod_path}")
                elif line.startswith(":model"):
                    mod_idxs = [int(x) for x in line.replace(":model", "").replace("=", "").strip().split()]
                    models = self.exp.list_models()
                    mod_paths = []
                    for mod_idx in mod_idxs:
                        if 0 <= mod_idx < len(models):
                            mod_paths.append(str(models[mod_idx]))
                        else:
                            print(f"\tERROR: Index {mod_idx} is invalid")
                    if mod_paths:
                        print(f"\t Switching to models {mod_paths}")
                        # ReloadEvent carries current args so the new shell keeps settings
                        raise ReloadEvent(mod_paths, state=args)
                else:
                    # Not a meta command: treat the line as a sentence to decode
                    start = time.time()
                    res = self.decode_sentence(line, **args)
                    print(f'\t|took={1000 * (time.time() - start):.3f}ms')
                    for score, hyp in res:
                        print(f'  {score:.4f}\t{hyp}')
            except ReloadEvent as re:
                raise re  # send it to caller
            except EOFError as e1:
                break
            except Exception:
                traceback.print_exc()
                print_state = True

    @staticmethod
    def _remove_null_vals(args: Dict):
        """Drop entries whose value is None so defaults apply downstream."""
        return {k: v for k, v in args.items() if v is not None}  # remove None args

    def decode_file(self, inp: Iterator[str], out: StringIO, beam_size=default_beam_size,
                    num_hyp=1, batch_size=1, max_src_len=-1, **args):
        """Decode a stream of input lines in batches and write hypotheses to `out`.

        Output format per hypothesis: optional '<id>\\t' prefix, hypothesis text,
        a tab and the score; a blank line separates sentences when num_hyp > 1.
        Results are buffered and re-sorted so output order matches input order.
        """
        args = self._remove_null_vals(args)
        log.info(f"Args to decoder : {args} and num_hyp={num_hyp} "
                 f"batch_size={batch_size} max_src_len={max_src_len}")
        batches: Iterator[DecoderBatch] = DecoderBatch.from_lines(
            inp, batch_size=batch_size, vocab=self.inp_vocab, max_src_len=max_src_len,
            max_len_buffer=args.get('max_len', 1))

        def _decode_all():
            # Generator: decode every batch, then yield results in original input order
            buffer = []
            start_at = time.time()
            n_src_toks = 0
            n_hyp_toks = 0
            with tqdm.tqdm(batches, dynamic_ncols=True, desc='Decoding', unit='segs') as data_bar:
                for batch in data_bar:
                    in_seqs, in_lens = batch.as_tensors(device=device)
                    if beam_size > 1:
                        batched_hyps: List[List[Hypothesis]] = self.beam_decode(
                            in_seqs, in_lens, num_hyp=num_hyp, **args)
                    else:
                        # Greedy yields one hypothesis per sentence; wrap to match beam shape
                        batched_hyps: List[Hypothesis] = self.greedy_decode(in_seqs, in_lens, **args)
                        batched_hyps: List[List[Hypothesis]] = [[h] for h in batched_hyps]
                    assert len(batched_hyps) == batch.line_count
                    for i, hyps in enumerate(batched_hyps):
                        idx = batch.idxs[i]
                        src = batch.srcs[i]
                        _id = batch.ids[i]
                        log.info(f"{idx}: SRC: {batch.srcs[i]}")
                        ref = batch.refs[i]  # just for the sake of logging, if it exists
                        if ref:
                            log.info(f"{idx}: REF: {batch.refs[i]}")
                        result = []
                        _hyp_toks = []
                        for j, (score, hyp) in enumerate(hyps):
                            hyp_line = self.out_vocab.decode_ids(hyp, trunc_eos=True)  # tok ids to string
                            log.info(f"{idx}: HYP{j}: {score:g} : {hyp_line}")
                            result.append((score, hyp_line))
                            _hyp_toks.append(len(hyp_line.split()))
                        buffer.append((idx, src, result, _id))
                        # Track throughput for the progress bar
                        n_src_toks += len(src.split())
                        n_hyp_toks += max(_hyp_toks)
                    time_delta = time.time() - start_at
                    speed = f'src: {n_src_toks / time_delta:.2f} toks/sec hyp: {n_hyp_toks / time_delta:.2f} toks/sec'
                    data_bar.set_postfix_str(speed)
            buffer = sorted(buffer, key=lambda x: x[0])  # restore order
            for _, src, result, _id in buffer:
                yield src, result, _id

        streamed_results: Iterator[Tuple[str, List[StrHypothesis], Any]] = _decode_all()
        for src, hyps, _id in streamed_results:
            prefix = f'{_id}\t' if _id else ''  # optional Id
            out_line = '\n'.join(f'{prefix}{hyp}\t{score:.4f}' for score, hyp in hyps)
            out.write(f'{out_line}\n')
            if num_hyp > 1:
                out.write('\n')

    def decode_stream(self, inp: Iterator[str], out: StringIO, max_src_len=-1, **args):
        """Decode one line at a time, writing (and flushing) the best hypothesis per line."""
        args = self._remove_null_vals(args)
        log.info(f"Args to decoder : {args} max_src_len={max_src_len}")
        for inp_line in inp:
            log.info(f"SRC: {inp_line}")
            out_line = self.decode_sentence(line=inp_line, **args)[0][1]  # 0th result, 1st hyp
            log.info(f"HYP: {out_line} \n")
            out.write(f'{out_line}\n')
            out.flush()  # flush per line so downstream consumers see results immediately
{"hexsha": "151290e4d01237cbde0856db758077e9bc899e77", "size": 30155, "ext": "py", "lang": "Python", "max_stars_repo_path": "rtg/module/decoder.py", "max_stars_repo_name": "isi-vista/rtg", "max_stars_repo_head_hexsha": "149415f424f2a6585cbe0d97f0007b8b0b53d164", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rtg/module/decoder.py", "max_issues_repo_name": "isi-vista/rtg", "max_issues_repo_head_hexsha": "149415f424f2a6585cbe0d97f0007b8b0b53d164", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rtg/module/decoder.py", "max_forks_repo_name": "isi-vista/rtg", "max_forks_repo_head_hexsha": "149415f424f2a6585cbe0d97f0007b8b0b53d164", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0381679389, "max_line_length": 122, "alphanum_fraction": 0.5677997015, "include": true, "reason": "import numpy", "num_tokens": 7311}
import os
import sys
import numpy as np
import codecs


def convert_meta_to_utts(meta_file, out_file):
    """Convert a pipe-delimited metadata file to festival-style utts.data.

    Each metadata line is ``file_id|...|text``; the third field is the text.
    Output line format: ``( file_id " text ")``.

    :param meta_file: path to the ``|``-separated metadata file
    :param out_file: path of the utts.data file to write
    """
    with open(meta_file, "r") as meta_f, open(out_file, "w") as out_f:
        for raw_line in meta_f:
            # BUG FIX: the original kept the trailing newline of the last field,
            # producing malformed entries like `( id " text\n ")`.
            line = raw_line.rstrip("\n")
            if not line:
                continue  # skip blank lines instead of crashing on fields[2]
            fields = line.split("|")
            file_id = fields[0]
            text = fields[2]
            out_f.write("( " + file_id + " \" " + text + " \")\n")


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print('Usage: python src/prepare_txt_done_data_file.py <meta_file> <utts.data>\n')
        sys.exit(0)
    convert_meta_to_utts(sys.argv[1], sys.argv[2])
{"hexsha": "6c3e00ef6300dd57d6d3de652c50581a3f9bb91d", "size": 552, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/prepare_txt_done_data_file.py", "max_stars_repo_name": "AvashnaGovender/Tacotron", "max_stars_repo_head_hexsha": "b4d710ffb0f9e7ef0096d1993b8a24cae4f0d557", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-24T16:10:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T16:10:08.000Z", "max_issues_repo_path": "scripts/prepare_txt_done_data_file.py", "max_issues_repo_name": "AvashnaGovender/Tacotron", "max_issues_repo_head_hexsha": "b4d710ffb0f9e7ef0096d1993b8a24cae4f0d557", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/prepare_txt_done_data_file.py", "max_forks_repo_name": "AvashnaGovender/Tacotron", "max_forks_repo_head_hexsha": "b4d710ffb0f9e7ef0096d1993b8a24cae4f0d557", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-17T10:48:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T10:48:50.000Z", "avg_line_length": 19.7142857143, "max_line_length": 90, "alphanum_fraction": 0.5670289855, "include": true, "reason": "import numpy", "num_tokens": 160}
[STATEMENT] lemma lock_okE: "\<lbrakk> lock_ok ls ts; \<forall>t. ts t = None \<longrightarrow> (\<forall>l. has_locks (ls $ l) t = 0) \<Longrightarrow> Q; \<forall>t e x ln. ts t = \<lfloor>((e, x), ln)\<rfloor> \<longrightarrow> (\<forall>l. has_locks (ls $ l) t + ln $ l = expr_locks e l) \<Longrightarrow> Q \<rbrakk> \<Longrightarrow> Q" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>lock_ok ls ts; \<forall>t. ts t = None \<longrightarrow> (\<forall>l. has_locks (ls $ l) t = 0) \<Longrightarrow> Q; \<forall>t e x ln. ts t = \<lfloor>((e, x), ln)\<rfloor> \<longrightarrow> (\<forall>l. has_locks (ls $ l) t + ln $ l = expr_locks e l) \<Longrightarrow> Q\<rbrakk> \<Longrightarrow> Q [PROOF STEP] by(fastforce simp add: lock_ok_def)
{"llama_tokens": 304, "file": "JinjaThreads_J_Threaded", "length": 1}
// Copyright (C) 2013 Eurodecision // Authors: Guillaume Pinot // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <boost/property_map/compose_property_map.hpp> #include <iostream> int main() { const int idx[] = {2, 0, 4, 1, 3}; double v[] = {1., 3., 0., 4., 2.}; boost::compose_property_map<double*, const int*> cpm(v, idx); for (int i = 0; i < 5; ++i) std::cout << get(cpm, i) << " "; std::cout << std::endl; for (int i = 0; i < 5; ++i) ++cpm[i]; for (int i = 0; i < 5; ++i) std::cout << get(cpm, i) << " "; std::cout << std::endl; for (int i = 0; i < 5; ++i) put(cpm, i, 42.); for (int i = 0; i < 5; ++i) std::cout << get(cpm, i) << " "; std::cout << std::endl; return 0; }
{"hexsha": "ed809632a35c78f6027759f9008aaf7e0975c00f", "size": 889, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.71.0/libs/property_map/example/compose_property_map_example.cpp", "max_stars_repo_name": "rajeev02101987/arangodb", "max_stars_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "3rdParty/boost/1.71.0/libs/property_map/example/compose_property_map_example.cpp", "max_issues_repo_name": "rajeev02101987/arangodb", "max_issues_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "3rdParty/boost/1.71.0/libs/property_map/example/compose_property_map_example.cpp", "max_forks_repo_name": "rajeev02101987/arangodb", "max_forks_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 24.027027027, "max_line_length": 66, "alphanum_fraction": 0.5253093363, "num_tokens": 319}
import cv2
import sys
import numpy


def detect(img, cascade):
    """Run a Haar cascade on a grayscale image.

    :param img: grayscale input image
    :param cascade: cv2.CascadeClassifier instance
    :return: array with one [x1, y1, x2, y2] corner box per detection,
        or an empty list when nothing is found
    """
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3,
                                     minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    # detectMultiScale returns [x, y, w, h]; convert to corner form [x1, y1, x2, y2]
    rects[:, 2:] += rects[:, :2]
    return rects


def draw_rects(img, rects, color):
    """Draw each [x1, y1, x2, y2] rectangle onto img with a 2 px border."""
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)


if __name__ == '__main__':
    # raw string so Windows backslashes are not treated as escape sequences
    img = cv2.imread(r'C:\Code_python\Image\Picture\Tiger.jpg', cv2.IMREAD_COLOR)  # Read image file
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    rects = detect(gray, cascade)

    # Extract face coordinates.
    # BUG FIX: detect() returns rows of exactly 4 values [x1, y1, x2, y2];
    # the original read indices 3, 4 and 5, which raises IndexError.
    x1, y1, x2, y2 = rects[0]
    x = x2 - x1  # face width
    y = y2 - y1  # face height

    # Extract face ROI — numpy indexes rows (y) first, then columns (x).
    faceROI = gray[y1:y2, x1:x2]

    # Show face ROI
    cv2.imshow('Display face ROI', faceROI)

    small = cv2.imread("average_face.png", cv2.IMREAD_COLOR)
    print("here")
    small = cv2.resize(small, (x, y))  # cv2.resize expects (width, height)

    cv2.namedWindow('Display image')  # create window for display
    cv2.imshow('Display image', small)  # Show image in the window
    print("size of image: ", img.shape)  # print size of image
    cv2.waitKey(1000)
{"hexsha": "1b8be3fc882eb9be27c95ef49065f6b51a3f99b1", "size": 1410, "ext": "py", "lang": "Python", "max_stars_repo_path": "Image/test.py", "max_stars_repo_name": "pection/InternshipProject", "max_stars_repo_head_hexsha": "2b39cc244e7ac989795d3aba5c1a11bb6c9a57b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Image/test.py", "max_issues_repo_name": "pection/InternshipProject", "max_issues_repo_head_hexsha": "2b39cc244e7ac989795d3aba5c1a11bb6c9a57b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Image/test.py", "max_forks_repo_name": "pection/InternshipProject", "max_forks_repo_head_hexsha": "2b39cc244e7ac989795d3aba5c1a11bb6c9a57b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.652173913, "max_line_length": 126, "alphanum_fraction": 0.6007092199, "include": true, "reason": "import numpy", "num_tokens": 417}
function varargout = Read_FrictionmodeGUI(varargin)
% READ_FRICTIONMODEGUI  GUIDE-generated entry point for the friction-mode GUI.
% Dispatches to the named sub-function when called with a callback name,
% otherwise opens the figure via gui_mainfcn.
% Last Modified by GUIDE v2.5 11-Feb-2019 18:03:10

% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name',       mfilename, ...
                   'gui_Singleton',  gui_Singleton, ...
                   'gui_OpeningFcn', @Read_FrictionmodeGUI_OpeningFcn, ...
                   'gui_OutputFcn',  @Read_FrictionmodeGUI_OutputFcn, ...
                   'gui_LayoutFcn',  [] , ...
                   'gui_Callback',   []);
if nargin && ischar(varargin{1})
    gui_State.gui_Callback = str2func(varargin{1});
end

if nargout
    [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
    gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT


% --- Executes just before Read_FrictionmodeGUI is made visible.
function Read_FrictionmodeGUI_OpeningFcn(hObject, eventdata, handles, varargin)
% Initialize the GUI: show the global-slider value and disable all panels
% except the global one until a mode button is pressed.
% Choose default command line output for Read_FrictionmodeGUI
handles.output = hObject;
a=get(handles.global_slider,'Value'); %Get slider value of "global slider"
set(handles.global_text,'String',num2str(a)); %Set text above "global slider" to slider value
set(findall(handles.random_panel1, '-property', 'enable'),'enable', 'off'); %Disable Random_panel1
set(findall(handles.random_panel2, '-property', 'enable'),'enable', 'off'); %Disable Random_panel2
set(findall(handles.raceline_panel, '-property', 'enable'),'enable', 'off'); %Disable Raceline_panel
set(findall(handles.uipanel10, '-property', 'enable'),'enable', 'off'); %Disable Raceline percentage mode panel
handles.another_map_1=0; %Set default value of "another_map" to "0"

% Update handles structure
guidata(hObject, handles);


% --- Outputs from this function are returned to the command line.
function varargout = Read_FrictionmodeGUI_OutputFcn(hObject, eventdata, handles)
% Return the figure handle stored by the opening function.
varargout{1} = handles.output;


% --- Executes on button press in global_button.
function global_button_Callback(hObject, eventdata, handles)
% Switch the GUI into "global" friction mode: clear all other mode widgets,
% enable only the global panel, and echo the global slider value.
set(handles.raceline_pathname, 'String', ''); %Set raceline pathname to none
set(handles.random_text1,'String',''); %Set text above "random slider1" to none
set(handles.random_text2,'String',''); %Set text above "random slider2" to none
set(handles.slider1_text2,'String',''); %Set text above "raceline slider1" to none
set(handles.slider2_text2,'String',''); %Set text above "raceline slider2" to none
cla(handles.axes1); %Clear axes1
set(findall(handles.global_panel, '-property', 'enable'),'enable', 'on'); %Enable Global panel
set(handles.checkbox, 'Enable','on'); %Enable Checkbox of Raceline Percentage Mode
set(findall(handles.random_panel1, '-property', 'enable'),'enable', 'off'); %Disable Random_panel1
set(findall(handles.random_panel2, '-property', 'enable'),'enable', 'off'); %Disable Random_panel2
set(findall(handles.raceline_panel, '-property', 'enable'),'enable', 'off'); %Disable Raceline_panel
a=get(handles.global_slider,'Value'); %Get slider value of "global slider"
set(handles.global_text,'String',num2str(a)); %Set text above "global slider" to slider value


% --- Executes on button press in random_button.
function random_button_Callback(hObject, eventdata, handles)
% Switch the GUI into "random" friction mode: clear other mode widgets,
% enable both random panels, and echo the two random slider values.
set(handles.raceline_pathname, 'String', ''); %Set raceline pathname to none
set(handles.global_text,'String',''); %Set text above "global slider" to none
set(handles.slider1_text2,'String',''); %Set text above "raceline slider1" to none
set(handles.slider2_text2,'String',''); %Set text above "raceline slider2" to none
cla(handles.axes1); %Clear axes1
set(findall(handles.global_panel, '-property', 'enable'),'enable', 'off'); %Disable Global panel
set(handles.checkbox, 'Enable','on'); %Enable Checkbox of Raceline Percentage Mode
set(findall(handles.random_panel1, '-property', 'enable'),'enable', 'on'); %Enable Random_panel1
set(findall(handles.random_panel2, '-property', 'enable'),'enable', 'on'); %Enable Random_panel2
set(findall(handles.raceline_panel, '-property', 'enable'),'enable', 'off'); %Disable Raceline_panel
a=get(handles.random_slider1,'Value'); %Get slider value of "random slider1"
b=get(handles.random_slider2,'Value'); %Get slider value of "random slider2"
set(handles.random_text1,'String',num2str(a)); %Set text above "random slider1" to slider value
set(handles.random_text2,'String',num2str(b)); %Set text above "random slider2" to slider value


% --- Executes on button press in raceline_button.
function raceline_button_Callback(hObject, eventdata, handles)
% Switch the GUI into "raceline" friction mode: disable the other panels and
% the percentage-mode checkbox, enable the raceline panel, and preview the
% Gaussian friction profile y = a*exp(-b*0.1*x^2) on axes1.
set(findall(handles.global_panel, '-property', 'enable'),'enable', 'off'); %Disable Global panel
set(handles.checkbox, 'Value', 0); %Set Checkbox Value to "0"
checkbox_Callback(handles.checkbox, eventdata, handles); %Run Checkbox_Callback
set(handles.checkbox, 'Enable','off'); %Disable Checkbox of Raceline Percentage Mode
set(handles.raceline_pathname_percent,'String',''); %Set raceline_pathname_percent to none
set(findall(handles.random_panel1, '-property', 'enable'),'enable', 'off'); %Disable Random_panel1
set(findall(handles.random_panel2, '-property', 'enable'),'enable', 'off'); %Disable Random_panel2
set(findall(handles.raceline_panel, '-property', 'enable'),'enable', 'on'); %Enable Raceline_panel
set(handles.global_text,'String',''); %Set text above "global slider" to none
set(handles.random_text1,'String',''); %Set text above "random slider1" to none
set(handles.random_text2,'String',''); %Set text above "random slider2" to none
a=get(handles.raceline_slider1,'Value'); %Get slider value of "raceline slider1"
b=get(handles.raceline_slider2,'Value'); %Get slider value of "raceline slider2"
set(handles.slider1_text2,'String',num2str(a)); %Set text above "raceline slider1" to slider value
set(handles.slider2_text2,'String',num2str(b)); %Set text above "raceline slider2" to slider value
x=-6:0.1:6; %Generate array
y=a*exp(-b*0.1*x.^2); %Generate function value
plot(handles.axes1,x,y) %Plot function
set(handles.axes1, 'Xlim', [-6,6]); %Set x-limits to [-6,6]


% --- Executes on button press in checkbox.
function checkbox_Callback(hObject, eventdata, handles)
% Toggle the raceline-percentage sub-panel widgets on/off with the checkbox.
if get(handles.checkbox, 'Value')==1 %If checkbox is activated: Enable all features!
    set(findall(handles.uipanel10, '-property', 'enable'),'enable', 'on'); %Enable Raceline percentage mode panel
    set(handles.browse3_button, 'Enable', 'On'); %Enable Raceline percentage mode browser button
    set(handles.percent_value, 'Enable', 'On'); %Enable Raceline percentage mode Value Text
elseif get(handles.checkbox, 'Value')==0 %If checkbox is not activated: Disable all features!
    set(findall(handles.uipanel10, '-property', 'enable'),'enable', 'off'); %Disable Raceline percentage mode panel
    set(handles.browse3_button, 'Enable', 'Off'); %Disable Raceline percentage mode browser button
    set(handles.percent_value, 'Enable', 'Off'); %Disable Raceline percentage mode Value Text
end


% --- Executes on slider movement.
function raceline_slider1_Callback(hObject, eventdata, handles)
% Amplitude slider (a) for the raceline profile: round to 1/100, echo the
% value, and redraw the preview curve.
a=get(handles.raceline_slider1,'Value'); %Get slider value of "raceline slider1"
b=get(handles.raceline_slider2,'Value'); %Get slider value of "raceline slider2"
a=round(a*100)/100; %Round value of "raceline slider1" to 1/100
set(handles.raceline_slider1,'Value', a); %Set slider value of "raceline slider1" to rounded value
set(handles.slider1_text2,'String',num2str(a)); %Set text above "raceline slider1" to slider value
x=-6:0.1:6; %Generate array
y=a*exp(-b*0.1*x.^2); %Generate function value
plot(handles.axes1,x,y) %Plot function
set(handles.axes1, 'Xlim', [-6,6]); %Set x-limits to [-6,6]


% --- Executes during object creation, after setting all properties.
function raceline_slider1_CreateFcn(hObject, eventdata, handles)
% Standard GUIDE boilerplate: give the slider a light background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor',[.9 .9 .9]);
end


% --- Executes on slider movement.
function raceline_slider2_Callback(hObject, eventdata, handles)
% Width slider (b) for the raceline profile: round to 1/100, echo the value,
% and redraw the preview curve.
a=get(handles.raceline_slider1,'Value'); %Get slider value of "raceline slider1"
b=get(handles.raceline_slider2,'Value'); %Get slider value of "raceline slider2"
b=round(b*100)/100; %Round value of "raceline slider2" to 1/100
set(handles.raceline_slider2,'Value', b); %Set slider value of "raceline slider2" to rounded value
set(handles.slider2_text2,'String',num2str(b)); %Set text above "raceline slider2" to slider value
x=-6:0.1:6; %Generate array
y=a*exp(-b*0.1*x.^2); %Generate function value
plot(handles.axes1,x,y) %Plot function
set(handles.axes1, 'Xlim', [-6,6]); %Set x-limits to [-6,6]


% --- Executes during object creation, after setting all properties.
function raceline_slider2_CreateFcn(hObject, eventdata, handles)
% Standard GUIDE boilerplate: give the slider a light background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor',[.9 .9 .9]);
end


% --- Executes on slider movement.
function random_slider1_Callback(hObject, eventdata, handles)
% Lower-bound slider for random mode: round to 1/50 and echo the value.
a=get(handles.random_slider1,'Value'); %Get slider value of "random slider1"
a=round(a*50)/50; %Round value of "random slider1" to 1/50
set(handles.random_slider1,'Value', a); %Set slider value of "random slider1" to rounded value
set(handles.random_text1,'String',num2str(a)); %Set text above "random slider1" to slider value


% --- Executes during object creation, after setting all properties.
function random_slider1_CreateFcn(hObject, eventdata, handles)
% Standard GUIDE boilerplate: give the slider a light background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor',[.9 .9 .9]);
end


% --- Executes on slider movement.
function random_slider2_Callback(hObject, eventdata, handles)
% Upper-bound slider for random mode: round to 1/50 and echo the value.
a=get(handles.random_slider2,'Value'); %Get slider value of "random slider2"
a=round(a*50)/50; %Round value of "random slider2" to 1/50
set(handles.random_slider2,'Value', a); %Set slider value of "random slider2" to rounded value
set(handles.random_text2,'String',num2str(a)); %Set text above "random slider2" to slider value


% --- Executes during object creation, after setting all properties.
function random_slider2_CreateFcn(hObject, eventdata, handles)
% Standard GUIDE boilerplate: give the slider a light background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor',[.9 .9 .9]);
end


% --- Executes on slider movement.
function global_slider_Callback(hObject, eventdata, handles)
% Global-mode friction slider: round to 1/50 and echo the value.
a=get(handles.global_slider,'Value'); %Get slider value of "global slider"
a=round(a*50)/50; %Round value of "global slider" to 1/50
set(handles.global_slider,'Value', a); %Set slider value of "global slider" to rounded value
set(handles.global_text,'String',num2str(a)); %Set text above "global slider" to slider value


% --- Executes during object creation, after setting all properties.
function global_slider_CreateFcn(hObject, eventdata, handles) %#ok<*DEFNU,*INUSD>
% Standard GUIDE boilerplate: give the slider a light background.
if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor',[.9 .9 .9]);
end


% --- Executes on button press in browse2_button.
function browse2_button_Callback(hObject, eventdata, handles)
% Pick a raceline .csv file and show its full path, or 'ERROR' for a
% non-.csv selection.
[filename2, pathname2] = uigetfile('*.csv', 'Pick a .csv file'); %Browse for file
handles.pathname_raceline = strcat(pathname2, filename2); %Connect path name and file name to one string
data_type=lower(filename2(end-2:end)); %Extract data type and convert to string
% NOTE(review): '==' compares char arrays element-wise and errors when the
% lengths differ; strcmp(data_type,'csv') would be safer — left unchanged.
if data_type =='csv' %#ok<*BDSCA>
    set(handles.raceline_pathname, 'String', handles.pathname_raceline); %Set GUI.full_path to string fullpathname
else
    set(handles.raceline_pathname, 'String', 'ERROR'); %Display "ERROR"
end
guidata(hObject, handles);


% --- Executes on button press in browse3_button.
function browse3_button_Callback(hObject, eventdata, handles)
% Pick a .csv file for raceline-percentage mode and show its full path,
% or 'ERROR' for a non-.csv selection.
[filename3, pathname3] = uigetfile('*.csv', 'Pick a .csv file'); %Browse for file
handles.raceline_pathname_percent_name = strcat(pathname3, filename3); %Connect path name and file name to one string
data_type=lower(filename3(end-2:end)); %Extract data type and convert to string
% NOTE(review): same element-wise '==' comparison caveat as browse2_button_Callback.
if data_type =='csv' %#ok<*BDSCA>
    set(handles.raceline_pathname_percent, 'String', handles.raceline_pathname_percent_name); %Set GUI.full_path to string fullpathname
else
    set(handles.raceline_pathname_percent, 'String', 'ERROR'); %Display "ERROR"
end
guidata(hObject, handles);


function percent_value_Callback(hObject, eventdata, handles)
% Intentionally empty: the percentage value is read and validated later by
% create_map_button_Callback.


% --- Executes during object creation, after setting all properties.
function percent_value_CreateFcn(hObject, eventdata, handles)
% Standard GUIDE boilerplate: white background for edit boxes on Windows.
if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor'))
    set(hObject,'BackgroundColor','white');
end


% --- Executes on button press in create_2_map_button.
function create_2_map_button_Callback(hObject, eventdata, handles)
% Request creation of a second map, then delegate to the regular
% create-map handler.
handles.another_map_1=1; %Set value of "another_map" to "1"
create_map_button_Callback(handles.create_map_button, eventdata, handles); %Run create_map_button_Callback


% --- Executes on button press in create_map_button.
function create_map_button_Callback(hObject, eventdata, handles) %#ok<*INUSL>
% Validate all GUI inputs and, if consistent, hand the selected friction-map
% parameters over to the base workspace and launch Friction_Map_Creation.
%
% Flow:
%   1. Forward the "another map" request flag (set by create_2_map_button).
%   2. Validate the raceline-percentage panel (checkbox, value, path).
%   3. Depending on the selected mode radio button (global / random /
%      raceline) validate the mode-specific inputs, export them with
%      assignin, close this GUI and start the map creation.
%
% a is the consistency flag: 0 = inconsistent input, 1 = all checks passed.
if handles.another_map_1==1                                             %if "another_map" == 1
    assignin('base', 'another_map', 1);                                 %Handover "another_map"==1 to base workspace
else
    assignin('base', 'another_map', 0);                                 %Handover "another_map"==0 to base workspace
end
a=0;                                                                    %Initialize the consistency check variable
%Check Raceline_percentage_panel on consistency
if get(handles.checkbox, 'Value')==1                                    %If Checkbox for Raceline Percentage Mode is activated
    d=str2double(get(handles.percent_value, 'String'));                 %Get Raceline Percentage Value (NaN if not numeric)
    if d>-99 && d<99                                                    %If Raceline Percentage Value is consistent (NaN also fails this test)
        path_percent = get(handles.raceline_pathname_percent, 'String');%Get Raceline Percentage Pathname
        if strcmp(path_percent, "") || strcmp(path_percent,'ERROR')     %If Pathname is empty or a previous error marker
            set(handles.raceline_pathname_percent, 'String', 'ERROR');  %Set Raceline Percentage Pathname to "ERROR"
        else                                                            %Else (pathname is consistent)
            assignin('base', 'raceline_percentage_mode', 1);            %Handover a positive ("1") raceline percentage mode
            assignin('base', 'raceline_percent_value', d);              %Handover the Raceline Percentage value
            assignin('base', 'path_raceline', path_percent);            %Handover Raceline Path
            a=1;                                                        %Mark the input as consistent
        end
    else                                                                %Else (raceline percentage value is not consistent)
        set(handles.percent_value, 'String', 'ERROR')                   %Set Raceline Percentage value to "ERROR"
    end
else                                                                    %Else (checkbox for Raceline Percentage Mode is not activated)
    assignin('base', 'raceline_percentage_mode', 0);                    %Handover a negative ("0") raceline percentage mode
    a=1;                                                                %Nothing else to validate in this panel
end
%End of Consistency Check: a=0: not consistent, a=1: consistent
% If everything is consistent, the GUI continues with the selected mode
if a==1
    if get(handles.global_button,'Value')                               %If Global_Button is activated
        global_value=get(handles.global_slider,'Value');                %Get slider value of "global slider"
        assignin('base', 'global_value', global_value);                 %Handover "global value" to base workspace
        assignin('base', 'mode', 'global');                             %Handover "mode" to base workspace
        close(Read_FrictionmodeGUI);                                    %Close Read_FrictionmodeGUI (calling the GUI function returns its figure handle)
        Friction_Map_Creation();                                        %Run Friction_Map_Creation
    elseif get(handles.random_button,'Value')                           %If Random_Button is activated
        random_value_1=get(handles.random_slider1,'Value');             %Get slider value of "random slider1" (lower bound)
        random_value_2=get(handles.random_slider2,'Value');             %Get slider value of "random slider2" (upper bound)
        if random_value_1<random_value_2                                %if slider values are consistent (lower < upper)
            assignin('base', 'random_value_1', random_value_1);         %Handover "random value1" to base workspace
            assignin('base', 'random_value_2', random_value_2);         %Handover "random value2" to base workspace
            assignin('base', 'mode', 'random');                         %Handover "mode" to base workspace
            close(Read_FrictionmodeGUI);                                %Close Read_FrictionmodeGUI
            Friction_Map_Creation();                                    %Run Friction_Map_Creation
        else                                                            %else (slider values are not consistent)
            set(handles.random_text1, 'String', '');                    %Clear text above "random slider1" to signal the error
            set(handles.random_text2, 'String', '');                    %Clear text above "random slider2" to signal the error
        end
    elseif get(handles.raceline_button,'Value')                         %If Raceline_Button is activated
        path = get(handles.raceline_pathname, 'String');                %Get Raceline Pathname
        if strcmp(path, "")                                             %If Raceline Pathname is empty
            set(handles.raceline_pathname, 'String', 'ERROR');          %Set Raceline Pathname to "ERROR"
        elseif strcmp(path,'ERROR')                                     %If Raceline Pathname already shows "ERROR"
            set(handles.raceline_pathname, 'String', 'ERROR');          %Keep displaying "ERROR"
        else                                                            %else (Raceline Pathname is consistent)
            a=get(handles.raceline_slider1,'Value');                    %Get slider value of "raceline slider1" (reuses a; the flag is no longer needed)
            b=get(handles.raceline_slider2,'Value');                    %Get slider value of "raceline slider2"
            assignin('base', 'raceline_value_1', a);                    %Handover "raceline value1" to base workspace
            assignin('base', 'raceline_value_2', b);                    %Handover "raceline value2" to base workspace
            assignin('base', 'mode', 'raceline');                       %Handover "mode" to base workspace
            assignin('base','path_raceline', handles.pathname_raceline);%Handover Path Raceline
            close(Read_FrictionmodeGUI);                                %Close Read_FrictionmodeGUI
            Friction_Map_Creation();                                    %Run Friction_Map_Creation
        end
    end
end
{"author": "TUMFTM", "repo": "sim_vehicle_dynamics", "sha": "df2ae95dbeb6f8e4591f31ee378acac8e812f358", "save_path": "github-repos/MATLAB/TUMFTM-sim_vehicle_dynamics", "path": "github-repos/MATLAB/TUMFTM-sim_vehicle_dynamics/sim_vehicle_dynamics-df2ae95dbeb6f8e4591f31ee378acac8e812f358/vehicle_environment/variable_friction/archive/generate_map/scripts/Read_FrictionmodeGUI.m"}
import time

import click
import gym
import numpy as np

from .agent import Agent
from .utils import KinematicConstraint, Rate, tf
from ..scene import Body, VR, Marker


class VRAgent(Agent):
    """Agent driven by a VR hand controller.

    Polls the VR controller every ``timescale`` simulation steps and converts
    the controller pose delta into linear/angular/grip velocity commands.
    """

    def __init__(self, env, timescale=1):
        super(VRAgent, self).__init__(env)
        scene = env.unwrapped.scene
        self._scene = scene
        self._timescale = int(timescale)
        self._rate = Rate(scene.dt / self._timescale)
        self._controller = VRController(scene)
        self._b_marker = BoundaryMarker(scene)   # shown when the arm would leave the workspace
        self._c_marker = InteractMarker(scene)   # shown until the controller grabs the tool
        self._counter = 0
        self._action_update = None               # cached command between recomputations

    def get_action_update(self):
        """Return the velocity command dict, or None while idle/out of bounds.

        The command is recomputed only every ``timescale`` calls; in between,
        the cached command is returned.
        """
        self._rate.sleep()
        if self._counter % self._timescale == 0:
            state = self._controller.state()
            if state is None:
                # controller has not grabbed the arm yet
                self._c_marker.show()
                self._b_marker.hide()
                return None
            self._c_marker.hide()
            dt = self._scene.dt * self._timescale
            low, high = self._scene.workspace
            arm = self._scene.robot.arm
            arm_target, grip_target = state
            prev_pos, prev_orn = tf(arm.tool_position)
            next_pos, next_orn = tf(arm_target)
            if any(next_pos < low) or any(next_pos > high):
                # the arm would leave the workspace; refuse to move
                self._b_marker.show()
                return None
            self._b_marker.hide()
            pos_err = next_pos - prev_pos
            orn_err = next_orn * prev_orn.inverse
            # (removed unused locals `grip` and `grip_err` -- the gripper
            # width was read but never used)
            self._action_update = dict(
                linear_velocity=pos_err / dt,
                angular_velocity=np.array(orn_err.axis) * orn_err.angle / dt,
                # NOTE(review): grip_target comes from VRController as
                # 10*(1.0 - analog) in [0, 10], so this maps it to [-5, 95];
                # the formula looks like it expects a [0, 1] value -- confirm.
                grip_velocity=10 * (grip_target - 0.5),
            )
        self._counter += 1
        return self._action_update


class VRController(object):
    """Maps VR grip-button events to (arm_target, grip_target) commands."""

    def __init__(self, scene):
        self._arm = scene.robot.arm
        self._grip = scene.robot.gripper
        self._device_id = None    # id of the controller currently attached, if any
        self._constraint = None   # kinematic link: controller pose -> tool pose

    def state(self):
        """Return (arm_target, grip_target) from the attached controller, or None."""
        for e in VR.events():
            if e.button_was_triggered(VR.GripButton):
                tool_pos, _ = tf(self._arm.tool.state.position)
                ctrl_pos, _ = tf(e.position)
                # attach only when the controller is within 5 cm of the tool
                if np.linalg.norm(ctrl_pos - tool_pos) < 0.05:
                    self._constraint = KinematicConstraint(
                        e.position, self._arm.tool_position)
                    self._device_id = e.controller_id
            elif e.button_was_released(VR.GripButton):
                self._device_id = None   # detach on grip release
            if self._device_id == e.controller_id:
                arm_target = self._constraint.get_child(e.position)
                grip_target = 10 * (1.0 - e.analog)   # trigger fully pressed -> 0
                return arm_target, grip_target
        return None


class BoundaryMarker(Marker):
    """Semi-transparent red box visualizing the workspace limits."""

    def __init__(self, scene):
        super(BoundaryMarker, self).__init__()
        self.client_id = scene.client_id
        self._center = np.median(scene.workspace, axis=0)
        self._size = np.ptp(scene.workspace, axis=0)

    def make(self):
        boundary = Body.box(size=self._size, client_id=self.client_id)
        boundary.position = self._center
        boundary.color = (1, 0, 0, 0.5)
        return boundary


class InteractMarker(Marker):
    """Pulsing sphere at the tool tip inviting the user to grab it."""

    def __init__(self, scene):
        super(InteractMarker, self).__init__()
        self.client_id = scene.client_id
        self._arm = scene.robot.arm

    def make(self):
        pos, _ = self._arm.tool.state.position
        sphere = Body.sphere(radius=0.05, client_id=self.client_id)
        sphere.position = pos
        sphere.color = (0, 0, 0, 0)   # created invisible; update() animates it
        return sphere

    def update(self, marker):
        # pulse the green channel over time
        marker.color = (1.0, 0.5 + 0.5 * np.sin(3 * time.time()), 0.0, 0.2)


@click.command(help='vr_agent env_name [options]')
@click.argument('env_name', type=str)
@click.option('-s', '--seed', default=0, help='seed')
@click.option('-t', '--timescale', type=int, default=10, help='time scale')
def main(env_name, seed, timescale):
    """Teleoperate `env_name` via VR, then replay the recorded actions."""
    env = gym.make(env_name).unwrapped
    env.observe(False)
    env.renders(shared=True)
    env.seed(seed)
    env.reset()
    agent = VRAgent(env, timescale)
    actions = []
    done = False
    while not done:
        action = agent.get_action()
        if action is not None:
            obs, reward, done, info = env.step(action)
            actions.append(action)
    # check recorded data by replaying it from the same seed
    env.seed(seed)
    env.reset()
    for action in actions:
        obs, reward, done, info = env.step(action)
    # NOTE(review): if `actions` is empty, `info` is unbound here.
    print('Success' if info['success'] else 'Failed')
    env.close()


if __name__ == "__main__":
    main()
{"hexsha": "294a5c51d691729f2c1a7795e769886f1ae9b741", "size": 4849, "ext": "py", "lang": "Python", "max_stars_repo_path": "mime/agent/vr_agent.py", "max_stars_repo_name": "rjgpinel/mime-release", "max_stars_repo_head_hexsha": "26a850c4ba5b702b86d068995614163338fb01df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-06-24T10:52:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-23T03:05:27.000Z", "max_issues_repo_path": "mime/agent/vr_agent.py", "max_issues_repo_name": "rjgpinel/mime-release", "max_issues_repo_head_hexsha": "26a850c4ba5b702b86d068995614163338fb01df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-18T12:45:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-18T12:45:15.000Z", "max_forks_repo_path": "mime/agent/vr_agent.py", "max_forks_repo_name": "rjgpinel/mime-release", "max_forks_repo_head_hexsha": "26a850c4ba5b702b86d068995614163338fb01df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-09T18:17:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T09:43:45.000Z", "avg_line_length": 31.0833333333, "max_line_length": 77, "alphanum_fraction": 0.5920808414, "include": true, "reason": "import numpy", "num_tokens": 1154}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec  3 22:49:52 2020

@author: lutra

Export supplementary table S2 ("Distribution of PLP related replicons") from
the sqlite table ``Phage_like_plasmids_SSU5_P1_D6_12Nov20``.

For every replicon of interest each matching database row is classified as
"grouped", "not complete", "chromosome" or "lower_coverage ...", the
annotation columns between "Major_replicon" and "strain" are shortened to
their identifier tokens, and the result is written to an .xlsx file.

The source table additionally carries raw annotation and metadata columns
(id, project_ID, nucleotide, biosample, organism, completeness, slen, shape,
PLP_status, reference coverages, replicon calls, ResFinder/ISel/VFDB/oriT
hits, strain/host/geography metadata, Other_features); see the table's
CREATE TABLE statement in the database for the full schema.
"""
from csv_in_tables_to_sqlite3 import csv_to_sqlite3
import sqlite3
import os
import pandas as pd
import numpy as np

PC_lab = True
if PC_lab:
    main = '/home/shiri/plasmid_project/Phage_like_plasmids/PLP_final/'
else:
    main = '/data/Current_work/Phage_like_plasmids/PLP_final/'
# NOTE(review): this unconditional override makes the PC_lab switch above dead
# code -- drop one of the two once the working directory is settled.
main = '/data/Current_work/Phage_like_plasmids/PLP_final/scripts_to_Github/'

# Column subset (name + sqlite type) pulled from the database, in the order
# they appear in the output table.
columns = '''
project_ID INTEGER,
nucleotide TEXT,
biosample TEXT,
organism TEXT,
completeness TEXT,
genome TEXT,
slen INTEGER,
shape TEXT,
PLP_status TEXT,
MF356679_D6_ref_cov INTEGER,
AF234172_P1_ref_cov INTEGER,
JQ965645_SSU5_ref_cov INTEGER,
Major_replicon INTEGER,
enterobacteriaceae_N INTEGER,
enterobacteriaceae TEXT,
D6_putative_replicon_orf42_N INTEGER,
D6_putative_replicon_orf42 TEXT,
ResFinder_N INTEGER,
ResFinder TEXT,
ISel_db_N INTEGER,
ISel_db TEXT,
VFDB_setB_nt_N INTEGER,
VFDB_setB_nt TEXT,
MF356679_D6_ref_CDS_N INTEGER,
MF356679_D6_ref_CDS TEXT,
AF234172_P1_ref_CDS_N INTEGER,
AF234172_P1_ref_CDS TEXT,
JQ965645_SSU5_ref_CDS_N INTEGER,
JQ965645_SSU5_ref_CDS TEXT,
strain TEXT,
host TEXT,
plasmid TEXT,
country TEXT,
isolation_source TEXT,
lat_lon TEXT,
collection_date TEXT,
collected_by TEXT,
host_disease TEXT,
latitude_and_longitude TEXT,
geographic_location TEXT
'''
columns = columns.strip().split('\n')
columns = [c.replace(',', '').split() for c in columns]
task_columns = [c[0] for c in columns]   # plain column names, types dropped
print(*task_columns, len(task_columns), '--\n\n', sep='\n')

# Replicons of interest; the prefix before the first '_' selects the
# reference used for coverage/CDS thresholds below.
replicons = ['D6_putative_replicon_orf42_c45699_46596', 'IncY_1__K02380', 'p0111_1__AP010962', 'IncFIB_pHCM2_1__AL513384', 'IncFIB_pKPHS1_1__CP003223', 'IncFIB_H89-PhagePlasmid_1__HG530657', 'IncFIB_pLF82-PhagePlasmid_1__CU638872']
ref_cov = {'D6': 'MF356679_D6_ref_cov', 'IncY': 'AF234172_P1_ref_cov', 'p0111': 'AF234172_P1_ref_cov', 'IncFIB': 'JQ965645_SSU5_ref_cov'}
ref_cds_n = {'D6': 'MF356679_D6_ref_CDS_N', 'IncY': 'AF234172_P1_ref_CDS_N', 'p0111': 'AF234172_P1_ref_CDS_N', 'IncFIB': 'JQ965645_SSU5_ref_CDS_N'}

table = 'Phage_like_plasmids_SSU5_P1_D6_12Nov20'
database = main + table + '.sqlite3'
print(database)

conn = sqlite3.connect(database)
cur = conn.cursor()

data = []
for rep in replicons[:]:
    print(rep)
    # D6 replicons are recorded in their own column; all others in the
    # generic enterobacteriaceae column.
    if rep[:2] == 'D6':
        rep_column = 'D6_putative_replicon_orf42'
    else:
        rep_column = 'enterobacteriaceae'
    rep_data = []
    # rep values are hard-coded constants above, so the f-string
    # interpolation is not an injection risk here.
    task = 'SELECT ' + ', '.join(task_columns) + ' FROM ' + table
    task += f" WHERE {rep_column} LIKE '%{rep}%'"
    for row in cur.execute(task):
        row = [str(r) for r in row]
        # keep only the annotation lines that actually mention this replicon
        rep_match = row[task_columns.index(rep_column)].split('\n')
        rep_match = [r for r in rep_match if rep in r]
        rep_match = '\n'.join(rep_match)
        ref = rep.split('_')[0]
        complete = row[task_columns.index('completeness')]
        slen = row[task_columns.index('slen')]
        cov = row[task_columns.index(ref_cov[ref])]
        cds_n = row[task_columns.index(ref_cds_n[ref])]
        cov = float(cov)
        # classify: grouped / not complete / chromosome / lower coverage
        reason = ''
        if complete == 'complete' and int(slen) < 2000000 and cov >= 40:
            reason = 'grouped'
        elif complete != 'complete':
            reason = 'not complete'
        elif int(slen) >= 2000000:
            reason = 'chromosome'
        elif cov < 40:
            reason = f'lower_coverage qcovs:{cov}% CDSs:{cds_n}'
        if not reason:
            print('REASON PROBLEM', row[1])
        # Shorten the annotation columns (between "Major_replicon" and
        # "strain") to one identifier token per annotation line.
        first_annot = task_columns.index('Major_replicon') + 1
        resfinder_col = task_columns.index('ResFinder')
        shorten_annot = row[first_annot:task_columns.index('strain')]
        shorten = []
        for col, s in enumerate(shorten_annot, start=first_annot):
            # BUGFIX: the original used row.index(s) to recover the column
            # position; list.index returns the FIRST cell holding the same
            # value, so duplicated cell values (e.g. 'None', '0') selected
            # the wrong token. Use the real column position instead.
            # ResFinder entries are "<phenotype> <gene>" pairs -> keep the
            # gene (second token) when present; other columns keep token 0.
            want_second = (col == resfinder_col)
            s = ['\t'.join([ss.split()[1] if want_second and len(ss.split()) > 1 else ss.split()[0]]) for ss in s.split('\n')]
            shorten.append('\n'.join(s))
        row = row[:first_annot] + shorten + row[task_columns.index('strain'):]
        new_row = [rep, rep_match, reason] + row
        rep_data.append(new_row)
    # sort by reason, then by the project-ID prefix and numeric suffix
    rep_data.sort(key=lambda x: (x[2], x[3].split('_')[0], int(x[3].split('_')[-1])))
    possible_reasons = ['grouped', 'not complete', 'chromosome', 'lower_coverage']
    for pr in possible_reasons:
        pr_filt = [r for r in rep_data if pr in r[2]]
        print(pr, len(pr_filt))
    print(len(rep_data))
    data += rep_data
    print('--\n\n\n')

print(len(data))
name = 'STable2.Distribution_of_PLP_related_replicons_26Aug20'
table2_csv = main + name + '.csv'
with open(table2_csv, 'w') as fh:
    header = '\t'.join(['Replicon', 'Replicon_match', 'Grouped_or_why_not_grouped'] + task_columns)
    fh.write(header + '\n')
    # flatten embedded newlines and normalize separators for the csv round-trip
    data = ['\t'.join(d) for d in data]
    data = [d.replace('\n', '<<AND>>').replace('**', '#').replace('--', '_').replace('__', '_').replace('__', '_') for d in data]
    fh.write('\n'.join(data) + '\n')

# csv_to_sqlite3(table2_csv, main + name + '.sqlite3')

# Reading the csv file back and restoring the flattened newlines as spaces
df_new = pd.read_csv(table2_csv, sep='\t')
df_new = df_new.replace('<<AND>>', ' ', regex=True)

# saving xlsx file (ExcelWriter.save() was removed in pandas 2.0; the context
# manager closes and saves the workbook)
with pd.ExcelWriter(main + name + '.xlsx') as writer:
    df_new.to_excel(writer, index=False)

os.remove(table2_csv)   # the intermediate csv is only a staging file
{"hexsha": "e58e36d3b2e67c49e77b0e94b567f15f0567f324", "size": 6872, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python_scripts_for_PLP_project/9_2_PLP_prepare_TableS2.py", "max_stars_repo_name": "snvlab18/phage_like_plasmids_project_2020", "max_stars_repo_head_hexsha": "f594debd821cafe7be2a8f0827379bf0c95cc5d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python_scripts_for_PLP_project/9_2_PLP_prepare_TableS2.py", "max_issues_repo_name": "snvlab18/phage_like_plasmids_project_2020", "max_issues_repo_head_hexsha": "f594debd821cafe7be2a8f0827379bf0c95cc5d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python_scripts_for_PLP_project/9_2_PLP_prepare_TableS2.py", "max_forks_repo_name": "snvlab18/phage_like_plasmids_project_2020", "max_forks_repo_head_hexsha": "f594debd821cafe7be2a8f0827379bf0c95cc5d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1186440678, "max_line_length": 231, "alphanum_fraction": 0.6786961583, "include": true, "reason": "import numpy", "num_tokens": 2268}
# Compare SGWT and rNGWF frame approximations of a signal on the simple-tree
# graph. The included setup script is expected to define W (weight matrix),
# X (node coordinates), N (number of nodes), f (graph signal), 𝚽
# (eigenvectors), 𝛌 (eigenvalues) and Q -- TODO confirm against
# setups/simpletree.jl.
cd(@__DIR__); include("setups/simpletree.jl")
gr(dpi = 200)

## build frames
# SGWT frame with 6 filters, reshaped to N x (6N); the dual frame solves the
# normal equations (S * S') \ S.
SGWT = sgwt_frame(Matrix(W); nf = 6)
SGWT = reshape(SGWT, (N, :))
SGWT_dual = (SGWT * SGWT') \ SGWT

# rNGWF built from the :ROT eigenvector distance; σ sets the kernel width
# relative to the largest distance and thres truncates small entries.
distROT = natural_eigdist(𝚽, 𝛌, Q; α = 1.0, input_format = :pmf1, distance = :ROT)
rNGWF, dic_l2x = rngwf_all_vectors(distROT, 𝚽; σ = 0.1 * maximum(distROT), thres = 0.15)
rNGWF_dual = (rNGWF * rNGWF') \ rNGWF
Γ = rngwf_lx(dic_l2x)

## (a) the signal f plotted on the graph
plt = plot(size = (600, 500))
gplot!(W, X, width = 1)
scatter_gplot!(X; marker = f, ms = 4)
plot!(frame = :none, cbar = true, xlim = [-10, 14])
savefig(plt, "../figs/simpletree_f.png")

## (b) relative approximation error vs. number of retained coefficients
rel_approx_sgwt, f_approx_sgwt = frame_approx(f, SGWT, SGWT_dual; num_kept = 3 * N)
rel_approx_rngwf, f_approx_rngwf = frame_approx(f, rNGWF, rNGWF_dual; num_kept = 3 * N)
plt = plot(0:length(rel_approx_rngwf)-1, [rel_approx_sgwt rel_approx_rngwf],
    grid = false, lw = 2, c = [:blue :green], xlab = "Number of Coefficients Retained",
    ylab = "Relative Approximation Error", yaxis = :log, lab = ["SGWT" "rNGWF"])
plot!(xguidefontsize = 14, yguidefontsize = 14, legendfontsize = 11, size = (600, 500))
savefig(plt, "../figs/simpletree_approx_f.png")
{"hexsha": "e3acf9efbf3e173dd2a03b7e176abe7ef728b671", "size": 1189, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/dissertations/htli/scripts/Figure10.10.jl", "max_stars_repo_name": "BoundaryValueProblems/MTSG.jl", "max_stars_repo_head_hexsha": "8cf8e2b3035876b5ceda45109b0847a60b581a7c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-02T18:39:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-19T15:45:17.000Z", "max_issues_repo_path": "test/dissertations/htli/scripts/Figure10.10.jl", "max_issues_repo_name": "haotian127/MultiscaleGraphSignalTransforms.jl", "max_issues_repo_head_hexsha": "85ba99e505283491ac69e979737bbb712b698a6e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-04-27T23:00:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-03T11:03:17.000Z", "max_forks_repo_path": "test/dissertations/htli/scripts/Figure10.10.jl", "max_forks_repo_name": "haotian127/MultiscaleGraphSignalTransforms.jl", "max_forks_repo_head_hexsha": "85ba99e505283491ac69e979737bbb712b698a6e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-24T21:46:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T04:32:31.000Z", "avg_line_length": 38.3548387097, "max_line_length": 91, "alphanum_fraction": 0.6576955425, "num_tokens": 445}
import os,shutil,sys
import numpy as np
from mpi4py import MPI
import pandas as pd
from collections import OrderedDict
from pypospack.pyposmat.data import PyposmatConfigurationFile
from pypospack.pyposmat.data import PyposmatDataAnalyzer
# from pypospack.pyposmat.engines import PyposmatMonteCarloSampler
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.pyposmat.data.cluster_analysis import PyposmatClusterAnalysis
# ---- imports for PyposmatMonteCarloSampler
import time,sys,os,copy,shutil,importlib
from collections import OrderedDict
import numpy as np
import scipy.stats
from pypospack.kde import Chiu1999_h
# NOTE(review): the PyposmatEngine imported here is shadowed by the class
# definition below -- confirm which one callers are expected to get.
from pypospack.pyposmat.engines import PyposmatEngine
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.task.lammps import LammpsSimulationError
from pypospack.task.task_manager import PypospackTaskManagerError
from pypospack.potential import PotentialObjectMap
from numpy.linalg import LinAlgError
# ---- additional imports for PyposmatMonteCarloSampler
from pypospack.qoi import QoiManager
from pypospack.task import TaskManager
from pypospack.task.lammps import LammpsSimulationError
#---- imports for IterativeSampler
from pypospack.pyposmat.data.logfile import PyposmatLogFile
# --- import for PyposmatEngine
from pypospack.pyposmat.engines.mc_sampler import PyposmatBadParameterError


class PyposmatEngine(object):
    """Evaluation engine: runs simulation tasks for a parameter set and scores QOIs.

    Args:
        filename_in(str): path of the pyposmat configuration file.
        filename_out(str): path of the results file to write.
        base_directory(str): This is the base directory from which the
            PyposmatEngine will create and run simulations.  By default this
            is set to None, which means it will use the current working
            directory as the base directory.
        fullauto(bool): if True, configure() is called from the constructor.

    Attributes:
        pyposmat_filename_in(str)
        pyposmat_filename_out(str)
        base_directory(str)
        rank_directory(str): This reflects the MPI rank of the processor
            that the PyposmatEngine is running on.  If there is no MPI
            available, this is automatically set to rank0000.
        configuration(pypospack.pyposmat.PyposmatConfigurationFile)
        qoi_manager(pypospack.qoi.QoiManager)
        task_manager(pypospack.task.TaskManager)
    """

    def __init__(self,
            filename_in = 'pypospack.config.in',
            filename_out = 'pypospack.results.out',
            base_directory = None,
            fullauto = False):
        assert isinstance(filename_in,str)
        assert isinstance(filename_out,str)

        self.pyposmat_filename_in = filename_in
        self.pyposmat_filename_out = filename_out

        self.base_directory = None
        self.rank_directory = None
        self.configuration = None
        self.qoi_manager = None
        self.task_manager = None

        if base_directory is None:
            self.base_directory = os.getcwd()
        elif isinstance(base_directory,str):
            self.base_directory = base_directory
        else:
            msg_err = "base_directory has to be a string"
            raise ValueError(msg_err)

        if fullauto:
            self.configure()

    @property
    def structures(self):
        """(collections.OrderedDict)"""
        return self.configuration.structures

    @property
    def potential(self):
        """(collections.OrderedDict)"""
        return self.configuration.potential

    def configure(self):
        """Create directories, read the configuration and build managers.

        When writing a new PypospackEngine this method will likely have
        to be modified.
        """
        self.create_base_directories()
        self.read_configuration_file()
        self.configure_qoi_manager()
        self.configure_task_manager()

    def create_base_directories(self,base_directory=None):
        """Resolve self.base_directory (creating it if needed) and the rank directory."""
        assert isinstance(base_directory,str) or base_directory is None

        # <-------- determine the base directory.
        if base_directory is None:
            if self.base_directory is None:
                self.base_directory = os.getcwd()
        elif isinstance(base_directory,str):
            self.base_directory = base_directory
        else:
            msg_err = "the base directory must be a string"
            raise ValueError(msg_err)

        # <-------- create the base directory if it does not exist
        if not os.path.exists(self.base_directory):
            # BUGFIX: os.mkdirs does not exist (AttributeError at runtime);
            # os.makedirs is the recursive directory-creation function.
            os.makedirs(self.base_directory)

        # <-------- the rank directory is determined by the MPI rank
        #           this is not implemented yet
        if self.rank_directory is None:
            _rank_directory = "rank0"
            self.rank_directory = os.path.join(
                    self.base_directory,
                    _rank_directory)

    def read_configuration_file(self,filename=None):
        """Read the configuration file into self.configuration."""
        assert isinstance(filename,str) or filename is None

        if filename is None:
            _filename_in = self.pyposmat_filename_in
        else:
            _filename_in = filename
            self.pyposmat_filename_in = filename

        self.configuration = PyposmatConfigurationFile()
        self.configuration.read(_filename_in)

    def configure_qoi_manager(self,qois=None):
        """Build the QoiManager from `qois` or from the configuration file."""
        # BUGFIX: the original only assigned _qois when qois was None, so
        # passing an explicit qois argument raised NameError.
        if qois is None:
            _qois = self.configuration.qois
        else:
            _qois = qois
        self.qoi_manager = QoiManager(qoi_database=_qois,fullauto=True)

    def configure_task_manager(self):
        """Build the TaskManager from the QOI-derived tasks and structures."""
        # <-------- local variables
        _base_directory = self.base_directory
        _tasks = self.qoi_manager.tasks
        _structures = self.structures

        # <-------- configure task manager
        self.task_manager = TaskManager(
                base_directory=_base_directory)
        self.task_manager.configure(
                tasks = _tasks,
                structures = _structures)

    def evaluate_parameter_set(self,parameters):
        """Run all tasks for `parameters` and return an OrderedDict with
        'parameters', 'qois' and 'errors'.

        Raises:
            LammpsSimulationError: propagated from the task manager.
        """
        self.configure_task_manager()
        _parameters = copy.deepcopy(parameters)
        _potential = copy.deepcopy(self.configuration.potential)
        try:
            self.task_manager.evaluate_tasks(
                    parameters=_parameters,
                    potential=_potential)
        except LammpsSimulationError:
            # known failure mode (e.g. neighbor list overflow); let the
            # caller decide how to handle it
            raise
        except Exception:
            # unexpected failure: dump diagnostic state, then re-raise
            print("--- FATAL ERROR ---")
            print("self.configuration.potential:")
            for k,v in self.configuration.potential.items():
                print("\t",k,'=',v)
            print("current_parameter_set:")
            for k,v in _parameters.items():
                print("\t",k,'=',v)
            print("--- END ERROR INFO ---")
            print(type(self.configuration.potential))
            raise
        else:
            # send the results from the task calculations to calculate QOIs
            _task_results = self.task_manager.results
            self.qoi_manager.calculate_qois(
                    task_results=_task_results)

            # populate qoi values
            _qoi_results = OrderedDict()
            for k_qoi,v_qoi in self.qoi_manager.qois.items():
                _qoi_val = v_qoi['qoi_val']
                _qoi_results[k_qoi] = _qoi_val

            # populate errors
            _qoi_errors = OrderedDict()
            for k_qoi,v_qoi in self.qoi_manager.qois.items():
                _qoi_error_name = '{}.{}'.format(k_qoi,'err')
                _qoi_error = v_qoi['qoi_err']
                _qoi_errors[_qoi_error_name] = _qoi_error

            _results = OrderedDict()
            _results['parameters'] = copy.deepcopy(_parameters)
            _results['qois'] = copy.deepcopy(_qoi_results)
            _results['errors'] = copy.deepcopy(_qoi_errors)
        return _results


class PyposmatClusterSampler(PyposmatEngine):
    """Engine that clusters an existing result set and KDE-samples per cluster."""

    def __init__(self,
            filename_in='pyposmat.config.in',
            filename_out='pyposmat.results.out',
            mpi_comm=None,
            mpi_rank=None,
            mpi_size=None,
            base_directory=None,
            o_logger=None):
        PyposmatEngine.__init__(self,
                filename_in=filename_in,
                filename_out=filename_out,
                base_directory=base_directory,
                fullauto=False)
        self.PYPOSMAT_CLUSTER_FILENAME_FORMAT = "pyposmat.cluster.{}.out"

        # setup possible mpi interaction
        self.mpi_rank = None
        self.mpi_size = None
        self._set_mpi_comm_world(mpi_rank, mpi_size)

        self.pyposmat_data_in_filename = None
        self.pyposmat_data_out_filename = filename_out
        self.pyposmat_data_bad_filename = 'pypospack.results.bad'

        # for cluster analysis
        self.pyposmat_configuration_fn = filename_in

        if base_directory is None:
            self.base_directory = os.getcwd()

        # setup logging facility; NOTE: _configure_logging is a stub that
        # raises NotImplementedError, so a PyposmatLogFile is effectively
        # required.
        if o_logger is None:
            # if logging facility is not provided, this implements a stub
            # logging configuration
            self._configure_logging()
        else:
            assert type(o_logger) is PyposmatLogFile
            self.log = o_logger

    def _configure_logging(self):
        # stub: default logging has not been implemented yet
        raise NotImplementedError()

    @property
    def configuration_fn(self):
        """(str) path of the pyposmat configuration file"""
        return self.pyposmat_configuration_fn

    @property
    def data_fn(self):
        """(str) path of the input data file"""
        return self.pyposmat_data_in_filename

    def configure_pyposmat_datafile_in(self,filename):
        """Set the input data file path and read it into self.data."""
        self.pyposmat_data_in_filename = filename
        self.pyposmat_datafile_in = PyposmatDataFile(filename)
        self.data = PyposmatDataFile()
        self.data.read(filename)

    def configure_pyposmat_datafile_out(self,filename=None):
        """Set the output data file path (defaults to the constructor value)."""
        if filename is not None:
            assert type(filename) is str
            self.pyposmat_data_out_filename = filename
        self.pyposmat_datafile_out = PyposmatDataFile(filename)

    def read_configuration_file(self,filename=None):
        """Read the configuration and derive parameter/qoi/error name lists."""
        if filename is not None:
            _filename = filename
            self.pyposmat_configuration_fn = filename
        else:
            _filename = self.pyposmat_configuration_fn

        try:
            PyposmatEngine.read_configuration_file(self,filename=_filename)
        except FileNotFoundError as e:
            print("Cannot read filename:")
            print("    filename={}".format(filename))
            print("    o.pyposmat_configuration_fn={}".format(self.pyposmat_configuration_fn))
            print("    _filename={}".format(_filename))
            raise e

        # get information from the PyposmatConfigurationFile object
        self.structure_directory = self.configuration.structures['structure_directory']
        self.parameter_names = [p for p in self.configuration.sampling_distribution]
        self.qoi_names = [k for k in self.configuration.qois]
        self.error_names = ['{}.err'.format(k) for k in self.qoi_names]
        self.parameter_distribution_definition =\
                self.configuration.sampling_distribution
        # free parameters are those not defined via an 'equals' constraint
        try:
            self.free_parameter_names = [k for k,v in self.parameter_distribution_definition.items() if v[0] != 'equals']
        except KeyError as e:
            print(self.parameter_distribution_definition.items())
            raise

        if self.configuration.sampling_constraints is not None:
            self.parameter_constraints = copy.deepcopy(self.configuration.sampling_constraints)
        else:
            self.parameter_constraints = OrderedDict()

        self.constrained_parameter_names = []
        for p in self.parameter_names:
            if p not in self.free_parameter_names:
                self.constrained_parameter_names.append(p)

    def _build_cluster_analysis(self, cluster_args):
        """Run one full cluster-analysis pipeline and return the analysis object."""
        o = PyposmatClusterAnalysis.init_from_ordered_dict(cluster_args, o_logger=self.log)
        o.preprocess_data(cluster_args)
        o.calculate_manifold(cluster_args)
        o.calculate_kNN_analysis(cluster_args)
        o.calculate_clusters(cluster_args)
        return o

    # this should only ever be called by rank 0
    def write_cluster_file(self, i_iteration, filename=None):
        """Cluster the data file of iteration `i_iteration` and write the
        clustered data to ../data/pyposmat.cluster.<i>.out."""
        # arg: filename
        if filename is None:
            _filename = self.pyposmat_data_in_filename
        else:
            _filename = filename
        self.data = PyposmatDataFile()
        self.data.read(_filename)

        # process the arguments required for clustering; fall back to the
        # built-in defaults when the configuration does not provide any
        if 'cluster_args' not in self.configuration.sampling_type[i_iteration]:
            cluster_args = self.get_clustering_parameters(
                    configuration_fn=self.configuration_fn,
                    data_fn=_filename)
        else:
            cluster_args = self.configuration.sampling_type[i_iteration]['cluster_args']
        cluster_args['configuration_fn'] = os.path.join(os.getcwd(), self.configuration_fn)
        cluster_args['data_fn'] = os.path.join(os.getcwd(), _filename)

        # determine the _cluster_filename
        _cluster_filename = self.PYPOSMAT_CLUSTER_FILENAME_FORMAT.format(i_iteration)
        # maybe the data dir should be a property of the class
        _cluster_filename = os.path.join('..', 'data', _cluster_filename)

        # do cluster analysis, rebuilding until a valid partition is found
        # (t-SNE + DBSCAN are stochastic, so retrying can succeed)
        obj_cluster_analysis = self._build_cluster_analysis(cluster_args)
        while True:
            self.log.write("Checking the partition for errors...")
            if obj_cluster_analysis.isValidPartition():
                self.log.write("The partition is valid")
                break
            self.log.write("Partition Error encountered, rebuilding partition...")
            obj_cluster_analysis = self._build_cluster_analysis(cluster_args)

        self.data.df = obj_cluster_analysis.data.df
        self.data.write(filename=_cluster_filename)

    def run_simulations(self, i_iteration, n_samples=None, filename=None):
        """Dispatch the sampling run for one iteration.

        Raises:
            KeyError: when n_samples is not given and the configuration has
                no 'n_samples_per_cluster' entry.
            ValueError: for an unknown sampling type.
        """
        #----------------------------------------------------------------------
        # process the arguments of the method first
        # arg: i_iteration
        i = i_iteration

        # arg: n_samples
        if n_samples is not None:
            _n_samples = n_samples
        else:
            try:
                _n_samples = self.configuration.sampling_type[i]['n_samples_per_cluster']
            except KeyError as e:
                print("must use \"n_samples_per_cluster\" keyword to describe the number of simulations per cluster")
                raise e

        # arg: filename
        if filename is None:
            _filename = self.pyposmat_data_in_filename
        else:
            _filename = filename
        # end argument processing
        #----------------------------------------------------------------------

        # read in the datafile
        self.data = PyposmatDataFile()
        self.data.read(_filename)

        # determine the sampling type
        _sampling_type = self.configuration.sampling_type[i]['type']
        if _sampling_type == 'kde_w_clusters':
            _cluster_filename = self.PYPOSMAT_CLUSTER_FILENAME_FORMAT.format(i_iteration)
            _cluster_filename = os.path.join('..', 'data', _cluster_filename)
            self._run_mc_cluster_sampling(
                    i=i,
                    _n_samples=_n_samples,
                    _cluster_filename=_cluster_filename
            )
        else:
            raise ValueError(
                    'unknown sampling type:{}'.format(
                        _sampling_type
                    )
            )

    def _run_mc_cluster_sampling(self, i, _n_samples, _cluster_filename):
        """KDE-sample `_n_samples` parameter sets from every cluster id."""
        # get unique cluster ids
        cluster_ids = set(self.data.df['cluster_id'])
        self.log.write("rank={r} cluster_ids={c}".format(r=self.mpi_rank, c=cluster_ids))
        for cluster_id in cluster_ids:
            mc_sampler = PyposmatMonteCarloSampler(
                    mpi_rank = self.mpi_rank,
                    mpi_size = self.mpi_size,
                    log=self.log)
            # This hack exists because the configuration file format does not
            # yet describe clustering+sampling in a way the
            # PyposmatMonteCarloSampler understands, so the per-cluster run is
            # rewritten as a plain 'kde' iteration here.
            mc_sampler.configuration = PyposmatConfigurationFile()
            mc_sampler.configuration.read(self.configuration_fn)
            mc_sampler.configuration.sampling_type[i] = OrderedDict()
            mc_sampler.configuration.sampling_type[i]['type'] = 'kde'
            mc_sampler.configuration.sampling_type[i]['n_samples'] = _n_samples
            # NOTE(review): read_configuration_file below replaces the
            # configuration object just built above -- confirm the intended
            # order of these calls.
            mc_sampler.create_base_directories()
            mc_sampler.read_configuration_file(self.configuration_fn)
            mc_sampler.configuration.structures['structure_directory'] = os.path.join(
                    '..', mc_sampler.configuration.structures['structure_directory']
            )
            mc_sampler.configure_qoi_manager()
            mc_sampler.configure_task_manager()
            mc_sampler.configure_pyposmat_datafile_out()
            mc_sampler.print_structure_database()
            mc_sampler.print_sampling_configuration()
            mc_sampler.print_initial_parameter_distribution()
            self.log.write("KDE sampling in MCSampler rank={r} cluster_id={c}".format(r=self.mpi_rank, c=cluster_id))
            mc_sampler.run_kde_sampling(n_samples=_n_samples, filename_in=_cluster_filename, cluster_id=cluster_id)

    def get_clustering_parameters(
            self,
            configuration_fn,
            data_fn
    ):
        """Return the default clustering pipeline arguments
        (standard scaling -> t-SNE manifold -> kNN -> DBSCAN)."""
        d = OrderedDict()
        d['configuration_fn'] = configuration_fn
        d['data_fn'] = data_fn
        d['include_parameters'] = True
        d['include_qois'] = True
        d['include_errors'] = False

        d['preprocessing'] = OrderedDict()
        d['preprocessing']['type'] = 'standard_scaler'
        d['preprocessing']['args'] = OrderedDict()
        d['preprocessing']['args']['copy'] = True
        d['preprocessing']['args']['with_mean'] = True
        d['preprocessing']['args']['with_std'] = True

        d['manifold'] = OrderedDict()
        d['manifold']['type'] = 'tsne'
        d['manifold']['args'] = OrderedDict()
        d['manifold']['args']['n_components'] = 2
        d['manifold']['args']['perplexity'] = 30
        d['manifold']['args']['early_exaggeration'] = 12
        d['manifold']['args']['learning_rate'] = 200
        d['manifold']['args']['n_iter'] = 5000
        # BUGFIX: the next four entries had trailing commas, which made them
        # one-element tuples instead of the scalar TSNE arguments.
        d['manifold']['args']['n_iter_without_progress'] = 300
        d['manifold']['args']['min_grad_norm'] = 1e-7
        # d['manifold']['args']['metric']='euclidean'
        d['manifold']['args']['init'] = 'pca'
        d['manifold']['args']['verbose'] = 0
        d['manifold']['args']['random_state'] = None
        # method='barnes_hut'
        # angle=0.5

        d['neighbors'] = OrderedDict()
        d['neighbors']['type'] = 'ball_tree'
        d['neighbors']['kNN'] = 4
        d['neighbors']['args'] = OrderedDict()
        d['neighbors']['args']['leaf_size'] = 40
        d['neighbors']['args']['metric'] = 'minkowski'

        d['cluster'] = OrderedDict()
        d['cluster']['type'] = 'dbscan'
        d['cluster']['args'] = OrderedDict()
        d['cluster']['args']['eps'] = OrderedDict()
        d['cluster']['args']['eps']['NN'] = 3
        d['cluster']['args']['eps']['percentile'] = .99
        d['cluster']['args']['min_samples'] = 10
        d['cluster']['args']['metric'] = 'euclidean'
        d['cluster']['args']['metric_params'] = None
        d['cluster']['args']['algorithm'] = 'auto'
        d['cluster']['args']['leaf_size'] = 30
        d['cluster']['args']['p'] = None
        # add conditional here to check for custom clustering params in the config
        # pseudo: if 'clustering_params' in self.configuration:
        # pseudo:     for k,v in self.configuration['clustering_params'].items():
        # pseudo:         d[k] = v
        # not yet implemented in configuration
        return d

    def _set_mpi_comm_world(self,mpi_rank,mpi_size):
        """Store the MPI rank/size pair; both must be given or both omitted.

        Raises:
            ValueError: when exactly one of the two values is None.
        """
        # the two original branches assigned identically; only the mixed
        # None/non-None case is an error
        if (mpi_rank is None) != (mpi_size is None):
            s = "Did not initialize with valid mpi information"
            raise ValueError(s)
        self.mpi_rank = mpi_rank
        self.mpi_size = mpi_size


class PyposmatMonteCarloSampler(PyposmatEngine):
    def __init__(self,
            log,
            filename_in='pypospack.config.in',
            # filename_out='pypospack.results.out',
            filename_out='pyposmat.results.out',
            mpi_rank=None,
            mpi_size=None,
            base_directory=None):
        assert isinstance(filename_in,str)
        assert isinstance(filename_out,str)
        assert type(base_directory) in [str,type(None)]
        PyposmatEngine.__init__(self,
                filename_in=filename_in,
                filename_out=filename_out,
                base_directory=base_directory,
                fullauto=False)
        self.mpi_rank=mpi_rank
        self.mpi_size=mpi_size
        self.pyposmat_data_in_filename = None
self.pyposmat_data_out_filename = filename_out self.pyposmat_data_bad_filename = 'pypospack.results.bad' self.log = log def _log(self,str_msg): print(str_msg) def configure_pyposmat_datafile_in(self,filename): self.pyposmat_data_in_filename = filename self.pyposmat_datafile_in = PyposmatDataFile(filename) def configure_pyposmat_datafile_out(self,filename=None): if filename is not None: assert type(filename) is str self.pyposmat_data_out_filename = filename self.pyposmat_datafile_out = PyposmatDataFile(filename) def read_configuration_file(self,filename=None): PyposmatEngine.read_configuration_file(self,filename=filename) self.structure_directory = self.configuration.structures['structure_directory'] self.n_iterations = self.configuration.sampling_type['n_iterations'] self.parameter_names = [p for p in self.configuration.sampling_distribution] self.qoi_names = [k for k in self.configuration.qois] self.error_names = ['{}.err'.format(k) for k in self.qoi_names] self.parameter_distribution_definition =\ self.configuration.sampling_distribution try: self.free_parameter_names = [k for k,v in self.parameter_distribution_definition.items() if v[0] != 'equals'] except KeyError as e: print(self.parameter_distribution_definition.items()) raise if self.configuration.sampling_constraints is not None: self.parameter_constraints = copy.deepcopy(self.configuration.sampling_constraints) else: self.parameter_constraints = OrderedDict() self.constrained_parameter_names = [] for p in self.parameter_names: if p not in self.free_parameter_names: self.constrained_parameter_names.append(p) def run_simulations(self,i_iteration,n_samples=None,filename=None): i = i_iteration _sampling_type = self.configuration.sampling_type[i]['type'] _n_samples = self.configuration.sampling_type[i]['n_samples'] if n_samples is not None: _n_samples = n_samples if _sampling_type == 'parametric': self.run_parameteric_sampling(n_samples=_n_samples) elif _sampling_type == 'kde': if filename is None: raise 
ValueError('cannot do kde sampling with out filename') self.run_kde_sampling(n_samples=_n_samples,filename_in=filename) elif _sampling_type == 'from_file': if filename is None: raise ValueError('cannot do filesampling without file') self.run_file_sampling(filename) else: raise ValueError( 'unknown sampling type:{}'.format( _sampling_type ) ) if self.mpi_rank == 0: print(i_iteration,_n_samples,_sampling_type) def run_parameteric_sampling(self,n_samples): _rv_generators = OrderedDict() for p in self.free_parameter_names: distribution_type = self.parameter_distribution_definition[p][0] if distribution_type == 'uniform': _a = self.parameter_distribution_definition[p][1]['a'] _b = self.parameter_distribution_definition[p][1]['b'] _loc = _a _scale = _b-_a _rv_generators[p] = scipy.stats.uniform(loc=_loc,scale=_scale) else: raise ValueError('unknown distribution type: {}'.format( distribution_type)) self.pyposmat_datafile_out.write_header_section( filename=self.pyposmat_data_out_filename, parameter_names=self.parameter_names, qoi_names=self.qoi_names, error_names=self.error_names) time_start_iteration = time.time() _n_errors = 0 for i_sample in range(n_samples): # generate parameter set _parameters = OrderedDict([(p,None) for p in self.parameter_names]) # generate free parameters for p in self.free_parameter_names: _parameters[p] = _rv_generators[p].rvs(size=1)[0] # generate constrained parameters for p in self.constrained_parameter_names: if self.parameter_distribution_definition[p][0] == 'equals': if type(self.parameter_distribution_definition[p][1]) is not list: _str_eval = str(self.parameter_distribution_definition[p][1]) for fp in self.free_parameter_names: if fp in _str_eval: _str_eval = _str_eval.replace(fp,str(_parameters[fp])) _parameters[p] = eval(_str_eval) # generate wierd things for p in self.constrained_parameter_names: if self.parameter_distribution_definition[p][0] == 'equals': if type(self.parameter_distribution_definition[p][1]) is list: # required for 
EAM potentials to calculate dens_max for embedding function if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density': a0 = self.parameter_distribution_definition[p][1][1] latt = self.parameter_distribution_definition[p][1][2] _parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters) try: # check constraints for k,v in self.parameter_constraints.items(): _eval_str = v for pn,pv in _parameters.items(): _eval_str = _eval_str.replace(pn,str(pv)) try: _is_constraint_ok = eval(_eval_str) except NameError as e: _str = str(e) _regex_str = "name \'d_NiNi_r0\' is not defined" _err_msg = "BadQoiConstraint:\n" _err_msg += "\t{}".format(k) _err_msg += "\t{}".format(v) _err_msg += "\t{}".format(_eval_str) self._log(_err_msg) raise if eval(_eval_str) is False: raise PyposmatBadParameterError() _results = self.evaluate_parameter_set(parameters=_parameters) except PyposmatBadParameterError as e: _n_errors += 1 except LammpsSimulationError as e: _n_errors += 1 except PypospackTaskManagerError as e: _n_errors += 1 else: self.pyposmat_datafile_out.write_simulation_results( filename=self.pyposmat_data_out_filename, sim_id=i_sample, results=_results) finally: # print out summaries every 10 solutions if (i_sample+1)%10 == 0: n_samples_completed = i_sample+1 time_end = time.time() time_total = time_end-time_start_iteration avg_time = time_total/n_samples_completed _str_msg = '{} samples completed in {:.4f}s. Avg_time = {:.4f}. 
n_errors = {}'.format( n_samples_completed, time_total, avg_time, _n_errors) print(_str_msg) sys.stdout.flush() def run_kde_sampling(self,n_samples,filename_in,cluster_id=None): _datafile_in = None if cluster_id is None: _datafile_in = PyposmatDataFile() _datafile_in.read(filename_in) else: _datafile_in = PyposmatDataFile() _datafile_in.read(filename_in) # redefining the dataframe by subselecting the cluster_id we are interested in _datafile_in.df = _datafile_in.df.loc[ _datafile_in.df['cluster_id'] == cluster_id ] if cluster_id is None: _X = _datafile_in.df[self.free_parameter_names].values.T else: _X = _datafile_in.df[self.free_parameter_names].loc[_datafile_in.df['cluster_id'] == cluster_id].values.T self.log.write("cluster_id {c} _X.shape={x}".format(c=cluster_id, x=_X.shape)) try: _h = Chiu1999_h(_X) kde_bw_type = 'Chiu1999' except LinAlgError as e: print('filename:{}'.format(filename_in)) raise d = OrderedDict() d['kde_bandwidth'] = OrderedDict() d['kde_bandwidth']['type'] = kde_bw_type d['kde_bandwidth']['h'] = _h _rv_generator = scipy.stats.gaussian_kde(_X,_h) print('Chiu1999_h:{}'.format(_h)) self.pyposmat_datafile_out.df = copy.deepcopy(_datafile_in.df) self.pyposmat_datafile_out.write_header_section( filename=self.pyposmat_data_out_filename, parameter_names=self.parameter_names, qoi_names=self.qoi_names, error_names=self.error_names) time_start_iteration = time.time() _n_errors = 0 for i_sample in range(n_samples): # generate parameter set _parameters = OrderedDict([(p,None) for p in self.parameter_names]) _free_parameters = _rv_generator.resample(1) for i,v in enumerate(self.free_parameter_names): _parameters[v] = float(_free_parameters[i,0]) # generate constrained parameters for p in self.constrained_parameter_names: if self.parameter_distribution_definition[p][0] == 'equals': if type(self.parameter_distribution_definition[p][1]) is not list: _str_eval = str(self.parameter_distribution_definition[p][1]) for fp in self.free_parameter_names: if fp in 
_str_eval: _str_eval = _str_eval.replace(fp,str(_parameters[fp])) _parameters[p] = eval(_str_eval) # generate wierd things for p in self.constrained_parameter_names: if self.parameter_distribution_definition[p][0] == 'equals': if type(self.parameter_distribution_definition[p][1]) is list: if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density': a0 = self.parameter_distribution_definition[p][1][1] latt = self.parameter_distribution_definition[p][1][2] _parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters) try: # check constraints for k,v in self.parameter_constraints.items(): _eval_str = v for pn,pv in _parameters.items(): _eval_str = _eval_str.replace(pn,str(pv)) if eval(_eval_str) is False: raise PyposmatBadParameterError() _results = self.evaluate_parameter_set(parameters=_parameters) except PyposmatBadParameterError as e: _n_errors += 1 except LammpsSimulationError as e: _n_errors += 1 except PypospackTaskManagerError as e: _n_errors += 1 else: self.pyposmat_datafile_out.write_simulation_results( filename=self.pyposmat_data_out_filename, sim_id=i_sample, cluster_id=cluster_id, results=_results) finally: # print out summaries every 10 solutions if (i_sample+1)%10 == 0: n_samples_completed = i_sample+1 time_end = time.time() time_total = time_end-time_start_iteration avg_time = time_total/n_samples_completed _str_msg = '{} samples completed in {:.4f}s. Avg_time = {:.4f}. 
n_errors = {}'.format( n_samples_completed, time_total, avg_time, _n_errors) print(_str_msg) self.log.write(_str_msg) def run_file_sampling(self,filename_in): _datafile_in = PyposmatDataFile(filename=filename_in) _datafile_in.read() # configure random number generator self.pyposmat_datafile_out.write_header_section( filename=self.pyposmat_data_out_filename, parameter_names=self.parameter_names, qoi_names=self.qoi_names, error_names=self.error_names) time_start_iteration = time.time() if self.mpi_rank is None: self.mpi_rank = 0 if self.mpi_size is None: self.mpi_size = 1 _n_errors = 0 i_sample = 0 for row in _datafile_in.df.iterrows(): if self.mpi_rank != row[0]%self.mpi_size: continue _parameters = OrderedDict([(p,row[1][p]) for p in self.parameter_names]) # generate wierd things for p in self.constrained_parameter_names: if self.parameter_distribution_definition[p][0] == 'equals': if type(self.parameter_distribution_definition[p][1]) is list: if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density': a0 = self.parameter_distribution_definition[p][1][1] latt = self.parameter_distribution_definition[p][1][2] _parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters) try: # check constraints for k,v in self.parameter_constraints.items(): _eval_str = v for pn,pv in _parameters.items(): _eval_str = _eval_str.replace(pn,str(pv)) if eval(_eval_str) is False: raise PyposmatBadParameterError() _results = self.evaluate_parameter_set(parameters=_parameters) except PyposmatBadParameterError as e: _n_errors += 1 except LammpsSimulationError as e: _n_errors += 1 except PypospackTaskManagerError as e: _n_errors += 1 else: self.pyposmat_datafile_out.write_simulation_results( filename=self.pyposmat_data_out_filename, sim_id=i_sample, results=_results) finally: # print out summaries every 10 solutions i_sample = i_sample+1 if (i_sample)%10 == 0: n_samples_completed = i_sample time_end = time.time() time_total = time_end-time_start_iteration avg_time 
= time_total/n_samples_completed _str_msg = '{} samples completed in {:.4f}s. Avg_time = {:.4f}. n_errors = {}'.format( n_samples_completed, time_total, avg_time, _n_errors) print('rank{}:'.format(self.mpi_rank)+_str_msg) def calculate_equilibrium_density(self,a0,latt,parameters): _parameters = OrderedDict() for k,v in parameters.items(): if k.startswith('d_'): _parameters[k[2:]] = v s = k[2:].split('_')[0] _potential_type = self.configuration.potential['density_type'] _symbols = self.configuration.potential['symbols'] _module_name,_class_name = PotentialObjectMap( potential_type=_potential_type) try: _module = importlib.import_module(_module_name) _class = getattr(_module,_class_name) _dens_potential = _class(symbols=_symbols) except: raise if latt == 'fcc': d = OrderedDict([ ('1NN',2/(2**0.5)*a0), ('2NN',1.000*a0), ('3NN',1.225*a0)]) Z= OrderedDict([ ('1NN',12), ('2NN',6), ('3NN',24)]) rcut = (d['2NN']+d['3NN'])/2. rmax = 10. r = np.linspace(1,10,5000)*rmax/10 rho = _dens_potential.evaluate(r,_parameters,rcut) rho_e = 0 for m in Z: if d[m] < rcut: rho_e += Z[m]*np.interp(d[m],r,rho[s]) return rho_e def print_structure_database(self): print(80*'-') print('{:^80}'.format('STRUCTURE DATABASE')) print(80*'-') print('structure_directory:{}'.format(self.structure_directory)) print('') print('{:^20} {:^20}'.format('name','filename')) print('{} {}'.format(20*'-',20*'-')) for k,v in self.structures['structures'].items(): print('{:20} {:20}'.format(k,v)) def print_sampling_configuration(self): print(80*'-') print('{:^80}'.format('SAMPLING CONFIGURATION')) print(80*'-') print('{:^10} {:^10} {:^20}'.format( 'iteration', 'n_samples', 'sampling_type')) print('{} {} {}'.format(10*'-',10*'-',20*'-')) for i in range(self.n_iterations): _sample_type = self.configuration.sampling_type[i]['type'] if _sample_type == 'kde_w_clusters': _n_samples = self.configuration.sampling_type[i]['n_samples_per_cluster'] else: _n_samples = self.configuration.sampling_type[i]['n_samples'] 
print('{:^10} {:^10} {:^20}'.format(i,_n_samples,_sample_type)) def print_initial_parameter_distribution(self): print(80*'-') print('{:80}'.format('INITIAL PARAMETER DISTRIBUTION')) print(80*'-') for p in self.parameter_distribution_definition: if p in self.free_parameter_names: str_free = 'free' print('{:^20} {:^10} {:^10} {:^10} {:^10}'.format( p, str_free, self.parameter_distribution_definition[p][0], self.parameter_distribution_definition[p][1]['a'], self.parameter_distribution_definition[p][1]['b'])) else: str_free = 'not_free' print('{:^20} {:^10}'.format(p,str_free)) class PyposmatIterativeSampler(object): def __init__(self, configuration_filename,is_restart=False): self.RANK_DIR_FORMAT = 'rank_{}' self.mpi_comm = None self.mpi_rank = None self.mpi_size = None self.mpi_nprocs = None self.n_iterations = None self.rv_seed = None self.rv_seeds = None self.configuration_filename = configuration_filename self.configuration = None self.mc_sampler = None self.root_directory = os.getcwd() self.data_directory = 'data' self.is_restart = is_restart self.start_iteration = 0 self.log_fn = os.path.join(self.root_directory, self.data_directory, 'pyposmat.log') self.log = PyposmatLogFile(filename=self.log_fn) def run_restart(self): if self.configuration is None: self.configuration = PyposmatConfigurationFile() self.configuration.read(self.configuration_filename) # determine if there was a seed file _init_fn = self.find_initial_parameters_files() # get contents of the data directory if it exists _data_dir = os.path.join(self.root_directory,self.data_directory) self.i_iterations, _data_dir_fns = self.analyze_rank_directories( data_dir=_data_dir) # get contents of the rank directories _root_dir = self.root_directory n_ranks, _rank_data_fns = self.analyze_rank_directories( root_dir=_root_dir) if self.mpi_rank == 0: pass pass def run_all(self): self.setup_mpi_environment() self.determine_rv_seeds() MPI.COMM_WORLD.Barrier() self.start_iteration = 0 for i in 
range(self.start_iteration,self.n_iterations): if self.mpi_rank == 0: self.log.write(80*'-') self.log.write('{:80}'.format('BEGIN ITERATION {}/{}'.format( i+1, self.n_iterations))) self.log.write(80*'-') MPI.COMM_WORLD.Barrier() self.run_simulations(i) self.log.write("rank {} simulations complete".format(self.mpi_rank)) MPI.COMM_WORLD.Barrier() if self.mpi_rank == 0: self.log.write("ALL SIMULATIONS COMPLETE FOR ALL RANKS") if self.mpi_rank == 0: self.log.write('merging files...') self.merge_files(i) self.log.write('analyzing results...') self.analyze_results(i) MPI.COMM_WORLD.Barrier() self.log.write(80*'-') self.log.write('JOBCOMPLETE') def run_simulations(self,i_iteration): self.rank_directory = self.RANK_DIR_FORMAT.format( self.mpi_rank) # if the directory exists delete it if os.path.isdir(self.rank_directory): shutil.rmtree(self.rank_directory) os.makedirs(self.rank_directory) # change execution context for this rank # this provides a directory for each worker directory so that the # disk IO writes don't conflict os.chdir(self.rank_directory) _config_filename = os.path.join( self.root_directory, self.configuration_filename) _results_filename = os.path.join( self.root_directory, self.rank_directory, 'pyposmat.results.out') # set random seed np.random.seed(self.rv_seeds[self.mpi_rank,i_iteration]) # initialize() self.mc_sampler = PyposmatMonteCarloSampler( filename_in = _config_filename, filename_out = _results_filename, mpi_rank = self.mpi_rank, mpi_size = self.mpi_size, log=self.log) self.mc_sampler.create_base_directories() self.mc_sampler.read_configuration_file() _structure_dir = self.mc_sampler.configuration.structures['structure_directory'] self.mc_sampler.configuration.structures['structure_directory'] = \ os.path.join('..',_structure_dir) self.mc_sampler.configure_qoi_manager() self.mc_sampler.configure_task_manager() self.mc_sampler.configure_pyposmat_datafile_out() #pyposmat_datafile_out = PyposmatDataFile(filename_out) if self.mpi_rank == 0: 
self.mc_sampler.print_structure_database() self.mc_sampler.print_sampling_configuration() if self.mpi_rank == 0 and i_iteration == 0: self.mc_sampler.print_initial_parameter_distribution() if self.mpi_rank == 0: self.log.write(80*'-') MPI.COMM_WORLD.Barrier() _mc_config = self.mc_sampler.configuration.sampling_type[i_iteration] # choose sampling type _mc_sample_type = _mc_config['type'] self.log.write("_mc_sample_type={}".format(_mc_sample_type)) # <----- paramter sampling type --------------------------------------- if _mc_sample_type == 'parametric': _mc_n_samples = _mc_config['n_samples'] # determine number of sims for this rank _n_samples_per_rank = int(_mc_n_samples/self.mpi_size) if _mc_n_samples%self.mpi_size > self.mpi_rank: _n_samples_per_rank += 1 self.mc_sampler.run_simulations( i_iteration=i_iteration, n_samples=_n_samples_per_rank) # <----- kde sampling sampling type --------------------------------------- elif _mc_sample_type == 'kde': _mc_n_samples = _mc_config['n_samples'] # determine number of sims for this rank _n_samples_per_rank = int(_mc_n_samples/self.mpi_size) if _mc_n_samples%self.mpi_size > self.mpi_rank: _n_samples_per_rank += 1 _filename_in = '' if 'file' in _mc_config: _filename_in = os.path.join( self.root_directory, _mc_config['file'] ) else: _filename_in = os.path.join( self.root_directory, self.data_directory, 'pyposmat.kde.{}.out'.format(i_iteration)) self.mc_sampler.run_simulations( i_iteration=i_iteration, n_samples=_n_samples_per_rank, filename=_filename_in) # <----- sampling from a file type --------------------------------------- # get parameters from file elif _mc_sample_type == 'from_file': _mc_n_samples = _mc_config['n_samples'] # determine number of sims for this rank _n_samples_per_rank = int(_mc_n_samples/self.mpi_size) if _mc_n_samples%self.mpi_size > self.mpi_rank: _n_samples_per_rank += 1 _filename_in = os.path.join( self.root_directory, _mc_config['file'] ) self.mc_sampler.run_simulations( i_iteration=i_iteration, 
n_samples=_n_samples_per_rank, filename=_filename_in ) # <----- kde with clusters sampling type --------------------------------------- elif _mc_sample_type == 'kde_w_clusters': cluster_fn = "pyposmat.cluster.{}.out".format(i_iteration) pyposmat_datafile_in = os.path.join( self.root_directory, self.data_directory, cluster_fn ) _config_filename = os.path.join( self.root_directory, self.configuration_filename) # determine number of sims for this rank _mc_n_samples = _mc_config['n_samples_per_cluster'] _n_samples_per_rank = int(_mc_n_samples / self.mpi_size) if _mc_n_samples % self.mpi_size > self.mpi_rank: _n_samples_per_rank += 1 # initialize sampling object o = PyposmatClusterSampler(o_logger=self.log, mpi_rank=self.mpi_rank, mpi_comm=self.mpi_comm, mpi_size=self.mpi_size) o.create_base_directories() o.read_configuration_file(filename=_config_filename) # check to see if clustered data file exists if self.mpi_rank == 0: if not os.path.isfile(pyposmat_datafile_in): kde_fn = "pyposmat.kde.{}.out".format(i_iteration) kde_fn = os.path.join( self.root_directory, self.data_directory, kde_fn ) o.write_cluster_file(filename=kde_fn, i_iteration=i_iteration) MPI.COMM_WORLD.Barrier() o.configure_pyposmat_datafile_in(filename=pyposmat_datafile_in) # fix relative path to structure databae folder _structure_dir = o.configuration.structures['structure_directory'] o.configuration.structures['structure_directory'] = \ os.path.join('..',_structure_dir) # finish the rest of the initialization o.configure_qoi_manager() o.configure_task_manager() o.configure_pyposmat_datafile_out() MPI.COMM_WORLD.Barrier() # run simulations o.run_simulations(i_iteration=i_iteration, n_samples=_mc_n_samples, filename=pyposmat_datafile_in) MPI.COMM_WORLD.Barrier() else: m = "unknown sampling type: {}".format( _mc_sample_type ) raise ValueError(m) # return to root directory os.chdir(self.root_directory) def get_results_dict(self): rd = OrderedDict() rd['mpi'] = OrderedDict() rd['mpi']['size'] = 
self.mpi_size def setup_mpi_environment(self): self.mpi_comm = MPI.COMM_WORLD self.mpi_rank = self.mpi_comm.Get_rank() self.mpi_size = self.mpi_comm.Get_size() self.mpi_procname = MPI.Get_processor_name() if self.mpi_rank == 0: self.print_mpi_environment() def print_mpi_environment(self): self.log.write(80*'-') self.log.write('{:^80}'.format('MPI COMMUNICATION INFORMATION')) self.log.write(80 * '-') self.log.write('mpi_size={}'.format(self.mpi_size)) def determine_rv_seeds(self): _randint_low = 0 _randint_high = 2147483647 # set original seed if self.rv_seed is None: self.rv_seed = np.random.randint( low=_randint_low, high=_randint_high) np.random.seed(self.rv_seed) # determine rank seed self.rv_seeds = np.random.randint( low=0, high=2147483647, size=(int(self.mpi_size),self.n_iterations) ) if self.mpi_rank == 0: self.print_random_seeds() def analyze_data_directories(self,data_dir=None): _d = data_dir i = 0 contents = [] if not os.path.exists(_d): return i, contents if not os.path.isdir(_d): return i, contents while True: kde_fn = os.path.join(_d,"pyposmat.kde.{}.out".format(i)) if os.path.exists(kde_fn): contents.append(kde_fn) else: if i > 0: contents.append(results_fn) break results_fn = os.path.join(_d,"pyposmat.results.{}.out".format(i)) if os.path.exists(results_fn): pass else:break i = i + 1 return i, contents def analyze_rank_directories(self,root_dir=None): i = 0 contents = [] if root_dir is None: _d = self.root_directory else: _d = root_directory while True: rank_dir = os.path.join(_d,"rank_{}".format(i)) if not os.path.exists(rank_dir): break if not os.path.isdir(rank_dir): break rank_fn = os.path.join("rank_{}".format(i),"pyposmat.results.out") if not os.path.exists(os.path.join(_d,rank_fn)): break if not os.path.isfile(os.path.join(_d,rank_fn)): break else: contents.append(rank_fn) i = i + 1 return i, contents def find_initial_parameters_file(self): if 'file' in self.configuration.sampling_type[0]: _init_fn =os.path.join( self.root_directory, 
self.configuration.sampling_type[0]['file'] ) if os.path.exists(_init_fn): if os.path.isfile(_init_fn): return _init_fn else: return None def merge_pypospack_datafiles(datafile_fns): d0 = PyposmatDataFile() d0.read(filename=datafile_fns[0]) df0 = d0.df for i in range(1,len(datafile_fns)): print("merging {}...".format(datafile_fns[i])) d = PyposmatDataFile() d.read(filename=datafile_fns[i]) df = d.df df0 = pd.concat([df0,df]).drop_duplicates().reset_index(drop=True) d0.df = df0 return d0 def merge_files(self,i_iteration): _dir = self.data_directory _n_ranks = self.mpi_size datafile = None # filename of old kde file _filename_kde = os.path.join( _dir,'pyposmat.kde.{}.out'.format(i_iteration)) self.log.write('Looking for previous kde file') self.log.write(' {}'.format(_filename_kde)) datafile_fns = [] if os.path.exists(_filename_kde): if os.path.isfile(_filename_kde): datafile_fns.append(_filename_kde) for i_rank in range(_n_ranks): rank_fn = os.path.join( 'rank_{}'.format(i_rank), 'pyposmat.results.out') datafile_fns.append(rank_fn) names = ['sim_id']\ + self.parameter_names\ + self.qoi_names\ + self.error_names types = ['sim_id']\ + ['param']*len(self.parameter_names)\ + ['qoi']*len(self.qoi_names)\ + ['err']*len(self.error_names) ##### # ERROR OCCURS HERE # pyposmat.results.out exists only in the rank_0 directory # is that the expected behavior or should every rank get a results directory??? 
##### dataframes = OrderedDict() for fn in datafile_fns: datafile = PyposmatDataFile() datafile.read(fn) #if fn.startswith('rank') #datafile.df['sim_id'] = datafile.df.apply( # lambda x:"{}_{}_{}".format( # i_iteration,i_rank,str(x['sim_id']))) dataframes[fn] = datafile.df[names] df = pd.concat(dataframes).reset_index(drop=True) datafile = PyposmatDataFile() datafile.df = df datafile.parameter_names = self.parameter_names datafile.error_names = self.error_names datafile.qoi_names = self.qoi_names datafile.names = names datafile.types = types try: fn_out = os.path.join( _dir,'pyposmat.results.{}.out'.format(i_iteration)) datafile.write(filename=fn_out) except FileNotFoundError as e: if not os.path.exists(self.data_directory): os.mkdir(self.data_directory) datafile.write(filename_fn_out) else: raise def analyze_results(self,i_iteration): data_fn = os.path.join(\ self.root_directory, self.data_directory, 'pyposmat.results.{}.out'.format(i_iteration)) config_fn = os.path.join(\ self.root_directory, self.configuration_filename) kde_fn = os.path.join(\ self.root_directory, self.data_directory, 'pyposmat.kde.{}.out'.format(i_iteration+1)) data_analyzer = PyposmatDataAnalyzer() data_analyzer.read_configuration_file(filename=config_fn) data_analyzer.read_data_file(filename=data_fn) data_analyzer.write_kde_file(filename=kde_fn) def read_configuration_file(self,filename=None): assert isinstance(filename,str) or filename is None if filename is None: _filename_in = self.configuration_filename else: self.configuration_filename = filename _filename_in = filename self.configuration = PyposmatConfigurationFile() self.configuration.read(filename=_filename_in) self.n_iterations = self.configuration.n_iterations self.qoi_names = self.configuration.qoi_names self.error_names = self.configuration.error_names self.parameter_names = self.configuration.parameter_names self.log.write(self.parameter_names) self.log.write(self.qoi_names) self.log.write(self.error_names) def 
print_random_seeds(self): self.log.write(80*'-') self.log.write('{:^80}'.format('GENERATED RANDOM SEEDS')) self.log.write(80*'-') self.log.write('') self.log.write('rv_seed={}'.format(self.rv_seed)) self.log.write('') self.log.write('{:^8} {:^8} {:^10}'.format('rank','iter','seed')) self.log.write('{} {} {}'.format(8*'-',8*'-',10*'-')) for i_rank in range(self.mpi_size): for i_iter in range(self.n_iterations): self.log.write('{:^8} {:^8} {:>10}'.format( i_rank, i_iter, self.rv_seeds[i_rank, i_iter]))
{"hexsha": "6b7e8f30d15999147961fbf54adbb42d190f7402", "size": 59530, "ext": "py", "lang": "Python", "max_stars_repo_path": "dev/cluster_sampling/dev__cluster_sampling/mc_sampler_iterate_w_cluster.py", "max_stars_repo_name": "eragasa/pypospack", "max_stars_repo_head_hexsha": "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-01-18T19:59:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-25T11:56:52.000Z", "max_issues_repo_path": "dev/cluster_sampling/dev__cluster_sampling/mc_sampler_iterate_w_cluster.py", "max_issues_repo_name": "eragasa/pypospack", "max_issues_repo_head_hexsha": "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-04-22T23:02:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-22T23:02:13.000Z", "max_forks_repo_path": "dev/cluster_sampling/dev__cluster_sampling/mc_sampler_iterate_w_cluster.py", "max_forks_repo_name": "eragasa/pypospack", "max_forks_repo_head_hexsha": "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-14T07:04:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-14T07:04:42.000Z", "avg_line_length": 40.1145552561, "max_line_length": 121, "alphanum_fraction": 0.5804972283, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 12211}