| text (stringlengths 0–1.25M) | meta (stringlengths 47–1.89k) |
|---|---|
[STATEMENT]
lemma cosh_minus_sinh: "cosh x - sinh x = exp (-x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
have "cosh x - sinh x = (1 / 2) *\<^sub>R (exp (-x) + exp (-x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cosh x - sinh x = (1 / 2) *\<^sub>R (exp (- x) + exp (- x))
[PROOF STEP]
by (simp add: sinh_def cosh_def algebra_simps)
[PROOF STATE]
proof (state)
this:
cosh x - sinh x = (1 / 2) *\<^sub>R (exp (- x) + exp (- x))
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cosh x - sinh x = (1 / 2) *\<^sub>R (exp (- x) + exp (- x))
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
have "\<dots> = exp (-x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (1 / 2) *\<^sub>R (exp (- x) + exp (- x)) = exp (- x)
[PROOF STEP]
by (rule scaleR_half_double)
[PROOF STATE]
proof (state)
this:
(1 / 2) *\<^sub>R (exp (- x) + exp (- x)) = exp (- x)
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
cosh x - sinh x = exp (- x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cosh x - sinh x = exp (- x)
goal (1 subgoal):
1. cosh x - sinh x = exp (- x)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
cosh x - sinh x = exp (- x)
goal:
No subgoals!
[PROOF STEP]
qed
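For reference, the algebra that the simp step performs, using the standard definitions of sinh and cosh:

\[
\cosh x - \sinh x = \tfrac12\left(e^{x} + e^{-x}\right) - \tfrac12\left(e^{x} - e^{-x}\right) = \tfrac12\left(e^{-x} + e^{-x}\right) = e^{-x}.
\]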
|
{"llama_tokens": 697, "file": null, "length": 10}
|
[STATEMENT]
lemma UNIV_ipv4addrset: "UNIV = {0 .. max_ipv4_addr}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. UNIV = {0..max_ipv4_addr}
[PROOF STEP]
(*not in the simp set, for a reason*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. UNIV = {0..max_ipv4_addr}
[PROOF STEP]
by(simp add: max_ipv4_addr_max_word) fastforce
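The same fact, sanity-checked numerically outside Isabelle (a sketch; IPv4 addresses are 32-bit machine words, so max_ipv4_addr is the largest 32-bit value):

max_ipv4_addr = 2 ** 32 - 1  # largest 32-bit word
assert max_ipv4_addr == 0xFFFFFFFF  # every address lies in [0, max_ipv4_addr]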
|
{"llama_tokens": 154, "file": "IP_Addresses_IPv4", "length": 2}
|
export triu, triu!
import LinearAlgebra: triu, triu!
function triu!(A::AbstractMPIArray{T}, k::Integer=0) where T
    zero_ = zero(T)
    forlocalpart!(A) do lA
        grows, gcols = localindices(A)  # global indices owned by this rank
        for (i, gi) in enumerate(grows)
            for (j, gj) in enumerate(gcols)
                # zero entries strictly below the k-th superdiagonal
                if gj < gi + k
                    lA[i, j] = zero_
                end
            end
        end
    end
    return A
end
function triu(A::AbstractMPIArray, k::Integer=0)
B = copy(A)
return triu!(B, k)
end
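For intuition, a minimal NumPy sketch (a hypothetical helper, not part of MPIArrays.jl) of the masking rule that triu! applies to each local block through its global indices:

import numpy as np

def triu_local(lA, grows, gcols, k=0):
    # Zero every entry whose global position lies strictly below the
    # k-th superdiagonal, i.e. where gj < gi + k.
    for i, gi in enumerate(grows):
        for j, gj in enumerate(gcols):
            if gj < gi + k:
                lA[i, j] = 0
    return lA

# e.g. the local block covering global rows 3:4 and columns 1:4
block = triu_local(np.ones((2, 4)), [3, 4], [1, 2, 3, 4])  # zeros entries with column < row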
|
{"hexsha": "fd1806b118af80b639432196048f59418dcf416b", "size": 525, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/linalg.jl", "max_stars_repo_name": "Soyukke/MPIArrays.jl", "max_stars_repo_head_hexsha": "2a309c8a81f05e14dcd1b555d830abf98121da0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/linalg.jl", "max_issues_repo_name": "Soyukke/MPIArrays.jl", "max_issues_repo_head_hexsha": "2a309c8a81f05e14dcd1b555d830abf98121da0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/linalg.jl", "max_forks_repo_name": "Soyukke/MPIArrays.jl", "max_forks_repo_head_hexsha": "2a309c8a81f05e14dcd1b555d830abf98121da0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0, "max_line_length": 61, "alphanum_fraction": 0.5047619048, "num_tokens": 156}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm_ext
import tvm
import tvm._ffi.registry
import tvm.testing
from tvm import te
import numpy as np
def test_bind_add():
def add(a, b):
return a + b
f = tvm_ext.bind_add(add, 1)
assert f(2) == 3
def test_ext_dev():
n = 10
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
def check_llvm():
if not tvm.testing.device_enabled("llvm"):
return
f = tvm.build(s, [A, B], "ext_dev", "llvm")
ctx = tvm.ext_dev(0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)
check_llvm()
def test_sym_add():
a = te.var("a")
b = te.var("b")
c = tvm_ext.sym_add(a, b)
assert c.a == a and c.b == b
def test_ext_vec():
ivec = tvm_ext.ivec_create(1, 2, 3)
assert isinstance(ivec, tvm_ext.IntVec)
assert ivec[0] == 1
assert ivec[1] == 2
def ivec_cb(v2):
assert isinstance(v2, tvm_ext.IntVec)
assert v2[2] == 3
tvm.runtime.convert(ivec_cb)(ivec)
def test_extract_ext():
fdict = tvm._ffi.registry.extract_ext_funcs(tvm_ext._LIB.TVMExtDeclare)
assert fdict["mul"](3, 4) == 12
def test_extern_call():
n = 10
A = te.placeholder((n,), name="A")
B = te.compute(
(n,), lambda *i: tvm.tir.call_extern("float32", "TVMTestAddOne", A(*i)), name="B"
)
s = te.create_schedule(B.op)
def check_llvm():
if not tvm.testing.device_enabled("llvm"):
return
f = tvm.build(s, [A, B], "llvm")
ctx = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)
check_llvm()
def test_nd_subclass():
a = tvm_ext.NDSubClass.create(additional_info=3)
b = tvm_ext.NDSubClass.create(additional_info=5)
assert isinstance(a, tvm_ext.NDSubClass)
c = a + b
d = a + a
e = b + b
assert a.additional_info == 3
assert b.additional_info == 5
assert c.additional_info == 8
assert d.additional_info == 6
assert e.additional_info == 10
if __name__ == "__main__":
test_nd_subclass()
test_extern_call()
test_ext_dev()
test_ext_vec()
test_bind_add()
test_sym_add()
test_extract_ext()
|
{"hexsha": "c73e820c19ad695512cd09fced582a7bbdab9894", "size": 3369, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/extension/tests/test_ext.py", "max_stars_repo_name": "janifer112x/incubator-tvm", "max_stars_repo_head_hexsha": "98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2021-06-14T23:14:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T14:32:23.000Z", "max_issues_repo_path": "apps/extension/tests/test_ext.py", "max_issues_repo_name": "janifer112x/incubator-tvm", "max_issues_repo_head_hexsha": "98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-06-08T03:15:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T23:50:24.000Z", "max_forks_repo_path": "apps/extension/tests/test_ext.py", "max_forks_repo_name": "janifer112x/incubator-tvm", "max_forks_repo_head_hexsha": "98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-06-14T05:56:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T06:52:07.000Z", "avg_line_length": 27.6147540984, "max_line_length": 89, "alphanum_fraction": 0.6360937964, "include": true, "reason": "import numpy", "num_tokens": 971}
|
from pathlib import Path
import numpy as np
from pyhdx import PeptideMasterTable, read_dynamx, KineticsSeries
current_dir = Path(__file__).parent
np.random.seed(43)
fpath = current_dir.parent / 'tests' / 'test_data' / 'ecSecB_apo.csv'
data = read_dynamx(fpath)
pmt = PeptideMasterTable(data, drop_first=1, ignore_prolines=True, remove_nan=False)
pmt.set_control(('Full deuteration control', 0.167))
sequence = 'MSEQNNTEMTFQIQRIYTKDISFEAPNAPHVFQKDWQPEVKLDLDTASSQLADDVYEVVLRVTVTASLGEETAFLCEVQQGGIFSIAGIEGTQMAHCLGAYCPNILFPYARECITSMVSRGTFPQLNLAPVNFDALFMNYLQQQAGEGTEEHQDA'
series = KineticsSeries(pmt.get_state('SecB WT apo'), sequence=sequence)
print(series)
#series.coverage.protein.to_file('test.txt', fmt='pprint')
from pyhdx.fileIO import csv_to_protein
protein = csv_to_protein(current_dir.parent / 'tests' / 'test_data' / 'ecSecB_info.csv', column_depth=1)
print(protein.df)
#print(protein.index)
|
{"hexsha": "c74753fd225325fe6a37af8656ae00fbdf4002ca", "size": 906, "ext": "py", "lang": "Python", "max_stars_repo_path": "templates/load_secb_data_template.py", "max_stars_repo_name": "sebaztiano/PyHDX", "max_stars_repo_head_hexsha": "12fc2b5f67200885706226823bd8e1f46e3b5db1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "templates/load_secb_data_template.py", "max_issues_repo_name": "sebaztiano/PyHDX", "max_issues_repo_head_hexsha": "12fc2b5f67200885706226823bd8e1f46e3b5db1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "templates/load_secb_data_template.py", "max_forks_repo_name": "sebaztiano/PyHDX", "max_forks_repo_head_hexsha": "12fc2b5f67200885706226823bd8e1f46e3b5db1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.24, "max_line_length": 168, "alphanum_fraction": 0.8123620309, "include": true, "reason": "import numpy", "num_tokens": 299}
|
import sys
import os
from pathlib import Path
import logging
import time
from typing import List, Union, Dict, Tuple, Any
from collections import OrderedDict
import numpy as np
import pandas as pd
import mxnet as mx
from gluonts.model.n_beats import NBEATSEnsembleEstimator
from gluonts.trainer import Trainer
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m import container, utils
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m.exceptions import PrimitiveNotFittedError
from ..utils.time_utils import (
calculate_time_frequency,
discretize_time_difference,
)
from .nbeats_dataset import NBEATSDataset
from .nbeats_forecast import NBEATSForecast
from .nbeats_predictor import NBEATSEnsembleEstimatorHook
__author__ = "Distil"
__version__ = "1.2.0"
__contact__ = "mailto:jeffrey.gleason@kungfu.ai"
Inputs = container.DataFrame
Outputs = container.DataFrame
logger = logging.getLogger(__name__)
class Params(params.Params):
nbeats_dataset: NBEATSDataset
is_fit: bool
timestamp_column: int
freq: str
reind_freq: str
group_cols: List[int]
output_column: str
target_column: int
min_trains: Union[
List[pd._libs.tslibs.timestamps.Timestamp],
Dict[str, pd._libs.tslibs.timestamps.Timestamp],
Dict[Any, pd._libs.tslibs.timestamps.Timestamp]
]
class Hyperparams(hyperparams.Hyperparams):
    weights_dir = hyperparams.Hyperparameter[str](
default='nbeats_weights',
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="weights of trained model will be saved to this filepath",
)
prediction_length = hyperparams.UniformInt(
lower=1,
upper=1000,
default=30,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="number of future timesteps to predict",
)
interpretable = hyperparams.UniformBool(
default=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="whether to build interpretable architecture",
)
num_context_lengths = hyperparams.UniformInt(
lower=1,
upper=6,
default=2,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="number of different context lengths to use for training estimators"
)
num_estimators = hyperparams.UniformInt(
lower=1,
upper=20,
default=2,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="number of different estimators to train for each combination of context "
+ "length and loss function (3). The total number of estimators is num_estimators * "
+ "num_context_lengths * 3"
)
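    # Worked example with the defaults above: num_estimators=2 and
    # num_context_lengths=2 give 2 * 2 * 3 = 12 trained estimators in total
    # (the 3 loss functions are sMAPE, MASE, and MAPE; see fit() below).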
epochs = hyperparams.UniformInt(
lower=1,
upper=sys.maxsize,
default=10,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="number of training epochs for each estimator",
)
steps_per_epoch = hyperparams.UniformInt(
lower=1,
upper=200,
default=50,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="number of steps per epoch for each estimator",
)
learning_rate = hyperparams.Uniform(
lower=0.0,
upper=1.0,
default=1e-4,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="learning rate for each estimator",
)
training_batch_size = hyperparams.UniformInt(
lower=1,
upper=256,
default=32,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/TuningParameter"
],
description="training batch size for each estimator",
)
inference_batch_size = hyperparams.UniformInt(
lower=1,
upper=1024,
default=256,
upper_inclusive=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="inference batch size",
)
output_mean = hyperparams.UniformBool(
default=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="whether to output mean (or median) forecasts from ensemble estimators. "
+ "If `interpretable` is `True`, `output_mean` will automatically be `True` to preserve "
+ "the additive decomposition of the trend and seasonality forecast components"
)
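    # Means preserve the identity forecast = trend + seasonality across the
    # ensemble (the mean of sums equals the sum of means); medians would not.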
nan_padding = hyperparams.UniformBool(
default=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="whether to pad predictions that aren't supported by the model "
+ "with 'np.nan' or with the last valid prediction"
)
class NBEATSPrimitive(SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
Primitive that applies the NBEATS (Neural basis expansion analysis for interpretable time
series forecasting) method for time series forecasting. The implementation is based off of
this paper: https://arxiv.org/abs/1905.10437 and this repository: https://gluon-ts.mxnet.io/index.html
Training inputs: 1) Feature dataframe, 2) Target dataframe
Outputs: Dataframe with predictions for specific time series at specific future time instances
Arguments:
hyperparams {Hyperparams} -- D3M Hyperparameter object
Keyword Arguments:
random_seed {int} -- random seed (default: {0})
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "3952a074-145e-406d-9cee-80232ae8f3ae",
"version": __version__,
"name": "NBEATS",
"keywords": [
"time series",
"forecasting",
"deep neural network",
"fully-connected",
"residual network",
"interpretable"
],
"source": {
"name": __author__,
"contact": __contact__,
"uris": [
"https://github.com/kungfuai/d3m-primitives",
],
},
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.16"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"python_path": "d3m.primitives.time_series_forecasting.feed_forward_neural_net.NBEATS",
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.DEEP_NEURAL_NETWORK,
],
"primitive_family": metadata_base.PrimitiveFamily.TIME_SERIES_FORECASTING,
"can_use_gpus": True
}
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
self._freq = None
self._is_fit = False
self.preds = None
def get_params(self) -> Params:
return Params(
nbeats_dataset = self._nbeats_dataset,
timestamp_column = self._timestamp_column,
group_cols = self._grouping_columns,
output_column = self._output_column,
target_column = self._target_column,
freq = self._freq,
reind_freq = self._reind_freq,
is_fit = self._is_fit,
min_trains = self._min_trains
)
def set_params(self, *, params: Params) -> None:
self._nbeats_dataset = params['nbeats_dataset']
self._timestamp_column = params['timestamp_column']
self._grouping_columns = params['group_cols']
self._output_column = params['output_column']
self._target_column = params['target_column']
self._freq = params['freq']
self._reind_freq = params['reind_freq']
self._is_fit = params['is_fit']
self._min_trains = params['min_trains']
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
""" Sets primitive's training data
Arguments:
inputs {Inputs} -- D3M dataframe containing attributes
outputs {Outputs} -- D3M dataframe containing targets
Raises:
ValueError: If multiple columns are annotated with 'Time' or 'DateTime' metadata
"""
self._output_column = outputs.columns[0]
frame = inputs.append_columns(outputs)
self._get_cols(frame)
self._set_freq(frame)
frame, self._min_trains, max_train_length, _ = self._reindex(frame)
self._check_window_support(max_train_length)
self._nbeats_dataset = NBEATSDataset(
frame,
self._grouping_columns,
self._timestamp_column,
self._target_column,
self._freq,
self.hyperparams['prediction_length'],
self.hyperparams['num_context_lengths']
)
self._train_data = self._nbeats_dataset.get_data()
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
""" Fits NBEATS model using training data from set_training_data and hyperparameters
Keyword Arguments:
            timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, considered (default: {None})
Returns:
CallResult[None]
"""
if iterations is None:
iterations = self.hyperparams["epochs"]
has_finished = True
else:
has_finished = False
if self.hyperparams['interpretable']:
num_stacks = 2
num_blocks = [1]
widths = [256,2048]
sharing = [True]
expansion_coefficient_lengths = [3]
stack_types = ["T", "S"]
estimator_class = NBEATSEnsembleEstimatorHook
else:
num_stacks = 30
num_blocks = [3]
widths = [512]
sharing = [False]
expansion_coefficient_lengths = [32]
stack_types = ["G"]
estimator_class = NBEATSEnsembleEstimator
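        # The two branches above follow the N-BEATS paper's interpretable
        # configuration (one trend "T" stack plus one seasonality "S" stack)
        # and its generic configuration (30 "G" stacks), respectively.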
estimator = estimator_class(
freq=self._freq,
prediction_length=self.hyperparams['prediction_length'],
meta_context_length=[
i for i in range(2, self.hyperparams['num_context_lengths'] + 2)
],
meta_loss_function = ['sMAPE', 'MASE', 'MAPE'],
meta_bagging_size = self.hyperparams['num_estimators'],
num_stacks=num_stacks,
num_blocks=num_blocks,
widths=widths,
sharing=sharing,
expansion_coefficient_lengths=expansion_coefficient_lengths,
stack_types=stack_types,
trainer=Trainer(
epochs=iterations,
learning_rate=self.hyperparams['learning_rate'],
batch_size=self.hyperparams['training_batch_size'],
num_batches_per_epoch=self.hyperparams['steps_per_epoch']
),
)
logger.info(f"Fitting for {iterations} iterations")
start_time = time.time()
predictor = estimator.train(self._train_data)
predictor.batch_size = self.hyperparams['inference_batch_size']
predictor.set_aggregation_method('none')
self._is_fit = True
logger.info(f"Fit for {iterations} epochs, took {time.time() - start_time}s")
if not os.path.isdir(self.hyperparams['weights_dir']):
os.mkdir(self.hyperparams['weights_dir'])
predictor.serialize(Path(self.hyperparams['weights_dir']))
return CallResult(None, has_finished=has_finished)
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
""" Produce primitive's predictions for specific time series at specific future time instances
* these specific timesteps / series are specified implicitly by input dataset
Arguments:
inputs {Inputs} -- D3M dataframe containing attributes
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Raises:
PrimitiveNotFittedError: if primitive not fit
Returns:
CallResult[Outputs] -- (N, 2) dataframe with d3m_index and value for each prediction slice requested.
prediction slice = specific horizon idx for specific series in specific regression
"""
all_preds, pred_intervals = self._produce(inputs)
if self.hyperparams['interpretable']:
all_components = [[] for c in range(3)]
for series, idxs in zip(all_preds, pred_intervals):
for i, component in enumerate(series):
all_components[i].append(component[idxs])
all_components = [np.concatenate(component) for component in all_components]
col_names = (self._output_column, 'trend-component', 'seasonality-component')
df_data = {
col_name: component
for col_name, component
in zip(col_names, all_components)
}
else:
point_estimates = np.concatenate(
[series[0][idxs] for series, idxs in zip(all_preds, pred_intervals)]
)
df_data = {self._output_column: point_estimates}
result_df = container.DataFrame(
df_data,
generate_metadata=True,
)
result_df.metadata = result_df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, 0),
("https://metadata.datadrivendiscovery.org/types/PredictedTarget"),
)
return CallResult(result_df, has_finished=self._is_fit)
def _get_col_names(self, col_idxs, all_col_names):
""" transform column indices to column names """
return [all_col_names[i] for i in col_idxs]
def _process_special_col(self, col_list, col_type):
""" private util function that warns if multiple special columns
"""
if len(col_list) == 0:
return None
elif len(col_list) > 1:
            logger.warning(
                f"There is more than one {col_type} marked. "
                "This primitive will use the first."
            )
return col_list[0]
def _sort_by_timestamp(self, frame):
""" private util function: convert to pd datetime and sort
"""
time_name = frame.columns[self._timestamp_column]
new_frame = frame.copy()
if "http://schema.org/Integer" in frame.metadata.query_column_field(
self._timestamp_column, "semantic_types"
):
new_frame.iloc[:, self._timestamp_column] = pd.to_datetime(
new_frame.iloc[:, self._timestamp_column] - 1,
unit = 'D'
)
self._freq = 'D'
self._reind_freq = 'D'
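            # e.g. an integer timestamp of 1 becomes pd.to_datetime(0, unit='D'),
            # i.e. 1970-01-01, so consecutive integers map to consecutive days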
else:
new_frame.iloc[:, self._timestamp_column] = pd.to_datetime(
new_frame.iloc[:, self._timestamp_column],
unit = 's'
)
return new_frame.sort_values(by = time_name)
def _set_freq(self, frame):
""" sets frequency using differences in timestamp column in data frame
ASSUMPTION: frequency is the same across all grouped time series
"""
if len(self._grouping_columns) == 0:
if self._freq is None:
diff = frame.iloc[1, self._timestamp_column] - frame.iloc[0, self._timestamp_column]
self._freq, self._reind_freq = calculate_time_frequency(diff, model = 'gluon')
else:
if self._freq is None:
g_cols = self._get_col_names(self._grouping_columns, frame.columns)
for g, df in frame.groupby(g_cols, sort = False):
diff = df.iloc[1, self._timestamp_column] - df.iloc[0, self._timestamp_column]
break
self._freq, self._reind_freq = calculate_time_frequency(diff, model = 'gluon')
def _robust_reindex(self, frame):
""" reindex dataframe IFF it has > 1 row, interpolate target column """
frame = self._sort_by_timestamp(frame)
original_times = frame.iloc[:, self._timestamp_column]
frame = frame.drop_duplicates(subset = frame.columns[self._timestamp_column])
frame.index = frame.iloc[:, self._timestamp_column]
if frame.shape[0] > 1:
frame = frame.reindex(
pd.date_range(
frame.index[0],
frame.index[-1],
freq = self._reind_freq,
)
)
# only interpolate when target exists during training
if self._target_column < frame.shape[1]:
frame.iloc[:, self._target_column] = frame.iloc[:, self._target_column].interpolate()
frame.iloc[:, self._grouping_columns] = frame.iloc[:, self._grouping_columns].ffill()
return frame, original_times
def _reindex(self, frame):
""" reindex data, interpolating target columns
"""
if len(self._grouping_columns) == 0:
df, original_times = self._robust_reindex(frame)
return df, [df.index[0]], df.shape[0], original_times
else:
all_dfs, min_trains, original_times = [], {}, OrderedDict()
max_train_length = 0
g_cols = self._get_col_names(self._grouping_columns, frame.columns)
for grp, df in frame.groupby(g_cols, sort = False):
df, orig_times = self._robust_reindex(df)
if df.shape[0] > max_train_length:
max_train_length = df.shape[0]
all_dfs.append(df)
min_trains[grp] = df.index[0]
original_times[grp] = orig_times
return pd.concat(all_dfs), min_trains, max_train_length, original_times
def _get_cols(self, frame):
""" private util function: get indices of important columns from metadata
"""
input_metadata = frame.metadata
# get target idx (first column by default)
target_columns = input_metadata.list_columns_with_semantic_types(
(
"https://metadata.datadrivendiscovery.org/types/SuggestedTarget",
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
"https://metadata.datadrivendiscovery.org/types/Target",
)
)
if len(target_columns) == 0:
raise ValueError("At least one column must be marked as a target")
self._target_column = self._process_special_col(
target_columns, "target column"
)
# get timestamp idx (first column by default)
timestamp_columns = input_metadata.list_columns_with_semantic_types(
(
"https://metadata.datadrivendiscovery.org/types/Time",
"http://schema.org/DateTime",
)
)
self._timestamp_column = self._process_special_col(
timestamp_columns, "timestamp column"
)
# get grouping idx
self._grouping_columns = input_metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/GroupingKey",)
)
suggested_group_cols = input_metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/SuggestedGroupingKey",)
)
if len(self._grouping_columns) == 0:
self._grouping_columns = suggested_group_cols
def _check_window_support(self, max_train_length):
""" ensures that at least one series of target series is >= context_length """
if max_train_length < self.hyperparams['prediction_length']:
raise ValueError(
f"This training set does not support a prediction length of {self.hyperparams['prediction_length']} " +
f"because its longest series has length {max_train_length} observations. Please " +
f"choose a shorter prediction length."
)
def _get_pred_intervals(self, original_times):
""" private util function that retrieves unevenly spaced prediction intervals from data frame
"""
if len(self._grouping_columns) == 0:
intervals = discretize_time_difference(
original_times,
self._min_trains[0],
self._freq,
zero_index = True
)
all_intervals = [np.array(intervals) + 1]
else:
all_intervals = []
for grp, times in original_times.items():
if grp in self._min_trains.keys():
intervals = discretize_time_difference(
times,
self._min_trains[grp],
self._freq,
zero_index = True
)
else:
logger.info(
                    f'Series with category {grp} did not exist in training data; ' +
                    f'these predictions will be returned as np.nan.'
)
intervals = np.zeros(times.shape[0]).astype(int)
all_intervals.append(np.array(intervals) + 1)
return all_intervals
def _produce(self, inputs: Inputs):
""" internal produce method to support produce() and produce_confidence_intervals() methods """
if not self._is_fit:
raise PrimitiveNotFittedError("Primitive not fitted.")
test_frame = inputs.copy()
nbeats_forecast = NBEATSForecast(
self._nbeats_dataset,
self.hyperparams['weights_dir'],
self.hyperparams['interpretable'],
self.hyperparams['output_mean'],
self.hyperparams['nan_padding']
)
test_frame, _, _, original_times = self._reindex(test_frame)
pred_intervals = self._get_pred_intervals(original_times)
st = time.time()
preds = nbeats_forecast.predict(test_frame, pred_intervals)
logger.info(f'Making predictions took {time.time() - st}s')
return preds, pred_intervals
|
{"hexsha": "e6ed4c92bc994249ea8a72234ddc1c2baaa95802", "size": 23577, "ext": "py", "lang": "Python", "max_stars_repo_path": "kf_d3m_primitives/ts_forecasting/nbeats/nbeats.py", "max_stars_repo_name": "cdbethune/d3m-primitives", "max_stars_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kf_d3m_primitives/ts_forecasting/nbeats/nbeats.py", "max_issues_repo_name": "cdbethune/d3m-primitives", "max_issues_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kf_d3m_primitives/ts_forecasting/nbeats/nbeats.py", "max_forks_repo_name": "cdbethune/d3m-primitives", "max_forks_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9059405941, "max_line_length": 130, "alphanum_fraction": 0.6072867625, "include": true, "reason": "import numpy", "num_tokens": 4977}
|
import os
import numpy
from PIL import Image
import torch
from torch.utils.data.dataset import Dataset
class WheatDataset(Dataset):
def __init__(self, df, config, tile_mode: int = 0, rand: bool = False, resize_transform: callable = None,
transform: callable = None):
self.df = df.reset_index(drop=True)
self.is_train = 'growth_stage' in df.columns
self.num_classes = config.num_classes
self.image_dir = config.images_dir
self.is_regression = config.regression
self.rand = rand
self.resize_transform = resize_transform
self.transform = transform
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index):
row = self.df.iloc[index]
filename = row.UID + '.jpeg'
image_path = os.path.join(self.image_dir, filename)
images = numpy.asarray(Image.open(image_path).convert('RGB'))
if self.transform is not None:
images = self.transform(image=images)['image']
if self.resize_transform is not None:
images = self.resize_transform(images)
images = images.astype(numpy.float32)
images /= 255
images = images.transpose((2, 0, 1))
images = torch.tensor(images)
if self.is_train:
if self.is_regression:
label = row.growth_stage
else:
label = numpy.zeros(self.num_classes)
label[row.growth_stage - 1] = 1
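                # one-hot encoding: growth_stage is 1-indexed, so stage s sets label[s - 1]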
return images, torch.tensor(label.astype(numpy.float32))
else:
return images
|
{"hexsha": "e73bff97702d08df806d1c5a9b8addcc965362d2", "size": 1608, "ext": "py", "lang": "Python", "max_stars_repo_path": "Image Classification/CGIAR Wheat Growth Stage Challenge/Nuno/competition_CGIAR_user_ngcferreira_3rd_place/wheat_dataset.py", "max_stars_repo_name": "ZindiAfrica/Computer-Vision", "max_stars_repo_head_hexsha": "bf4c00a0633506270dc6d07df938a100a10ee799", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Image Classification/CGIAR Wheat Growth Stage Challenge/Nuno/competition_CGIAR_user_ngcferreira_3rd_place/wheat_dataset.py", "max_issues_repo_name": "ZindiAfrica/Computer-Vision", "max_issues_repo_head_hexsha": "bf4c00a0633506270dc6d07df938a100a10ee799", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Image Classification/CGIAR Wheat Growth Stage Challenge/Nuno/competition_CGIAR_user_ngcferreira_3rd_place/wheat_dataset.py", "max_forks_repo_name": "ZindiAfrica/Computer-Vision", "max_forks_repo_head_hexsha": "bf4c00a0633506270dc6d07df938a100a10ee799", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9230769231, "max_line_length": 109, "alphanum_fraction": 0.6169154229, "include": true, "reason": "import numpy", "num_tokens": 343}
|
! %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Copyright (c) 2016, Regents of the University of Colorado
! All rights reserved.
!
! Redistribution and use in source and binary forms, with or without modification, are
! permitted provided that the following conditions are met:
!
! 1. Redistributions of source code must retain the above copyright notice, this list of
! conditions and the following disclaimer.
!
! 2. Redistributions in binary form must reproduce the above copyright notice, this list
! of conditions and the following disclaimer in the documentation and/or other
! materials provided with the distribution.
!
! 3. Neither the name of the copyright holder nor the names of its contributors may be
! used to endorse or promote products derived from this software without specific prior
! written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
! EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
! MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
! THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
! OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
! LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!
! History
! March 2016 - D. Swales - Original version
! %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
program cosp1_test
use cosp_kinds, only: wp
USE MOD_COSP_CONFIG, ONLY: R_UNDEF,PARASOL_NREFL,LIDAR_NCAT,SR_BINS, &
N_HYDRO,RTTOV_MAX_CHANNELS,numMISRHgtBins, &
cloudsat_DBZE_BINS,LIDAR_NTEMP,calipso_histBsct, &
numMODISTauBins,numMODISPresBins, &
numMODISReffIceBins,numMODISReffLiqBins, &
numISCCPTauBins,numISCCPPresBins,numMISRTauBins, &
ntau,modis_histTau,tau_binBounds, &
modis_histTauEdges,tau_binEdges, &
modis_histTauCenters,tau_binCenters,ntauV1p4, &
tau_binBoundsV1p4,tau_binEdgesV1p4, tau_binCentersV1p4, &
Nlvgrid_local => Nlvgrid
use mod_cosp1_io, only: nc_read_input_file, write_cosp1_output, read_cosp_output_nl
USE MOD_COSP_INTERFACE_v1p4, ONLY: cosp => cosp_interface_v1p4, &
cosp_gridbox,construct_cosp_vgrid, &
construct_cosp_gridbox, &
free_cosp_gridbox => destroy_cosp_gridbox, &
free_cosp_sgradar => destroy_cosp_sgradar, &
free_cosp_radarstats => destroy_cosp_radarstats, &
free_cosp_sglidar => destroy_cosp_sglidar, &
free_cosp_lidarstats => destroy_cosp_lidarstats, &
free_cosp_isccp => destroy_cosp_isccp, &
free_cosp_misr => destroy_cosp_misr, &
free_cosp_rttov => destroy_cosp_rttov, &
free_cosp_modis => destroy_cosp_modis, &
free_cosp_vgrid => destroy_cosp_vgrid, &
free_cosp_subgrid => destroy_cosp_subgrid, &
construct_cosp_subgrid,cosp_config,cosp_subgrid, &
cosp_sglidar,cosp_lidarstats, &
construct_cosp_lidarstats,construct_cosp_sglidar, &
cosp_isccp,construct_cosp_isccp,cosp_misr, &
construct_cosp_misr,cosp_rttov,construct_cosp_rttov,&
cosp_sgradar,cosp_radarstats, &
construct_cosp_radarstats,construct_cosp_sgradar, &
cosp_modis,construct_cosp_modis, &
cosp_vgrid,I_CVCLIQ,I_LSCLIQ,I_CVCICE,I_LSCICE, &
I_LSRAIN,I_LSSNOW,I_LSGRPL,I_CVRAIN,I_CVSNOW
implicit none
! Input/Output driver file control
character(len=64),parameter :: &
cosp_input_namelist = 'cosp1_input_nl.txt', &
cosp_output_namelist = 'cosp1_output_nl.txt'
! Test data
integer :: &
Nlon,Nlat,geomode
real(wp) :: &
emsfc_lw
real(wp),dimension(:),allocatable,target:: &
lon, & ! Longitude (deg)
lat, & ! Latitude (deg)
skt, & ! Skin temperature (K)
landmask, & ! Land/sea mask (0/1)
u_wind, & ! U-component of wind (m/s)
v_wind, & ! V-component of wind (m/s)
sunlit ! Sunlit flag
real(wp),dimension(:,:),allocatable,target :: &
p, & ! Model pressure levels (pa)
ph, & ! Model pressure @ half levels (pa)
zlev, & ! Model level height (m)
zlev_half, & ! Model level height @ half-levels (m)
T, & ! Temperature (K)
sh, & ! Specific humidity (kg/kg)
rh, & ! Relative humidity (1)
tca, & ! Total cloud fraction (1)
cca, & ! Convective cloud fraction (1)
mr_lsliq, & ! Mass mixing ratio for stratiform cloud liquid (kg/kg)
mr_lsice, & ! Mass mixing ratio for stratiform cloud ice (kg/kg)
mr_ccliq, & ! Mass mixing ratio for convective cloud liquid (kg/kg)
mr_ccice, & ! Mass mixing ratio for convective cloud ice (kg/kg)
mr_ozone, & ! Mass mixing ratio for ozone (kg/kg)
fl_lsrain, & ! Precipitation flux (rain) for stratiform cloud (kg/m^2/s)
fl_lssnow, & ! Precipitation flux (snow) for stratiform cloud (kg/m^2/s)
fl_lsgrpl, & ! Precipitation flux (graupel) for stratiform cloud (kg/m^2/s)
fl_ccrain, & ! Precipitation flux (rain) for convective cloud (kg/m^2/s)
fl_ccsnow, & ! Precipitation flux (snow) for convective cloud (kg/m^2/s)
dtau_s, & ! 0.67micron optical depth (stratiform cloud) (1)
dtau_c, & ! 0.67micron optical depth (convective cloud) (1)
dem_s, & ! 11micron emissivity (stratiform cloud)
dem_c ! 11micron emissivity (convective cloud)
real(wp),dimension(:,:,:),allocatable,target :: &
frac_out, & ! Subcolumn cloud cover (0/1)
Reff ! Subcolumn effective radius
! Input namelist fields
integer :: & !
Npoints, & ! Number of gridpoints
Ncolumns, & ! Number of subcolumns
Nlevels, & ! Number of model vertical levels
Npoints_it, & ! Number of gridpoints to be processed in one
! iteration
Nlvgrid, & ! Number of vertical levels for statistical outputs
! (USE_VGRID=.true.)
surface_radar, & ! surface=1/spaceborne=0
cloudsat_use_gas_abs, & ! Include gaseous absorption (1=yes/0=no)
cloudsat_do_ray, & ! Calculate output Rayleigh (1=yes/0=no)
lidar_ice_type, & ! Ice particle shape in lidar calculations
! (0=ice-spheres/1=ice-non-spherical)
overlap, & ! Overlap type: 1=max, 2=rand, 3=max/rand
isccp_topheight, & ! ISCCP cloud top height
isccp_topheight_direction, & ! ISCCP cloud top height direction
rttov_platform, & ! RTTOV: Satellite platform
rttov_satellite, & ! RTTOV: Satellite
rttov_instrument, & ! RTTOV: Instrument
rttov_Nchannels ! RTTOV: Number of channels to be computed
real(wp) :: & !
cloudsat_radar_freq, & ! CloudSat radar frequency (GHz)
cloudsat_k2, & ! |K|^2, -1=use frequency dependent default
rttov_ZenAng, & ! RTTOV: Satellite Zenith Angle
co2, & ! CO2 mixing ratio
ch4, & ! CH4 mixing ratio
n2o, & ! n2o mixing ratio
co ! co mixing ratio
logical :: & !
use_vgrid, & ! Use fixed vertical grid for outputs?
csat_vgrid, & ! CloudSat vertical grid?
use_precipitation_fluxes ! True if precipitation fluxes are input to the
! algorithm
integer,dimension(RTTOV_MAX_CHANNELS) :: &
rttov_Channels ! RTTOV: Channel numbers
real(wp),dimension(RTTOV_MAX_CHANNELS) :: &
rttov_Surfem ! RTTOV: Surface emissivity
character(len=64) :: &
cloudsat_micro_scheme ! Microphysical scheme used in cloudsat radar simulator
character(len=64) :: &
finput ! Input NetCDF file
character(len=256) :: &
foutput
character(len=512) :: &
dinput ! Directory where the input files are located
character(len=600) :: &
fileIN ! dinput+finput
namelist/COSP_INPUT/overlap,isccp_topheight,isccp_topheight_direction,npoints, &
npoints_it,ncolumns,nlevels,use_vgrid,Nlvgrid,csat_vgrid,dinput, &
finput,foutput,cloudsat_radar_freq,surface_radar,cloudsat_use_gas_abs, &
cloudsat_do_ray,cloudsat_k2,cloudsat_micro_scheme,lidar_ice_type, &
use_precipitation_fluxes,rttov_platform,rttov_satellite, &
rttov_Instrument,rttov_Nchannels,rttov_Channels,rttov_Surfem, &
rttov_ZenAng,co2,ch4,n2o,co
! Output namelist
logical :: Lcfaddbze94,Ldbze94,Latb532,LcfadLidarsr532,Lclcalipso,Lclhcalipso, &
Lcllcalipso,Lclmcalipso,Lcltcalipso,LparasolRefl,Lclcalipsoliq, &
Lclcalipsoice,Lclcalipsoun,Lclcalipsotmp,Lclcalipsotmpliq,Lclcalipsotmpice, &
Lclcalipsotmpun,Lclhcalipsoliq,Lcllcalipsoliq,Lclmcalipsoliq, &
Lcltcalipsoliq,Lclhcalipsoice,Lcllcalipsoice,Lclmcalipsoice,Lcltcalipsoice, &
Lclhcalipsoun,Lcllcalipsoun,Lclmcalipsoun,Lcltcalipsoun,Lalbisccp, &
Lboxptopisccp,Lboxtauisccp,Lpctisccp,Lclisccp,Ltauisccp,Lcltisccp, &
Lmeantbisccp,Lmeantbclrisccp,LclMISR,Lclcalipso2,Lcltlidarradar,Lfracout, &
LlidarBetaMol532,Lcltmodis,Lclwmodis,Lclimodis,Lclhmodis,Lclmmodis, &
Lcllmodis,Ltautmodis,Ltauwmodis,Ltauimodis,Ltautlogmodis,Ltauwlogmodis, &
Ltauilogmodis,Lreffclwmodis,Lreffclimodis,Lpctmodis,Llwpmodis,Liwpmodis, &
Lclmodis,Ltbrttov
namelist/COSP_OUTPUT/Lcfaddbze94,Ldbze94,Latb532,LcfadLidarsr532,Lclcalipso, &
Lclhcalipso,Lcllcalipso,Lclmcalipso,Lcltcalipso,LparasolRefl, &
Lclcalipsoliq,Lclcalipsoice,Lclcalipsoun,Lclcalipsotmp, &
Lclcalipsotmpliq,Lclcalipsotmpice,Lclcalipsotmpun,Lclhcalipsoliq, &
Lcllcalipsoliq,Lclmcalipsoliq,Lcltcalipsoliq,Lclhcalipsoice, &
Lcllcalipsoice,Lclmcalipsoice,Lcltcalipsoice,Lclhcalipsoun, &
Lcllcalipsoun,Lclmcalipsoun,Lcltcalipsoun,Lalbisccp,Lboxptopisccp,&
Lboxtauisccp,Lpctisccp,Lclisccp,Ltauisccp,Lcltisccp,Lmeantbisccp, &
Lmeantbclrisccp,LclMISR,Lclcalipso2,Lcltlidarradar,Lfracout, &
LlidarBetaMol532,Lcltmodis,Lclwmodis,Lclimodis,Lclhmodis, &
Lclmmodis,Lcllmodis,Ltautmodis,Ltauwmodis,Ltauimodis, &
Ltautlogmodis,Ltauwlogmodis,Ltauilogmodis,Lreffclwmodis, &
Lreffclimodis,Lpctmodis,Llwpmodis,Liwpmodis,Lclmodis,Ltbrttov
! Local variables
logical :: &
lsingle = .true., & ! True if using MMF_v3_single_moment CLOUDSAT microphysical scheme (default)
ldouble = .false., & ! True if using MMF_v3.5_two_moment CLOUDSAT microphysical scheme
lisccp = .false. ,& ! Local on/off switch for simulators (used by initialization)
lmodis = .false., & !
lmisr = .false., & !
lcalipso = .false., & !
lcloudsat = .false., & !
lrttov = .false., & !
lparasol = .false. !
integer :: iChunk,nChunks,start_idx,end_idx,nPtsPerIt
real(wp),dimension(10) :: driver_time
character(len=256),dimension(100) :: cosp_status
! Fields used solely for output
integer,parameter :: &
n_out_list = 63, & ! Number of possible output variables
N3D = 8, & ! Number of 3D output variables
N2D = 14, & ! Number of 2D output variables
N1D = 40 ! Number of 1D output variables
character(len=32),dimension(n_out_list) :: out_list ! List of output variable names
integer :: lon_axid,time_axid,height_axid,height_mlev_axid,grid_id,lonvar_id, &
latvar_id,column_axid,sza_axid,temp_axid,channel_axid,dbze_axid,sratio_axid,&
MISR_CTH_axid,lat_axid,tau_axid,pressure2_axid ,k
double precision :: time,time_bnds(2),time_step,half_time_step
real(wp),dimension(:),allocatable :: mgrid_z,mgrid_zu,mgrid_zl
! COSP1 types
type(cosp_gridbox) :: gbx ! Gridbox information. Input for COSP
type(cosp_subgrid) :: sgx ! Subgrid outputs
type(cosp_config) :: cfg ! Configuration options
type(cosp_vgrid) :: vgrid ! Information on vertical grid of stats
type(cosp_sgradar) :: sgradar ! Output from radar simulator
type(cosp_sglidar) :: sglidar ! Output from lidar simulator
type(cosp_isccp) :: isccp ! Output from ISCCP simulator
type(cosp_modis) :: modis ! Output from MODIS simulator
type(cosp_misr) :: misr ! Output from MISR simulator
type(cosp_rttov) :: rttov ! Output from RTTOV
type(cosp_radarstats) :: stradar ! Summary statistics from radar simulator
type(cosp_lidarstats) :: stlidar ! Summary statistics from lidar simulator
integer,parameter :: &
use_mie_tables=0, &
melt_lay=0, & ! melting layer model off=0, on=1
Nprmts_max_hydro=12, & ! Max number of parameters for hydrometeor size distributions
Naero=1, & ! Number of aerosol species (Not used)
Nprmts_max_aero=1 ! Max number of parameters for aerosol size distributions (Not used)
logical,parameter :: &
use_reff=.true. ! True if you want effective radius to be used by radar simulator (always used by lidar)
real(wp) :: toffset_step
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call cpu_time(driver_time(1))
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Read in namelists
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Input namelist (cosp setup)
open(10,file=cosp_input_namelist,status='unknown')
read(10,nml=cosp_input)
close(10)
! Output namelist (logical flags to turn on/off outputs)
call read_cosp_output_nl(cosp_output_namelist,63,cfg)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Read in sample input data.
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
allocate(lon(Npoints),lat(Npoints),p(Npoints,Nlevels),ph(Npoints,Nlevels), &
zlev(Npoints,Nlevels),zlev_half(Npoints,Nlevels),T(Npoints,Nlevels), &
sh(Npoints,Nlevels),rh(Npoints,Nlevels),tca(Npoints,Nlevels), &
cca(Npoints,Nlevels),mr_lsliq(Npoints,Nlevels),mr_lsice(Npoints,Nlevels), &
mr_ccliq(Npoints,Nlevels),mr_ccice(Npoints,Nlevels), &
fl_lsrain(Npoints,Nlevels),fl_lssnow(Npoints,Nlevels), &
fl_lsgrpl(Npoints,Nlevels),fl_ccrain(Npoints,Nlevels), &
fl_ccsnow(Npoints,Nlevels),Reff(Npoints,Nlevels,N_HYDRO), &
dtau_s(Npoints,Nlevels),dtau_c(Npoints,Nlevels),dem_s(Npoints,Nlevels), &
dem_c(Npoints,Nlevels),skt(Npoints),landmask(Npoints), &
mr_ozone(Npoints,Nlevels),u_wind(Npoints),v_wind(Npoints),sunlit(Npoints), &
frac_out(Npoints,Ncolumns,Nlevels))
fileIN = trim(dinput)//trim(finput)
call nc_read_input_file(fileIN,Npoints,Nlevels,N_HYDRO,lon,lat,p,ph,zlev,zlev_half, &
T,sh,rh,tca,cca,mr_lsliq,mr_lsice,mr_ccliq,mr_ccice,fl_lsrain, &
fl_lssnow,fl_lsgrpl,fl_ccrain,fl_ccsnow,Reff,dtau_s,dtau_c, &
dem_s,dem_c,skt,landmask,mr_ozone,u_wind,v_wind,sunlit, &
emsfc_lw,geomode,Nlon,Nlat)
call cpu_time(driver_time(2))
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Allocate memory for gridbox type
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call construct_cosp_gridbox(time, time_bnds, cloudsat_radar_freq, &
surface_radar, use_mie_tables, cloudsat_use_gas_abs, cloudsat_do_ray, melt_lay, &
cloudsat_k2, Npoints, Nlevels, Ncolumns, N_HYDRO, &
Nprmts_max_hydro, Naero, Nprmts_max_aero, Npoints_it,lidar_ice_type, &
isccp_topheight, isccp_topheight_direction, overlap, emsfc_lw, &
use_precipitation_fluxes, use_reff, rttov_platform, rttov_satellite, &
rttov_instrument, rttov_Nchannels, rttov_ZenAng,rttov_channels(1:rttov_Nchannels),&
rttov_surfem(1:rttov_Nchannels), co2, ch4, n2o, co,gbx)
gbx%longitude = lon
gbx%latitude = lat
! Toffset. This assumes that time is the mid-point of the interval.
do k=1,Npoints
gbx%toffset(k) = -half_time_step + toffset_step*(k-0.5)
enddo
gbx%p = p
gbx%ph = ph
gbx%zlev = zlev
gbx%zlev_half = zlev_half
gbx%T = T
gbx%q = rh
gbx%sh = sh
gbx%cca = cca
gbx%tca = tca
gbx%psfc = ph(:,1)
gbx%skt = skt
gbx%land = landmask
gbx%mr_ozone = mr_ozone
gbx%u_wind = u_wind
gbx%v_wind = v_wind
gbx%sunlit = sunlit
gbx%rain_ls = fl_lsrain
gbx%snow_ls = fl_lssnow
gbx%grpl_ls = fl_lsgrpl
gbx%rain_cv = fl_ccrain
gbx%snow_cv = fl_ccsnow
gbx%dtau_s = dtau_s
gbx%dtau_c = dtau_c
gbx%dem_s = dem_s
gbx%dem_c = dem_c
gbx%Reff = Reff
gbx%Reff(:,:,I_LSRAIN) = 0._wp
gbx%mr_hydro(:,:,I_LSCLIQ) = mr_lsliq
gbx%mr_hydro(:,:,I_LSCICE) = mr_lsice
gbx%mr_hydro(:,:,I_CVCLIQ) = mr_ccliq
gbx%mr_hydro(:,:,I_CVCICE) = mr_ccice
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Define new vertical grid
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call construct_cosp_vgrid(gbx, Nlvgrid, use_vgrid, csat_vgrid, vgrid)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Subgrid information
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call construct_cosp_subgrid(Npoints, Ncolumns, Nlevels, sgx)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Allocate memory for other types.
! *NOTE* These construct subroutines are different than the original construct
! subroutines provided with cospv1.4.0. The old subroutines required the
! derived type cosp_config to be provided, which was used to determine which
! simulators were to be run. For simulators that were not run, a minimum
! amount of space was allocated for that simulator.
! In COSPv2.0, which simulators are run is determined by looking at which
! output fields are allocated (i.e. if the output field for the modis tau vs.
! cloud-top height joint histogram is allocated, we know that the ISCCP and
! MODIS simulators need to be run). This change in v2.0 made the way that
! the simulator outputs were allocated incompatible, so these subroutines
! needed to be modified, albeit only slightly.
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if (cfg%Lradar_sim) call construct_cosp_sgradar(Npoints,Ncolumns,Nlevels,N_HYDRO,sgradar)
if (cfg%Lradar_sim) call construct_cosp_radarstats(Npoints,Ncolumns,vgrid%Nlvgrid,N_HYDRO,stradar)
if (cfg%Llidar_sim) call construct_cosp_sglidar(Npoints,Ncolumns,Nlevels,N_HYDRO,PARASOL_NREFL,sglidar)
if (cfg%Llidar_sim) call construct_cosp_lidarstats(Npoints,Ncolumns,vgrid%Nlvgrid,N_HYDRO,PARASOL_NREFL,stlidar)
if (cfg%Lisccp_sim) call construct_cosp_isccp(Npoints,Ncolumns,Nlevels,isccp)
if (cfg%Lmodis_sim) call construct_cosp_modis(Npoints,modis)
if (cfg%Lmisr_sim) call construct_cosp_misr(Npoints,misr)
if (cfg%Lrttov_sim) call construct_cosp_rttov(Npoints,rttov_Nchannels,rttov)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Call simulator
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call cpu_time(driver_time(6))
call cosp(overlap,Ncolumns,cfg,vgrid,gbx,sgx,sgradar,sglidar,isccp,misr,modis,rttov,&
stradar,stlidar)
call cpu_time(driver_time(7))
print*,'Time to read in data: ',driver_time(2)-driver_time(1)
print*,'Time to run COSP: ',driver_time(7)-driver_time(6)
print*,'Total time: ',driver_time(7)-driver_time(1)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Output
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call write_cosp1_output(Npoints, Ncolumns, Nlevels, gbx%zlev(1,Nlevels:1:-1),lon, lat, cfg, vgrid, gbx, sgx, &
sgradar, sglidar, isccp, misr, modis, rttov, stradar, stlidar, foutput)
call cpu_time(driver_time(8))
print*,'Time to write to output: ',driver_time(8)-driver_time(7)
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
! Free up memory
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
call free_cosp_gridbox(gbx)
call free_cosp_subgrid(sgx)
call free_cosp_vgrid(vgrid)
if (cfg%Lradar_sim) call free_cosp_sgradar(sgradar)
if (cfg%Lradar_sim) call free_cosp_radarstats(stradar)
if (cfg%Llidar_sim) call free_cosp_sglidar(sglidar)
if (cfg%Llidar_sim) call free_cosp_lidarstats(stlidar)
if (cfg%Lisccp_sim) call free_cosp_isccp(isccp)
if (cfg%Lmisr_sim) call free_cosp_misr(misr)
if (cfg%Lmodis_sim) call free_cosp_modis(modis)
if (cfg%Lrttov_sim) call free_cosp_rttov(rttov)
end program cosp1_test
|
{"hexsha": "de2d8dad225d9a9793194a4cb1dfca806951bea9", "size": 24350, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "components/eam/src/physics/cosp2/external/driver/src/cosp1_test.f90", "max_stars_repo_name": "meng630/GMD_E3SM_SCM", "max_stars_repo_head_hexsha": "990f84598b79f9b4763c3a825a7d25f4e0f5a565", "max_stars_repo_licenses": ["FTL", "zlib-acknowledgement", "RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "components/eam/src/physics/cosp2/external/driver/src/cosp1_test.f90", "max_issues_repo_name": "meng630/GMD_E3SM_SCM", "max_issues_repo_head_hexsha": "990f84598b79f9b4763c3a825a7d25f4e0f5a565", "max_issues_repo_licenses": ["FTL", "zlib-acknowledgement", "RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "components/eam/src/physics/cosp2/external/driver/src/cosp1_test.f90", "max_forks_repo_name": "meng630/GMD_E3SM_SCM", "max_forks_repo_head_hexsha": "990f84598b79f9b4763c3a825a7d25f4e0f5a565", "max_forks_repo_licenses": ["FTL", "zlib-acknowledgement", "RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.8164251208, "max_line_length": 118, "alphanum_fraction": 0.5611498973, "num_tokens": 6475}
|
function [view, mapvol, covol] = polarAngleMap(view, dt, scans, params, legend, W);
%
% [view, map, co] = polarAngleMap(view, <dt, scans, params>, <legend>, <W>);
%
% AUTHOR: rory
% PURPOSE:
% Given corAnal data for a polar angle ("meridian")-mapping experiment
% (single scan or set of scans), produce a parameter map of preferred
% polar angle in units of degrees of visual field.
%
% If a single scan is provided as input, this function saves the
% parameter map in the scan's data type, assigning it only for that
% scan.
%
% However, if multiple input scans are provided (see below),
% the code saves the results in a new data type 'Meta_Analysis'.
% corAnal data from each scan are first converted into real-world
% units, then overlapping data are averaged together in a weighted
% average, based on each scan's coherence. (I.e., if one scan's
% co values for a given voxel are much higher than the other scans',
% it will dominate the determination of which angle is represented.)
%
% I wrote this code to use in conjunction with my across-session
% tools (createCombinedSession, importTSeries) to run meta-analyses
% on retinotopy data.
%
% ARGUMENTS:
% INPUT:
% view: mrVista view. <Defaults to selected gray view>
% dt: for a single scan, name or number of the data type
% from which the input data come. If analyzing multiple
% scans, a cell of length nScans of data type names/numbers.
% <default: cur data type>
%
% scans: scan or scans to use as input. <default: cur scan>
%
% params: struct (or nScans-long struct array) specifying how
% the stimulus mapped polar angle during each scan. Needs
% the following fields:
% params.startAngle: angle of center of wedge stimulus, measured
% in degrees clockwise from 12-o-clock, at the start of each
% cycle;
% params.width: width of wedge stimulus in degrees.
% params.direction: 'cw' or 'ccw', direction in which the stimulus
% proceeded. (cw=clockwise or ccw=counterclockwise).
% params.visualField: number of degrees the stimulus traversed
% each cycle (e.g., 360 if it went all the way around).
% <default: get these params using retinoCheckParams>
%
% legend: optional flag which, if set to 1, provides for a separate
% figure with a legend image to go with the polar angle map.
% <default 0, don't show this>
%
% W: optional vector of weights for each input scan, for use when
% doing a meta-analysis across scans. The vector should be
% the same length as the input scans, and should specify the
% overall weight, on top of the coherence, that that scan's voxels
% get. Useful for me, when high-res data produces lower co values,
% but is actually more reliable at identifying meridian
% representations. <default: all ones.>
%
% OUTPUT:
% view: mrVista view, set to the relevant data type / scan and with
% the map loaded and set to map mode.
%
% map: the map volume produced (but not the cell-of-scans set in the
% view, the numeric matrix).
%
% co: the mean coherence at each voxel, across all the input scans.
% Same format as map.
%
%
% ras, 01/10/06.
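%
% Example call (hypothetical data type name and scan number):
%   [vw, map, co] = polarAngleMap(getSelectedGray, 'Averages', 1);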
if notDefined('view'), view = getSelectedGray; end
if notDefined('dt'), dt = viewGet(view, 'curDataType'); end
if notDefined('scans'), scans = viewGet(view, 'curScan'); end
if notDefined('legend'), legend = 0; end
if notDefined('W'), W = ones(size(scans)); end
if notDefined('params')
params = retinoCheckParams(view, dt, scans);
end
mapName = 'Polar Angle (clock position)';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deal with single input scan instances separately %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if length(scans)==1
% this should be easy: just convert the corAnal values into
% degrees of polar angle
view = selectDataType(view, dt);
corAnalPath = fullfile(dataDir(view), 'corAnal.mat');
if ~exist(corAnalPath, 'file')
error('corAnal not found. Run computeCorAnal first.');
end
load(corAnalPath, 'ph', 'co');
srcPh = ph{scans}; srcCo = co{scans}; clear ph co;
% map from corAnal ph to polar angle
mapvol = polarAngle(srcPh, params) ./ 30;
% make and set the parameter map
mapPath = fullfile(dataDir(view), 'Polar_Angle_Map.mat');
if exist(mapPath, 'file')
load(mapPath, 'map', 'co');
else
map = cell(1, numScans(view));
end
map{scans} = mapvol;
% let's set the map colormap to be the same as the phase mode
% colormap, and save this with the map
if checkfields(view, 'ui', 'phMode')
view.ui.mapMode.cmap = view.ui.phMode.cmap;
view.ui.mapMode.clipMode = [0 12];
end
view = setParameterMap(view, map, mapName);
saveParameterMap(view, mapPath, 1, 1);
if legend, polarAngleMapLegend(view); end
% that should be it!
if nargout>=3, covol = srcCo; end  % (the loaded co was cleared above; srcCo holds this scan's coherence)
view = refreshScreen(view);
return
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% if we get here, we have multiple scans: parse the arguments to %
% be cell arrays, and get set up: %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
nScans = length(scans);
if ~iscell(dt)
mrGlobals;
% assume a single dt is specified, and all the scans are
% coming from that data type
for i = 1:nScans
if isnumeric(dt), tmp{i} = dataTYPES(dt).name;
else, tmp{i} = dt;
end
end
dt = tmp; clear tmp;
else
for i = 1:nScans
if isnumeric(dt{i}), dt{i} = dataTYPES(dt{i}).name; end
end
end
%%%%%Get corAnal volumes for each input scan
srcCo = cell(1, nScans); srcPh = cell(1, nScans); srcAmp = cell(1, nScans);
uniqueDts = unique(dt);
for i = 1:length(uniqueDts)
corAnalPath = fullfile(viewDir(view), uniqueDts{i}, 'corAnal.mat');
if ~exist(corAnalPath, 'file')
error('corAnal not found. Run computeCorAnal first.');
end
load(corAnalPath, 'co', 'ph', 'amp')
I = cellfind(dt, uniqueDts{i});
srcCo(I) = co(scans(I));
srcPh(I) = ph(scans(I));
srcAmp(I) = amp(scans(I));
end
%%%%%Set up the target data type for the multi-scan meta-analysis
view = initScan(view, 'Meta_Analysis', [], {dt{1} scans(1)});
view = selectDataType(view, 'Meta_Analysis');
view = setCurScan(view, numScans(view));
view = setAnnotation(view, sprintf('Meta Analysis for %s scans %s', ...
dt{1}, num2str(scans)));
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Calculate the Polar Angle Map %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%Set NaNs to zero -- will be ignored
for i = 1:nScans
srcCo{i}(isnan(srcCo{i})) = 0;
srcAmp{i}(isnan(srcAmp{i})) = 0;
srcPh{i}(isnan(srcPh{i})) = 0;
end
%%%%%Convert each phase map into real-world units
for i = 1:nScans
srcPh{i} = polarAngle(srcPh{i}, params(i)) ./ 30;
end
%%%%%because we allow an additional level of user-defined weights
%%%%%(orig. b/c I wanted to weigh high-res scans higher than low-res),
%%%%%adjust the coherence weights accordingly for each scan
for i = 1:nScans, srcCo{i} = srcCo{i} .* W(i); end
%%%%%Set up the weighted average
% we'll need a volume representing the sum of the coherence
% for each voxel, across input scans. This will serve as the
% denominator of the weight formula for each input scan.
coSum = zeros(size(srcCo{1})); coMax = zeros(size(srcCo{1}));
for i = 1:nScans,
coSum = coSum + srcCo{i};
coMax = max(coMax, srcCo{i});
end
%%%%%initialize the map and co volumes
mapvol = zeros(size(srcCo{1}));
covol = zeros(size(srcCo{1}));
%%%%%compute co volume as the mean across all input co volumes
%%%%%(note: overridden below by the max, per the header comments)
for i = 1:nScans, covol = covol + srcCo{i}; end
covol = covol ./ length(srcCo);
%%%%%Compute the weighted average, iteratively across input scans
for i = 1:nScans
mapvol = mapvol + (srcPh{i} .* srcCo{i} ./ coSum);
% % alternate attempt: use winner-take-all: scan with the
% % highest co value for a given voxel determines the map
% % value at that voxel.
% Imax = find(srcCo{i}==coMax);
% mapvol(Imax) = srcPh{i}(Imax);
end
covol = coMax;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Output the parameter map %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% initialize the output map and co data fields, loading it if it
% already exists:
mapPath = fullfile(dataDir(view), 'Polar_Angle_Map.mat');
if exist(mapPath, 'file')
load(mapPath, 'map', 'mapName', 'co');
else
map = cell(1, numScans(view));
co = cell(1, numScans(view));
end
% append the map volume for the new scans
map{numScans(view)} = mapvol;
co{numScans(view)} = covol;
% before saving the map, copy the view's phase mode color map
% to the map mode, which will also be saved:
if checkfields(view, 'ui', 'phMode')
view.ui.mapMode.cmap = view.ui.phMode.cmap;
view.ui.mapMode.clipMode = [0 12];
end
% set the map in the view, and save it
view = setParameterMap(view, map, mapName);
saveParameterMap(view, mapPath, 1, 1);
save(mapPath, 'co', '-append'); % also add the co field
view.co = co;
refreshScreen(view);
% also set corAnal amp and ph fields, and save a corAnal, so
% we can view in that mode as well (nice colorbar, can ph-restrict)
% we map the ph back from degrees to radians:
phvol = deg2rad(mapvol.*30);
ampvol = srcAmp{1};
if exist(fullfile(dataDir(view), 'corAnal.mat'), 'file')
view = loadCorAnal(view);
end
view.co{numScans(view)} = covol;
view.ph{numScans(view)} = phvol;
view.amp{numScans(view)} = ampvol;
view = saveCorAnal(view, 1);
newParams.type = 'polar_angle'; % set retino params such
newParams.startAngle = 0; % that the default HSV color map
newParams.direction = 'clockwise'; % produces a nice wedge color bar
newParams.visualField = 360;
newParams.width = 0;
retinoSetParams(view, 'Meta_Analysis', numScans(view), newParams);
% show a legend if requested
if legend, polarAngleMapLegend(view); end
% ok, think that's it!
return
% /--------------------------------------------------------------------/ %
% /--------------------------------------------------------------------/ %
function A = polarAngleMapLegend(view)
% A = polarAngleMapLegend(view);
% Using the current map mode settings, produce a legend
% for a polar angle parameter map and plot in a separate figure.
% Returns a truecolor image if requested.
mode = view.ui.mapMode;
% generate an angle map A and a radius map R
% A will start at the 12-o-clock and run clockwise back to 12,
% ranging from 1 to the number of colors in the cmap.
[X Y] = meshgrid(1:256, 1:256);
X = X-128; Y = Y-128;
A = atan2(X, Y);
A = fliplr(mod(A-pi, 2*pi));
A = rescale(A, [], [1 mode.numColors]);
R = sqrt(X.^2 + Y.^2);
% take cmap from color part of map mode
cmap = mode.cmap(mode.numGrays+1:end,:);
% convert A to truecolor
A = ind2rgb(A, cmap);
% for each color channel, mask out region outside radius (128 pixels)
[I J] = find(R>128);
for ch = 1:3
ind = sub2ind(size(A), I, J, repmat(ch, size(I)));
A(ind) = 1;
end
% put up the image
figure('Color', 'w', 'Name', 'Polar Angle Map Legend');
imshow(A);
return
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/mrBOLD/Analysis/VisualField/polarAngleMap.m"}
|
#include "TrICP.h"
#include <Eigen/LU>
#include <Eigen/SVD>
#include <cmath>
#include <utility>
#include <vector>
using namespace navtypes;
struct PointPair // NOLINT(cppcoreguidelines-pro-type-member-init)
{
point_t mapPoint;
point_t samplePoint;
double dist;
};
void heapify(PointPair arr[], int len, int i) {
int smallest = i;
int l = 2 * i + 1;
int r = 2 * i + 2;
if (l < len && arr[l].dist < arr[smallest].dist) {
smallest = l;
}
if (r < len && arr[r].dist < arr[smallest].dist) {
smallest = r;
}
if (smallest != i) {
std::swap(arr[i], arr[smallest]);
heapify(arr, len, smallest);
}
}
// get some of the point pairs with the least distance using heapsort
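// (builds a min-heap in O(len), then extracts the minNum smallest in O(minNum*log(len)))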
std::vector<PointPair> getMinN(PointPair* arr, int len, int minNum) {
for (int i = len / 2 - 1; i >= 0; i--) {
heapify(arr, len, i);
}
std::vector<PointPair> ret;
for (int i = len - 1; i > len - 1 - minNum; i--) {
ret.push_back(arr[0]);
std::swap(arr[0], arr[i]);
heapify(arr, i, 0);
}
return ret;
}
transform_t computeTransformation(const std::vector<PointPair>& pairs) {
/*
* We need to find a rigid transformation that maps points in the sample to points in
* the map. This transformation is not a regular affine, as only rotations and translations
* are permitted.
*
* We can do this with the Kabsch algorithm.
* https://en.wikipedia.org/wiki/Kabsch_algorithm
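	 *
	 * Concretely, the code below (1) subtracts the centroids from both point
	 * sets, (2) forms the 2x2 cross-covariance H = P^T Q, (3) takes the SVD
	 * H = U S V^T and sets R = V D U^T, where D corrects for reflections,
	 * and (4) recovers the translation from the rotated centroids.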
*/
Eigen::Vector2d mapCentroid = Eigen::Vector2d::Zero();
Eigen::Vector2d sampleCentroid = Eigen::Vector2d::Zero();
for (const PointPair& pair : pairs) {
mapCentroid += pair.mapPoint.topRows<2>();
sampleCentroid += pair.samplePoint.topRows<2>();
}
mapCentroid /= pairs.size();
sampleCentroid /= pairs.size();
int size = pairs.size();
Eigen::Matrix<double, Eigen::Dynamic, 2> P(size, 2);
Eigen::Matrix<double, Eigen::Dynamic, 2> Q(size, 2);
for (int i = 0; i < size; i++) {
P.row(i) = pairs[i].samplePoint.topRows<2>() - sampleCentroid;
Q.row(i) = pairs[i].mapPoint.topRows<2>() - mapCentroid;
}
Eigen::Matrix2d H = P.transpose() * Q;
// computing SVD for square matrices, so no QR preconditioner needed
Eigen::JacobiSVD<Eigen::Matrix2d, Eigen::NoQRPreconditioner> svd;
svd.compute(H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix2d U = svd.matrixU();
Eigen::Matrix2d V = svd.matrixV();
double d = (V * U.transpose()).determinant() > 0 ? 1 : -1;
Eigen::Matrix2d D = Eigen::Matrix2d::Identity();
D.bottomRightCorner<1, 1>()(0, 0) = d;
Eigen::Matrix2d R = V * D * U.transpose();
Eigen::Vector2d translation = mapCentroid - R * sampleCentroid;
transform_t trf = transform_t::Identity();
trf.topLeftCorner<2, 2>() = R;
trf.topRightCorner<2, 1>() = translation;
return trf;
}
TrICP::TrICP(int maxIter, double relErrChangeThresh,
std::function<point_t(const point_t&)> getClosest)
: maxIter(maxIter), relErrChangeThresh(relErrChangeThresh),
getClosest(std::move(getClosest)) {
}
transform_t TrICP::correct(const points_t& sample, double overlap) {
if (sample.empty() || overlap == 0) {
return transform_t::Identity();
}
int i = 0;
double mse = 1e9;
double oldMSE;
points_t points = sample;
transform_t trf = transform_t::Identity();
int N = static_cast<int>(overlap * sample.size());
do {
i++;
oldMSE = mse;
transform_t t = iterate(points, N, mse);
trf = t * trf;
} while (!isDone(i, mse, oldMSE));
return trf;
}
bool TrICP::isDone(int numIter, double mse, double oldMSE) const {
if (mse <= 1e-9) {
return true;
}
double relErrChange = fabs(mse - oldMSE) / mse;
return numIter >= maxIter || relErrChange <= relErrChangeThresh;
}
transform_t TrICP::iterate(points_t& sample, int N, double& mse) const {
	std::vector<PointPair> pairs(sample.size()); // std::vector instead of a non-standard VLA
for (size_t i = 0; i < sample.size(); i++) {
const point_t& point = sample[i];
point_t closestPoint = getClosest(point);
double dist = (point - closestPoint).norm();
PointPair pair{closestPoint, point, dist};
pairs[i] = pair;
}
	std::vector<PointPair> closestPairs =
		getMinN(pairs.data(), static_cast<int>(sample.size()), N);
double newS = 0;
for (const PointPair& pair : closestPairs) {
newS += pair.dist * pair.dist;
}
mse = newS / N;
transform_t trf = computeTransformation(closestPairs);
for (point_t& point : sample) {
point = trf * point;
}
return trf;
}
|
{"hexsha": "04d046fef742144d36e251ddc1d23dbc6363d56e", "size": 4214, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/worldmap/TrICP.cpp", "max_stars_repo_name": "huskyroboticsteam/Resurgence", "max_stars_repo_head_hexsha": "649f78103b6d76709fdf55bb38d08c0ff50da140", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2021-12-23T23:31:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T07:17:41.000Z", "max_issues_repo_path": "src/worldmap/TrICP.cpp", "max_issues_repo_name": "huskyroboticsteam/Resurgence", "max_issues_repo_head_hexsha": "649f78103b6d76709fdf55bb38d08c0ff50da140", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-11-22T05:33:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T07:01:47.000Z", "max_forks_repo_path": "src/worldmap/TrICP.cpp", "max_forks_repo_name": "huskyroboticsteam/Resurgence", "max_forks_repo_head_hexsha": "649f78103b6d76709fdf55bb38d08c0ff50da140", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0128205128, "max_line_length": 92, "alphanum_fraction": 0.6682486948, "num_tokens": 1305}
|
module probdata_module
! make the model name enter through the probin file
use amrex_fort_module, only : rt => amrex_real
character (len=80), save :: model_name
  ! arrange storage for the read-in model -- not worrying about efficiency,
! since this will only be called once
real(rt) , allocatable, save :: hse_r(:), hse_rho(:)
real(rt) , allocatable, save :: hse_t(:), hse_p(:)
real(rt) , allocatable, save :: hse_s(:,:)
! hold the state at the top of the initial model for the boundary
! conditions
real(rt) , save :: hse_rho_top, hse_T_top
real(rt) , save :: hse_p_top, hse_eint_top
real(rt) , allocatable, save :: hse_X_top(:)
real(rt) , save :: xmin, xmax, ymin, ymax, zmin, zmax
real(rt) , save :: heating_time, heating_rad, &
heating_peak, heating_sigma
! the prob_type matches MAESTRO test_basestate
integer , save :: prob_type
end module probdata_module
|
{"hexsha": "1dbfa486074aa067d507fdddeaa5e9b23999c554", "size": 1019, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Exec/gravity_tests/hydrostatic_adjust/probdata.f90", "max_stars_repo_name": "taehoryu/Castro", "max_stars_repo_head_hexsha": "223c72c993343ba5df84613d058ffb0767c2a7c9", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-05T19:23:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-05T19:23:47.000Z", "max_issues_repo_path": "Exec/gravity_tests/hydrostatic_adjust/probdata.f90", "max_issues_repo_name": "taehoryu/Castro", "max_issues_repo_head_hexsha": "223c72c993343ba5df84613d058ffb0767c2a7c9", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Exec/gravity_tests/hydrostatic_adjust/probdata.f90", "max_forks_repo_name": "taehoryu/Castro", "max_forks_repo_head_hexsha": "223c72c993343ba5df84613d058ffb0767c2a7c9", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3928571429, "max_line_length": 70, "alphanum_fraction": 0.6162904809, "num_tokens": 282}
|
import numpy as np
import scipy.special
from .base_signal import BaseSignal
__all__ = ['GaussianProcess']
class GaussianProcess(BaseSignal):
"""Gaussian Process time series sampler
Samples time series from Gaussian Process with selected covariance function (kernel).
Parameters
----------
kernel : {'SE', 'Constant', 'Exponential', 'RQ', 'Linear', 'Matern', 'Periodic'}
the kernel type, as described in [1]_ and [2]_, which can be:
- `Constant`. All covariances set to `variance`
- `Exponential`. Ornstein-Uhlenbeck kernel. Optionally, set keyword `gamma` for a gamma-exponential kernel
- `SE`, the squared exponential.
- `RQ`, the rational quadratic. To use this kernel, set keyword argument `alpha`
- `Linear`. To use this kernel, set keyword arguments `c` and `offset`
- `Matern`. To use this kernel, set keyword argument `nu`
- `Periodic`. To use this kernel, set keyword argument `p` for the period
mean : float
the mean of the gaussian process
variance : float
the output variance of the gaussian process (sigma^2)
lengthscale : float
the characteristic lengthscale used to generate the covariance matrix
References
----------
.. [1] URL: http://www.cs.toronto.edu/~duvenaud/cookbook/index.html
.. [2] Rasmussen, C.E., 2006. Gaussian processes for machine learning. URL: https://pdfs.semanticscholar.org/a9fe/ab0fe858dbde2eecff8b1f7c629cc6aff8ad.pdf
"""
def __init__(self, kernel="SE", lengthscale=1., mean=0., variance=1., c=1., gamma=1., alpha=1., offset=0., nu=5./2, p=1.):
self.vectorizable = True
self.lengthscale = lengthscale
self.mean = mean
self.variance = variance
self.kernel = kernel
self.kernel_function = {"Constant": lambda x1, x2: variance,
"Exponential": lambda x1, x2: variance * np.exp(-np.power(np.abs(x1 - x2) / lengthscale, gamma)),
"SE": lambda x1, x2: variance * np.exp(- np.square(x1 - x2) / (2 * np.square(lengthscale))),
"RQ": lambda x1, x2: variance * np.power((1 + np.square(x1 - x2) / (2 * alpha * np.square(lengthscale))), -alpha),
"Linear": lambda x1, x2: variance * (x1 - c) * (x2 - c) + offset,
"Matern": lambda x1, x2: variance if x1 - x2 == 0. else variance * (np.power(2, 1 - nu) / scipy.special.gamma(nu)) * np.power(np.sqrt(2 * nu) * np.abs(x1 - x2) / lengthscale, nu) * scipy.special.kv(nu, np.sqrt(2 * nu) * np.abs(x1 - x2) / lengthscale),
"Periodic":lambda x1, x2: variance * np.exp(- 2 * np.square(np.sin(np.pi * np.abs(x1 - x2) / p))),
}[kernel]
def sample_next(self, time, samples, errors):
"""Sample a single time point
Parameters
----------
time : number
Time at which a sample was required
Returns
-------
float
sampled signal for time t
"""
raise NotImplementedError
def sample_vectorized(self, time_vector):
"""Sample entire series based off of time vector
Parameters
----------
time_vector : array-like
Timestamps for signal generation
Returns
-------
array-like
sampled signal for time vector
"""
cartesian_time = np.dstack(np.meshgrid(time_vector, time_vector)).reshape(-1, 2)
covariance_matrix = (np.vectorize(self.kernel_function)(cartesian_time[:, 0], cartesian_time[:, 1])).reshape(-1, time_vector.shape[0])
covariance_matrix[np.diag_indices_from(covariance_matrix)] += 1e-12 # Add small value to diagonal for numerical stability
return np.random.multivariate_normal(mean=np.full(shape=(time_vector.shape[0],), fill_value=self.mean), cov=covariance_matrix)
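# Hypothetical usage sketch (not part of the original module): draw one GP
# realization with the squared-exponential kernel over 100 time points.
if __name__ == "__main__":
    gp = GaussianProcess(kernel="SE", lengthscale=0.5, variance=1.0)
    t = np.linspace(0, 10, 100)
    print(gp.sample_vectorized(t).shape)  # (100,)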
|
{"hexsha": "5b64a6a38a2ff7cad0ac88cfb47d8a234db8ebe3", "size": 4007, "ext": "py", "lang": "Python", "max_stars_repo_path": "timesynth/signals/gaussian_process.py", "max_stars_repo_name": "swight-prc/TimeSynth", "max_stars_repo_head_hexsha": "9b10a276e90fee145c9f69c15195d028c78214bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 242, "max_stars_repo_stars_event_min_datetime": "2016-11-03T21:26:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:33:50.000Z", "max_issues_repo_path": "timesynth/signals/gaussian_process.py", "max_issues_repo_name": "swight-prc/TimeSynth", "max_issues_repo_head_hexsha": "9b10a276e90fee145c9f69c15195d028c78214bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2017-04-06T18:47:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-30T18:18:56.000Z", "max_forks_repo_path": "timesynth/signals/gaussian_process.py", "max_forks_repo_name": "swight-prc/TimeSynth", "max_forks_repo_head_hexsha": "9b10a276e90fee145c9f69c15195d028c78214bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 56, "max_forks_repo_forks_event_min_datetime": "2017-08-31T14:32:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T01:03:41.000Z", "avg_line_length": 45.0224719101, "max_line_length": 283, "alphanum_fraction": 0.5969553282, "include": true, "reason": "import numpy,import scipy", "num_tokens": 994}
|
/* Copyright 2003-2008 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See http://www.boost.org/libs/multi_index for library home page.
*/
#ifndef BOOST_MULTI_INDEX_DETAIL_UINTPTR_TYPE_HPP
#define BOOST_MULTI_INDEX_DETAIL_UINTPTR_TYPE_HPP
#if defined(_MSC_VER)&&(_MSC_VER>=1200)
#pragma once
#endif
#include <boost/config.hpp> /* keep it first to prevent nasty warns in MSVC */
#include <boost/mpl/bool.hpp>
namespace boost{
namespace multi_index{
namespace detail{
/* has_uintptr_type is an MPL integral constant determining whether
* there exists an unsigned integral type with the same size as
* void *.
* uintptr_type is such a type if has_uintptr is true, or unsigned int
* otherwise.
* Note that uintptr_type is more restrictive than C99 uintptr_t,
* where an integral type with size greater than that of void *
* would be conformant.
*/
template<int N>struct uintptr_candidates;
template<>struct uintptr_candidates<-1>{typedef unsigned int type;};
template<>struct uintptr_candidates<0> {typedef unsigned int type;};
template<>struct uintptr_candidates<1> {typedef unsigned short type;};
template<>struct uintptr_candidates<2> {typedef unsigned long type;};
#if defined(BOOST_HAS_LONG_LONG)
template<>struct uintptr_candidates<3> {typedef unsigned long long type;};
#else
template<>struct uintptr_candidates<3> {typedef unsigned int type;};
#endif
#if defined(BOOST_HAS_MS_INT64)
template<>struct uintptr_candidates<4> {typedef unsigned __int64 type;};
#else
template<>struct uintptr_candidates<4> {typedef unsigned int type;};
#endif
struct uintptr_aux
{
BOOST_STATIC_CONSTANT(int,index=
sizeof(void*)==sizeof(uintptr_candidates<0>::type)?0:
sizeof(void*)==sizeof(uintptr_candidates<1>::type)?1:
sizeof(void*)==sizeof(uintptr_candidates<2>::type)?2:
sizeof(void*)==sizeof(uintptr_candidates<3>::type)?3:
sizeof(void*)==sizeof(uintptr_candidates<4>::type)?4:-1);
BOOST_STATIC_CONSTANT(bool,has_uintptr_type=(index>=0));
typedef uintptr_candidates<index>::type type;
};
typedef mpl::bool_<uintptr_aux::has_uintptr_type> has_uintptr_type;
typedef uintptr_aux::type uintptr_type;
} /* namespace multi_index::detail */
} /* namespace multi_index */
} /* namespace boost */
#endif
|
{"hexsha": "4fbbeccc95c01220087f5d8d8f5c99471923c575", "size": 2432, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src-2007/public/python/boost/multi_index/detail/uintptr_type.hpp", "max_stars_repo_name": "KyleGospo/City-17-Episode-One-Source", "max_stars_repo_head_hexsha": "2bc0bb56a2e0a63d963755e2831c15f2970c38e7", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 30.0, "max_stars_repo_stars_event_min_datetime": "2016-04-23T04:55:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-19T10:26:27.000Z", "max_issues_repo_path": "src-2007/public/python/boost/multi_index/detail/uintptr_type.hpp", "max_issues_repo_name": "KyleGospo/City-17-Episode-One-Source", "max_issues_repo_head_hexsha": "2bc0bb56a2e0a63d963755e2831c15f2970c38e7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-12-26T21:49:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-11T04:03:44.000Z", "max_forks_repo_path": "src-2007/public/python/boost/multi_index/detail/uintptr_type.hpp", "max_forks_repo_name": "KyleGospo/City-17-Episode-One-Source", "max_forks_repo_head_hexsha": "2bc0bb56a2e0a63d963755e2831c15f2970c38e7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2016-04-26T13:16:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T06:13:14.000Z", "avg_line_length": 31.5844155844, "max_line_length": 78, "alphanum_fraction": 0.7454769737, "num_tokens": 559}
|
import networkx as nx
untypedkami = nx.DiGraph()
untypedkami.add_nodes_from(
[
"agent",
"region",
"residue",
"locus",
"state",
"mod",
"syn",
"deg",
"bnd",
"brk",
"is_bnd",
"is_free",
]
)
untypedkami.add_edges_from(
[
("region", "agent"),
("residue", "agent"),
("residue", "region"),
("state", "agent"),
("syn", "agent"),
("deg", "agent"),
("state", "region"),
("state", "residue"),
("locus", "agent"),
("locus", "region"),
("mod", "state"),
("locus", "bnd"),
("locus", "brk"),
("locus", "is_bnd"),
("locus", "is_free"),
("agent", "mod")
]
)
untyped_base_kami = nx.DiGraph()
untyped_base_kami.add_nodes_from(
[
"component",
"test",
"state",
"action"
]
)
untyped_base_kami.add_edges_from(
[
("component", "component"),
("state", "component"),
("component", "action"),
("action", "component"),
("component", "test"),
("action", "state")
]
)
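# Typing of the KAMI metamodel by the base metamodel above: each KAMI node is
# sent to its base-KAMI type (note that every KAMI edge projects onto a
# base-KAMI edge, i.e. the map is a graph homomorphism).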
kami_basekami = {
"agent": "component",
"region": "component",
"residue": "component",
"locus": "component",
"state": "state",
"mod": "action",
"syn": "action",
"deg": "action",
"bnd": "action",
"brk": "action",
"is_bnd": "test",
"is_free": "test"
}
|
{"hexsha": "5e3cd8eb30e1971790b142fe244c4a9c644fd5ba", "size": 1477, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/kami/metamodels.py", "max_stars_repo_name": "Xogiga/KAMIStudio", "max_stars_repo_head_hexsha": "bdaebc3def154d22292cd2753391a9523f8a42d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "server/kami/metamodels.py", "max_issues_repo_name": "Xogiga/KAMIStudio", "max_issues_repo_head_hexsha": "bdaebc3def154d22292cd2753391a9523f8a42d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "server/kami/metamodels.py", "max_forks_repo_name": "Xogiga/KAMIStudio", "max_forks_repo_head_hexsha": "bdaebc3def154d22292cd2753391a9523f8a42d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1818181818, "max_line_length": 35, "alphanum_fraction": 0.4407582938, "include": true, "reason": "import networkx", "num_tokens": 417}
|
import os
from typing import List
import numpy as np
from pydrake.math import RollPitchYaw
from pydrake.all import (PiecewisePolynomial, RigidTransform)
from qsim.simulator import (
QuasistaticSimParameters)
from robotics_utilities.iiwa_controller.utils import (
create_iiwa_controller_plant)
from qsim.model_paths import (models_dir, box3d_8cm_sdf_path,
box3d_7cm_sdf_path, box3d_6cm_sdf_path)
from .inverse_kinematics import calc_iwa_trajectory_for_point_tracking
q_model_path = os.path.join(models_dir, 'q_sys', 'iiwa_and_boxes.yml')
def concatenate_traj_list(traj_list: List[PiecewisePolynomial]):
"""
Concatenates a list of PiecewisePolynomials into a single
PiecewisePolynomial.
    :param traj_list: list of trajectories, each starting at time 0.
    :return: the single concatenated trajectory.
"""
traj = traj_list[0]
for a in traj_list[1:]:
a.shiftRight(traj.end_time())
traj.ConcatenateInTime(a)
return traj
#%% Create trajectories.
q_a_initial_guess = np.array([0, 0, 0, -1.75, 0, 1.0, 0])
plant_iiwa, _ = create_iiwa_controller_plant(gravity=[0, 0, 0])
durations = np.array([1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0]) * 2
n_blocks_to_stack = 3
l = 0.075
p_WQ_list = np.array([
[0.555, 0, 0.10],
[0.555, 0, 0.10],
[0.555, 0, 0.17 + n_blocks_to_stack * l],
[0.69, 0, 0.17 + n_blocks_to_stack * l],
[0.69, 0, 0.17 + (n_blocks_to_stack - 1) * l + 0.005],
[0.69, 0, 0.17 + (n_blocks_to_stack - 1) * l + 0.005],
[0.69, 0, 0.25 + n_blocks_to_stack * l],
[0.555, 0, 0.25 + n_blocks_to_stack * l],
])
schunk_setpoints = [0.05, 0.02, 0.02, 0.02, 0.02, 0.05, 0.05, 0.05]
# frame L7 orientation
R_WL7_0 = RollPitchYaw(0, np.pi, 0).ToRotationMatrix()
R_WL7_1 = RollPitchYaw(0, np.pi, np.pi/2).ToRotationMatrix()
R_WL7_list = [
R_WL7_0, R_WL7_0, R_WL7_0, R_WL7_1, R_WL7_1, R_WL7_1, R_WL7_1, R_WL7_0]
q_iiwa_traj_list = []
q_schunk_traj_list = []
x_schunk_traj_list = []
for i, duration in enumerate(durations):
q_iiwa_traj, q_knots = calc_iwa_trajectory_for_point_tracking(
plant=plant_iiwa,
duration=duration,
num_knot_points=10,
p_WQ_start=p_WQ_list[i],
p_WQ_offset=p_WQ_list[i + 1] - p_WQ_list[i],
R_WL7_start=R_WL7_list[i],
R_WL7_final=R_WL7_list[i + 1],
q_initial_guess=q_a_initial_guess,
p_L7Q=np.array([0, 0, 0.15]))
q_iiwa_traj_list.append(q_iiwa_traj)
q_schunk_traj_list.append(
PiecewisePolynomial.FirstOrderHold(
[0., duration],
np.array([[-schunk_setpoints[i], schunk_setpoints[i]],
[-schunk_setpoints[i+1], schunk_setpoints[i+1]]]).T))
q_a_initial_guess = q_knots[-1]
q_iiwa_traj = concatenate_traj_list(q_iiwa_traj_list)
q_schunk_traj = concatenate_traj_list(q_schunk_traj_list)
# other constants for simulation.
iiwa_name = "iiwa"
schunk_name = "schunk"
X_L7E = RigidTransform(
RollPitchYaw(np.pi/2, 0, np.pi/2), np.array([0, 0, 0.114]))
q_u0_list = np.zeros((10, 7))
q_u0_list[0] = [1, 0, 0, 0, 0.55, 0, 0.03]
q_u0_list[1] = [1, 0, 0, 0, 0.70, 0, 0.04]
q_u0_list[2] = [1, 0, 0, 0, 0.70, 0., 0.115]
q_u0_list[3] = [1, 0, 0, 0, 0.70, 0., 0.19]
q_u0_list[4] = [1, 0, 0, 0, 0.50, -0.2, 0.04]
q_u0_list[5] = [1, 0, 0, 0, 0.50, -0.2, 0.115]
q_u0_list[6] = [1, 0, 0, 0, 0.50, -0.2, 0.19]
q_u0_list[7] = [1, 0, 0, 0, 0.45, 0.2, 0.04]
q_u0_list[8] = [1, 0, 0, 0, 0.45, 0.2, 0.115]
q_u0_list[9] = [1, 0, 0, 0, 0.48, 0.3, 0.04]
q0_dict_str = {"box%i" % i: q_u0_i for i, q_u0_i in enumerate(q_u0_list)}
t_start = q_iiwa_traj.start_time()
q0_dict_str[iiwa_name] = q_iiwa_traj.value(t_start).ravel()
q0_dict_str[schunk_name] = q_schunk_traj.value(t_start).ravel()
q_a_traj_dict_str = {iiwa_name: q_iiwa_traj, schunk_name: q_schunk_traj}
|
{"hexsha": "97d036ee47d96ff90b1e41293d346e77f70ac338", "size": 3741, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/iiwa_block_stacking/simulation_parameters.py", "max_stars_repo_name": "pangtao22/quasistatic_simulator", "max_stars_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-15T03:58:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T17:26:16.000Z", "max_issues_repo_path": "examples/iiwa_block_stacking/simulation_parameters.py", "max_issues_repo_name": "pangtao22/quasistatic_simulator", "max_issues_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-08-16T22:27:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T18:06:07.000Z", "max_forks_repo_path": "examples/iiwa_block_stacking/simulation_parameters.py", "max_forks_repo_name": "pangtao22/quasistatic_simulator", "max_forks_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9743589744, "max_line_length": 75, "alphanum_fraction": 0.6621224272, "include": true, "reason": "import numpy", "num_tokens": 1506}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 nicolas <nicolas@laptop>
#
# Distributed under terms of the MIT license.
"""
Support Vector Machine
======================
Cost function plots.
"""
import matplotlib.pyplot as plt
from math import log, exp
X = []
y = []
for z in range(-5, 6):
y.append(-log(1 - 1/(1 + exp(-z))))
X.append(z)
if (z == -1.0):
        print(z, '=>', -log(1 - 1/(1 + exp(-z))))
plt.plot(X, y)
plt.show()
print(y)
# Logistic Regression Cost Function
# =================================
# 0 < z < 1
import numpy as np
for z in np.arange(0.1, 1, 0.05):
    y0 = -log(1 - z)
    y1 = -log(z)
    print(z, y0, y1)
|
{"hexsha": "0acbd4cb6916df2003e8e826c56c9f829807985c", "size": 657, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/some_plots.py", "max_stars_repo_name": "ng-nicolas/ml-small-projects", "max_stars_repo_head_hexsha": "92775fbee220f1e83d9891749c158a93aafc19ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/some_plots.py", "max_issues_repo_name": "ng-nicolas/ml-small-projects", "max_issues_repo_head_hexsha": "92775fbee220f1e83d9891749c158a93aafc19ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/some_plots.py", "max_forks_repo_name": "ng-nicolas/ml-small-projects", "max_forks_repo_head_hexsha": "92775fbee220f1e83d9891749c158a93aafc19ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.8461538462, "max_line_length": 48, "alphanum_fraction": 0.5296803653, "include": true, "reason": "import numpy", "num_tokens": 210}
|
program autobk
c
c          autobk version 2.941 10-Jan-2004
c
c author Matthew Newville, The University of Chicago
c e-mail newville@cars.uchicago.edu
c post GSECARS, Bldg 434A
c APS, Argonne National Laboratory
c                    Argonne, IL 60439 USA
c voice (630) 252-0431
c fax (630) 252-0443
c
c further information on this code is available at
c http://cars.uchicago.edu/~newville/autobk/
c
c --- copyright 1998,1999 matt newville
c --- copyright 1995 matt newville, university of washington
c
c - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
c
c autobk removes the background of x-ray-absorption fine-structure
c data. a spline function is used to approximate the background.
c the spline is chosen so that the resulting chi is optimized at
c low-r. the optimization minimizes the difference between the
c data and a standard chi(r) at low-r. the standard is used to
c estimate the leakage form the first shell into the low-r region,
c and since this leakage is a small portion of the first shell, the
c standard does not need to be an extremely accurate estimate of
c the true first shell. the standard chi should be a chi for which
c the background is trusted, and can be either a theoretical
c calculation or an experimental standard. if no standard chi is
c specified, the low-r components of chi(r) will be minimized.
c
c major revisions (for a complete revision record, contact matt) :
c
include 'autobk.h'
c------------------------------------------------------------------
c local variables
character system*10
logical first, domore, dorun
integer iinpf, ilogf, ilen, istrln
external istrln
data first, domore, dorun /.true.,.true.,.true./
data iinpf, ilogf / 2, 4/
data system /'unix'/
c system options: 'unix','vax','dos','mac'
call setsys(system,vaxflg,dosflg,macflg,unxflg)
call sca_init
call echo_init
call open_echofile('autobk.run')
call fstop_init('autobk.err')
c version & date
ilen = max(1,istrln(system))
versn = ' autobk: 2.941 10-Jan-2004 '
ilen = istrln(versn)
call echo(versn(1:ilen))
c------------------------------------------------------------------
c loop for each different running of program
100 continue
c initialize variables in common blocks, open files
call autint
c read input file
call autinp(iinpf, ilogf, first, domore, dorun)
if (dorun) then
c read in data, subtract pre-edge, reset fitting ranges, etc
call autdat
c do nonlinear least-square fittings for background, and
c  possibly e0 and amplitude of theory.
call autnls
c write out results to log file
call autlog(ilogf)
c write out data results to data files
call autout
end if
c continue on to next data set
if (domore) go to 100
c----------------------------------------------------------------
c finished: close files, and give happy ending message
close(iinpf)
close(ilogf)
call echo( ' autobk is finished.')
call echo( ' have a nice day.')
call close_echofile()
c end main program autobk
end
|
{"hexsha": "f17c54da626eb23f24272453a07499929e5fff90", "size": 3301, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/autobk/autobk.f", "max_stars_repo_name": "keechul/ifeffit", "max_stars_repo_head_hexsha": "306444e500cb3ecb1795fcbde9219369b003f1fa", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-09-16T12:41:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-08T05:17:16.000Z", "max_issues_repo_path": "src/autobk/autobk.f", "max_issues_repo_name": "bruceravel/ifeffit", "max_issues_repo_head_hexsha": "97f6458584e237a6a9e3681bb9b604c9d1ec9743", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-20T01:15:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-20T02:09:08.000Z", "max_forks_repo_path": "src/autobk/autobk.f", "max_forks_repo_name": "bruceravel/ifeffit", "max_forks_repo_head_hexsha": "97f6458584e237a6a9e3681bb9b604c9d1ec9743", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-03-22T19:27:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-23T07:47:31.000Z", "avg_line_length": 38.3837209302, "max_line_length": 68, "alphanum_fraction": 0.6155710391, "num_tokens": 902}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% == AIT CSIM Handout LaTeX Template ==
% == Credit ==
% Assoc. Prof. Matthew N. Dailey
% Computer Science and Information Management
% Asian Insitute of Technology
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass{article}
\usepackage{a4,url,upquote}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage[cmex10]{amsmath}
\usepackage{amssymb}
\usepackage{placeins}
\setlength{\textwidth}{6.5in}
\setlength{\textheight}{9in}
\setlength{\oddsidemargin}{0in}
\setlength{\evensidemargin}{0in}
\setlength{\topmargin}{0in}
\setlength{\headheight}{0in}
\setlength{\headsep}{0in}
\setlength{\footskip}{0.5in}
\newcommand{\bheading}[1]{\vspace{10pt} \noindent \textbf{#1}}
\begin{document}
\begin{tabbing}
\`\=\kill
\textbf{Workshop:} Workshop Name
\` September 11, 2015 \\
Asian Institute of Technology
\` Computer Science and Information Management \\
\textbf{Handout:} Workshop Title
\` \textbf{Instructor:} Matthew N.\ Dailey ({\tt\small mdailey@ait.asia})
\end{tabbing}
\hrule
\vspace{.25in}
\begin{center}
\textbf{\Large Workshop Title}
\end{center}
\vspace{.15in}
\noindent \textbf{Instructions:} In this workshop, you will learn how to do something.
% Inserting a tilde into LaTeX
% Credit:
% http://tex.stackexchange.com/questions/9363/how-does-one-insert-a-backslash-or-a-tilde-into-latex
\subsection*{Section 1}
\begin{itemize}
\item[-] {\tt code} -- Folder that contains code and scripts.
\begin{itemize}
\item[-] File {\tt workshop1.py}
\item[-] File {\tt workshop2.py}
\end{itemize}
\item[-] {\tt data} -- Folder that contains the datasets.
\begin{itemize}
\item[-] {\tt input} -- Folder that contains the input images.
\begin{itemize}
\item[-] {\tt folder\_1} -- Folder that contains some files.
\item[-] {\tt folder\_2} -- Folder that contains some other files.
\end{itemize}
\end{itemize}
\end{itemize}
\FloatBarrier
\subsection*{Section 2}
\noindent We will do a simple workshop. Figure~\ref{fig:csim-logo} shows
a logo of CSIM. \\
\begin{figure}[t]
\centering
\includegraphics[width=2in]{figures/csim}
\caption{CSIM Logo}
\label{fig:csim-logo}
\end{figure}
\noindent Let's follow these steps below.
\begin{enumerate}
\item Step 1
\item Step 2
\item Step 3
\end{enumerate}
\noindent The logo of CSIM is from the CSIM Web site~[1]. \\
\subsection*{References}
\begin{itemize}
\item[1] \url{http://www.cs.ait.ac.th/}
\end{itemize}
\end{document}
|
{"hexsha": "3de86baed48db5bcbb803da90c8296d51dc216ec", "size": 2598, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "handout.tex", "max_stars_repo_name": "aitcsim/ait-handout-latex-template", "max_stars_repo_head_hexsha": "8c0088644ac497114e4b8c8dd71db53891a15698", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-22T00:17:22.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-22T00:17:22.000Z", "max_issues_repo_path": "handout.tex", "max_issues_repo_name": "aitcsim/ait-handout-latex-template", "max_issues_repo_head_hexsha": "8c0088644ac497114e4b8c8dd71db53891a15698", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "handout.tex", "max_forks_repo_name": "aitcsim/ait-handout-latex-template", "max_forks_repo_head_hexsha": "8c0088644ac497114e4b8c8dd71db53891a15698", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0555555556, "max_line_length": 99, "alphanum_fraction": 0.6547344111, "num_tokens": 794}
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tf2rl.algos.gail import GAIL
from tf2rl.algos.policy_base import IRLPolicy
from tf2rl.networks.spectral_norm_dense import SNDense
class Discriminator(tf.keras.Model):
LOG_SIG_CAP_MAX = 2 # np.e**2 = 7.389
    LOG_SIG_CAP_MIN = -20  # np.e**-20 = 2.061e-09
EPS = 1e-6
def __init__(self, state_shape, action_dim, units=(32, 32),
n_latent_unit=32, enable_sn=False, name="Discriminator"):
super().__init__(name=name)
DenseClass = SNDense if enable_sn else Dense
self.l1 = DenseClass(units[0], name="L1", activation="relu")
self.l2 = DenseClass(units[1], name="L2", activation="relu")
self.l_mean = DenseClass(n_latent_unit, name="L_mean", activation="linear")
self.l_logstd = DenseClass(n_latent_unit, name="L_std", activation="linear")
self.l3 = DenseClass(1, name="L3", activation="sigmoid")
dummy_state = tf.constant(
np.zeros(shape=(1,)+state_shape, dtype=np.float32))
dummy_action = tf.constant(
np.zeros(shape=[1, action_dim], dtype=np.float32))
with tf.device("/cpu:0"):
self([dummy_state, dummy_action])
def call(self, inputs):
# Encoder
features = tf.concat(inputs, axis=1)
features = self.l1(features)
features = self.l2(features)
means = self.l_mean(features)
logstds = self.l_logstd(features)
logstds = tf.clip_by_value(
logstds, self.LOG_SIG_CAP_MIN, self.LOG_SIG_CAP_MAX)
latents = means + tf.random.normal(shape=means.shape) * tf.math.exp(logstds)
# Binary classifier
out = self.l3(latents)
return out, means, logstds
def compute_reward(self, inputs):
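        # Note: the reward uses the latent *means* only (no noise injected),
        # giving a deterministic log D(s, a) signal for the policy update.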
features = tf.concat(inputs, axis=1)
features = self.l1(features)
features = self.l2(features)
means = self.l_mean(features)
return tf.math.log(self.l3(means) + 1e-8)
class VAIL(GAIL):
def __init__(
self,
state_shape,
action_dim,
units=(32, 32),
n_latent_unit=32,
lr=5e-5,
kl_target=0.5,
reg_param=0.,
enable_sn=False,
enable_gp=False,
name="VAIL",
**kwargs):
"""
Args:
state_shape:
action_dim:
units:
n_latent_unit:
lr:
kl_target:
reg_param:
enable_sn: bool
If true, add spectral normalization in Dense layer
enable_gp: bool
If true, add gradient penalty to loss function
name:
**kwargs:
"""
IRLPolicy.__init__(
self, name=name, n_training=10, **kwargs)
self.disc = Discriminator(
state_shape=state_shape, action_dim=action_dim,
units=units, n_latent_unit=n_latent_unit,
enable_sn=enable_sn)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
self._kl_target = kl_target
self._reg_param = tf.Variable(reg_param, dtype=tf.float32)
self._step_reg_param = tf.constant(1e-5, dtype=tf.float32)
self._enable_gp = enable_gp
def train(self, agent_states, agent_acts,
expert_states, expert_acts, **kwargs):
loss, accuracy, real_kl, fake_kl, js_divergence = self._train_body(
agent_states, agent_acts, expert_states, expert_acts)
tf.summary.scalar(name=self.policy_name+"/DiscriminatorLoss", data=loss)
tf.summary.scalar(name=self.policy_name+"/Accuracy", data=accuracy)
tf.summary.scalar(name=self.policy_name+"/RegParam", data=self._reg_param)
tf.summary.scalar(name=self.policy_name+"/RealLatentKL", data=real_kl)
tf.summary.scalar(name=self.policy_name+"/FakeLatentKL", data=fake_kl)
tf.summary.scalar(name=self.policy_name+"/JSdivergence", data=js_divergence)
@tf.function
def _compute_kl_latent(self, means, log_stds):
r"""
Compute KL divergence of latent spaces over standard Normal
distribution to compute loss in eq.5. The formulation of
KL divergence between two normal distributions is as follows:
ln(\sigma_2 / \sigma_1) + {(\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2} / (2 * \sigma_2^2)
Since the target distribution is standard Normal distributions,
we can assume `\sigma_2 = 1` and `mean_2 = 0`.
So, the resulting equation can be computed as:
ln(1 / \sigma_1) + (\mu_1^2 + \sigma_1^2 - 1) / 2
"""
return tf.reduce_sum(-log_stds +
(tf.square(means) + tf.square(tf.exp(log_stds)) - 1.) / 2., axis=-1)
@tf.function
def _train_body(self, agent_states, agent_acts, expert_states, expert_acts):
epsilon = 1e-8
with tf.device(self.device):
with tf.GradientTape() as tape:
# Compute discriminator loss
real_logits, real_means, real_logstds = self.disc(
[expert_states, expert_acts])
fake_logits, fake_means, fake_logstds = self.disc(
[agent_states, agent_acts])
disc_loss = -(tf.reduce_mean(tf.math.log(real_logits + epsilon)) +
tf.reduce_mean(tf.math.log(1. - fake_logits + epsilon)))
# Compute KL loss
real_kl = tf.reduce_mean(
self._compute_kl_latent(real_means, real_logstds))
fake_kl = tf.reduce_mean(
self._compute_kl_latent(fake_means, fake_logstds))
kl_loss = 0.5 * (real_kl - self._kl_target +
fake_kl - self._kl_target)
loss = disc_loss + self._reg_param * kl_loss
# Gradient penalty
if self._enable_gp:
raise NotImplementedError
grads = tape.gradient(loss, self.disc.trainable_variables)
self.optimizer.apply_gradients(
zip(grads, self.disc.trainable_variables))
# Update reguralizer parameter \beta in eq.(9)
self._reg_param.assign(tf.maximum(
tf.constant(0., dtype=tf.float32),
self._reg_param + self._step_reg_param * (kl_loss - self._kl_target)))
accuracy = (tf.reduce_mean(tf.cast(real_logits >= 0.5, tf.float32)) / 2. +
tf.reduce_mean(tf.cast(fake_logits < 0.5, tf.float32)) / 2.)
js_divergence = self._compute_js_divergence(
fake_logits, real_logits)
return loss, accuracy, real_kl, fake_kl, js_divergence
|
{"hexsha": "18888b8db23ccb42d571dc875c3ca719e169cf4f", "size": 6766, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf2rl/algos/vail.py", "max_stars_repo_name": "yamada-github-account/tf2rl", "max_stars_repo_head_hexsha": "b380c9d7de8b07c3f263b4637b13c0787c42eeac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 453, "max_stars_repo_stars_event_min_datetime": "2019-04-21T10:17:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T05:24:48.000Z", "max_issues_repo_path": "tf2rl/algos/vail.py", "max_issues_repo_name": "yamada-github-account/tf2rl", "max_issues_repo_head_hexsha": "b380c9d7de8b07c3f263b4637b13c0787c42eeac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 132, "max_issues_repo_issues_event_min_datetime": "2019-04-27T08:17:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-13T02:45:35.000Z", "max_forks_repo_path": "tf2rl/algos/vail.py", "max_forks_repo_name": "yamada-github-account/tf2rl", "max_forks_repo_head_hexsha": "b380c9d7de8b07c3f263b4637b13c0787c42eeac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 105, "max_forks_repo_forks_event_min_datetime": "2019-04-27T13:03:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T12:21:31.000Z", "avg_line_length": 41.7654320988, "max_line_length": 102, "alphanum_fraction": 0.5991723322, "include": true, "reason": "import numpy", "num_tokens": 1641}
|
# Split Monopole `GiRaFFEfood` Initial Data for `GiRaFFE`
## Author: Patrick Nelson
### NRPy+ Source Code for this module: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Split_Monopole.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Split_Monopole.py)
**Notebook Status:** <font color='green'><b> In-Progress </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation1). The initial data has been validated against the original `GiRaFFE`, as documented [here](Tutorial-Start_to_Finish_UnitTest-GiRaFFEfood_NRPy.ipynb).
## Introduction:
We need to "feed" our giraffe with initial data to evolve. There are several different choices of initial data we can use here; here, we will only be implementing the "Split Monopole" initial data, given by Table 3 in [the original paper](https://arxiv.org/pdf/1704.00599.pdf). This solution is based on the Blandford-Znajek force-free monopole; it is an approximation for the case of small spin with the solution inverted in the lower hemisphere. The vector potential is
\begin{align}
A_r &= -\frac{aC}{8}\cos\theta \left( 1 + \frac{4M}{r} \right), \\
A_\theta &= 0, \\
A_\phi &= M^2 C [1-\cos \theta + a^2 f(r) \cos \theta \sin^2 \theta],
\end{align}
and the electric field is
\begin{align}
E_r &= -\frac{C a^3}{8\alpha M^3} f'(r) \cos \theta \sin^2 \theta \\
E_\theta &= -\frac{Ca}{8\alpha}[\sin \theta + a^2 f(r) \sin \theta (2 \cos^2 \theta-\sin^2 \theta) ] - \beta^r \sqrt{\gamma} \frac{a C}{8 r^2}\left( 1+\frac{4M}{r}\right) \\
E_\phi &= \frac{\beta^r}{\alpha M} Ca^2 f'(r) \cos \theta \sin^2 \theta,
\end{align}
where
\begin{align}
f(r) =& \ \frac{r^2(2r-3M)}{8M^3} L \left(\frac{2M}{r}\right) \\
&+ \frac{M^2+3Mr-6r^2}{12M^2} \ln \frac{r}{2M} \\
&+ \frac{11}{72} + \frac{M}{3r} + \frac{r}{2M} - \frac{r^2}{2M^2}, \\
L(x) =& \ {\rm Li}_2(x) + \frac{1}{2} \ln x \ln (1-x).
\end{align}
The function ${\rm Li}_2(x)$ is known as the dilogarithm function, defined as
$$ {\rm Li}_2(x) = -\int_{0}^{1} \frac{\ln(1-tx)}{t} dt = \sum_{k=1}^{\infty} \frac{x^k}{k^2}. $$
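As a quick illustrative check (not part of the original notebook), the truncated series above converges rapidly for $|x|<1$ and can be compared against SymPy's built-in `polylog`:

```python
# Compare a 200-term partial sum of the dilogarithm series at x = 1/2
# against SymPy's built-in polylog; the difference is pure truncation error.
import sympy as sp
x = sp.Rational(1, 2)
series_approx = sum(x**k/sp.Integer(k)**2 for k in range(1, 200))
print(sp.N(series_approx - sp.polylog(2, x), 10))  # effectively zero
```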
Now, to use this initial data scheme, we need to transform the above into the quantities actually tracked by `GiRaFFE` and HydroBase: $A_i$, $B^i$, $\tilde{S}_i$, $v^i$, and $\Phi$. Of these quantities, `GiRaFFEfood` will only set $A_i$, $v^i$, and $\Phi=0$, then call a separate function to calculate $\tilde{S}_i$; `GiRaFFE` itself will call a function to set $B^i$ before the time-evolution begins. This can be done with eqs. 16 and 18, here given in that same order:
\begin{align}
v_{(n)}^i &= \frac{\epsilon^{ijk} E_j B_k}{B^2} \\
B^i &= \frac{[ijk]}{\sqrt{\gamma}} \partial_j A_k \\
\end{align}
In the simulations, $B^i$ will be calculated numerically from $A_i$; however, it will be useful to analytically calculate $B^i$ to use in calculating the initial $v^i$.
This module requires the use of the NRPy+ [Shifted Kerr-Schild initial data module](../Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb)
<a id='toc'></a>
# Table of Contents:
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Import core NRPy+ modules and set NRPy+ parameters
1. [Step 2](#aux_func): Write helpful auxiliary functions
1. [Step 3](#set_a_i): Set the vector $A_i$
1. [Step 4](#set_vi): Calculate $v^i$ from $B_i$ and $E_i$
1. [Step 5](#code_validation1): Code Validation against `GiRaFFEfood_NRPy.GiRaFFEfood_NRPy` NRPy+ Module
1. [Step 6](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Import core NRPy+ modules and set NRPy+ parameters \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
Here, we will import the NRPy+ core modules after adding NRPy+ to the directory path.
```python
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
# Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian
from outputC import nrpyAbs
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data.
import reference_metric as rfm # NRPy+: Reference metric support
import Min_Max_and_Piecewise_Expressions as noif
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = "GiRaFFEfood_NRPy_Split_Monopole"
# The solution depends on a constant C
C_SM = par.Cparameters("REAL",thismodule,["C_SM"], 1.0)
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gf # Import this now to set parameters
```
<a id='aux_func'></a>
# Step 2: Write helpful auxiliary functions \[Back to [top](#toc)\]
$$\label{aux_func}$$
We begin by coding the function $f(r)$ with the inputs $r$ and mass $M$. We avoid calling the function simply `f` out of an abundance of caution, as we do not want to risk overwriting an identically named function elsewhere.
\begin{align}
f(r) =& \ \frac{r^2(2r-3M)}{8M^3} L \left(\frac{2M}{r}\right) \\
&+ \frac{M^2+3Mr-6r^2}{12M^2} \ln \frac{r}{2M} \\
&+ \frac{11}{72} + \frac{M}{3r} + \frac{r}{2M} - \frac{r^2}{2M^2}, \\
\end{align}
where $L(x) = {\rm Li}_2 (x) + \frac{1}{2} \ln x \ln (1-x)$ and ${\rm Li}_2 (x)$ is known as the dilogarithm function. We will use the C library `gsl` to handle this special function. In order to do so, we must tell NRPy+ that `nrpyDilog` will be our code word that means "use the `gsl` dilogarithm function, `gsl_sf_dilog`". This is done by simply creating a new sympy function using `Function()` and then adding the name-value pair to the dictionary `custom_functions_for_SymPy_ccode`.
```python
nrpyDilog = sp.Function('nrpyDilog')
from outputC import custom_functions_for_SymPy_ccode
custom_functions_for_SymPy_ccode["nrpyDilog"] = "gsl_sf_dilog"
def f_of_r(r,M):
if par.parval_from_str("drop_fr"):
return sp.sympify(0)
x = sp.sympify(2)*M/r
L = sp.sympify(0) + \
noif.coord_greater_bound(x,sp.sympify(0))*noif.coord_less_bound(x,sp.sympify(1))*nrpyDilog(x)\
+sp.Rational(1,2)*sp.log(noif.coord_greater_bound(x,sp.sympify(0))*x + noif.coord_leq_bound(x,sp.sympify(1)))\
*sp.log(noif.coord_less_bound(x,sp.sympify(1))*(sp.sympify(1)-x) + noif.coord_geq_bound(x,sp.sympify(1)))
f = r*r*(sp.sympify(2)*r-sp.sympify(3)*M)*sp.Rational(1,8)/(M**3)*L\
+(M*M+sp.sympify(3)*M*r-sp.sympify(6)*r*r)*sp.Rational(1,12)/(M*M)*sp.log(r*sp.Rational(1,2)/M)\
+sp.Rational(11,72) + M*sp.Rational(1,3)/r + r*sp.Rational(1,2)/M - r*r*sp.Rational(1,2)/(M*M)
return f
```
We will also need the derivative of $f(r)$:
\begin{align}
f'(r) =& \ \frac{2r(2r-3M)+2r^2}{8M^3} L \left(\frac{2M}{r}\right) + \frac{r^2(2r-3M)}{8M^3} L' \left(\frac{2M}{r}\right) \left( -\frac{2M}{r^2} \right) \\
&+ \frac{3M-12r}{12M^2} \ln \frac{r}{2M} + \frac{M^2+3Mr-6r^2}{12M^2} \frac{2M}{r} \frac{1}{2M} \\
&- \frac{M}{3r^2} + \frac{1}{2M} - \frac{r}{M^2}. \\
\end{align}
Because $$\frac{\partial {\rm Li}_2 (x)}{\partial x} = \frac{{\rm Li}_1 (x)}{x} = \frac{-\ln (1-x)}{x},$$
we know that
\begin{align}
L'(x) &= \frac{-\ln (1-x)}{x} + \frac{\ln (1-x)}{2x} - \frac{\ln (x)}{2-2x} \\
&= -\frac{1}{2} \left( \frac{\ln (1-x)}{x} + \frac{\ln(x)}{1-x} \right).
\end{align}
We simplify this some.
\begin{align}
f'(r) =& \ \frac{3r^2-3Mr}{4M^3} L \left(\frac{2M}{r}\right) - \frac{2r-3M}{4M^2} L' \left(\frac{2M}{r}\right)\\
&+ \frac{3M-12r}{12M^2} \ln \frac{r}{2M} + \frac{M^2+3Mr-6r^2}{12rM^2} \\
&- \frac{M}{3r^2} + \frac{1}{2M} - \frac{r}{M^2}. \\
\end{align}
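As a sanity check on the algebra above (illustrative only; nothing in this cell is used by the module itself), we can differentiate $f(r)$ symbolically with SymPy's built-in `polylog` and compare against the simplified $f'(r)$:

```python
# Differentiate f(r) symbolically and subtract the simplified f'(r) above;
# the result should simplify to 0.
import sympy as sp
r, M = sp.symbols('r M', positive=True)
x = 2*M/r
L  = sp.polylog(2, x) + sp.Rational(1, 2)*sp.log(x)*sp.log(1 - x)
Lp = -sp.Rational(1, 2)*(sp.log(1 - x)/x + sp.log(x)/(1 - x))
f  = (r**2*(2*r - 3*M)/(8*M**3)*L
      + (M**2 + 3*M*r - 6*r**2)/(12*M**2)*sp.log(r/(2*M))
      + sp.Rational(11, 72) + M/(3*r) + r/(2*M) - r**2/(2*M**2))
fp = (3*r*(r - M)/(4*M**3)*L - (2*r - 3*M)/(4*M**2)*Lp
      + (3*M - 12*r)/(12*M**2)*sp.log(r/(2*M))
      + (M**2 + 3*M*r - 6*r**2)/(12*r*M**2)
      - M/(3*r**2) + 1/(2*M) - r/M**2)
print(sp.simplify(sp.expand_func(sp.diff(f, r)) - fp))  # should print 0
```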
```python
def fp_of_r(r,M):
if par.parval_from_str("drop_fr"):
return sp.sympify(0)
x = sp.sympify(2)*M/r
L = sp.sympify(0) + \
noif.coord_greater_bound(x,sp.sympify(0))*noif.coord_less_bound(x,sp.sympify(1))*nrpyDilog(x)\
+sp.Rational(1,2)*sp.log(noif.coord_greater_bound(x,sp.sympify(0))*x + noif.coord_leq_bound(x,sp.sympify(1)))\
*sp.log(noif.coord_less_bound(x,sp.sympify(1))*(sp.sympify(1)-x) + noif.coord_geq_bound(x,sp.sympify(1)))
Lp = sp.sympify(0) + noif.coord_greater_bound(x,sp.sympify(0))*noif.coord_less_bound(x,sp.sympify(1)) * -sp.Rational(1,2) *\
(sp.log(noif.coord_less_bound(x,sp.sympify(1))*(sp.sympify(1)-x) + noif.coord_geq_bound(x,sp.sympify(1)))/(x+sp.sympify(1e-100))\
+sp.log(noif.coord_greater_bound(x,sp.sympify(0))*x + noif.coord_leq_bound(x,sp.sympify(1)))/(sp.sympify(1)-x+sp.sympify(1e-100)))
    # note the minus sign on the Lp term, from d/dr (2M/r) = -2M/r^2
    fp = sp.sympify(3)*r*(r-M)*sp.Rational(1,4)/(M**3)*L - (sp.sympify(2)*r-sp.sympify(3)*M)*sp.Rational(1,4)/(M*M)*Lp\
+(sp.sympify(3)*M-12*r)*sp.Rational(1,12)/(M*M)*sp.log(r*sp.Rational(1,2)/M) + (M*M+sp.sympify(3)*M*r-sp.sympify(6)*r*r)*sp.Rational(1,12)/(r*M*M)\
-M*sp.Rational(1,3)/(r*r) + sp.Rational(1,2)/M - r/(M*M)
return fp
```
<a id='set_a_i'></a>
# Step 3: Set the vector $A_i$ \[Back to [top](#toc)\]
$$\label{set_a_i}$$
Now, we will code the components of the vector potential $A_i$ in spherical coordinates and a spherical basis. The outputs from these functions can then be easily converted to other coordinate systems by giving the spherical coordinates as inputs in terms of the desired coordinates (e.g., if we want to use Cartesian coordinates, we pass $r = \sqrt{x^2+y^2+z^2}$ and so on). They can also be transformed into any other basis using the appropriate Jacobian matrix.
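For instance, a minimal sketch of the coordinate substitution (illustrative only; the notebook itself relies on NRPy+'s reference-metric machinery for this):

```python
# Express (r, theta, phi) in terms of Cartesian coordinates; the symbol
# names xx, yy, zz are placeholders for illustration.
import sympy as sp
xx, yy, zz = sp.symbols('xx yy zz', real=True)
r_of_xyz  = sp.sqrt(xx**2 + yy**2 + zz**2)   # r(x, y, z)
th_of_xyz = sp.acos(zz/r_of_xyz)             # theta(x, y, z)
ph_of_xyz = sp.atan2(yy, xx)                 # phi(x, y, z)
# e.g., Ar_SM(r_of_xyz, th_of_xyz, ph_of_xyz, M=M, a=a) would then give
# A_r evaluated on a Cartesian grid (with M, a defined as above).
```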
We will code each component as its own function to more easily apply the appropriate staggering.
\begin{align}
A_r &= -\frac{aC}{8} \left| \cos \theta \right| \left( 1 + \frac{4M}{r} \right)
\sqrt{1 + \frac{2M}{r}}, \\
\end{align}
```python
def Ar_SM(r,theta,phi, **params):
M = params["M"]
a = params["a"]
# A_r = -aC/8 * cos \theta ( 1 + 4M/r ) \sqrt{1 + 2M/r}
return -a*C_SM*sp.Rational(1,8)*nrpyAbs(sp.cos(theta))*(sp.sympify(1)+sp.sympify(4)*M/r)*sp.sqrt(sp.sympify(1)+sp.sympify(2)*M/r)
```
\begin{align}
A_\theta &= 0, \\
\end{align}
```python
def Ath_SM(r,theta,phi, **params):
# A_\theta = 0
return sp.sympify(0)
```
\begin{align}
A_\phi &= M^2 C [1- \left| \cos \theta \right| + a^2 f(r) \cos \theta \sin^2 \theta]
\end{align}
```python
def Aph_SM(r,theta,phi, **params):
M = params["M"]
a = params["a"]
    # A_phi = M^2 C [1 - |cos(theta)| + a^2 f(r) cos(theta) sin^2(theta)]
return M*M*C_SM*(sp.sympify(1)-nrpyAbs(sp.cos(theta))+a*a*f_of_r(r,M)*sp.cos(theta)*sp.sin(theta)**2)
```
<a id='set_vi'></a>
# Step 4: Calculate $v^i$ from $B_i$ and $E_i$ \[Back to [top](#toc)\]
$$\label{set_vi}$$
Next, we will code up the Valencia velocity, which will require us to first code the electric and magnetic fields. The magnetic field is simply $$B^i = \frac{[ijk]}{\sqrt{\gamma}} \partial_j A_k,$$ which gives
\begin{align}
B^r &= \frac{C \alpha M^2}{r^2} + \frac{C \alpha a^2 M^2}{2r^4} \left[ -2\cos \theta + \left(\frac{r}{M}\right)^2 (1+3 \cos 2\theta) f(r) \right], \\
B^\theta &= - \frac{C \alpha a^2}{r^2} \sin \theta \cos \theta f'(r), \\
B^\phi &= -\frac{C \alpha a M}{8r^2} \left( 1 + \frac{4M}{r}\right) .
\end{align}
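As a quick, hedged illustration of the curl formula above (the closed-form expressions were derived analytically, and the implementation below codes them directly rather than evaluating this), one can form $B^i$ symbolically from generic $A_k(r,\theta,\phi)$:
```python
import sympy as sp

r, th, ph = sp.symbols('r theta phi', positive=True)
sqrtgamma = sp.Symbol('sqrtgamma', positive=True)  # assumed given
xx = [r, th, ph]
AD = [sp.Function(name)(r, th, ph) for name in ('A_r', 'A_th', 'A_ph')]

# B^i = [ijk]/sqrt(gamma) * d_j A_k
BU = [sum(sp.LeviCivita(i, j, k)*sp.diff(AD[k], xx[j])
          for j in range(3) for k in range(3))/sqrtgamma
      for i in range(3)]
```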
The electric field is
\begin{align}
E_r &= -\frac{C a^3}{8\alpha M^3} f'(r) \cos \theta \sin^2 \theta \\
E_\theta &= -\frac{Ca}{8\alpha}[\sin \theta + a^2 f(r) \sin \theta (2 \cos^2 \theta-\sin^2 \theta) ] - \beta^r \sqrt{\gamma} \frac{a C}{8 r^2}\left( 1+\frac{4M}{r}\right) \\
E_\phi &= \frac{\beta^r}{\alpha M} Ca^2 f'(r) \cos \theta \sin^2 \theta,
\end{align}
We can then calculate the velocity as $$v_{(n)}^i = \frac{\epsilon^{ijk} E_j B_k}{B^2}.$$
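Before the implementation, here is a bare-bones sketch of that formula (with $\epsilon^{ijk} = [ijk]/\sqrt{\gamma}$ and $B^2 = \gamma_{ij}B^iB^j$); note that the actual code below delegates this step to `gfcf.compute_ValenciavU_from_ED_and_BU`:
```python
import sympy as sp

def ValenciavU_sketch(gammaDD, ED, BU):
    # Illustrative only: v^i = eps^{ijk} E_j B_k / B^2
    sqrtgamma = sp.sqrt(sp.Matrix(gammaDD).det())
    BD = [sum(gammaDD[i][j]*BU[j] for j in range(3)) for i in range(3)]  # lower the index
    B2 = sum(BU[i]*BD[i] for i in range(3))
    return [sum(sp.LeviCivita(i, j, k)*ED[j]*BD[k]
                for j in range(3) for k in range(3))/(sqrtgamma*B2)
            for i in range(3)]
```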
```python
def ValenciavU_func_SM(**params):
M = params["M"]
a = params["a"]
alpha = params["alpha"]
betaU = params["betaU"] # Note that this must use a spherical basis!
gammaDD = params["gammaDD"] # Note that this must use a Cartesian basis!
sqrtgammaDET = params["sqrtgammaDET"] # This must be spherical
KerrSchild_radial_shift = params["KerrSchild_radial_shift"]
r = rfm.xxSph[0] + KerrSchild_radial_shift # We are setting the data up in Shifted Kerr-Schild coordinates
theta = rfm.xxSph[1]
phi = rfm.xxSph[2]
z = rfm.xx_to_Cart[2]
split_C_SM = noif.coord_geq_bound(z,sp.sympify(0))*C_SM - noif.coord_less_bound(z,sp.sympify(0))*C_SM
BsphU = ixp.zerorank1()
BsphU[0] = split_C_SM*alpha*M*M/(r*r) + \
split_C_SM*alpha*a*a*M*M*sp.Rational(1,2)/(r**4)*(-sp.sympify(2)*sp.cos(theta) + (r/M)**2*(sp.sympify(1)+sp.sympify(3)*sp.cos(sp.sympify(2)*theta))*f_of_r(r,M))
BsphU[1] = -split_C_SM*alpha*a*a/(r*r) * sp.sin(theta)*sp.cos(theta)*fp_of_r(r,M)
BsphU[2] = -split_C_SM*alpha*a*M*sp.Rational(1,8)/(r*r)*(sp.sympify(1)+sp.sympify(4)*M/r)
EsphD = ixp.zerorank1()
EsphD[0] = -split_C_SM*a**3/(sp.sympify(8)*alpha*M**3)*fp_of_r(r,M)*sp.cos(theta)*sp.sin(theta)**2
EsphD[1] = -split_C_SM*a*sp.Rational(1,8)/alpha*(sp.sin(theta) + a*a*f_of_r(r,M)*sp.sin(theta)*(sp.sympify(2)*sp.cos(theta)**2-sp.sin(theta)**2)) - \
betaU[0]*sqrtgammaDET*a*split_C_SM*sp.Rational(1,8)/(r*r)*(sp.sympify(1)+sp.sympify(4)*M/r)
EsphD[2] = betaU[0]/(alpha*M)*split_C_SM*a*a*fp_of_r(r,M)*sp.cos(theta)*sp.sin(theta)**2
ED = gfcf.change_basis_spherical_to_Cartesian_D(EsphD)
BU = gfcf.change_basis_spherical_to_Cartesian_U(BsphU)
return gfcf.compute_ValenciavU_from_ED_and_BU(ED, BU, gammaDD)
```
<a id='code_validation1'></a>
# Step 5: Code Validation against `GiRaFFEfood_NRPy.GiRaFFEfood_NRPy` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation1}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for the `GiRaFFE` Split Monopole initial data equations we intend to use between
1. this tutorial and
2. the NRPy+ [GiRaFFEfood_NRPy.GiRaFFEfood_NRPy-Split_Monopole](../edit/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Split_Monopole.py) module.
```python
import BSSN.ShiftedKerrSchild as sks
sks.ShiftedKerrSchild(True)
import reference_metric as rfm # NRPy+: Reference metric support
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])],
[sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])],
[sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]])
dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv()
gammaSphDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
gammaSphDD[i][j] += sks.gammaSphDD[i][j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])
gammaDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*gammaSphDD[k][l]
unused_gammaUU,gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)
sqrtgammaDET = sp.sqrt(gammaDET)
betaU = ixp.zerorank1()
for i in range(3):
betaU[i] += sks.betaSphU[i].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])
alpha = sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])
A_smD = gfcf.Axyz_func_spherical(Ar_SM,Ath_SM,Aph_SM,stagger_enable = True,M=sks.M,a=sks.a,KerrSchild_radial_shift=sks.r0)
Valenciav_smD = ValenciavU_func_SM(M=sks.M,a=sks.a,KerrSchild_radial_shift=sks.r0,alpha=alpha,betaU=betaU,gammaDD=gammaDD,sqrtgammaDET=sqrtgammaDET)
gf.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "SplitMonopole", stagger_enable = True,M=sks.M,a=sks.a,KerrSchild_radial_shift=sks.r0,alpha=alpha,betaU=betaU,gammaDD=gammaDD,sqrtgammaDET=sqrtgammaDET)
def consistency_check(quantity1,quantity2,string):
if quantity1-quantity2==0:
print(string+" is in agreement!")
else:
print(string+" does not agree!")
sys.exit(1)
for i in range(3):
consistency_check(Valenciav_smD[i],gf.ValenciavU[i],"ValenciavU"+str(i))
consistency_check(A_smD[i],gf.AD[i],"AD"+str(i))
```
ValenciavU0 is in agreement!
AD0 is in agreement!
ValenciavU1 is in agreement!
AD1 is in agreement!
ValenciavU2 is in agreement!
AD2 is in agreement!
<a id='latex_pdf_output'></a>
# Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-GiRaFFEfood_NRPy-Split_Monopole.pdf](Tutorial-GiRaFFEfood_NRPy-Split_Monopole.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFEfood_NRPy-Split_Monopole",location_of_template_file=os.path.join(".."))
```
Created Tutorial-GiRaFFEfood_NRPy-Split_Monopole.tex, and compiled LaTeX
file to PDF file Tutorial-GiRaFFEfood_NRPy-Split_Monopole.pdf
|
{"hexsha": "dd0425576f21ff4cd711406b5cb51a22005af2be", "size": 23499, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "in_progress/Tutorial-GiRaFFEfood_NRPy-Split_Monopole.ipynb", "max_stars_repo_name": "fedelopezar/nrpytutorial", "max_stars_repo_head_hexsha": "753acd954be4a2f99639c9f9fd5e623689fc7493", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-13T05:51:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T05:51:18.000Z", "max_issues_repo_path": "in_progress/Tutorial-GiRaFFEfood_NRPy-Split_Monopole.ipynb", "max_issues_repo_name": "fedelopezar/nrpytutorial", "max_issues_repo_head_hexsha": "753acd954be4a2f99639c9f9fd5e623689fc7493", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "in_progress/Tutorial-GiRaFFEfood_NRPy-Split_Monopole.ipynb", "max_forks_repo_name": "fedelopezar/nrpytutorial", "max_forks_repo_head_hexsha": "753acd954be4a2f99639c9f9fd5e623689fc7493", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.4515463918, "max_line_length": 500, "alphanum_fraction": 0.5736839866, "converted": true, "num_tokens": 6327}
|
import sys
import os
from PIL import Image
import numpy as np
import tensorflow as tf
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image as sensor_image
from sensor_msgs.msg import Joy
import logging
import logging.handlers
from time import sleep
from random import uniform
from threading import Thread
import json
from std_msgs.msg import String
import random
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, AWSIoTMQTTShadowClient
logger = logging.getLogger('MyLogger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
logger.addHandler(handler)
# Set your variables here
FROZEN_MODEL_LOCAL_PATH = "model.pb"
FROZEN_MODEL_S3_KEY = "model/model.pb"
AWS_REGION = "us-west-2"
ROOT_CA = '/greengrass/certs/root.ca.pem'
CERT_KEY = '/greengrass/certs/ggc.cert.pem'
PRIVATE_KEY = '/greengrass/certs/ggc.private.key'
THING_NAME = os.environ['AWS_IOT_THING_NAME']
logger.debug('IoT Thing Name: {}'.format(THING_NAME))
IOT_ENDPOINT = os.environ['AWS_IOT_MQTT_ENDPOINT'][:-5]
logger.debug('IoT Endpoint: {}'.format(IOT_ENDPOINT))
logger.debug(os.environ)
TRAINING_IMAGE_SIZE = (160, 120)
class IoT(object):
# Class to handle AWS IoT SDK connections and commands
def __init__(self, host, rootCAPath, certificatePath, privateKeyPath,
clientId, useWebsocket=False, mode='both'):
self.AllowedActions = ['both', 'publish', 'subscribe']
self.host = host
self.rootCAPath = rootCAPath
self.certificatePath = certificatePath
self.privateKeyPath = privateKeyPath
self.clientId = clientId
self.useWebsocket = useWebsocket
self.mode = mode
# Configure logging
self.logger = logging.getLogger("AWSIoTPythonSDK.core")
self.logger.setLevel(logging.ERROR)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connect_client()
self.connect_shadow_client()
def connect_client(self):
# Init AWSIoTMQTTClient
self.myAWSIoTMQTTClient = None
if self.useWebsocket:
self.myAWSIoTMQTTClient = AWSIoTMQTTClient(self.clientId, useWebsocket=True)
self.myAWSIoTMQTTClient.configureEndpoint(self.host, 443)
self.myAWSIoTMQTTClient.configureCredentials(self.rootCAPath)
else:
self.myAWSIoTMQTTClient = AWSIoTMQTTClient(self.clientId)
self.myAWSIoTMQTTClient.configureEndpoint(self.host, 8883)
self.myAWSIoTMQTTClient.configureCredentials(self.rootCAPath, self.privateKeyPath, self.certificatePath)
# AWSIoTMQTTClient connection configuration
self.myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
self.myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
self.myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
self.myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
self.myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect and subscribe to AWS IoT
self.myAWSIoTMQTTClient.connect()
def connect_shadow_client(self, clientId_suffix='_shadow'):
# Init AWSIoTMQTTShadowClient
clientId = self.clientId + clientId_suffix
self.myAWSIoTMQTTShadowClient = None
if self.useWebsocket:
self.myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True)
self.myAWSIoTMQTTShadowClient.configureEndpoint(self.host, 443)
self.myAWSIoTMQTTShadowClient.configureCredentials(self.rootCAPath)
else:
self.myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId)
self.myAWSIoTMQTTShadowClient.configureEndpoint(self.host, 8883)
self.myAWSIoTMQTTShadowClient.configureCredentials(self.rootCAPath, self.privateKeyPath, self.certificatePath)
# AWSIoTMQTTShadowClient configuration
self.myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
self.myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10) # 10 sec
self.myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect to AWS IoT
self.myAWSIoTMQTTShadowClient.connect()
def shadow_handler(self, thingName):
# Create a deviceShadow with persistent subscription
self.deviceShadowHandler = self.myAWSIoTMQTTShadowClient.createShadowHandlerWithName(thingName, True)
def shadow_get(self, callback):
try:
# Get shadow JSON doc
return self.deviceShadowHandler.shadowGet(callback, 5)
except Exception as e:
logger.exception(e)
def shadow_update(self, json_payload, callback):
# Update shadow JSON doc
self.deviceShadowHandler.shadowUpdate(json_payload, callback, 5)
class InferenceWorker:
def __init__(self, model_path):
self.model_path = model_path
def run(self):
self.graph = self.load_graph()
self.session = tf.Session(graph=self.graph, config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
logger.debug('INFO: Creating publisher on /cmd_vel')
self.ack_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)
logger.debug('INFO: Creating subscriber on /camera/bgr/image_raw')
rospy.Subscriber('/usb_cam/image_raw', sensor_image, self.callback_image)
logger.debug('INFO: Finished initialization')
def load_graph(self):
print('Loading graph...')
with tf.gfile.GFile(self.model_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="turtlebot")
print('INFO: Finished loading graph')
return graph
def callback_image(self, raw_image):
try:
global active_mode
if active_mode:
if active_mode == 'tracking':
image = Image.frombytes('RGB', (raw_image.width, raw_image.height),
raw_image.data, 'raw', 'BGR', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE)
image = np.array(image)
                    # Extract the luma (grayscale) component of the image
r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
image = 0.2989 * r + 0.5870 * g + 0.1140 * b
image = np.expand_dims(image, 2)
image = np.expand_dims(image, 0)
x = self.graph.get_tensor_by_name('turtlebot/main_level/agent/main/online/network_0/observation/observation:0')
y = self.graph.get_tensor_by_name('turtlebot/main_level/agent/main/online/network_1/ppo_head_0/policy:0')
inferenceOutput = np.argmax(self.session.run(y, feed_dict={
x: image
}))
self.takeAction(inferenceOutput)
except Exception as e:
logger.exception(e)
    def takeAction(self, action):
        # 'x' below is the module-level base throttle (set to 0.2 in __main__)
        if action == 0:    # hard left
            steering = 0.6
            throttle = x
        elif action == 1:  # hard right
            steering = -0.6
            throttle = x
        elif action == 2:  # straight ahead
            steering = 0
            throttle = x
        elif action == 3:  # soft left
            steering = 0.3
            throttle = x
        elif action == 4:  # soft right
            steering = -0.3
            throttle = x
        else:              # should not be here
            raise ValueError("Invalid action")
speed = Twist()
speed.linear.x = throttle
speed.angular.z = steering
self.ack_publisher.publish(speed)
class EV3DEV:
def __init__(self):
self.shadow_event = None
self.iot_init()
self.thing_shadow_init()
self.joystick_init()
def iot_init(self):
try:
logger.debug('IOT INIT...')
self.iot = IoT(IOT_ENDPOINT,
ROOT_CA,
CERT_KEY,
PRIVATE_KEY,
THING_NAME)
self.iot.shadow_handler(THING_NAME)
except Exception as e:
logger.exception(e)
def thing_shadow_init(self):
try:
logger.debug('ACTIVE MODE INIT...')
self.mode_publisher = rospy.Publisher('ev3/active_mode', String, queue_size=10, tcp_nodelay=True, latch=False)
thread = Thread(target=self.thing_shadow_thread)
thread.daemon = True
thread.start()
except Exception as e:
logger.exception(e)
def thing_shadow_thread(self):
logger.debug('Thing Shadow thread started')
while True:
try:
global active_mode
self.iot.shadow_get(self.shadow_callback_get)
thing_shadow = self.shadow_event.get('state').get('desired')
if thing_shadow:
logger.debug(thing_shadow)
active_mode = thing_shadow.get('active_mode')
self.mode_publisher.publish(active_mode)
# Update shadow to reflect reported state
shadow_payload = json.dumps({"state":{"reported": {'active_mode': active_mode}, 'desired': None}})
self.iot.shadow_update(shadow_payload, self.shadow_callback_update)
sleep(1)
self.iot.shadow_get(self.shadow_callback_get)
logger.debug(self.shadow_event.get('state'))
else:
active_mode = self.shadow_event.get('state').get('reported').get('active_mode')
sleep(1)
except Exception as e:
logger.exception('Thing Shadow Thread error: {}'.format(e))
sleep(5)
def shadow_callback_get(self, payload, responseStatus, token):
try:
self.shadow_event = json.loads(payload)
except Exception as e:
logger.exception(e)
def shadow_callback_update(self, payload, responseStatus, token):
try:
if responseStatus == "timeout":
logger.debug("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
logger.debug("~~~~~~~~~~~~~~~~~~~~~~~")
logger.debug("Update request with token: " + token + " accepted!")
logger.debug("active_mode: " + payloadDict["state"]["reported"]["active_mode"])
logger.debug("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
logger.debug("Update request " + token + " rejected!")
except Exception as e:
logger.exception(e)
def joystick_init(self):
try:
logger.debug('JOYSTICK INIT...')
self.twist = Twist()
thread = Thread(target=self.joystick_thread)
thread.daemon = True
thread.start()
self.joystick_publisher = rospy.Publisher('ev3/cmd_vel', Twist, queue_size=1)
except Exception as e:
logger.exception(e)
def joystick_thread(self):
try:
logger.debug('Joystick thread started')
rospy.Subscriber('joy', Joy, self.joystick_callback)
except Exception as e:
logger.exception(e)
def joystick_callback(self, data):
try:
global active_mode
if active_mode == 'joystick':
# button_a = data.buttons[0]
# button_b = data.buttons[1]
# button_x = data.buttons[2]
# button_y = data.buttons[3]
dpad_x = data.axes[6]
dpad_y = data.axes[7]
speed = 0.5
if abs(dpad_y) == 1:
print('DPAD_X: {}, DPAD_Y: {}'.format(dpad_x, dpad_y))
if dpad_y == 1:
self.move(speed, 0, 0)
else:
self.move(-speed, 0, 0)
if abs(dpad_x) == 1:
print('DPAD_X: {}, DPAD_Y: {}'.format(dpad_x, dpad_y))
if dpad_x == 1:
self.move(0, 100, 0)
print('left')
else:
self.move(0, -100, 0)
print('right')
elif abs(dpad_y) == 0 and abs(dpad_x) == 0:
print('stop')
self.move(0, 0, 0)
except Exception as e:
logger.exception(e)
def move(self, x, z, d):
try:
self.twist.linear.x = x
self.twist.angular.z = z
self.joystick_publisher.publish(self.twist)
except Exception as e:
logger.exception(e)
if __name__ == '__main__':
try:
model_path = sys.argv[1]
logger.debug('Starting Inference Worker, Specified Model Directory: {}'.format(model_path))
x = 0.2
ev3dev = EV3DEV()
rospy.init_node('rl_coach', anonymous=True)
rate = rospy.Rate(1)
inference_worker = InferenceWorker(model_path)
inference_worker.run()
rospy.spin()
except Exception as e:
logger.exception(e)
|
{"hexsha": "db88942246f4c36c5d47b37cf7d5e51c18e785a5", "size": 13654, "ext": "py", "lang": "Python", "max_stars_repo_path": "reinvent-2019/lego-ev3-raspberry-pi-robot/robomaker/robot_ws/src/turtlebot_controller/robomaker/inference_worker.py", "max_stars_repo_name": "kienpham2000/aws-builders-fair-projects", "max_stars_repo_head_hexsha": "6c4075c0945a6318b217355a6fc663e35ffb9dba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-17T03:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T06:23:58.000Z", "max_issues_repo_path": "reinvent-2019/lego-ev3-raspberry-pi-robot/robomaker/robot_ws/src/turtlebot_controller/robomaker/inference_worker.py", "max_issues_repo_name": "kienpham2000/aws-builders-fair-projects", "max_issues_repo_head_hexsha": "6c4075c0945a6318b217355a6fc663e35ffb9dba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-05-09T06:05:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T09:53:20.000Z", "max_forks_repo_path": "reinvent-2019/lego-ev3-raspberry-pi-robot/robomaker/robot_ws/src/turtlebot_controller/robomaker/inference_worker.py", "max_forks_repo_name": "kienpham2000/aws-builders-fair-projects", "max_forks_repo_head_hexsha": "6c4075c0945a6318b217355a6fc663e35ffb9dba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-05T04:45:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-05T04:45:42.000Z", "avg_line_length": 38.1396648045, "max_line_length": 131, "alphanum_fraction": 0.6034129193, "include": true, "reason": "import numpy", "num_tokens": 3023}
|
# Convert a prolongation matrix P to a tentative operator S.
# P is piecewise constant over some number of aggregates.
# S has a n x n for each aggregate where n is the size of the aggregate.
using PETScBinaryIO
using SparseArrays, LinearAlgebra  # rowvals/nzrange/sparse and svd/diagind live here
P = readPETSc(ARGS[1])
rows = rowvals(P)
m, n = size(P)
is = Vector{Int}()
js = Vector{Int}()
ks = Vector{Float64}()
j = 0
for i = 1:n
    global j  # needed: assigning a global inside a top-level loop in a Julia script
nzrng = nzrange(P,i)
agg_size = length(nzrng)
if agg_size > 10000
println("Aggregate is too large ($agg_size)")
exit(-1)
end
block = fill(-1/agg_size, (agg_size, agg_size)) # B(BᵀB)⁻¹Bᵀ
    block[diagind(block)] .+= 1 # I - B(BᵀB)⁻¹Bᵀ  (broadcast .+=: vector plus scalar)
U, _, _ = svd(block)
for x = 1:agg_size
for y = 1:agg_size-1
push!(is, rows[nzrng[x]])
push!(js, j + y)
push!(ks, U[x,y])
end
end
j += agg_size-1
end
S = sparse(is,js,ks,m,j)
writePETSc(ARGS[2], S)
|
{"hexsha": "895af5665a17df139cb0d2c5096caab4b46b921c", "size": 888, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/P_to_S.jl", "max_stars_repo_name": "ligmg/ligmg", "max_stars_repo_head_hexsha": "b0046cac6ee0aed044ef9b3e2ea091b3d44219ee", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-07-03T23:40:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T00:39:47.000Z", "max_issues_repo_path": "scripts/P_to_S.jl", "max_issues_repo_name": "ligmg/ligmg", "max_issues_repo_head_hexsha": "b0046cac6ee0aed044ef9b3e2ea091b3d44219ee", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-05T13:06:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T04:14:45.000Z", "max_forks_repo_path": "scripts/P_to_S.jl", "max_forks_repo_name": "ligmg/ligmg", "max_forks_repo_head_hexsha": "b0046cac6ee0aed044ef9b3e2ea091b3d44219ee", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-18T00:39:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T00:39:50.000Z", "avg_line_length": 23.3684210526, "max_line_length": 72, "alphanum_fraction": 0.5878378378, "num_tokens": 309}
|
## DDPG model with actor-critic framework
import numpy as np
import random
import tensorflow as tf
from tensorflow.python.framework import ops
import keras.backend as K
from keras import Sequential
from keras.layers import Dense, Dropout
class Actor():
'''
Policy function approximator
'''
def __init__(self, sess, state_space_size, action_space_size, batch_size, ra_length, history_length, embedding_size, tau, learning_rate, scope='actor'):
self.sess = sess
self.state_space_size = state_space_size
self.action_space_size = action_space_size
self.batch_size = batch_size
self.ra_length = ra_length
self.history_length = history_length
self.embedding_size = embedding_size
self.tau = tau
self.learning_rate = learning_rate
self.scope = scope
with tf.variable_scope(self.scope):
# Build Actor network
self.action_weights, self.state, self.sequence_length = self._build_net('estimator_actor')
self.network_params = tf.trainable_variables()
# Build target Actor network
self.target_action_weights, self.target_state, self.target_sequence_length = self._build_net('target_actor')
self.target_network_params = tf.trainable_variables()[len(self.network_params):] # TODO: why sublist [len(x):]? Maybe because its equal to network_params + target_network_params
# Initialize target network weights with network weights
self.init_target_network_params = [self.target_network_params[i].assign(self.network_params[i])
for i in range(len(self.target_network_params))]
# Update target network weights
self.update_target_network_params = [self.target_network_params[i].assign(
tf.multiply(self.tau, self.network_params[i]) +
tf.multiply(1 - self.tau, self.target_network_params[i]))for i in range(len(self.target_network_params))]
# Gradient computation from Critic's action_gradients
self.action_gradients = tf.placeholder(tf.float32, [None, self.action_space_size])
gradients = tf.gradients(tf.reshape(self.action_weights, [self.batch_size, self.action_space_size], name = '42'),
self.network_params, self.action_gradients)
params_gradients = list(map(lambda x: tf.div(x, self.batch_size * self.action_space_size), gradients))
# Compute ∇_a.Q(s, a|θ^µ).∇_θ^π.f_θ^π(s)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(params_gradients, self.network_params))
def _build_net(self, scope):
'''
Build the (target) Actor network
'''
def gather_last_output(data, seq_lens):
def cli_value(x, v):
y = tf.constant(v, shape = x.get_shape(), dtype = tf.int64)
x = tf.cast(x, tf.int64)
return tf.where(tf.greater(x, y), x, y)
batch_range = tf.range(tf.cast(tf.shape(data)[0], dtype = tf.int64), dtype=tf.int64)
tmp_end = tf.map_fn(lambda x: cli_value(x, 0), seq_lens - 1, dtype = tf.int64)
indices = tf.stack([batch_range, tmp_end], axis = 1)
return tf.gather_nd(data, indices)
with tf.variable_scope(scope):
# Inputs: current state, sequence_length
# Outputs: action weights
state = tf.placeholder(tf.float32, [None, self.state_space_size], 'state')
state_ = tf.reshape(state, [-1, self.history_length, self.embedding_size])
sequence_length = tf.placeholder(tf.int32, [None], 'sequence_length')
cell = tf.nn.rnn_cell.GRUCell(self.embedding_size,
activation = tf.nn.relu,
kernel_initializer = tf.initializers.random_normal(),
bias_initializer = tf.zeros_initializer())
outputs, _ = tf.nn.dynamic_rnn(cell, state_, dtype = tf.float32, sequence_length = sequence_length)
last_output = gather_last_output(outputs, sequence_length)
x = tf.keras.layers.Dense(self.ra_length * self.embedding_size)(last_output)
action_weights = tf.reshape(x, [-1, self.ra_length, self.embedding_size])
return action_weights, state, sequence_length
def train(self, state, sequence_length, action_gradients):
'''
Compute ∇_a.Q(s, a|θ^µ).∇_θ^π.f_θ^π(s)
'''
self.sess.run(self.optimizer,
feed_dict = {
self.state: state,
self.sequence_length: sequence_length,
self.action_gradients: action_gradients})
def predict(self, state, sequence_length):
return self.sess.run(self.action_weights,
feed_dict = {
self.state: state,
self.sequence_length: sequence_length})
def predict_target(self, state, sequence_length):
return self.sess.run(self.target_action_weights,
feed_dict = {
self.target_state: state,
self.target_sequence_length: sequence_length})
def init_target_network(self):
self.sess.run(self.init_target_network_params)
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_recommendation_list(self, ra_length, noisy_state, embeddings, target = False):
'''
Args:
ra_length: length of the recommendation list.
noisy_state: current/remembered environment state with noise.
embeddings: Embeddings object.
target: boolean to use Actor's network or target network.
Returns:
Recommendation List: list of embedded items as future actions.
'''
def get_score(weights, embedding, batch_size):
'''
Args:
weights: w_t^k shape = (embedding_size,).
embedding: e_i shape = (embedding_size,).
Returns:
score of the item i: score_i = w_t^k.e_i^T shape = (1,).
'''
ret = np.dot(weights, embedding.T)
return ret
batch_size = noisy_state.shape[0]
# Generate w_t = {w_t^1, ..., w_t^K}
method = self.predict_target if target else self.predict
weights = method(noisy_state, [ra_length] * batch_size)
# Score items
scores = np.array([[[get_score(weights[i][k], embedding, batch_size)
for embedding in embeddings.get_embedding_vector()]
for k in range(ra_length)] for i in range(batch_size)])
# return a_t
return np.array([[embeddings.get_embedding(np.argmax(scores[i][k]))
for k in range(ra_length)] for i in range(batch_size)])
class Critic():
'''
Value function approximator
'''
def __init__(self, sess, state_space_size, action_space_size, history_length, embedding_size, tau, learning_rate, scope='critic'):
self.sess = sess
self.state_space_size = state_space_size
self.action_space_size = action_space_size
self.history_length = history_length
self.embedding_size = embedding_size
self.tau = tau
self.learning_rate = learning_rate
self.scope = scope
with tf.variable_scope(self.scope):
# Build Critic network
self.critic_Q_value, self.state, self.action, self.sequence_length = self._build_net('estimator_critic')
self.network_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='estimator_critic')
# Build target Critic network
self.target_Q_value, self.target_state, self.target_action, self.target_sequence_length = self._build_net('target_critic')
self.target_network_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_critic')
# Initialize target network weights with network weights (θ^µ′ ← θ^µ)
self.init_target_network_params = [self.target_network_params[i].assign(self.network_params[i])
for i in range(len(self.target_network_params))]
# Update target network weights (θ^µ′ ← τθ^µ + (1 − τ)θ^µ′)
self.update_target_network_params = [self.target_network_params[i].assign(
tf.multiply(self.tau, self.network_params[i]) +
tf.multiply(1 - self.tau, self.target_network_params[i]))
for i in range(len(self.target_network_params))]
# Minimize MSE between Critic's and target Critic's outputed Q-values
self.expected_reward = tf.placeholder(tf.float32, [None, 1])
self.loss = tf.reduce_mean(tf.squared_difference(self.expected_reward, self.critic_Q_value))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Compute ∇_a.Q(s, a|θ^µ)
self.action_gradients = tf.gradients(self.critic_Q_value, self.action)
def _build_net(self, scope):
'''
Build the (target) Critic network
'''
def gather_last_output(data, seq_lens):
def cli_value(x, v):
y = tf.constant(v, shape = x.get_shape(), dtype = tf.int64)
return tf.where(tf.greater(x, y), x, y)
this_range = tf.range(tf.cast(tf.shape(seq_lens)[0], dtype = tf.int64), dtype = tf.int64)
tmp_end = tf.map_fn(lambda x: cli_value(x, 0), seq_lens - 1, dtype = tf.int64)
indices = tf.stack([this_range, tmp_end], axis = 1)
return tf.gather_nd(data, indices)
with tf.variable_scope(scope):
# Inputs: current state, current action
# Outputs: predicted Q-value
state = tf.placeholder(tf.float32, [None, self.state_space_size], 'state')
state_ = tf.reshape(state, [-1, self.history_length, self.embedding_size])
action = tf.placeholder(tf.float32, [None, self.action_space_size], 'action')
sequence_length = tf.placeholder(tf.int64, [None], name = 'critic_sequence_length')
cell = tf.nn.rnn_cell.GRUCell(self.history_length,
activation = tf.nn.relu,
kernel_initializer = tf.initializers.random_normal(),
bias_initializer = tf.zeros_initializer())
predicted_state, _ = tf.nn.dynamic_rnn(cell, state_, dtype = tf.float32, sequence_length = sequence_length)
predicted_state = gather_last_output(predicted_state, sequence_length)
inputs = tf.concat([predicted_state, action], axis = -1)
layer1 = tf.layers.Dense(32, activation = tf.nn.relu)(inputs)
layer2 = tf.layers.Dense(16, activation = tf.nn.relu)(layer1)
critic_Q_value = tf.layers.Dense(1)(layer2)
return critic_Q_value, state, action, sequence_length
def train(self, state, action, sequence_length, expected_reward):
'''
Minimize MSE between expected reward and target Critic's Q-value
'''
return self.sess.run([self.critic_Q_value, self.loss, self.optimizer],
feed_dict = {
self.state: state,
self.action: action,
self.sequence_length: sequence_length,
self.expected_reward: expected_reward})
def predict(self, state, action, sequence_length):
'''
Returns Critic's predicted Q-value
'''
return self.sess.run(self.critic_Q_value,
feed_dict = {
self.state: state,
self.action: action,
self.sequence_length: sequence_length})
def predict_target(self, state, action, sequence_length):
'''
Returns target Critic's predicted Q-value
'''
return self.sess.run(self.target_Q_value,
feed_dict = {
self.target_state: state,
self.target_action: action,
self.target_sequence_length: sequence_length})
def get_action_gradients(self, state, action, sequence_length):
'''
Returns ∇_a.Q(s, a|θ^µ)
'''
return np.array(self.sess.run(self.action_gradients,
feed_dict = {
self.state: state,
self.action: action,
self.sequence_length: sequence_length})[0])
def init_target_network(self):
self.sess.run(self.init_target_network_params)
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class ReplayMemory():
'''
Replay memory D in article
'''
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.buffer = []
def add(self, state, action, reward, n_state):
self.buffer.append([state, action, reward, n_state])
if len(self.buffer) > self.buffer_size:
self.buffer.pop(0)
def size(self):
return len(self.buffer)
def sample_batch(self, batch_size):
return random.sample(self.buffer, batch_size)
def experience_replay(replay_memory, batch_size, actor, critic, embeddings, ra_length, state_space_size, action_space_size, discount_factor):
'''
Experience replay.
Args:
replay_memory: replay memory D in article.
batch_size: sample size.
actor: Actor network.
critic: Critic network.
embeddings: Embeddings object.
state_space_size: dimension of states.
action_space_size: dimensions of actions.
Returns:
Best Q-value, loss of Critic network for printing/recording purpose.
'''
# Sample minibatch of N transitions (s, a, r, s′)
samples = replay_memory.sample_batch(batch_size)
states = np.array([s[0] for s in samples])
actions = np.array([s[1] for s in samples])
rewards = np.array([s[2] for s in samples])
n_states = np.array([s[3] for s in samples]).reshape(-1, state_space_size)
# Generate a′ by target Actor network
n_actions = actor.get_recommendation_list(ra_length, states, embeddings, target = True).reshape(-1, action_space_size)
# Calculate predicted Q′(s′, a′|θ^µ′) value
target_Q_value = critic.predict_target(n_states, n_actions, [ra_length] * batch_size)
    # Set y = r + γQ′(s′, a′|θ^µ′)
    expected_rewards = rewards + discount_factor * target_Q_value
    # Update Critic by minimizing (y − Q(s, a|θ^µ))²
    critic_Q_value, critic_loss, _ = critic.train(states, actions, [ra_length] * batch_size, expected_rewards)
    # Update the Actor using the sampled policy gradient
    action_gradients = critic.get_action_gradients(states, n_actions, [ra_length] * batch_size)
    actor.train(states, [ra_length] * batch_size, action_gradients)
    # Update the Critic target network
    critic.update_target_network()
    # Update the Actor target network
    actor.update_target_network()
actor.update_target_network()
return np.amax(critic_Q_value), critic_loss
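# Minimal wiring sketch (illustrative only; `embeddings`, the sizes, and the
# hyperparameter values below are placeholders, not part of this module):
#
#   sess = tf.Session()
#   actor = Actor(sess, state_space_size, action_space_size, batch_size=64,
#                 ra_length=4, history_length=12, embedding_size=100,
#                 tau=0.001, learning_rate=1e-4)
#   critic = Critic(sess, state_space_size, action_space_size,
#                   history_length=12, embedding_size=100,
#                   tau=0.001, learning_rate=1e-3)
#   sess.run(tf.global_variables_initializer())
#   actor.init_target_network(); critic.init_target_network()
#   memory = ReplayMemory(buffer_size=10000)
#   # ... fill `memory` with (state, action, reward, next_state) transitions, then:
#   max_q, critic_loss = experience_replay(memory, 64, actor, critic, embeddings,
#                                          4, state_space_size, action_space_size,
#                                          discount_factor=0.9)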
|
{"hexsha": "89d6b41c41760de054e265607977e3e5dfdd9f2c", "size": 15835, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "paige-chang/Personalized-Music-Recommendation", "max_stars_repo_head_hexsha": "bfe8381b5a84e7bb0460cbbceb60b3f4514da226", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-07-09T19:44:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T21:03:34.000Z", "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "paige-chang/Personalized-Music-Recommendation", "max_issues_repo_head_hexsha": "bfe8381b5a84e7bb0460cbbceb60b3f4514da226", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "paige-chang/Personalized-Music-Recommendation", "max_forks_repo_head_hexsha": "bfe8381b5a84e7bb0460cbbceb60b3f4514da226", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-09-30T10:39:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:27:16.000Z", "avg_line_length": 42.9132791328, "max_line_length": 189, "alphanum_fraction": 0.6062519735, "include": true, "reason": "import numpy", "num_tokens": 3242}
|
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import load_data,accuracy
from model import GCN
# Training settings
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
# Model and optimizer
model = GCN(nfeat=features.shape[1],
nhid=16,
nclass=labels.max().item() + 1,
dropout=0.5)
optimizer = optim.Adam(model.parameters(),
lr=0.01, weight_decay=5e-4)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
features = features.to(device)
adj = adj.to(device)
labels = labels.to(device)
idx_train = idx_train.to(device)
idx_val = idx_val.to(device)
idx_test = idx_test.to(device)
def train(epoch):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
# Evaluate validation set performance separately,
# deactivates dropout during validation run.
model.eval()
output = model(features, adj)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
# Train model
t_total = time.time()
for epoch in range(1000):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
test()
|
{"hexsha": "172989e43fc78f5762d4c329673e0ce4142e0579", "size": 2382, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pytorch_geo/main.py", "max_stars_repo_name": "dariush-salami/gcn-gesture-recognition", "max_stars_repo_head_hexsha": "c75c29da0327c43d8601da43a20e1044e2cff139", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pytorch_geo/main.py", "max_issues_repo_name": "dariush-salami/gcn-gesture-recognition", "max_issues_repo_head_hexsha": "c75c29da0327c43d8601da43a20e1044e2cff139", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pytorch_geo/main.py", "max_forks_repo_name": "dariush-salami/gcn-gesture-recognition", "max_forks_repo_head_hexsha": "c75c29da0327c43d8601da43a20e1044e2cff139", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-04T06:01:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-04T06:01:40.000Z", "avg_line_length": 26.7640449438, "max_line_length": 69, "alphanum_fraction": 0.6704450042, "include": true, "reason": "import numpy", "num_tokens": 584}
|
theory OneThirdRuleDefs
imports "../HOModel"
begin
section {* Verification of the \emph{One-Third Rule} Consensus Algorithm *}
text {*
We now apply the framework introduced so far to the verification of
concrete algorithms, starting with algorithm \emph{One-Third Rule},
which is one of the simplest algorithms presented in~\cite{charron:heardof}.
Nevertheless, the algorithm has some interesting characteristics:
it ensures the safety properties (i.e., Integrity and Agreement) in the
presence of arbitrary benign faults, and if everything works perfectly,
it terminates in just two rounds. \emph{One-Third Rule} is an uncoordinated
algorithm tolerating benign faults, hence SHO or coordinator sets do not
play a role in its definition.
*}
subsection {* Model of the Algorithm *}
text {*
We begin by introducing an anonymous type of processes of finite
cardinality that will instantiate the type variable @{text "'proc"}
of the generic HO model.
*}
typedecl Proc -- {* the set of processes *}
axiomatization where Proc_finite: "OFCLASS(Proc, finite_class)"
instance Proc :: finite by (rule Proc_finite)
abbreviation
"N \<equiv> card (UNIV::Proc set)"
text {*
The state of each process consists of two fields: @{text x} holds
the current value proposed by the process and @{text decide} the
value (if any, hence the option type) it has decided.
*}
record 'val pstate =
x :: "'val"
decide :: "'val option"
text {*
The initial value of field @{text x} is unconstrained, but no decision
has been taken initially.
*}
definition OTR_initState where
"OTR_initState p st \<equiv> decide st = None"
text {*
Given a vector @{text msgs} of values (possibly null) received from
each process, @{term "HOV msgs v"} denotes the set of processes from
which value @{text v} was received.
*}
definition HOV :: "(Proc \<Rightarrow> 'val option) \<Rightarrow> 'val \<Rightarrow> Proc set" where
"HOV msgs v \<equiv> { q . msgs q = Some v }"
text {*
@{term "MFR msgs v"} (``most frequently received'') holds for
vector @{text msgs} if no value has been received more frequently
than @{text v}.
Some such value always exists, since there is only a finite set of
processes and thus a finite set of possible cardinalities of the
sets @{term "HOV msgs v"}.
*}
definition MFR :: "(Proc \<Rightarrow> 'val option) \<Rightarrow> 'val \<Rightarrow> bool" where
"MFR msgs v \<equiv> \<forall>w. card (HOV msgs w) \<le> card (HOV msgs v)"
lemma MFR_exists: "\<exists>v. MFR msgs v"
proof -
let ?cards = "{ card (HOV msgs v) | v . True }"
let ?mfr = "Max ?cards"
have "\<forall>v. card (HOV msgs v) \<le> N" by (auto intro: card_mono)
hence "?cards \<subseteq> { 0 .. N }" by auto
hence fin: "finite ?cards" by (metis atLeast0AtMost finite_atMost finite_subset)
hence "?mfr \<in> ?cards" by (rule Max_in) auto
then obtain v where v: "?mfr = card (HOV msgs v)" by auto
have "MFR msgs v"
proof (auto simp: MFR_def)
fix w
from fin have "card (HOV msgs w) \<le> ?mfr" by (rule Max_ge) auto
thus "card (HOV msgs w) \<le> card (HOV msgs v)" by (unfold v)
qed
thus ?thesis ..
qed
text {*
Also, if a process has heard from at least one other process,
the most frequently received values are among the received messages.
*}
lemma MFR_in_msgs:
assumes HO:"HOs m p \<noteq> {}"
and v: "MFR (HOrcvdMsgs OTR_M m p (HOs m p) (rho m)) v"
(is "MFR ?msgs v")
shows "\<exists>q \<in> HOs m p. v = the (?msgs q)"
proof -
from HO obtain q where q: "q \<in> HOs m p"
by auto
with v have "HOV ?msgs (the (?msgs q)) \<noteq> {}"
by (auto simp: HOV_def HOrcvdMsgs_def)
hence HOp: "0 < card (HOV ?msgs (the (?msgs q)))"
by auto
also from v have "\<dots> \<le> card (HOV ?msgs v)"
by (simp add: MFR_def)
finally have "HOV ?msgs v \<noteq> {}"
by auto
thus ?thesis
by (auto simp: HOV_def HOrcvdMsgs_def)
qed
text {*
@{term "TwoThirds msgs v"} holds if value @{text v} has been
received from more than $2/3$ of all processes.
*}
definition TwoThirds where
"TwoThirds msgs v \<equiv> (2*N) div 3 < card (HOV msgs v)"
text {*
The next-state relation of algorithm \emph{One-Third Rule} for every process
is defined as follows:
if the process has received values from more than $2/3$ of all processes,
the @{text x} field is set to the smallest among the most frequently received
values, and the process decides value $v$ if it received $v$ from more than
$2/3$ of all processes. If @{text p} hasn't heard from more than $2/3$ of
all processes, the state remains unchanged.
(Note that @{text Some} is the constructor of the option datatype, whereas
@{text "\<some>"} is Hilbert's choice operator.)
We require the type of values to be linearly ordered so that the minimum
is guaranteed to be well-defined.
*}
definition OTR_nextState where
"OTR_nextState r p (st::('val::linorder) pstate) msgs st' \<equiv>
if (2*N) div 3 < card {q. msgs q \<noteq> None}
then st' = \<lparr> x = Min {v . MFR msgs v},
decide = (if (\<exists>v. TwoThirds msgs v)
then Some (\<some>v. TwoThirds msgs v)
else decide st) \<rparr>
else st' = st"
text {*
The message sending function is very simple: at every round, every process
sends its current proposal (field @{text x} of its local state) to all
processes.
*}
definition OTR_sendMsg where
"OTR_sendMsg r p q st \<equiv> x st"
subsection {* Communication Predicate for \emph{One-Third Rule} *}
text {*
We now define the communication predicate for the \emph{One-Third Rule}
algorithm to be correct.
It requires that, infinitely often, there is a round where all processes
receive messages from the same set @{text "\<Pi>"} of processes where @{text "\<Pi>"}
contains more than two thirds of all processes.
The ``per-round'' part of the communication predicate is trivial.
*}
definition OTR_commPerRd where
"OTR_commPerRd HOrs \<equiv> True"
definition OTR_commGlobal where
"OTR_commGlobal HOs \<equiv>
\<forall>r. \<exists>r0 \<Pi>. r0 \<ge> r \<and> (\<forall>p. HOs r0 p = \<Pi>) \<and> card \<Pi> > (2*N) div 3"
subsection {* The \emph{One-Third Rule} Heard-Of Machine *}
text {*
We now define the HO machine for the \emph{One-Third Rule} algorithm
by assembling the algorithm definition and its communication-predicate.
Because this is an uncoordinated algorithm, the @{text crd} arguments
of the initial- and next-state predicates are unused.
*}
definition OTR_HOMachine where
"OTR_HOMachine =
\<lparr> CinitState = (\<lambda> p st crd. OTR_initState p st),
sendMsg = OTR_sendMsg,
CnextState = (\<lambda> r p st msgs crd st'. OTR_nextState r p st msgs st'),
HOcommPerRd = OTR_commPerRd,
HOcommGlobal = OTR_commGlobal \<rparr>"
abbreviation "OTR_M \<equiv> OTR_HOMachine::(Proc, 'val::linorder pstate, 'val) HOMachine"
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Heard_Of/otr/OneThirdRuleDefs.thy"}
|
function [configure, obj] = Estimate_Weight(configure, Seqs)
% Estimate the weight vector by stochastic gradient descent on the
% chosen-probability objective over the event sequences in Seqs;
% expects configure to carry fields id, tau, epoch and lr.
% initialization
configure.weight = rand(length(configure.id),1);
tau = configure.tau;
obj = zeros(configure.epoch * length(Seqs), 1);
tic
for n = 1:configure.epoch
ind = randperm(length(Seqs));
lr = configure.lr * (0.9)^(n-1);
for m = 1:length(Seqs)
X = [Seqs(ind(m)).Time; Seqs(ind(m)).Mark];
[Prob, Delta] = ChosenProbability(X, configure);
grad = 0;
for t1 = 1:size(Prob,1)-tau
for t2 = 1:size(Prob,2)-tau
if t1~=t2
obj((n-1)*length(Seqs)+m) = obj((n-1)*length(Seqs)+m) +...
Prob(t1, t2)*Prob(t1+tau, t2+tau);
grad = grad + ...
2*Prob(t1, t2)*Prob(t1+tau, t2+tau)*...
(Delta(:,t1,t2)+Delta(:,t1+tau,t2+tau));
end
end
end
configure.weight = configure.weight - lr * grad;
fprintf('epoch=%d, #seq=%d/%d, obj=%f, ||grad||=%.4f, time=%.2fsec\n',...
n, m, length(Seqs), obj((n-1)*length(Seqs)+m), norm(grad), toc);
end
end
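% Assumed usage (field values are placeholders):
%   cfg.id = 1:D; cfg.tau = 1; cfg.epoch = 10; cfg.lr = 1e-3;
%   [cfg, obj] = Estimate_Weight(cfg, Seqs);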
|
{"author": "HongtengXu", "repo": "Hawkes-Process-Toolkit", "sha": "2548a41c7418b8edef3261ab4479cee4e8eaf071", "save_path": "github-repos/MATLAB/HongtengXu-Hawkes-Process-Toolkit", "path": "github-repos/MATLAB/HongtengXu-Hawkes-Process-Toolkit/Hawkes-Process-Toolkit-2548a41c7418b8edef3261ab4479cee4e8eaf071/Analysis/Estimate_Weight.m"}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 07:15:09 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import os
import math
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from Utils import *
def init_sns_style(style='white'):
    sns.set_style(style)  # honor the requested style instead of hard-coding 'white'
def new_figure(size=None):
    return plt.figure(figsize=size)  # pass the requested size through to matplotlib
def save_figure(prefix,folder=None,size=None,imgfmt='jpg'):
    assert size is None or len(size) in (1, 2), "Size should be a 1- or 2-tuple"
    if size:
        tup = size if len(size)==2 else (size[0], size[0])  # duplicate a 1-tuple correctly
        plt.gcf().set_size_inches(tup[0], tup[1], forward=True)
fn = '.'.join([prefix,imgfmt]);
if folder:
create_folder(folder);
fn = os.path.join(folder,fn);
plt.savefig(fn);
def plot_keras_history(history,keydict,suptitle='',boxsize=4,
legendloc='lower center'):
'''
Accepts history dictionary object as input. Plots both training
and validation accuracy and loss curves against epochs
'''
for keytup in keydict.values():
for key in keytup:
if not key in history: raise KeyError(key);
x = list(range(len(history[key])));
nfigs = len(keydict);
ncols= 2 if nfigs>=2 else 1;
nrows = math.ceil(nfigs/2);
figsize = (nrows*boxsize,ncols*boxsize);
f, axes = plt.subplots(nrows=nrows,ncols=ncols,figsize=figsize);
f.suptitle(suptitle);
if not isinstance(axes,np.ndarray):
axes = [axes];
else:
axes = np.ndarray.flatten(axes);
titles = list(keydict.keys());
for i in range(nfigs):
ax = axes[i]; ax.set_xlabel('# Epoch');
ax.set_ylabel(titles[i]);
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(9)
for key in keydict[titles[i]]:
line, = ax.plot(x,history[key],label=key);
ax.legend();
plt.tight_layout(h_pad=0.9)
plt.show()
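# Assumed usage (history comes from a Keras model.fit call):
#   h = model.fit(X, y, validation_data=(Xv, yv), epochs=20)
#   plot_keras_history(h.history,
#                      {'Loss': ('loss', 'val_loss'),
#                       'Accuracy': ('acc', 'val_acc')},
#                      suptitle='Training curves')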
|
{"hexsha": "8e628f5167e31c169ac4cc191a707dda8aa8e867", "size": 2070, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/PlotUtils.py", "max_stars_repo_name": "madhurkashyap/boundary_detection", "max_stars_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/PlotUtils.py", "max_issues_repo_name": "madhurkashyap/boundary_detection", "max_issues_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/PlotUtils.py", "max_forks_repo_name": "madhurkashyap/boundary_detection", "max_forks_repo_head_hexsha": "f7fb98c8bcbc204b1fcd0eb34a8699f16a8725a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5714285714, "max_line_length": 78, "alphanum_fraction": 0.5942028986, "include": true, "reason": "import numpy", "num_tokens": 534}
|
[STATEMENT]
lemma lit_ord_dominating_term:
assumes "(s1,s2) \<in> trm_ord \<or> (s1,t2) \<in> trm_ord"
assumes "orient_lit x1 s1 t1 p1"
assumes "orient_lit x2 s2 t2 p2"
assumes "vars_of_lit x1 = {}"
assumes "vars_of_lit x2 = {}"
shows "(x1,x2) \<in> lit_ord"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>vars_of_lit x1 = {}\<close> and \<open>orient_lit x1 s1 t1 p1\<close>
[PROOF STATE]
proof (chain)
picking this:
vars_of_lit x1 = {}
orient_lit x1 s1 t1 p1
[PROOF STEP]
have "vars_of t1 = {}" and "vars_of s1 = {}"
and "\<not>(s1,t1) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
vars_of_lit x1 = {}
orient_lit x1 s1 t1 p1
goal (1 subgoal):
1. vars_of t1 = {} &&& vars_of s1 = {} &&& (s1, t1) \<notin> trm_ord
[PROOF STEP]
unfolding orient_lit_def
[PROOF STATE]
proof (prove)
using this:
vars_of_lit x1 = {}
(x1 = Pos (Eq s1 t1) \<or> x1 = Pos (Eq t1 s1)) \<and> (s1, t1) \<notin> trm_ord \<and> p1 = pos \<or> (x1 = Neg (Eq s1 t1) \<or> x1 = Neg (Eq t1 s1)) \<and> (s1, t1) \<notin> trm_ord \<and> p1 = neg
goal (1 subgoal):
1. vars_of t1 = {} &&& vars_of s1 = {} &&& (s1, t1) \<notin> trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of t1 = {}
vars_of s1 = {}
(s1, t1) \<notin> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from assms(5) and \<open>orient_lit x2 s2 t2 p2\<close>
[PROOF STATE]
proof (chain)
picking this:
vars_of_lit x2 = {}
orient_lit x2 s2 t2 p2
[PROOF STEP]
have "vars_of t2 = {}" and "vars_of s2 = {}"
and "\<not>(s2,t2) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
vars_of_lit x2 = {}
orient_lit x2 s2 t2 p2
goal (1 subgoal):
1. vars_of t2 = {} &&& vars_of s2 = {} &&& (s2, t2) \<notin> trm_ord
[PROOF STEP]
unfolding orient_lit_def
[PROOF STATE]
proof (prove)
using this:
vars_of_lit x2 = {}
(x2 = Pos (Eq s2 t2) \<or> x2 = Pos (Eq t2 s2)) \<and> (s2, t2) \<notin> trm_ord \<and> p2 = pos \<or> (x2 = Neg (Eq s2 t2) \<or> x2 = Neg (Eq t2 s2)) \<and> (s2, t2) \<notin> trm_ord \<and> p2 = neg
goal (1 subgoal):
1. vars_of t2 = {} &&& vars_of s2 = {} &&& (s2, t2) \<notin> trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of t2 = {}
vars_of s2 = {}
(s2, t2) \<notin> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>vars_of t1 = {}\<close> and \<open>vars_of s1 = {}\<close> and \<open>\<not>(s1,t1) \<in> trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
vars_of t1 = {}
vars_of s1 = {}
(s1, t1) \<notin> trm_ord
[PROOF STEP]
have o1: "t1 = s1 \<or> (t1,s1) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
vars_of t1 = {}
vars_of s1 = {}
(s1, t1) \<notin> trm_ord
goal (1 subgoal):
1. t1 = s1 \<or> (t1, s1) \<in> trm_ord
[PROOF STEP]
using trm_ord_ground_total
[PROOF STATE]
proof (prove)
using this:
vars_of t1 = {}
vars_of s1 = {}
(s1, t1) \<notin> trm_ord
\<forall>x y. ground_term x \<longrightarrow> ground_term y \<longrightarrow> x \<noteq> y \<longrightarrow> (x, y) \<in> trm_ord \<or> (y, x) \<in> trm_ord
goal (1 subgoal):
1. t1 = s1 \<or> (t1, s1) \<in> trm_ord
[PROOF STEP]
unfolding ground_term_def
[PROOF STATE]
proof (prove)
using this:
vars_of t1 = {}
vars_of s1 = {}
(s1, t1) \<notin> trm_ord
\<forall>x y. vars_of x = {} \<longrightarrow> vars_of y = {} \<longrightarrow> x \<noteq> y \<longrightarrow> (x, y) \<in> trm_ord \<or> (y, x) \<in> trm_ord
goal (1 subgoal):
1. t1 = s1 \<or> (t1, s1) \<in> trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 = s1 \<or> (t1, s1) \<in> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>vars_of t2 = {}\<close> and \<open>vars_of s2 = {}\<close> and \<open>\<not>(s2,t2) \<in> trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
vars_of t2 = {}
vars_of s2 = {}
(s2, t2) \<notin> trm_ord
[PROOF STEP]
have o2: "t2 = s2 \<or> (t2,s2) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
vars_of t2 = {}
vars_of s2 = {}
(s2, t2) \<notin> trm_ord
goal (1 subgoal):
1. t2 = s2 \<or> (t2, s2) \<in> trm_ord
[PROOF STEP]
using trm_ord_ground_total
[PROOF STATE]
proof (prove)
using this:
vars_of t2 = {}
vars_of s2 = {}
(s2, t2) \<notin> trm_ord
\<forall>x y. ground_term x \<longrightarrow> ground_term y \<longrightarrow> x \<noteq> y \<longrightarrow> (x, y) \<in> trm_ord \<or> (y, x) \<in> trm_ord
goal (1 subgoal):
1. t2 = s2 \<or> (t2, s2) \<in> trm_ord
[PROOF STEP]
unfolding ground_term_def
[PROOF STATE]
proof (prove)
using this:
vars_of t2 = {}
vars_of s2 = {}
(s2, t2) \<notin> trm_ord
\<forall>x y. vars_of x = {} \<longrightarrow> vars_of y = {} \<longrightarrow> x \<noteq> y \<longrightarrow> (x, y) \<in> trm_ord \<or> (y, x) \<in> trm_ord
goal (1 subgoal):
1. t2 = s2 \<or> (t2, s2) \<in> trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t2 = s2 \<or> (t2, s2) \<in> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>\<not>(s2,t2) \<in> trm_ord\<close> and assms(1)
[PROOF STATE]
proof (chain)
picking this:
(s2, t2) \<notin> trm_ord
(s1, s2) \<in> trm_ord \<or> (s1, t2) \<in> trm_ord
[PROOF STEP]
have "(s1,s2) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
(s2, t2) \<notin> trm_ord
(s1, s2) \<in> trm_ord \<or> (s1, t2) \<in> trm_ord
goal (1 subgoal):
1. (s1, s2) \<in> trm_ord
[PROOF STEP]
by (metis assms(1) o2 trm_ord_trans transE)
[PROOF STATE]
proof (state)
this:
(s1, s2) \<in> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
let ?m1 = "mset_lit x1"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
let ?m2 = "mset_lit x2"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from assms(1) and o1 and o2
[PROOF STATE]
proof (chain)
picking this:
(s1, s2) \<in> trm_ord \<or> (s1, t2) \<in> trm_ord
t1 = s1 \<or> (t1, s1) \<in> trm_ord
t2 = s2 \<or> (t2, s2) \<in> trm_ord
[PROOF STEP]
have "(t1,s2) \<in> trm_ord"
[PROOF STATE]
proof (prove)
using this:
(s1, s2) \<in> trm_ord \<or> (s1, t2) \<in> trm_ord
t1 = s1 \<or> (t1, s1) \<in> trm_ord
t2 = s2 \<or> (t2, s2) \<in> trm_ord
goal (1 subgoal):
1. (t1, s2) \<in> trm_ord
[PROOF STEP]
using trm_ord_trans
trans_def
[PROOF STATE]
proof (prove)
using this:
(s1, s2) \<in> trm_ord \<or> (s1, t2) \<in> trm_ord
t1 = s1 \<or> (t1, s1) \<in> trm_ord
t2 = s2 \<or> (t2, s2) \<in> trm_ord
trans trm_ord
trans ?r = (\<forall>x y z. (x, y) \<in> ?r \<longrightarrow> (y, z) \<in> ?r \<longrightarrow> (x, z) \<in> ?r)
goal (1 subgoal):
1. (t1, s2) \<in> trm_ord
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
(t1, s2) \<in> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>(s1,s2) \<in> trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
(t1, s2) \<in> trm_ord
(s1, s2) \<in> trm_ord
[PROOF STEP]
have
s2max: "\<forall>x. (x \<in># {# t1,t1,s1,s1 #} \<longrightarrow> (x,s2) \<in> trm_ord)"
[PROOF STATE]
proof (prove)
using this:
(t1, s2) \<in> trm_ord
(s1, s2) \<in> trm_ord
goal (1 subgoal):
1. \<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
have "{# s2 #} \<subset># {# t2,t2,s2,s2 #}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {#s2#} \<subset># {#t2, t2, s2, s2#}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{#s2#} \<subset># {#t2, t2, s2, s2#}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>{# s2 #} \<subset># {# t2,t2,s2,s2 #}\<close>
[PROOF STATE]
proof (chain)
picking this:
{#s2#} \<subset># {#t2, t2, s2, s2#}
[PROOF STEP]
have "( {# s2 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
{#s2#} \<subset># {#t2, t2, s2, s2#}
goal (1 subgoal):
1. ({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
using trm_ord_trans multiset_order_inclusion [of "{# s2 #}" "{# t2,t2,s2,s2 #}" "trm_ord"]
[PROOF STATE]
proof (prove)
using this:
{#s2#} \<subset># {#t2, t2, s2, s2#}
trans trm_ord
\<lbrakk>{#s2#} \<subset># {#t2, t2, s2, s2#}; trans trm_ord\<rbrakk> \<Longrightarrow> ({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
have "p1 = neg \<or> p1 = pos"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p1 = neg \<or> p1 = pos
[PROOF STEP]
using sign.exhaust
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y = pos \<Longrightarrow> ?P; ?y = neg \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. p1 = neg \<or> p1 = pos
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p1 = neg \<or> p1 = pos
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p1 = neg \<or> p1 = pos
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
p1 = neg \<or> p1 = pos
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. p1 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p1 = neg"
[PROOF STATE]
proof (state)
this:
p1 = neg
goal (2 subgoals):
1. p1 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x1 s1 t1 p1\<close>
[PROOF STATE]
proof (chain)
picking this:
p1 = neg
orient_lit x1 s1 t1 p1
[PROOF STEP]
have "x1 = (Neg (Eq t1 s1)) \<or> x1 = (Neg (Eq s1 t1))"
[PROOF STATE]
proof (prove)
using this:
p1 = neg
orient_lit x1 s1 t1 p1
goal (1 subgoal):
1. x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p1 = neg
orient_lit x1 s1 t1 p1
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
goal (2 subgoals):
1. p1 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
[PROOF STEP]
have m1: "?m1 = {# t1,t1,s1,s1 #}"
[PROOF STATE]
proof (prove)
using this:
x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
goal (1 subgoal):
1. mset_lit x1 = {#t1, t1, s1, s1#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x1 = Neg (Eq t1 s1) \<or> x1 = Neg (Eq s1 t1)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x1 = {#t1, t1, s1, s1#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x1 = {#t1, t1, s1, s1#}
goal (2 subgoals):
1. p1 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
have "p2 = neg \<or> p2 = pos"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p2 = neg \<or> p2 = pos
[PROOF STEP]
using sign.exhaust
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y = pos \<Longrightarrow> ?P; ?y = neg \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. p2 = neg \<or> p2 = pos
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p2 = neg \<or> p2 = pos
goal (2 subgoals):
1. p1 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p2 = neg \<or> p2 = pos
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
p2 = neg \<or> p2 = pos
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p2 = neg"
[PROOF STATE]
proof (state)
this:
p2 = neg
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x2 s2 t2 p2\<close>
[PROOF STATE]
proof (chain)
picking this:
p2 = neg
orient_lit x2 s2 t2 p2
[PROOF STEP]
have "x2 = (Neg (Eq t2 s2)) \<or> x2 = (Neg (Eq s2 t2))"
[PROOF STATE]
proof (prove)
using this:
p2 = neg
orient_lit x2 s2 t2 p2
goal (1 subgoal):
1. x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p2 = neg
orient_lit x2 s2 t2 p2
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
have m2: "?m2 = {# t2,t2,s2,s2 #}"
[PROOF STATE]
proof (prove)
using this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
goal (1 subgoal):
1. mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x2 = {#t2, t2, s2, s2#}
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from s2max
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
[PROOF STEP]
have "({# t1,t1,s1,s1 #}, {# s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult1_def_lemma [of "{# s2 #}" "{#}" s2 "{# t1,t1,s1,s1 #}" "{# t1,t1,s1,s1 #}" trm_ord]
mult_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
{#s2#} = {#} + {#s2#} \<and> {#t1, t1, s1, s1#} = {#} + {#t1, t1, s1, s1#} \<and> (\<forall>b. b \<in># {#t1, t1, s1, s1#} \<longrightarrow> (b, s2) \<in> trm_ord) \<Longrightarrow> ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult1 trm_ord
mult ?r = (mult1 ?r)\<^sup>+
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>( {# s2 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord\<close> and \<open>({# t1,t1,s1,s1 #}, {# s2 #}) \<in> mult trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
have "( {# t1,t1,s1,s1 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult_trm_ord_trans
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
trans (mult trm_ord)
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
unfolding trans_def
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
\<forall>x y z. (x, y) \<in> mult trm_ord \<longrightarrow> (y, z) \<in> mult trm_ord \<longrightarrow> (x, z) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and m1 and m2
[PROOF STATE]
proof (chain)
picking this:
({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
using lit_ord_def
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
lit_ord = {(x, y). (mset_lit x, mset_lit y) \<in> mult trm_ord}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p2 = pos"
[PROOF STATE]
proof (state)
this:
p2 = pos
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x2 s2 t2 p2\<close>
[PROOF STATE]
proof (chain)
picking this:
p2 = pos
orient_lit x2 s2 t2 p2
[PROOF STEP]
have "x2 = (Pos (Eq t2 s2)) \<or> x2 = (Pos (Eq s2 t2))"
[PROOF STATE]
proof (prove)
using this:
p2 = pos
orient_lit x2 s2 t2 p2
goal (1 subgoal):
1. x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p2 = pos
orient_lit x2 s2 t2 p2
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
have m2: "?m2 = {# t2,s2 #}"
[PROOF STATE]
proof (prove)
using this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
goal (1 subgoal):
1. mset_lit x2 = {#t2, s2#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x2 = {#t2, s2#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x2 = {#t2, s2#}
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from s2max
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
[PROOF STEP]
have "({# t1,t1,s1,s1 #}, {# s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult1_def_lemma [of "{# s2 #}" "{#}" s2 "{# t1,t1,s1,s1 #}" "{# t1,t1,s1,s1 #}" trm_ord]
mult_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
{#s2#} = {#} + {#s2#} \<and> {#t1, t1, s1, s1#} = {#} + {#t1, t1, s1, s1#} \<and> (\<forall>b. b \<in># {#t1, t1, s1, s1#} \<longrightarrow> (b, s2) \<in> trm_ord) \<Longrightarrow> ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult1 trm_ord
mult ?r = (mult1 ?r)\<^sup>+
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>( {# s2 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
have "({# t1,t1,s1,s1 #}, {# t2,s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
[PROOF STEP]
using mset_ordering_add1 [of "{# t1,t1,s1,s1 #}" " {# s2 #}" trm_ord t2]
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, t1, s1, s1#}, {#s2#}) \<in> mult trm_ord \<Longrightarrow> ({#t1, t1, s1, s1#}, {#s2#} + {#t2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
[PROOF STEP]
by (auto)
[PROOF STATE]
proof (state)
this:
({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and m1 and m2
[PROOF STATE]
proof (chain)
picking this:
({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, s2#}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, s2#}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
using lit_ord_def
[PROOF STATE]
proof (prove)
using this:
({#t1, t1, s1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, t1, s1, s1#}
mset_lit x2 = {#t2, s2#}
lit_ord = {(x, y). (mset_lit x, mset_lit y) \<in> mult trm_ord}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p1 = pos"
[PROOF STATE]
proof (state)
this:
p1 = pos
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x1 s1 t1 p1\<close>
[PROOF STATE]
proof (chain)
picking this:
p1 = pos
orient_lit x1 s1 t1 p1
[PROOF STEP]
have "x1 = (Pos (Eq t1 s1)) \<or> x1 = (Pos (Eq s1 t1))"
[PROOF STATE]
proof (prove)
using this:
p1 = pos
orient_lit x1 s1 t1 p1
goal (1 subgoal):
1. x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p1 = pos
orient_lit x1 s1 t1 p1
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
[PROOF STEP]
have m1: "?m1 = {# t1,s1 #}"
[PROOF STATE]
proof (prove)
using this:
x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
goal (1 subgoal):
1. mset_lit x1 = {#t1, s1#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x1 = Pos (Eq t1 s1) \<or> x1 = Pos (Eq s1 t1)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x1 = {#t1, s1#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x1 = {#t1, s1#}
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
have "p2 = neg \<or> p2 = pos"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p2 = neg \<or> p2 = pos
[PROOF STEP]
using sign.exhaust
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?y = pos \<Longrightarrow> ?P; ?y = neg \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. p2 = neg \<or> p2 = pos
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p2 = neg \<or> p2 = pos
goal (1 subgoal):
1. p1 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p2 = neg \<or> p2 = pos
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
p2 = neg \<or> p2 = pos
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p2 = neg"
[PROOF STATE]
proof (state)
this:
p2 = neg
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x2 s2 t2 p2\<close>
[PROOF STATE]
proof (chain)
picking this:
p2 = neg
orient_lit x2 s2 t2 p2
[PROOF STEP]
have "x2 = (Neg (Eq t2 s2)) \<or> x2 = (Neg (Eq s2 t2))"
[PROOF STATE]
proof (prove)
using this:
p2 = neg
orient_lit x2 s2 t2 p2
goal (1 subgoal):
1. x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p2 = neg
orient_lit x2 s2 t2 p2
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
[PROOF STEP]
have m2: "?m2 = {# t2,t2,s2,s2 #}"
[PROOF STATE]
proof (prove)
using this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
goal (1 subgoal):
1. mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x2 = Neg (Eq t2 s2) \<or> x2 = Neg (Eq s2 t2)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x2 = {#t2, t2, s2, s2#}
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from s2max
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
[PROOF STEP]
have "({# t1,s1 #}, {# s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult1_def_lemma [of "{# s2 #}" "{#}" s2 "{# t1,s1 #}" "{# t1,s1 #}" trm_ord]
mult_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
{#s2#} = {#} + {#s2#} \<and> {#t1, s1#} = {#} + {#t1, s1#} \<and> (\<forall>b. b \<in># {#t1, s1#} \<longrightarrow> (b, s2) \<in> trm_ord) \<Longrightarrow> ({#t1, s1#}, {#s2#}) \<in> mult1 trm_ord
mult ?r = (mult1 ?r)\<^sup>+
goal (1 subgoal):
1. ({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from \<open>( {# s2 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord\<close> and \<open>({# t1,s1 #}, {# s2 #}) \<in> mult trm_ord\<close>
[PROOF STATE]
proof (chain)
picking this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
have "( {# t1,s1 #}, {# t2,t2,s2,s2 #} ) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult_trm_ord_trans
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
trans (mult trm_ord)
goal (1 subgoal):
1. ({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
unfolding trans_def
[PROOF STATE]
proof (prove)
using this:
({#s2#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
\<forall>x y z. (x, y) \<in> mult trm_ord \<longrightarrow> (y, z) \<in> mult trm_ord \<longrightarrow> (x, z) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
goal (2 subgoals):
1. p2 = neg \<Longrightarrow> (x1, x2) \<in> lit_ord
2. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and m1 and m2
[PROOF STATE]
proof (chain)
picking this:
({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
using lit_ord_def
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#t2, t2, s2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, t2, s2, s2#}
lit_ord = {(x, y). (mset_lit x, mset_lit y) \<in> mult trm_ord}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
assume "p2 = pos"
[PROOF STATE]
proof (state)
this:
p2 = pos
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and \<open>orient_lit x2 s2 t2 p2\<close>
[PROOF STATE]
proof (chain)
picking this:
p2 = pos
orient_lit x2 s2 t2 p2
[PROOF STEP]
have "x2 = (Pos (Eq t2 s2)) \<or> x2 = (Pos (Eq s2 t2))"
[PROOF STATE]
proof (prove)
using this:
p2 = pos
orient_lit x2 s2 t2 p2
goal (1 subgoal):
1. x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
using orient_lit_def
[PROOF STATE]
proof (prove)
using this:
p2 = pos
orient_lit x2 s2 t2 p2
orient_lit ?L ?u ?v ?s = ((?L = Pos (Eq ?u ?v) \<or> ?L = Pos (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = pos \<or> (?L = Neg (Eq ?u ?v) \<or> ?L = Neg (Eq ?v ?u)) \<and> (?u, ?v) \<notin> trm_ord \<and> ?s = neg)
goal (1 subgoal):
1. x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
[PROOF STEP]
have m2: "?m2 = {# t2,s2 #}"
[PROOF STATE]
proof (prove)
using this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
goal (1 subgoal):
1. mset_lit x2 = {#t2, s2#}
[PROOF STEP]
using mset_lit.simps
[PROOF STATE]
proof (prove)
using this:
x2 = Pos (Eq t2 s2) \<or> x2 = Pos (Eq s2 t2)
mset_lit (Pos (Eq ?t ?s)) = {#?t, ?s#}
mset_lit (Neg (Eq ?t ?s)) = {#?t, ?t, ?s, ?s#}
goal (1 subgoal):
1. mset_lit x2 = {#t2, s2#}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
mset_lit x2 = {#t2, s2#}
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from s2max
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
[PROOF STEP]
have "({# t1,s1 #}, {# s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
using mult1_def_lemma [of "{# s2 #}" "{#}" s2 "{# t1,s1 #}" "{# t1,s1 #}" trm_ord]
mult_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x. x \<in># {#t1, t1, s1, s1#} \<longrightarrow> (x, s2) \<in> trm_ord
{#s2#} = {#} + {#s2#} \<and> {#t1, s1#} = {#} + {#t1, s1#} \<and> (\<forall>b. b \<in># {#t1, s1#} \<longrightarrow> (b, s2) \<in> trm_ord) \<Longrightarrow> ({#t1, s1#}, {#s2#}) \<in> mult1 trm_ord
mult ?r = (mult1 ?r)\<^sup>+
goal (1 subgoal):
1. ({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
[PROOF STEP]
have "({# t1,s1 #}, {# t2,s2 #}) \<in> mult trm_ord"
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
[PROOF STEP]
using mset_ordering_add1 [of "{# t1,s1 #}" " {# s2 #}" trm_ord t2]
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#s2#}) \<in> mult trm_ord
({#t1, s1#}, {#s2#}) \<in> mult trm_ord \<Longrightarrow> ({#t1, s1#}, {#s2#} + {#t2#}) \<in> mult trm_ord
goal (1 subgoal):
1. ({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
goal (1 subgoal):
1. p2 = pos \<Longrightarrow> (x1, x2) \<in> lit_ord
[PROOF STEP]
from this and m1 and m2
[PROOF STATE]
proof (chain)
picking this:
({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, s2#}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, s2#}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
using lit_ord_def
[PROOF STATE]
proof (prove)
using this:
({#t1, s1#}, {#t2, s2#}) \<in> mult trm_ord
mset_lit x1 = {#t1, s1#}
mset_lit x2 = {#t2, s2#}
lit_ord = {(x, y). (mset_lit x, mset_lit y) \<in> mult trm_ord}
goal (1 subgoal):
1. (x1, x2) \<in> lit_ord
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(x1, x2) \<in> lit_ord
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 18453, "file": "SuperCalc_superposition", "length": 166}
|
import numpy as np
import pandas as pd
import networkx as nx
import scipy as cp
import sys; sys.path.insert(1,'../')
#import sys; sys.path.insert(1, 'C:/Users/hbass/Desktop/fca/FCA-ML/')
from firefly import *
from kuramoto import *
from scipy.sparse import csr_matrix
from math import floor
from scipy.sparse.csgraph import reverse_cuthill_mckee
from tqdm import tqdm  # used below; may also arrive via the star imports above
#path = 'C:/Users/hbass/Desktop/fca/FCA-ML/adjacency-dynamics/'
#path = "/mnt/l/home/fca30/"
path = "/mnt/l/home/kura30/"
# read initial coloring, labels, indices and graph6
coloring = pd.read_csv(path+'color.csv',
header=None).to_numpy()
# dataout = [i for i in np.load(path+'labels (4).npy',
# allow_pickle=True)]
dataout = [i for i in pd.read_csv(path + 'sync.csv', header=None).to_numpy()]
indices = [i for i in pd.read_csv(path+'ind.csv',
header=None).to_numpy()]
graphs = nx.read_graph6(path+'tag.csv')
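# color.csv: initial colorings; sync.csv: sync/non-sync labels;
# ind.csv: indices into the graph collection; tag.csv: graphs in graph6 format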
count = True  # if True, cap the dataset at 100 sync and 100 non-sync samples
def width(colors, kappa):
"""
computes width from a color list
"""
ordered = list(set(colors))
lordered = len(ordered)
threshold = floor(kappa/2)
if ordered == 0:
assert("Empty array or logic error.")
elif lordered == 1:
return 0
elif lordered == 2:
dw = ordered[1]-ordered[0]
if dw > threshold:
return kappa - dw
else:
return dw
else:
widths = [ordered[-1]-ordered[0]]
for i in range(lordered-1):
widths.append(ordered[i+1]-ordered[i])
return kappa - max(widths)
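# Quick sanity check for width with kappa = 5 (values per the gap-based
# definition above): width([0, 1], 5) == 1, width([0, 4], 5) == 1 (wrap-around),
# and width([0, 1, 4], 5) == 2 (largest gap is 3, so 5 - 3 = 2).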
def delta_buildmatrices():
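    """
    Run Kuramoto simulations on the stored graphs, turn each color
    trajectory into a sequence of RCM-reordered weighted adjacency
    matrices (pairwise widths on the edges, colors on the diagonal),
    and save up to 100 sync and 100 non-sync samples to delta.npy.
    """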
    cols = []; kappa = 5; its = 24; ind = []  # kappa: color ring size; its: iteration budget used for padding
# run simulations
print("===================")
print("RUNNING SIMULATIONS")
print("===================")
if count:
nsl = 0
sl = 0
for j in tqdm(indices):
edgelist = list(graphs[int(j)].edges)
colorlist = coloring[j][0]
net = ColorNNetwork(colorlist.tolist(), edgelist)
coldyn = simulate_Kuramoto(net, K=2, timesec=60, verbose=0, intrinsic=0)[0]
s = dataout[int(j)]
if count:
if s:
if sl>=100:
pass
else:
cols.append(coldyn)
ind.append(int(j))
sl+=1
else:
if nsl>=100:
pass
else:
cols.append(coldyn)
ind.append(int(j))
nsl+=1
if sl >= 100 and nsl >= 100:
break
print(len(cols),len(ind))
adjmatsnsl = []
adjmatssl = []
n = 30
dataynsl = []
dataysl = []
# create adjacency matrices
print("==================")
print("ADJACENCY MATRICES")
print("==================")
if count:
nsl = 0
sl = 0
for i, j in enumerate(tqdm(ind)):
# index from the graphs
graph = graphs[int(j)]
# compute rcm
rcm = np.asarray(reverse_cuthill_mckee(csr_matrix(\
nx.adjacency_matrix(graph).todense())
)
)
adjdyn = []
        # weight each edge by the width of its endpoint colors, then store
        # the RCM-permuted adjacency matrix with the colors on the diagonal
for col in cols[i]:
for x in range(n):
for y in range(x+1):
if graph.has_edge(x, y):
widthcol = width([col[x],col[y]], kappa)
graph.add_weighted_edges_from([(x, y, widthcol)])
frame = nx.adjacency_matrix(graph).todense()[:,rcm][rcm,:] + \
np.diag(np.asarray(col)[rcm])
adjdyn.append(frame)
# pad iterations to uniform length
frameseq = np.stack(np.asarray(
adjdyn + [adjdyn[-1]]*((its+1)-len(cols[i]))
),axis=0)
if count:
s = dataout[int(j)]
if s:
sl+=1
else:
nsl+=1
print("SYNC:", len(dataysl),"NONSYNC:",len(dataynsl))
if sl > 100:
pass
else:
adjmatssl.append(frameseq)
dataysl.append(s)
if nsl > 100:
pass
else:
adjmatsnsl.append(frameseq)
dataynsl.append(s)
if sl > 100 and nsl > 100:
break
    print(len(adjmatssl), len(dataysl))
    print(len(adjmatsnsl), len(dataynsl))
#datain = np.stack(adjmats, axis=0)
# save results
with open(path+'delta.npy', 'wb') as f:
np.save(f, adjmatssl)
np.save(f, dataysl)
np.save(f, adjmatsnsl)
np.save(f, dataynsl)
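        # The four arrays can be read back in the same order with
        # successive np.load calls on a single open file handle.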
delta_buildmatrices()
|
{"hexsha": "b222c094e9210c0f9dee3b844c3c8cc6f95eaa86", "size": 4622, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulation-data/LRCN-datagen/DeltaKM.py", "max_stars_repo_name": "richpaulyim/L2PSync", "max_stars_repo_head_hexsha": "81138245c1b50584476be83722ee1044ef023ce6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-26T22:11:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T00:09:37.000Z", "max_issues_repo_path": "simulation-data/LRCN-datagen/DeltaKM.py", "max_issues_repo_name": "richpaulyim/L2PSync", "max_issues_repo_head_hexsha": "81138245c1b50584476be83722ee1044ef023ce6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-27T01:36:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-12T07:09:50.000Z", "max_forks_repo_path": "simulation-data/LRCN-datagen/DeltaKM.py", "max_forks_repo_name": "richpaulyim/L2PSync", "max_forks_repo_head_hexsha": "81138245c1b50584476be83722ee1044ef023ce6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-27T00:09:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T23:15:00.000Z", "avg_line_length": 28.0121212121, "max_line_length": 83, "alphanum_fraction": 0.5023799221, "include": true, "reason": "import numpy,import scipy,from scipy,import networkx", "num_tokens": 1180}
|
(* Title: Sigma/Typed_Sigma.thy
Author: Florian Kammuller and Henry Sudhof, 2006
*)
header {* First Order Types for Sigma terms *}
theory TypedSigma imports "../preliminary/Environments" Sigma begin
subsubsection {* Types and typing rules *}
text{* The inductive definition of the typing relation. *}
definition
return :: "(type \<times> type) \<Rightarrow> type" where
"return a = fst a"
definition
param :: "(type \<times> type) \<Rightarrow> type" where
"param a = snd a"
primrec
do :: "type \<Rightarrow> (Label set)"
where
"do (Object l) = (dom l)"
primrec
type_get :: "type \<Rightarrow> Label \<Rightarrow> (type \<times> type) option " ("_^_" 1000)
where
"(Object l)^n = (l n)"
(* We need to restrict objects to ok environments,
   as the empty object would not yield an ok environment otherwise. *)
inductive
typing :: "(type environment) \<Rightarrow> sterm \<Rightarrow> type \<Rightarrow> bool"
("_ \<turnstile> _ : _" [80, 0, 80] 230)
where
T_Var[intro!]:
"\<lbrakk> ok env; x \<in> env_dom env; (the (env!x)) = T \<rbrakk>
\<Longrightarrow> env \<turnstile> (Fvar x) : T"
| T_Obj[intro!]:
"\<lbrakk> ok env; dom b = do A; finite F;
\<forall>l\<in>do A. \<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:A\<rparr>\<lparr>p:param(the (A^l))\<rparr>
\<turnstile> (the (b l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the (A^l)) \<rbrakk>
\<Longrightarrow> env \<turnstile> (Obj b A) : A"
| T_Upd[intro!]:
"\<lbrakk> finite F;
\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:A\<rparr>\<lparr>p:param(the (A^l))\<rparr>
\<turnstile> (n\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the (A^l));
env \<turnstile> a : A; l \<in> do A \<rbrakk> \<Longrightarrow> env \<turnstile> Upd a l n : A"
| T_Call[intro!]:
"\<lbrakk> env \<turnstile> a : A; env \<turnstile> b : param(the (A^l)); l \<in> do A \<rbrakk>
\<Longrightarrow> env \<turnstile> (Call a l b) : return(the (A^l))"
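(* The object and update rules quantify cofinitely: their premises must hold
   for all self/parameter names s, p outside some finite set F, as is usual
   for locally nameless representations of binders. *)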
inductive_cases typing_elims [elim!]:
"e \<turnstile> Obj b T : T"
"e \<turnstile> Fvar x : T"
"e \<turnstile> Call a l b : T"
"e \<turnstile> Upd a l n : T"
subsubsection {* Basic lemmas *}
text{* Basic properties of the type system. *}
lemma not_bvar: "e \<turnstile> t : T \<Longrightarrow> \<forall>i. t \<noteq> Bvar i"
by (erule typing.cases, simp_all)
lemma typing_regular': "e \<turnstile> t : T \<Longrightarrow> ok e"
by (induct rule:typing.induct, auto)
lemma typing_regular'': "e \<turnstile> t : T \<Longrightarrow> lc t"
by (induct rule:typing.induct, auto)
theorem typing_regular: "e \<turnstile> t : T \<Longrightarrow> ok e \<and> lc t"
by (simp add: typing_regular' typing_regular'')
lemma obj_inv: "e \<turnstile> Obj f U : A \<Longrightarrow> A = U"
by (erule typing.cases, auto)
lemma obj_inv_elim:
"e \<turnstile> Obj f U : U
\<Longrightarrow> (dom f = do U)
\<and> (\<exists>F. finite F \<and> (\<forall>l\<in>do U. \<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> e\<lparr>s:U\<rparr>\<lparr>p:param(the U^l)\<rparr>
\<turnstile> (the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the (U^l))))"
by (erule typing.cases, simp_all, blast)
lemma typing_induct[consumes 1, case_names Fvar Call Upd Obj Bnd]:
fixes
env :: "type environment" and t :: sterm and T :: type and
P1 :: "type environment \<Rightarrow> sterm \<Rightarrow> type \<Rightarrow> bool" and
P2 :: "type environment \<Rightarrow> sterm \<Rightarrow> type \<Rightarrow> Label \<Rightarrow> bool"
assumes
"env \<turnstile> t : T" and
"\<And>env T x. \<lbrakk> ok env; x \<in> env_dom env; the env!x = T \<rbrakk>
\<Longrightarrow> P1 env (Fvar x) T" and
"\<And>env T t l p. \<lbrakk> env \<turnstile> t : T; P1 env t T; env \<turnstile> p : param (the(T^l));
P1 env p (param (the(T^l))); l \<in> do T \<rbrakk>
\<Longrightarrow> P1 env (Call t l p) (return (the(T^l)))" and
"\<And>env T t l u. \<lbrakk> env \<turnstile> t : T; P1 env t T; l \<in> do T; P2 env u T l \<rbrakk>
\<Longrightarrow> P1 env (Upd t l u) T" and
"\<And>env T f. \<lbrakk> ok env; dom f = do T; \<forall>l\<in>dom f. P2 env (the(f l)) T l \<rbrakk>
\<Longrightarrow> P1 env (Obj f T) T" and
"\<And>env T l t L. \<lbrakk> ok env; finite L;
\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param (the(T^l))\<rparr>
\<turnstile> (t\<^bsup>[Fvar s, Fvar p]\<^esup>) : return (the(T^l))
\<and> P1 (env\<lparr>s:T\<rparr>\<lparr>p:param (the(T^l))\<rparr>) (t\<^bsup>[Fvar s, Fvar p]\<^esup>)
(return (the(T^l))) \<rbrakk>
\<Longrightarrow> P2 env t T l"
shows
"P1 env t T"
using assms by (induct rule: typing.induct, auto simp: typing_regular')
(* TODO: delete after refactoring of disjunct_env *)
lemma ball_Tltsp:
fixes
P1 :: "type \<Rightarrow> Label \<Rightarrow> sterm \<Rightarrow> string \<Rightarrow> string \<Rightarrow> bool" and
P2 :: "type \<Rightarrow> Label \<Rightarrow> sterm \<Rightarrow> string \<Rightarrow> string \<Rightarrow> bool"
assumes
"\<And>l t t'. \<lbrakk> \<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p \<longrightarrow> P1 T l t s p \<rbrakk>
\<Longrightarrow> \<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p \<longrightarrow> P2 T l t s p" and
"\<forall>l\<in>do T. \<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p \<longrightarrow> P1 T l (the(f l)) s p"
shows "\<forall>l\<in>do T. \<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p \<longrightarrow> P2 T l (the(f l)) s p"
proof
fix l assume "l \<in> do T"
with assms(2)
have "\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p \<longrightarrow> P1 T l (the(f l)) s p"
by simp
with assms(1)
show "\<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p \<longrightarrow> P2 T l (the(f l)) s p"
by simp
qed
(* TODO: delete after refactoring of subject_reduction *)
lemma ball_ex_finite:
fixes
S :: "'a set" and F :: "'b set" and x :: 'a and
P :: "'a \<Rightarrow> 'b \<Rightarrow> 'b \<Rightarrow> bool"
assumes
"finite S" and "finite F" and
"\<forall>x\<in>S. (\<exists>F'. finite F'
\<and> (\<forall>s p. s \<notin> F' \<union> F \<and> p \<notin> F' \<union> F \<and> s \<noteq> p
\<longrightarrow> P x s p))"
shows
"\<exists>F'. finite F'
\<and> (\<forall>x\<in>S. \<forall>s p. s \<notin> F' \<union> F \<and> p \<notin> F' \<union> F \<and> s \<noteq> p
\<longrightarrow> P x s p)"
proof -
from assms show ?thesis
proof (induct S)
case empty thus ?case by force
next
case (insert x S)
from insert(5)
have
"\<forall>y\<in>S. (\<exists>F'. finite F'
\<and> (\<forall>s p. s \<notin> F' \<union> F \<and> p \<notin> F' \<union> F \<and> s \<noteq> p
\<longrightarrow> P y s p))"
by simp
from insert(3)[OF `finite F` this]
obtain F1 where
"finite F1" and
pred_S: "\<forall>y\<in>S. \<forall>s p. s \<notin> F1 \<union> F \<and> p \<notin> F1 \<union> F \<and> s \<noteq> p
\<longrightarrow> P y s p"
by auto
from insert(5)
obtain F2 where
"finite F2" and
"\<forall>s p. s \<notin> F2 \<union> F \<and> p \<notin> F2 \<union> F \<and> s \<noteq> p \<longrightarrow> P x s p"
by auto
with pred_S have
"\<forall>y\<in>insert x S. \<forall>s p. s \<notin> F1 \<union> F2 \<union> F \<and> p \<notin> F1 \<union> F2 \<union> F \<and> s \<noteq> p
\<longrightarrow> P y s p"
by auto
moreover
from `finite F1` `finite F2` have "finite (F1 \<union> F2)" by simp
ultimately
show ?case by blast
qed
qed
(* TODO: delete after refactoring of type_renaming' *)
lemma bnd_renaming_lem:
assumes
"s \<notin> FV t'" and "p \<notin> FV t'" and "x \<notin> FV t'" and "y \<notin> FV t'" and
"x \<notin> env_dom env'" and "y \<notin> env_dom env'" and "s \<noteq> p" and "x \<noteq> y" and
"t = {Suc n \<rightarrow> [Fvar s, Fvar p]} t'" and "env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>" and
pred_bnd:
"\<forall>sa pa. sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> \<turnstile> (t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))
\<and> (\<forall>env'' t'' s' p' x' y' A' B' n'.
s' \<notin> FV t'' \<longrightarrow> p' \<notin> FV t'' \<longrightarrow> x' \<notin> FV t'' \<longrightarrow> y' \<notin> FV t'' \<longrightarrow>
x' \<notin> env_dom env'' \<longrightarrow> y' \<notin> env_dom env'' \<longrightarrow> x' \<noteq> y' \<longrightarrow> s' \<noteq> p'
\<longrightarrow> (t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {n' \<rightarrow> [Fvar s',Fvar p']} t''
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env''\<lparr>s':A'\<rparr>\<lparr>p':B'\<rparr>
\<longrightarrow> env''\<lparr>x':A'\<rparr>\<lparr>y':B'\<rparr>
\<turnstile> {n' \<rightarrow> [Fvar x',Fvar y']} t'' : return(the(T^l)))" and
"FV t' \<subseteq> F'"
shows
"\<forall>sa pa. sa \<notin> F \<union> {s,p,x,y} \<union> F' \<union> env_dom env'
\<and> pa \<notin> F \<union> {s,p,x,y} \<union> F' \<union> env_dom env'
\<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> ({Suc n \<rightarrow> [Fvar x, Fvar y]} t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return (the(T^l))"
proof (intro strip, elim conjE)
fix sa pa
assume
nin_sa: "sa \<notin> F \<union> {s,p,x,y} \<union> F' \<union> env_dom env'" and
nin_pa: "pa \<notin> F \<union> {s,p,x,y} \<union> F' \<union> env_dom env'" and "sa \<noteq> pa"
hence "sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa" by auto
moreover
{
fix a assume "a \<notin> FV t'" and "a \<in> {s,p,x,y}"
with
`FV t' \<subseteq> F'` nin_sa nin_pa `sa \<noteq> pa`
sopen_FV[of 0 "Fvar sa" "Fvar pa" t']
have "a \<notin> FV (t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>)" by (auto simp: openz_def)
} note
this[OF `s \<notin> FV t'`] this[OF `p \<notin> FV t'`]
this[OF `x \<notin> FV t'`] this[OF `y \<notin> FV t'`]
moreover
from
not_in_env_bigger_2[OF `x \<notin> env_dom env'`]
not_in_env_bigger_2[OF `y \<notin> env_dom env'`]
nin_sa nin_pa
have
"x \<notin> env_dom (env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>)
\<and> y \<notin> env_dom (env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>)" by auto
moreover
from `t = {Suc n \<rightarrow> [Fvar s, Fvar p]} t'` sopen_commute[OF Suc_not_Zero]
have "(t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {Suc n \<rightarrow> [Fvar s,Fvar p]} (t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>)"
by (auto simp: openz_def)
moreover
from
subst_add[of s sa env' A T] subst_add[of sa p "env'\<lparr>s:A\<rparr>" T B]
subst_add[of s pa "env'\<lparr>sa:T\<rparr>" A "param(the(T^l))"]
subst_add[of p pa "env'\<lparr>sa:T\<rparr>\<lparr>s:A\<rparr>" B "param(the(T^l))"]
`env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` nin_sa nin_pa
have "env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>"
by auto
ultimately
have
"env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the T^l)\<rparr>\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>
\<turnstile> {Suc n \<rightarrow> [Fvar x, Fvar y]} (t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))"
using `s \<noteq> p` `x \<noteq> y` pred_bnd by auto
moreover
from
subst_add[of y sa "env'\<lparr>x:A\<rparr>" B T] subst_add[of x sa env' A T]
subst_add[of y pa "env'\<lparr>sa:T\<rparr>\<lparr>x:A\<rparr>" B "param(the(T^l))"]
subst_add[of x pa "env'\<lparr>sa:T\<rparr>" A "param(the(T^l))"]
nin_sa nin_pa
have
"env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
= env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>"
by auto
ultimately
show
"env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> ({Suc n \<rightarrow> [Fvar x, Fvar y]} t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return (the(T^l))"
using sopen_commute[OF not_sym[OF Suc_not_Zero]]
by (simp add: openz_def)
qed
(* TODO: refactor to work with typing_induct *)
lemma type_renaming'[rule_format]:
"e \<turnstile> t : C \<Longrightarrow>
(\<And>env t' s p x y A B n. \<lbrakk> s \<notin> FV t'; p \<notin> FV t'; x \<notin> FV t'; y \<notin> FV t';
x \<notin> env_dom env; y \<notin> env_dom env; s \<noteq> p; x \<noteq> y;
t = {n \<rightarrow> [Fvar s,Fvar p]} t'; e = env\<lparr>s:A\<rparr>\<lparr>p:B\<rparr> \<rbrakk>
\<Longrightarrow> env\<lparr>x:A\<rparr>\<lparr>y:B\<rparr> \<turnstile> {n \<rightarrow> [Fvar x,Fvar y]} t' : C)"
proof (induct set:typing)
case (T_Call env t1 T t2 l env' t' s p x y A B n)
with sopen_eq_Call[OF sym[OF `Call t1 l t2 = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
show ?case by auto
next
case (T_Var env a T env' t' s p x y A B n)
from `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` ok_add_2[of env' s A p B]
have "ok env'" by simp
from
ok_add_ok[OF ok_add_ok[OF this `x \<notin> env_dom env'`]
not_in_env_bigger[OF `y \<notin> env_dom env'` not_sym[OF `x \<noteq> y`]]]
have ok: "ok (env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>)" by assumption
from sopen_eq_Fvar[OF sym[OF `Fvar a = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
show ?case
proof (elim disjE conjE)
assume "t' = Fvar a" with T_Var(4-7)
obtain "a \<noteq> s" and "a \<noteq> p" and "a \<noteq> x" and "a \<noteq> y" by auto
note in_env_smaller2[OF _ this(1-2)]
from `a \<in> env_dom env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` this[of env' A B]
have "a \<in> env_dom env'" by simp
from env_bigger2[OF `x \<notin> env_dom env'` `y \<notin> env_dom env'` this `x \<noteq> y`]
have inenv: "a \<in> env_dom (env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>)" by assumption
note get_env_bigger2[OF _ `a \<noteq> s` `a \<noteq> p`]
from
this[of env' A B] `a \<in> env_dom env` `the env!a = T`
`env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` get_env_bigger2[OF inenv `a \<noteq> x` `a \<noteq> y`]
have "the (env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>!a) = T" by simp
from typing.T_Var[OF ok inenv this] `t' = Fvar a` show ?case by simp
next
assume "a = s" and "t' = Bvar (Self n)"
from
this(1) `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` `the env!a = T`
add_get2_1[of env' s A p B]
have "T = A" by simp
moreover
from `t' = Bvar (Self n)` have "{n \<rightarrow> [Fvar x,Fvar y]} t' = Fvar x" by simp
ultimately
show ?case using in_add_2[OF ok] typing.T_Var[OF ok _ add_get2_1[OF ok]]
by simp
next
note subst = subst_add[OF `x \<noteq> y`]
from subst[of env' A B] ok have ok': "ok (env'\<lparr>y:B\<rparr>\<lparr>x:A\<rparr>)" by simp
assume "a = p" and "t' = Bvar (Param n)"
from
this(1) `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` `the env!a = T`
add_get2_2[of env' s A p B]
have "T = B" by simp
moreover
from `t' = Bvar (Param n)` have "{n \<rightarrow> [Fvar x,Fvar y]} t' = Fvar y" by simp
ultimately
show ?case
using
subst[of env' A B] in_add_2[OF ok']
typing.T_Var[OF ok' _ add_get2_1[OF ok']]
by simp
qed
next
case (T_Upd F env T l t2 t1 env' t' s p x y A B n)
from sopen_eq_Upd[OF sym[OF `Upd t1 l t2 = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
obtain t1' t2' where
t1: "t1 = {n \<rightarrow> [Fvar s,Fvar p]} t1'" and
t2: "t2 = {Suc n \<rightarrow> [Fvar s,Fvar p]} t2'" and
t': "t' = Upd t1' l t2'"
by auto
{ fix a assume "a \<notin> FV t'" with t' have "a \<notin> FV t1'" by simp }
note
t1' = T_Upd(4)[OF this[OF `s \<notin> FV t'`] this[OF `p \<notin> FV t'`]
this[OF `x \<notin> FV t'`] this[OF `y \<notin> FV t'`]
`x \<notin> env_dom env'` `y \<notin> env_dom env'`
`s \<noteq> p` `x \<noteq> y` t1 `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`]
from ok_finite[of env'] ok_add_2[OF typing_regular'[OF this]]
have findom: "finite (env_dom env')" by simp
{ fix a assume "a \<notin> FV t'" with t' have "a \<notin> FV t2'" by simp }
note
bnd_renaming_lem[OF this[OF `s \<notin> FV t'`] this[OF `p \<notin> FV t'`]
this[OF `x \<notin> FV t'`] this[OF `y \<notin> FV t'`]
`x \<notin> env_dom env'` `y \<notin> env_dom env'`
`s \<noteq> p` `x \<noteq> y` t2 `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`]
from this[of F T l "FV t2'"] T_Upd(2)
have
"\<forall>sa pa. sa \<notin> F \<union> {s, p, x, y} \<union> FV t2' \<union> env_dom env'
\<and> pa \<notin> F \<union> {s, p, x, y} \<union> FV t2' \<union> env_dom env'
\<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> ({Suc n \<rightarrow> [Fvar x,Fvar y]} t2'\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))"
by simp
from
typing.T_Upd[OF _ this t1' `l \<in> do T`]
`finite F` findom t'
show ?case by simp
next
case (T_Obj env f T F env' t' s p x y A B n)
from `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` ok_add_2[of env' s A p B]
have "ok env'" by simp
from
ok_add_ok[OF ok_add_ok[OF this `x \<notin> env_dom env'`]
not_in_env_bigger[OF `y \<notin> env_dom env'` not_sym[OF `x \<noteq> y`]]]
have ok: "ok (env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>)" by assumption
from sopen_eq_Obj[OF sym[OF `Obj f T = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
obtain f' where
obj: "{n \<rightarrow> [Fvar s,Fvar p]} Obj f' T = Obj f T" and
t': "t' = Obj f' T" by auto
from
this(1) `dom f = do T`
sym[OF dom_sopenoption_lem[of "Suc n" "Fvar s" "Fvar p" f']]
dom_sopenoption_lem[of "Suc n" "Fvar x" "Fvar y" f']
have dom: "dom (\<lambda>l. sopen_option (Suc n) (Fvar x) (Fvar y) (f' l)) = do T"
by simp
from
`finite F` finite_FV[of "Obj f' T"]
ok_finite[of env'] ok_add_2[OF ok]
have finF: "finite (F \<union> {s,p,x,y} \<union> FV (Obj f' T) \<union> env_dom env')"
by simp
have
"\<forall>l\<in>do T. \<forall>sa pa. sa \<notin> F \<union> {s, p, x, y} \<union> FV (Obj f' T) \<union> env_dom env'
\<and> pa \<notin> F \<union> {s, p, x, y} \<union> FV (Obj f' T) \<union> env_dom env'
\<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(sopen_option (Suc n) (Fvar x) (Fvar y) (f' l))\<^bsup>[Fvar sa,Fvar pa]\<^esup>) :
return(the(T^l))"
proof
fix l assume "l \<in> do T" with T_Obj(4)
have cof:
"\<forall>sa pa. sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))
\<and> (\<forall>env'' t'' s' p' x' y' A' B' n'.
s' \<notin> FV t'' \<longrightarrow> p' \<notin> FV t'' \<longrightarrow> x' \<notin> FV t'' \<longrightarrow> y' \<notin> FV t''
\<longrightarrow> x' \<notin> env_dom env'' \<longrightarrow> y' \<notin> env_dom env'' \<longrightarrow> x' \<noteq> y'
\<longrightarrow> s' \<noteq> p'
\<longrightarrow> (the(f l)\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {n' \<rightarrow> [Fvar s',Fvar p']} t''
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env''\<lparr>s':A'\<rparr>\<lparr>p':B'\<rparr>
\<longrightarrow> env''\<lparr>x':A'\<rparr>\<lparr>y':B'\<rparr>
\<turnstile> {n' \<rightarrow> [Fvar x',Fvar y']} t'' : return(the(T^l)))"
by simp
from
`l \<in> do T` `dom f = do T` `Obj f T = {n \<rightarrow> [Fvar s,Fvar p]} t'` obj t'
dom_sopenoption_lem[of "Suc n" "Fvar s" "Fvar p" f']
have indomf': "l \<in> dom f'" by auto
hence
opened: "the (sopen_option (Suc n) (Fvar x) (Fvar y) (f' l))
= {Suc n \<rightarrow> [Fvar x,Fvar y]} the(f' l)"
by force
from indomf' have FVsubset: "FV (the(f' l)) \<subseteq> FV (Obj f' T)" by force
with
`s \<notin> FV t'` `p \<notin> FV t'` `x \<notin> FV t'` `y \<notin> FV t'` obj t'
indomf' FV_option_lem[of f']
obtain
"s \<notin> FV (the(f' l))" and "p \<notin> FV (the(f' l))" and
"x \<notin> FV (the(f' l))" and "y \<notin> FV (the(f' l))" and
"the(f l) = {Suc n \<rightarrow> [Fvar s,Fvar p]} the(f' l)" by auto
from
bnd_renaming_lem[OF this(1-4) `x \<notin> env_dom env'` `y \<notin> env_dom env'`
`s \<noteq> p` `x \<noteq> y` this(5) `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
cof FVsubset]
show
"\<forall>sa pa. sa \<notin> F \<union> {s, p, x, y} \<union> FV (Obj f' T) \<union> env_dom env'
\<and> pa \<notin> F \<union> {s, p, x, y} \<union> FV (Obj f' T) \<union> env_dom env'
\<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>x:A\<rparr>\<lparr>y:B\<rparr>\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(sopen_option (Suc n) (Fvar x) (Fvar y) (f' l))\<^bsup>[Fvar sa,Fvar pa]\<^esup>) :
return(the(T^l))"
by (subst opened, assumption)
qed
from typing.T_Obj[OF ok dom finF this] t' show ?case by simp
qed
lemma type_renaming:
"\<lbrakk> e\<lparr>s:A\<rparr>\<lparr>p:B\<rparr> \<turnstile> {n \<rightarrow> [Fvar s,Fvar p]} t : T;
s \<notin> FV t; p \<notin> FV t; x \<notin> FV t; y \<notin> FV t;
x \<notin> env_dom e; y \<notin> env_dom e; x \<noteq> y; s \<noteq> p\<rbrakk>
\<Longrightarrow> e\<lparr>x:A\<rparr>\<lparr>y:B\<rparr> \<turnstile> {n \<rightarrow> [Fvar x,Fvar y]} t : T"
by (auto simp: type_renaming')
(* too weak, as we need specific s,p *)
lemma obj_inv_elim':
assumes
"e \<turnstile> Obj f U : U" and
nin_s: "s \<notin> FV (Obj f U) \<union> env_dom e" and
nin_p: "p \<notin> FV (Obj f U) \<union> env_dom e" and "s \<noteq> p"
shows
"(dom f = do U) \<and> (\<forall>l\<in>do U. e\<lparr>s:U\<rparr>\<lparr>p:param(the(U^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(U^l)))"
using assms
proof (cases rule: typing.cases)
case (T_Obj F)
(* from `e = env` `Obj f U = Obj f' T` `dom f' = do T`
have "dom f = do U" by simp*)
thus ?thesis
proof (simp, intro strip)
fix l assume "l \<in> do U"
from `finite F` finite_FV[of "Obj f U"] have "finite (F \<union> FV (Obj f U) \<union> {s,p})"
by simp
from exFresh_s_p_cof[OF this]
obtain sa pa where
"sa \<noteq> pa" and
nin_sa: "sa \<notin> F \<union> FV (Obj f U)" and
nin_pa: "pa \<notin> F \<union> FV (Obj f U)" by auto
with `l \<in> do U` T_Obj(4)
have
"e\<lparr>sa:U\<rparr>\<lparr>pa:param(the(U^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(U^l))"
by simp
moreover
from `l \<in> do U` `dom f = do U`
have "l \<in> dom f" by simp
with nin_s nin_p nin_sa nin_pa FV_option_lem[of f]
have
"sa \<notin> FV (the(f l)) \<and> pa \<notin> FV (the(f l))
\<and> s \<notin> FV (the(f l)) \<and> p \<notin> FV (the(f l))
\<and> s \<notin> env_dom e \<and> p \<notin> env_dom e" by auto
ultimately
show
"e\<lparr>s:U\<rparr>\<lparr>p:param(the(U^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(U^l))"
using type_renaming[OF _ _ _ _ _ _ _ `s \<noteq> p` `sa \<noteq> pa`]
by (simp add: openz_def)
qed
qed
lemma dom_lem: "e \<turnstile> Obj f (Object fun) : Object fun \<Longrightarrow> dom f = dom fun"
by (erule typing.cases, auto)
lemma abs_typeE:
assumes "e \<turnstile> Call (Obj f U) l b : T"
shows
"(\<exists>F. finite F
\<and> (\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> e\<lparr>s:U\<rparr>\<lparr>p: param(the(U^l))\<rparr> \<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : T) \<Longrightarrow> P)
\<Longrightarrow> P"
using assms
proof (cases rule: typing.cases)
case (T_Call A (*env t1 t2=b*))
assume
cof: "\<exists>F. finite F
\<and> (\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> e\<lparr>s:U\<rparr>\<lparr>p: param(the(U^l))\<rparr> \<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : T)
\<Longrightarrow> P"
from
`T = return(the(A^l))`
`e \<turnstile> Obj f U : A` `l \<in> do A` obj_inv[of e f U A]
obtain "e \<turnstile> (Obj f U) : U" and "T = return(the(U^l))" and "l \<in> do U"
by simp
from obj_inv_elim[OF this(1)] this(2-3) cof show ?thesis by blast
qed
subsubsection {* Substitution preserves Well-Typedness *}
lemma bigger_env_lemma[rule_format]:
assumes "e \<turnstile> t : T"
shows "\<forall>x X. x \<notin> env_dom e \<longrightarrow> e\<lparr>x:X\<rparr> \<turnstile> t: T"
proof -
def pred_cof \<equiv> "\<lambda>L env t T l.
\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param (the(T^l))\<rparr> \<turnstile> (t\<^bsup>[Fvar s,Fvar p]\<^esup>) : return (the(T^l))"
from assms show ?thesis
proof (induct
taking: "\<lambda>env t T l. \<forall>x X. x \<notin> env_dom env
\<longrightarrow> (\<exists>L. finite L \<and> pred_cof L (env\<lparr>x:X\<rparr>) t T l)"
rule: typing_induct)
case Call thus ?case by auto
next
case (Fvar env Ta xa) thus ?case
proof (intro strip)
fix x X assume "x \<notin> env_dom env"
from
get_env_smaller[OF `xa \<in> env_dom env` this]
T_Var[OF ok_add_ok[OF `ok env` this]
env_bigger[OF this `xa \<in> env_dom env`]]
`the env!xa = Ta`
show "env\<lparr>x:X\<rparr> \<turnstile> Fvar xa : Ta" by simp
qed
next
case (Obj env Ta f) note pred_o = this(3)
def pred_cof' \<equiv> "\<lambda>x X b l. \<exists>L. finite L \<and> pred_cof L (env\<lparr>x:X\<rparr>) (the b) Ta l"
from pred_o
have pred: "\<forall>x X. x \<notin> env_dom env \<longrightarrow> (\<forall>l\<in>dom f. pred_cof' x X (f l) l)"
by (intro fmap_ball_all2'[of f "\<lambda>x X. x \<notin> env_dom env" pred_cof'],
unfold pred_cof_def pred_cof'_def, simp)
show ?case
proof (intro strip)
fix x X
def pred_bnd \<equiv> "\<lambda>s p b l. env\<lparr>x:X\<rparr>\<lparr>s:Ta\<rparr>\<lparr>p:param (the(Ta^l))\<rparr>
\<turnstile> (the b\<^bsup>[Fvar s,Fvar p]\<^esup>) : return (the(Ta^l))"
assume "x \<notin> env_dom env"
with pred fmap_ex_cof[of f pred_bnd] `dom f = do Ta`
obtain L where
"finite L" and "\<forall>l\<in>do Ta. pred_cof L (env\<lparr>x:X\<rparr>) (the(f l)) Ta l"
unfolding pred_bnd_def pred_cof_def pred_cof'_def
by auto
from
T_Obj[OF ok_add_ok[OF `ok env` `x \<notin> env_dom env`]
`dom f = do Ta` this(1)]
this(2)
show "env<x:X> \<turnstile> Obj f Ta : Ta"
unfolding pred_cof_def
by simp
qed
next
case (Upd env Ta t l u)
note pred_t = this(2) and pred_u = this(4)
show ?case
proof (intro strip)
fix x X assume "x \<notin> env_dom env"
with pred_u obtain L where
"finite L" and "pred_cof L (env\<lparr>x:X\<rparr>) u Ta l" by auto
with `l \<in> do Ta` `x \<notin> env_dom env` pred_t
show "env\<lparr>x:X\<rparr> \<turnstile> Upd t l u : Ta"
unfolding pred_cof_def
by auto
qed
next
case (Bnd env Ta l t L) note pred = this(3)
show ?case
proof (intro strip)
fix x X assume "x \<notin> env_dom env"
thus "\<exists>L. finite L \<and> pred_cof L (env<x:X>) t Ta l"
proof (rule_tac x = "L \<union> {x}" in exI, simp add: `finite L`,
unfold pred_cof_def, auto)
fix s p
assume
"s \<notin> L" and "p \<notin> L" and "s \<noteq> p" and
"s \<noteq> x" and "p \<noteq> x"
note
subst_add[OF not_sym[OF `s \<noteq> x`]]
subst_add[OF not_sym[OF `p \<noteq> x`]]
from
this(1)[of env X Ta] this(2)[of "env\<lparr>s:Ta\<rparr>" X "param (the(Ta^l))"]
pred `s \<notin> L` `p \<notin> L` `s \<noteq> p`
not_in_env_bigger_2[OF `x \<notin> env_dom env`
not_sym[OF `s \<noteq> x`] not_sym[OF `p \<noteq> x`]]
show
"env\<lparr>x:X\<rparr>\<lparr>s:Ta\<rparr>\<lparr>p:param (the(Ta^l))\<rparr>
\<turnstile> (t\<^bsup>[Fvar s,Fvar p]\<^esup>) : return (the(Ta^l))"
by auto
qed
qed
qed
qed
lemma bnd_disj_env_lem:
assumes
"ok e1" and "env_dom e1 \<inter> env_dom e2 = {}" and "ok e2" and
"\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> e1\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))
\<and> (env_dom (e1\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>) \<inter> env_dom e2 = {}
\<longrightarrow> ok e2
\<longrightarrow> e1\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>+e2
\<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l)))"
shows
"\<forall>s p. s \<notin> F \<union> env_dom (e1+e2) \<and> p \<notin> F \<union> env_dom (e1+e2) \<and> s \<noteq> p
\<longrightarrow> (e1+e2)\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr> \<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"
proof (intro strip, elim conjE)
fix s p assume
nin_s: "s \<notin> F \<union> env_dom (e1+e2)" and
nin_p: "p \<notin> F \<union> env_dom (e1+e2)" and "s \<noteq> p"
from
this(1-2) env_add_dom_2[OF assms(1) _ _ this(3)]
assms(2) env_app_dom[OF assms(1-3)]
have "env_dom (e1\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>) \<inter> env_dom e2 = {}" by simp
with
env_app_add2[OF assms(1-3) _ _ _ _ `s \<noteq> p`]
env_app_dom[OF assms(1-3)] `ok e2` assms(4) nin_s nin_p `s \<noteq> p`
show "(e1+e2)\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr> \<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"
by auto
qed
(* TODO: refactor to work with typing_induct *)
lemma disjunct_env:
assumes "e \<turnstile> t : A"
shows "(env_dom e \<inter> env_dom e' = {} \<Longrightarrow> ok e' \<Longrightarrow> e + e' \<turnstile> t : A)"
using assms
proof (induct rule: typing.induct)
case T_Call thus ?case by auto
next
case (T_Var env x T)
from
env_app_dom[OF `ok env` `env_dom env \<inter> env_dom e' = {}` `ok e'`]
`x \<in> env_dom env`
have indom: "x \<in> env_dom (env+e')" by simp
from
`ok env` `x \<in> env_dom env` `the env!x = T` `env_dom env \<inter> env_dom e' = {}`
`ok e'`
have "the (env+e')!x = T" by simp
from
typing.T_Var[OF env_app_ok[OF `ok env` `env_dom env \<inter> env_dom e' = {}`
`ok e'`]
indom this]
show ?case by assumption
next
case (T_Upd F env T l t2 t1)
from
typing.T_Upd[OF _ bnd_disj_env_lem[OF typing_regular'[OF `env \<turnstile> t1 : T`]
`env_dom env \<inter> env_dom e' = {}` `ok e'`
T_Upd(2)]
T_Upd(4)[OF `env_dom env \<inter> env_dom e' = {}` `ok e'`]
`l \<in> do T`]
`finite F` ok_finite[OF env_app_ok[OF typing_regular'[OF `env \<turnstile> t1 : T`]
`env_dom env \<inter> env_dom e' = {}` `ok e'`]]
show ?case by simp
next
case (T_Obj env f T F)
from
ok_finite[OF env_app_ok[OF `ok env` `env_dom env \<inter> env_dom e' = {}` `ok e'`]]
`finite F`
have finF: "finite (F \<union> env_dom (env+e'))" by simp
note
ball_Tltsp[of F
"\<lambda>T l t s p. env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr> \<turnstile> (t\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))
\<and> (env_dom (env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>) \<inter> env_dom e' = {}
\<longrightarrow> ok e'
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>+e'
\<turnstile> (t\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l)))"
T "F \<union> env_dom (env+e')"
"\<lambda>T l t s p. (env+e')\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> (t\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"]
from
this[OF _ T_Obj(4)]
bnd_disj_env_lem[OF `ok env` `env_dom env \<inter> env_dom e' = {}` `ok e'`]
typing.T_Obj[OF env_app_ok[OF `ok env`
`env_dom env \<inter> env_dom e' = {}` `ok e'`]
`dom f = do T` finF]
show ?case by simp
qed
text {* Typed in the Empty Environment implies typed in any Environment *}
lemma empty_env:
assumes "(Env empty) \<turnstile> t : A" and "ok env"
shows "env \<turnstile> t : A"
proof -
from `ok env` have "env = (Env empty)+env" by (cases env, auto)
with disjunct_env[OF assms(1) _ assms(2)] show ?thesis by simp
qed
lemma bnd_open_lem:
assumes
pred_bnd:
"\<forall>sa pa. sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))
\<and> (\<forall>env'' t'' s' p' x' y' A' B' n'. s' \<notin> FV t'' \<union> FV x' \<union> FV y'
\<longrightarrow> p' \<notin> FV t'' \<union> FV x' \<union> FV y' \<longrightarrow> s' \<noteq> p'
\<longrightarrow> env'' \<turnstile> x' : A' \<longrightarrow> env'' \<turnstile> y' : B'
\<longrightarrow> (t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {n' \<rightarrow> [Fvar s',Fvar p']} t''
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env''\<lparr>s':A'\<rparr>\<lparr>p':B'\<rparr>
\<longrightarrow> env'' \<turnstile> {n' \<rightarrow> [x',y']} t'' : return(the(T^l)))" and
"ok env" and "env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>" and
"s \<notin> FV t'' \<union> FV x \<union> FV y" and "p \<notin> FV t'' \<union> FV x \<union> FV y" and "s \<noteq> p" and
"env' \<turnstile> x : A" and "env' \<turnstile> y : B" and
"t = {Suc n \<rightarrow> [Fvar s,Fvar p]} t'" and "FV t' \<subseteq> FV t''"
shows
"\<forall>sa pa. sa \<notin> F \<union> {s,p} \<union> env_dom env'
\<and> pa \<notin> F \<union> {s,p} \<union> env_dom env' \<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> ({Suc n \<rightarrow> [x,y]} t'\<^bsup>[Fvar sa, Fvar pa]\<^esup>) : return(the(T^l))"
proof (intro strip, elim conjE)
fix sa pa assume
nin_sa: "sa \<notin> F \<union> {s,p} \<union> env_dom env'" and
nin_pa: "pa \<notin> F \<union> {s,p} \<union> env_dom env'" and "sa \<noteq> pa"
hence "sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa" by auto
moreover
{
fix a assume "a \<notin> FV t'' \<union> FV x \<union> FV y" and "a \<in> {s,p}"
with
`FV t' \<subseteq> FV t''` nin_sa nin_pa `sa \<noteq> pa`
sopen_FV[of 0 "Fvar sa" "Fvar pa" t']
have "a \<notin> FV (t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>) \<union> FV x \<union> FV y" by (auto simp: openz_def)
} note
this[OF `s \<notin> FV t'' \<union> FV x \<union> FV y`]
this[OF `p \<notin> FV t'' \<union> FV x \<union> FV y`]
moreover
{
from `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` ok_add_2[of env' s A p B]
have "ok env'" by simp
from nin_sa nin_pa `sa \<noteq> pa` env_add_dom[OF this]
obtain "sa \<notin> env_dom env'" and "pa \<notin> env_dom (env'\<lparr>sa:T\<rparr>)" by auto
note
bigger_env_lemma[OF bigger_env_lemma[OF `env' \<turnstile> x : A` this(1)] this(2)]
bigger_env_lemma[OF bigger_env_lemma[OF `env' \<turnstile> y : B` this(1)] this(2)]
}note
this(1)[of "param(the(T^l))"]
this(2)[of "param(the(T^l))"]
moreover
from `t = {Suc n \<rightarrow> [Fvar s,Fvar p]} t'` sopen_commute[of 0 "Suc n" sa pa s p t']
have "(t\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {Suc n \<rightarrow> [Fvar s,Fvar p]} (t'\<^bsup>[Fvar sa,Fvar pa]\<^esup>)"
by (simp add: openz_def)
moreover
from
subst_add[of p sa "env'\<lparr>s:A\<rparr>" B T] subst_add[of s sa env' A T]
subst_add[of p pa "env'\<lparr>sa:T\<rparr>\<lparr>s:A\<rparr>" B "param(the(T^l))"]
subst_add[of s pa "env'\<lparr>sa:T\<rparr>" A "param(the(T^l))"]
`env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` nin_sa nin_pa
have "env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>"
by auto
ultimately
show
"env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> ({Suc n \<rightarrow> [x,y]} t'\<^bsup>[Fvar sa, Fvar pa]\<^esup>) : return(the(T^l))"
using
pred_bnd `s \<noteq> p`
sopen_commute_gen[OF lc_Fvar[of sa] lc_Fvar[of pa]
typing_regular''[OF `env' \<turnstile> x : A`]
typing_regular''[OF `env' \<turnstile> y : B`]
not_sym[OF Suc_not_Zero]]
by (auto simp: openz_def)
qed
(* A variation of the Type Renaming lemma above. This one is stronger and
   could be extended to show type renaming, using the fact that a term typed
   in one environment is also typed in any bigger environment. *)
(* TODO: refactor to work with typing_induct *)
lemma open_lemma':
shows
"e \<turnstile> t : C
\<Longrightarrow> (\<And>env t' s p x y A B n. s \<notin> FV t' \<union> FV x \<union> FV y
\<Longrightarrow> p \<notin> FV t' \<union> FV x \<union> FV y \<Longrightarrow> s \<noteq> p
\<Longrightarrow> env \<turnstile> x : A \<Longrightarrow> env \<turnstile> y : B
\<Longrightarrow> t = {n \<rightarrow> [Fvar s,Fvar p]} t'
\<Longrightarrow> e = env\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>
\<Longrightarrow> env \<turnstile> {n \<rightarrow> [x,y]} t' : C)"
proof (induct set:typing)
case (T_Var env x T env' t' s p y z A B n)
from sopen_eq_Fvar[OF sym[OF `Fvar x = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
show ?case
proof (elim disjE conjE)
assume "t' = Fvar x"
with `s \<notin> FV t' \<union> FV y \<union> FV z` `p \<notin> FV t' \<union> FV y \<union> FV z`
obtain "x \<noteq> s" and "x \<noteq> p" by auto
from `x \<in> env_dom env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` in_env_smaller2[OF _ this]
have indom: "x \<in> env_dom env'" by simp
from
`ok env` `the env!x = T` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
ok_add_2[of env' s A p B] get_env_smaller2[OF this _ _ `s \<noteq> p`]
have "the env'!x = T" by simp
from
`ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>` `t' = Fvar x`
ok_add_2[of env' s A p B] typing.T_Var[OF _ indom this]
show ?case by simp
next
assume "x = s"
with
`ok env` `the env!x = T` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
add_get2_1[of env' s A p B]
have "T = A" by simp
moreover assume "t' = Bvar (Self n)"
ultimately show ?thesis using `env' \<turnstile> y : A` by simp
next
assume "x = p"
with
`ok env` `the env!x = T` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
add_get2_2[of env' s A p B] have "T = B" by simp
moreover assume "t' = Bvar (Param n)"
ultimately show ?thesis using `env' \<turnstile> z : B` by simp
qed
next
case (T_Upd F env T l t2 t1 env' t' s p x y A B n)
from sopen_eq_Upd[OF sym[OF `Upd t1 l t2 = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
obtain t1' t2' where
t1': "t1 = {n \<rightarrow> [Fvar s,Fvar p]} t1'" and
t2': "t2 = {Suc n \<rightarrow> [Fvar s,Fvar p]} t2'" and
t': "t' = Upd t1' l t2'" by auto
hence "FV t2' \<subseteq> FV t'" by auto
from
`s \<notin> FV t' \<union> FV x \<union> FV y` `p \<notin> FV t' \<union> FV x \<union> FV y`
t' `finite F` ok_finite[OF typing_regular'[OF `env' \<turnstile> x : A`]]
typing.T_Upd[OF _ bnd_open_lem[OF T_Upd(2)
typing_regular'[OF `env \<turnstile> t1 : T`]
`env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
`s \<notin> FV t' \<union> FV x \<union> FV y`
`p \<notin> FV t' \<union> FV x \<union> FV y` `s \<noteq> p`
`env' \<turnstile> x : A` `env' \<turnstile> y : B` t2' this]
T_Upd(4)[OF _ _ `s \<noteq> p` `env' \<turnstile> x : A` `env' \<turnstile> y : B`
t1' `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`] `l \<in> do T`]
show ?case by simp
next
case (T_Obj env f T F env' t' s p x y A B n)
from sopen_eq_Obj[OF sym[OF `Obj f T = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
obtain f' where
obj: "Obj f T = {n \<rightarrow> [Fvar s,Fvar p]} Obj f' T" and
t': "t' = Obj f' T" by auto
from
sym[OF this(1)] `dom f = do T`
sym[OF dom_sopenoption_lem[of "Suc n" "Fvar s" "Fvar p" f']]
dom_sopenoption_lem[of "Suc n" x y f']
have dom: "dom (\<lambda>l. sopen_option (Suc n) x y (f' l)) = do T" by simp
from `finite F` ok_finite[OF typing_regular'[OF `env' \<turnstile> x : A`]]
have finF: "finite (F \<union> {s,p} \<union> env_dom env')"
by simp
have
"\<forall>l\<in>do T. \<forall>sa pa. sa \<notin> F \<union> {s,p} \<union> env_dom env'
\<and> pa \<notin> F \<union> {s,p} \<union> env_dom env'
\<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(sopen_option (Suc n) x y (f' l))\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))"
proof
fix l assume "l \<in> do T" with T_Obj(4)
have
cof:
"\<forall>sa pa. sa \<notin> F \<and> pa \<notin> F \<and> sa \<noteq> pa
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))
\<and> (\<forall>env'' t'' s' p' x' y' A' B' n'.
s' \<notin> FV t'' \<union> FV x' \<union> FV y' \<longrightarrow> p' \<notin> FV t'' \<union> FV x' \<union> FV y'
\<longrightarrow> s' \<noteq> p' \<longrightarrow> env'' \<turnstile> x' : A' \<longrightarrow> env'' \<turnstile> y' : B'
\<longrightarrow> (the(f l)\<^bsup>[Fvar sa,Fvar pa]\<^esup>) = {n' \<rightarrow> [Fvar s',Fvar p']} t''
\<longrightarrow> env\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr> = env''\<lparr>s':A'\<rparr>\<lparr>p':B'\<rparr>
\<longrightarrow> env'' \<turnstile> {n' \<rightarrow> [x',y']} t'' : return(the(T^l)))"
by simp
from
`l \<in> do T` `dom f = do T` `Obj f T = {n \<rightarrow> [Fvar s,Fvar p]} t'` obj t'
dom_sopenoption_lem[of "Suc n" "Fvar s" "Fvar p" f']
have indomf': "l \<in> dom f'" by auto
with obj sopen_option_lem[of f' "Suc n" "Fvar s" "Fvar p"] FV_option_lem[of f'] t'
obtain
"the(f l) = {Suc n \<rightarrow> [Fvar s,Fvar p]} the(f' l)" and
"FV (the(f' l)) \<subseteq> FV t'" by auto
from
bnd_open_lem[OF cof `ok env` `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`
`s \<notin> FV t' \<union> FV x \<union> FV y` `p \<notin> FV t' \<union> FV x \<union> FV y`
`s \<noteq> p` `env' \<turnstile> x : A` `env' \<turnstile> y : B` this]
indomf' sopen_option_lem[of f' "Suc n" x y] T_Obj(4)
show
"\<forall>sa pa. sa \<notin> F \<union> {s,p} \<union> env_dom env'
\<and> pa \<notin> F \<union> {s,p} \<union> env_dom env' \<and> sa \<noteq> pa
\<longrightarrow> env'\<lparr>sa:T\<rparr>\<lparr>pa:param(the(T^l))\<rparr>
\<turnstile> (the(sopen_option (Suc n) x y (f' l))\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(T^l))"
by simp
qed
from typing.T_Obj[OF typing_regular'[OF `env' \<turnstile> x : A`] dom finF this] t'
show ?case by simp
next
case (T_Call env t1 T t2 l env' t' s p x y A B n)
from sopen_eq_Call[OF sym[OF `Call t1 l t2 = {n \<rightarrow> [Fvar s,Fvar p]} t'`]]
obtain t1' t2' where
t1: "t1 = {n \<rightarrow> [Fvar s,Fvar p]} t1'" and
t2: "t2 = {n \<rightarrow> [Fvar s,Fvar p]} t2'" and
t': "t' = Call t1' l t2'" by auto
{ fix a assume "a \<notin> FV t' \<union> FV x \<union> FV y"
with t' have "a \<notin> FV t1' \<union> FV x \<union> FV y" by simp
}note
t1' = T_Call(2)[OF this[OF `s \<notin> FV t' \<union> FV x \<union> FV y`]
this[OF `p \<notin> FV t' \<union> FV x \<union> FV y`]
`s \<noteq> p` `env' \<turnstile> x : A` `env' \<turnstile> y : B`
t1 `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`]
{ fix a assume "a \<notin> FV t' \<union> FV x \<union> FV y"
with t' have "a \<notin> FV t2' \<union> FV x \<union> FV y" by simp
}
from
typing.T_Call[OF t1' T_Call(4)[OF this[OF `s \<notin> FV t' \<union> FV x \<union> FV y`]
this[OF `p \<notin> FV t' \<union> FV x \<union> FV y`]
`s \<noteq> p` `env' \<turnstile> x : A` `env' \<turnstile> y : B`
t2 `env = env'\<lparr>s:A\<rparr>\<lparr>p:B\<rparr>`]
`l \<in> do T`]
t'
show ?case by simp
qed
lemma open_lemma:
"\<lbrakk> env\<lparr>s:A\<rparr>\<lparr>p:B\<rparr> \<turnstile> {n \<rightarrow> [Fvar s,Fvar p]} t : T;
s \<notin> FV t \<union> FV x \<union> FV y; p \<notin> FV t \<union> FV x \<union> FV y; s \<noteq> p;
env \<turnstile> x : A; env \<turnstile> y : B \<rbrakk>
\<Longrightarrow> env \<turnstile> {n \<rightarrow> [x,y]} t : T"
by (simp add: open_lemma')
subsubsection {* Subject reduction *}
lemma type_dom[simp]: "env \<turnstile> (Obj a A) : A \<Longrightarrow> dom a = do A"
by (erule typing.cases, auto)
{
note
ok_env = typing_regular'[OF `env \<turnstile> Obj f (Object t) : Object t`] and
ok_env_sp = typing_regular'[OF assms(4)]
fix sa pa assume
nin_sa: "sa \<notin> {s,p} \<union> env_dom env" and
nin_pa: "pa \<notin> {s,p} \<union> env_dom env" and "sa \<noteq> pa"
from this(1) ok_add_2[OF ok_env_sp] env_add_dom_2[OF ok_env]
have "sa \<notin> env_dom (env\<lparr>s:Object t\<rparr>\<lparr>p:param(the(t l2))\<rparr>)" by simp
from
nin_sa bigger_env_lemma[OF assms(4) this]
subst_add[of sa p "env\<lparr>s:Object t\<rparr>" "Object t" "param(the(t l2))"]
subst_add[of sa s env "Object t" "Object t"]
have
aT_sa: "env\<lparr>sa:Object t\<rparr>\<lparr>s:Object t\<rparr>\<lparr>p:param(the(t l2))\<rparr>
\<turnstile> (a\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(t l2))" by simp
from
`sa \<noteq> pa` nin_sa nin_pa env_add_dom[OF ok_env]
ok_add_2[OF ok_env_sp]
obtain
"s \<notin> env_dom (env\<lparr>sa:Object t\<rparr>)" and
"p \<notin> env_dom (env\<lparr>sa:Object t\<rparr>)" and "s \<noteq> p" and
"sa \<notin> env_dom env" and "pa \<notin> env_dom (env\<lparr>sa:Object t\<rparr>)"
by auto
with env_add_dom_2[OF ok_add_ok[OF ok_env this(4)] this(1-3)] nin_pa
have "pa \<notin> env_dom (env\<lparr>sa:Object t\<rparr>\<lparr>s:Object t\<rparr>\<lparr>p:param(the(t l2))\<rparr>)"
by simp
from
nin_pa bigger_env_lemma[OF aT_sa this]
subst_add[of pa p "env\<lparr>sa:Object t\<rparr>\<lparr>s:Object t\<rparr>"
"param(the(t l2))" "param(the(t l2))"]
subst_add[of pa s "env\<lparr>sa:Object t\<rparr>" "param(the(t l2))" "Object t"]
have
aT_sapa:
"env\<lparr>sa:Object t\<rparr>\<lparr>pa:param(the(t l2))\<rparr>\<lparr>s:Object t\<rparr>\<lparr>p:param(the(t l2))\<rparr>
\<turnstile> {0 \<rightarrow> [Fvar s, Fvar p]} a : return(the(t l2))" by (simp add: openz_def)
from nin_sa nin_pa `s \<notin> FV a` `p \<notin> FV a` ok_add_2[OF ok_env_sp]
obtain
ninFV_s: "s \<notin> FV a \<union> FV (Fvar sa) \<union> FV (Fvar pa)" and
ninFV_p: "p \<notin> FV a \<union> FV (Fvar sa) \<union> FV (Fvar pa)" and "s \<noteq> p"
by auto
from ok_add_2[OF typing_regular'[OF aT_sapa]]
have ok_env_sapa: "ok (env\<lparr>sa:Object t\<rparr>\<lparr>pa:param(the(t l2))\<rparr>)"
by simp
with ok_add_reverse[OF this]
have ok_env_pasa: "ok (env\<lparr>pa:param(the(t l2))\<rparr>\<lparr>sa:Object t\<rparr>)"
by simp
from
open_lemma[OF aT_sapa ninFV_s ninFV_p `s \<noteq> p` _
T_Var[OF ok_env_sapa in_add[OF ok_env_sapa]
add_get2_2[OF ok_env_sapa]]]
T_Var[OF ok_env_pasa in_add[OF ok_env_pasa] add_get2_2[OF ok_env_pasa]]
ok_add_reverse[OF ok_env_sapa]
have
"env\<lparr>sa:(Object t)\<rparr>\<lparr>pa:param(the(t l2))\<rparr>
\<turnstile> (a\<^bsup>[Fvar sa,Fvar pa]\<^esup>) : return(the(t l2))"
by (simp add: openz_def)
}note alem = this
(* case split *)
show ?thesis
proof (cases "l1 = l2")
case True with assms obj_inv_elim'[OF assms(1)] show ?thesis
by (simp (no_asm_simp), rule_tac x = "{s,p} \<union> env_dom env" in exI,
auto simp: finF alem)
next
case False
from obj_inv_elim[OF `env \<turnstile> Obj f (Object t) : Object t`]
obtain F where
"finite F" and
"\<forall>l\<in>dom t.
\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:Object t\<rparr>\<lparr>p:param(the(Object t^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(Object t^l))"
by auto
from this(2) `l1 \<in> dom t`
have
"\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:Object t\<rparr>\<lparr>p:param(the(Object t^l1))\<rparr>
\<turnstile> (the(f l1)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(Object t^l1))"
by auto
thus ?thesis using `finite F` `l1 \<noteq> l2` by (simp,blast)
qed
qed
text {* Main Lemma *}
(* TODO: refactor to work with typing_induct *)
lemma subject_reduction: "e \<turnstile> t : T \<Longrightarrow> (\<And>t'. t \<rightarrow>\<^sub>\<beta> t' \<Longrightarrow> e \<turnstile> t' : T)"
proof (induct set: typing)
case (T_Var env x T t')
from Fvar_beta[OF `Fvar x \<rightarrow>\<^sub>\<beta> t'`] show ?case by simp
next
case (T_Upd F env T l t2 t1 t')
from Upd_beta[OF `Upd t1 l t2 \<rightarrow>\<^sub>\<beta> t'`] show ?case
proof (elim disjE exE conjE)
fix t1' assume "t1 \<rightarrow>\<^sub>\<beta> t1'" and "t' = Upd t1' l t2"
from
this(2) T_Upd(2)
typing.T_Upd[OF `finite F` _ T_Upd(4)[OF this(1)] `l \<in> do T`]
show ?case by simp
next
fix t2' F'
assume
"finite F'" and
pred_F': "\<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t''. t2\<^bsup>[Fvar s,Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> t'' \<and> t2' = \<sigma>[s,p] t'')" and
t': "t' = Upd t1 l t2'"
have
"\<forall>s p. s \<notin> F \<union> F' \<and> p \<notin> F \<union> F' \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr> \<turnstile> (t2'\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"
proof (intro strip, elim conjE)
fix s p assume
nin_s: "s \<notin> F \<union> F'" and
nin_p: "p \<notin> F \<union> F'" and "s \<noteq> p"
with pred_F' obtain t'' where "t2\<^bsup>[Fvar s,Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> t''" and "t2' = \<sigma>[s,p] t''"
by auto
with beta_lc[OF this(1)] sopen_sclose_eq_t[of t'' 0 s p]
have "t2\<^bsup>[Fvar s,Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> (t2'\<^bsup>[Fvar s,Fvar p]\<^esup>)"
by (simp add: openz_def closez_def)
with nin_s nin_p `s \<noteq> p` T_Upd(2)
show "env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr> \<turnstile> (t2'\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"
by auto
qed
from t' `finite F` `finite F'` typing.T_Upd[OF _ this `env \<turnstile> t1 : T` `l \<in> do T`]
show ?case by simp
next
fix f U assume
"l \<in> dom f" and "Obj f U = t1" and
t': "t' = Obj (f(l \<mapsto> t2)) U"
from this(1-2) `env \<turnstile> t1 : T` obj_inv[of env f U T]
obtain t where
objT: "env \<turnstile> Obj f (Object t) : (Object t)" and
"Object t = T" and "T = U"
by (cases T, auto)
from obj_inv_elim[OF objT] `Object t = T` `l \<in> dom f`
have domf': "dom (f(l \<mapsto> t2)) = do T" by auto
have
exF: "\<forall>l'\<in>do T.
(\<exists>F'. finite F'
\<and> (\<forall>s p. s \<notin> F' \<union> (F \<union> FV t2) \<and> p \<notin> F' \<union> (F \<union> FV t2) \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the ((f(l \<mapsto> t2)) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l'))))"
proof
fix l' assume "l' \<in> do T"
with dom_lem[OF objT] `l \<in> dom f` `Object t = T`
obtain ll': "l' \<in> dom t" and "l \<in> dom t" by auto
from `finite F` have "finite (F \<union> FV t2)" by simp
from exFresh_s_p_cof[OF this]
obtain s p where
nin_s: "s \<notin> F \<union> FV t2" and
nin_p: "p \<notin> F \<union> FV t2" and "s \<noteq> p"
by auto
with T_Upd(2) `Object t = T`
have
"env\<lparr>s:Object t\<rparr>\<lparr>p:param(the(t l))\<rparr>
\<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(t l))"
by auto
from
select_preserve_type[OF objT _ _ this ll'] sym[OF `Object t = T`]
nin_s nin_p `l \<in> dom t`
obtain F' where
"finite F'" and
"\<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the ((f(l \<mapsto> t2)) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l'))"
by auto
thus
"\<exists>F'. finite F'
\<and> (\<forall>s p. s \<notin> F' \<union> (F \<union> FV t2) \<and> p \<notin> F' \<union> (F \<union> FV t2) \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the ((f(l \<mapsto> t2)) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l')))"
by blast
qed
{ fix Ta from finite_dom_fmap have "finite (do Ta)" by (cases Ta, auto) }
note fin_doT = this ball_ex_finite[of "do T" "F \<union> FV t2"]
from this(2)[OF this(1)[of T] _ exF] `finite F`
obtain F' where
"finite F'" and
"\<forall>l'\<in>do T. \<forall>s p. s \<notin> F' \<union> (F \<union> FV t2) \<and> p \<notin> F' \<union> (F \<union> FV t2) \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the ((f(l \<mapsto> t2)) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l'))"
by auto
moreover
from `finite F'` `finite F` have "finite (F' \<union> (F \<union> FV t2))" by simp
note typing.T_Obj[OF typing_regular'[OF `env \<turnstile> t1 : T`] domf' this]
ultimately show ?case using t' `T = U` by auto
qed
next
case (T_Obj env f T F t')
from Obj_beta[OF `Obj f T \<rightarrow>\<^sub>\<beta> t'`] show ?case
proof (elim exE conjE)
fix l f' a a' F' assume
"dom f = dom f'" and "f = f'(l \<mapsto> a)" and "l \<in> dom f'" and
t': "t' = Obj (f'(l \<mapsto> a')) T" and "finite F'" and
red_sp: "\<forall>s p. s \<notin> F' \<and> p \<notin> F' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t''. a\<^bsup>[Fvar s, Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> t'' \<and> a' = \<sigma>[s,p] t'')"
from this(2) `dom f = do T` have domf': "dom (f'(l \<mapsto> a')) = do T" by auto
have
exF: "\<forall>l'\<in>do T. \<forall>s p. s \<notin> F \<union> F' \<and> p \<notin> F \<union> F' \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the ((f'(l \<mapsto> a')) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l'))"
proof (intro strip, elim conjE)
fix l' s p assume
"l' \<in> do T" and
nin_s: "s \<notin> F \<union> F'" and
nin_p: "p \<notin> F \<union> F'" and "s \<noteq> p"
with red_sp obtain t'' where "a\<^bsup>[Fvar s,Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> t''" and "a' = \<sigma>[s,p] t''"
by auto
with
beta_lc[OF this(1)] sopen_sclose_eq_t[of t'' 0 s p]
`f = f'(l \<mapsto> a)`
have "the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<rightarrow>\<^sub>\<beta> (the((f'(l \<mapsto> a')) l)\<^bsup>[Fvar s,Fvar p]\<^esup>)"
by (simp add: openz_def closez_def)
with T_Obj(4) nin_s nin_p `s \<noteq> p` `l' \<in> do T` `f = f'(l \<mapsto> a)`
show
"env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l'))\<rparr>
\<turnstile> (the((f'(l \<mapsto> a')) l')\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l'))"
by auto
qed
from typing.T_Obj[OF `ok env` domf' _ this] `finite F` `finite F'` t'
show ?case by (simp (no_asm_simp))
qed
next
case (T_Call env t1 T t2 l t')
from Call_beta[OF `Call t1 l t2 \<rightarrow>\<^sub>\<beta> t'`] show ?case
proof (elim disjE conjE exE)
fix t1' assume "t1 \<rightarrow>\<^sub>\<beta> t1'" and "t' = Call t1' l t2"
from
typing.T_Call[OF T_Call(2)[OF this(1)]
`env \<turnstile> t2 : param(the(T^l))` `l \<in> do T`]
this(2)
show ?case by simp
next
fix t2' assume "t2 \<rightarrow>\<^sub>\<beta> t2'" and "t' = Call t1 l t2'"
from
typing.T_Call[OF `env \<turnstile> t1 : T` T_Call(4)[OF this(1)] `l \<in> do T`]
this(2)
show ?case by simp
next
fix f U assume "Obj f U = t1" and "l \<in> dom f" and t': "t' = (the(f l)\<^bsup>[Obj f U,t2]\<^esup>)"
from
typing.T_Call[OF `env \<turnstile> t1 : T` `env \<turnstile> t2 : param(the(T^l))` `l \<in> do T`]
sym[OF this(1)] `env \<turnstile> t1 : T` `env \<turnstile> t2 : param(the(T^l))`
obj_inv[of env f U T]
obtain
objT: "env \<turnstile> (Obj f T) : T" and "T = U" and
callT: "env \<turnstile> Call (Obj f T) l t2 : return(the(T^l))"
by auto
have
"(\<exists>F. finite F
\<and> (\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l)))
\<Longrightarrow> env \<turnstile> (the (f l)\<^bsup>[Obj f T,t2]\<^esup>) : return (the(T^l)))"
proof (elim exE conjE)
fix F
assume
"finite F" and
pred_F:
"\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> (the(f l)\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))"
from this(1) finite_FV[of "Obj f T"]
have "finite (F \<union> FV (Obj f T) \<union> FV t2)" by simp
from exFresh_s_p_cof[OF this]
obtain s p where
nin_s: "s \<notin> F \<union> FV (Obj f T) \<union> FV t2" and
nin_p: "p \<notin> F \<union> FV (Obj f T) \<union> FV t2" and "s \<noteq> p"
by auto
with pred_F
have
type_opened: "env\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> {0 \<rightarrow> [Fvar s,Fvar p]} the(f l) : return(the(T^l))"
by (auto simp: openz_def)
from nin_s nin_p FV_option_lem[of f] objT `l \<in> do T`
obtain
"s \<notin> FV (the(f l)) \<union> FV (Obj f T) \<union> FV t2" and
"p \<notin> FV (the(f l)) \<union> FV (Obj f T) \<union> FV t2" by auto
from
open_lemma[OF type_opened this `s \<noteq> p`
objT `env \<turnstile> t2 : param(the(T^l))`]
show ?thesis by (simp add: openz_def)
qed
with abs_typeE[OF callT] t' `T = U` show ?case by auto
qed
qed
theorem subject_reduction': "t \<rightarrow>\<^sub>\<beta>\<^sup>* t' \<Longrightarrow> e \<turnstile> t : T \<Longrightarrow> e \<turnstile> t' : T"
by (induct set: rtranclp) (iprover intro: subject_reduction)+
lemma type_members_equal:
fixes A :: type and B :: type
assumes "do A = do B" and "\<forall>i. (A^i) = (B^i)"
shows "A = B"
proof (cases A)
case (Object ta) thus ?thesis
proof (cases B)
case (Object tb)
from `\<forall>i. (A^i) = (B^i)` `A = Object ta` `B = Object tb`
have "\<And>i. ta i = tb i" by auto
with `A = Object ta` `B = Object tb` show ?thesis by (simp add: ext)
qed
qed
lemma not_var: "Env empty \<turnstile> a : A \<Longrightarrow> \<forall>x. a \<noteq> Fvar x"
by (rule allI, case_tac x, auto)
lemma Call_label_range: "(Env empty) \<turnstile> Call (Obj c T) l b : A \<Longrightarrow> l \<in> dom c"
by (erule typing_elims, erule typing.cases, simp_all)
lemma Call_subterm_type: "Env empty \<turnstile> Call t l b: T
\<Longrightarrow> (\<exists>T'. Env empty \<turnstile> t : T') \<and> (\<exists>T'. Env empty \<turnstile> b : T')"
by (erule typing.cases) auto
lemma Upd_label_range: "Env empty \<turnstile> Upd (Obj c T) l x : A \<Longrightarrow> l \<in> dom c"
by (erule typing_elims, erule typing.cases, simp_all)
lemma Upd_subterm_type:
"Env empty \<turnstile> Upd t l x : T \<Longrightarrow> \<exists>T'. Env empty \<turnstile> t : T'"
by (erule typing.cases) auto
lemma no_var: "\<exists>T. Env empty \<turnstile> Fvar x : T \<Longrightarrow> False"
by (case_tac x, auto)
lemma no_bvar: "e \<turnstile> Bvar x : T \<Longrightarrow> False"
by (erule typing.cases, auto)
subsubsection {* Unique Type *}
theorem type_unique[rule_format]:
assumes "env \<turnstile> a: T"
shows "\<forall>T'. env \<turnstile> a: T' \<longrightarrow> T = T'"
using assms
proof (induct rule: typing.induct)
case T_Var thus ?case by (auto simp: add_get_eq)
next
case T_Obj show ?case by (auto simp: sym[OF obj_inv])
next
case T_Call from this(2) show ?case by auto
next
case T_Upd from this(4) show ?case by auto
qed
subsubsection {* Progress *}
text {* Final Type Soundness Lemma *}
theorem progress:
assumes "Env empty \<turnstile> t : A" and "\<not>(\<exists>c A. t = Obj c A)"
shows "\<exists>b. t \<rightarrow>\<^sub>\<beta> b"
proof -
fix f
have
"(\<forall>A. Env empty \<turnstile> t : A \<longrightarrow> \<not>(\<exists>c T. t = Obj c T) \<longrightarrow> (\<exists>b. t \<rightarrow>\<^sub>\<beta> b))
\<and> (\<forall>A. Env empty \<turnstile> Obj f A : A \<longrightarrow> \<not>(\<exists>c T. Obj f A = Obj c T)
\<longrightarrow> (\<exists>b. Obj f A \<rightarrow>\<^sub>\<beta> b))"
proof (induct rule: sterm_induct)
case (Bvar b) with no_bvar[of "Env empty" b] show ?case
by auto (* contradiction *)
next
case (Fvar x) with Fvar_beta[of x] show ?case
by auto (* contradiction *)
next
case Obj show ?case by auto (* contradiction *)
next
case empty thus ?case by auto (* contradiction *)
next
case insert show ?case by auto (* contradiction *)
next
case (Call t1 l t2) show ?case
proof (clarify)
fix T assume
"Env empty \<turnstile> t1 : T" and "Env empty \<turnstile> t2 : param(the(T^l))" and "l \<in> do T"
note lc = typing_regular''[OF this(1)] typing_regular''[OF this(2)]
from
`Env empty \<turnstile> t1 : T`
`\<forall>A. Env empty \<turnstile> t1 : A \<longrightarrow> \<not> (\<exists>c T. t1 = Obj c T) \<longrightarrow> (\<exists>b. t1 \<rightarrow>\<^sub>\<beta> b)`
have "(\<exists>c B. t1 = Obj c B) \<or> (\<exists>b. t1 \<rightarrow>\<^sub>\<beta> b)" by auto
thus "\<exists>b. Call t1 l t2 \<rightarrow>\<^sub>\<beta> b"
proof (elim disjE exE)
fix c B assume "t1 = Obj c B"
with
`Env empty \<turnstile> t1 : T` obj_inv[of "Env empty" c B T]
`l \<in> do T` obj_inv_elim[of "Env empty" c B]
have "l \<in> dom c" by auto
with `t1 = Obj c B` lc beta.beta[of l c B t2]
show ?thesis by auto
next
fix b assume "t1 \<rightarrow>\<^sub>\<beta> b"
from beta.beta_CallL[OF this lc(2)] show ?thesis by auto
qed
qed
next
case (Upd t1 l t2) show ?case
proof (clarify)
fix T F
assume
"finite F" and
"\<forall>s p. s \<notin> F \<and> p \<notin> F \<and> s \<noteq> p
\<longrightarrow> Env empty\<lparr>s:T\<rparr>\<lparr>p:param(the(T^l))\<rparr>
\<turnstile> (t2\<^bsup>[Fvar s,Fvar p]\<^esup>) : return(the(T^l))" and
"Env empty \<turnstile> t1 : T" and
"l \<in> do T"
from typing_regular''[OF T_Upd[OF this]] lc_upd[of t1 l t2]
obtain "lc t1" and "body t2" by auto
from
`Env empty \<turnstile> t1 : T`
`\<forall>A. Env empty \<turnstile> t1 : A \<longrightarrow> \<not> (\<exists>c T. t1 = Obj c T) \<longrightarrow> (\<exists>b. t1 \<rightarrow>\<^sub>\<beta> b)`
have "(\<exists>c B. t1 = Obj c B) \<or> (\<exists>b. t1 \<rightarrow>\<^sub>\<beta> b)" by auto
thus "\<exists>b. Upd t1 l t2 \<rightarrow>\<^sub>\<beta> b"
proof (elim disjE exE)
fix c B assume "t1 = Obj c B"
with
`Env empty \<turnstile> t1 : T` obj_inv[of "Env empty" c B T]
`l \<in> do T` obj_inv_elim[of "Env empty" c B]
have "l \<in> dom c" by auto
with `t1 = Obj c B` `lc t1` `body t2` beta.beta_Upd[of l c B t2]
show ?thesis by auto
next
fix b assume "t1 \<rightarrow>\<^sub>\<beta> b"
from beta.beta_UpdL[OF this `body t2`] show ?thesis by auto
qed
qed
qed
with assms show ?thesis by auto
qed
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Locally-Nameless-Sigma/Sigma/TypedSigma.thy"}
|
import numpy as np
import pandas as pd
import torch
from torch.optim import Adam
from torch import nn
import pytorch_lightning as pl
import torch.nn.functional as F
import spacy
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from transformers import PretrainedConfig, PreTrainedModel
from kogito.core.processors.models.utils import Evaluator, text_to_embedding
class SWEMHeadDataset(Dataset):
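    """
    Dataset wrapper for SWEM (Simple Word-Embedding Model) heads. Texts are
    either pooled into fixed-size embedding vectors up front
    (apply_pooling=True) or tokenised with spaCy and padded into equal-length
    sequences of vocabulary indices, with index 1 used for unknown tokens.
    """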
def __init__(
self,
data,
vocab,
embedding_matrix=None,
apply_pooling=False,
pooling="avg",
lang=None,
):
texts = data["text"] if isinstance(data, pd.DataFrame) else data
labels = data["label"] if isinstance(data, pd.DataFrame) else None
if not lang:
lang = spacy.load("en_core_web_sm")
self.texts = []
if apply_pooling:
# Apply pooling directly without padding
self.labels = []
self.features = []
for index, text in enumerate(texts):
embedding = text_to_embedding(
text, vocab=vocab, embedding_matrix=embedding_matrix, lang=lang
)
if embedding is not None:
self.features.append(embedding)
if labels is not None:
self.labels.append(labels[index])
self.texts.append(text)
self.labels = np.asarray(self.labels)
else:
# Pad sequences
self.texts = texts
self.labels = np.asarray(labels.to_list()) if labels is not None else None
self.features = pad_sequence(
[
torch.tensor(
[vocab.get(token.text, 1) for token in lang(text)],
dtype=torch.int,
)
for text in texts
],
batch_first=True,
)
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
if self.labels is not None:
return self.features[idx], self.labels[idx]
return self.features[idx]
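# Illustrative usage of the padded path (not part of the original module);
# the tiny vocabulary below is made up, with index 1 implicitly reserved for
# out-of-vocabulary tokens by the `vocab.get(token.text, 1)` lookup above:
#
#   df = pd.DataFrame({"text": ["a cat", "a dog"], "label": [[1, 0], [0, 1]]})
#   ds = SWEMHeadDataset(df, vocab={"a": 2, "cat": 3, "dog": 4})
#   ids, label = ds[0]  # padded index tensor and its label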
class MaxPool(nn.Module):
def forward(self, X):
values, _ = torch.max(X, dim=1)
return values
class AvgPool(nn.Module):
def forward(self, X):
return torch.mean(X, dim=1)
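# Both poolings collapse the sequence dimension: for an embedded batch of
# shape (batch, seq_len, emb_dim) they return (batch, emb_dim). A quick
# illustrative check:
#
#   x = torch.randn(8, 12, 100)
#   assert MaxPool()(x).shape == AvgPool()(x).shape == (8, 100)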
class SWEMConfig(PretrainedConfig):
def __init__(
self,
num_classes=3,
pooling="avg",
freeze_emb=False,
learning_rate=1e-4,
num_embeddings=400002,
embedding_dim=100,
**kwargs
):
self.num_classes = num_classes
self.pooling = pooling
self.freeze_emb = freeze_emb
self.learning_rate = learning_rate
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
super().__init__(**kwargs)
class SWEMClassifier(PreTrainedModel, Evaluator, pl.LightningModule):
config_class = SWEMConfig
def __init__(self, config: SWEMConfig):
super().__init__(config)
try:
embedding_matrix = np.load(
"data/embedding_matrix_glove_100d.npy", allow_pickle=True
)
self.embedding = nn.Embedding(
num_embeddings=embedding_matrix.shape[0],
embedding_dim=embedding_matrix.shape[1],
).from_pretrained(
torch.tensor(embedding_matrix, dtype=torch.float32),
freeze=config.freeze_emb,
)
except FileNotFoundError:
self.embedding = nn.Embedding(
num_embeddings=config.num_embeddings,
embedding_dim=config.embedding_dim,
)
self.pool = MaxPool() if config.pooling == "max" else AvgPool()
self.linear = nn.Linear(self.embedding.embedding_dim, config.num_classes)
self.model = nn.Sequential(self.embedding, self.pool, self.linear)
self.criterion = nn.BCEWithLogitsLoss()
self.learning_rate = config.learning_rate
self.save_hyperparameters(config.to_dict(), ignore="config")
def forward(self, X):
outputs = self.model(X)
probs = F.sigmoid(outputs)
return probs
def training_step(self, batch, batch_idx):
X, y = batch
outputs = self.model(X)
train_loss = self.criterion(outputs, y.float())
preds = self.forward(X)
self.log("train_loss", train_loss, on_epoch=True)
self.log_metrics(preds, y, type="train")
return train_loss
def validation_step(self, batch, batch_idx):
X, y = batch
outputs = self.model(X)
val_loss = self.criterion(outputs, y.float())
preds = self.forward(X)
self.log("val_loss", val_loss, on_epoch=True)
self.log_metrics(preds, y, type="val")
return val_loss
def test_step(self, batch, batch_idx):
X, y = batch
outputs = self.model(X)
test_loss = self.criterion(outputs, y.float())
preds = self.forward(X)
self.log("test_loss", test_loss, on_epoch=True)
self.log_metrics(preds, y, type="test")
return test_loss
def configure_optimizers(self):
optimizer = Adam(self.parameters(), lr=self.learning_rate)
return optimizer
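# Illustrative smoke test (not part of the original module). The input shape
# and random token ids are assumptions, not values from the original setup;
# with no pretrained matrix on disk the classifier falls back to a randomly
# initialised embedding.
if __name__ == "__main__":
    config = SWEMConfig(num_classes=3, pooling="avg")
    model = SWEMClassifier(config)
    X = torch.randint(0, config.num_embeddings, (4, 12))  # batch of 4 padded sequences
    probs = model(X)
    print(probs.shape)  # expected: torch.Size([4, 3])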
|
{"hexsha": "f38f484ebb7e61c82206bbc009fdd3d28a113906", "size": 5340, "ext": "py", "lang": "Python", "max_stars_repo_path": "kogito/core/processors/models/swem.py", "max_stars_repo_name": "mismayil/kogito", "max_stars_repo_head_hexsha": "e62b010d6787ddae0035ed2bc596619ec31fd6b9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-18T13:07:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T13:07:14.000Z", "max_issues_repo_path": "kogito/core/processors/models/swem.py", "max_issues_repo_name": "mismayil/kogito", "max_issues_repo_head_hexsha": "e62b010d6787ddae0035ed2bc596619ec31fd6b9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kogito/core/processors/models/swem.py", "max_forks_repo_name": "mismayil/kogito", "max_forks_repo_head_hexsha": "e62b010d6787ddae0035ed2bc596619ec31fd6b9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5976331361, "max_line_length": 86, "alphanum_fraction": 0.5934456929, "include": true, "reason": "import numpy", "num_tokens": 1110}
|
# get nse daily bhav
# https://www1.nseindia.com/content/historical/EQUITIES/2020/JUN/cm12JUN2020bhav.csv.zip
from datetime import datetime, timedelta
from time import sleep
from typing import Optional
import requests
import os
from pathlib import Path
from fake_useragent import UserAgent
from numpy import random
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry
from multiprocessing.pool import ThreadPool
from nse_daily.common import _get_exception, _errorify
class NSEDaily(object):
"""
NSE Daily
"""
def __init__(self,
default_date_pattern: Optional[str] = '%Y%m%d',
file_pattern: Optional[str] = "cm{date_part}bhav.csv.zip",
file_date_part_format: Optional[str] = '%d%b%Y',
uri_pattern: Optional[
str] = "https://www1.nseindia.com/content/historical/EQUITIES/{yyyy_mon}/{file_name}",
uri_yy_mm_format: Optional[str] = '%Y/%b',
download_path: Optional[str] = None
):
"""
NSE Daily bhav copy can be downloaded from the following
URI: https://www1.nseindia.com/content/historical/EQUITIES/2020/JUN/cm12JUN2020bhav.csv.zip
Parameters are set by default to match the above URI.
If the URI Changes, please change the parameters below, to avoid breaking of code.
:param default_date_pattern: The default input date pattern to be used for parsing dates passed to the functions
        :param file_pattern: The pattern of the NSE Daily bhav file, i.e. for cm12JUN2020bhav.csv.zip pass in cm{date_part}bhav.csv.zip
        :param file_date_part_format: The date format of the date part in the NSE Daily bhav file pattern, i.e. for 12JUN2020 pass in %d%b%Y
        :param uri_pattern: The uri from where the NSE Daily bhav copy needs to be downloaded, default=https://www1.nseindia.com/content/historical/EQUITIES/{yyyy_mon}/{file_name}
:param uri_yy_mm_format: The date part format in the uri , i.e. for 2020/JUN the default has been set to '%Y/%b'
:param download_path: The local filesystem path where the NSE Daily bhav copy will be downloaded
"""
self.default_date_pattern = default_date_pattern
self.nse_file_pattern = file_pattern
self.nse_file_date_part_format = file_date_part_format
self.nse_uri_pattern = uri_pattern
self.nse_uri_yy_mm_format = uri_yy_mm_format
self.download_path = download_path
if self.download_path is None or str(self.download_path).strip() == '':
appdir = str(Path.cwd())
self.download_path = os.path.join(appdir, 'downloads')
self._create_session()
def _create_session(self):
self.session = requests.Session()
retry = Retry(total=5,
read=5,
connect=5,
status_forcelist=(500, 502, 504),
method_whitelist=frozenset(['GET', 'POST']),
backoff_factor=1
)
adapter = HTTPAdapter(max_retries=retry)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
ua = UserAgent()
self.session.headers.update({"User-Agent": str(ua.chrome)})
    def _check_response(self, response: requests.Response):
try:
response.raise_for_status()
return True
except requests.exceptions.HTTPError:
            print('HTTP Error %s' % response.reason)
print(response.text)
return False
def _download_by_date(self, file_date: datetime):
"""
Internal function to request the download for a single date. Function has a random uniform distribution sleep
time between 1 to 3 seconds to avoid getting blocked during multiple concurrent requests. Function also
checks and skips the download if date is a weekend.
:param file_date: The date for which the download is being requested
:return: (file_date, download_file_path)
"""
################################################################################
# Adding random sleep time to avoid being blocked for multiple requests
sleep(random.uniform(1, 3))
#########################################################################
daynum = file_date.weekday()
if daynum >= 5:
print("{} is weekend, file skipped".format(file_date.strftime(self.default_date_pattern)))
return file_date, None
file_date_str = file_date.strftime(self.nse_file_date_part_format).upper()
nse_yy_mm = file_date.strftime(self.nse_uri_yy_mm_format).upper()
nse_file_name = self.nse_file_pattern.format(date_part=file_date_str)
nse_uri = self.nse_uri_pattern.format(yyyy_mon=nse_yy_mm, file_name=nse_file_name)
nse_download_file_path = os.path.join(self.download_path, nse_file_name)
if not os.path.exists(self.download_path):
os.makedirs(self.download_path)
print(nse_uri)
response = self.session.request(method='GET', url=nse_uri, allow_redirects=True)
# r = requests.get(nse_uri, allow_redirects=True)
        status = self._check_response(response)
if not status:
return file_date, None
content_type = response.headers.get('content-type')
if content_type in ['application/zip', 'application/x-zip-compressed', 'application/x-7z-compressed',
'text/csv', 'application/gzip', 'application/x-tar', 'text/plain']:
with open(nse_download_file_path, 'wb') as nse_file:
nse_file.write(response.content)
print("{} download complete".format(nse_file_name))
else:
            e = _errorify("INVALID_CONTENT_TYPE", f"content-type {content_type} being returned is not supported.")
raise Exception(e)
return file_date, nse_download_file_path
def download_by_date(self, date_str, date_format: Optional[str] = '%Y%m%d'):
"""
Function to download the NSE Daily bhav copy for a date
        :param date_str: Input date string i.e. '20210105' for 5th Jan 2021
        :param date_format: The date format of the input date string, default = '%Y%m%d'
        :return: a (file_date, downloaded_file_path) tuple; the path is None if the download was skipped or failed
"""
try:
file_date = datetime.strptime(date_str, date_format)
return self._download_by_date(file_date)
except:
e = _get_exception()
raise Exception(e)
def download_by_date_range(self, date_start: str, date_end: str, date_format: Optional[str] = '%Y%m%d',
num_workers: Optional[int] = 1):
"""
Function to download multiple Daily NSE bhav copies for a date range
        :param date_start: The start date (inclusive) of the date range
        :param date_end: The end date (exclusive) of the date range
        :param date_format: The format of the input dates, default='%Y%m%d'
        :param num_workers: The number of worker threads used to fetch the files, default=1
        :return: a list of (file_date, downloaded_file_path) tuples, one per date in the range
"""
try:
start = datetime.strptime(date_start, date_format)
end = datetime.strptime(date_end, date_format)
l_dates = [start + timedelta(days=x) for x in range(0, (end - start).days)]
tpool = ThreadPool(processes=num_workers)
l_e = tpool.map(self._download_by_date, l_dates)
return l_e
except:
e = _get_exception()
raise Exception(e)
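# Illustrative usage (the dates and download directory below are made up);
# the downloads hit the live NSE site, so this requires network access:
if __name__ == "__main__":
    nse = NSEDaily(download_path="./nse_downloads")
    nse.download_by_date("20200612")
    # fetch a short range with 4 worker threads (the end date is exclusive)
    nse.download_by_date_range("20200601", "20200610", num_workers=4)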
|
{"hexsha": "cf6f4c98cca347f1d1344ebcdea6cf7f98d46478", "size": 7609, "ext": "py", "lang": "Python", "max_stars_repo_path": "nse_daily/nse/__init__.py", "max_stars_repo_name": "v33rh0ra/get_nse_daily", "max_stars_repo_head_hexsha": "c20362c149766116e52d85987f27c3d988af4965", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nse_daily/nse/__init__.py", "max_issues_repo_name": "v33rh0ra/get_nse_daily", "max_issues_repo_head_hexsha": "c20362c149766116e52d85987f27c3d988af4965", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nse_daily/nse/__init__.py", "max_forks_repo_name": "v33rh0ra/get_nse_daily", "max_forks_repo_head_hexsha": "c20362c149766116e52d85987f27c3d988af4965", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0592105263, "max_line_length": 179, "alphanum_fraction": 0.6346431857, "include": true, "reason": "from numpy", "num_tokens": 1685}
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
# Copyright (c) 2013-2014, 2017 ARM Limited
# All rights reserved
# Authors: Matteo Andreozzi
# Riken Gohil
#
# This script is used to parse m3i ASCII traces containing AXI transactions
# and profile them by using scipy distribution fitting capabilities
from numpy import array, median, arange, multiply
from scipy.stats import norm, rayleigh, pareto, expon
from os.path import basename
import re
class Analyzer(object):
"""
M3I file analyzer class
"""
def init_stats(self):
self.num_transactions = 0
self.num_uncachables = 0
self.cumulative_cycles = 0
self.total_data = 0
self.master_name = ''
def __init__(self, fig, bus=4):
# Width of the data bus in bytes (typical configuration of a CCI port for
# CPU/GPU master)
self.type = "READ"
self.figure = fig
self.bus_width = bus
self.max_samples = 10000
# bandwidth sampling window
self.bw_window = 10
# histograms switch
self.show_histograms = False
# bandwidth only mode switch
self.show_bw_only = False
# data only mode switch
self.show_data_only = False
# stats fitting switch
self.stats_fitting = False
# burst mode switch
self.show_burst_mode = False
self.times = {}
self.cycles = {}
self.addresses = {}
self.sizes = {}
self.init_stats()
self.sizeDict = dict(byte=1, size8=1, size16=2,
size32=4, size64=8, size128=16,
size256=32, size512=64, size1024=128,
half=1, word=2, dword=4)
@staticmethod
def fit(ax, func, bins, data, color):
"""
TODO
:param ax:
:param func:
:param bins:
:param data:
:param color:
"""
params = func.fit(data)
y = func.pdf(bins, *params)
ax.plot(bins, y, color + '--', linewidth=2,
label='{0} fit '.format(func.name) + ' '.join([str(p) for p in params]))
def histogram(self, functions, data, title, s1, s2, s3):
"""
TODO
:param s3:
:param s2:
:param s1:
:param functions:
:param data:
:param title:
"""
ax = self.figure.add_subplot(s1, s2, s3)
# the histogram of the data
n, bins, patches = ax.hist(data, normed=True, alpha=0.5, facecolor='g', label='')
for c, f in functions.iteritems():
self.fit(ax, f, bins, data, c)
# plot
ax.set_title(title)
ax.legend(loc='best', frameon=False)
def yint(self, plt):
# make the y ticks integers, not floats
yi = []
locs = plt.get_yticks()
for each in locs:
yi.append(int(each))
plt.set_yticks(yi)
plt.set_yticklabels(yi)
def sequence(self, prev_ax, x, y, title, s1, s2, s3, file_type):
"""
TODO
:param s2:
:param s3:
:param s1:
:param prev_ax:
:param x:
:param y:
:param title:
"""
ax = self.figure.add_subplot(s1, s2, s3, sharex=prev_ax)
ax.scatter(x, y, picker=True, label='')
ax.legend(loc='best', frameon=False)
ax.autoscale_view(True, True, True)
ax.autoscale(enable=True, axis='x', tight=True)
if ( file_type == 'm3i' ):
ax.set_xlabel('Cycles')
elif ( file_type == 'trace' ):
ax.set_xlabel('Seconds')
ax.set_title(title)
if "Sizes" in title:
ax.set_ylabel('Bytes')
elif "Addresses" in title:
ax.set_ylabel('Addresses')
self.yint(ax)
return ax
def median_sample(self, arr):
"""
:param arr:
:return:
"""
x = array(arr)
# compute how many elements we need to compute the median over
n = (len(arr) / self.max_samples)
# elements must be odd in order to get an integer median
n = n + 1 if not n % 2 else n
# last element of the array to take into account
end = n * int(len(x) / n)
# compute the median over every n-dimensional portion of the original array
return list(median(x[:end].reshape(-1, n), 1))
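    # e.g. (illustrative) with max_samples == 3, nine samples collapse in
    # runs of three to their medians:
    #   [1, 2, 9, 4, 5, 6, 7, 8, 0]  ->  [2, 5, 7]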
def sample(self, arr):
"""
:param arr:
:return:
"""
x = array(arr)
# compute how many elements we need to extract
n = (len(arr) / self.max_samples)
l = list(x.take(arange(0, len(arr), n)))
return l
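    # e.g. (illustrative) with max_samples == 3, a nine-element array keeps
    # every third element: [a0, ..., a8]  ->  [a0, a3, a6]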
def bandwidth(self, prev_ax, x, y, title, s1, s2, s3, file_type):
"""
:param s3:
:param s2:
:param s1:
:param prev_ax:
:param x:
:param y:
:param title:
"""
ax = self.figure.add_subplot(s1, s2, s3, sharex=prev_ax)
mean_y = []
prev_i = 0
# limit bw_window to number of samples
self.bw_window = min(self.bw_window, len(x)-1)
for i in xrange(self.bw_window, len(x), self.bw_window):
for j in xrange(self.bw_window):
mean_y.append(sum(y[prev_i:i]) / ((x[i] - x[prev_i]) or 1))
prev_i = i
# extend mean_y to match x axis length
for k in xrange(len(mean_y), len(x)):
mean_y.append(mean_y[-1])
ax.plot(x, mean_y, label='')
# ax.set_title(r'{0}'.format(title))
ax.legend(loc='best', frameon=False)
if ( file_type == 'm3i' ):
ax.set_xlabel('Cycles')
elif ( file_type == 'trace' ):
ax.set_xlabel('Seconds')
        ax.set_title(title)
        ax.set_ylabel('Bytes')
ax.autoscale_view(True, True, True)
ax.autoscale(enable=True, axis='x', tight=True)
return ax
def parse_m3i(self, file_name):
"""
Parses an m3i file and extracts data
:param file_name: the m3i file name
"""
global ascii_in
try:
ascii_in = open(file_name, 'r')
except IOError:
print "Failed to open ", file_name, " for reading"
exit(-1)
# reset all statistics
self.init_stats()
# init samples
for t in ('READ','WRITE'):
self.addresses[t] = []
self.sizes[t] = []
self.cycles[t] = []
# For each line in the m3i trace, parse addresses and sizes
for line in ascii_in:
fields = line.split()
transaction_type = 'READ' if fields[0] == 'AR' else 'WRITE'
# Skips the line if it's not AR or AW
if (not (fields[0] == 'AR' or fields[0] == 'AW')):
continue
# Convert hex address in m3i to dec
address = long(fields[1], 16)
# Sets defaults values for options
length = 1
data_size = self.bus_width
burst_type = 'incr'
transaction_timing = 1
for field in fields[2:]:
# Searches for size field, which is the amount of data transfered
# in a single transfer
if field in self.sizeDict:
data_size = self.sizeDict[field]
# Search for field with prefix L which is the no. of transfers
# on the read data or write data channel
elif 'L' in field:
# Packet size in bytes for L transfers
length = long(field.strip('L'))
# Search for field with prefix V that holds Valid timing wrt last
# transaction (a delta in cycles). Make it absolute (in cycles).
elif 'V' in field:
transaction_timing = long(field.strip('V'))
self.cumulative_cycles += transaction_timing
# Search for field with prefix C to account for uncachables
elif 'C' in field:
cache_attr = field.strip('C')
if cache_attr[-1] != '1':
self.num_uncachables += 1
# Searches the field for the burst mode
elif 'fixed' in field:
burst_type = field
elif 'incr' in field:
burst_type = field
elif 'wrap' in field:
burst_type = field
# Only plots the burst data points if the option is selected
if not self.show_burst_mode:
self.sizes[transaction_type].append(data_size * length)
self.addresses[transaction_type].append(address)
self.cycles[transaction_type].append(self.cumulative_cycles)
else:
if 'fixed' in burst_type:
for i in xrange(length):
self.addresses[transaction_type].append(address)
self.sizes[transaction_type].append(data_size)
self.cycles[transaction_type].append(self.cumulative_cycles)
elif 'incr' in burst_type:
for i in xrange(length):
self.addresses[transaction_type].append(address)
self.sizes[transaction_type].append(data_size)
self.cycles[transaction_type].append(self.cumulative_cycles)
address += (data_size * 8)
# Needs to be changed to wrapping around addresses
# once a limit has been reached (limit not currently detected)
elif 'wrap' in burst_type:
for i in xrange(length):
self.addresses[transaction_type].append(address)
self.sizes[transaction_type].append(data_size)
self.cycles[transaction_type].append(self.cumulative_cycles)
address += (data_size * 8)
self.num_transactions += 1
self.total_data += data_size * length
ascii_in.close()
if len(self.sizes['READ']) and len(self.sizes['WRITE']):
return 'READ/WRITE'
elif len(self.sizes['READ']):
return 'READ'
else:
return 'WRITE'
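# Example of a line this parser accepts (hypothetical trace content; the
# set of size keywords comes from self.sizeDict, defined elsewhere):
#   AR 0000f000 L4 V10 incr
# -> a READ burst of 4 transfers at address 0xf000, issued 10 cycles after
#    the previous transaction, with an incrementing burst type.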
def parse_trace(self, file_name):
"""
Parses a trace file and extracts data
:param file_name: the trace file name
"""
global ascii_in
try:
ascii_in = open(file_name, 'r')
except IOError:
print "Failed to open ", file_name, " for reading"
exit(-1)
# reset all statistics
self.init_stats()
match = re.search("(.*)\.(.+)\.trace", basename(file_name))
self.master_name = match.group(1)
transaction_type = match.group(2)
# init samples
self.times[transaction_type] = []
self.addresses[transaction_type] = []
self.sizes[transaction_type] = []
# For each line in the m3i trace, parse addresses and sizes
for line in ascii_in:
fields = line.split()
# store transaction time
self.times[transaction_type].append(float(fields[0]))
# Convert hex address in trace to dec
self.addresses[transaction_type].append(long(fields[1], 16))
# store transaction size
size = long(fields[2])
self.sizes[transaction_type].append(size)
self.total_data += size
ascii_in.close()
return transaction_type
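# Example trace line (hypothetical): '0.000125 0000f000 64' -> a
# transaction at t = 125 us on address 0xf000 moving 64 bytes. The
# transaction type ('READ'/'WRITE') comes from the file name itself,
# e.g. 'cpu0.READ.trace' gives master 'cpu0' and type 'READ'.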
def draw_trace(self):
ax = None
sub_index = 3 if not (self.show_bw_only or self.show_data_only) else 1
r_index = 1
t_index = 0
if len(self.times[self.type]) > self.max_samples:
addresses = self.sample(self.addresses[self.type])
times = self.sample(self.times[self.type])
sizes = self.sample(self.sizes[self.type])
# bandwidth is sub-sampled when using 'sample', adjust magnitude
bw_sizes = multiply(sizes, len(self.times[self.type]) / self.max_samples)
else:
addresses = self.addresses[self.type]
times = self.times[self.type]
sizes = self.sizes[self.type]
bw_sizes = sizes
ok_addresses = len(addresses) > 0
ok_sizes = len(sizes) > 0
ok_times = len(times) > 0
functions = {}
if ok_addresses and self.show_histograms and not (self.show_bw_only or self.show_data_only):
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh, 'y': pareto, 'g': expon}
self.histogram(functions, addresses, self.master_name + '.' + self.type + ' Addresses',
sub_index, r_index, 1 + t_index)
if ok_sizes and self.show_histograms and not (self.show_bw_only or self.show_data_only):
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh, 'y': pareto}
self.histogram(functions, sizes, self.master_name + '.' + self.type + ' Sizes',
sub_index, r_index, 2 + t_index)
if ok_times and self.show_histograms and not (self.show_bw_only or self.show_data_only):
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh}
self.histogram(functions, times, self.master_name + '.' + self.type + ' Times',
sub_index, r_index, 3 + t_index)
if ok_times and ok_addresses and not self.show_histograms and not (self.show_bw_only or self.show_data_only):
ax = self.sequence(ax, times, addresses,
self.master_name + '.' + self.type + ' Addresses', sub_index, r_index, 1 + t_index, 'trace')
if ok_times and ok_sizes and not self.show_histograms:
if not self.show_bw_only:
sq_t_index = 2 + t_index if not self.show_data_only else 1 + t_index
ax = self.sequence(ax, times, sizes,
self.master_name + '.' + self.type + ' Sizes', sub_index, r_index, sq_t_index, 'trace')
if not self.show_data_only:
bw_t_index = 3 + t_index if not self.show_bw_only else 1 + t_index
ax = self.bandwidth(ax, times, bw_sizes,
self.master_name + '.' + self.type + ' Bandwidth', sub_index, r_index, bw_t_index, 'trace')
t_index += 3
if ax is not None:
ax.set_xlim(0, times[-1])
self.figure.tight_layout()
def draw_m3i(self):
"""
TODO
"""
ax = None
types = self.type.split('/')
sub_index = 3
if len(types) == 2:
r_index = 2
else:
r_index = 1
t_index = 1
for transaction_type in types:
if len(self.cycles[transaction_type]) > self.max_samples:
addresses = self.median_sample(self.addresses[transaction_type])
cycles = self.median_sample(self.cycles[transaction_type])
sizes = self.median_sample(self.sizes[transaction_type])
else:
addresses = self.addresses[transaction_type]
cycles = self.cycles[transaction_type]
sizes = self.sizes[transaction_type]
ok_addresses = len(addresses) > 0
ok_sizes = len(sizes) > 0
ok_cycles = len(cycles) > 0
functions = {}
if ok_addresses and self.show_histograms:
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh, 'y': pareto, 'g': expon}
self.histogram(functions, addresses, transaction_type + ' Addresses',
sub_index, r_index, t_index)
if ok_sizes and self.show_histograms:
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh, 'y': pareto}
t_index += r_index
self.histogram(functions, sizes, transaction_type + ' Sizes', sub_index, r_index, t_index)
if ok_cycles and self.show_histograms:
if self.stats_fitting:
functions = {'r': norm, 'b': rayleigh}
t_index += r_index
self.histogram(functions, cycles, transaction_type + ' Cycles', sub_index, r_index, t_index)
if ok_cycles and ok_addresses and not self.show_histograms:
ax = self.sequence(ax, cycles, addresses,
transaction_type + ' Addresses', sub_index, r_index, t_index, 'm3i')
if ok_cycles and ok_sizes and not self.show_histograms:
t_index += r_index
ax = self.sequence(ax, cycles, sizes,
transaction_type + ' Sizes', sub_index, r_index, t_index, 'm3i')
t_index += r_index
ax = self.bandwidth(ax, cycles, sizes,
transaction_type + ' Bandwidth', sub_index, r_index, t_index, 'm3i')
t_index = 2
self.figure.tight_layout()
|
{"hexsha": "cc41c3469f86f168dfa8ed766bfb35020367ced3", "size": 17431, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/analyzer.py", "max_stars_repo_name": "wwmfdb/ATP-Engine", "max_stars_repo_head_hexsha": "00eaf0f551907c9d6e2db446d5e78364364531d4", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-05-19T16:13:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T19:22:37.000Z", "max_issues_repo_path": "utils/analyzer.py", "max_issues_repo_name": "wwmfdb/ATP-Engine", "max_issues_repo_head_hexsha": "00eaf0f551907c9d6e2db446d5e78364364531d4", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-07-16T14:30:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T22:10:18.000Z", "max_forks_repo_path": "utils/analyzer.py", "max_forks_repo_name": "wwmfdb/ATP-Engine", "max_forks_repo_head_hexsha": "00eaf0f551907c9d6e2db446d5e78364364531d4", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-05-21T17:56:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T02:07:15.000Z", "avg_line_length": 37.2457264957, "max_line_length": 123, "alphanum_fraction": 0.5428259997, "include": true, "reason": "from numpy,from scipy", "num_tokens": 3963}
|
from unittest import TestCase
from pysight.nd_hist_generator.movie import *
from pysight.nd_hist_generator.volume_gen import *
import pandas as pd
import numpy as np
def gen_data_df(frame_num=10, line_num=1000, end=100_000):
"""
Mock data for tests.
Returns:
df - The full DataFrame
frames only
lines only
x pixels
y pixels
"""
photons = np.arange(0, end, dtype=np.uint64)
channel = np.ones_like(photons)
lines = np.linspace(0, end, num=line_num, endpoint=False, dtype=np.uint64)
x_pix = int(len(photons) / len(lines))
ones_lines = np.ones((1, int(len(photons) / len(lines))), dtype=np.uint64)
frames = np.linspace(0, end, num=frame_num, dtype=np.uint64, endpoint=False)
frames_ser = pd.Series(frames)
ones_frames = np.ones((1, int(len(photons) / len(frames))), dtype=np.uint64)
lines = (np.atleast_2d(lines).T @ ones_lines).ravel()
frames = (np.atleast_2d(frames).T @ ones_frames).ravel()
assert len(lines) == len(frames) == len(photons)
df = pd.DataFrame(
{
"abs_time": photons,
"time_rel_line": photons - lines,
"Lines": lines,
"Frames": frames,
"Channel": channel,
}
)
df["Channel"] = df["Channel"].astype("category")
df.set_index(["Channel", "Frames", "Lines"], drop=True, inplace=True)
y_pix = x_pix
return df, frames_ser, pd.Series(np.unique(lines)), x_pix, y_pix
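# Shape check (illustrative): gen_data_df(frame_num=10, line_num=100,
# end=1000) -- the values the test class below uses -- yields a 1000-row
# DataFrame, 10 frame timestamps, 100 line timestamps, and
# x_pix = y_pix = 1000 // 100 = 10.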
class TestMovies(TestCase):
frame_num = 10
end = 1000
line_num = 100
data, frames, lines, x_pix, y_pix = gen_data_df(
frame_num=frame_num, line_num=line_num, end=end
)
data_shape = (frame_num, x_pix, y_pix)
volgen = VolumeGenerator(frames, data_shape)
fr = volgen.create_frame_slices()
movie = Movie(
data=data,
lines=lines,
data_shape=data_shape,
outputs={"memory": True},
line_delta=int(lines.diff().mean()),
fill_frac=100.0,
bidir=True,
frame_slices=fr,
frames=frames,
frames_per_chunk=volgen.frames_per_chunk,
)
movie.run()
def test_all_pipeline_basic(self):
self.assertTrue(
np.all(self.movie.stack[1].ravel() == np.ones((self.end,), dtype=np.uint8))
)
def test_baseline_outputs(self):
during, end = self.movie._Movie__determine_outputs()
self.assertTrue(len(during) == 1)
self.assertTrue(len(end) == 1)
during, end = str(during), str(end)
self.assertTrue("create_memory_output" in during)
self.assertTrue("convert_list_to_arr" in end)
def test_slice_df(self):
sl = slice(0, 23000)
movie = Movie(
self.data,
self.lines,
data_shape=self.data_shape,
outputs={"memory": True},
line_delta=int(self.lines.diff().mean()),
fill_frac=100.0,
bidir=True,
frames=self.frames,
frame_slices=(slice(1) for n in range(2)),
)
di = movie._Movie__slice_df(sl)
self.assertTrue(1 in di[0].keys())
self.assertSequenceEqual((di[0][1].shape, di[1]), ((1000, 2), 10))
def test_single_slice_df(self):
sl = slice(0, 0)
movie = Movie(
self.data,
self.lines,
data_shape=self.data_shape,
outputs={"memory": True},
line_delta=int(self.lines.diff().mean()),
fill_frac=100.0,
bidir=True,
frames=self.frames,
frame_slices=(slice(1) for n in range(2)),
)
di = movie._Movie__slice_df(sl)
self.assertTrue(1 in di[0].keys())
self.assertSequenceEqual((di[0][1].shape, di[1]), ((100, 2), 1))
|
{"hexsha": "968814f924f2220fd20710bd3140d4dd7eca60d9", "size": 3774, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_movie.py", "max_stars_repo_name": "liorgolgher/python-pysight", "max_stars_repo_head_hexsha": "029634d328c18fde4fc4ed666980b2e537e18814", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_movie.py", "max_issues_repo_name": "liorgolgher/python-pysight", "max_issues_repo_head_hexsha": "029634d328c18fde4fc4ed666980b2e537e18814", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_movie.py", "max_forks_repo_name": "liorgolgher/python-pysight", "max_forks_repo_head_hexsha": "029634d328c18fde4fc4ed666980b2e537e18814", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8173913043, "max_line_length": 87, "alphanum_fraction": 0.5900900901, "include": true, "reason": "import numpy", "num_tokens": 959}
|
# define test kernel
function test_kernel!(
b :: Matrix{Float64},
x :: Float64,
dx :: Float64
) :: Nothing
for i in eachindex(b)
b[i] += dx * x^i * exp(-x^2) * sin(2.0 * pi * x)
end
return nothing
end
# define benchmark kernel
function bench_kernel!(
b :: Matrix{Float64},
x :: Float64
) :: Nothing
for i in eachindex(b)
b[i] = x^i * exp(-x^2) * sin(2.0 * pi * x)
end
return nothing
end
"""
test_flow() :: Nothing
Run consistency checks for flow equations by testing integrators.
"""
function test_flow() :: Nothing
# benchmark handcrafted integrators against QuadGK
test_integrators()
return nothing
end
"""
test_integrators() :: Nothing
Run consistency checks for integrators by computing test integrals and comparing to QuadGK.
"""
function test_integrators() :: Nothing
# init test dummys
b1 = zeros(Float64, 10, 10)
b2 = (copy(b1), copy(b1), copy(b1))
b3 = (copy(b1), copy(b1), copy(b1))
@testset "quadrature" begin
# compute integral with QuadGK
quadgk!((b, x) -> bench_kernel!(b, x), b1, 1.0, 5.0, atol = 1e-8, rtol = 1e-8)
# compute integral with simps!
integrate_lin!((b, x, dx) -> test_kernel!(b, x, dx), b2, 1.0, 5.0, 100, 1e-8, 1e-8)
integrate_log!((b, x, dx) -> test_kernel!(b, x, dx), b3, 1.0, 5.0, 100, 1e-8, 1e-8)
@test b1 ≈ b2[1]
@test b1 ≈ b3[1]
end
return nothing
end
|
{"hexsha": "eb17b1042d19c92c07c74f76772fca21366672a8", "size": 1475, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Flow/test.jl", "max_stars_repo_name": "dominikkiese/PFFRGSolver.jl", "max_stars_repo_head_hexsha": "13bafa3afb83cfc4305aa4cf3edb9fc5fb9849cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-06-18T12:42:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T23:37:28.000Z", "max_issues_repo_path": "src/Flow/test.jl", "max_issues_repo_name": "dominikkiese/PFFRGSolver.jl", "max_issues_repo_head_hexsha": "13bafa3afb83cfc4305aa4cf3edb9fc5fb9849cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-06-18T07:48:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T15:48:54.000Z", "max_forks_repo_path": "src/Flow/test.jl", "max_forks_repo_name": "dominikkiese/PFFRGSolver.jl", "max_forks_repo_head_hexsha": "13bafa3afb83cfc4305aa4cf3edb9fc5fb9849cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.046875, "max_line_length": 91, "alphanum_fraction": 0.5891525424, "num_tokens": 503}
|
import cv2
import time
import numpy as np
import pandas as pd
import mediapipe as mp
import plotly.express as px
import plotly.graph_objects as go
class poseDetector:
def __init__(
self,
mode=False,
complex=1,
smooth_landmarks=True,
segmentation=True,
smooth_segmentation=True,
detectionCon=0.5,
trackCon=0.5,
):
self.mode = mode
self.complex = complex
self.smooth_landmarks = smooth_landmarks
self.segmentation = segmentation
self.smooth_segmentation = smooth_segmentation
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpDrawStyle = mp.solutions.drawing_styles
self.mpPose = mp.solutions.pose
self.pose = self.mpPose.Pose(
self.mode,
self.complex,
self.smooth_landmarks,
self.segmentation,
self.smooth_segmentation,
self.detectionCon,
self.trackCon,
)
self.mp_drawing = mp.solutions.drawing_utils
def findPose(self, img):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
# self.plotly_fig(self.results.pose_landmarks)
print(self.results.pose_landmarks)
print('-----------------------------------------------------------------------------------------------------------')
# if self.results.pose_landmarks:
# if draw:
# self.mp_drawing.draw_landmarks(
# img,
# self.results.pose_landmarks,
# self.mpPose.POSE_CONNECTIONS,
# # self.mpDrawStyle.get_default_pose_landmarks_style())
# self.mpDraw.DrawingSpec(
# color=(0, 0, 255), thickness=2, circle_radius=2
# ),
# self.mpDraw.DrawingSpec(
# color=(0, 255, 0), thickness=2, circle_radius=2
# ),
# )
return img
def findPosition(self, img, draw=True):
self.lmList = []
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
# print(id, lm)
cx, cy = int(lm.x * w), int(lm.y * h)
x, y, z = lm.x, lm.y, lm.z
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)
return self.lmList
def findAngle(self, img, p1, p2, p3, draw=True):
# Get the landmarks
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
x3, y3 = self.lmList[p3][1:]
# Calculate the Angle
radians = np.arctan2(y3 - y2, x3 - x2) - np.arctan2(y1 - y2, x1 - x2)
angle = np.abs(radians * 180.0 / np.pi)
if angle > 180.0:
angle = 360 - angle
print(int(angle))
# Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
cv2.circle(img, (x1, y1), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 10, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 10, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 10, (0, 0, 255), 2)
cv2.putText(
img,
str(int(angle)) + "",
(x2 - 50, y2 + 50),
cv2.FONT_HERSHEY_PLAIN,
2,
(255, 0, 0),
2,
)
return angle
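# Worked example of the arctan2 angle above (illustrative, not from the
# original source): for p1 = (0, 1), p2 = (0, 0), p3 = (1, 0) the two
# terms are atan2(0, 1) = 0 and atan2(1, 0) = pi/2, so
# |0 - pi/2| * 180/pi = 90 degrees, as expected for a right angle.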
def plotly_fig(self, results):
if not results:
return
plotted_landmarks = {}
_PRESENCE_THRESHOLD = 0.5
_VISIBILITY_THRESHOLD = 0.5
for idx, landmark in enumerate(self.results.pose_landmarks.landmark):
if (
landmark.HasField("visibility")
and landmark.visibility < _VISIBILITY_THRESHOLD
) or (
landmark.HasField("presence") and landmark.presence < _PRESENCE_THRESHOLD
):
continue
plotted_landmarks[idx] = (-landmark.z, landmark.x, -landmark.y)
if self.results.pose_landmarks.landmark:
out_cn = []
num_landmarks = len(self.results.pose_landmarks.landmark)
# Draws the connections if the start and end landmarks are both visible.
for connection in self.mpPose.POSE_CONNECTIONS:
start_idx = connection[0]
end_idx = connection[1]
if not (0 <= start_idx < num_landmarks and 0 <= end_idx < num_landmarks):
raise ValueError(
f"Landmark index is out of range. Invalid connection "
f"from landmark #{start_idx} to landmark #{end_idx}."
)
if start_idx in plotted_landmarks and end_idx in plotted_landmarks:
landmark_pair = [
plotted_landmarks[start_idx],
plotted_landmarks[end_idx],
]
out_cn.append(
dict(
xs=[landmark_pair[0][0], landmark_pair[1][0]],
ys=[landmark_pair[0][1], landmark_pair[1][1]],
zs=[landmark_pair[0][2], landmark_pair[1][2]],
)
)
cn2 = {"xs": [], "ys": [], "zs": []}
for pair in out_cn:
for k in pair.keys():
cn2[k].append(pair[k][0])
cn2[k].append(pair[k][1])
cn2[k].append(None)
df = pd.DataFrame(plotted_landmarks).T.rename(columns={0: "z", 1: "x", 2: "y"})
df["lm"] = df.index.map(lambda s: self.mpPose.PoseLandmark(s).name).values
fig = (
px.scatter_3d(df, x="z", y="x", z="y", hover_name="lm")
.update_traces(marker={"color": "red"})
.update_layout(
margin={"l": 0, "r": 0, "t": 0, "b": 0},
scene={"camera": {"eye": {"x": 2.1, "y": 0, "z": 0}}},
)
)
fig.add_traces(
[
go.Scatter3d(
x=cn2["xs"],
y=cn2["ys"],
z=cn2["zs"],
mode="lines",
line={"color": "black", "width": 5},
name="connections",
)
]
)
return fig
def main():
cap = cv2.VideoCapture('./Hackathon_1st_Hitter.mp4')
milliseconds = 1000
start_time = int(input("Enter Start time: "))
end_time = int(input("Enter Length: "))
end_time = start_time + end_time
cap.set(cv2.CAP_PROP_POS_MSEC, start_time * milliseconds)
pTime = 0
detector = poseDetector()
while cap.get(cv2.CAP_PROP_POS_MSEC) <= end_time * milliseconds:
success, img = cap.read()
if not success:
break
img = detector.findPose(img)
lmList = detector.findPosition(img, draw=False)
if len(lmList) != 0:
detector.findAngle(img, 11, 13, 15)
detector.findAngle(img, 24, 12, 14)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
# show fps count
cv2.putText(
img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3
)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
|
{"hexsha": "31a0da6326b13ebdbc5ff4fe03a661e0db84c628", "size": 7918, "ext": "py", "lang": "Python", "max_stars_repo_path": "track_2_openCV/pose_angle.py", "max_stars_repo_name": "Batlytics/Batlytics", "max_stars_repo_head_hexsha": "3766e9f847b58a533fc09ee196fb59c075b8842a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "track_2_openCV/pose_angle.py", "max_issues_repo_name": "Batlytics/Batlytics", "max_issues_repo_head_hexsha": "3766e9f847b58a533fc09ee196fb59c075b8842a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "track_2_openCV/pose_angle.py", "max_forks_repo_name": "Batlytics/Batlytics", "max_forks_repo_head_hexsha": "3766e9f847b58a533fc09ee196fb59c075b8842a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3211009174, "max_line_length": 129, "alphanum_fraction": 0.4843394797, "include": true, "reason": "import numpy", "num_tokens": 2011}
|
module actual_burner_module
use eos_type_module
contains
subroutine actual_burner_init()
use amrex_fort_module, only : rt => amrex_real
implicit none
! Do nothing in this burner.
end subroutine actual_burner_init
subroutine actual_burner(state_in, state_out, dt, time)
use amrex_fort_module, only : rt => amrex_real
implicit none
type (eos_t), intent(in) :: state_in
type (eos_t), intent(inout) :: state_out
real(rt) , intent(in) :: dt, time
! Do nothing in this burner.
end subroutine actual_burner
end module actual_burner_module
|
{"hexsha": "7ea9cf2e038aa0e13d53b4b6abe289f75981d37a", "size": 608, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "networks/breakout/actual_burner.f90", "max_stars_repo_name": "doreenfan/Microphysics", "max_stars_repo_head_hexsha": "bbfabaae0a98af32dbf353a7747a8ca787710ac6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-24T04:07:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-24T04:07:25.000Z", "max_issues_repo_path": "networks/breakout/actual_burner.f90", "max_issues_repo_name": "Youhichka/Microphysics", "max_issues_repo_head_hexsha": "6f28333d40c9e15fdfbb1c4dc208e887fb5549c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-19T14:58:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:40:56.000Z", "max_forks_repo_path": "networks/breakout/actual_burner.f90", "max_forks_repo_name": "Youhichka/Microphysics", "max_forks_repo_head_hexsha": "6f28333d40c9e15fdfbb1c4dc208e887fb5549c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 57, "alphanum_fraction": 0.6973684211, "num_tokens": 163}
|
import unittest
import nose.tools
import numpy as np
from scipy.spatial import distance_matrix
from tspsolver.tsp_generator import TSPGenerator
from ..population_generation import SimplePopulationGenerator
from ..mutation import (SwapCityMutation, DisplacementMutation,
InversionMutation, InsertionMutation)
class SwapCityMutationTest(unittest.TestCase):
def setUp(self):
self._num_points = 10
self._pop_size = 5
gen = TSPGenerator(self._num_points)
self._data = gen.generate()
self._distances = distance_matrix(self._data, self._data)
popGen = SimplePopulationGenerator(self._pop_size)
self._population = popGen.generate(self._distances[0])
def test_mutate(self):
swap_city = SwapCityMutation(1.0)
new_pop = swap_city.mutate(self._population.copy())
nose.tools.assert_equal(new_pop.shape, self._population.shape)
nose.tools.assert_true(np.any(new_pop != self._population))
class DisplacementMutationTest(unittest.TestCase):
def setUp(self):
self._num_points = 10
self._pop_size = 5
gen = TSPGenerator(self._num_points)
self._data = gen.generate()
self._distances = distance_matrix(self._data, self._data)
popGen = SimplePopulationGenerator(self._pop_size)
self._population = popGen.generate(self._distances[0])
def test_mutate_single(self):
mutator = DisplacementMutation(1.0)
pop = np.array([[1, 2, 3, 4, 5, 6, 7]])
new_pop = mutator.mutate(pop.copy())
np.testing.assert_array_equal(pop[0], np.sort(new_pop[0]))
def test_mutate(self):
mutator = DisplacementMutation(1.0)
new_pop = mutator.mutate(self._population.copy())
nose.tools.assert_equal(new_pop.shape, self._population.shape)
nose.tools.assert_true(np.any(new_pop != self._population))
class InversionMutationTest(unittest.TestCase):
def setUp(self):
self._num_points = 10
self._pop_size = 5
gen = TSPGenerator(self._num_points)
self._data = gen.generate()
self._distances = distance_matrix(self._data, self._data)
popGen = SimplePopulationGenerator(self._pop_size)
self._population = popGen.generate(self._distances[0])
def test_mutate_single(self):
mutator = InversionMutation(1.0)
pop = np.array([[1, 2, 3, 4, 5, 6, 7]])
new_pop = mutator.mutate(pop.copy())
np.testing.assert_array_equal(pop[0], np.sort(new_pop[0]))
def test_mutate(self):
mutator = InversionMutation(1.0)
new_pop = mutator.mutate(self._population.copy())
nose.tools.assert_equal(new_pop.shape, self._population.shape)
nose.tools.assert_true(np.any(new_pop != self._population))
class InsertionMutationTest(unittest.TestCase):
def setUp(self):
self._num_points = 10
self._pop_size = 5
gen = TSPGenerator(self._num_points)
self._data = gen.generate()
self._distances = distance_matrix(self._data, self._data)
popGen = SimplePopulationGenerator(self._pop_size)
self._population = popGen.generate(self._distances[0])
def test_mutate_single(self):
mutator = InsertionMutation(1.0)
pop = np.array([[1, 2, 3, 4, 5, 6, 7]])
new_pop = mutator.mutate(pop.copy())
np.testing.assert_array_equal(pop[0], np.sort(new_pop[0]))
def test_mutate(self):
mutator = InsertionMutation(1.0)
new_pop = mutator.mutate(self._population.copy())
nose.tools.assert_equal(new_pop.shape, self._population.shape)
nose.tools.assert_true(np.any(new_pop != self._population))
|
{"hexsha": "9290bc4c51d4adfeb3e35f4fa6da7942ab6ab775", "size": 3723, "ext": "py", "lang": "Python", "max_stars_repo_path": "tspsolver/ga/test/mutation_test.py", "max_stars_repo_name": "samueljackson92/tsp-solver", "max_stars_repo_head_hexsha": "4f6403b40c7ba9062a9b7ffdde5e7d594163bc2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-03T14:37:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T23:13:56.000Z", "max_issues_repo_path": "tspsolver/ga/test/mutation_test.py", "max_issues_repo_name": "samueljackson92/tsp-solver", "max_issues_repo_head_hexsha": "4f6403b40c7ba9062a9b7ffdde5e7d594163bc2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tspsolver/ga/test/mutation_test.py", "max_forks_repo_name": "samueljackson92/tsp-solver", "max_forks_repo_head_hexsha": "4f6403b40c7ba9062a9b7ffdde5e7d594163bc2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3739130435, "max_line_length": 70, "alphanum_fraction": 0.6747246844, "include": true, "reason": "import numpy,from scipy", "num_tokens": 903}
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster, metrics, svm, model_selection
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import data_utils as data
import defenses
import datasets
def process_defense(
datadef, Q, all_dists, model, results, use_emp, use_emp_label, defense_label,
max_frac_to_remove, frac_increment, num_folds,
norm=2,
subtract_from_l2=False,
P_datadef=None,
dists=None):
if dists is not None:
assert Q is None
assert P_datadef is None
assert subtract_from_l2 == False
if P_datadef is None:
P_datadef = datadef
if use_emp_label is not None:
defense_emp_label = '%s_%s' % (defense_label, use_emp_label)
else:
defense_emp_label = defense_label
if dists is None:
dists = P_datadef.compute_dists_under_Q_over_dataset(
Q=Q,
use_emp_centroids=use_emp,
subtract_from_l2=subtract_from_l2,
norm=norm).reshape(-1, 1)
results['dist_labels'].append(defense_emp_label)
all_dists = np.concatenate((all_dists, dists), axis=1)
auto_threshold_and_retrain(
datadef, dists, model, results, defense_emp_label,
max_frac_to_remove=max_frac_to_remove,
frac_increment=frac_increment,
num_folds=num_folds)
return all_dists, results
def auto_threshold_and_retrain(
datadef,
dists,
model,
results,
defense_emp_label,
max_frac_to_remove=0.30,
frac_increment=0.05,
num_folds=5):
def perc_format(frac):
return "{:6.2f}".format(frac * 100)
fracs_to_remove = np.linspace(
0,
max_frac_to_remove,
int(np.round(max_frac_to_remove / frac_increment)) + 1)
train_accs = np.zeros(len(fracs_to_remove))
val_accs = np.zeros(len(fracs_to_remove))
test_accs = np.zeros(len(fracs_to_remove))
fracs_of_good_points_kept = np.zeros(len(fracs_to_remove))
fracs_of_bad_points_kept = np.zeros(len(fracs_to_remove))
best_val_acc = -1
for idx, frac_to_remove in enumerate(fracs_to_remove):
if frac_to_remove == 0: # Use entire modified data
train_acc = results['test']['modified']['train_acc_overall']
val_acc = results['cv_val_acc_modified']
test_acc = results['test']['modified']['test_acc']
frac_of_good_points_kept = 1.0
frac_of_bad_points_kept = 1.0
else:
train_acc, val_acc, test_acc, frac_of_good_points_kept, frac_of_bad_points_kept = datadef.remove_and_retrain(
dists,
model,
frac_to_remove,
num_folds=num_folds)
print(' Removing %s%%: Train acc: %.3f Validation acc: %.3f Test acc: %.3f %% good data kept: %s %% bad data kept: %s' % (
perc_format(frac_to_remove),
train_acc, val_acc, test_acc,
perc_format(frac_of_good_points_kept),
perc_format(frac_of_bad_points_kept)))
train_accs[idx] = train_acc
val_accs[idx] = val_acc
test_accs[idx] = test_acc
fracs_of_good_points_kept[idx] = frac_of_good_points_kept
fracs_of_bad_points_kept[idx] = frac_of_bad_points_kept
if best_val_acc < val_acc:
best_val_acc = val_acc
best_idx = idx
diff_in_test_acc = results['test']['clean']['test_acc'] - test_accs[best_idx]
leverage = diff_in_test_acc / datadef.epsilon
print(' Defense auto-selected removing %s of the data.' % fracs_to_remove[best_idx])
print(' Best test acc: %.3f' % test_accs[best_idx])
print(' Leverage: %.3f' % leverage)
results[defense_emp_label] = defaultdict(dict)
results[defense_emp_label]['fracs_to_remove'] = fracs_to_remove
results[defense_emp_label]['train_accs'] = train_accs
results[defense_emp_label]['val_accs'] = val_accs
results[defense_emp_label]['test_accs'] = test_accs
results[defense_emp_label]['fracs_of_good_points_kept'] = fracs_of_good_points_kept
results[defense_emp_label]['fracs_of_bad_points_kept'] = fracs_of_bad_points_kept
results[defense_emp_label]['cv_frac_to_remove'] = fracs_to_remove[best_idx]
results[defense_emp_label]['cv_test_acc'] = test_accs[best_idx]
results[defense_emp_label]['leverage'] = leverage
return results
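# Worked example of the leverage metric above (illustrative numbers, not
# from any experiment): if the clean test accuracy is 0.90, the best
# defended accuracy is 0.84, and epsilon (the poisoning fraction) is 0.03,
# then leverage = (0.90 - 0.84) / 0.03 = 2.0, i.e. each unit of poisoned
# data costs two units of test accuracy despite the defense.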
|
{"hexsha": "723789477fe8937c259cfec264b25ad8a956bfcd", "size": 4743, "ext": "py", "lang": "Python", "max_stars_repo_path": "defense_testers.py", "max_stars_repo_name": "iamgroot42/data-poisoning-release", "max_stars_repo_head_hexsha": "fef371060878b7524af9b31225d3144d268b98b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-12-27T21:42:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T02:06:34.000Z", "max_issues_repo_path": "defense_testers.py", "max_issues_repo_name": "iamgroot42/data-poisoning-release", "max_issues_repo_head_hexsha": "fef371060878b7524af9b31225d3144d268b98b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "defense_testers.py", "max_forks_repo_name": "iamgroot42/data-poisoning-release", "max_forks_repo_head_hexsha": "fef371060878b7524af9b31225d3144d268b98b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2018-01-27T00:34:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T16:20:58.000Z", "avg_line_length": 33.1678321678, "max_line_length": 154, "alphanum_fraction": 0.6816360953, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1218}
|
[STATEMENT]
lemma L_transform_Tree\<^sub>\<alpha>_preserves_hereditarily_fs:
assumes "hereditarily_fs t\<^sub>\<alpha>"
shows "Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
[PROOF STEP]
proof (induct rule: hereditarily_fs.induct)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>tset\<^sub>\<alpha>. \<lbrakk>finite (supp tset\<^sub>\<alpha>); \<And>t\<^sub>\<alpha>. t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> FL_Formula.hereditarily_fs t\<^sub>\<alpha>; \<And>t\<^sub>\<alpha>. t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Conj\<^sub>\<alpha> tset\<^sub>\<alpha>))
2. \<And>t\<^sub>\<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
3. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
4. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
case (Conj\<^sub>\<alpha> tset\<^sub>\<alpha>)
[PROOF STATE]
proof (state)
this:
finite (supp tset\<^sub>\<alpha>)
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> FL_Formula.hereditarily_fs ?t\<^sub>\<alpha>
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> ?t\<^sub>\<alpha>)
goal (4 subgoals):
1. \<And>tset\<^sub>\<alpha>. \<lbrakk>finite (supp tset\<^sub>\<alpha>); \<And>t\<^sub>\<alpha>. t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> FL_Formula.hereditarily_fs t\<^sub>\<alpha>; \<And>t\<^sub>\<alpha>. t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Conj\<^sub>\<alpha> tset\<^sub>\<alpha>))
2. \<And>t\<^sub>\<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
3. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
4. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
finite (supp tset\<^sub>\<alpha>)
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> FL_Formula.hereditarily_fs ?t\<^sub>\<alpha>
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> ?t\<^sub>\<alpha>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
finite (supp tset\<^sub>\<alpha>)
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> FL_Formula.hereditarily_fs ?t\<^sub>\<alpha>
?t\<^sub>\<alpha> \<in> set_bset tset\<^sub>\<alpha> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> ?t\<^sub>\<alpha>)
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Conj\<^sub>\<alpha> tset\<^sub>\<alpha>))
[PROOF STEP]
by (auto intro!: Formula.hereditarily_fs.Conj\<^sub>\<alpha>) (metis imageE map_bset.rep_eq)
[PROOF STATE]
proof (state)
this:
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Conj\<^sub>\<alpha> tset\<^sub>\<alpha>))
goal (3 subgoals):
1. \<And>t\<^sub>\<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
2. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
3. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>t\<^sub>\<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
2. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
3. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
case (Not\<^sub>\<alpha> t\<^sub>\<alpha>)
[PROOF STATE]
proof (state)
this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
goal (3 subgoals):
1. \<And>t\<^sub>\<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
2. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
3. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
by (simp add: Formula.hereditarily_fs.Not\<^sub>\<alpha>)
[PROOF STATE]
proof (state)
this:
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Not\<^sub>\<alpha> t\<^sub>\<alpha>))
goal (2 subgoals):
1. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
2. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
2. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
case (Pred\<^sub>\<alpha> f \<phi>)
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<And>f \<phi>. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
2. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
[PROOF STEP]
by (simp add: Formula.hereditarily_fs.Act\<^sub>\<alpha> Formula.hereditarily_fs.Pred\<^sub>\<alpha>)
[PROOF STATE]
proof (state)
this:
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Pred\<^sub>\<alpha> f \<phi>))
goal (1 subgoal):
1. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
case (Act\<^sub>\<alpha> t\<^sub>\<alpha> f \<alpha>)
[PROOF STATE]
proof (state)
this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
goal (1 subgoal):
1. \<And>t\<^sub>\<alpha> f \<alpha>. \<lbrakk>FL_Formula.hereditarily_fs t\<^sub>\<alpha>; Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)\<rbrakk> \<Longrightarrow> Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
FL_Formula.hereditarily_fs t\<^sub>\<alpha>
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> t\<^sub>\<alpha>)
goal (1 subgoal):
1. Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
[PROOF STEP]
by (simp add: Formula.hereditarily_fs.Act\<^sub>\<alpha>)
[PROOF STATE]
proof (state)
this:
Formula.hereditarily_fs (L_transform_Tree\<^sub>\<alpha> (FL_Formula.Act\<^sub>\<alpha> f \<alpha> t\<^sub>\<alpha>))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4510, "file": "Modal_Logics_for_NTS_L_Transform", "length": 22}
|
# To view in browser start a server in the build dir:
# python -m http.server --bind localhost
using Documenter
using QuasinormalModes
makedocs(sitename = "QuasinormalModes.jl",
modules = [QuasinormalModes],
pages = [
"index.md",
"intro.md",
"org.md",
"schw.md",
"sho.md",
"api_ref.md"
]
)
deploydocs(
repo = "github.com/lucass-carneiro/QuasinormalModes.jl.git",
)
|
{"hexsha": "7603d2ccbb6bbcc1fd5735dc64b574008d9c7a8b", "size": 432, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "eschnett/QuasinormalModes.jl", "max_stars_repo_head_hexsha": "7ec50c3f565f6cda7501baa0bc589e445873a06e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "eschnett/QuasinormalModes.jl", "max_issues_repo_head_hexsha": "7ec50c3f565f6cda7501baa0bc589e445873a06e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "eschnett/QuasinormalModes.jl", "max_forks_repo_head_hexsha": "7ec50c3f565f6cda7501baa0bc589e445873a06e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.6363636364, "max_line_length": 64, "alphanum_fraction": 0.6134259259, "num_tokens": 122}
|
import struct
from typing import List
import numpy as np
from .block import Block
def __write_plot3D_block_binary(f,B:Block):
"""Write binary plot3D block which contains X,Y,Z
default format is Big-Endian
Args:
f (IO): file handle
B (Block): writes a single block to a file
"""
'''
https://docs.python.org/3/library/struct.html
'''
def write_var(V:np.ndarray):
for k in range(B.KMAX):
for j in range(B.JMAX):
for i in range(B.IMAX):
f.write(struct.pack('f',V[i,j,k]))
write_var(B.X)
write_var(B.Y)
write_var(B.Z)
def __write_plot3D_block_ASCII(f,B:Block,columns:int=6):
"""Write plot3D block in ascii format
Args:
f (IO): file handle
B (Block): writes a single block to a file
columns (int, optional): Number of columns in the file. Defaults to 6.
"""
def write_var(V:np.ndarray):
bNewLine = False
indx = 0
for k in range(B.KMAX):
for j in range(B.JMAX):
for i in range(B.IMAX):
f.write('{0:8.8f} '.format(V[i,j,k]))
bNewLine=False
indx+=1
if (indx % columns) == 0:
f.write('\n')
bNewLine=True
if not bNewLine:
f.write('\n')
write_var(B.X)
write_var(B.Y)
write_var(B.Z)
def write_plot3D(filename:str,blocks:List[Block],binary:bool=True):
"""Writes blocks to a Plot3D file
Args:
filename (str): name of the file to create
blocks (List[Block]): List containing all the blocks to write
binary (bool, optional): Binary big endian. Defaults to True.
"""
if binary:
with open(filename,'wb') as f:
f.write(struct.pack('I',len(blocks)))
for b in blocks:
IMAX,JMAX,KMAX = b.X.shape
f.write(struct.pack('I',IMAX))
f.write(struct.pack('I',JMAX))
f.write(struct.pack('I',KMAX))
for b in blocks:
__write_plot3D_block_binary(f,b)
else:
with open(filename,'w') as f:
f.write('{0:d}\n'.format(len(blocks)))
for b in blocks:
IMAX,JMAX,KMAX = b.X.shape
f.write('{0:d} {1:d} {2:d}\n'.format(IMAX,JMAX,KMAX))
for b in blocks:
__write_plot3D_block_ASCII(f,b)
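# Usage sketch (illustrative; assumes Block can be built from three
# equal-shape X, Y, Z arrays -- check block.py for the real constructor):
#
#   import numpy as np
#   X, Y, Z = np.meshgrid(np.arange(4.), np.arange(3.), np.arange(2.),
#                         indexing='ij')
#   b = Block(X, Y, Z)
#   write_plot3D('single_block.xyz', [b], binary=False)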
|
{"hexsha": "5dd06b12992488ef05925ce3026b6ce179212d3f", "size": 2604, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/plot3d/write.py", "max_stars_repo_name": "ckeokot/Plot3D_utilities", "max_stars_repo_head_hexsha": "7bba70aeb48d8577ff582e999e8ce186c68d0189", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-08-12T06:40:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:31:49.000Z", "max_issues_repo_path": "python/plot3d/write.py", "max_issues_repo_name": "ckeokot/Plot3D_utilities", "max_issues_repo_head_hexsha": "7bba70aeb48d8577ff582e999e8ce186c68d0189", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-09-30T05:39:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T18:14:15.000Z", "max_forks_repo_path": "python/plot3d/write.py", "max_forks_repo_name": "ckeokot/Plot3D_utilities", "max_forks_repo_head_hexsha": "7bba70aeb48d8577ff582e999e8ce186c68d0189", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-08-11T18:54:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T15:53:12.000Z", "avg_line_length": 31.3734939759, "max_line_length": 81, "alphanum_fraction": 0.5322580645, "include": true, "reason": "import numpy", "num_tokens": 647}
|
from pathlib import Path
import json
import re
import numpy as np
import os
from collections import OrderedDict
from .TxtMpiFile import TxtMpiFile
from .BaseSource import BaseSource
from tweezers.meta import MetaDict, UnitDict
class TxtMpiSource(BaseSource):
"""
Data source for \*.txt files from the MPI with the old style header or the new JSON format.
"""
data = None
psd = None
ts = None
def __init__(self, data=None, psd=None, ts=None):
"""
Args:
path (:class:`patlhlib.Path`): path to file to read, if the input is of a different type, it is given to
:class:`pathlibh.Path` to try to create an instance
"""
super().__init__()
# go through input
if data:
self.data = TxtMpiFile(data)
if psd:
self.psd = TxtMpiFile(psd)
if ts:
self.ts = TxtMpiFile(ts)
@staticmethod
def isDataFile(path):
"""
Checks if a given file is a valid data file and returns its ID and type.
Args:
path (:class:`pathlib.Path`): file to check
Returns:
:class:`dict` with `id` and `type`
"""
pPath = Path(path)
m = re.match('^((?P<type>[A-Z]+)_)?(?P<id>(?P<trial>[0-9]{1,3})_Date_[0-9_]{19})\.txt$',
pPath.name)
if m:
tipe = 'data'
if m.group('type'):
tipe = m.group('type').lower()
res = {'id': m.group('id'),
'trial': m.group('trial'),
'type': tipe,
'path': pPath}
return res
else:
return False
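# Example of the naming scheme the regex above expects (hypothetical file
# names, shown for illustration only):
#   '12_Date_2013_05_17_14_21_09.txt'     -> id '12_Date_...', type 'data'
#   'PSD_12_Date_2013_05_17_14_21_09.txt' -> same id, type 'psd'
#   'TS_12_Date_2013_05_17_14_21_09.txt'  -> same id, type 'ts'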
@classmethod
def getAllSources(cls, path):
"""
Get a list of all IDs and their files that are at the given path and its subfolders.
Args:
path (:class:`pathlib.Path`): root path for searching
Returns:
:class:`collections.OrderedDict` mapping dataset IDs to :class:`TxtMpiSource` instances
"""
_path = Path(path)
# get a list of all files and their properties
files = cls.getAllFiles(_path)
sources = OrderedDict()
# sort files that belong to the same id
for el in files:
if el['id'] not in sources.keys():
sources[el['id']] = cls()
setattr(sources[el['id']], el['type'], TxtMpiFile(el['path']))
return sources
def getMetadata(self):
"""
Return the metadata of the experiment.
Returns:
:class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
"""
# keep variables local so they are not stored in memory
meta, units = self.getDefaultMeta()
# check each available file for header information
# sequence is important since later calls overwrite earlier ones so if a header is present in "psd" and
# "data", the value from "data" will be returned
if self.ts:
# get header data from file
metaTmp, unitsTmp = self.ts.getMetadata()
# make sure we don't override important stuff that by accident has the same name
self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
# set time series unit
unitsTmp['timeseries'] = 'V'
# update the dictionaries with newly found values
meta.update(metaTmp)
units.update(unitsTmp)
if self.psd:
metaTmp, unitsTmp = self.psd.getMetadata()
# make sure we don't override important stuff that by accident has the same name
# also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data
self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
# set psd unit
unitsTmp['psd'] = 'V^2 / Hz'
meta.update(metaTmp)
units.update(unitsTmp)
if self.data:
metaTmp, unitsTmp = self.data.getMetadata()
# rename variables for the sake of consistency and compatibility with Matlab and because the naming is
# confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data already
# the sampling rate should describe the actual time step between data points not something else
if 'recordingRate' in metaTmp:
self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)
self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)
self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)
# add trial number
metaTmp['trial'] = self.data.getTrialNumber()
# update dictionaries
meta.update(metaTmp)
units.update(unitsTmp)
# add title string to metadata, used for plots
self.setTitle(meta)
# make sure all axes have the beadDiameter
meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']
units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']
meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']
units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']
# add trap names
meta['traps'] = meta.subDictKeys()
return meta, units
def getData(self):
"""
Return the experiment data.
Returns:
:class:`pandas.DataFrame`
"""
if not self.data:
raise ValueError('No data file given.')
return self.data.getData()
def getDataSegment(self, tmin, tmax, chunkN=10000):
"""
Returns the data between ``tmin`` and ``tmax``.
Args:
tmin (float): minimum data timestamp
tmax (float): maximum data timestamp
chunkN (int): number of rows to read per chunk
Returns:
:class:`pandas.DataFrame`
"""
meta, units = self.getMetadata()
nstart = int(meta.samplingRate * tmin)
nrows = int(meta.samplingRate * (tmax - tmin))
return self.data.getDataSegment(nstart, nrows)
def getPsd(self):
"""
Return the PSD of the thermal calibration of the experiment as computed by LabView.
Returns:
:class:`pandas.DataFrame`
"""
if not self.psd:
raise ValueError('No PSD file given.')
# read psd file which also contains the fitting
data = self.psd.getData()
# ignore the fitting
titles = [title for title, column in data.iteritems() if not title.endswith('Fit')]
return data[titles]
def getPsdFit(self):
"""
Return the LabView fit of the Lorentzian to the PSD.
Returns:
:class:`pandas.DataFrame`
"""
if not self.psd:
raise ValueError('No PSD file given.')
# the fit is in the psd file
data = self.psd.getData()
# only choose frequency and fit columns
titles = [title for title, column in data.iteritems() if title.endswith('Fit') or title == 'f']
return data[titles]
def getTs(self):
"""
Return the time series recorded for thermal calibration.
Returns:
:class:`pandas.DataFrame`
"""
if not self.ts:
raise ValueError('No time series file given.')
data = self.ts.getData()
# remove "Diff" from column headers
columnHeader = [title.split('Diff')[0] for title in data.columns]
data.columns = columnHeader
return data
@staticmethod
def calculateForce(meta, units, data):
"""
Calculate forces from Diff signal and calibration values.
Args:
meta (:class:`.MetaDict`): metadata
units (:class:`.UnitDict`): unit metadata
data (:class:`pandas.DataFrame`): data
Returns:
Updated versions of the input parameters
* meta (:class:`.MetaDict`)
* units (:class:`.UnitDict`)
* data (:class:`pandas.DataFrame`)
"""
# calculate force per trap and axis
for trap in meta['traps']:
m = meta[trap]
data[trap + 'Force'] = (data[trap + 'Diff'] - m['zeroOffset']) \
/ m['displacementSensitivity'] \
* m['stiffness']
units[trap + 'Force'] = 'pN'
# invert PM force, is not as expected in the raw data
# data.pmYForce = -data.pmYForce
# calculate mean force per axis, only meaningful for two traps
data['xForce'] = (data.pmXForce + data.aodXForce) / 2
data['yForce'] = (data.pmYForce - data.aodYForce) / 2
units['xForce'] = 'pN'
units['yForce'] = 'pN'
return meta, units, data
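# Worked example (illustrative numbers only, not from a real calibration):
# with Diff = 0.5 V, zeroOffset = 0.1 V, displacementSensitivity = 0.002 V/nm
# and stiffness = 0.05 pN/nm, the force evaluates to
# (0.5 - 0.1) / 0.002 * 0.05 = 200 nm * 0.05 pN/nm = 10 pN.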
def postprocessData(self, meta, units, data):
"""
Create time array, calculate forces etc.
Args:
meta (:class:`tweezers.MetaDict`): meta dictionary
units (:class:`tweezers.UnitDict`): units dictionary
data (:class:`pandas.DataFrame`): data
Returns:
Updated versions of the input parameters
* meta (:class:`.MetaDict`)
* units (:class:`.UnitDict`)
* data (:class:`pandas.DataFrame`)
"""
data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])
units['time'] = 's'
meta, units, data = self.calculateForce(meta, units, data)
data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)
units['distance'] = 'nm'
return meta, units, data
def setTitle(self, meta):
"""
Set the 'title' key in the metadata dictionary based on date and trial number if they are available. This
string is e.g. used for plots.
Args:
meta (:class:`tweezers.MetaDict`): metadata dictionary, updated in place with a 'title' key
"""
title = ''
try:
title += meta['date'] + ' '
except KeyError:
pass
try:
title += meta['time'] + ' '
except KeyError:
pass
try:
title += meta['trial']
except KeyError:
pass
meta['title'] = title.strip()
def save(self, container, path=None):
"""
Writes the data of a :class:`tweezers.TweezersData` to disk. This preserves the `data` and `thermalCalibration`
folder structure. `path` should be the folder that holds these subfolders. If it is empty, the original files
will be overwritten.
Args:
container (:class:`tweezers.TweezersData`): data to write
path (:class:`pathlib.Path`): path to a folder for the dataset, if not set, the original data will be
overwritten
"""
if path is not None and not isinstance(path, Path):
path = Path(path)
data = ['ts', 'psd', 'data']
# list of input files and their data from the container, these are the ones we're writing back
# this is also important for the laziness of the TweezerData object
files = [[getattr(self, file), getattr(container, file)] for file in data if getattr(self, file)]
if not files:
return
# get root path if not given
if not path:
path = files[0][0].path.parents[1]
meta = container.meta
meta['units'] = container.units
# now write all of it
for file in files:
filePath = path / file[0].path.parent.name / file[0].path.name
self.writeData(meta, file[1], filePath)
def writeData(self, meta, data, path):
"""
Write experiment data back to a target file. Note that this writes the data in a `UTF-8` encoding.
Implementing this is not required for a data source but used here to convert the header to JSON.
Args:
meta (:class:`tweezers.MetaDict`): meta data to store
data (:class:`pandas.DataFrame`): data to write back
path (:class:`pathlib.Path`): path where to write the file
"""
# ensure directory exists
try:
os.makedirs(str(path.parent))
except FileExistsError:
pass
# write the data
with path.open(mode='w', encoding='utf-8') as f:
f.write(json.dumps(meta,
indent=4,
ensure_ascii=False,
sort_keys=True))
f.write("\n\n#### DATA ####\n\n")
data.to_csv(path_or_buf=str(path), sep='\t', mode='a', index=False)
def getDefaultMeta(self):
"""
Set default values for metadata and units. This will be overwritten by values in the data files if they exist.
Returns:
:class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
"""
meta = MetaDict()
units = UnitDict()
# meta[self.getStandardIdentifier('tsSamplingRate')] = 80000
#
# units[self.getStandardIdentifier('tsSamplingRate')] = 'Hz'
return meta, units
def renameKey(self, oldKey, newKey, meta=None, units=None):
"""
Rename a key in the meta- and units-dictionaries. Does not work for nested dictionaries.
Args:
meta (:class:`tweezers.MetaDict`): meta dictionary
units (:class:`tweezers.UnitDict`): units dictionary (can be an empty one if not required)
oldKey (str): key to be renamed
newKey (str): new key name
"""
if meta:
if oldKey not in meta:
return
meta.replaceKey(oldKey, newKey)
if units:
if oldKey not in units:
return
units.replaceKey(oldKey, newKey)
|
{"hexsha": "d6ca0b78e18a4bf98def2fc3af39ef75294bf852", "size": 14129, "ext": "py", "lang": "Python", "max_stars_repo_path": "tweezers/io/TxtMpiSource.py", "max_stars_repo_name": "DollSimon/tweezers", "max_stars_repo_head_hexsha": "7c9b3d781c53f7728526a8242aa9e1d671f15688", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tweezers/io/TxtMpiSource.py", "max_issues_repo_name": "DollSimon/tweezers", "max_issues_repo_head_hexsha": "7c9b3d781c53f7728526a8242aa9e1d671f15688", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tweezers/io/TxtMpiSource.py", "max_forks_repo_name": "DollSimon/tweezers", "max_forks_repo_head_hexsha": "7c9b3d781c53f7728526a8242aa9e1d671f15688", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0385487528, "max_line_length": 119, "alphanum_fraction": 0.5579304976, "include": true, "reason": "import numpy", "num_tokens": 3252}
|
!***********************************************************************
! *
SUBROUTINE ENGOUT1(EAV, E, JTOT, IPAR, ILEV, NN, MODE, K)
! *
! This subroutine prints energy levels, splittings, and energies *
! relative to the lowest in Hartrees, Kaysers, and eV, using the *
! reduced mass corrected value for the Rydberg. If MODE is 0, only *
! the eigenenergies are printed. If MODE is 1, the eigenenergies *
! and separations are printed. If MODE is 2, the eigenenergies and *
! energies relative to level 1 are printed. If MODE is 3, the eig- *
! enenergies, separations, and energies relative to level 1 are *
! printed. *
! Last updated: 15 Oct 1992 *
! *
!***********************************************************************
!...Translated by Pacific-Sierra Research 77to90 4.3E 13:35:54 1/ 3/07
!...Modified by Charlotte Froese Fischer
! Gediminas Gaigalas 10/05/17
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE vast_kind_param, ONLY: DOUBLE
USE def_C, ONLY: AUCM, AUEV, CCMS, FASI, FBSI
USE jlabl_C, LABJ=>JLBR, LABP=>JLBP
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER , INTENT(IN) :: NN
INTEGER :: MODE
INTEGER , INTENT(IN) :: K
REAL(DOUBLE) , INTENT(IN) :: EAV
INTEGER , INTENT(IN) :: JTOT(NN)
INTEGER , INTENT(IN) :: IPAR(NN)
INTEGER , INTENT(IN) :: ILEV(NN)
REAL(DOUBLE) , INTENT(IN) :: E(NN)
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: J, I, IP
REAL(DOUBLE) :: EAU, ECM, EEV
!-----------------------------------------------
!
! Always print the eigenenergies
!
IF (K == 1) WRITE (24, 299)
IF (K == 2) WRITE (24, 300)
WRITE (24, 301)
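! EAU is in Hartrees; AUCM and AUEV (from def_C) convert it to Kaysers
! (cm**-1) and eV respectively. Note that EEV is computed in the loop
! below but only Hartrees and Kaysers are written out in this version.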
DO J = 1, NN
I = ILEV(J)
EAU = E(J) + EAV
ECM = EAU*AUCM
EEV = EAU*AUEV
IP = (IPAR(J)+3)/2
WRITE (24, 302) I, LABJ(JTOT(J)), LABP(IP), EAU, ECM
END DO
!
RETURN
!
299 FORMAT('Eigenenergies for the initial state list')
300 FORMAT('Eigenenergies for the final state list')
301 FORMAT('Level J Parity',10X,'Hartrees',18X,'Kaysers')
302 FORMAT(1I3,4X,2A4,1P,2D25.15)
303 FORMAT('Energy of each level relative to immediately lower',' level:')
304 FORMAT('Energy of each level relative to lowest level:')
RETURN
!
END SUBROUTINE ENGOUT1
|
{"hexsha": "a4ea98b948c5f2be02e34d965b99dd63596af770", "size": 2940, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/appl/rtransition90_mpi/engout1.f90", "max_stars_repo_name": "sylas/grasp-continuum", "max_stars_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-03-10T04:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:01:15.000Z", "max_issues_repo_path": "src/appl/rtransition90_mpi/engout1.f90", "max_issues_repo_name": "sylas/grasp-continuum", "max_issues_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2019-03-07T17:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T16:45:24.000Z", "max_forks_repo_path": "src/appl/rtransition90_mpi/engout1.f90", "max_forks_repo_name": "sylas/grasp-continuum", "max_forks_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-10T04:00:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:06:40.000Z", "avg_line_length": 42.6086956522, "max_line_length": 76, "alphanum_fraction": 0.4360544218, "num_tokens": 783}
|
import numpy as np
import torch
# Parameters
_dt = 0.05
_max_a = 5.0
# Note: Because the system is relatively simple,
# we can manually compute the region of attraction
# (RoA) for the bicycle. In particular, the LQR
# brings the bicycle to a stop as quickly as possible,
# within the acceleration bounds. Thus, a point is
# guaranteed to be stable if the LQR can bring the
# bicycle to a complete stop in one step from that
# point (and a zero velocity, zero acceleration point
# is invariant).
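# Concretely: stopping in a single step requires a = -v / dt with
# |a| <= max_a, i.e. |v| <= max_a * dt = 5.0 * 0.05 = 0.25 m/s. A quick
# sanity check (illustrative, matching is_bicycle_stable below):
#
# v_max_stoppable = _max_a * _dt # 0.25 m/s
# assert abs(-0.2 / _dt) <= _max_a # 0.2 m/s can stop in one step
# assert abs(-0.3 / _dt) > _max_a # 0.3 m/s cannot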
# Check if the given state is safe, i.e., the bicycle has
# not collided with an obstacle.
#
# state: np.array([state_dim])
# x_obstacle_0: float
# x_obstacle_1: float
# obstacle_radius: float
# return: bool
def is_bicycle_safe(state, x_obstacle_0, x_obstacle_1, obstacle_radius):
# Step 1: Unpack values
xb, yb, xf, yf, _, y_obstacle_0, y_obstacle_1 = state
# Step 2: Convert to lists
zs = [(xf, yf), (xb, yb)]
z_obstacles = [(x_obstacle_0, y_obstacle_0), (x_obstacle_1, y_obstacle_1)]
# Step 3: Compute safety
for z in zs:
for z_obstacle in z_obstacles:
dist = np.square(z[0] - z_obstacle[0]) + np.square(z[1] - z_obstacle[1])
if dist <= obstacle_radius * obstacle_radius:
return False
return True
# Safe policy for bicycle. Comes to a stop as
# quickly as possible.
class BicycleSafePolicy:
# Get safe backup action.
#
# state: torch.tensor([state_dim])
def act(self, state):
# Step 1: Get velocity
v = state[4]
# Step 2: Compute optimal action
opt_a = -v / _dt
# Step 3: Clamp action
a = np.clip(opt_a, -_max_a, _max_a)
# Step 4: Build action
return torch.tensor([a, 0.0], dtype=torch.float)
# Check if the given state is stable, i.e., the bicycle
# can safely stop without colliding with an obstacle.
#
# state: np.array([state_dim])
# x_obstacle_0: float
# x_obstacle_1: float
# obstacle_radius: float
# return: bool
def is_bicycle_stable(state, x_obstacle_0, x_obstacle_1, obstacle_radius):
# Step 1: Get velocity
v = state[4]
# Step 2: Compute optimal action
opt_a = -v / _dt
# Step 3: Check to make sure we can stop in one step
if np.abs(opt_a) > _max_a:
return False
# Step 4: Take a step
state = torch.tensor(state, dtype=torch.float)
action = torch.tensor([opt_a, 0.0], dtype=torch.float)
next_state = BicycleEnv().step_torch(state, action)
next_state = next_state.detach().numpy()
# Step 5: Check safety of next state
return is_bicycle_safe(next_state, x_obstacle_0, x_obstacle_1, obstacle_radius)
# States are np.array([state_dim]), where state_dim = 7
# representing a vector
#
# [ xb, yb, xf, yf, v, y_obstacle_0, y_obstacle_1 ]
#
# where [ xb, yb ] are the coordinates of the back of the car,
# [ xf, yf ] are the coordinates of the front of the car,
# v is the velocity, and y_obstacle_0, y_obstacle_1 are the
# (randomized) y-positions of the two obstacles.
#
# Actions are np.array([action_dim]), where action_dim = 2
# representing a vector
#
# [ a, t ]
#
# where a is the acceleration and t is the steering angle.
#
# Rewards are given according to
#
# (i) a per time step reward penalizing large actions
# (ii) a final step reward measuring distance to the goal,
# which specifies the desired location of the
# front of the car
#
class BicycleEnv:
# Initializes the goal, time step, and weight of action portion of the loss.
def __init__(self):
# dynamics parameters
self.dt = _dt
self.max_a = _max_a
self.max_steps_ = 200
# action loss parameters
self.action_weight = 0.01
# goal loss parameters
self.goal = np.array([1.0, 0.0])
# obstacle loss parameters
self.obstacle_weight = 100.0
self.obstacle_radius = 0.05
self.x_obstacle_0 = 0.4
self.x_obstacle_1 = 0.7
#
# Gym functions
#
# Render the current state.
#
# state: np.array([state_dim])
# action: np.array([action_dim])
def render(self, state, action):
print(state,
action,
'safe',
self.is_safe(state),
'stable',
self.is_stable(state),
self._goal_loss_torch(torch.tensor(state, dtype=torch.float)).item(),
self._obstacle_loss_torch(torch.tensor(state, dtype=torch.float)).item(),
self._action_loss_torch(torch.tensor(action, dtype=torch.float)).item())
# Get the final reward.
#
# state: np.array([state_dim])
# action: np.array([action_dim])
def final_loss(self, state, action):
return state[2]
# Compute whether the current state is safe.
#
# state: np.array([state_dim])
def is_safe(self, state):
return is_bicycle_safe(state, self.x_obstacle_0, self.x_obstacle_1, self.obstacle_radius)
# Compute whether the current state is stable.
#
# state: np.array([state_dim])
def is_stable(self, state):
return is_bicycle_stable(state, self.x_obstacle_0, self.x_obstacle_1, self.obstacle_radius)
# Close the environment.
def close(self):
pass
# Maximum number of steps.
#
# return: int
def max_steps(self):
return self.max_steps_
#
# TorchEnv functions
#
# Get a random initial state.
#
# return: np.array([state_dim])
def reset_torch(self):
y_obstacle_0 = 2.0 * self.obstacle_radius * np.random.uniform() - self.obstacle_radius
y_obstacle_1 = 2.0 * self.obstacle_radius * np.random.uniform() - self.obstacle_radius
state = np.array([-0.1, 0.0, 0.0, 0.0, 0.0, y_obstacle_0, y_obstacle_1])
return torch.tensor(state, dtype=torch.float)
# Takes a single step according to the dynamics,
# using torch tensors to enable automatic gradient computations.
#
# state: torch.tensor([state_dim])
# action: torch.tensor([action_dim])
# return: torch.tensor([state_dim])
def step_torch(self, state, action):
# Step 1: Unpack values
dt = self.dt
max_a = self.max_a
ns = state.clone()
a, t = action
# Step 2: Threshold acceleration
a = torch.clamp(a, -max_a, max_a)
# Step 3: Update car velocity
ns[4] += a * dt
# Step 4: Update car front x and y
XL = ns[2] - ns[0]
YL = ns[3] - ns[1]
# H is a constant, so we discard the gradient computation
H = np.sqrt((XL.pow(2) + YL.pow(2)).item())
coa = XL/H
sia = YL/H
DY = ns[4] * dt * (torch.sin(t) * coa + torch.cos(t) * sia)
DX = ns[4] * dt * (torch.cos(t) * coa - torch.sin(t) * sia)
ns[2] += DX
ns[3] += DY
# Step 5: Update car back x and y
tt = (DX + XL) * coa + (DY + YL) * sia
st = 4.0 * tt * tt - 4.0 * (DX * DX + 2.0 * DX * XL + DY * (DY + 2.0 * YL))
q = DX * coa + XL * coa + DY * sia + YL * sia - 0.5 * torch.sqrt(st)
ns[0] += q * coa
ns[1] += q * sia
return ns
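# Geometry note: H is the (fixed) front-to-back wheelbase length; the
# front advances by v*dt along the heading (coa, sia) rotated by the
# steering angle t, and the back displacement q along the old heading is
# the root of a quadratic chosen so the front-to-back distance stays H.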
# Loss function for a given step, computed using torch
# tensors to enable automatic gradient computation.
#
# state: torch.tensor([state_dim])
# action: torch.tensor([action_dim])
# return: torch.tensor(1)
def loss_torch(self, state, action):
# Step 1: Loss from distance to goal
loss = self._goal_loss_torch(state)
# Step 2: Loss from distance to obstacles
loss += self._obstacle_loss_torch(state)
# Step 3: Loss from action
loss += self._action_loss_torch(action)
return loss
# Get the loss for distance to goal.
#
# state: torch.tensor([state_dim])
# return: torch.tensor(1)
def _goal_loss_torch(self, state):
# Step 1: Unpack values
_, _, xf, yf, _, _, _ = state
xf_goal, yf_goal = self.goal
# Step 2: Compute loss
loss = (xf - xf_goal).pow(2) + (yf - yf_goal).pow(2)
return loss
# Get the loss for distance to obstacles.
#
# state: torch.tensor([state_dim])
# return: torch.tensor(1)
def _obstacle_loss_torch(self, state):
# Step 1: Unpack values
xb, yb, xf, yf, _, y_obstacle_0, y_obstacle_1 = state
x_obstacle_0 = self.x_obstacle_0
x_obstacle_1 = self.x_obstacle_1
obstacle_radius = self.obstacle_radius
# Step 2: Convert to lists
zs = [(xf, yf), (xb, yb)]
z_obstacles = [(x_obstacle_0, y_obstacle_0), (x_obstacle_1, y_obstacle_1)]
# Step 3: Compute loss
loss = 0.0
for z in zs:
for z_obstacle in z_obstacles:
cur_loss = 2.0 * obstacle_radius * obstacle_radius
cur_loss -= (z[0] - z_obstacle[0]).pow(2)
cur_loss -= (z[1] - z_obstacle[1]).pow(2)
cur_loss = torch.max(torch.tensor([0.0, cur_loss], dtype=torch.float))
loss += cur_loss
return self.obstacle_weight * loss
# Get the loss for the action
#
# action: torch.tensor([action_dim])
# return: torch.tensor(1)
def _action_loss_torch(self, action):
return self.action_weight * action.pow(2).sum()
# Bicycle environment for training the
# recovery policy.
class BicycleRecoveryEnv:
# Initializes the parameters of the model.
#
# policy: BPTTPolicy
def __init__(self, policy):
# Step 1: Environment
self.env = BicycleEnv()
# Step 2: Policy
self.policy = policy
#
# Gym functions
#
# Render the current state.
#
# state: np.array([state_dim])
# action: np.array([action_dim])
def render(self, state, action):
self.env.render(state, action)
# Close the environment
def close(self):
self.env.close()
#
# TorchEnv functions
#
# Get a random initial state. In particular,
# execute the environment for a uniformly random
# number of steps using the given policy, and
# then return the final state.
#
# return: torch.tensor([state_dim])
def reset_torch(self):
# Step 1: Randomly sample number of steps to take
steps = np.random.randint(self.env.max_steps())
# Step 2: Randomly sample initial state
state = self.env.reset_torch()
# Step 3: Simulate environment for that many steps
for _ in range(steps):
# Step 3a: Compute action
action = self.policy.act_torch(state)
# Step 3b: Compute state transition
state = self.env.step_torch(state, action)
# Step 3c: Break if unsafe
if not self.env.is_safe(state.detach().numpy()):
break
# Step 4: Clean state
state = torch.tensor(state.detach().numpy(), dtype=torch.float)
return state
# Take a single step according to the dynamics,
# using torch tensors to enable automatic gradient computations.
#
# state: torch.tensor([state_dim])
# action: torch.tensor([1])
# return: torch.tensor([state_dim])
def step_torch(self, state, action):
new_state = self.env.step_torch(state, action)
return new_state
# Loss function for a given step, computed using torch
# tensors to enable automatic gradient computation.
#
# state: torch.tensor([state_dim])
# action: torch.tensor([action_dim])
# return: torch.tensor(1)
def loss_torch(self, state, action):
# Step 1: Loss from distance to goal
loss = self._goal_loss_torch(state)
# Step 2: Loss from distance to obstacles
loss += self.env._obstacle_loss_torch(state)
# Step 3: Loss from action
loss += self.env._action_loss_torch(action)
return loss
# Get the loss for distance to goal, which is v = 0.
#
# state: torch.tensor([state_dim])
# return: torch.tensor(1)
def _goal_loss_torch(self, state):
# Step 1: Unpack values
_, _, _, _, v, _, _ = state
# Step 2: Compute loss
loss = v.pow(2)
return loss
# Maximum number of steps.
#
# return: int
def max_steps(self):
return self.env.max_steps()
|
{"hexsha": "0c2a08e0251755f2e28845f5cf13972ad18697a6", "size": 12179, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/spire/env/bicycle.py", "max_stars_repo_name": "obastani/model-predictive-shielding", "max_stars_repo_head_hexsha": "8d74b38f809ea39ea54dfa028d9498767a6f8650", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-08-01T10:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T07:17:39.000Z", "max_issues_repo_path": "python/spire/env/bicycle.py", "max_issues_repo_name": "obastani/model-predictive-shielding", "max_issues_repo_head_hexsha": "8d74b38f809ea39ea54dfa028d9498767a6f8650", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/spire/env/bicycle.py", "max_forks_repo_name": "obastani/model-predictive-shielding", "max_forks_repo_head_hexsha": "8d74b38f809ea39ea54dfa028d9498767a6f8650", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8504901961, "max_line_length": 99, "alphanum_fraction": 0.6117907874, "include": true, "reason": "import numpy", "num_tokens": 3326}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from argparse import ArgumentParser
from os.path import expandvars
usage = "usage: %prog [options] inputfile"
parser = ArgumentParser(usage)
parser.add_argument("-n", "--numevents", type=int, default=1,
dest="NUMEVENTS", help="The number of events per run")
parser.add_argument("-s", "--seed",type=int,default=12345,
dest="SEED", help="Initial seed for the random number generator")
parser.add_argument("-r", "--runnumber", type=int, default=1,
dest="RUNNUMBER", help="The run number for this simulation")
parser.add_argument("-x", "--xmlfile", default=None,
dest="JSONFILE", help="Write statistics to JSONFILE")
parser.add_argument("--oversize", default=1,
dest="OVERSIZE", help="DOM oversize factor")
parser.add_argument("--energy", default=1e3, type=float,
dest="ENERGY", help="Particle energy in GeV")
parser.add_argument("--type", default="EMinus",
dest="PARTICLE_TYPE", help="Particle type")
parser.add_argument("--propagate-muons", default=False, action="store_true",
dest="PROPAGATE_MUONS", help="Include muon propagation in process")
parser.add_argument("--icemodel", default=expandvars("$I3_BUILD/ice-models/resources/models/spice_lea"),
dest="ICEMODEL", help="A clsim ice model file/directory (ice models *will* affect performance metrics, always compare using the same model!)")
parser.add_argument("--unweighted-photons", action="store_true",
help="Propagate all Cherenkov photons. This is ~13x slower than downsampling first.")
parser.add_argument("--cable-position", help='explicitly simulate cable shadow in given position',
choices=('cable_shadow','led7'))
group = parser.add_mutually_exclusive_group()
group.add_argument("--minimal-gcd", action="store_true", default=False,
dest="MINIMALGCD", help="generate a trivial GCD from scratch with only 24 DOMs. There are fewer collision checks, so usually things are faster, but unrealistic.")
group.add_argument("-g", "--gcd-file",
default="/cvmfs/icecube.opensciencegrid.org/data/GCD/GeoCalibDetectorStatus_AVG_55697-57531_PASS2_SPE_withStdNoise.i3.gz", dest="GCDFILE")
parser.add_argument("-d", "--device", type=int, default=None,
dest="DEVICE", help="device number")
group = parser.add_mutually_exclusive_group()
group.add_argument("--use-cpu", action="store_true", default=False,
dest="USECPU", help="simulate using CPU instead of GPU")
group.add_argument("--use-cuda", action="store_true", default=False,
dest="CUDA", help="use CUDA kernel instead of OpenCL")
parser.add_argument("--double-buffering", default=False, action="store_true",
help="Interleave kernel execution and i/o")
# parse cmd line args, bail out if anything is not understood
options = parser.parse_args()
if options.DEVICE is not None:
print(" ")
print(" ** DEVICE selected using the \"-d/--device\" command line option. Only do this if you know what you are doing!")
print(" ** You should be using the CUDA_VISIBLE_DEVICES and/or GPU_DEVICE_ORDINAL environment variables instead.")
if options.MINIMALGCD:
parser.error("--minimal-gcd does not work with I3CLSimClientModule; it needs an external GCD file")
print(" ")
print(" ** You chose to not use a standard IceCube GCD file but instead to create a trivial geometry from scratch.")
print(" ** This geometry only has 24 DOMs, so there are fewer collision checks.")
print(" ** This usually means propagation is faster, but unrealistic. Might differ from GPU type to GPU type.")
from I3Tray import *
import json
import os
import sys
import math
import numpy
from icecube import icetray, dataclasses, dataio, phys_services, sim_services, simclasses, clsim
# icetray.I3Logger.global_logger.set_level(icetray.I3LogLevel.LOG_INFO)
icetray.I3Logger.global_logger.set_level(icetray.I3LogLevel.LOG_WARN)
radius = 120.*I3Units.m
omPos = numpy.array(
[[ 0., 1., 0.],
[ 1., 1., 0.],
[ 1., 0., 0.],
[ 1., -1., 0.],
[ 0., -1., 0.],
[-1., -1., 0.],
[-1., 0., 0.],
[-1., 1., 0.]]
)
# normalize and scale
omPos = (omPos.T/numpy.sqrt(numpy.sum(omPos**2, 1))).T * radius
omPosLower = numpy.array(omPos)
omPosLower.T[2] = omPosLower.T[2] - radius
omPosUpper = numpy.array(omPos)
omPosUpper.T[2] = omPosUpper.T[2] + radius
omPositions = numpy.concatenate((omPosUpper, omPos, omPosLower), axis=0)
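# The 8 unit direction vectors above are scaled to the 120 m radius and
# replicated at z = -radius, 0 and +radius, giving 24 OM positions
# (3 rings of 8) matching the 24 entries of omKeys below.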
omKeys = [
icetray.OMKey(1,1),
icetray.OMKey(2,1),
icetray.OMKey(3,1),
icetray.OMKey(4,1),
icetray.OMKey(5,1),
icetray.OMKey(6,1),
icetray.OMKey(7,1),
icetray.OMKey(8,1),
icetray.OMKey(1,2),
icetray.OMKey(2,2),
icetray.OMKey(3,2),
icetray.OMKey(4,2),
icetray.OMKey(5,2),
icetray.OMKey(6,2),
icetray.OMKey(7,2),
icetray.OMKey(8,2),
icetray.OMKey(1,3),
icetray.OMKey(2,3),
icetray.OMKey(3,3),
icetray.OMKey(4,3),
icetray.OMKey(5,3),
icetray.OMKey(6,3),
icetray.OMKey(7,3),
icetray.OMKey(8,3),
]
class generateEvent(icetray.I3Module):
def __init__(self, context):
icetray.I3Module.__init__(self, context)
self.AddParameter("I3RandomService", "the service", None)
self.AddParameter("Type", "", dataclasses.I3Particle.ParticleType.EMinus)
self.AddParameter("Energy", "", 10.*I3Units.TeV)
self.AddParameter("NEvents", "", 1)
self.AddParameter("XCoord", "", 0.)
self.AddParameter("YCoord", "", 0.)
self.AddParameter("ZCoord", "", 0.)
self.AddOutBox("OutBox")
def Configure(self):
self.rs = self.GetParameter("I3RandomService")
self.particleType = self.GetParameter("Type")
self.energy = self.GetParameter("Energy")
self.nEvents = self.GetParameter("NEvents")
self.xCoord = self.GetParameter("XCoord")
self.yCoord = self.GetParameter("YCoord")
self.zCoord = self.GetParameter("ZCoord")
self.eventCounter = 0
def DAQ(self, frame):
daughter = dataclasses.I3Particle()
daughter.type = self.particleType
daughter.energy = self.energy
daughter.pos = dataclasses.I3Position(self.xCoord,self.yCoord,self.zCoord)
daughter.dir = dataclasses.I3Direction(0.,0.,-1.)
daughter.time = 0.
daughter.location_type = dataclasses.I3Particle.LocationType.InIce
primary = dataclasses.I3Particle()
primary.type = dataclasses.I3Particle.ParticleType.NuE
primary.energy = self.energy
primary.pos = dataclasses.I3Position(self.xCoord,self.yCoord,self.zCoord)
primary.dir = dataclasses.I3Direction(0.,0.,-1.)
primary.time = 0.
primary.location_type = dataclasses.I3Particle.LocationType.Anywhere
mctree = dataclasses.I3MCTree()
mctree.add_primary(primary)
mctree.append_child(primary,daughter)
frame["I3MCTree"] = mctree
self.PushFrame(frame)
self.eventCounter += 1
if self.eventCounter==self.nEvents:
self.RequestSuspension()
class injectFakeGCD(icetray.I3Module):
def __init__(self, context):
icetray.I3Module.__init__(self, context)
self.AddParameter("OMKeys", "", [])
self.AddParameter("OMPositions", "", [])
self.AddParameter("XCoord", "", 0.)
self.AddParameter("YCoord", "", 0.)
self.AddParameter("ZCoord", "", 0.)
self.AddOutBox("OutBox")
def Configure(self):
self.omkeys = self.GetParameter("OMKeys")
self.ompositions = self.GetParameter("OMPositions")
self.xCoord = self.GetParameter("XCoord")
self.yCoord = self.GetParameter("YCoord")
self.zCoord = self.GetParameter("ZCoord")
self.has_been_injected = False
def DAQ(self, frame):
# only inject it once
if self.has_been_injected:
self.PushFrame(frame)
return
self.has_been_injected = True
geometry = dataclasses.I3Geometry()
calibration = dataclasses.I3Calibration()
detectorStatus = dataclasses.I3DetectorStatus()
# fill the geometry map
omgeomap = geometry.omgeo
domcalmap = calibration.dom_cal
domstatusmap = detectorStatus.dom_status
for i, pos in enumerate(self.ompositions):
# copy the position so the shared module-level array is not mutated in place
shiftedPos = numpy.array(pos)
shiftedPos[0] += self.xCoord*I3Units.m
shiftedPos[1] += self.yCoord*I3Units.m
shiftedPos[2] += self.zCoord*I3Units.m
omkey = self.omkeys[i]
newomgeo = dataclasses.I3OMGeo()
newomgeo.omtype = dataclasses.I3OMGeo.OMType.IceCube
newomgeo.orientation = dataclasses.I3Orientation(dataclasses.I3Direction(0.,0.,-1.))
newomgeo.position = dataclasses.I3Position(shiftedPos[0], shiftedPos[1], shiftedPos[2])
omgeomap[omkey] = newomgeo
newdomcal = dataclasses.I3DOMCalibration()
newdomcal.relative_dom_eff = 1.0
domcalmap[omkey] = newdomcal
newdomstatus = dataclasses.I3DOMStatus()
newdomstatus.pmt_hv = 1345.*I3Units.V # some arbitrary setting: >0 and not NaN
domstatusmap[omkey] = newdomstatus
# make GCD frames and fill them with objects
Gframe = icetray.I3Frame(icetray.I3Frame.Geometry)
Cframe = icetray.I3Frame(icetray.I3Frame.Calibration)
Dframe = icetray.I3Frame(icetray.I3Frame.DetectorStatus)
Gframe["I3Geometry"] = geometry
Cframe["I3Calibration"] = calibration
Dframe["I3DetectorStatus"] = detectorStatus
# push the new GCD frames
self.PushFrame(Gframe)
self.PushFrame(Cframe)
self.PushFrame(Dframe)
# push the original Q-frame
self.PushFrame(frame)
tray = I3Tray()
summary = dataclasses.I3MapStringDouble()
tray.context['I3SummaryService'] = summary
# a random number generator
try:
randomService = phys_services.I3SPRNGRandomService(
seed = options.SEED,
nstreams = 10000,
streamnum = options.RUNNUMBER)
except AttributeError:
randomService = phys_services.I3GSLRandomService(
seed = options.SEED*1000000 + options.RUNNUMBER)
if options.MINIMALGCD:
tray.AddModule("I3InfiniteSource","streams",
Stream=icetray.I3Frame.DAQ)
tray.AddModule(injectFakeGCD,"gcd",
OMKeys = omKeys,
OMPositions = omPositions,
# XCoord = xCoord,
# YCoord = yCoord,
# ZCoord = zCoord,
)
else:
tray.AddModule("I3InfiniteSource","streams",
Prefix = options.GCDFILE,
Stream=icetray.I3Frame.DAQ)
tray.AddModule("I3MCEventHeaderGenerator","gen_header",
Year=2009,
DAQTime=158100000000000000,
RunNumber=1,
EventID=1,
IncrementEventID=True)
tray.AddModule(generateEvent, "generateEvent",
I3RandomService = randomService,
NEvents = options.NUMEVENTS,
Energy = options.ENERGY,
Type = getattr(dataclasses.I3Particle.ParticleType, options.PARTICLE_TYPE),
# Energy = 1000.*I3Units.TeV,
# XCoord = xCoord,
# YCoord = yCoord,
# ZCoord = zCoord,
)
MCTreeName="I3MCTree"
photonSeriesName = None
kwargs = {}
if options.cable_position:
from icecube.clsim import GetIceCubeCableShadow
kwargs['CableOrientation'] = GetIceCubeCableShadow.GetIceCubeCableShadow(getattr(GetIceCubeCableShadow, 'from_{}'.format(options.cable_position)))
tray.AddSegment(clsim.I3CLSimMakeHits, "makeCLSimHits",
GCDFile = options.GCDFILE,
PhotonSeriesName = photonSeriesName,
MCTreeName = MCTreeName,
RandomService = randomService,
MCPESeriesName = "MCPESeriesMap",
UnshadowedFraction = 0.95,
UseGPUs=not options.USECPU,
UseCPUs=options.USECPU,
UseOnlyDeviceNumber=options.DEVICE,
UseCUDA=options.CUDA,
EnableDoubleBuffering=options.double_buffering,
UseI3PropagatorService=options.PROPAGATE_MUONS,
IceModelLocation=options.ICEMODEL,
DOMOversizeFactor=options.OVERSIZE,
UnWeightedPhotons=options.unweighted_photons,
**kwargs
)
icetray.logging.set_level_for_unit('I3CLSimServer', 'INFO')
icetray.logging.set_level_for_unit('I3CLSimStepToPhotonConverterOpenCL', 'INFO')
from datetime import datetime
t0 = datetime.now()
tray.Execute()
walltime_in_execute = ((datetime.now() - t0).total_seconds())*1e9
del tray
if options.JSONFILE:
with open(options.JSONFILE, 'w') as f:
json.dump(dict(summary), f, indent=1)
########### this is optional and just parses the generated summary
import numpy as np
def get(key, aggregate=np.mean, default=0):
pkey = 'I3CLSimModule_makeCLSimHits_makePhotons_clsim_'+key
return aggregate([summary.get(k,default) for k in summary.keys() if k.startswith(pkey)])
ns_per_photon = get('AverageDeviceTimePerPhoton')
ns_per_photon_with_util = get('AverageHostTimePerPhoton')
device_util = get('DeviceUtilization')
ncalls = get('NumKernelCalls', sum)
if ncalls == 0:
sys.stderr.write("Not enough kernel calls to estimate performance! Trying increasing the number of events.\n")
sys.stderr.write("Summary:\n")
json.dump(dict(summary), sys.stderr, indent=1)
sys.exit(1)
total_host_time = get('TotalHostTime', sum)
total_queue_time = get('TotalQueueTime', sum)
class duration(float):
def __format__(self, format_spec):
if self > 2e9:
return format(self/1e9, format_spec) + ' s'
elif self > 2e6:
return format(self/1e6, format_spec) + ' ms'
elif self > 2e3:
return format(self/1e3, format_spec) + ' µs'
else:
return format(float(self), format_spec) + ' ns'
print(" ")
print("# these numbers are performance figures for the GPU:")
print("time per photon (GPU):", ns_per_photon, "ns")
print("photons per second (GPU):", 1e9/ns_per_photon, "photons per second")
print(" ")
print("# these numbers include the host utilization and are probably not meaningful for --numevents=1 (the default). You need more events to even out the startup/setup time.")
print("(avg) time per photon (actual, including under-utilization):", ns_per_photon_with_util, "ns")
print("(avg) photons per second (actual, including under-utilization):", 1e9/ns_per_photon_with_util, "photons per second")
print("(total) host time: {:.1f}".format(duration(total_host_time)))
print("(total) waiting time: {:.1f} ({:.3f}%)".format(duration(total_queue_time), 100.*total_queue_time/total_host_time))
print("(total) number of kernel calls: {:.0f}".format(ncalls))
print(" wallclock time: {:.1f}".format(duration(walltime_in_execute)))
print("(avg) device utilization:", device_util*100., "%")
|
{"hexsha": "be75ad90fdac636917192147d86c96d07c6500d9", "size": 14888, "ext": "py", "lang": "Python", "max_stars_repo_path": "clsim/resources/scripts/benchmark.py", "max_stars_repo_name": "hschwane/offline_production", "max_stars_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-24T22:00:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-24T22:00:01.000Z", "max_issues_repo_path": "clsim/resources/scripts/benchmark.py", "max_issues_repo_name": "hschwane/offline_production", "max_issues_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clsim/resources/scripts/benchmark.py", "max_forks_repo_name": "hschwane/offline_production", "max_forks_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-17T09:20:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T16:44:18.000Z", "avg_line_length": 37.1271820449, "max_line_length": 180, "alphanum_fraction": 0.6722864052, "include": true, "reason": "import numpy", "num_tokens": 3957}
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj12eqsynthconj3 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv1 (plus lv0 Zero))).
Admitted.
QuickChick conj12eqsynthconj3.
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal33_distrib_100_plus_assoc/lfindconj12eqsynthconj3.v"}
|
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import cPickle as pickle
except:
import pickle
# Python 3 support
try:
from Tkinter import *
import tkMessageBox
import tkFont
except ImportError:
from tkinter import *
from tkinter import font as tkFont
from tkinter import messagebox as tkMessageBox
import healpy as hp
import numpy as np
from aladinSAMP import AladinScriptCommands
aladin = AladinScriptCommands()
from config_values import UserValues
# global variable: level of trasparency window
user = UserValues()
trasparency = user.get_win_trasparency()
class LoadSkymap(Toplevel):
"""Loading a new skymap."""
def __init__(self):
Toplevel.__init__(self, border=8, bg="slate grey")
self.user = UserValues()
# get trasparency windows
self.wait_visibility()
self.wm_attributes('-alpha', trasparency)
self.title("Load a new skymap")
self.attributes("-topmost", True)
self.label_1 = Label(self, text="LVC skymap", bg="slate grey")
self.label_1.grid(row=0, column=0, sticky=E, pady=0)
# default: input skymap
skymap_input = StringVar(self, value=self.user.get_skymap())
self.entry_new_skymap = Entry(self, width=30, justify=CENTER,
textvariable=skymap_input)
self.entry_new_skymap.grid(row=0, padx=15, column=1)
#Btns
self.show = Button(self, text='Load',
command=self.new_skymap)
self.show.grid(column=2, row=0, sticky=W, padx=2, pady=5)
self.close = Button(self, text="Close",
command=self.close_window)
self.close.grid(column=5,row=0, sticky=E, padx=2, pady=5)
def new_skymap(self):
"""Loading a new LVC skymap."""
try:
aladin.send_file(self.entry_new_skymap.get())
aladin.rename(self.entry_new_skymap.get())
except ValueError as value_error:
tkMessageBox.showerror ('Load a new skymap',
value_error)
except IOError as io_error:
tkMessageBox.showerror ('Load a new skymap',
io_error)
def update_GWsky_config():
"""Updating GWsky_config file: coords max probability pixel and nside."""
prob = hp.read_map(self.entry_new_skymap.get(), verbose = False)
# update nside
npix = len(prob)
nside = hp.npix2nside(npix)
# update coord. maximum prob pixel
ipix_max = np.argmax(prob)
theta, phi = hp.pix2ang(nside, ipix_max)
ra_max = round(np.rad2deg(phi), 5)
dec_max = round(np.rad2deg(0.5 * np.pi - theta), 5)
with open('GWsky_config', 'rb') as data:
config_GWsky = pickle.load(data)
config_GWsky['skymap'], config_GWsky['nside'],config_GWsky['ra_max_pixel'],config_GWsky['dec_max_pixel']=\
self.entry_new_skymap.get(), nside, ra_max, dec_max
with open('GWsky_config', 'wb') as data:
pickle.dump(config_GWsky, data)
update_GWsky_config()
def close_window(self):
return self.destroy()
|
{"hexsha": "5d0329c243fdfd6345e130d21ff3c1a905e88a30", "size": 3380, "ext": "py", "lang": "Python", "max_stars_repo_path": "load_skymap.py", "max_stars_repo_name": "ggreco77/test_3", "max_stars_repo_head_hexsha": "02f4ae877beb3b173454b6d97abe90e08747c042", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "load_skymap.py", "max_issues_repo_name": "ggreco77/test_3", "max_issues_repo_head_hexsha": "02f4ae877beb3b173454b6d97abe90e08747c042", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "load_skymap.py", "max_forks_repo_name": "ggreco77/test_3", "max_forks_repo_head_hexsha": "02f4ae877beb3b173454b6d97abe90e08747c042", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7272727273, "max_line_length": 118, "alphanum_fraction": 0.5819526627, "include": true, "reason": "import numpy", "num_tokens": 810}
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import ipywidgets
import matplotlib.pyplot as plt
def intplot(file,xmin=0,xmax=200,ymin=300,ymax=550,step=1.0e-6):
Long = 200.0
N_pos = 200
fig, axes = plt.subplots(nrows=1,ncols=1, figsize=(6, 3), dpi=100)
data = np.load(file)
V_s = data['X'][:,0:-1:2]
axes.plot(range(N_pos),V_s[0,:]/1e3,'r')
line_plot = axes.plot(range(N_pos),V_s[0,:]/1e3)
#axes.legend()
axes.set_xlabel('Position (m)')
fig.tight_layout()
axes.set_ylim((ymin,ymax))
axes.set_xlim((xmin,xmax))
axes.grid(True)
fig.tight_layout()
sld_T = ipywidgets.FloatSlider(orientation='horizontal',description = "Time $(\mu s)$",
value=0, min=0,max= data['t'][-1]*1e6,
step=step,continuous_update=False)
def update(change):
t = sld_T.value
it = np.searchsorted(data['t'][:,0], t*1e-6)
line_plot[0].set_data(range(N_pos),V_s[it,:]/1e3)
fig.canvas.draw_idle()
sld_T.observe(update, names='value')
layout_row1 = ipywidgets.HBox([fig.canvas])
layout_row2 = ipywidgets.HBox([sld_T])
layout = ipywidgets.VBox([layout_row1,layout_row2])
return layout
# -
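# +
# Minimal usage sketch (hypothetical file name; requires an interactive
# matplotlib backend such as ipympl so the slider updates the figure):
# layout = intplot('emt_line_results.npz')
# layout # display the ipywidgets layout in a notebook cell
# -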
|
{"hexsha": "d4fc32e56de40c6f2b91e24956a197232469771f", "size": 1557, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/db_emt_line_core.py", "max_stars_repo_name": "jmmauricio/e-dashboards", "max_stars_repo_head_hexsha": "c993a2aa7b665d68e2af6ce76cb4556ff8a85f52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/db_emt_line_core.py", "max_issues_repo_name": "jmmauricio/e-dashboards", "max_issues_repo_head_hexsha": "c993a2aa7b665d68e2af6ce76cb4556ff8a85f52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/db_emt_line_core.py", "max_forks_repo_name": "jmmauricio/e-dashboards", "max_forks_repo_head_hexsha": "c993a2aa7b665d68e2af6ce76cb4556ff8a85f52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2388059701, "max_line_length": 93, "alphanum_fraction": 0.5850995504, "include": true, "reason": "import numpy", "num_tokens": 484}
|
"""
High-level functions used across the CAP-Toolkit package.
"""
import h5py
import numpy as np
import pyproj
import xarray as xr
import pandas as pd
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from scipy import stats
from scipy.ndimage import map_coordinates
from gdalconst import *
from osgeo import gdal, osr
from scipy import signal
def print_args(args):
"""Print arguments passed to argparse."""
print("Input arguments:")
for arg in list(vars(args).items()):
print(arg)
def read_h5(fname, vnames):
"""Generic HDF5 reader.
vnames : ['var1', 'var2', 'var3']
"""
with h5py.File(fname, "r") as f:
variables = [f[v][()] for v in vnames]
return variables if len(vnames) > 1 else variables[0]
def save_h5(fname, vardict, mode="a"):
"""Generic HDF5 writer.
vardict : {'name1': var1, 'name2': va2, 'name3': var3}
"""
with h5py.File(fname, mode) as f:
for k, v in list(vardict.items()):
if k in f:
f[k][:] = np.squeeze(v)
else:
f[k] = np.squeeze(v)
def is_empty(ifile):
"""Test if file is corruted or empty"""
try:
with h5py.File(ifile, "r") as f:
if bool(list(f.keys())):
return False
else:
return True
except IOError:
return True
def find_nearest(arr, val):
"""Find index of 'nearest' value(s).
Args:
arr (nd array) : The array to search in (nd). No need to be sorted.
val (scalar or array) : Value(s) to find.
Returns:
out (tuple or scalar) : The index (or tuple if nd array) of nearest
entry found. If `val` is a list of values then a tuple of ndarray
with the indices of each value is return.
See also:
find_nearest2
"""
idx = []
if np.ndim(val) == 0:
val = np.array([val])
for v in val:
idx.append((np.abs(arr - v)).argmin())
idx = np.unravel_index(idx, arr.shape)
return idx if val.ndim > 1 else idx[0]
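# Usage sketch (hypothetical array):
# arr = np.arange(0, 10, 0.5)
# i = find_nearest(arr, 3.2) # index of the entry nearest to 3.2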
def make_grid(xmin, xmax, ymin, ymax, dx, dy, return_2d=False):
"""
Construct 2D-grid given input boundaries
:param xmin: x-coord. min
:param xmax: x-coord. max
:param ymin: y-coors. min
:param ymax: y-coord. max
:param dx: x-resolution
:param dy: y-resolution
:param return_2d: if true return grid otherwise vector
:return: 2D grid or 1D vector
"""
Nn = int((np.abs(ymax - ymin)) / dy) + 1
Ne = int((np.abs(xmax - xmin)) / dx) + 1
xi = np.linspace(xmin, xmax, num=Ne)
yi = np.linspace(ymin, ymax, num=Nn)
if return_2d:
return np.meshgrid(xi, yi)
else:
return xi, yi
def transform_coord(proj1, proj2, x, y):
"""
Transform coordinates from proj1 to proj2
usgin EPSG number
:param proj1: current projection (4326)
:param proj2: target projection (3031)
:param x: x-coord in current proj1
:param y: y-coord in current proj1
:return: x and y now in proj2
"""
proj1 = pyproj.Proj("+init=EPSG:" + str(proj1))
proj2 = pyproj.Proj("+init=EPSG:" + str(proj2))
return pyproj.transform(proj1, proj2, x, y)
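# Usage sketch: lon/lat (EPSG:4326) to Antarctic Polar Stereographic
# (EPSG:3031), with hypothetical coordinate arrays:
# x, y = transform_coord(4326, 3031, lon, lat)
# Note: the "+init=EPSG:" syntax and pyproj.transform are deprecated and
# removed in newer pyproj releases; pyproj.Transformer is the modern route.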
def mad_std(x, axis=None):
"""
Robust std.dev using median absolute deviation
:param x: data values
:param axis: target axis for computation
:return: std.dev (MAD)
"""
return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)
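# The factor 1.4826 ~ 1/Phi^-1(3/4) makes the MAD a consistent estimator
# of the standard deviation for normally distributed data.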
def interpmed(x, y, z, Xi, Yi, n, d):
"""
2D median interpolation of scattered data
:param x: x-coord (m)
:param y: y-coord (m)
:param z: values
:param Xi: x-coord. grid (2D)
:param Yi: y-coord. grid (2D)
:param n: number of nearest neighbours
:param d: maximum distance allowed (m)
:return: 1D array of interpolated values
"""
xi = Xi.ravel()
yi = Yi.ravel()
zi = np.zeros(len(xi)) * np.nan
tree = cKDTree(np.c_[x, y])
for i in range(len(xi)):
(dxy, idx) = tree.query((xi[i], yi[i]), k=n)
if n == 1:
pass
elif dxy.min() > d:
continue
else:
pass
zc = z[idx]
zi[i] = np.median(zc)
return zi
def interpgaus(x, y, z, s, Xi, Yi, n, d, a):
"""
2D interpolation using a gaussian kernel
weighted by distance and error
:param x: x-coord (m)
:param y: y-coord (m)
:param z: values
:param s: obs. errors
:param Xi: x-coord. interp. point(s) (m)
:param Yi: y-coord. interp. point(s) (m)
:param n: number of nearest neighbours
:param d: maximum distance allowed (m)
:param a: correlation length in distance (m)
:return: 1D vec. of prediction, sigma and nobs
"""
xi = Xi.ravel()
yi = Yi.ravel()
zi = np.zeros(len(xi)) * np.nan
ei = np.zeros(len(xi)) * np.nan
ni = np.zeros(len(xi)) * np.nan
tree = cKDTree(np.c_[x, y])
if np.all(np.isnan(s)): s = np.ones(s.shape)
for i in range(len(xi)):
(dxy, idx) = tree.query((xi[i], yi[i]), k=n)
if n == 1:
pass
elif dxy.min() > d:
continue
else:
pass
zc = z[idx]
sc = s[idx]
if len(zc[~np.isnan(zc)]) == 0: continue
# Weights
wc = (1./sc**2) * np.exp(-(dxy**2)/(2*a**2))
# Avoid singularity
wc += 1e-6
# Predicted value
zi[i] = np.nansum(wc * zc) / np.nansum(wc)
# Weighted rmse
sigma_r = np.nansum(wc * (zc - zi[i])**2) / np.nansum(wc)
# Obs. error
sigma_s = 0 if np.all(s == 1) else np.nanmean(sc)
# Prediction error
ei[i] = np.sqrt(sigma_r ** 2 + sigma_s ** 2)
# Number of points in prediction
ni[i] = 1 if n == 1 else len(zc)
return zi, ei, ni
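# Usage sketch (hypothetical scattered data; d and a in metres):
# zi, ei, ni = interpgaus(x, y, z, s, Xi, Yi, n=16, d=1000., a=500.)
# Each prediction is the distance- and error-weighted mean with weights
# w = (1/s**2) * exp(-d**2 / (2*a**2)), plus a weighted prediction error.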
def interpkrig(x, y, z, s, Xi, Yi, d, a, n):
"""
2D interpolation using ordinary kriging/collocation
with second-order markov covariance model.
:param x: x-coord (m)
:param y: y-coord (m)
:param z: values
:param s: obs. error added to diagonal
:param Xi: x-coord. interp. point(s) (m)
:param Yi: y-coord. interp. point(s) (m)
:param d: maximum distance allowed (km; converted to m internally)
:param a: correlation length (km; rescaled internally, see below)
:param n: number of nearest neighbours
:return: 1D vec. of prediction, sigma and nobs
"""
n = int(n)
# Check
if n == 1:
print('n > 1 needed!')
return
xi = Xi.ravel()
yi = Yi.ravel()
zi = np.zeros(len(xi)) * np.nan
ei = np.zeros(len(xi)) * np.nan
ni = np.zeros(len(xi)) * np.nan
tree = cKDTree(np.c_[x, y])
# Convert the input correlation length and search radius from km to m;
# the 0.595 factor rescales a so that the input value is (approximately)
# the distance at which the covariance drops to half of c0
a *= 0.595 * 1e3
d *= 1e3
for i in range(len(xi)):
(dxy, idx) = tree.query((xi[i], yi[i]), k=n)
if dxy.min() > d:
continue
xc = x[idx]
yc = y[idx]
zc = z[idx]
sc = s[idx]
if len(zc) < 2: continue
m0 = np.median(zc)
c0 = np.var(zc)
# Covariance function for Dxy
Cxy = c0 * (1 + (dxy / a)) * np.exp(-dxy / a)
# Compute pair-wise distance
dxx = cdist(np.c_[xc, yc], np.c_[xc, yc], "euclidean")
# Covariance function Dxx
Cxx = c0 * (1 + (dxx / a)) * np.exp(-dxx / a)
# Measurement noise matrix
N = np.eye(len(Cxx)) * sc * sc
# Solve for the inverse
CxyCxxi = np.linalg.solve((Cxx + N).T, Cxy.T)
# Predicted value
zi[i] = np.dot(CxyCxxi, zc) + (1 - np.sum(CxyCxxi)) * m0
# Predicted error
ei[i] = np.sqrt(np.abs(c0 - np.dot(CxyCxxi, Cxy.T)))
# Number of points in prediction
ni[i] = len(zc)
return zi, ei, ni
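# Usage sketch (hypothetical scattered data; per the rescaling above,
# d and a are given in km here, unlike interpgaus):
# zi, ei, ni = interpkrig(x, y, z, s, Xi, Yi, d=50., a=10., n=16)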
def spatial_filter(x, y, z, dx, dy, n_sigma=3.0):
"""
Spatial outlier editing filter
:param x: x-coord (m)
:param y: y-coord (m)
:param z: values
:param dx: filter res. in x (m)
:param dy: filter res. in y (m)
:param n_sigma: cut-off value (number of std. deviations)
:return: filtered array containing nan-values
"""
Nn = int((np.abs(y.max() - y.min())) / dy) + 1
Ne = int((np.abs(x.max() - x.min())) / dx) + 1
f_bin = stats.binned_statistic_2d(x, y, z, bins=(Ne, Nn))
index = f_bin.binnumber
ind = np.unique(index)
zo = z.copy()
for i in range(len(ind)):
# index for each bin
idx, = np.where(index == ind[i])
zb = z[idx]
if len(zb[~np.isnan(zb)]) == 0:
continue
dh = zb - np.nanmedian(zb)
foo = np.abs(dh) > n_sigma * np.nanstd(dh)
zb[foo] = np.nan
zo[idx] = zb
return zo
def interp2d(x, y, z, xi, yi, **kwargs):
"""
Raster to point interpolation based on
scipy.ndimage import map_coordinates
:param x: x-coord. in 2D (m)
:param y: x-coord. in 2D (m)
:param z: values in 2D
:param xi: interp. point in x (m)
:param yi: interp. point in y (m)
:param kwargs: see map_coordinates
:return: array of interp. values
"""
x = np.flipud(x)
y = np.flipud(y)
z = np.flipud(z)
x = x[0,:]
y = y[:,0]
nx, ny = x.size, y.size
x_s, y_s = x[1] - x[0], y[1] - y[0]
if np.size(xi) == 1 and np.size(yi) > 1:
xi = xi * np.ones(yi.size)
elif np.size(yi) == 1 and np.size(xi) > 1:
yi = yi * np.ones(xi.size)
xp = (xi - x[0]) * (nx - 1) / (x[-1] - x[0])
yp = (yi - y[0]) * (ny - 1) / (y[-1] - y[0])
coord = np.vstack([yp, xp])
zi = map_coordinates(z, coord, **kwargs)
return zi
def tiffread(ifile):
"""
Reading tif-file to memory
:param ifile: path+name of tif file
:return: X, Y, Z, dx, dy and proj
"""
file = gdal.Open(ifile, GA_ReadOnly)
metaData = file.GetMetadata()
projection = file.GetProjection()
src = osr.SpatialReference()
src.ImportFromWkt(projection)
proj = src.ExportToWkt()
Nx = file.RasterXSize
Ny = file.RasterYSize
trans = file.GetGeoTransform()
dx = trans[1]
dy = trans[5]
Xp = np.arange(Nx)
Yp = np.arange(Ny)
(Xp, Yp) = np.meshgrid(Xp, Yp)
X = trans[0] + (Xp + 0.5) * trans[1] + (Yp + 0.5) * trans[2]
Y = trans[3] + (Xp + 0.5) * trans[4] + (Yp + 0.5) * trans[5]
band = file.GetRasterBand(1)
Z = band.ReadAsArray()
dx = np.abs(dx)
dy = np.abs(dy)
return X, Y, Z, dx, dy, proj
def tiffwrite(ofile, X, Y, Z, dx, dy, proj, otype='float'):
"""
Writing raster to a tif-file
:param ofile: name of ofile
:param X: x-coord of raster (2D)
:param Y: y-coord of raster (2D)
:param Z: values (2D)
:param dx: grid-spacing x
:param dy: grid-spacing y
:param proj: projection (epsg number)
:param dtype: save as 'int' or 'float'
:return: written file to memory
"""
proj = int(proj)
N, M = Z.shape
driver = gdal.GetDriverByName("GTiff")
if otype == 'int':
datatype = gdal.GDT_Int32
if otype == 'float':
datatype = gdal.GDT_Float32
ds = driver.Create(ofile, M, N, 1, datatype)
src = osr.SpatialReference()
src.ImportFromEPSG(proj)
ulx = np.min(np.min(X)) - 0.5 * dx
uly = np.max(np.max(Y)) + 0.5 * dy
geotransform = [ulx, dx, 0, uly, 0, -dy]
ds.SetGeoTransform(geotransform)
ds.SetProjection(src.ExportToWkt())
ds.GetRasterBand(1).SetNoDataValue(np.nan)
ds.GetRasterBand(1).WriteArray(Z)
ds = None
def binning(x, y, xmin=None, xmax=None, dx=1 / 12.,
window=3 / 12., interp=False, median=False):
"""Time-series binning (w/overlapping windows).
Args:
x,y: time and value of time series.
xmin,xmax: time span of returned binned series.
dx: time step of binning.
window: size of binning window.
interp: interpolate binned values to original x points.
"""
if xmin is None:
xmin = np.nanmin(x)
if xmax is None:
xmax = np.nanmax(x)
steps = np.arange(xmin, xmax, dx) # time steps
bins = [(ti, ti + window) for ti in steps] # bin limits
N = len(bins)
yb = np.full(N, np.nan)
xb = np.full(N, np.nan)
eb = np.full(N, np.nan)
nb = np.full(N, np.nan)
sb = np.full(N, np.nan)
for i in range(N):
t1, t2 = bins[i]
idx, = np.where((x >= t1) & (x <= t2))
if len(idx) == 0:
xb[i] = 0.5 * (t1 + t2)
continue
ybv = y[idx]
if median:
yb[i] = np.nanmedian(ybv)
else:
yb[i] = np.nanmean(ybv)
xb[i] = 0.5 * (t1 + t2)
eb[i] = mad_std(ybv)
nb[i] = np.sum(~np.isnan(ybv))
sb[i] = np.sum(ybv)
if interp:
try:
yb = np.interp(x, xb, yb)
eb = np.interp(x, xb, eb)
sb = np.interp(x, xb, sb)
xb = x
except Exception:
pass
return xb, yb, eb, nb, sb
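# Usage sketch: monthly bins with a 3-month overlapping window
# (hypothetical time series t [yr] and h [m]):
# xb, yb, eb, nb, sb = binning(t, h, dx=1/12., window=3/12.)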
def hampel_filter1d(x, k, t0=3):
"""
Hampel-filter for outlier editing
:param x: values
:param k: window size (int)
:param t0: sigma threshold value
:return: filtered array with nan's
"""
x = np.pad(x, k, 'constant', constant_values=9999)
x[x == 9999] = np.nan
n = len(x)
y = x.copy()
L = 1.4826
for i in range((k + 1),(n - k)):
if np.isnan(x[(i - k):(i + k+1)]).all():
continue
x0 = np.nanmedian(x[(i - k):(i + k+1)])
S0 = L * np.nanmedian(np.abs(x[(i - k):(i + k+1)] - x0))
if np.abs(x[i] - x0) > t0 * S0:
y[i] = np.nan
y = y[k:-k]
return y
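# Usage sketch (hypothetical series): NaN-out values deviating more than
# 3 robust sigmas from the running median in a +/-5 sample window:
# y = hampel_filter1d(x, k=5, t0=3)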
def sgolay1d(h, window=3, order=1, deriv=0, dt=1.0, mode="nearest", time=None):
"""Savitztky-Golay filter with support for NaNs.
If time is given, interpolate NaNs otherwise pad w/zeros.
If time is given, calculate dt as t[1]-t[0].
Args:
dt (int): spacing between samples (for correct units).
Notes:
Works with numpy, pandas and xarray objects.
"""
if isinstance(h, (pd.Series, xr.DataArray)):
h = h.values
if isinstance(time, (pd.Series, xr.DataArray)):
time = time.values
_h = h.copy()
(i_nan,) = np.where(np.isnan(_h))
(i_valid,) = np.where(np.isfinite(_h))
if i_valid.size < 5:
return _h
elif time is not None:
_h[i_nan] = np.interp(time[i_nan], time[i_valid], _h[i_valid])
dt = np.abs(time[1] - time[0])
else:
_h[i_nan] = 0
return signal.savgol_filter(_h, window, order, deriv, delta=dt, mode=mode)
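# Usage sketch (hypothetical series): smooth, then estimate the rate:
# h_smooth = sgolay1d(h, window=11, order=2)
# dhdt = sgolay1d(h, window=11, order=2, deriv=1, time=t) # units of h per unit of t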
def sgolay2d(z, window_size, order, derivative=None):
"""Two dimensional data smoothing and least-square gradient estimate.
Code from:
http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
Reference:
A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
"""
# number of terms in the polynomial expression
# TODO: Double check this (changed for Py3)
n_terms = (order + 1) * (order + 2) // 2
if window_size % 2 == 0:
raise ValueError("window_size must be odd")
if window_size ** 2 < n_terms:
raise ValueError("order is too high for the window size")
half_size = window_size // 2
# exponents of the polynomial.
# p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
# this line gives a list of two item tuple. Each tuple contains
# the exponents of the k-th term. First element of tuple is for x
# second element for y.
# Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)]
# coordinates of points
ind = np.arange(-half_size, half_size + 1, dtype=np.float64)
dx = np.repeat(ind, window_size)
dy = np.tile(ind, [window_size, 1]).reshape(window_size ** 2,)
# build matrix of system of equation
A = np.empty((window_size ** 2, len(exps)))
for i, exp in enumerate(exps):
A[:, i] = (dx ** exp[0]) * (dy ** exp[1])
# pad input array with appropriate values at the four borders
new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size
Z = np.zeros((new_shape))
# top band
band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs(
np.flipud(z[1 : half_size + 1, :]) - band
)
# bottom band
band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs(
np.flipud(z[-half_size - 1 : -1, :]) - band
)
# left band
band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, :half_size] = band - np.abs(
np.fliplr(z[:, 1 : half_size + 1]) - band
)
# right band
band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, -half_size:] = band + np.abs(
np.fliplr(z[:, -half_size - 1 : -1]) - band
)
# central band
Z[half_size:-half_size, half_size:-half_size] = z
# top left corner
band = z[0, 0]
Z[:half_size, :half_size] = band - np.abs(
np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band
)
# bottom right corner
band = z[-1, -1]
Z[-half_size:, -half_size:] = band + np.abs(
np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1]))
- band
)
# top right corner
band = Z[half_size, -half_size:]
Z[:half_size, -half_size:] = band - np.abs(
np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band
)
# bottom left corner
band = Z[-half_size:, half_size].reshape(-1, 1)
Z[-half_size:, :half_size] = band - np.abs(
np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band
)
# solve system and convolve
if derivative is None:
m = np.linalg.pinv(A)[0].reshape((window_size, -1))
return signal.fftconvolve(Z, m, mode="valid")
elif derivative == "col":
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
return signal.fftconvolve(Z, -c, mode="valid")
elif derivative == "row":
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return signal.fftconvolve(Z, -r, mode="valid")
elif derivative == "both":
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return (
signal.fftconvolve(Z, -r, mode="valid"),
signal.fftconvolve(Z, -c, mode="valid"),
)
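# A minimal usage sketch for sgolay2d (hypothetical data, not part of the original module):
# z = np.random.rand(50, 50)
# z_smooth = sgolay2d(z, window_size=11, order=3) # smoothed field
# gr, gc = sgolay2d(z, window_size=11, order=3, derivative="both") # gradient estimates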
# Some edge test cases (for the 3-km grid)
test_ij_3km = [
(845, 365), # 0 PIG Floating 1
(831, 364), # 1 PIG Floating 2
(1022, 840), # 2 CS-2 only 1
(970, 880), # 3 CS-2 only 2
(100, 1170), # 4 fig1 large peaks at mission overlaps
(100, 766), # 5 fig2 peak at mission overlap
(7, 893), # 6 step change at beginning
(8, 892), # 7 with hole
(9, 889), # 8 with large hole
(11, 893), # 9 step in divergence
]
|
{"hexsha": "45fa2084277b9231239d0e1ee11434dcc52f3e48", "size": 19235, "ext": "py", "lang": "Python", "max_stars_repo_path": "captoolkit/utils.py", "max_stars_repo_name": "tsutterley/captoolkit", "max_stars_repo_head_hexsha": "314c4d34f49012c25286478c943b0ab13c893c62", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2019-09-27T00:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T01:51:19.000Z", "max_issues_repo_path": "captoolkit/utils.py", "max_issues_repo_name": "tsutterley/captoolkit", "max_issues_repo_head_hexsha": "314c4d34f49012c25286478c943b0ab13c893c62", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-02-27T21:22:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-14T01:31:26.000Z", "max_forks_repo_path": "captoolkit/utils.py", "max_forks_repo_name": "tsutterley/captoolkit", "max_forks_repo_head_hexsha": "314c4d34f49012c25286478c943b0ab13c893c62", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-09-24T08:06:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T14:44:19.000Z", "avg_line_length": 25.3759894459, "max_line_length": 79, "alphanum_fraction": 0.545931895, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6129}
|
module Idris.IDEMode.Commands
import Core.Core
import Core.Context
import Core.Context.Log
import Core.Name
import public Idris.REPL.Opts
import Libraries.Utils.Hex
import System.File
%default total
public export
data SExp = SExpList (List SExp)
| StringAtom String
| BoolAtom Bool
| IntegerAtom Integer
| SymbolAtom String
public export
data DocMode = Overview | Full
public export
data IDECommand
= Interpret String
| LoadFile String (Maybe Integer)
| TypeOf String (Maybe (Integer, Integer))
| NameAt String (Maybe (Integer, Integer))
| CaseSplit Integer Integer String
| AddClause Integer String
-- deprecated: | AddProofClause
| AddMissing Integer String
| ExprSearch Integer String (List String) Bool
| ExprSearchNext
| GenerateDef Integer String
| GenerateDefNext
| MakeLemma Integer String
| MakeCase Integer String
| MakeWith Integer String
| DocsFor String (Maybe DocMode)
| Directive String
| Apropos String
| Metavariables Integer
| WhoCalls String
| CallsWho String
| BrowseNamespace String
| NormaliseTerm String -- TODO: implement a Binary lib
| ShowTermImplicits String -- and implement Binary (Term)
| HideTermImplicits String -- for these four definitions,
| ElaborateTerm String -- then change String to Term, as in idris1
| PrintDefinition String
| ReplCompletions String
| EnableSyntax Bool
| Version
| GetOptions
readHints : List SExp -> Maybe (List String)
readHints [] = Just []
readHints (StringAtom s :: rest)
= do rest' <- readHints rest
pure (s :: rest')
readHints _ = Nothing
export
getIDECommand : SExp -> Maybe IDECommand
getIDECommand (SExpList [SymbolAtom "interpret", StringAtom cmd])
= Just $ Interpret cmd
getIDECommand (SExpList [SymbolAtom "load-file", StringAtom fname])
= Just $ LoadFile fname Nothing
getIDECommand (SExpList [SymbolAtom "load-file", StringAtom fname, IntegerAtom l])
= Just $ LoadFile fname (Just l)
getIDECommand (SExpList [SymbolAtom "type-of", StringAtom n])
= Just $ TypeOf n Nothing
getIDECommand (SExpList [SymbolAtom "type-of", StringAtom n,
IntegerAtom l, IntegerAtom c])
= Just $ TypeOf n (Just (l, c))
getIDECommand (SExpList [SymbolAtom "name-at", StringAtom n])
= Just $ NameAt n Nothing
getIDECommand (SExpList [SymbolAtom "name-at", StringAtom n,
IntegerAtom l, IntegerAtom c])
= Just $ NameAt n (Just (l, c))
getIDECommand (SExpList [SymbolAtom "case-split", IntegerAtom l, IntegerAtom c,
StringAtom n])
= Just $ CaseSplit l c n
getIDECommand (SExpList [SymbolAtom "case-split", IntegerAtom l, StringAtom n])
= Just $ CaseSplit l 0 n
getIDECommand (SExpList [SymbolAtom "add-clause", IntegerAtom l, StringAtom n])
= Just $ AddClause l n
getIDECommand (SExpList [SymbolAtom "add-missing", IntegerAtom line, StringAtom n])
= Just $ AddMissing line n
getIDECommand (SExpList [SymbolAtom "proof-search", IntegerAtom l, StringAtom n])
= Just $ ExprSearch l n [] False
getIDECommand (SExpList [SymbolAtom "proof-search", IntegerAtom l, StringAtom n, SExpList hs])
= (\hs' => ExprSearch l n hs' False) <$> readHints hs
getIDECommand (SExpList [SymbolAtom "proof-search", IntegerAtom l, StringAtom n, SExpList hs, SymbolAtom mode])
= (\hs' => ExprSearch l n hs' (getMode mode)) <$> readHints hs
where
getMode : String -> Bool
getMode m = m == "all"
getIDECommand (SymbolAtom "proof-search-next") = Just ExprSearchNext
getIDECommand (SExpList [SymbolAtom "generate-def", IntegerAtom l, StringAtom n])
= Just $ GenerateDef l n
getIDECommand (SymbolAtom "generate-def-next") = Just GenerateDefNext
getIDECommand (SExpList [SymbolAtom "make-lemma", IntegerAtom l, StringAtom n])
= Just $ MakeLemma l n
getIDECommand (SExpList [SymbolAtom "make-case", IntegerAtom l, StringAtom n])
= Just $ MakeCase l n
getIDECommand (SExpList [SymbolAtom "make-with", IntegerAtom l, StringAtom n])
= Just $ MakeWith l n
getIDECommand (SExpList (SymbolAtom "docs-for" :: StringAtom n :: modeTail))
= do -- Maybe monad
modeOpt <- case modeTail of
[] => Just Nothing
[SymbolAtom "overview"] => Just $ Just Overview
[SymbolAtom "full" ] => Just $ Just Full
_ => Nothing
Just $ DocsFor n modeOpt
getIDECommand (SExpList [SymbolAtom "apropos", StringAtom n])
= Just $ Apropos n
getIDECommand (SExpList [SymbolAtom "directive", StringAtom n])
= Just $ Directive n
getIDECommand (SExpList [SymbolAtom "metavariables", IntegerAtom n])
= Just $ Metavariables n
getIDECommand (SExpList [SymbolAtom "who-calls", StringAtom n])
= Just $ WhoCalls n
getIDECommand (SExpList [SymbolAtom "calls-who", StringAtom n])
= Just $ CallsWho n
getIDECommand (SExpList [SymbolAtom "browse-namespace", StringAtom ns])
= Just $ BrowseNamespace ns
getIDECommand (SExpList [SymbolAtom "normalise-term", StringAtom tm])
= Just $ NormaliseTerm tm
getIDECommand (SExpList [SymbolAtom "show-term-implicits", StringAtom tm])
= Just $ ShowTermImplicits tm
getIDECommand (SExpList [SymbolAtom "hide-term-implicits", StringAtom tm])
= Just $ HideTermImplicits tm
getIDECommand (SExpList [SymbolAtom "elaborate-term", StringAtom tm])
= Just $ ElaborateTerm tm
getIDECommand (SExpList [SymbolAtom "print-definition", StringAtom n])
= Just $ PrintDefinition n
getIDECommand (SExpList [SymbolAtom "repl-completions", StringAtom n])
= Just $ ReplCompletions n
getIDECommand (SExpList [SymbolAtom "enable-syntax", BoolAtom b])
= Just $ EnableSyntax b
getIDECommand (SymbolAtom "version") = Just Version
getIDECommand (SExpList [SymbolAtom "get-options"]) = Just GetOptions
getIDECommand _ = Nothing
export
putIDECommand : IDECommand -> SExp
putIDECommand (Interpret cmd) = (SExpList [SymbolAtom "interpret", StringAtom cmd])
putIDECommand (LoadFile fname Nothing) = (SExpList [SymbolAtom "load-file", StringAtom fname])
putIDECommand (LoadFile fname (Just line)) = (SExpList [SymbolAtom "load-file", StringAtom fname, IntegerAtom line])
putIDECommand (TypeOf cmd Nothing) = (SExpList [SymbolAtom "type-of", StringAtom cmd])
putIDECommand (TypeOf cmd (Just (line, col))) = (SExpList [SymbolAtom "type-of", StringAtom cmd, IntegerAtom line, IntegerAtom col])
putIDECommand (NameAt cmd Nothing) = (SExpList [SymbolAtom "name-at", StringAtom cmd])
putIDECommand (NameAt cmd (Just (line, col))) = (SExpList [SymbolAtom "name-at", StringAtom cmd, IntegerAtom line, IntegerAtom col])
putIDECommand (CaseSplit line col n) = (SExpList [SymbolAtom "case-split", IntegerAtom line, IntegerAtom col, StringAtom n])
putIDECommand (AddClause line n) = (SExpList [SymbolAtom "add-clause", IntegerAtom line, StringAtom n])
putIDECommand (AddMissing line n) = (SExpList [SymbolAtom "add-missing", IntegerAtom line, StringAtom n])
putIDECommand (ExprSearch line n exprs mode) = (SExpList [SymbolAtom "proof-search", IntegerAtom line, StringAtom n, SExpList $ map StringAtom exprs, getMode mode])
where
getMode : Bool -> SExp
getMode True = SymbolAtom "all"
getMode False = SymbolAtom "other"
putIDECommand ExprSearchNext = SymbolAtom "proof-search-next"
putIDECommand (GenerateDef line n) = (SExpList [SymbolAtom "generate-def", IntegerAtom line, StringAtom n])
putIDECommand GenerateDefNext = SymbolAtom "generate-def-next"
putIDECommand (MakeLemma line n) = (SExpList [SymbolAtom "make-lemma", IntegerAtom line, StringAtom n])
putIDECommand (MakeCase line n) = (SExpList [SymbolAtom "make-case", IntegerAtom line, StringAtom n])
putIDECommand (MakeWith line n) = (SExpList [SymbolAtom "make-with", IntegerAtom line, StringAtom n])
putIDECommand (DocsFor n modeOpt) = let modeTail = case modeOpt of
Nothing => []
Just Overview => [SymbolAtom "overview"]
Just Full => [SymbolAtom "full"] in
(SExpList (SymbolAtom "docs-for" :: StringAtom n :: modeTail))
putIDECommand (Apropos n) = (SExpList [SymbolAtom "apropos", StringAtom n])
putIDECommand (Metavariables n) = (SExpList [SymbolAtom "metavariables", IntegerAtom n])
putIDECommand (WhoCalls n) = (SExpList [SymbolAtom "who-calls", StringAtom n])
putIDECommand (CallsWho n) = (SExpList [SymbolAtom "calls-who", StringAtom n])
putIDECommand (BrowseNamespace ns) = (SExpList [SymbolAtom "browse-namespace", StringAtom ns])
putIDECommand (NormaliseTerm tm) = (SExpList [SymbolAtom "normalise-term", StringAtom tm])
putIDECommand (ShowTermImplicits tm) = (SExpList [SymbolAtom "show-term-implicits", StringAtom tm])
putIDECommand (HideTermImplicits tm) = (SExpList [SymbolAtom "hide-term-implicits", StringAtom tm])
putIDECommand (ElaborateTerm tm) = (SExpList [SymbolAtom "elaborate-term", StringAtom tm])
putIDECommand (PrintDefinition n) = (SExpList [SymbolAtom "print-definition", StringAtom n])
putIDECommand (ReplCompletions n) = (SExpList [SymbolAtom "repl-completions", StringAtom n])
putIDECommand (Directive n) = (SExpList [SymbolAtom "directive", StringAtom n])
putIDECommand (EnableSyntax b) = (SExpList [SymbolAtom "enable-syntax", BoolAtom b])
putIDECommand GetOptions = (SExpList [SymbolAtom "get-options"])
putIDECommand Version = SymbolAtom "version"
export
getMsg : SExp -> Maybe (IDECommand, Integer)
getMsg (SExpList [cmdexp, IntegerAtom num])
= do cmd <- getIDECommand cmdexp
pure (cmd, num)
getMsg _ = Nothing
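-- A sketch of the wire format getMsg decodes (hypothetical message):
-- ((:interpret "2 + 2") 1)
-- parses to Just (Interpret "2 + 2", 1), where 1 is the request id.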
escape : String -> String
escape = pack . concatMap escapeChar . unpack
where
escapeChar : Char -> List Char
escapeChar '\\' = ['\\', '\\']
escapeChar '"' = ['\\', '\"']
escapeChar c = [c]
export
Show SExp where
show (SExpList xs) = assert_total $ "(" ++ showSep " " (map show xs) ++ ")"
show (StringAtom str) = "\"" ++ escape str ++ "\""
show (BoolAtom b) = ":" ++ show b
show (IntegerAtom i) = show i
show (SymbolAtom s) = ":" ++ s
public export
interface SExpable a where
toSExp : a -> SExp
export
SExpable IDECommand where
toSExp = putIDECommand
export
SExpable SExp where
toSExp = id
export
SExpable Bool where
toSExp = BoolAtom
export
SExpable String where
toSExp = StringAtom
export
SExpable Integer where
toSExp = IntegerAtom
export
SExpable Int where
toSExp = IntegerAtom . cast
export
SExpable Nat where
toSExp = IntegerAtom . cast
export
SExpable Name where
toSExp = SymbolAtom . show
export
(SExpable a, SExpable b) => SExpable (a, b) where
toSExp (x, y)
= case toSExp y of
SExpList xs => SExpList (toSExp x :: xs)
y' => SExpList [toSExp x, y']
export
SExpable a => SExpable (List a) where
toSExp xs
= SExpList (map toSExp xs)
export
SExpable a => SExpable (Maybe a) where
toSExp Nothing = SExpList []
toSExp (Just x) = toSExp x
export
version : Int -> Int -> SExp
version maj min = toSExp (SymbolAtom "protocol-version", maj, min)
sendStr : File -> String -> IO ()
sendStr f st =
map (const ()) (fPutStr f st)
export
send : {auto c : Ref Ctxt Defs} -> SExpable a => File -> a -> Core ()
send f resp
= do let r = show (toSExp resp) ++ "\n"
log "ide-mode.send" 20 r
coreLift $ sendStr f $ leftPad '0' 6 (asHex (cast (length r)))
coreLift $ sendStr f r
coreLift $ fflush f
|
{"hexsha": "93ff3926ef2f5741faabd0fd8103b13865725587", "size": 12053, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/Idris/IDEMode/Commands.idr", "max_stars_repo_name": "ska80/idris-jvm", "max_stars_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Idris/IDEMode/Commands.idr", "max_issues_repo_name": "ska80/idris-jvm", "max_issues_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Idris/IDEMode/Commands.idr", "max_forks_repo_name": "ska80/idris-jvm", "max_forks_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2912280702, "max_line_length": 165, "alphanum_fraction": 0.6639840704, "num_tokens": 3265}
|
"""
Routines for astronomical related calculation.
"""
import datetime
import numpy as np
import astropy.units as u
def beam_area(*args):
"""
Calculate the Gaussian beam area.
Parameters
----------
args: float
Beam widths.
If a single argument is given, a symmetrical beam is assumed.
If two arguments are given, they are bmaj and bmin,
the widths of the major and minor axes of the beam in that order.
Return
------
out: float
Beam area. No unit conversion is performed, i.e. the unit will depend
on the input arguments. For example, beam width in degree will return
the beam area in square degree. Likewise, beam width in pixel will
return the beam area in pixel.
"""
if len(args) > 2:
raise ValueError('Input argument must be a single beam width for a '
'symmetrical beam, or widths of the major and minor '
'axes of the beam.')
if len(args) == 2:
bmaj, bmin = args
else:
bmaj = args[0]
bmin = bmaj
return np.pi * bmaj * bmin / (4 * np.log(2))
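# A minimal usage sketch (hypothetical widths, not part of the original module):
# beam_area(0.1) # symmetrical beam, FWHM = 0.1 deg -> area in deg^2
# beam_area(0.2, 0.1) # elliptical beam with bmaj = 0.2 deg, bmin = 0.1 deg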
def jysr2k(intensity, freq):
"""
Convert Jy/sr to K.
Parameters
----------
intensity: array-like
Intensity (brightness) in Jy/sr
freq: float
Frequency of the map in MHz
Return
------
out: array-like or float
Brightness temperature in Kelvin
"""
ba = 1 * u.sr
equiv = u.brightness_temperature(ba, freq * u.MHz)
return (intensity * u.Jy).to(u.K, equivalencies=equiv).value
def k2jysr(temp, freq):
"""
Convert K to Jy/sr.
Parameters
----------
temp: array-like
Brightness temperature in Kelvin
freq: float
Frequency of the map in MHz
Return
------
out: array-like or float
Intensity (brightness) in Jy/sr
"""
ba = 1 * u.sr
equiv = u.brightness_temperature(ba, freq * u.MHz)
return (temp * u.K).to(u.Jy, equivalencies=equiv).value
def jybeam2k(intensity, freq, beam_width):
"""
Convert Jy/beam to K.
Parameters
----------
intensity: array-like
Intensity (brightness) in Jy/beam
freq: float
Frequency of the map in MHz
beam_width: float
The Gaussian beam width in degree
Return
------
out: array-like or float
Brightness temperature in Kelvin
"""
ba = beam_area(beam_width) * u.Unit('deg2')
equiv = u.brightness_temperature(ba, freq * u.MHz)
return (intensity * u.Jy).to(u.K, equivalencies=equiv).value
def k2jybeam(temp, freq, beam_width):
"""
Convert K to Jy/beam.
Parameters
----------
temp: array-like
Brightness temperature in Kelvin
freq: float
Frequency of the map in MHz
beam_width: float
The Gaussian beam width in degree
Return
------
out: array-like or float
Intensity (brightness) in Jy/beam
"""
ba = beam_area(beam_width) * u.Unit('deg2')
equiv = u.brightness_temperature(ba, freq * u.MHz)
return (temp * u.K).to(u.Jy, equivalencies=equiv).value
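# A round-trip sanity sketch (hypothetical values, not part of the original module):
# t = jybeam2k(1.0, freq=150.0, beam_width=0.1) # 1 Jy/beam at 150 MHz -> K
# k2jybeam(t, freq=150.0, beam_width=0.1) # ~1.0 Jy/beam again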
|
{"hexsha": "e4eed16c02856c9d0a0f402075bac3c9145f75c0", "size": 3165, "ext": "py", "lang": "Python", "max_stars_repo_path": "astro.py", "max_stars_repo_name": "piyanatk/simcube_tools", "max_stars_repo_head_hexsha": "e56b1cb4bc6cc84d2c5933d7c8871d7e6799be46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "astro.py", "max_issues_repo_name": "piyanatk/simcube_tools", "max_issues_repo_head_hexsha": "e56b1cb4bc6cc84d2c5933d7c8871d7e6799be46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astro.py", "max_forks_repo_name": "piyanatk/simcube_tools", "max_forks_repo_head_hexsha": "e56b1cb4bc6cc84d2c5933d7c8871d7e6799be46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2720588235, "max_line_length": 78, "alphanum_fraction": 0.595892575, "include": true, "reason": "import numpy,import astropy", "num_tokens": 826}
|
\section{Group Communication}
\label{Sec::Group}
\ba supports publish/subscribe-based group communication.
Actors can join and leave groups and send messages to groups.
\begin{lstlisting}
std::string group_module = ...;
std::string group_id = ...;
auto grp = group::get(group_module, group_id);
self->join(grp);
self->send(grp, atom("test"));
self->leave(grp);
\end{lstlisting}
\subsection{Anonymous Groups}
\label{Sec::Group::Anonymous}
Groups created on-the-fly with \lstinline^group::anonymous()^ can be used to coordinate a set of workers.
Each call to \lstinline^group::anonymous()^ returns a new, unique group instance.
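For instance, each of the following calls yields a distinct group (a minimal sketch, assuming \lstinline^self^ denotes an actor as in the examples above):
\begin{lstlisting}
auto grp1 = group::anonymous();
auto grp2 = group::anonymous();
// messages sent to grp1 are not delivered
// to subscribers of grp2
self->join(grp1);
self->send(grp1, atom("ping"));
\end{lstlisting}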
\subsection{Local Groups}
\label{Sec::Group::Local}
The \lstinline^"local"^ group module creates groups for in-process communication.
For example, a group for GUI related events could be identified by \lstinline^group::get("local", "GUI events")^.
The group ID \lstinline^"GUI events"^ uniquely identifies a singleton group instance of the module \lstinline^"local"^.
\subsection{Spawn Actors in Groups}
\label{Sec::Group::Spawn}
The function \lstinline^spawn_in_group^ can be used to create actors as members of a group.
The function causes the newly created actors to call \lstinline^self->join(...)^ immediately and before \lstinline^spawn_in_group^ returns.
The usage of \lstinline^spawn_in_group^ is identical to that of \lstinline^spawn^, except for an additional group argument.
The group handle is always the first argument, as shown in the examples below.
\begin{lstlisting}
void fun1();
void fun2(int, float);
class my_actor1 : event_based_actor { /* ... */ };
class my_actor2 : event_based_actor {
// ...
my_actor2(const std::string& str) { /* ... */ }
};
// ...
auto grp = group::get(...);
auto a1 = spawn_in_group(grp, fun1);
auto a2 = spawn_in_group(grp, fun2, 1, 2.0f);
auto a3 = spawn_in_group<my_actor1>(grp);
auto a4 = spawn_in_group<my_actor2>(grp, "hello my_actor2!");
\end{lstlisting}
|
{"hexsha": "5df24de7a59ad2f261e8ca3159ffccf82b7ec06a", "size": 1934, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/GroupCommunication.tex", "max_stars_repo_name": "syoummer/boost.actor", "max_stars_repo_head_hexsha": "58f35499bac8871b8f5b0b024246a467b63c6fb0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-03-20T21:11:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-20T08:05:41.000Z", "max_issues_repo_path": "manual/GroupCommunication.tex", "max_issues_repo_name": "syoummer/boost.actor", "max_issues_repo_head_hexsha": "58f35499bac8871b8f5b0b024246a467b63c6fb0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual/GroupCommunication.tex", "max_forks_repo_name": "syoummer/boost.actor", "max_forks_repo_head_hexsha": "58f35499bac8871b8f5b0b024246a467b63c6fb0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1923076923, "max_line_length": 140, "alphanum_fraction": 0.740434333, "num_tokens": 506}
|
import pylab as plt
from scrawl.imagine import ImageDisplay
import numpy as np
data = np.random.random((100, 100))
ImageDisplay(data)
# TESTS:
# all zero data
# fig, ax = plt.subplots(1,1, figsize=(2.5, 10), tight_layout=True)
# ax.set_ylim(0, 250)
# sliders = AxesSliders(ax, 0.2, 0.7, slide_axis='y')
# sliders.connect()
# plt.show()
|
{"hexsha": "2fa75de69fe1053d5506a0bb2e99d270a68e0303", "size": 339, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_imaging.py", "max_stars_repo_name": "astromancer/graphical", "max_stars_repo_head_hexsha": "2d72407c53967714953485dd52ad72e34e549ef5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_imaging.py", "max_issues_repo_name": "astromancer/graphical", "max_issues_repo_head_hexsha": "2d72407c53967714953485dd52ad72e34e549ef5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_imaging.py", "max_forks_repo_name": "astromancer/graphical", "max_forks_repo_head_hexsha": "2d72407c53967714953485dd52ad72e34e549ef5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1875, "max_line_length": 67, "alphanum_fraction": 0.6991150442, "include": true, "reason": "import numpy", "num_tokens": 109}
|
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
def int_diff(a):
"""
Make a new array of the same size, where each element is the
difference between the current element and the preceding one.
For example:
[0,0,1,1,1,0,0,0,2,3,-2,-3] -> [0,0,1,0,0,-1,0,0,2,1,-5,-1]
:param a: input array
:return: array of differences
"""
return np.diff(a.astype(int), prepend=0)
def event_ticker(a):
"""
Takes an "event" array (see param). If an event occurs it will increment a counter that will increase for
positive events or decrease for negative events. The counter will continue to increment (or decrement) until
a new event occurs. Note that repeated identical events has no effect (i.e., it results in the same values as
if no event occurred).
:param a: "event" array. A value of 0 indicates no event. Events can be either positive or negative (typically
-1 or 1 though).
:return: same size array that provides a "history" of event durations.
"""
direction = -1
counter = 0
ctr = np.zeros(len(a))
for i, el in enumerate(a):
# an event sets the counting direction (the counter itself is not reset)
if el != 0:
direction = el
else:
counter += direction
ctr[i] = counter
return ctr
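# A worked sketch of event_ticker (hypothetical input, not part of the original module):
# event_ticker(np.array([1, 0, 0, -1, 0, 0])) -> [0., 1., 2., 2., 1., 0.]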
def ticker_r2(a1, a2):
b1 = int_diff(a1)
b2 = int_diff(a2)
ev1 = event_ticker(b1)
ev2 = event_ticker(b2)
return r2_score_wrapper(ev1, ev2)
def ticker_mse(a1, a2):
b1 = int_diff(a1)
b2 = int_diff(a2)
ev1 = event_ticker(b1)
ev2 = event_ticker(b2)
return mse_wrapper(ev1, ev2)
def bconvolve_score(a1, a2):
pn1 = np.where(a1>0, 1, -1)
pn2 = np.where(a2>0, 1, -1)
l = max(len(a1), len(a2))
return np.max(np.convolve(pn1, pn2, mode='valid')) / l
def r2_score_wrapper(a1, a2):
s = r2_score(a1.astype(int), a2.astype(int))
return s if s >= 0 else 0
def mse_wrapper(a1, a2):
mse = mean_squared_error(a1.astype(int), a2.astype(int))
return sigmoid(-mse)
def conv_var(a1, a2, var_penalty=30):
# var_penalty default empirically chosen to give a decent sized penalty to differing variances
var_diff = np.var(a1) - np.var(a2)
# we apply a gaussian to normalize the diff between 0 and 1. A diff of 0->1, a diff of ±inf->0
var_factor = gaussian(var_penalty*var_diff)
return bconvolve_score(a1, a2) * var_factor
def sigmoid(x):
return 1/(1 + np.exp(-x))
def gaussian(x):
return np.exp(-np.power(x, 2.))
class ConvVar():
def __init__(self, var_penalty=30):
# var_penalty default empirically chosen to give a decent sized penalty to differing variances
self.var_penalty = var_penalty
def score(self, a1, a2):
var_diff = np.var(a1) - np.var(a2)
# we apply a gaussian to normalize the diff between 0 and 1. A diff of 0->1, a diff of ±inf->0
var_factor = gaussian(self.var_penalty * var_diff)
return bconvolve_score(a1, a2) * var_factor
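# A minimal usage sketch (hypothetical arrays, not part of the original module):
# a = np.array([0, 0, 1, 1, 0, 0])
# b = np.array([0, 1, 1, 0, 0, 0])
# ConvVar().score(a, b) # closer to 1 when the series align and their variances match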
|
{"hexsha": "5316969bdab0b4a43ab9b232d17c59434ae9666e", "size": 3054, "ext": "py", "lang": "Python", "max_stars_repo_path": "scorers.py", "max_stars_repo_name": "forforf/boolean-series-scorers", "max_stars_repo_head_hexsha": "6dfa6e5da414739c3ef9531e7fd4ed1e0ce8552f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scorers.py", "max_issues_repo_name": "forforf/boolean-series-scorers", "max_issues_repo_head_hexsha": "6dfa6e5da414739c3ef9531e7fd4ed1e0ce8552f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scorers.py", "max_forks_repo_name": "forforf/boolean-series-scorers", "max_forks_repo_head_hexsha": "6dfa6e5da414739c3ef9531e7fd4ed1e0ce8552f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2376237624, "max_line_length": 115, "alphanum_fraction": 0.6502946955, "include": true, "reason": "import numpy", "num_tokens": 897}
|
import sys
import time
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from profilehooks import profile
import basis_forms
import quadrature
from basis_forms import BasisForm
from function_space import FunctionSpace
from helpers import unblockshaped
from mesh import CrazyMesh
from polynomials import edge_basis, lagrange_basis
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
class AbstractForm(ABC):
"""Abstract class for k-forms."""
def __init__(self, *args):
self.function_space = args[0]
self.mesh = self.function_space.mesh
self.p = self.function_space.p
self._cochain = None
self._basis = None
if len(args) == 2:
if args[1] is callable:
self.function = args[1]
elif isinstance(args[1], (np.ndarray)):
self.cochain = args[1]
self._cochain_local = None
@property
def cochain_local(self):
"""Map the cochain elements into local dof with the dof map."""
if self._cochain_local is None:
self._cochain_local = self.cochain[np.transpose(self.function_space.dof_map.dof_map)]
return self._cochain_local
@cochain_local.setter
def cochain_local(self, cochain):
try:
assert np.shape(cochain)[-1] == (self.function_space.mesh.num_elements)
self._cochain_local = cochain
except AssertionError as e:
raise AssertionError(
"The number of the local cochain columns should be equal to the number of elements")
@property
def basis(self):
"""Return the basis function correponding to the element type."""
if self._basis is None:
# get name of the class of the basis functions
elem_type = self.function_space.str_to_elem[self.function_space.form_type]
self.basis = getattr(basis_forms, elem_type)(self.function_space)
return self._basis
@basis.setter
def basis(self, basis):
self._basis = basis
@property
def cochain(self):
"""Cochain relative to a form.
It is a ndarry having for rows the dofs and in the columns the number of the element.
"""
return self._cochain
@cochain.setter
def cochain(self, cochain):
try:
assert np.shape(cochain) == (self.function_space.num_dof,)
self._cochain = cochain
except AssertionError:
raise AssertionError(
"The dofs of the cochain do not match the dofs of the function space. \n \
shape cochain {0}, number of degrees of freedom : {1}" .format(np.shape(cochain), self.function_space.num_dof))
def cochain_to_global(self):
"""Map the local dofs of the cochain into the global cochain."""
self._cochain = np.zeros((self.function_space.num_dof))
dof_map = np.transpose(self.function_space.dof_map.dof_map)
# reorder degrees of freedom
for i, row in enumerate(self.cochain_local):
for j, dof_value in enumerate(row):
self._cochain[dof_map[i, j]] = dof_value
def export_to_plot(self):
"""Return x, y coordinates and data to plot."""
try:
num_pts_y, num_pts_x = np.shape(self.basis.xi)
num_el_x, num_el_y = self.function_space.mesh.n_x, self.function_space.mesh.n_y
x, y = self.function_space.mesh.mapping(self.basis.xi, self.basis.eta)
x_4d = x.reshape(num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
x = np.moveaxis(x_4d, 2, 1).reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
y_4d = y.reshape(num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
y = np.rollaxis(y_4d, 2, 1).reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
recon_4d = self.reconstructed.reshape(
num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
reconstructed = np.moveaxis(recon_4d, 2, 1).ravel('F').reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
return (x, y), reconstructed
except AttributeError:
raise AttributeError("The mesh is not crazy")
def l_2_norm(self, func, quad='gauss'):
"""Calculate the L_2 error between a function and the reconstructed form.
Parameters
----------
func : callable
Analytical function
quad (optional) : quadrature type
Quadrature used for the integration. The quadrature is fed to parse_quad_type.
Returns
-------
l_2_norm : float
L_2 norm between the reconstructed form and an analytical function
local_error: float
Sum of the error at the quadrature points.
"""
self.basis.quad_grid = quad
quad_weights_2d = np.kron(self.basis._quad_weights[0], self.basis._quad_weights[1]).reshape(
np.size(self.basis._quad_weights[0]) * np.size(self.basis._quad_weights[1]), 1)
# reconstruct cochain at quadrature pts
self.reconstruct(self.basis._quad_nodes[0], self.basis._quad_nodes[1])
pts_per_element = np.size(quad_weights_2d)
x, y = self.function_space.mesh.mapping(self.basis.xi, self.basis.eta)
# evaluate functions at the domain and reshape them into shape = (num_quad_pts, num_elements)
func_eval = func(x, y).reshape(
pts_per_element, self.function_space.mesh.num_elements, order='F')
g = self.function_space.mesh.g(self.basis.xi, self.basis.eta).reshape(
pts_per_element, self.function_space.mesh.num_elements, order='F')
# error at the quadrature points
local_error = (self.reconstructed - func_eval)**2
# integrate to get the l_2 norm
global_error = local_error * g * quad_weights_2d
return np.sum(global_error)**0.5, np.sum(local_error)**0.5
@abstractmethod
def discretize(self):
raise NotImplementedError
@abstractmethod
def reconstruct(self, xi, eta):
raise NotImplementedError
def save(self):
raise NotImplementedError
def export(self):
raise NotImplementedError
class Form_0(AbstractForm):
"""Create a 0-form."""
def __init__(self, *args):
# TODO: after reconstruction and discretization the cochain is 2d
super().__init__(*args)
self.reconstructed = None
def discretize(self, func):
"""Project a function onto a finite dimensional space of 0-forms."""
# discretize function and reshape as (# dof per element, # num elements )
self.cochain_local = func(*self.mesh.mapping(*self.basis.basis_nodes))
self.cochain_to_global()
def reconstruct(self, xi, eta):
"""Reconstruct the 0-form on the physical domain."""
self.basis.evaluate_basis(domain=(xi, eta))
self.reconstructed = np.tensordot(self.basis.basis, self.cochain_local, axes=([0], [0]))
def export(self):
raise NotImplementedError
class ExtGaussForm_0(Form_0):
"""A 0-form with extended gauss grid."""
def __init__(self, *args):
# TODO: after reconstruction and discretization the cochain is 2d
super().__init__(*args)
@property
def cochain_local_internal(self):
return self.cochain_local[:self.basis.num_basis]
@property
def cochain_internal(self):
return self.cochain[:self.basis.num_basis * self.function_space.mesh.num_elements]
def reconstruct(self, xi, eta):
"""Reconstruct the 0-form on the physical domain."""
self.basis.evaluate_basis(domain=(xi, eta))
self.reconstructed = np.tensordot(
self.basis.basis, self.cochain_local_internal, axes=([0], [0]))
def export(self):
raise NotImplementedError
class Form_1(AbstractForm):
"""Class for 1-forms."""
def __init__(self, *args):
super().__init__(*args)
self.reconstructed_dx = None
self.reconstructed_dy = None
@property
def cochain_xi(self):
"""Return the dx component of the cochain."""
return self._cochain[:self.basis.num_basis_xi]
@property
def cochain_eta(self):
"""Return the dy component of the cochain."""
return self._cochain[-self.basis.num_basis_eta:]
def split_cochain(self, cochain):
"""Split the cochain in the dx and dy component."""
return cochain[:self.basis.num_basis_xi], cochain[-self.basis.num_basis_eta:]
def discretize(self, func, quad='gauss'):
"""Discretize a vector function into a one form."""
self.basis.quad_grid = quad
quad, (p_x, p_y) = self.basis.quad_grid
xi_ref, eta_ref = np.meshgrid(self.basis._quad_nodes[0], self.basis._quad_nodes[1])
edges_size = [self.basis._edge_nodes[i][1:] - self.basis._edge_nodes[i][:-1]
for i in range(2)]
magic_factor = 0.5
cell_nodes = [(0.5 * (edges_size[i][np.newaxis, :]) *
(self.basis._quad_nodes[i][:, np.newaxis] + 1) + self.basis._edge_nodes[i][:-1]).ravel('F') for i in range(2)]
quad_eta_for_dx = np.tile(self.basis._edge_nodes[1], (p_x + 1, self.p[0]))
quad_xi_for_dx = np.repeat(cell_nodes[0].reshape(
p_x + 1, self.p[0], order='F'), self.p[1] + 1, axis=1)
quad_xi_for_dy = np.repeat(
self.basis._edge_nodes[0], (p_y + 1) * self.p[1]).reshape(p_y + 1, (self.p[0] + 1) * self.p[1], order='F')
quad_eta_for_dy = np.tile(cell_nodes[1].reshape(
p_y + 1, self.p[1], order='F'), (1, self.p[0] + 1))
x_dx, y_dx = self.mesh.mapping(quad_xi_for_dx, quad_eta_for_dx)
cochain_local_xi = np.tensordot(self.basis._quad_weights[0], func[0](x_dx, y_dx) * self.mesh.dx_dxi(
quad_xi_for_dx, quad_eta_for_dx) + func[1](x_dx, y_dx) * self.mesh.dy_dxi(quad_xi_for_dx, quad_eta_for_dx),
axes=((0), (0))) * np.repeat(edges_size[0] * magic_factor, self.p[1] + 1).reshape(self.p[0] * (self.p[1] + 1), 1)
x_dy, y_dy = self.mesh.mapping(quad_xi_for_dy, quad_eta_for_dy)
cochain_local_eta = np.tensordot(self.basis._quad_weights[1], func[0](x_dy, y_dy) * self.mesh.dx_deta(
quad_xi_for_dy, quad_eta_for_dy) + func[1](x_dy, y_dy) * self.mesh.dy_deta(
quad_xi_for_dy, quad_eta_for_dy), axes=((0), (0))) * \
np.tile(edges_size[1] * magic_factor, (self.p[0] + 1, 1)).reshape(self.p[1]
* (self.p[0] + 1), 1)
self.cochain_local = np.vstack((cochain_local_xi, cochain_local_eta))
self.cochain_to_global()
return quad_xi_for_dy, quad_eta_for_dy, cochain_local_eta
def reconstruct(self, xi, eta):
"""Reconstruct the form on the computational domain.
Given the values of the degrees of freedom in each element the function
reconstruct the form through the basis functions.
"""
self.basis.evaluate_basis(domain=(xi, eta))
xi, eta = self.basis.xi.ravel('F'), self.basis.eta.ravel('F')
cochain_xi, cochain_eta = self.split_cochain(self.cochain_local)
g = self.mesh.g(xi, eta)
self.reconstructed_dx = 1 / g * (
self.mesh.dy_deta(xi, eta) * np.tensordot(self.basis.basis_xi,
cochain_xi, axes=((0), (0)))
- self.mesh.dy_dxi(xi, eta) * np.tensordot(self.basis.basis_eta,
cochain_eta, axes=((0), (0)))
)
self.reconstructed_dy = 1 / g * (-self.mesh.dx_deta(xi, eta) * np.tensordot(self.basis.basis_xi, cochain_xi, axes=(
(0), (0))) + self.mesh.dx_dxi(xi, eta) * np.tensordot(self.basis.basis_eta, cochain_eta, axes=((0), (0))))
def export_to_plot(self):
"""Export the domain and the correspondent values for the reconstruction.
Return
------
(x, y) : tuple of ndarrays
Contain the x and y coordinates of the domain
reconstructed : ndarray
the reconstrued values of the form for the domain pts
"""
try:
num_pts_y, num_pts_x = np.shape(self.basis.xi)
num_el_x, num_el_y = self.function_space.mesh.n_x, self.function_space.mesh.n_y
x, y = self.function_space.mesh.mapping(self.basis.xi, self.basis.eta)
# print(x[0, 0, :])
x_4d = x.reshape(num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
x = np.moveaxis(x_4d, 2, 1).reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
y_4d = y.reshape(num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
y = np.rollaxis(y_4d, 2, 1).reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
recon_4d_dx = self.reconstructed_dx.reshape(
num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
reconstructed_dx = np.moveaxis(recon_4d_dx, 2, 1).ravel('F').reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
recon_4d_dy = self.reconstructed_dy.reshape(
num_pts_y, num_pts_x, num_el_y, num_el_x, order='F')
reconstructed_dy = np.moveaxis(recon_4d_dy, 2, 1).ravel('F').reshape(
num_el_y * num_pts_y, num_el_x * num_pts_x, order='F')
return (x, y), reconstructed_dx, reconstructed_dy
except AttributeError:
raise AttributeError("The mesh is not crazy")
def l_2_norm(self, vector_func, quad='gauss'):
"""Calculate the L_2 error between a function and the reconstructed form.
Parameters
----------
vector_func : list or tuple of callable
Analytical vector function having two components x nad y
quad (optional) : quadrature type
Quadrature used for the integration. The quadrature is fed to parse_quad_type.
Returns
-------
l_2_norm : float
L_2 norm between the reconstructed form and an analytical vector function
local_error: float
Sum of the error at the quadrature points.
"""
func_x, func_y = vector_func
self.basis.quad_grid = quad
quad_weights_2d = np.kron(self.basis._quad_weights[0], self.basis._quad_weights[1]).reshape(
np.size(self.basis._quad_weights[0]) * np.size(self.basis._quad_weights[1]), 1)
# reconstruct cochain at quadrature pts
self.reconstruct(self.basis._quad_nodes[0], self.basis._quad_nodes[1])
pts_per_element = np.size(quad_weights_2d)
x, y = self.function_space.mesh.mapping(self.basis.xi, self.basis.eta)
# evaluate functions at the domain and reshape them into shape = (num_quad_pts, num_elements)
func_eval_x = func_x(x, y).reshape(
pts_per_element, self.function_space.mesh.num_elements, order='F')
func_eval_y = func_y(x, y).reshape(
pts_per_element, self.function_space.mesh.num_elements, order='F')
g = self.function_space.mesh.g(self.basis.xi, self.basis.eta).reshape(
pts_per_element, self.function_space.mesh.num_elements, order='F')
# error at the quadrature points
local_error = (self.reconstructed_dx - func_eval_x)**2 + \
(self.reconstructed_dy - func_eval_y)**2
# integrate to get the l_2 norm
#
global_error = local_error * g * quad_weights_2d
return np.sum(global_error)**0.5, np.sum(local_error)**0.5
class ExtGaussForm_1(Form_1):
"""1-Form with Extended gauss edges."""
def __init__(self, *args):
super().__init__(*args)
@property
def cochain_xi(self):
"""Return the dx component of the cochain."""
return self._cochain[:self.basis.num_basis_xi]
@property
def cochain_eta(self):
"""Return the dy component of the cochain."""
return self._cochain[self.basis.num_basis_xi: self.basis.num_basis]
@property
def cochain_local(self):
"""Map the cochain elements into local dof with the dof map."""
if self._cochain_local is None:
self._cochain_local = self.cochain[np.transpose(
self.function_space.dof_map.dof_map_internal)]
return self._cochain_local[:self.basis.num_basis]
@cochain_local.setter
def cochain_local(self, cochain):
# note: the ghost cochain can be assigned as local, but accessing
# cochain_local returns only the internal (non-ghost) part
try:
assert np.shape(cochain)[-1] == (self.function_space.mesh.num_elements)
self._cochain_local = cochain
except AssertionError as e:
raise AssertionError(
"The number of the local cochain columns should be equal to the number of elements")
# @property
# def cochain_local_with_ghosts(self):
# """Map the cochain elements into local dof with the dof map."""
# if self._cochain_local is None:
# self._cochain_local = self.cochain[np.transpose(self.function_space.dof_map.dof_map)]
# return self._cochain_local
#
# @cochain_local_with_ghosts.setter
# def cochain_local_with_ghosts(self, cochain):
# try:
# assert np.shape(cochain)[-1] == (self.function_space.mesh.num_elements)
# self._cochain_local = cochain
# except AssertionError as e:
# raise AssertionError(
# "The number of the local cochain columns should be equal to the number of elements")
#
def cochain_to_global(self):
"""Map the local dofs of the cochain into the global cochain."""
self.cochain = np.zeros((self.function_space.num_dof))
if np.shape(self.cochain_local) == (self.basis.num_basis, self.function_space.mesh.num_elements):
dof_map = np.transpose(self.function_space.dof_map.dof_map[:, :self.basis.num_basis])
else:
dof_map = np.transpose(self.function_space.dof_map.dof_map)
# reorder degrees of freedom
for i, row in enumerate(self.cochain_local):
for j, dof_value in enumerate(row):
self.cochain[dof_map[i, j]] = dof_value
def split_cochain(self, cochain):
"""Split the cochain in the dx and dy component."""
return cochain[:self.basis.num_basis_xi], cochain[self.basis.num_basis_xi: self.basis.num_basis]
class Form_2(AbstractForm):
"""Class of two forms."""
def __init__(self, *args):
super().__init__(*args)
self.reconstructed = None
def discretize(self, func, quad='gauss'):
# TODO: replace self.p with len(self._face_nodes)
"""Project a function into a finite element space of 2-forms.
The projection is done in the reference element. It follows the inverse of the pullback to project into the physical domain.
"""
# calculate quadrature nodes and weights
self.basis.quad_grid = quad
quad, (p, _) = self.basis.quad_grid
xi_ref, eta_ref = np.meshgrid(self.basis._quad_nodes[0], self.basis._quad_nodes[1])
quad_weights = np.kron(self.basis._quad_weights[0], self.basis._quad_weights[1])
# calculate the dimension of the edges of the cells
dim_faces = [self.basis._face_nodes[i][1:] - self.basis._face_nodes[i][:-1]
for i in range(2)]
# set up the right amount of x and y dimensions of the edges of the cell
x_dim = np.repeat(dim_faces[0], self.p[1])
y_dim = np.tile(dim_faces[1], self.p[0])
magic_factor = 0.25
cell_nodes = [(0.5 * (dim_faces[i][np.newaxis, :]) *
(self.basis._quad_nodes[i][:, np.newaxis] + 1) + self.basis._face_nodes[i][:-1]).ravel('F') for i in range(2)]
cell_area = np.diag(magic_factor * x_dim * y_dim)
# xi coordinates of the quadrature nodes
# in the columns are stored the coordinates of the quad points for constant xi for all faces
xi = np.repeat(np.repeat(cell_nodes[0], (p + 1)
).reshape((p + 1)**2, self.p[0], order='F'), self.p[1], axis=1)
eta = np.tile(np.tile(cell_nodes[1].reshape(
p + 1, self.p[1], order='F'), (p + 1, 1)), self.p[0])
# map onto the physical domain and compute the Jacobian
x, y = self.mesh.mapping(xi, eta)
g = self.mesh.g(xi, eta)
# compute the cochain integrating and then applying inverse pullback
self.cochain_local = np.sum(np.tensordot(quad_weights, (func(x, y) * g),
axes=((0), (0))) * cell_area[:, :, np.newaxis], axis=0)
self.cochain_to_global()
def reconstruct(self, xi, eta):
"""Reconstruct a cochain into a 2-Form.
The reconstructed form is stored into the attribute reconstructed
Parameters
----------
xi : array
1D array with the xi coordinates of the domain of reconstruction on the ref. element
eta : array
1D array with the eta coordinates of the domain of reconstruction on the ref. element
"""
self.basis.evaluate_basis(domain=(xi, eta))
xi, eta = self.basis.xi.ravel('F'), self.basis.eta.ravel('F')
self.reconstructed = np.tensordot(
self.basis.basis, self.cochain_local, axes=((0), (0))) / self.mesh.g(xi, eta)
class ExtGaussForm_2(Form_2):
def __init__(self, *args):
super().__init__(*args)
def discretize(self, func, quad='gauss'):
"""Project a function into a finite element space of 2-forms.
The projection is done in the reference element. It follows the inverse of the pullback to project into the physical domain.
"""
# calculate quadrature nodes and weights
self_p = (self.p[0] + 2, self.p[1] + 2)
self.basis.quad_grid = quad
quad, (p, _) = self.basis.quad_grid
xi_ref, eta_ref = np.meshgrid(self.basis._quad_nodes[0], self.basis._quad_nodes[1])
quad_weights = np.kron(self.basis._quad_weights[0], self.basis._quad_weights[1])
# calculate the dimension of the edges of the cells
dim_faces = [self.basis._face_nodes[i][1:] - self.basis._face_nodes[i][:-1]
for i in range(2)]
# set up the right amount of x and y dimensions of the edges of the cell
x_dim = np.repeat(dim_faces[0], self_p[1])
y_dim = np.tile(dim_faces[1], self_p[0])
magic_factor = 0.25
cell_nodes = [(0.5 * (dim_faces[i][np.newaxis, :]) *
(self.basis._quad_nodes[i][:, np.newaxis] + 1) + self.basis._face_nodes[i][:-1]).ravel('F') for i in range(2)]
cell_area = np.diag(magic_factor * x_dim * y_dim)
# xi coordinates of the quadrature nodes
# in the columns are stored the coordinates of the quad points for constant xi for all faces
xi = np.repeat(np.repeat(cell_nodes[0], (p + 1)
).reshape((p + 1)**2, self_p[0], order='F'), self_p[1], axis=1)
eta = np.tile(np.tile(cell_nodes[1].reshape(
p + 1, self_p[1], order='F'), (p + 1, 1)), self_p[0])
# map onto the physical domain and compute the Jacobian
x, y = self.mesh.mapping(xi, eta)
g = self.mesh.g(xi, eta)
# compute the cochain integrating and then applying inverse pullback
self.cochain_local = np.sum(np.tensordot(quad_weights, (func(x, y) * g),
axes=((0), (0))) * cell_area[:, :, np.newaxis], axis=0)
self.cochain_to_global()
def Form(*args):
"""Wrap around the classes of forms."""
# TODO: create dictionary for forms
return getattr(sys.modules[__name__], args[0].str_to_form[args[0].form_type])(*args)
def cochain_to_global(function_space, cochain_local):
"""Map the local dofs of the cochain into the global cochain."""
cochain = np.zeros(((function_space.num_dof)))
dof_map = np.transpose(function_space.dof_map.dof_map)
# reorder degrees of freedom
for i, row in enumerate(cochain_local):
try:
for j, dof_value in enumerate(row):
cochain[dof_map[i, j]] = dof_value
except TypeError as t:
raise TypeError("The cochain provided in cochain_to_global is one dimensional")
return cochain
def cochain_to_local(function_space, cochain):
"""Map the cochain elements into local dof with the dof map."""
return cochain[np.transpose(function_space.dof_map.dof_map)]
def func(x, y):
return (x + y) / (x + y)
if __name__ == "__main__":
# p_s = [(2, 2)]
# n = (1, 1)
# for p in p_s:
# mesh = CrazyMesh(2, n, ((-1, 1), (-1, 1)), 0.0)
# func_space = FunctionSpace(mesh, '1-lobatto', p)
# form_1 = Form(func_space)
# form_1.discretize((func, func))
# p = 3, 3
# nx, ny = 2, 2
crazy_mesh = CrazyMesh(2, (1, 1), ((-1, 1), (-1, 1)), curvature=0)
function_space = FunctionSpace(crazy_mesh, '0-gauss', 0)
f0 = Form(function_space)
def p(x,y): return np.sin(np.pi*x) * np.cos(np.pi*y)
f0.discretize(p)
print(f0.basis.inner(f0.basis))
# cochain = np.ones(function_space.num_dof)
# print(cochain)
# # basis = BasisForm(function_space)
# form_1 = Form(function_space, cochain)
# xi = eta = np.linspace(-1, 1, 5)
# xi = eta = quadrature.lobatto_quad(3)[0]
# form_1.reconstruct(xi, eta)
# print(form_1.reconstructed)
# print(np.shape(form_1.reconstructed))
#
# form_1.reconstructed_dx = np.arange(50).reshape(25, 2)
# print(form_1.reconstructed_dx)
# form_1.plot()
# np.__config__.show()
# function_space = FunctionSpace(crazy_mesh, '0-lobatto', p)
# form_2 = Form(function_space)
# form_2.discretize(func)
# print(form_2.cochain)
|
{"hexsha": "a4d1ccd93e44ca46a1e54da53f852f4f56d81ba3", "size": 26134, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/forms.py", "max_stars_repo_name": "Idate96/Mimetic-Fem", "max_stars_repo_head_hexsha": "75ad3b982ef7ed7c6198f526d19dc460dec28f4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/forms.py", "max_issues_repo_name": "Idate96/Mimetic-Fem", "max_issues_repo_head_hexsha": "75ad3b982ef7ed7c6198f526d19dc460dec28f4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/forms.py", "max_forks_repo_name": "Idate96/Mimetic-Fem", "max_forks_repo_head_hexsha": "75ad3b982ef7ed7c6198f526d19dc460dec28f4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4825396825, "max_line_length": 133, "alphanum_fraction": 0.6231728782, "include": true, "reason": "import numpy", "num_tokens": 6764}
|
# implementation of MLJ measure interface for LossFunctions.jl
# Supervised Loss -- measure traits
is_measure_type(::Type{<:SupervisedLoss}) = true
orientation(::Type{<:SupervisedLoss}) = :loss
reports_each_observation(::Type{<:SupervisedLoss}) = true
is_feature_dependent(::Type{<:SupervisedLoss}) = false
MMI.supports_weights(::Type{<:SupervisedLoss}) = true
MMI.name(M::Type{<:SupervisedLoss}) = split(string(M), '.')[end]*"()"
MMI.docstring(M::Type{<:SupervisedLoss}) = name(M)
## DISTANCE BASED LOSS FUNCTION
MMI.prediction_type(::Type{<:DistanceLoss}) = :deterministic
MMI.target_scitype(::Type{<:DistanceLoss}) = AbstractArray{<:Continuous}
function value(measure::DistanceLoss, yhat, X, y, ::Nothing,
::Val{false}, ::Val{true})
return measure(yhat, y)
end
function value(measure::DistanceLoss, yhat, X, y, w,
::Val{false}, ::Val{true})
return w .* measure(yhat, y) ./ (sum(w)/length(y))
end
## MARGIN BASED LOSS FUNCTIONS
MMI.prediction_type(::Type{<:MarginLoss}) = :probabilistic
MMI.target_scitype(::Type{<:MarginLoss}) = AbstractArray{<:Binary}
# convert a Binary vector into vector of +1 or -1 values
# (for testing only):
pm1(y) = Int8(2) .* (Int8.(int(y))) .- Int8(3)
# rescale [0, 1] -> [-1, 1]
_scale(p) = 2p - 1
function value(measure::MarginLoss, yhat, X, y, ::Nothing,
::Val{false}, ::Val{true})
check_pools(yhat, y)
probs_of_observed = broadcast(pdf, yhat, y)
return broadcast(measure, _scale.(probs_of_observed), 1)
end
function value(measure::MarginLoss, yhat, X, y, w,
::Val{false}, ::Val{true})
return w .* value(measure, yhat, X, y, nothing) ./ (sum(w)/length(y))
end
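# A minimal sketch (hypothetical data; mirrors the unweighted distance-based path above):
# yhat = [1.2, 0.8]; y = [1.0, 1.0]
# value(L2DistLoss(), yhat, nothing, y, nothing, Val(false), Val(true))
# # == L2DistLoss()(yhat, y), i.e. per-observation losses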
|
{"hexsha": "1f4e373528ae6df4d44183413bbabc6eebb1c056", "size": 1739, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/measures/loss_functions_interface.jl", "max_stars_repo_name": "juliohm/MLJBase.jl", "max_stars_repo_head_hexsha": "2b8739834c869903bf304039931c74e03a5d41ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/measures/loss_functions_interface.jl", "max_issues_repo_name": "juliohm/MLJBase.jl", "max_issues_repo_head_hexsha": "2b8739834c869903bf304039931c74e03a5d41ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-19T09:18:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-19T16:19:02.000Z", "max_forks_repo_path": "src/measures/loss_functions_interface.jl", "max_forks_repo_name": "juliohm/MLJBase.jl", "max_forks_repo_head_hexsha": "2b8739834c869903bf304039931c74e03a5d41ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4423076923, "max_line_length": 80, "alphanum_fraction": 0.6474985624, "num_tokens": 533}
|
import numpy as np
# No other imports allowed!
class LeastSquaresLinearRegressor(object):
'''
Class providing a linear regression model
Fit by solving the "least squares" optimization.
Attributes
----------
* self.w_F : 1D array, size n_features (= F)
vector of weights for each feature
* self.b : float
scalar real bias
'''
def __init__(self):
''' Constructor of an sklearn-like regressor
Should do nothing. Attributes are only set after calling 'fit'.
'''
# Leave this alone
pass
def fit(self, x_NF, y_N):
''' Compute and store weights that solve least-squares
Returns
-------
Nothing.
Post-Condition
--------------
Internal attributes updated:
* self.w_F : vector of weights for each feature
* self.b : scalar real bias
Notes
-----
The least-squares optimization problem is:
\min_{w,b} \sum_{n=1}^N (y_n - w^T x_n - b)^2
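A standard closed-form solution: append a column of ones to x_NF, giving X';
then \theta = (X'^T X')^{-1} X'^T y solves the problem, with w = \theta[:-1]
and b = \theta[-1].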
'''
N, F = x_NF.shape
pass # TODO
def predict(self, x_NF):
''' Make prediction given input features x
Args
----
x_NF : 2D array, (n_examples, n_features) (N,F)
Each row is a feature vector for one example.
Returns
-------
yhat_N : 1D array, size N
Each value is the predicted scalar for one example
'''
# TODO FIX ME
return np.asarray([0.0])
if __name__ == '__main__':
## Simple example use case
# With toy dataset with N=100 examples
# created via a known linear regression model plus small noise
prng = np.random.RandomState(0)
N = 100
w_F = np.asarray([1.1, -2.2, 3.3])
x_NF = prng.randn(N, 3)
y_N = np.dot(x_NF, w_F) + 0.03 * prng.randn(N)
linear_regr = LeastSquaresLinearRegressor()
linear_regr.fit(x_NF, y_N)
yhat_N = linear_regr.predict(x_NF)
|
{"hexsha": "45cab7e05278d413bc90907b3d57b294403a4131", "size": 1978, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw1/LeastSquaresLinearRegression.py", "max_stars_repo_name": "tufts-ml-courses/comp135-19s-assignments", "max_stars_repo_head_hexsha": "d54f4356e022150d85cfa58ebbf8ccdf66e0f1a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-02-23T00:28:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-28T20:45:57.000Z", "max_issues_repo_path": "hw1/LeastSquaresLinearRegression.py", "max_issues_repo_name": "tufts-ml-courses/comp135-19s-assignments", "max_issues_repo_head_hexsha": "d54f4356e022150d85cfa58ebbf8ccdf66e0f1a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw1/LeastSquaresLinearRegression.py", "max_forks_repo_name": "tufts-ml-courses/comp135-19s-assignments", "max_forks_repo_head_hexsha": "d54f4356e022150d85cfa58ebbf8ccdf66e0f1a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-01-24T20:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T20:27:11.000Z", "avg_line_length": 24.1219512195, "max_line_length": 71, "alphanum_fraction": 0.5667340748, "include": true, "reason": "import numpy", "num_tokens": 510}
|
from ionotomo.utils.gaussian_process import *
import numpy as np
def test_level2_solve():
np.random.seed(1234)
K1 = SquaredExponential(2,l=0.29,sigma=3.7)
#K1.fixed = 'l'
#K1.fixed = 'sigma'
K2 = Diagonal(2,sigma=1e-5)
K2.fixed = 'sigma'
K3 = RationalQuadratic(2,sigma=1.)
K4 = MaternPIso(2,p=2)
K6 = GammaExponential(2)
K7 = PeriodicSep(2,0,l=0.5)
K7.fixed = 'l'
K8 = PeriodicSep(2,1,l=0.5)
K8.fixed='l'
K = K3 *K1+K2
hp = K.hyperparams
x = np.random.uniform(size=[250,2])
xstar = np.linspace(-1,2,100)
Xstar,Ystar = np.meshgrid(xstar,xstar)
xstar = np.array([Xstar.flatten(),Ystar.flatten()]).T
y = np.sin(x[:,0]*2*np.pi/0.5) *np.cos( x[:,1]*np.pi/0.5*2.) + np.random.normal(size=x.shape[0])*0.1
m_y = np.mean(y)
y -= m_y
sigma_y = 0.1
hyperparams = level2_solve(x,y,sigma_y,K)
K.hyperparams = hyperparams
print(K)
fstar,cov,log_mar_like = level1_solve(x,y,sigma_y,xstar,K)
import pylab as plt
vmin = np.min(y) + m_y
vmax = np.max(y) + m_y
plt.imshow(fstar.reshape(Xstar.shape)+m_y,extent=(-1,2,-1,2),origin='lower',vmin=vmin,vmax=vmax)
plt.scatter(x[:,0],x[:,1],c=y+m_y)
#plt.scatter(xstar[:,0],xstar[:,1],c=fstar,marker='+')
plt.show()
def test_log_mar_like_func():
K1 = SquaredExponential(2)
K2 = Diagonal(2)
K3 = RationalQuadratic(2)
K4 = GammaExponential(2)
K = K1 + K2 + K3 + K4
print(K)
hp = K.hyperparams
x = np.random.uniform(size=[100,2])
y = x[:,0]**2 + x[:,1]**(1./3.) + np.random.normal(size=100)*0.1
xstar = np.random.uniform(size=[100,2])
sigma_y = 0.1
lml,dlml = neg_log_mar_like_and_derivative(hp,x,y,sigma_y,K)
lml_ = log_mar_like(hp,x,y,sigma_y,K)
eps=1e-5
grad = np.zeros(len(hp))
for i in range(len(hp)):
hp[i] += eps
l_ = log_mar_like(hp,x,y,sigma_y,K)
grad[i] = -(l_ + lml)/eps
hp[i] -= eps
print(grad,dlml)
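def numerical_grad(f, x, eps=1e-6):
    """Central-difference gradient of f at x.

    Illustrative helper (not part of the ionotomo API): a second opinion on
    any analytic gradient, with O(eps^2) error versus the forward difference
    used in test_log_mar_like_func above.
    """
    g = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        x[i] += eps
        fp = f(x)
        x[i] -= 2*eps
        fm = f(x)
        x[i] += eps          # restore the perturbed coordinate
        g[i] = (fp - fm)/(2.0*eps)
    return g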
|
{"hexsha": "bf0c3c8d146125ca94ea578d946f14250168cd2b", "size": 1976, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ionotomo/tests/test_gaussian_process.py", "max_stars_repo_name": "Joshuaalbert/IonoTomo", "max_stars_repo_head_hexsha": "9f50fbac698d43a824dd098d76dce93504c7b879", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2017-06-22T08:47:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T12:33:02.000Z", "max_issues_repo_path": "src/ionotomo/tests/test_gaussian_process.py", "max_issues_repo_name": "Joshuaalbert/IonoTomo", "max_issues_repo_head_hexsha": "9f50fbac698d43a824dd098d76dce93504c7b879", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-03T15:21:19.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-03T15:48:31.000Z", "max_forks_repo_path": "src/ionotomo/tests/test_gaussian_process.py", "max_forks_repo_name": "Joshuaalbert/IonoTomo", "max_forks_repo_head_hexsha": "9f50fbac698d43a824dd098d76dce93504c7b879", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-01T16:20:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-07T15:09:02.000Z", "avg_line_length": 30.4, "max_line_length": 104, "alphanum_fraction": 0.5946356275, "include": true, "reason": "import numpy", "num_tokens": 736}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v4.0.
See :mod:`astropy.constants` for a complete listing of constants defined
in Astropy.
"""
import warnings
from astropy.utils import find_current_module
from . import codata2018, iau2015
from . import utils as _utils
codata = codata2018
iaudata = iau2015
_utils._set_c(codata, iaudata, find_current_module())
# Overwrite the following for consistency.
# https://github.com/astropy/astropy/issues/8920
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Constant .*already has a definition')
# Solar mass (derived from mass parameter and gravitational constant)
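    # (Illustrative) numerically, GM_sun / G ~ 1.3271e+20 / 6.6743e-11
    # ~ 1.989e+30 kg, the familiar value of the solar mass.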
M_sun = iau2015.IAU2015(
'M_sun', "Solar mass", iau2015.GM_sun.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_sun.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Jupiter mass (derived from mass parameter and gravitational constant)
M_jup = iau2015.IAU2015(
'M_jup', "Jupiter mass", iau2015.GM_jup.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_jup.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Earth mass (derived from mass parameter and gravitational constant)
M_earth = iau2015.IAU2015(
'M_earth', "Earth mass",
iau2015.GM_earth.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_earth.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Clean up namespace
del warnings
del find_current_module
del _utils
|
{"hexsha": "20b43259bbdc48320af8e84fd614a8148810e97c", "size": 1864, "ext": "py", "lang": "Python", "max_stars_repo_path": "astropy/constants/astropyconst40.py", "max_stars_repo_name": "MatiasRepetto/astropy", "max_stars_repo_head_hexsha": "689f9d3b063145150149e592a879ee40af1fac06", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-11T12:26:49.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-11T12:26:49.000Z", "max_issues_repo_path": "astropy/constants/astropyconst40.py", "max_issues_repo_name": "MatiasRepetto/astropy", "max_issues_repo_head_hexsha": "689f9d3b063145150149e592a879ee40af1fac06", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-09T18:54:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T18:54:27.000Z", "max_forks_repo_path": "astropy/constants/astropyconst40.py", "max_forks_repo_name": "MatiasRepetto/astropy", "max_forks_repo_head_hexsha": "689f9d3b063145150149e592a879ee40af1fac06", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.28, "max_line_length": 76, "alphanum_fraction": 0.6995708155, "include": true, "reason": "from astropy", "num_tokens": 538}
|
#!/usr/bin/python
import numpy as np
h2 = (1.0/2048)**2  # grid spacing squared; the original set h, not h^2,
                    # but f is all zeros here so the value was never felt
u = np.random.rand(2048, 2048)
# Dirichlet boundary conditions: zero on all four edges
u[0,:] = 0.0
u[:,0] = 0.0
u[2047,:] = 0.0
u[:,2047] = 0.0
f = np.zeros((2048, 2048))  # source term; np.ndarray() left it uninitialized
v = np.zeros((2048, 2048))  # scratch array, keeps the zero boundaries
print(u)
print(f)
for iter in range(1, 10):
    for i in range(1, 2047):   # interior points are 1..2046
        for j in range(1, 2047):
            v[i,j] = 0.25*(h2*f[i,j] + u[i-1,j] + u[i+1,j] + u[i,j-1] + u[i,j+1])
    u, v = v, u  # swap buffers; `u = v` aliased the arrays and broke Jacobi
print(u)
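def jacobi_sweep_vectorized(u, v, f, h2):
    # One Jacobi sweep over the interior via array slicing (illustrative
    # sketch, equivalent to the nested loops above); this is the usual way
    # to make the NumPy version competitive in this comparison.
    v[1:-1, 1:-1] = 0.25*(h2*f[1:-1, 1:-1]
                          + u[:-2, 1:-1] + u[2:, 1:-1]
                          + u[1:-1, :-2] + u[1:-1, 2:])
    return v, u  # new iterate first, old array reused as scratch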
|
{"hexsha": "462390cac14f504630ed0c3218fdfbbfe058bed7", "size": 436, "ext": "py", "lang": "Python", "max_stars_repo_path": "comparepythoncfortran/jacobiRelax/jacobiRelax.py", "max_stars_repo_name": "frasanz/MultigridMethods", "max_stars_repo_head_hexsha": "1e582f6945edcf46583f840fef4a8dc88f001baa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "comparepythoncfortran/jacobiRelax/jacobiRelax.py", "max_issues_repo_name": "frasanz/MultigridMethods", "max_issues_repo_head_hexsha": "1e582f6945edcf46583f840fef4a8dc88f001baa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "comparepythoncfortran/jacobiRelax/jacobiRelax.py", "max_forks_repo_name": "frasanz/MultigridMethods", "max_forks_repo_head_hexsha": "1e582f6945edcf46583f840fef4a8dc88f001baa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.9565217391, "max_line_length": 83, "alphanum_fraction": 0.5504587156, "include": true, "reason": "import numpy", "num_tokens": 190}
|
# 08.Songbird_OTUs.r
# Figure 4, Figure 7, Figure S7, Figure S8, Table S10, Table S11
# Ref for Songbird: Morton, J. T. et al. Establishing microbial composition measurement standards with reference frames. Nat. Commun. 10, 2719 (2019).
# Ref for Qurro: Fedarko, M. W. et al. Visualizing ’omic feature rankings and log-ratios using Qurro. NAR Genomics Bioinforma. 2, (2020).
# Water and sediment sequences were analyzed separately
#### Phyloseq Object ####
ps.noncontam.tree
#### Set Directory ####
songbird <- file.path(paste(path_phy, "Songbird", sep=""))
dir.create(songbird, showWarnings=FALSE)
setwd(songbird)
#### Prepare files for Songbird ####
# format taxonomy table
tax <- as(tax_table(ps.noncontam.tree),"matrix")
tax <- as.data.frame(tax)
tax$Kingdom <- paste("k", tax$Kingdom, sep="__")
tax$Phylum <- paste("p", tax$Phylum, sep="__")
tax$Class <- paste("c", tax$Class, sep="__")
tax$Order <- paste("o", tax$Order, sep="__")
tax$Family <- paste("f", tax$Family, sep="__")
tax$Genus <- paste("g", tax$Genus, sep="__")
tax$Species <- paste("s", tax$Species, sep="__")
tax_cols <- c("Kingdom", "Phylum", "Class","Order","Family","Genus","Species")
tax$taxonomy <- do.call(paste, c(tax[tax_cols], sep=";"))
for(co in tax_cols) tax[co]<-NULL
write.table(tax, "tax_for_qiime2.txt", quote=FALSE, col.names=FALSE, sep="\t")
# make a biomformat otu table
otu <- as(otu_table(ps.noncontam.tree),"matrix")
otu_biom <- make_biom(data=otu)
write_biom(otu_biom,"otu_biom.biom")
write.table(otu_table(ps.noncontam.tree), file = "otu_table.txt", sep = "\t", row.names = TRUE, col.names = NA)
# export metadata table
write.table(sample_data(ps.noncontam.tree), file = "metadata_for_qiime2.txt", sep = "\t", row.names = TRUE, col.names = NA)
#### Import data to QIIME2 (on bash) ####
conda activate qiime2-2020.6
wd=<path to Songbird working dir>
cd $wd
sed 's/"//g' metadata_for_qiime2.txt > metadata_for_qiime2_fixed.txt
# also add #SampleID to header
# add Train column for Songbird
biom convert -i otu_biom.biom -o otu_biom_HDF5.biom --to-hdf5
biom add-metadata -i otu_biom_HDF5.biom -o otu_wTax_metadata.biom --observation-metadata-fp tax_for_qiime2.txt --sc-separated taxonomy --observation-header OTUID,taxonomy --sample-metadata-fp metadata_for_qiime2_fixed.txt
# import to QIIME2
qiime tools import \
--input-path otu_biom_HDF5.biom \
--type 'FeatureTable[Frequency]' \
--input-format BIOMV210Format \
--output-path feature-table.qza
# import tax table to QIIME2
qiime tools import \
--type 'FeatureData[Taxonomy]' \
--input-format HeaderlessTSVTaxonomyFormat \
--input-path tax_for_qiime2.txt \
--output-path taxonomy.qza
# check import
qiime feature-table summarize \
--i-table feature-table.qza \
--m-sample-metadata-file metadata_for_qiime2_fixed.txt \
--o-visualization summary_vis.qzv
qiime tools view summary_vis.qzv
#### Songbird ####
# Make the model; parameter of interest
dir=<name of folder for formula>
mkdir ${dir}
qiime songbird multinomial \
--i-table feature-table.qza \
--m-metadata-file metadata_for_qiime2_fixed.txt \
--p-formula "<parameter of interest>" \
--p-epochs 10000 \
--p-differential-prior 0.5 \
--p-summary-interval 1 \
--p-num-random-test-examples 10 \ # For sediment samples: --p-num-random-test-examples 4
--o-differentials ${dir}/differentials.qza \
--o-regression-stats ${dir}/regression-stats.qza \
--o-regression-biplot ${dir}/regression-biplot.qza \
--p-training-column "Test_Train" \ #include if using
--verbose
# Make the null model
null_dir=null_model
mkdir ${null_dir}
qiime songbird multinomial \
--i-table feature-table.qza \
--m-metadata-file metadata_for_qiime2_fixed.txt \
--p-formula "1" \
--p-epochs 10000 \
--p-differential-prior 0.5 \
--p-summary-interval 1 \
--p-num-random-test-examples 10 \ # For sediment samples: --p-num-random-test-examples 4
--o-differentials ${null_dir}/null-diff.qza \
--o-regression-stats ${null_dir}/null-stats.qza \
--o-regression-biplot ${null_dir}/null-biplot.qza \
--p-training-column "Test_Train" \ #include if using
--verbose
# Visualize the first model's regression stats and the null model
qiime songbird summarize-paired \
--i-regression-stats ${dir}/regression-stats.qza \
--i-baseline-stats ${null_dir}/null-stats.qza \
--o-visualization ${dir}/paired-summary.qzv
qiime tools view ${dir}/paired-summary.qzv
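# Note: the paired summary is only meaningful relative to the null model;
# a useful formula should show lower cross-validation error than the
# intercept-only ("1") baseline.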
# Plot the OTU rankings
qiime qurro differential-plot \
--i-ranks ${dir}/differentials.qza \
--i-table feature-table.qza \
--m-sample-metadata-file metadata_for_qiime2_fixed.txt \
--m-feature-metadata-file tax_for_qiime2.txt \
--verbose \
--o-visualization ${dir}/qurro_plot_q2.qzv
qiime tools view ${dir}/qurro_plot_q2.qzv
# Export the Songbird differentials
qiime metadata tabulate \
--m-input-file ${dir}/differentials.qza \
--o-visualization ${dir}/differentials-viz.qzv
qiime tools export \
--input-path ${dir}/differentials-viz.qzv \
--output-path ${dir}/differentials
#### Plot Songbird figures in R (Repeat for plotting differences between incipient stratification/stratification, Fe concentrations, and DO concentrations; and Sediments) ####
mypath = <path to Songbird data>
setwd(mypath)
# load bugs to focus on [tsv file with columns Family, Genus, Bug_type (iron oxidizer/reducer, methanogen, etc), Top_5perc_rank, Include (whether to include bug in plots), color (using hex color code), Top_10perc_rank]
bug_list <- read.delim(<path to selected bugs>, sep = "\t", stringsAsFactors=FALSE, fileEncoding="latin1")
bug_list$Family_Genus <- paste(bug_list$Family, bug_list$Genus) # use Family_Genus to match tax IDs; can change this to other combination for matching
rownames(bug_list) <- bug_list$Family_Genus
bug_list <- bug_list[ which(bug_list$Include=='Y'), ]
bug_list
# load otu/tax table
otu_tax_table <- read_excel("<path to file with OTU table and tax ID>")
otu_tax_table$Family_Genus <- paste(otu_tax_table$Family, otu_tax_table$Genus)
head(otu_tax_table)
write.csv(otu_tax_table, "otu_tax_table_ps_filt5.csv")
# subset OTUs from main otu/tax table (numerator)
focus_bugs <- otu_tax_table %>% filter(otu_tax_table$Family_Genus %in% bug_list$Family_Genus)
unique(focus_bugs$Family_Genus)
nrow(focus_bugs)
nrow(bug_list)
# get columns of interest only (Family_Genus and counts for all samples)
focus_bugs <- focus_bugs[,c(92, 11:91)]
# sum counts for each Family_Genus
focus_bugs <- focus_bugs %>%
group_by(Family_Genus) %>%
  summarise_if(is.numeric, sum)  # funs() is deprecated in newer dplyr
# organize
focus_bugs <- as.data.frame(focus_bugs)
rownames(focus_bugs) <- focus_bugs$Family_Genus
focus_bugs[,1] <- NULL
focus_bugs <- as.data.frame(t(focus_bugs))
focus_bugs <- tibble::rownames_to_column(focus_bugs, "SampleID")
# setup denominator
bug_denominator <- otu_tax_table %>% filter(!otu_tax_table$Family_Genus %in% bug_list$Family_Genus)
# sum denominator
bug_sum_denominator_num <- bug_denominator %>% dplyr::select(where(is.numeric))
bug_sum_denominator_num <- as.data.frame(colSums(bug_sum_denominator_num))
bug_sum_denominator_num <- tibble::rownames_to_column(bug_sum_denominator_num, "SampleID")
head(bug_sum_denominator_num)
# calculate natural log ratios (Note: log in R is natural log)
df1_bugs <- focus_bugs
df2_bugs <- bug_sum_denominator_num
df3_bugs <- cbind(df1_bugs[1], log(df1_bugs[, -1] / df2_bugs[match(df1_bugs$SampleID, df2_bugs$SampleID), -1]))
head(df3_bugs)
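# (Illustrative) e.g. a taxon with 12 reads against a denominator sum of 500
# gives log(12/500) = -3.73, and 30 reads gives log(30/500) = -2.81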
# add descriptions
rownames(df3_bugs) <- df3_bugs$SampleID
df3_bugs$SampleID <- NULL
df3_bugs <- as.data.frame(t(df3_bugs))
df3_bugs$Family_Genus <- rownames(df3_bugs)
df3_bugs$Bug_type <- bug_list$Bug_type[match(df3_bugs$Family_Genus, bug_list$Family_Genus)]
df3_bugs$Top_5perc_rank <- bug_list$Top_5perc_rank[match(df3_bugs$Family_Genus, bug_list$Family_Genus)]
df3_bugs$color <- bug_list$color[match(df3_bugs$Family_Genus, bug_list$Family_Genus)]
head(df3_bugs)
# add stratification group layer
df3_bugs_melt <- reshape2::melt(df3_bugs, id=c("Bug_type", "Top_5perc_rank", "Family_Genus", "color"))
colnames(df3_bugs_melt) <- c("Bug_type", "Top_5perc_rank", "Family_Genus", "color", "SampleID", "Natural_Log_Ratio")
df3_bugs_melt$SampleID <- gsub("\\.", "-", df3_bugs_melt$SampleID)
df3_bugs_melt$Strat_group <- DATA_PHYLOSEQ_FIXED$Strat_group[match(df3_bugs_melt$SampleID, DATA_PHYLOSEQ_FIXED$SampleID)]
df3_bugs_melt$strat_time <- ordi_data$strat_time[match(df3_bugs_melt$SampleID, ordi_data$SampleID)]
df3_bugs_melt$Depth_meters <- ordi_data$Depth_meters[match(df3_bugs_melt$SampleID, ordi_data$SampleID)]
# order the strat group
df3_bugs_melt$Strat_group <- gsub('Unstratified', 'Incipient Stratification', df3_bugs_melt$Strat_group)
df3_bugs_melt$Strat_group <- factor(df3_bugs_melt$Strat_group, levels=c("Incipient Stratification","Epilimnion","Thermocline", "Hypolimnion"), ordered = TRUE)
# remove Inf values
df3_bugs_melt <- df3_bugs_melt[is.finite(df3_bugs_melt$Natural_Log_Ratio),]
head(df3_bugs_melt)
write.csv(df3_bugs_melt, "Select_bugs_natural_log_ratio_v3.csv")
#### Plot Figure 7 ####
# Subset strat 3 months
df3_bugs_melt_sub <- subset(df3_bugs_melt, strat_time == "Stratified (~3 months)")
# Order the Bug_type
df3_bugs_melt_sub$Bug_type <- factor(df3_bugs_melt_sub$Bug_type, levels=c("Magnetotactic", "Iron oxidizer", "Iron oxidizer/reducer", "Photoferrotroph/iron reducer", "Iron reducer", "Sulfate/iron reducer", "Sulfate reducer", "Sulfide oxidizer", "Methylotroph", "Methanogen"), ordered=TRUE)
# Set up theme
plot_theme <- theme(panel.background = element_rect(fill = "white", colour = "black", size = 1, linetype = "solid"),
panel.border = element_rect(colour="black", size=1, fill=NA),
strip.background=element_rect(fill='white', colour='white', size = 0),
strip.text = element_text(face="bold", size=15),
panel.spacing.x=unit(1, "lines"),
panel.spacing.y=unit(1, "lines"),
panel.grid.major = element_line(size = 0),
panel.grid.minor = element_line(size = 0),
axis.text = element_text(size=15, colour="black"),
axis.title = element_text(face="bold", size=15),
legend.position="right",
legend.key = element_rect(fill = "transparent"),
legend.title = element_text(face="bold", size=15),
legend.text = element_text(size=15, colour="black"),
legend.background=element_blank())
plot_guide <- guides(
fill = guide_legend(ncol=1, title="Taxa (Family_Genus)", override.aes=list(color="black", linetype=0)),
color="none",
linetype = "none")
# Set up coloring
colors <- distinct(df3_bugs_melt_sub, Family_Genus, color)
pal <- colors$color
names(pal) <- colors$Family_Genus
pal
figure_strat3months <- ggplot(df3_bugs_melt_sub, aes(x=Depth_meters, y=Natural_Log_Ratio)) +
geom_point(aes(fill=Family_Genus), shape=21, colour="black", size=3) +
geom_smooth(method = 'loess', size=1, alpha=0.25, linetype="dashed", aes(color=Family_Genus, fill=Family_Genus)) +
facet_wrap(. ~ Bug_type, scales="free", nrow=2, labeller = labeller(Bug_type = label_wrap_gen(10))) +
geom_vline(xintercept=2, linetype="dashed", color = "black", size=0.5) +
geom_vline(xintercept=6.5, linetype="dashed", color = "black", size=0.5) +
geom_vline(xintercept=0, linetype="solid", color = "black", size=0.5) +
labs(x = "Depth (m)", y="Natural Log Ratio") +
scale_y_continuous(lim=c(-12,0), breaks = seq(-12, 0, by = 4), expand = expansion(mult = c(0, 0))) +
coord_flip() +
scale_x_reverse(lim=c(10,-0.75), breaks = seq(10, 0, by = -2), expand = expansion(mult = c(0, 0))) +
scale_fill_manual(values=pal) +
scale_color_manual(values=pal) +
plot_theme + plot_guide
figure_strat3months
ggsave("Selected_Bugs_strat3months.pdf", path = mypath, scale = 1, width = 18, height = 9, units = c("in"), dpi = 300)
#### For Fe combined plot (Figure S7) ####
Both_HighFe <- plot_grid(figure_Both + theme(legend.position="none", axis.title=element_blank()),
figure_HighFe + theme(legend.position="none", axis.title=element_blank()),
ncol=2, align = "v", axis="b")
Fe_hypo_ratio_All <- plot_grid(Both_HighFe,NULL,
figure_MidFe + theme(legend.position="none", axis.title=element_blank()),
nrow=3, align = "v", axis="b", rel_heights=c(1,0.1,1))
Fe_hypo_ratio_All
save_file <- paste("Combo_Fe_log_ratio.pdf", sep="")
ggsave(save_file, path = mypath, plot = Fe_hypo_ratio_All, scale = 1, width = 10, height = 8, units = c("in"), dpi = 300)
#### For O2 combined plot (Figure S8) ####
Both_O2 <- plot_grid(figure_obl + theme(legend.position="none", axis.title=element_blank()),
figure_O2 + theme(legend.position="none", axis.title=element_blank()),
ncol=2, align = "v", axis="b", rel_widths=c(1,0.75))
save_file <- paste("Combo_O2_log_ratio.pdf", sep="")
ggsave(save_file, path = mypath, plot = Both_O2, scale = 1, width = 15, height = 4, units = c("in"), dpi = 300)
#### For Fe and O2 log ratio plot ranking (Figure 4) ####
logratio <- read.delim("<path to data from Qurro>", sep = "\t", stringsAsFactors=FALSE, fileEncoding="latin1")
logratio$SampleID <- DATA_PHYLOSEQ_FIXED$SampleID[match(rownames(DATA_PHYLOSEQ_FIXED),logratio$Sample.ID)]
logratio$Season <- factor(logratio$Season, ordered=TRUE, levels=c("Spring", "Summer", "Fall"))
head(logratio)
# Set up theme
plot_theme <- theme(panel.background = element_rect(fill = "white", colour = "black", size = 1, linetype = "solid"),
panel.border = element_rect(colour="black", size=1, fill=NA),
panel.grid.major = element_line(size = 0),
panel.grid.minor = element_line(size = 0),
axis.text = element_text(size=15, colour="black"),
axis.title = element_text(face="bold", size=20),
legend.position="right",
legend.key = element_rect(fill = "transparent"),
legend.title = element_text(face="bold", size=20),
legend.text = element_text(size=15, colour="black"),
legend.background=element_blank())
plot_guide <- guides(
fill = guide_legend(ncol=1, title="strat_time", override.aes=list(color="black", linetype=0)),
color="none",
linetype = "none")
figure <- ggplot(logratio, aes(x=Depth_meters, y=Current_Natural_Log_Ratio)) +
geom_point(aes(fill=Season), shape=21, colour="black", size=3) +
geom_smooth(method = 'loess', size=0.5, alpha=0.25, aes(color=Season, fill=Season, linetype=Season)) +
geom_vline(xintercept=2, linetype="dashed", color = "black", size=0.5) +
geom_vline(xintercept=6.5, linetype="dashed", color = "black", size=0.5) +
geom_vline(xintercept=0, linetype="solid", color = "black", size=0.5) +
labs(x = "Water Column Depth (m)", y="Natural Log Ratio") +
coord_flip() +
scale_x_reverse(lim=c(10,-0.75), breaks = seq(10, 0, by = -2), expand = expansion(mult = c(0, 0))) +
scale_fill_manual(name="Stratification?", values=c("#a10702","#688e26", "#e9c46a"), labels=c("Incipient Stratification", "Stratified (~3 months)", "Stratified (~7 months)")) +
scale_color_manual(name="Stratification?", values=c("#a10702","#688e26", "#e9c46a"), labels=c("Incipient Stratification", "Stratified (~3 months)", "Stratified (~7 months)")) +
scale_linetype_manual(name="Stratification?", values=c("dotted", "longdash", "dotdash"), labels=c("Incipient Stratification", "Stratified (~3 months)", "Stratified (~7 months)")) +
plot_theme + plot_guide
figure
ggsave("Log_Ratio_Qurro.pdf", path = mypath, scale = 1, width = 6, height = 5, units = c("in"), dpi = 300)
#### Plot OTU ranking for Fe and DO (Figure 4) ####
## For Fe
Fe_ranks <- read_excel("<path to Fe differentials.xlsx>", sheet="metadata")
colnames(Fe_ranks) <- c('featureid','Kingdom','Phylum','Class','Order','Family','Genus','Species','Bug_type','Intercept',
'Mid_High_Fe', 'Mid_Low_Fe','Mid_SuperLow_Fe')
## For DO
colnames(DO_ranks) <- c('featureid','Intercept','Aerobic_Anoxic', 'Aerobic_LowO2')
DO_ranks$Aerobic_LowO2 <- as.numeric(DO_ranks$Aerobic_LowO2)
# Sort by ranking (Fe)
Fe_ranks <- Fe_ranks[order(Fe_ranks$Mid_High_Fe),]
# Sort by ranking (DO)
DO_ranks <- DO_ranks[order(DO_ranks$Aerobic_LowO2),]
# Number the ranks (Fe or DO)
Fe_ranks$rank <- seq.int(nrow(Fe_ranks))
DO_ranks$rank <- seq.int(nrow(DO_ranks))
# Set up theme
plot_theme <- theme(panel.background = element_rect(fill = "white", colour = "black", size = 1, linetype = "solid"),
panel.border = element_rect(colour="black", size=1, fill=NA),
panel.spacing.x=unit(1, "lines"),
panel.spacing.y=unit(1, "lines"),
panel.grid.major = element_line(size = 0),
panel.grid.minor = element_line(size = 0),
axis.text = element_text(size=15, colour="black"),
axis.title = element_text(face="bold", size=20),
axis.title.y = element_text(angle = 90))
plot_guide <- guides(fill= guide_legend(order=1, override.aes = list(size=0.2, color="black", alpha=1)),
color = "none", linetype = "none")
# Change y variables to DO or Fe
ggplot(DO_ranks, aes(x=rank, y=Aerobic_LowO2)) +
geom_line(color="black") +
geom_ribbon(data=subset(DO_ranks, rank>1877 & rank<2085),aes(ymax=Aerobic_LowO2),ymin=0, fill="#941b0c",colour=NA,alpha=1)+
geom_ribbon(data=subset(DO_ranks, rank>1117 & rank<1878),aes(ymax=Aerobic_LowO2),ymin=0, fill="grey",colour=NA,alpha=0.5)+
geom_ribbon(data=subset(DO_ranks, rank>208 & rank<1118),aes(ymax=Aerobic_LowO2),ymin=0, fill="grey",colour=NA,alpha=0.5)+
geom_ribbon(data=subset(DO_ranks, rank>0 & rank<209),aes(ymax=Aerobic_LowO2),ymin=0, fill="#f6aa1c",colour=NA,alpha=1)+
geom_hline(yintercept=0, linetype="longdash", color = "black", size=0.3) +
#geom_area(mapping = aes(x = ifelse(rank>1877 & rank<2085, rank, 0)), fill = "red") +
ylab("log(LowO2/Aerobic)") +
xlab("Taxa Rank") +
scale_y_continuous(limits=c(-5.5,5.5), breaks=c(-4,-2,0,2,4), expand = c(0, 0)) +
scale_x_continuous(limits=c(1,2085), breaks=c(1,500,1000,1500,2000), expand = c(0, 0)) +
plot_theme + plot_guide
ggsave("Log_Ratio_ranks.pdf", path = mypath, scale = 1, width = 4.5, height = 4, units = c("in"), dpi = 300)
|
{"hexsha": "9a02233c01245e752a0c65dfe925c42354f41cb5", "size": 18158, "ext": "r", "lang": "R", "max_stars_repo_path": "08.Songbird_OTUs.r", "max_stars_repo_name": "LLNL/2022_PondB_microbiome", "max_stars_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "08.Songbird_OTUs.r", "max_issues_repo_name": "LLNL/2022_PondB_microbiome", "max_issues_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "08.Songbird_OTUs.r", "max_forks_repo_name": "LLNL/2022_PondB_microbiome", "max_forks_repo_head_hexsha": "d9aaade01033eea9f220e96521099fd881971c82", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-23T17:39:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T17:39:35.000Z", "avg_line_length": 46.9198966408, "max_line_length": 288, "alphanum_fraction": 0.7072364798, "num_tokens": 5595}
|
#### created by Alessandro Bigiotti ####
import numpy as np
import matplotlib.pyplot as plt
import pickle
import math
import os
import tensorflow as tf
import keras as Ker
import keras.backend as Kback
import keras.optimizers as opt
import time as tm
import sklearn.metrics as metr
from keras.models import Sequential
from keras.layers import Input, Dense
from keras.models import Model
from keras.layers.core import Activation, Dropout
from keras.callbacks import EarlyStopping
from keras.utils.vis_utils import plot_model
from tensorflow.python.client import device_lib
from mlp_iperparameter_settings import *
import sys
sys.path.insert(0, '../')
from Utils.base_dir import *
from Utils.utils import *
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#tf.enable_eager_execution()
# if you cannot see any NVIDIA devices:
# 1. you don't have a NVIDIA GPU: remove the line -> from tensorflow.python.client import device_lib
# 2. you have a NVIDIA GPU, but there's a wrong installation/configuration of cuda toolkit, cuda drivers, cudartlib or tensorflow-gpu
# - check if you see your NVIDIA GPU among available devices with the following: print(device_lib.list_local_devices())
# - verify that tensorflow support your cuda tolkit version
# - verify that cudart is installed
def main():
smoothed_data = input("Do you want to use smoothed data? [Y -> yes / N -> no] \n")
input_file_list = []
data_dir = ''
if(smoothed_data == 'Y' or smoothed_data == 'y'):
input_file_list = os.listdir(raw_1min_data_smoothed)
data_dir = raw_1min_data_smoothed
else:
input_file_list = os.listdir(raw_1min_data)
data_dir = raw_1min_data
# fix random seed for reproducibility
np.random.seed(7)
#print(device_lib.list_local_devices())
# Step 1) load the data: import from pickle files
close_prices = []
print('loading the data structures...')
for input_pickle_file in input_file_list:
input_file_path = os.path.join(data_dir, input_pickle_file)
file_array = open(input_file_path, 'rb')
array_dati = pickle.load(file_array)
close_prices.append(array_dati)
close_prices = np.array(close_prices)
# Normalize the data
min_val, max_val, normal_data = Normalize_Data_DataSet(close_prices)
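    # (Illustrative, assuming the usual min-max convention of the Utils helper)
    #   x_norm = (x - min_val) / (max_val - min_val)
    # DeNormalize below then inverts this: x = x_norm*(max_val - min_val) + min_val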
# Divide the Data into Train_Samples and Test_Samples
train_set = []
val_set = []
test_set = []
for i in range(len(normal_data)):
train, test, val = Divide_Data(normal_data[i])
train_set.append(np.array(train))
val_set.append(np.array(val))
test_set.append(np.array(test))
train_set = np.array(train_set)
val_set = np.array(val_set)
test_set = np.array(test_set)
# Create the training, validation and testing sets
train_samples = []
train_labels = []
test_samples = []
test_labels = []
valid_samples = []
valid_labels = []
# Create Training samples and labels
for i in range(len(train_set)):
train_samp, train_lab = CreateDataset(train_set[i], size_campioni)
train_samples.append(np.array(train_samp))
train_labels.append(np.array(train_lab))
train_samples = np.array(train_samples)
train_labels = np.array(train_labels)
# Create Validation samples and labels
for i in range(len(val_set)):
val_samp, val_lab = CreateDataset(val_set[i], size_campioni)
valid_samples.append(np.array(val_samp))
valid_labels.append(np.array(val_lab))
valid_samples = np.array(valid_samples)
valid_labels = np.array(valid_labels)
# Create Test samples and labels
for i in range(len(test_set)):
test_samp, test_lab = CreateDataset(test_set[i], size_campioni)
test_samples.append(np.array(test_samp))
test_labels.append(np.array(test_lab))
test_samples = np.array(test_samples)
test_labels = np.array(test_labels)
# Compose the dataset
train_samples_ok = []
train_labels_ok = []
for i in range(len(train_samples)):
for j in range(len(train_samples[i])):
train_samples_ok.append(train_samples[i][j])
train_labels_ok.append(train_labels[i][j])
train_samples_ok = np.array(train_samples_ok)
train_labels_ok = np.array(train_labels_ok)
test_samples_ok = []
test_labels_ok = []
for i in range(len(test_samples)):
for j in range(len(test_samples[i])):
test_samples_ok.append(test_samples[i][j])
test_labels_ok.append(test_labels[i][j])
test_samples_ok = np.array(test_samples_ok)
test_labels_ok = np.array(test_labels_ok)
valid_samples_ok = []
valid_labels_ok = []
for i in range(len(valid_samples)):
for j in range(len(valid_samples[i])):
valid_samples_ok.append(valid_samples[i][j])
valid_labels_ok.append(valid_labels[i][j])
valid_samples_ok = np.array(valid_samples_ok)
valid_labels_ok = np.array(valid_labels_ok)
optimizer_used = opt.Nadam(lr=learn_rate, beta_1=beta1, beta_2=beta2, epsilon=None, schedule_decay=sched_decay)
    # show the shape of the training samples; it can be useful for fitting the model correctly
# print('trainsamples shape: '+str(train_samples_ok.shape))
# Construct the model
model = Sequential()
    # Add the input layer with the same shape as the training samples, activation activ_function_1 and dropout drop_out
model.add(Dense(input_dim=train_samples_ok.shape[1], units=output_dim1))
model.add(Activation(activ_function_1))
model.add(Dropout(drop_out))
    # Add a hidden layer with activation activ_function_2 and dropout drop_out
model.add(Dense(units=output_dim2))
model.add(Activation(activ_function_2))
model.add(Dropout(drop_out))
    # Add a hidden layer with activation activ_function_3 and dropout drop_out
model.add(Dense(units=output_dim3))
model.add(Activation(activ_function_3))
model.add(Dropout(drop_out))
    # Add an output layer with 1 node -> for this purpose it will contain the prediction
model.add(Dense(units=1))
# compile the model using n-adam Optimizer, and minimum square error loss function for regression problem
model.compile(optimizer = optimizer_used, loss = loss_function, metrics = [metric_function])
# the following line show the model structure
model.summary()
# plot the model structure
#file = str(input('Insert the file to save the network structure: \n'))
#plot_model(model, to_file = file, show_shapes=True, show_layer_names=True)
# check the time
start = tm.time()
#print('Training and Testing Shapes:')
#print('Training shape: '+str(train_samples.shape)+'Training shape0: '+str(train_samples.shape[0])+'Training shape1: '+str(train_samples.shape[1]))
call_backs = EarlyStopping(monitor='loss', patience=pati)
train_log = model.fit(train_samples_ok, train_labels_ok, batch_size=batch_size, epochs=nb_epoch_train, validation_split=0.0, validation_data = (valid_samples_ok, valid_labels_ok), verbose=2, callbacks = [call_backs])
    # Description: train_samples_ok and train_labels_ok come from the dataset
    # built above; batch_size is the number of samples per gradient update
    # (it shapes the learning dynamics); nb_epoch_train is the number of full
    # passes over the training data, repeated until that epoch count is reached.
    #
#print(train_log.history)
end = tm.time()
print("execution training phase: "+str(end-start)+"\n")
# print the train_log keys
#print(train_log.history.keys())
# Plot training & validation accuracy values
plt.plot(train_log.history["loss"], color = 'black', linewidth=0.4, label="training (mse)")
plt.plot(train_log.history["val_loss"], '-.', color = 'grey', linewidth=0.4, label="validation (mse)")
plt.title('Training e Validation Loss')
plt.ylabel('Mean Squared Error')
plt.xlabel('Epochs')
plt.yticks(np.arange(-0.0005, 0.0005, step=0.001))
plt.legend()
plt.show()
# Plot training & validation loss values
plt.plot(train_log.history['mean_absolute_error'], color = 'black', linewidth=0.4, label='training (mae)')
plt.plot(train_log.history['val_mean_absolute_error'], '-.', color = 'grey', linewidth=0.4, label = 'validation (mae)')
plt.title('Training e Validation Metrics')
plt.ylabel('Mean Absolute Error')
plt.xlabel('Epochs')
plt.yticks(np.arange(-0.0005, 0.0005, step=0.001))
plt.legend()
plt.show()
#evaluation = model.evaluate(test_samples_ok, test_labels_ok, batch_size = 128, verbose = 1)
#print(model.metrics_names)
#print(evaluation)
# make prediction over the test_set (samples and labels)
score2 = model.evaluate(valid_samples_ok, valid_labels_ok, batch_size = 128, verbose = 1)
score1 = model.evaluate(test_samples_ok, test_labels_ok, batch_size = 128, verbose = 1)
print('validation set scores: ' +str(score2)+'\n')
print('test set scores: '+str(score1)+'\n')
prediction = model.predict(test_samples_ok)
# print('prediction shape: '+str(prediction.shape))
#denormalize data
close_prediction = DeNormalize(prediction, min_val, max_val)
labels_close = DeNormalize(test_labels_ok, min_val, max_val)
mse = metr.mean_squared_error(close_prediction, labels_close)
mae = metr.mean_absolute_error(close_prediction, labels_close)
print("SCORE MSE: "+str(mse))
print("SCORE MAE: "+str(mae))
# direct error evaluation:
differences = []
for i in range(len(prediction)):
differences.append(labels_close[i] - close_prediction[i])
new_diff = np.array(differences)
# Plot predictions vs labels
plt.plot(labels_close, color = 'black', linewidth=0.5, label = 'labels')
plt.plot(close_prediction, color = 'grey', linewidth=0.6, label = 'predictions')
plt.title('Predictions')
plt.ylabel('Price values')
plt.xlabel('Number of samples')
plt.legend()
plt.show()
# Plot prediction error
plt.plot(new_diff, '--', color = 'black', linewidth=0.1, label = 'prediction error')
plt.yticks(np.arange(-0.05, 0.10, step=0.05))
plt.title('Punctual prediction error')
plt.legend()
plt.show()
save_model = input('Save the Network? ([Y -> YES / N -> NO]): \n')
if(save_model == 'Y' or save_model == 'y'):
model.save(os.path.join(str(trained_mlp), "MLP_forecast.h5"))
afile = open(os.path.join(str(trained_mlp), "min_val.pkl"), 'wb')
pickle.dump(min_val, afile)
afile.close()
afile = open(os.path.join(str(trained_mlp), "max_val.pkl"), 'wb')
pickle.dump(max_val, afile)
afile.close()
print('min max for normalization: '+str(min_val)+"; "+str(max_val))
# free the gpu resources
Kback.clear_session()
if __name__ == "__main__":
main()
|
{"hexsha": "736de7a006ffabdc9ba83ea6ade9d91a808cb664", "size": 10169, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLP_Model_Training/keras_MLP_model.py", "max_stars_repo_name": "cony89/TimeSeriesForecast", "max_stars_repo_head_hexsha": "7cad74b7171540e7347836a40e9f7e62a0ae34b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MLP_Model_Training/keras_MLP_model.py", "max_issues_repo_name": "cony89/TimeSeriesForecast", "max_issues_repo_head_hexsha": "7cad74b7171540e7347836a40e9f7e62a0ae34b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MLP_Model_Training/keras_MLP_model.py", "max_forks_repo_name": "cony89/TimeSeriesForecast", "max_forks_repo_head_hexsha": "7cad74b7171540e7347836a40e9f7e62a0ae34b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-29T02:01:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T02:01:05.000Z", "avg_line_length": 38.9616858238, "max_line_length": 217, "alphanum_fraction": 0.7467794277, "include": true, "reason": "import numpy", "num_tokens": 2527}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from MDAnalysis.analysis import align
def _adjust_frame_range_for_slicing(fstart, fend, nframes):
if fend != -1:
fend+=1
if fend == (nframes-1) and fstart == (nframes-1):
fend+=1
if fend == fstart:
fend+=1
if fstart<0:
fstart+=nframes
if fend < 0:
fend+=nframes+1
return fstart, fend
def com_com_distances(universe, mda_selection_pairs, fstart=0, fend=-1, fstep=1):
"""Center of mass to Center of mass distance.
This function computes the distance between the centers of mass between pairs of MDAnalysis atoms selections
across the the MD trajectory.
Args:
universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on.
mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis
atom selection objects to compute the distance between.
fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame)
fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame)
fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend.
Default: 1 (or every frame)
Returns:
(np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times
corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the
order in the list corresponds to the atom selection pairs in the mda_selection_pairs input.
"""
fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory))
times = []
pair_dists = []
for pair in mda_selection_pairs:
pair_dists.append([])
for frame in universe.trajectory[fstart:fend:fstep]:
times.append(frame.time)
i = 0
for pair in mda_selection_pairs:
sel_1 = pair[0]
sel_2 = pair[1]
com_1 = sel_1.atoms.center_of_mass()
com_2 = sel_2.atoms.center_of_mass()
d_com = com_2 - com_1
dist = np.sqrt(np.dot(d_com, d_com))
pair_dists[i].append(dist)
i+=1
times = np.array(times)
i=0
for vals in pair_dists:
pair_dists[i] = np.array(vals)
i+=1
return times, pair_dists
def com_com_distances_plane(universe, mda_selection_pairs, fstart=0, fend=-1, fstep=1, plane='xy'):
"""Center of mass to Center of mass distance in two dimensions (a plane).
This function computes the distance between the centers of mass between pairs of MDAnalysis atoms selections
across the the MD trajectory, but only uses the 2d coordinates of the specified axis plane.
Args:
universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on.
mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis
atom selection objects to compute the distance between.
fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame)
fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame)
fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend.
Default: 1 (or every frame)
plane (str): Optional, the 2d axis plane to compute the distance in. Default: 'xy' (or the xy plane)
Returns:
(np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times
corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the
order in the list corresponds to the atom selection pairs in the mda_selection_pairs input.
"""
lat_ind = [0, 1]
    # Use '==' for string comparison; 'is' relies on CPython interning and
    # is not guaranteed to hold.
    if plane == 'yx':
        lat_ind = [0, 1]
    elif plane == 'xz' or plane == 'zx':
        lat_ind = [0, 2]
    elif plane == 'zy' or plane == 'yz':
        lat_ind = [1, 2]
#indices = mda_selection.indices
fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory))
times = []
pair_dists = []
for pair in mda_selection_pairs:
pair_dists.append([])
for frame in universe.trajectory[fstart:fend:fstep]:
times.append(frame.time)
i = 0
for pair in mda_selection_pairs:
sel_1 = pair[0]
sel_2 = pair[1]
com_1 = sel_1.atoms.center_of_mass()
com_2 = sel_2.atoms.center_of_mass()
plane_val_1 = com_1[lat_ind]
plane_val_2 = com_2[lat_ind]
d_com = plane_val_2 - plane_val_1
dist = np.sqrt(np.dot(d_com, d_com))
pair_dists[i].append(dist)
i+=1
times = np.array(times)
i=0
for vals in pair_dists:
pair_dists[i] = np.array(vals)
i+=1
return times, pair_dists
def com_com_distances_axis(universe, mda_selection_pairs, fstart=0, fend=-1, fstep=1, axis='z'):
"""Center of mass to Center of mass distance in one dimension (along an axis).
This function computes the distance between the centers of mass between pairs of MDAnalysis atoms selections
across the the MD trajectory, but only uses the 1d coordinate of the specified axis.
Args:
universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on.
mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis
atom selection objects to compute the distance between.
fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame)
fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame)
fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend.
Default: 1 (or every frame)
axis (str): Optional, the 1d axis to compute the distance in. Default: 'z' (or the z axis)
Returns:
(np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times
corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the
order in the list corresponds to the atom selection pairs in the mda_selection_pairs input.
"""
dir_ind = 2
    if axis == 'x':
        dir_ind = 0
    elif axis == 'y':
        dir_ind = 1
#indices = mda_selection.indices
fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory))
times = []
pair_dists = []
for pair in mda_selection_pairs:
pair_dists.append([])
for frame in universe.trajectory[fstart:fend:fstep]:
times.append(frame.time)
i = 0
for pair in mda_selection_pairs:
sel_1 = pair[0]
sel_2 = pair[1]
com_1 = sel_1.atoms.center_of_mass()
com_2 = sel_2.atoms.center_of_mass()
norm_val_1 = com_1[dir_ind]
norm_val_2 = com_2[dir_ind]
dist = np.abs(norm_val_2 - norm_val_1)
pair_dists[i].append(dist)
i+=1
times = np.array(times)
i=0
for vals in pair_dists:
pair_dists[i] = np.array(vals)
i+=1
return times, pair_dists
def com_com_distances_axis_align(universe, mda_selection_pairs, align_struct_universe, align_sel_string, fstart=0,
fend=-1, fstep=1, axis='z'):
"""Center of mass to Center of mass distance in one dimension (along an axis) after structure alignment.
This function computes the distance between the centers of mass between pairs of MDAnalysis atoms selections
across the the MD trajectory, but only uses the 1d coordinate of the specified axis, and aligns the structure
to some selection of atoms of a reference structure.
Args:
universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on.
mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis
atom selection objects to compute the distance between.
align_struct_universe (MDAnalsysi.Universe): The MDAnalsysis universe object of the reference structure to
align the system to.
align_sel_string (str): A MDAnalysis selection string to use for the structure alignment.
fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame)
fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame)
fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend.
Default: 1 (or every frame)
axis (str): Optional, the 1d axis to compute the distance in. Default: 'z' (or the z axis)
Returns:
(np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times
corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the
order in the list corresponds to the atom selection pairs in the mda_selection_pairs input.
"""
dir_ind = 2
    if axis == 'x':
        dir_ind = 0
    elif axis == 'y':
        dir_ind = 1
#indices = mda_selection.indices
fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory))
times = []
pair_dists = []
for pair in mda_selection_pairs:
pair_dists.append([])
for frame in universe.trajectory[fstart:fend:fstep]:
times.append(frame.time)
# now do the alignment
align.alignto(universe, align_struct_universe, select=align_sel_string, weights='mass')
i = 0
for pair in mda_selection_pairs:
sel_1 = pair[0]
sel_2 = pair[1]
com_1 = sel_1.atoms.center_of_mass()
com_2 = sel_2.atoms.center_of_mass()
norm_val_1 = com_1[dir_ind]
norm_val_2 = com_2[dir_ind]
dist = np.abs(norm_val_2 - norm_val_1)
pair_dists[i].append(dist)
i+=1
times = np.array(times)
i=0
for vals in pair_dists:
pair_dists[i] = np.array(vals)
i+=1
return times, pair_dists
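# Illustrative usage (file names hypothetical):
#
#   import MDAnalysis as mda
#   u = mda.Universe("system.psf", "trajectory.dcd")
#   pairs = [(u.select_atoms("resid 1:10"), u.select_atoms("resid 90:100"))]
#   times, dists = com_com_distances(u, pairs, fstep=10)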
|
{"hexsha": "bad30e8a3fe7ff6466690384bc9830b89d1086ba", "size": 10565, "ext": "py", "lang": "Python", "max_stars_repo_path": "pybilt/mda_tools/mda_distance.py", "max_stars_repo_name": "blakeaw/ORBILT", "max_stars_repo_head_hexsha": "ed402dd496534dccd00f3e75b57007d944c58c1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-07-29T16:21:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T11:44:57.000Z", "max_issues_repo_path": "pybilt/mda_tools/mda_distance.py", "max_issues_repo_name": "blakeaw/ORBILT", "max_issues_repo_head_hexsha": "ed402dd496534dccd00f3e75b57007d944c58c1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-05-15T09:30:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-19T16:49:59.000Z", "max_forks_repo_path": "pybilt/mda_tools/mda_distance.py", "max_forks_repo_name": "blakeaw/ORBILT", "max_forks_repo_head_hexsha": "ed402dd496534dccd00f3e75b57007d944c58c1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-08-12T11:14:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-22T18:22:55.000Z", "avg_line_length": 45.7359307359, "max_line_length": 115, "alphanum_fraction": 0.6621864647, "include": true, "reason": "import numpy", "num_tokens": 2607}
|
import pandas as pd
df = pd.read_csv('C:\\Users\\Admin\\Desktop\\BE Proj\\HighFrequency.txt')
print(df)
array = df.values  # public accessor instead of the private df._values
X = array[:, 0:3838]   # feature columns
Y = array[:, 3839]     # author label column
#print(X)
#print(Y)
print('Loaded Data File')
print()
import numpy as np
from sklearn import svm
# Draw 340 distinct test indices; np.random.randint can repeat values,
# which would put duplicate rows in the test set.
testing = np.random.choice(1700, size=340, replace=False)
NBData_data=X
NBData_target=Y
#Training Data
train_target = np.delete(NBData_target , testing)
train_data= np.delete(NBData_data , testing, axis=0)
#Testing Data
test_target = NBData_target[testing]
test_data = NBData_data[testing]
#Classifier
clf = svm.SVC(kernel='poly', C=1.5, degree = 3)
clf.fit(train_data, train_target)
#Visualization
#Prediction
Z = clf.predict(test_data)
print('The prediction of the authors are:')
print(Z)
#Accuracy
from sklearn.metrics import accuracy_score
print()
print('Accuracy of the system:')
print(accuracy_score(test_target , Z))
print('Successful')
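# Illustrative alternative (same X, Y assumed): scikit-learn can draw a
# disjoint hold-out split directly, avoiding duplicate test indices:
#   from sklearn.model_selection import train_test_split
#   X_tr, X_te, y_tr, y_te = train_test_split(X, Y, test_size=0.2, random_state=0)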
|
{"hexsha": "81126efae6be352ffefa807fe9b5d6f3dea80194", "size": 955, "ext": "py", "lang": "Python", "max_stars_repo_path": "Other Models/svmfile.py", "max_stars_repo_name": "agarwalansh/Stlyometry-based-Authorship-Identification", "max_stars_repo_head_hexsha": "6e41bc6503f28dd8889b292de195cee4ced555af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Other Models/svmfile.py", "max_issues_repo_name": "agarwalansh/Stlyometry-based-Authorship-Identification", "max_issues_repo_head_hexsha": "6e41bc6503f28dd8889b292de195cee4ced555af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Other Models/svmfile.py", "max_forks_repo_name": "agarwalansh/Stlyometry-based-Authorship-Identification", "max_forks_repo_head_hexsha": "6e41bc6503f28dd8889b292de195cee4ced555af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-13T15:01:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T15:01:59.000Z", "avg_line_length": 18.7254901961, "max_line_length": 72, "alphanum_fraction": 0.7455497382, "include": true, "reason": "import numpy", "num_tokens": 258}
|
/*! ------------------------------------------------------------------------- *
* \author Joey Dumont <joey.dumont@gmail.com> *
* \since 2018-07-24 *
* *
* Outputs a 10x10 matrix with random complex numbers in a human-readable *
* format. *
* --------------------------------------------------------------------------*/
#include <armadillo>
int main(int argc, char* argv[])
{
arma::cx_mat rand_cx_mat = arma::randu<arma::cx_mat>(10,10);
rand_cx_mat.save("rand_test.txt", arma::raw_ascii);
return 0;
}
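/* Illustrative companion (assumes Armadillo's raw_ascii layout for complex
 * matrices, i.e. whitespace-separated "(re,im)" tokens): the file can be
 * read back in Python with
 *
 *   import numpy as np
 *   rows = [[complex(*map(float, tok.strip("()").split(",")))
 *            for tok in line.split()]
 *           for line in open("rand_test.txt")]
 *   mat = np.array(rows)
 */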
|
{"hexsha": "6ed301226fa6d6d4fa1a1c70e658ca2267b7e8c9", "size": 749, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "assets/posts/read-c-format-complex-numbers-with-numpy/complex_arma_data.cpp", "max_stars_repo_name": "joeydumont/joeydumont.github.io", "max_stars_repo_head_hexsha": "f62672427b265d87f754ac95ba54708dd7bd046c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assets/posts/read-c-format-complex-numbers-with-numpy/complex_arma_data.cpp", "max_issues_repo_name": "joeydumont/joeydumont.github.io", "max_issues_repo_head_hexsha": "f62672427b265d87f754ac95ba54708dd7bd046c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assets/posts/read-c-format-complex-numbers-with-numpy/complex_arma_data.cpp", "max_forks_repo_name": "joeydumont/joeydumont.github.io", "max_forks_repo_head_hexsha": "f62672427b265d87f754ac95ba54708dd7bd046c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0588235294, "max_line_length": 79, "alphanum_fraction": 0.3217623498, "num_tokens": 124}
|
#include <boost/algorithm/string/replace.hpp>
#include <iostream>
#include <string>
#include <vector>
#include "fcs-genome/common.h"
#include "fcs-genome/config.h"
#include "fcs-genome/workers/Mutect2FilterWorker.h"
namespace fcsgenome {
Mutect2FilterWorker::Mutect2FilterWorker(
std::vector<std::string> intv_path,
std::string input_path,
std::string tumor_table,
std::string output_path,
std::vector<std::string> extra_opts,
bool &flag_f,
bool flag_gatk): Worker(1, get_config<int>("gatk.mutect2.nct", "gatk.nct"), extra_opts, "Generating Mutect2Filter VCF"),
intv_path_(intv_path),
input_path_(input_path),
tumor_table_(tumor_table),
output_path_(output_path),
flag_gatk_(flag_gatk)
{
// check input/output files
output_path_ = check_output(output_path, flag_f);
}
void Mutect2FilterWorker::check() {
//intv_path_ = check_input(intv_path_);
input_path_ = check_input(input_path_);
if (!tumor_table_.empty()){
tumor_table_ = check_input(tumor_table_);
}
}
void Mutect2FilterWorker::setup() {
// create cmd
std::stringstream cmd;
cmd << get_config<std::string>("java_path") << " "
<< "-Xmx" << get_config<int>("gatk.mutect2.memory", "gatk.memory") << "g "
<< "-jar " << get_config<std::string>("gatk4_path") << " FilterMutectCalls "
<< "-V " << input_path_ << " " ;
  // Look for an accompanying interval file with the same basename
  // (.bed or .list) and pass it along if present.
std::string ext[2] = {"bed", "list"};
for (int k=0; k<2; k++){
std::string target = get_fname_by_ext(input_path_, ext[k]);
if (boost::filesystem::exists(target)){
cmd << " -L " << target ;
}
  }
cmd << " -O " << output_path_ << " ";
for (auto a: intv_path_){
cmd << " -L " << a << " ";
}
cmd << " -isr INTERSECTION ";
if (!tumor_table_.empty()){
cmd << " -contamination-table " << tumor_table_ << " ";
}
for (auto it = extra_opts_.begin(); it != extra_opts_.end(); it++) {
cmd << it->first << " ";
for ( auto vec_iter = it->second.begin(); vec_iter != it->second.end(); vec_iter++) {
if (!(*vec_iter).empty() && vec_iter == it->second.begin()) {
cmd << *vec_iter << " ";
}
else if (!(*vec_iter).empty()) {
cmd << it->first << " " << *vec_iter << " ";
}
}
}
cmd_ = cmd.str();
DLOG(INFO) << cmd_;
}
} // namespace fcsgenome
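// Illustrative shape of the generated command line (paths hypothetical):
//   java -Xmx<N>g -jar gatk4.jar FilterMutectCalls -V input.vcf -L input.bed \
//     -O output.vcf -L <interval> -isr INTERSECTION \
//     -contamination-table tumor.table <extra options>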
|
{"hexsha": "685f9c40d7c76e0960d07ff0f79947cc30a509fa", "size": 2414, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/workers/Mutect2FilterWorker.cpp", "max_stars_repo_name": "FCS-holding/falcon-genome", "max_stars_repo_head_hexsha": "bbba762ec54139392be843e9edff21766d5d7f5b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/workers/Mutect2FilterWorker.cpp", "max_issues_repo_name": "FCS-holding/falcon-genome", "max_issues_repo_head_hexsha": "bbba762ec54139392be843e9edff21766d5d7f5b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/workers/Mutect2FilterWorker.cpp", "max_forks_repo_name": "FCS-holding/falcon-genome", "max_forks_repo_head_hexsha": "bbba762ec54139392be843e9edff21766d5d7f5b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0697674419, "max_line_length": 124, "alphanum_fraction": 0.601905551, "num_tokens": 709}
|
c----------------------------------------------------------
c         tapering both ends of the input seismogram
c----------------------------------------------------------
subroutine taper(nb,ne,n,seis,ntapb,ntape,ss,ncorr)
implicit none
integer*4 nb,ne,n,ntapb,ntape,ncorr
real*4 seis(32768)
real*8 s(32768)
double complex ss(32768)
real*8 omb,ome,sums,c,r,pi
integer*4 k,ns
n = n
pi = datan(1.0d0)*4.0d0
omb = pi/ntapb
ome = pi/ntape
ncorr = ne+ntape
cxx s = seis(1:ncorr)
c make copy seis to s
do k = 1,ncorr
s(k) = seis(k)
enddo
if(nb-ntapb-1 .gt. 0) then
do k=1,nb-ntapb-1
s(k) = 0.0d0
enddo
endif
sums = 0.0d0
c left end of the signal
do k = nb,nb-ntapb,-1
r = (dcos(omb*(nb-k))+1.0d0)/2.0d0
sums = sums + 2.0d0*r
s(k) = s(k)*r
enddo
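c     (illustrative) the weight r above is a raised cosine:
c     r = (cos(pi*(nb-k)/ntapb)+1)/2, equal to 1 at k = nb
c     and falling to 0 at k = nb-ntapb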
c right end of the signal
do k = ne,ne+ntape
s(k) = s(k)*(dcos(ome*(ne-k))+1.0d0)/2.0d0
enddo
sums = sums+ne-nb-1
c = 0.0d0
do k = 1,ncorr
c = c + s(k)
enddo
c = -c/sums
c left end of the signal
do k = nb,nb-ntapb,-1
r = (dcos(omb*(nb-k))+1.0d0)/2.0d0
s(k) = s(k)+r*c
enddo
c right end of the signal
do k = ne,ne+ntape
r = (dcos(ome*(ne-k))+1.0d0)/2.0d0
s(k) = s(k)+r*c
enddo
c middle of the signal
do k = nb+1,ne-1
s(k) = s(k)+c
enddo
c determine the power-of-two FFT length (clamped to 2**12..2**15)
ns = 2**(min0(max0(int(dlog(dble(ncorr))/dlog(2.0d0))+1,12),15))
if(ns .gt. ncorr) then
do k = ncorr+1,ns
s(k) = 0.0d0
enddo
endif
ncorr = ns
c convert to complex
do k =1,ns
ss(k) = cmplx(s(k),0.0d0)
enddo
return
end
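c ----------------------------------------------------------
c illustrative call sketch (values are hypothetical, not from
c the original source): taper samples 101..1900 of a trace
c with 50-sample raised-cosine ramps on each side
c      nb = 101
c      ne = 1900
c      ntapb = 50
c      ntape = 50
c      call taper(nb,ne,2048,seis,ntapb,ntape,ss,ncorr)
c on return ss holds the tapered, demeaned trace zero-padded
c to ncorr points (a power of two), ready for an FFT
c ----------------------------------------------------------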
|
{"hexsha": "9e26d080a536a7424b5767996b82b93ad9cf8380", "size": 1841, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/taper.f", "max_stars_repo_name": "hfmark/aftan", "max_stars_repo_head_hexsha": "ab1da97a3b2e332af81ed808bab919c6bf98071f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-16T14:45:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T02:04:16.000Z", "max_issues_repo_path": "src/taper.f", "max_issues_repo_name": "hfmark/aftan", "max_issues_repo_head_hexsha": "ab1da97a3b2e332af81ed808bab919c6bf98071f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/taper.f", "max_forks_repo_name": "hfmark/aftan", "max_forks_repo_head_hexsha": "ab1da97a3b2e332af81ed808bab919c6bf98071f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-22T07:50:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-27T23:24:39.000Z", "avg_line_length": 25.2191780822, "max_line_length": 70, "alphanum_fraction": 0.4573601304, "num_tokens": 715}
|
"""Adds random forces to the base of Minitaur during the simulation steps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import functools
import math
import gin
import numpy as np
from motion_imitation.envs.utilities import env_randomizer_base
from motion_imitation.robots.minitaur import Minitaur
import csv
# _PERTURBATION_START_STEP = 100
# _PERTURBATION_INTERVAL_STEPS = 200
# _PERTURBATION_DURATION_STEPS = 10
# _HORIZONTAL_FORCE_UPPER_BOUND = 120
# _HORIZONTAL_FORCE_LOWER_BOUND = 240
# _VERTICAL_FORCE_UPPER_BOUND = 300
# _VERTICAL_FORCE_LOWER_BOUND = 500
_PERTURBATION_START_STEP = 0
_PERTURBATION_INTERVAL_STEPS = 1
_PERTURBATION_DURATION_STEPS = 99999999999999999999
_HORIZONTAL_FORCE_UPPER_BOUND = 120000
_HORIZONTAL_FORCE_LOWER_BOUND = 24000
_VERTICAL_FORCE_UPPER_BOUND = 300000
_VERTICAL_FORCE_LOWER_BOUND = 50000
@gin.configurable
class MinitaurPushRandomizer(env_randomizer_base.EnvRandomizerBase):
"""Applies a random impulse to the base of Minitaur."""
def __init__(
self,
perturbation_start_step=_PERTURBATION_START_STEP,
perturbation_interval_steps=_PERTURBATION_INTERVAL_STEPS,
perturbation_duration_steps=_PERTURBATION_DURATION_STEPS,
horizontal_force_bound=None,
vertical_force_bound=None,
):
"""Initializes the randomizer.
Args:
perturbation_start_step: No perturbation force before the env has advanced
this amount of steps.
perturbation_interval_steps: The step interval between applying
perturbation forces.
perturbation_duration_steps: The duration of the perturbation force.
horizontal_force_bound: The lower and upper bound of the applied force
magnitude when projected in the horizontal plane.
vertical_force_bound: The z component (abs value) bound of the applied
perturbation force.
"""
self._perturbation_start_step = perturbation_start_step
self._perturbation_interval_steps = perturbation_interval_steps
self._perturbation_duration_steps = perturbation_duration_steps
self._horizontal_force_bound = (horizontal_force_bound if horizontal_force_bound else
[_HORIZONTAL_FORCE_LOWER_BOUND, _HORIZONTAL_FORCE_UPPER_BOUND])
self._vertical_force_bound = (vertical_force_bound if vertical_force_bound else
[_VERTICAL_FORCE_LOWER_BOUND, _VERTICAL_FORCE_UPPER_BOUND])
self._perturbation_parameter_dict = None
self.myenv = None
self.xyz_acc = self.read_csv('/home/yoonwoo/motion_imitation/ab13.csv')
def read_csv(self, filename):
with open(filename, 'r') as csvfile:
datareader = list(csv.reader(csvfile))
      datareader = np.array(datareader[1:], dtype=np.float64)
np_data = datareader[:,1:4]
return np_data
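  # Assumed CSV layout (inferred from the slicing above, not documented in the
  # original source): one header row, then rows of
  #   <timestamp-or-index>, acc_x, acc_y, acc_z, ...
  # so datareader[:, 1:4] extracts the three acceleration components.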
def _get_robot_from_env(self, env):
if hasattr(env, "minitaur"):
return env.minitaur
elif hasattr(env, "robot"):
return env.robot
else:
return None
def randomize_env(self, env):
"""Randomizes the simulation environment.
Args:
env: The Minitaur gym environment to be randomized.
"""
self.myenv = self._get_robot_from_env(env)
def randomize_step(self, env):
"""Randomizes env steps.
Will be called at every env step. Called to generate randomized force and
    torque to apply. Application of forces is done in randomize_sub_step.
Args:
env: The Minitaur gym environment to be randomized.
"""
robot = self.myenv
base_link_ids = robot.chassis_link_ids
# print("********************", base_link_ids)
# env_step_counter --> step_counter
if env.env_step_counter % self._perturbation_interval_steps == 0:
      # a new perturbation force is sampled at this step
self._applied_link_id = base_link_ids[np.random.randint(0, len(base_link_ids))]
# horizontal_force_magnitude = np.random.uniform(self._horizontal_force_bound[0],
# self._horizontal_force_bound[1])
horizontal_force_magnitude = 1000
x_acc, y_acc, z_acc = self.xyz_acc[env.env_step_counter+14000]
theta = -math.pi/2 #np.random.uniform(0, 2 * math.pi)
vertical_force_magnitude = 0
# vertical_force_magnitude = np.random.uniform(self._vertical_force_bound[0],
# self._vertical_force_bound[1])
self._applied_force = 960 * np.array([y_acc, z_acc, x_acc])
# self._applied_force = horizontal_force_magnitude * np.array(
# [math.cos(theta), math.sin(theta), 0]) + np.array([0, 0, -vertical_force_magnitude])
#print('FORCE: ', self._applied_force)
# print(robot.GetMotorTorques())
if (env.env_step_counter % self._perturbation_interval_steps <
self._perturbation_duration_steps) and (env.env_step_counter >=
self._perturbation_start_step):
# Parameter of pybullet_client.applyExternalForce()
self._perturbation_parameter_dict = dict(#objectUniqueId=env.minitaur.quadruped,
objectUniqueId=robot.quadruped,
linkIndex=self._applied_link_id,
forceObj=self._applied_force,
posObj=[0.0, 0.0, 0.0],
flags=env.pybullet_client.LINK_FRAME)
env.pybullet_client.applyExternalForce(**self._perturbation_parameter_dict)
else:
self._perturbation_parameter_dict = None
def randomize_sub_step(self, env, sub_step_index, num_sub_steps):
"""Randomize simulation steps per sub steps (simulation step).
Will be called at every simulation step. This is the correct place to add
random forces/torques to Minitaur.
Args:
env: The Minitaur gym environment to be randomized.
sub_step_index: Index of sub step, from 0 to N-1. N is the action repeat.
num_sub_steps: Number of sub steps, equals to action repeat.
"""
print("sub_step ??????????????????????????")
if self._perturbation_parameter_dict is not None:
env.pybullet_client.applyExternalForce(**self._perturbation_parameter_dict)
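# Minimal usage sketch (hypothetical wiring; the actual gym env construction
# lives elsewhere in motion_imitation, so treat this as an assumption):
#
#   randomizer = MinitaurPushRandomizer()
#   env = build_minitaur_env(env_randomizers=[randomizer])  # hypothetical helper
#   env.reset()                          # env calls randomize_env()
#   env.step(env.action_space.sample())  # env calls randomize_step()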
|
{"hexsha": "6f54fe9e75b5a41743909ed4acfd41d16e5e9163", "size": 6543, "ext": "py", "lang": "Python", "max_stars_repo_path": "motion_imitation/envs/utilities/minitaur_push_randomizer.py", "max_stars_repo_name": "ywkim0606/fine-tuning-locomotion", "max_stars_repo_head_hexsha": "96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-28T03:02:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T03:02:04.000Z", "max_issues_repo_path": "motion_imitation/envs/utilities/minitaur_push_randomizer.py", "max_issues_repo_name": "ywkim0606/fine-tuning-locomotion", "max_issues_repo_head_hexsha": "96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "motion_imitation/envs/utilities/minitaur_push_randomizer.py", "max_forks_repo_name": "ywkim0606/fine-tuning-locomotion", "max_forks_repo_head_hexsha": "96d7c81458511c0a7a11b59cf8c2c3fb8df8a64b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2094594595, "max_line_length": 99, "alphanum_fraction": 0.7019715727, "include": true, "reason": "import numpy", "num_tokens": 1487}
|
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import os
import lasagne
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import ConcatLayer
from lasagne.layers import NonlinearityLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import DropoutLayer
try:
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers.dnn import MaxPool2DDNNLayer as PoolLayerDNN
except ImportError:
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayerDNN
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as LRNLayer
from lasagne.nonlinearities import softmax, linear
from lasagne.layers import Conv2DLayer
from lasagne.layers import Pool2DLayer
from lasagne.layers.normalization import batch_norm
def print_progress(percentage, loss, acc, final=False):
slashes = int(percentage * 25)
print('\r[' + ''.join(['#' for i in range(slashes)]) + ''.join([' ' for i in range(25 - slashes)]) + '] %.2f %%, loss %.3f, acc %.3f' % (percentage * 100, loss, acc), end='')
if final:
print()
def profile(func):
import time
def inner(*args, **kwargs):
time1 = time.time()
result = func(*args, **kwargs)
print('Took %d seconds' % int(time.time() - time1))
return result
return inner
class GoogleNet(object):
def __init__(self, weight_file=None, forward=False, learning_rate=0.001, dropout=0.4, lamb=0.00001):
self.input_var = T.tensor4('inputs')
self.net = self.build_model(self.input_var, forward, dropout)
if weight_file is not None:
self.load_weights(weight_file)
prediction = lasagne.layers.get_output(self.net['prob'])
self.target_var = T.ivector('targets')
loss = lasagne.objectives.categorical_crossentropy(prediction, self.target_var)
loss = loss.mean() + lamb * lasagne.regularization.l2(self.net['prob'].W)
params = lasagne.layers.get_all_params(self.net['prob'], trainable=True)
updates = lasagne.updates.adagrad(loss, params, learning_rate)
test_prediction = lasagne.layers.get_output(self.net['prob'], deterministic=True)
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), self.target_var), dtype=theano.config.floatX)
self.train_fn = theano.function([self.input_var, self.target_var], [loss, test_acc], updates=updates)
self.val_fn = theano.function([self.input_var, self.target_var], [loss, test_acc])
self.predict_fn = theano.function([self.input_var], [T.argmax(test_prediction, axis=1)])
@profile
def train_epoch(self, data, labels, batch_size=64):
print('Training')
loss = 0.0
acc = []
for i in range(0, len(labels), batch_size):
train_loss, train_acc = self.train_fn(data[i:i + batch_size], labels[i:i + batch_size])
print_progress(i / float(len(labels)), train_loss, train_acc)
acc.append(train_acc)
loss += train_loss
acc = np.mean(acc)
print_progress(1., loss, acc, final=True)
return loss, acc
@profile
def eval(self, data, labels, batch_size=128):
print('Evaluating')
acc = []
loss = 0.0
for i in range(0, len(labels), batch_size):
test_loss_val, test_acc_val = self.val_fn(data[i:i + batch_size], labels[i:i + batch_size])
print_progress(i / float(len(labels)), test_loss_val, test_acc_val)
loss += test_loss_val
acc.append(test_acc_val)
acc = np.mean(acc)
print_progress(1.0, loss, acc, final=True)
return loss, acc
@profile
def predict(self, data, batch_size=128):
print('Predicting')
predictions = list()
for i in range(0, len(data), batch_size):
prediction = self.predict_fn(data[i:i + batch_size])[0]
print_progress(i / float(len(data)), 0, 0)
predictions.append(prediction)
return np.hstack(predictions)
def write(self, file_name):
np.savez(file_name, *lasagne.layers.get_all_param_values(self.net['prob']))
def read(self, file_name):
        weights = np.load(file_name)
param_values = [weights['arr_%d' % i] for i in range(len(weights.files))]
lasagne.layers.set_all_param_values(self.net['prob'], param_values)
def load_weights(self, weight_file):
import pickle
        model = pickle.load(open(weight_file, 'rb'))
lasagne.layers.set_all_param_values(self.net['dropout1'], model['param values'][:-2])
def build_inception_module(self, name, input_layer, nfilters):
# nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
net = dict()
net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
net['pool_proj'] = ConvLayer(
net['pool'], nfilters[0], 1, flip_filters=False)
net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
net['3x3_reduce'] = ConvLayer(
input_layer, nfilters[2], 1, flip_filters=False)
net['3x3'] = ConvLayer(
net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)
net['5x5_reduce'] = ConvLayer(
input_layer, nfilters[4], 1, flip_filters=False)
net['5x5'] = ConvLayer(
net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)
net['output'] = ConcatLayer([
net['1x1'],
net['3x3'],
net['5x5'],
net['pool_proj'],
])
return {'{}/{}'.format(name, k): v for k, v in net.items()}
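    # For example, inception_3a below passes nfilters = (32, 64, 96, 128, 16, 32),
    # so the ConcatLayer output carries 64 + 128 + 32 + 32 = 256 channels
    # (1x1 + 3x3 + 5x5 + pool_proj); the two reduce layers feed the 3x3/5x5
    # branches and do not appear in the concatenation.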
def build_model(self, input_var, forward, dropout):
net = dict()
net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
net['conv1/7x7_s2'] = ConvLayer(
net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
net['pool1/3x3_s2'] = PoolLayer(
net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
net['conv2/3x3_reduce'] = ConvLayer(
net['pool1/norm1'], 64, 1, flip_filters=False)
net['conv2/3x3'] = ConvLayer(
net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
net['pool2/3x3_s2'] = PoolLayerDNN(net['conv2/norm2'], pool_size=3, stride=2)
net.update(self.build_inception_module('inception_3a',
net['pool2/3x3_s2'],
[32, 64, 96, 128, 16, 32]))
net.update(self.build_inception_module('inception_3b',
net['inception_3a/output'],
[64, 128, 128, 192, 32, 96]))
net['pool3/3x3_s2'] = PoolLayerDNN(net['inception_3b/output'],
pool_size=3, stride=2)
net.update(self.build_inception_module('inception_4a',
net['pool3/3x3_s2'],
[64, 192, 96, 208, 16, 48]))
net.update(self.build_inception_module('inception_4b',
net['inception_4a/output'],
[64, 160, 112, 224, 24, 64]))
net.update(self.build_inception_module('inception_4c',
net['inception_4b/output'],
[64, 128, 128, 256, 24, 64]))
net.update(self.build_inception_module('inception_4d',
net['inception_4c/output'],
[64, 112, 144, 288, 32, 64]))
net.update(self.build_inception_module('inception_4e',
net['inception_4d/output'],
[128, 256, 160, 320, 32, 128]))
net['pool4/3x3_s2'] = PoolLayerDNN(net['inception_4e/output'],
pool_size=3, stride=2)
net.update(self.build_inception_module('inception_5a',
net['pool4/3x3_s2'],
[128, 256, 160, 320, 32, 128]))
net.update(self.build_inception_module('inception_5b',
net['inception_5a/output'],
[128, 384, 192, 384, 48, 128]))
net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
if forward:
#net['fc6'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000)
net['prob'] = DenseLayer(net['pool5/7x7_s1'], num_units=4, nonlinearity=softmax)
else:
net['dropout1'] = DropoutLayer(net['pool5/7x7_s1'], p=dropout)
#net['fc6'] = DenseLayer(net['dropout1'], num_units=1000)
#net['dropout2'] = DropoutLayer(net['fc6'], p=dropout)
net['prob'] = DenseLayer(net['dropout1'], num_units=4, nonlinearity=softmax)
return net
class InceptionV3(GoogleNet):
def __init__(self, weight_file=None, forward=False, learning_rate=0.001, dropout=0.4, lamb=0.00001):
super(InceptionV3, self).__init__(weight_file, forward, learning_rate, dropout, lamb)
def bn_conv(self, input_layer, **kwargs):
l = Conv2DLayer(input_layer, **kwargs)
l = batch_norm(l, epsilon=0.001)
return l
def inceptionA(self, input_layer, nfilt):
# Corresponds to a modified version of figure 5 in the paper
l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=5, pad=2)
l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
l3 = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=3, pad=1)
l4 = Pool2DLayer(
input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)
return ConcatLayer([l1, l2, l3, l4])
def inceptionB(self, input_layer, nfilt):
# Corresponds to a modified version of figure 10 in the paper
l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)
l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)
l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
return ConcatLayer([l1, l2, l3])
def inceptionC(self, input_layer, nfilt):
# Corresponds to figure 6 in the paper
l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=(7, 1), pad=(3, 0))
l3 = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 7), pad=(0, 3))
l3 = self.bn_conv(l3, num_filters=nfilt[2][3], filter_size=(7, 1), pad=(3, 0))
l3 = self.bn_conv(l3, num_filters=nfilt[2][4], filter_size=(1, 7), pad=(0, 3))
l4 = Pool2DLayer(
input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)
return ConcatLayer([l1, l2, l3, l4])
def inceptionD(self, input_layer, nfilt):
# Corresponds to a modified version of figure 10 in the paper
l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
l1 = self.bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)
l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
l2 = self.bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)
l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
return ConcatLayer([l1, l2, l3])
def inceptionE(self, input_layer, nfilt, pool_mode):
# Corresponds to figure 7 in the paper
l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2a = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 3), pad=(0, 1))
l2b = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(3, 1), pad=(1, 0))
l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
l3a = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 3), pad=(0, 1))
l3b = self.bn_conv(l3, num_filters=nfilt[2][3], filter_size=(3, 1), pad=(1, 0))
l4 = Pool2DLayer(
input_layer, pool_size=3, stride=1, pad=1, mode=pool_mode)
l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)
return ConcatLayer([l1, l2a, l2b, l3a, l3b, l4])
def load_weights(self, weight_file):
import pickle
        model = pickle.load(open(weight_file, 'rb'))
lasagne.layers.set_all_param_values(self.net['pool3'], model['param values'][:350])
def build_model(self, input_var, forward, dropout):
net = dict()
net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
net['conv'] = self.bn_conv(net['input'],
num_filters=32, filter_size=3, stride=2)
net['conv_1'] = self.bn_conv(net['conv'], num_filters=32, filter_size=3)
net['conv_2'] = self.bn_conv(net['conv_1'],
num_filters=64, filter_size=3, pad=1)
net['pool'] = Pool2DLayer(net['conv_2'], pool_size=3, stride=2, mode='max')
net['conv_3'] = self.bn_conv(net['pool'], num_filters=80, filter_size=1)
net['conv_4'] = self.bn_conv(net['conv_3'], num_filters=192, filter_size=3)
net['pool_1'] = Pool2DLayer(net['conv_4'],
pool_size=3, stride=2, mode='max')
net['mixed/join'] = self.inceptionA(
net['pool_1'], nfilt=((64,), (48, 64), (64, 96, 96), (32,)))
net['mixed_1/join'] = self.inceptionA(
net['mixed/join'], nfilt=((64,), (48, 64), (64, 96, 96), (64,)))
net['mixed_2/join'] = self.inceptionA(
net['mixed_1/join'], nfilt=((64,), (48, 64), (64, 96, 96), (64,)))
net['mixed_3/join'] = self.inceptionB(
net['mixed_2/join'], nfilt=((384,), (64, 96, 96)))
net['mixed_4/join'] = self.inceptionC(
net['mixed_3/join'],
nfilt=((192,), (128, 128, 192), (128, 128, 128, 128, 192), (192,)))
net['mixed_5/join'] = self.inceptionC(
net['mixed_4/join'],
nfilt=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)))
net['mixed_6/join'] = self.inceptionC(
net['mixed_5/join'],
nfilt=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)))
net['mixed_7/join'] = self.inceptionC(
net['mixed_6/join'],
nfilt=((192,), (192, 192, 192), (192, 192, 192, 192, 192), (192,)))
# net['mixed_8/join'] = self.inceptionD(
# net['mixed_7/join'],
# nfilt=((192, 320), (192, 192, 192, 192)))
# net['mixed_9/join'] = self.inceptionE(
# net['mixed_8/join'],
# nfilt=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)),
# pool_mode='average_exc_pad')
# net['mixed_10/join'] = self.inceptionE(
# net['mixed_9/join'],
# nfilt=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)),
# pool_mode='max')
net['pool3'] = GlobalPoolLayer(net['mixed_7/join'])
net['prob'] = DenseLayer(
net['pool3'], num_units=4, nonlinearity=softmax)
return net
def train(train_data, train_labels,
valid_data, valid_labels,
model='v1',
learning_rate=0.0005, dropout=0.4, batch_size=64, lamb=0.00001, epochs=30):
if model == 'v1':
model = GoogleNet(learning_rate=learning_rate, dropout=dropout)
# Pretrained GoogleNet model on ImageNet
# Taken from here https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/blvc_googlenet.pkl
# License: Unrestricted Use
# https://github.com/Lasagne/Recipes/blob/master/modelzoo/googlenet.py
model.load_weights('/var/node436/local/mngo/src/facedu/Models/GoogleNet-224/blvc_googlenet.pkl')
else:
model = InceptionV3(learning_rate=learning_rate, dropout=dropout)
# Pretrained GoogleNet model on ImageNet
# Taken from here https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/inception_v3.pkl
# License: Unrestricted Use
# https://github.com/Lasagne/Recipes/blob/master/modelzoo/inception_v3.py
model.load_weights('/var/node436/local/mngo/src/facedu/Models/GoogleNet-224/inception_v3.pkl')
prev_valid_accuracy = 0
wait = 0
for epoch in range(1, epochs + 1):
print('::: Epoch %d :::' % epoch)
        indexes = np.random.permutation(len(train_labels))  # shuffle without replacement
train_data = train_data[indexes]
train_labels = train_labels[indexes]
train_loss, train_acc = model.train_epoch(train_data, train_labels, batch_size=batch_size)
loss, acc = model.eval(valid_data, valid_labels)
print('Validation loss %.3f, accuracy %.3f' % (loss, acc))
if train_acc > 0.99: # Overfits
break
if acc < prev_valid_accuracy:
wait += 1
if wait > 2:
break
else:
wait = 0
prev_valid_accuracy = acc
print('Storing a model')
model.write('model_tmp.npz')
return prev_valid_accuracy
def predict(valid_data, valid_labels, test_data, model='v1', model_path='model.npz'):
if model == 'v1':
model = GoogleNet(forward=True)
else:
model = InceptionV3(forward=True)
model.read(model_path)
loss, acc = model.eval(valid_data, valid_labels)
print('Validation loss %.3f, accuracy %.3f' % (loss, acc))
prediction = model.predict(test_data)
prediction += 1
return prediction, model.predict(valid_data) + 1
def random_search_hyperparameters(train_data, train_labels,
valid_data, valid_labels, model='v1', times=40):
"""
Implementation of the paper http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf
"""
import random
print('Tuning')
### PARAMETERS TUNING CODE ###
best_BATCH = 0
best_LR = 0
best_DO = 0
best_acc = 0
best_L2 = 0
for i in range(times):
BATCH = random.randint(20, 40)
LR = random.uniform(0.0025, 0.0075)
if model == 'v1':
DO = random.uniform(0.15, 0.45)
else:
DO = 0.5
L2 = 0.00001
print('Trying BATCH %d, LR %.7f, DO %.5f, L2 %.6f' % (BATCH, LR, DO, L2))
acc = train(train_data, train_labels, valid_data, valid_labels, model, LR, DO, BATCH, L2)
if acc > best_acc:
best_acc = acc
best_BATCH = BATCH
best_LR = LR
best_DO = DO
best_L2 = L2
print('Best one updated!')
os.rename('model_tmp.npz', 'model.npz')
print('BEST BATCH %d, LR %.7f, DO %.5f, ACC %.5f' % (best_BATCH, best_LR, best_DO, best_acc))
return best_BATCH, best_LR, best_DO, best_acc
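# Minimal usage sketch (data loading is hypothetical; arrays are assumed to be
# float32 NCHW images with int32 labels):
#
#   train_x, train_y, valid_x, valid_y, test_x = load_dataset()  # hypothetical
#   random_search_hyperparameters(train_x, train_y, valid_x, valid_y, model='v1')
#   preds, valid_preds = predict(valid_x, valid_y, test_x, model='v1',
#                                model_path='model.npz')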
|
{"hexsha": "6185c985823cbe9170b305ec6af59152e01d7377", "size": 20514, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/googlenet.py", "max_stars_repo_name": "Ignotus/kaggle-dsg-qualification", "max_stars_repo_head_hexsha": "109155f164811b8a601481dbb5eacbc0f9f9983b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2016-08-31T07:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-14T08:20:07.000Z", "max_issues_repo_path": "models/googlenet.py", "max_issues_repo_name": "Ignotus/kaggle-dsg-qualification", "max_issues_repo_head_hexsha": "109155f164811b8a601481dbb5eacbc0f9f9983b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-11-26T06:22:36.000Z", "max_issues_repo_issues_event_max_datetime": "2016-11-26T06:22:36.000Z", "max_forks_repo_path": "models/googlenet.py", "max_forks_repo_name": "Ignotus/kaggle-dsg-qualification", "max_forks_repo_head_hexsha": "109155f164811b8a601481dbb5eacbc0f9f9983b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-04T10:12:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T10:12:26.000Z", "avg_line_length": 43.0966386555, "max_line_length": 178, "alphanum_fraction": 0.5927171688, "include": true, "reason": "import numpy,import theano", "num_tokens": 5815}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import re
import sys
import unittest
import numpy as np
import paddle
import scipy.fft
DEVICES = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
DEVICES.append(paddle.CUDAPlace(0))
TEST_CASE_NAME = 'suffix'
# All test cases use float64 for comparison precision; refs:
# https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64
RTOL = {
'float32': 1e-03,
'complex64': 1e-3,
'float64': 1e-7,
'complex128': 1e-7
}
ATOL = {'float32': 0.0, 'complex64': 0, 'float64': 0.0, 'complex128': 0}
def rand_x(dims=1,
dtype='float64',
min_dim_len=1,
max_dim_len=10,
complex=False):
    shape = [np.random.randint(min_dim_len, max_dim_len) for _ in range(dims)]
if complex:
return np.random.randn(*shape).astype(dtype) + 1.j * np.random.randn(
*shape).astype(dtype)
else:
return np.random.randn(*shape).astype(dtype)
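# Example outputs (shapes are drawn uniformly from [min_dim_len, max_dim_len)):
#   rand_x(3)                -> float64 ndarray, e.g. shape (7, 2, 9)
#   rand_x(2, complex=True)  -> complex-valued ndarray of rank 2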
def place(devices, key='place'):
def decorate(cls):
module = sys.modules[cls.__module__].__dict__
raw_classes = {
k: v
for k, v in module.items() if k.startswith(cls.__name__)
}
for raw_name, raw_cls in raw_classes.items():
for d in devices:
test_cls = dict(raw_cls.__dict__)
test_cls.update({key: d})
new_name = raw_name + '.' + d.__class__.__name__
module[new_name] = type(new_name, (raw_cls, ), test_cls)
del module[raw_name]
return cls
return decorate
def parameterize(fields, values=None):
fields = [fields] if isinstance(fields, str) else fields
params = [dict(zip(fields, vals)) for vals in values]
def decorate(cls):
test_cls_module = sys.modules[cls.__module__].__dict__
for k, v in enumerate(params):
test_cls = dict(cls.__dict__)
test_cls.update(v)
name = cls.__name__ + str(k)
name = name + '.' + v.get('suffix') if v.get('suffix') else name
test_cls_module[name] = type(name, (cls, ), test_cls)
for m in list(cls.__dict__):
if m.startswith("test"):
delattr(cls, m)
return cls
return decorate
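# Sketch of what the two decorators generate together (names taken from the
# classes below; this is a description, not executable setup):
#
#   @place(DEVICES)
#   @parameterize((TEST_CASE_NAME, 'x', ...), [('test_x_float64', ...), ...])
#   class TestFft(unittest.TestCase): ...
#
# parameterize first emits one module-level class per parameter tuple, e.g.
# TestFft0.test_x_float64, each carrying its values as class attributes; place
# then clones every such class once per device, appending the place name, e.g.
# TestFft0.test_x_float64.CPUPlace (and .CUDAPlace when CUDA is available).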
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'),
[('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'),
('test_x_complex', rand_x(
5, complex=True), None, -1,
'backward'), ('test_n_grater_input_length', rand_x(
5, max_dim_len=5), 11, -1,
'backward'), ('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5, complex=True), 3, -1, 'backward'),
('test_axis_not_last', rand_x(5), None, 3, 'backward'),
('test_norm_forward', rand_x(5), None, 3, 'forward'),
('test_norm_ortho', rand_x(5), None, 3, 'ortho')])
class TestFft(unittest.TestCase):
def test_fft(self):
"""Test fft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(
scipy.fft.fft(self.x, self.n, self.axis, self.norm),
paddle.fft.fft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [
('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError),
('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)
])
class TestFftException(unittest.TestCase):
def test_fft(self):
"""Test fft with buoudary condition
Test case include:
- n out of range
- axis out of range
- axis type error
- norm out of range
"""
with self.assertRaises(self.expect_exception):
paddle.fft.fft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_float64', rand_x(5), None, (0, 1), 'backward'),
('test_x_complex128', rand_x(
5, complex=True), None, (0, 1), 'backward'),
('test_n_grater_input_length', rand_x(
5, max_dim_len=5), (6, 6), (0, 1), 'backward'),
('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5, complex=True), (4, 4), (0, 1), 'backward'),
('test_axis_random', rand_x(5), None, (1, 2), 'backward'),
('test_axis_none', rand_x(5), None, None, 'backward'),
('test_norm_forward', rand_x(5), None, (0, 1), 'forward'),
('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'),
])
class TestFft2(unittest.TestCase):
def test_fft2(self):
"""Test fft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(
scipy.fft.fft2(self.x, self.n, self.axis, self.norm),
paddle.fft.fft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_x_complex_input', rand_x(
2, complex=True), None, (0, 1), None,
ValueError), ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None,
ValueError), ('test_n_nagative', rand_x(2), -1, (0, 1),
'backward', ValueError),
('test_n_len_not_equal_axis', rand_x(
5, max_dim_len=5), 11, (0, 1), 'backward',
ValueError), ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward',
ValueError), ('test_axis_out_of_range', rand_x(2), None,
(0, 1, 2), 'backward', ValueError),
('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError),
('test_axis_not_sequence', rand_x(5), None, -10, 'backward', ValueError),
('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError)])
class TestFft2Exception(unittest.TestCase):
def test_fft2(self):
"""Test fft2 with buoudary condition
Test case include:
- input type error
- input dim error
- n out of range
- axis out of range
- axis type error
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.fft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'),
[('test_x_float64', rand_x(5, np.float64), None, None, 'backward'),
('test_x_complex128', rand_x(
5, complex=True), None, None,
'backward'), ('test_n_grater_input_length', rand_x(
5, max_dim_len=5), (6, 6), (1, 2), 'backward'), (
'test_n_smaller_input_length', rand_x(
5, min_dim_len=5, complex=True), (3, 3), (1, 2), 'backward'),
('test_axis_not_default', rand_x(5), None, (1, 2),
'backward'), ('test_norm_forward', rand_x(5), None, None, 'forward'),
('test_norm_ortho', rand_x(5), None, None, 'ortho')])
class TestFftn(unittest.TestCase):
def test_fftn(self):
"""Test fftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.fftn(self.x, self.n, self.axis, self.norm),
paddle.fft.fftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, -1, "backward"),
('test_n_grater_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), 4, -1,
"backward"),
('test_n_smaller_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), 2, -1,
"backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, 1,
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, 1,
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, -1,
"ortho"),
])
class TestHfft(unittest.TestCase):
def test_hfft(self):
"""Test hfft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.hfft(self.x, self.n, self.axis, self.norm),
paddle.fft.hfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, -1, "backward"),
('test_n_grater_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), 4, -1,
"backward"),
('test_n_smaller_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), 2, -1,
"backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, -1,
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, -1,
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, -1,
"ortho"),
])
class TestIrfft(unittest.TestCase):
def test_irfft(self):
"""Test irfft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.irfft(self.x, self.n, self.axis, self.norm),
paddle.fft.irfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, None, "backward"),
('test_n_grater_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [4], None,
"backward"),
('test_n_smaller_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [2], None,
"backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"ortho"),
])
class TestIrfftn(unittest.TestCase):
def test_irfftn(self):
"""Test irfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.irfftn(self.x, self.n, self.axis, self.norm),
paddle.fft.irfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, None, "backward"),
('test_n_grater_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [4], None,
"backward"),
('test_n_smaller_than_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [2], None,
"backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, None,
"ortho"),
])
class TestHfftn(unittest.TestCase):
def test_hfftn(self):
"""Test hfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.hfftn(self.x, self.n, self.axis, self.norm),
paddle.fft.hfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, (-2, -1), "backward"),
('test_with_s', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
     [2, 2], (-2, -1), "backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"ortho"),
])
class TestHfft2(unittest.TestCase):
def test_hfft2(self):
"""Test hfft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.hfft2(self.x, self.s, self.axis, self.norm),
paddle.fft.hfft2(
paddle.to_tensor(self.x), self.s, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [
('test_x_complex128',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.complex128), None, (-2, -1), "backward"),
('test_n_equal_input_length',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (4, 6), (-2, -1),
"backward"),
('test_axis_not_last',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"backward"),
('test_norm_forward',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"forward"),
('test_norm_ortho',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, (-2, -1),
"ortho"),
])
class TestIrfft2(unittest.TestCase):
def test_irfft2(self):
"""Test irfft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.irfft2(self.x, self.s, self.axis, self.norm),
paddle.fft.irfft2(
paddle.to_tensor(self.x), self.s, self.axis, self.norm),
rtol=1e-5,
atol=0)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [(
'test_bool_input',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(np.bool8),
None, -1, 'backward', NotImplementedError), (
'test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), -1, -1,
'backward', ValueError), (
'test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4),
0, -1, 'backward', ValueError), (
'test_n_type',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(1, 2, 3), -1, 'backward', ValueError), (
'test_axis_out_of_range',
np.random.randn(4) + 1j * np.random.randn(4), None, 10,
'backward', ValueError), (
'test_axis_with_array',
np.random.randn(4) + 1j * np.random.randn(4), None,
(0, 1), 'backward', ValueError), (
'test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4),
None, -1, 'random', ValueError)])
class TestHfftException(unittest.TestCase):
def test_hfft(self):
"""Test hfft with buoudary condition
Test case include:
Test case include:
- n out of range
- n type error
- axis out of range
- axis type error
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.hfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), -1, -1,
'backward', ValueError),
('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1,
'backward', ValueError),
('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(1, 2), -1, 'backward', ValueError),
('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4),
None, 10, 'backward', ValueError),
('test_axis_with_array', np.random.randn(4) + 1j * np.random.randn(4),
None, (0, 1), 'backward',
ValueError), ('test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4), None,
None, 'random', ValueError)])
class TestIrfftException(unittest.TestCase):
def test_irfft(self):
"""
        Test irfft with boundary conditions
        Test cases include:
- n out of range
- n type error
- axis type error
- axis out of range
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.irfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_bool_input',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.bool8), None, (-2, -1), 'backward', NotImplementedError),
('test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
(-2, -1), 'backward', ValueError),
('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(0, 0), (-2, -1), 'backward', ValueError),
('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
3, None, 'backward', ValueError),
('test_n_axis_dim',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), (-1),
'backward', ValueError), ('test_axis_out_of_range',
np.random.randn(4) + 1j * np.random.randn(4),
None, (1, 2), 'backward', ValueError),
('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, -1,
'backward',
ValueError), ('test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4), None,
None, 'random', ValueError)])
class TestHfft2Exception(unittest.TestCase):
def test_hfft2(self):
"""
        Test hfft2 with boundary conditions
        Test cases include:
- input type error
- n type error
- n out of range
- axis out of range
- the dimensions of n and axis are different
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.hfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
(-2, -1), 'backward', ValueError),
('test_zero_point',
np.random.randn(4, 4, 1) + 1j * np.random.randn(4, 4, 1), None, (-2, -1),
"backward", ValueError),
('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(0, 0), (-2, -1), 'backward', ValueError),
('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
3, -1, 'backward',
ValueError), ('test_n_axis_dim',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(1, 2), (-3, -2, -1), 'backward', ValueError),
('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4),
None, (1, 2), 'backward', ValueError), (
'test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None,
1, 'backward',
ValueError), ('test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4),
None, None, 'random', ValueError)])
class TestIrfft2Exception(unittest.TestCase):
def test_irfft2(self):
"""
        Test irfft2 with boundary conditions
        Test cases include:
- input type error
- n type error
- n out of range
- axis out of range
- the dimensions of n and axis are different
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.irfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_bool_input',
(np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
).astype(np.bool8), None, (-2, -1), 'backward', NotImplementedError),
('test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
(-2, -1), 'backward', ValueError),
('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(0, 0), (-2, -1), 'backward', ValueError),
('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
3, -1, 'backward', ValueError),
('test_n_axis_dim',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(1, 2), (-3, -2, -1), 'backward',
ValueError), ('test_axis_out_of_range',
np.random.randn(4) + 1j * np.random.randn(4), None,
(10, 20), 'backward', ValueError),
('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1,
'backward',
ValueError), ('test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4), None,
None, 'random', ValueError)])
class TestHfftnException(unittest.TestCase):
def test_hfftn(self):
"""Test hfftn with buoudary condition
Test case include:
- input type error
- n type error
- n out of range
- axis out of range
- the dimensions of n and axis are different
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.hfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_n_nagative',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
(-2, -1), 'backward', ValueError),
('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(0, 0), (-2, -1), 'backward', ValueError),
('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
3, -1, 'backward',
ValueError), ('test_n_axis_dim',
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4),
(1, 2), (-3, -2, -1), 'backward', ValueError),
('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4),
None, (10, 20), 'backward', ValueError),
('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1,
'backward',
ValueError), ('test_norm_not_in_enum_value',
np.random.randn(4, 4) + 1j * np.random.randn(4, 4), None,
None, 'random', ValueError)])
class TestIrfftnException(unittest.TestCase):
def test_irfftn(self):
"""Test irfftn with buoudary condition
Test case include:
- n out of range
- n type error
- axis out of range
- norm out of range
- the dimensions of n and axis are different
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.irfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'),
[('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), (
'test_n_grater_than_input_length', rand_x(
5, max_dim_len=5), 11, -1, 'backward'),
('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5), 3, -1,
'backward'), ('test_axis_not_last', rand_x(5), None, 3, 'backward'),
('test_norm_forward', rand_x(5), None, 3, 'forward'),
('test_norm_ortho', rand_x(5), None, 3, 'ortho')])
class TestRfft(unittest.TestCase):
def test_rfft(self):
"""Test rfft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(
scipy.fft.rfft(self.x, self.n, self.axis, self.norm),
paddle.fft.rfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [
('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError),
('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)
])
class TestRfftException(unittest.TestCase):
def test_rfft(self):
"""Test rfft with buoudary condition
Test case include:
- n out of range
- axis out of range
- axis type error
- norm out of range
- the dimensions of n and axis are different
"""
with self.assertRaises(self.expect_exception):
paddle.fft.rfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_float64', rand_x(5), None, (0, 1), 'backward'),
('test_n_grater_input_length', rand_x(
5, max_dim_len=5), (6, 6), (0, 1), 'backward'),
('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5), (4, 4), (0, 1), 'backward'),
('test_axis_random', rand_x(5), None, (1, 2), 'backward'),
('test_axis_none', rand_x(5), None, None, 'backward'),
('test_norm_forward', rand_x(5), None, (0, 1), 'forward'),
('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'),
])
class TestRfft2(unittest.TestCase):
def test_rfft2(self):
"""Test rfft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(
scipy.fft.rfft2(self.x, self.n, self.axis, self.norm),
paddle.fft.rfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [
('test_x_complex_input', rand_x(
2, complex=True), None, (0, 1), 'backward', RuntimeError),
('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError),
('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError),
('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError),
('test_axis_out_of_range', rand_x(2), None, (0, 1, 2), 'backward',
ValueError),
('test_axis_with_array', rand_x(1), None, (0, 1), 'backward',
ValueError),
('test_axis_not_sequence', rand_x(5), None, -10, 'backward',
ValueError),
('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError),
])
class TestRfft2Exception(unittest.TestCase):
def test_rfft2(self):
"""Test rfft2 with buoudary condition
Test case include:
- input type error
- input dim error
- n out of range
- axis out of range
- norm out of range
- the dimensions of n and axis are different
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.rfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_float64', rand_x(5, np.float64), None, None, 'backward'),
('test_n_grater_input_length', rand_x(
5, max_dim_len=5), (6, 6), (1, 2), 'backward'),
('test_n_smaller_input_length', rand_x(
5, min_dim_len=5), (3, 3), (1, 2), 'backward'),
('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'),
('test_norm_forward', rand_x(5), None, None, 'forward'),
('test_norm_ortho', rand_x(5), None, None, 'ortho'),
])
class TestRfftn(unittest.TestCase):
def test_rfftn(self):
"""Test rfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(
scipy.fft.rfftn(self.x, self.n, self.axis, self.norm),
paddle.fft.rfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
[('test_x_complex', rand_x(
4, complex=True), None, None, 'backward',
RuntimeError), ('test_n_nagative', rand_x(4), (-1, -1), (1, 2),
'backward', ValueError),
('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError),
('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), (
'test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward',
ValueError),
('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError)])
class TestRfftnException(unittest.TestCase):
def test_rfftn(self):
"""Test rfftn with buoudary condition
Test case include:
- n out of range
- axis out of range
- norm out of range
- the dimensions of n and axis are different
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.rfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'),
[('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), (
'test_n_grater_than_input_length', rand_x(
5, max_dim_len=5), 11, -1, 'backward'),
('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5), 3, -1,
'backward'), ('test_axis_not_last', rand_x(5), None, 3, 'backward'),
('test_norm_forward', rand_x(5), None, 3, 'forward'),
('test_norm_ortho', rand_x(5), None, 3, 'ortho')])
class TestIhfft(unittest.TestCase):
def test_ihfft(self):
"""Test ihfft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.ihfft(self.x, self.n, self.axis, self.norm),
paddle.fft.ihfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [
('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError),
('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError),
('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError),
('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError),
('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)
])
class TestIhfftException(unittest.TestCase):
def test_ihfft(self):
"""Test ihfft with buoudary condition
Test case include:
- axis type error
- axis out of range
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.ihfft(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [
('test_x_float64', rand_x(5), None, (0, 1), 'backward'),
('test_n_grater_input_length', rand_x(
5, max_dim_len=5), (11, 11), (0, 1), 'backward'),
('test_n_smaller_than_input_length', rand_x(
5, min_dim_len=5), (1, 1), (0, 1), 'backward'),
('test_axis_random', rand_x(5), None, (1, 2), 'backward'),
('test_axis_none', rand_x(5), None, None, 'backward'),
('test_norm_forward', rand_x(5), None, (0, 1), 'forward'),
('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'),
])
class TestIhfft2(unittest.TestCase):
def test_ihfft2(self):
"""Test ihfft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.ihfft2(self.x, self.n, self.axis, self.norm),
paddle.fft.ihfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
    [('test_x_complex_input', rand_x(2, complex=True), None, (0, 1), None, ValueError),
     ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError),
     ('test_n_negative', rand_x(2), -1, (0, 1), 'backward', ValueError),
     ('test_n_len_not_equal_axis', rand_x(
         5, max_dim_len=5), 11, (0, 1), 'backward', ValueError),
     ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward', ValueError),
     ('test_axis_out_of_range', rand_x(2), None, (0, 1, 2), 'backward', ValueError),
     ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError),
     ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', ValueError),
     ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError)])
class TestIhfft2Exception(unittest.TestCase):
def test_ihfft2(self):
"""Test ihfft2 with buoudary condition
Test case include:
- input type error
- input dim error
- n out of range
- axis type error
- axis out of range
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.ihfft2(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'),
[('test_x_float64', rand_x(5, np.float64), None, None, 'backward'),
     ('test_n_greater_input_length', rand_x(
         5, max_dim_len=5), (11, 11), (0, 1), 'backward'),
     ('test_n_smaller_input_length', rand_x(
         5, min_dim_len=5), (1, 1), (0, 1), 'backward'),
     ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'),
     ('test_norm_forward', rand_x(5), None, None, 'forward'),
('test_norm_ortho', rand_x(5), None, None, 'ortho')])
class TestIhfftn(unittest.TestCase):
def test_ihfftn(self):
"""Test ihfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
            np.testing.assert_allclose(
                scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm),
                paddle.fft.ihfftn(
                    paddle.to_tensor(self.x), self.n, self.axis, self.norm),
                rtol=RTOL.get(str(self.x.dtype)),
                atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
    [('test_x_complex', rand_x(
        4, complex=True), None, None, 'backward', RuntimeError),
     ('test_n_negative', rand_x(4), -1, None, 'backward', ValueError),
     ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError),
     ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError),
     ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError)])
class TestIhfftnException(unittest.TestCase):
def test_ihfftn(self):
"""Test ihfftn with buoudary condition
Test case include:
- input type error
- n out of range
- axis out of range
- norm out of range
"""
with paddle.fluid.dygraph.guard(self.place):
with self.assertRaises(self.expect_exception):
paddle.fft.ihfftn(
paddle.to_tensor(self.x), self.n, self.axis, self.norm)
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'n', 'd', 'dtype'), [
('test_without_d', 20, 1, 'float32'),
('test_with_d', 20, 0.5, 'float32'),
])
class TestFftFreq(unittest.TestCase):
def test_fftfreq(self):
"""Test fftfreq with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.fftfreq(self.n, self.d).astype(self.dtype),
paddle.fft.fftfreq(self.n, self.d, self.dtype).numpy(),
rtol=RTOL.get(str(self.dtype)),
atol=ATOL.get(str(self.dtype)))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'n', 'd', 'dtype'), [
('test_without_d', 20, 1, 'float32'),
('test_with_d', 20, 0.5, 'float32'),
])
class TestRfftFreq(unittest.TestCase):
def test_rfftfreq(self):
"""Test rfftfreq with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.rfftfreq(self.n, self.d).astype(self.dtype),
paddle.fft.rfftfreq(self.n, self.d, self.dtype).numpy(),
rtol=RTOL.get(str(self.dtype)),
atol=ATOL.get(str(self.dtype)))
@place(DEVICES)
@parameterize((TEST_CASE_NAME, 'x', 'axes', 'dtype'), [
('test_1d', np.random.randn(10), (0, ), 'float64'),
('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
('test_2d_odd_with_all_axes',
np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128'),
])
class TestFftShift(unittest.TestCase):
def test_fftshift(self):
"""Test fftshift with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.fftshift(self.x, self.axes),
paddle.fft.fftshift(paddle.to_tensor(self.x),
self.axes).numpy(),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
@parameterize(
    (TEST_CASE_NAME, 'x', 'axes', 'dtype'),
    [('test_1d', np.random.randn(10), (0, ), 'float64'),
     ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
('test_2d_odd_with_all_axes',
np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128')])
class TestIfftShift(unittest.TestCase):
def test_ifftshift(self):
"""Test ifftshift with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
np.testing.assert_allclose(
scipy.fft.ifftshift(self.x, self.axes),
paddle.fft.ifftshift(paddle.to_tensor(self.x),
self.axes).numpy(),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
if __name__ == '__main__':
unittest.main()
# yapf: enable
|
{"hexsha": "0ef7a1e939e0220a69d1c46ef7a530d9ffa54adc", "size": 42819, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/fft/test_fft.py", "max_stars_repo_name": "2742195759/Paddle", "max_stars_repo_head_hexsha": "ce034db1834af85539b22ab68492df9972ff3e69", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-08T13:07:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-22T00:58:33.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/fft/test_fft.py", "max_issues_repo_name": "2742195759/Paddle", "max_issues_repo_head_hexsha": "ce034db1834af85539b22ab68492df9972ff3e69", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-26T04:06:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-29T04:25:24.000Z", "max_forks_repo_path": "python/paddle/fluid/tests/unittests/fft/test_fft.py", "max_forks_repo_name": "2742195759/Paddle", "max_forks_repo_head_hexsha": "ce034db1834af85539b22ab68492df9972ff3e69", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-25T10:41:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-25T10:41:52.000Z", "avg_line_length": 40.5099337748, "max_line_length": 80, "alphanum_fraction": 0.5668278101, "include": true, "reason": "import numpy,import scipy", "num_tokens": 12135}
|
[STATEMENT]
lemma INV_rule_from_inv_rule:
"\<lbrakk> init T \<subseteq> I; {I \<inter> reach T} (trans T) {> I} \<rbrakk>
\<Longrightarrow> reach T \<subseteq> I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>init T \<subseteq> I; {I \<inter> reach T} TS.trans T {> I}\<rbrakk> \<Longrightarrow> reach T \<subseteq> I
[PROOF STEP]
by (rule_tac I="I \<inter> reach T" in inv_rule, auto)
|
{"llama_tokens": 161, "file": "Consensus_Refined_Refinement", "length": 1}
|
[STATEMENT]
lemma mset_le_add_iff2:
"i \<le> (j::nat) \<Longrightarrow> (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j-i) u + n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
[PROOF STEP]
assume "i \<le> j"
[PROOF STATE]
proof (state)
this:
i \<le> j
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
i \<le> j
[PROOF STEP]
have "i + (j - i) = j"
[PROOF STATE]
proof (prove)
using this:
i \<le> j
goal (1 subgoal):
1. i + (j - i) = j
[PROOF STEP]
using le_add_diff_inverse
[PROOF STATE]
proof (prove)
using this:
i \<le> j
?b \<le> ?a \<Longrightarrow> ?b + (?a - ?b) = ?a
goal (1 subgoal):
1. i + (j - i) = j
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
i + (j - i) = j
goal (1 subgoal):
1. i \<le> j \<Longrightarrow> (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
i + (j - i) = j
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
i + (j - i) = j
goal (1 subgoal):
1. (repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
[PROOF STEP]
by (metis (no_types) add_le_cancel_left left_add_mult_distrib_mset)
[PROOF STATE]
proof (state)
this:
(repeat_mset i u + m \<le> repeat_mset j u + n) = (m \<le> repeat_mset (j - i) u + n)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 846, "file": null, "length": 10}
|
import config
import copy
import cv2
import importlib
import lipiodol_methods as lm
import niftiutils.masks as masks
import niftiutils.helper_fxns as hf
import niftiutils.transforms as tr
import niftiutils.registration as reg
import niftiutils.visualization as vis
import numpy as np
import random
import math
from math import pi, radians, degrees
import matplotlib.pyplot as plt
import glob
import shutil
import os
from os.path import *
from scipy.ndimage.morphology import binary_closing, binary_opening, binary_dilation
from skimage.morphology import ball, label
###########################
### Create tumor DICOMs
###########################
def write_ranked_imgs(df, target_dir, column, img_type, root_dir, overwrite=False, mask_type=None, window=None):
importlib.reload(masks)
if not exists(root_dir):
os.makedirs(root_dir)
for ix,row in df.dropna(subset=[column]).sort_values([column], ascending=False).iterrows():
save_dir = join(root_dir, "%d_%s" % (row[column]*100, ix))
patient_id = ix
P = lm.get_paths_dict(patient_id, target_dir)
if mask_type is not None:
masks.create_dcm_with_mask(eval(img_type), eval(mask_type), save_dir,
overwrite=True, padding=1.5, window=window)
else:
img = hf.nii_load(eval(img_type))
if window=="ct":
img = tr.apply_window(img)
hf.create_dicom(img, save_dir, overwrite=overwrite)
###########################
### Draw tumor pngs
###########################
def draw_unreg_fig(img_path, mask_path, save_path, color, modality, midslice=True):
img,D = hf.nii_load(img_path)
mask = masks.get_mask(mask_path, D, img.shape)
nz = np.argwhere(mask)
pad = [img.shape[0]//5, img.shape[1]//5]
sl1 = slice(max(nz[:,0].min()-pad[0],0), nz[:,0].max()+pad[0])
sl2 = slice(max(nz[:,1].min()-pad[1],0), nz[:,1].max()+pad[1])
img = np.transpose(img[sl1,sl2], (1,0,2))
mask = np.transpose(mask[sl1,sl2], (1,0,2))
sl1, sl2 = nz[:,-1].min(), nz[:,-1].max()
if midslice:
RNG = [(sl1+sl2)//2]
else:
RNG = range(sl1,sl2, max((sl2-sl1)//10,1))
if not exists(dirname(save_path)):
os.makedirs(dirname(save_path))
for sl in RNG:
plt.close()
if modality=="mr":
plt.imshow(img[...,sl], cmap='gray')
elif modality=="ct":
plt.imshow(img[...,sl], cmap='gray', vmin=30, vmax=250)
plt.contour(mask[:,:,sl], colors=color, alpha=.4)
plt.axis('off')
if midslice:
plt.savefig(save_path+".png", dpi=100, bbox_inches='tight')
else:
plt.savefig(save_path+"_%d.png" % sl, dpi=100, bbox_inches='tight')
def draw_reg_fig(img_path, mask_path, save_path, color, modality):
    img,_ = hf.nii_load(img_path)
mask = masks.get_mask(mask_path)
img = np.transpose(img, (1,0,2))
mask = np.transpose(mask, (1,0,2))
for sl in range(img.shape[-1]//5+1,img.shape[-1]*4//5, max(img.shape[-1]//8,1) ):
plt.close()
if modality=="mr":
plt.imshow(img[...,sl], cmap='gray')
elif modality=="ct":
plt.imshow(img[...,sl], cmap='gray', vmin=30, vmax=250)
plt.contour(mask[:,:,sl], colors=color, alpha=.4)
plt.axis('off')
plt.savefig(save_path+"_%d.png" % sl, dpi=100, bbox_inches='tight')
def draw_sub_and_depo(lesion_id, target_dir, save_dir, include_FU=False, padding=.3):
importlib.reload(lm)
P = lm.get_paths_dict(lesion_id, target_dir)
mod='mrbl'
ART = masks.crop_img_to_mask_vicinity(P[mod]['art'], P[mod]['tumor'], padding)
PRE = masks.crop_img_to_mask_vicinity(P[mod]['pre'], P[mod]['tumor'], padding)
CT = masks.crop_img_to_mask_vicinity(P['ct24']['img'], P['ct24']['tumor'], padding)
CT = tr.apply_window(CT)
if include_FU:
mod='mr30'
art = masks.crop_img_to_mask_vicinity(P[mod]['art'], P[mod]['tumor'], padding)
pre = masks.crop_img_to_mask_vicinity(P[mod]['pre'], P[mod]['tumor'], padding)
hf.draw_multi_slices([ART-PRE, CT, art-pre], save_path=join(save_dir, lesion_id), width=3, dpi=400)
else:
hf.draw_multi_slices([ART-PRE, CT], save_path=join(save_dir, lesion_id), width=4)
def draw_mrseq_with_mask(lesion_id, target_dir, save_dir, mod='mrbl'):
importlib.reload(masks)
P = lm.get_paths_dict(lesion_id, target_dir)
out_img = []
art,C = masks.crop_img_to_mask_vicinity(P[mod]['art'], P[mod]['tumor'], .5, return_crops=True)
pre = masks.crop_img_to_mask_vicinity(P[mod]['pre'], P[mod]['tumor'], .5)
equ = masks.crop_img_to_mask_vicinity(P[mod]['equ'], P[mod]['tumor'], .5)
sub = art - pre
sl = art.shape[-1]//2
I,D = hf.nii_load(P[mod]['art'])
if exists(P[mod]['enh'] + ".off"):
mask = masks.get_mask(P[mod]['enh'], D, I.shape)
mask = hf.crop_nonzero(mask, C)[0]
else:
mask = np.zeros(art.shape)
tumor_mask = masks.get_mask(P[mod]['tumor'], D, I.shape)
tumor_mask = hf.crop_nonzero(tumor_mask, C)[0]
sub_w_mask = vis.create_contour_img(sub, [tumor_mask, mask])
vis.display_sequence([pre[...,sl], art[...,sl], equ[...,sl], sub[...,sl],
sub_w_mask, mask[...,sl]], 2, 3,
join(save_dir, "%s_%s.png" % (lesion_id, mod)))
def draw_reg_seq(lesion_id, target_dir, save_dir):
importlib.reload(hf)
importlib.reload(masks)
P = lm.get_paths_dict(lesion_id, target_dir)
out_img = []
bl_img = masks.crop_img_to_mask_vicinity(P['mrbl']['sub'], P['mrbl']['tumor'], .5, add_mask_cont=True)
fu_img = masks.crop_img_to_mask_vicinity(P['mr30']['sub'], P['mr30']['tumor'], .5, add_mask_cont=True)
ct_img = masks.crop_img_to_mask_vicinity(P['ct24']['img'], P['ct24']['tumor'], .5, add_mask_cont=True, window=[0,300])
bl_Tx,D = hf.nii_load(P['ct24Tx']['mrbl']['art'])
fu_Tx = hf.nii_load(P['ct24Tx']['mr30']['art'])[0]
tumor_mask = masks.get_mask(P['ct24Tx']['crop']['tumor'])
sl = bl_Tx.shape[-1]//2
if exists(P['ct24Tx']['mrbl']['enh'] + ".off"):
bl_M = masks.get_mask(P['ct24Tx']['mrbl']['enh'], D, bl_Tx.shape)
else:
bl_M = np.zeros(bl_Tx.shape)
if exists(P['ct24Tx']['mr30']['enh'] + ".off"):
fu_M = masks.get_mask(P['ct24Tx']['mr30']['enh'], D, bl_Tx.shape)
else:
fu_M = np.zeros(bl_Tx.shape)
mask_overlay = np.stack([bl_M[...,sl], np.zeros(bl_M.shape[:2]), fu_M[...,sl]], -1)
bl_Tx_cont = vis.create_contour_img(bl_Tx[...,sl], [tumor_mask[...,sl], bl_M[...,sl]], colors=[(0,255,0), (255,0,0)])
fu_Tx_cont = vis.create_contour_img(fu_Tx[...,sl], [tumor_mask[...,sl], fu_M[...,sl]], colors=[(0,255,0), (0,0,255)])
#if exists(P['ct24Tx']['crop']['midlip'] + ".off"):
# ct_M = masks.get_mask(P['ct24Tx']['crop']['midlip'], D, bl_Tx.shape)
#else:
# ct_M = np.zeros(bl_Tx.shape)
#mask_overlay2 = np.stack([bl_M[...,sl]*fu_M[...,sl]/fu_M.max(), ct_M[...,sl], bl_M[...,sl]*(1-fu_M[...,sl]/fu_M.max())], -1)
vis.display_sequence([bl_img, fu_img, ct_img,#bl_img[...,bl_img.shape[-1]//2], fu_img[...,fu_img.shape[-1]//2], #ct_img[...,ct_img.shape[-1]//2]
bl_Tx_cont, #np.transpose(ct_Tx_cont, (1,0,2)),
fu_Tx_cont, mask_overlay],#, mask_overlay2],
2, 3, join(save_dir, "%s.png" % lesion_id))
###########################
### Feature checks
###########################
def check_feature(lesion_id, df, column, legend_names, criteria_pos, criteria_neg=None, restriction=None):
if lesion_id not in df.index:
return np.nan
if criteria_neg is None:
criteria_neg = lambda x: ~criteria_pos(x) & ~np.isnan(x)
if criteria_pos(df.loc[lesion_id, column]):
if restriction=="WD":
cnt = (criteria_pos(df[column]) & (df["0=well delineated, 1=infiltrative"]==0)).sum()
elif restriction=="Infiltrative":
cnt = (criteria_pos(df[column]) & (df["0=well delineated, 1=infiltrative"]==1)).sum()
else:
cnt = criteria_pos(df[column]).sum()
return legend_names[0] + " (n=%d)" % cnt
elif criteria_neg(df.loc[lesion_id, column]):
if restriction=="WD":
cnt = (criteria_neg(df[column]) & (df["0=well delineated, 1=infiltrative"]==0)).sum()
elif restriction=="Infiltrative":
cnt = (criteria_neg(df[column]) & (df["0=well delineated, 1=infiltrative"]==1)).sum()
else:
cnt = criteria_neg(df[column]).sum()
return legend_names[1] + " (n=%d)" % cnt
else:
return np.nan
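# Example of the labels produced (editor's illustration, values assumed): calling
# check_feature with column="enhancing_vol", criteria_pos=lambda x: x > .75 and 12
# matching lesions yields "Homogeneous\nenhancement (n=12)"; lesions matching
# neither criterion map to np.nan.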
def get_df_entry(lesion_id, master_df, modality):
if modality == "mrbl":
return [check_column(lesion_id, master_df, "0=well delineated, 1=infiltrative", {0: "Well-delineated", 1: "Infiltrative"}),
check_column(lesion_id, master_df, "HCC(0), ICC(1), other(2)", {0: "HCCs", 1: "ICCs", 2: "Metastases"}),
check_column(lesion_id, master_df, "selective=0", {0: "Selective TACE", 1: "Lobar TACE"})]
elif modality == "ct24":
return [check_column(lesion_id, master_df, "0=well delineated, 1=infiltrative", {0: "Well-delineated", 1: "Infiltrative"}),
check_column(lesion_id, master_df, "HCC(0), ICC(1), other(2)", {0: "HCCs", 1: "ICCs", 2: "Metastases"}),
check_column(lesion_id, master_df, "selective=0", {0: "Selective TACE", 1: "Lobar TACE"}),
check_homogeneous(lesion_id, master_df, modality),
check_sparse(lesion_id, master_df, modality),
check_rim(lesion_id, master_df, modality)]
def check_homogeneous(lesion_id, df, modality):
if modality == "mrbl":
return check_feature(lesion_id, df, "enhancing_vol",
legend_names=["Homogeneous\nenhancement", "Heterogeneous\nenhancement"],
criteria_pos=lambda x: x > .75, restriction="Well-delineated")
elif modality == "ct24":
return check_feature(lesion_id, df, "lipcoverage_vol",
legend_names=["Homogeneous\ndeposition", "Heterogeneous\ndeposition"],
criteria_pos=lambda x: x >= .8, restriction="Well-delineated")
def check_sparse(lesion_id, df, modality, restriction=None):
if modality == "mrbl":
return check_feature(lesion_id, df, "enhancing_vol",
legend_names=["Sparse enhancement", "Non-sparse, heterogeneous\nenhancement"],
criteria_pos=lambda x: x < .25, criteria_neg=lambda x: (x>=.25) & (x<.8),
restriction=restriction)
elif modality == "ct24":
return check_feature(lesion_id, df[df["lipcoverage_vol"] < .8], "lipcoverage_vol",
legend_names=["Sparse deposition", "Non-sparse, heterogeneous\ndeposition"],
criteria_pos=lambda x: x < .2, criteria_neg=lambda x: (x>=.2) & (x<.8),
restriction=restriction)
def check_rim(lesion_id, df, modality):
if modality == "mrbl":
return check_feature(lesion_id, df, "rim_enhancing",
legend_names=["Rim enhancement", "Non-rim, heterogeneous\nenhancement"],
criteria_pos=lambda x: x > .5, restriction="Well-delineated")
elif modality == "ct24":
return check_feature(lesion_id, df[df["lipcoverage_vol"] < .8], "rim_lipiodol",
legend_names=["Rim deposition", "Non-rim, heterogeneous\ndeposition"],
criteria_pos=lambda x: x > .5, restriction="Well-delineated")
def check_column(lesion_id, df, column, mapping, restriction=None):
if np.isnan(df.loc[lesion_id, column]):
return np.nan
else:
if restriction=="WD":
cnt = ((df[column]==df.loc[lesion_id, column]) & \
(df["0=well delineated, 1=infiltrative"]==0)).sum()
elif restriction=="Infiltrative":
cnt = ((df[column]==df.loc[lesion_id, column]) & \
(df["0=well delineated, 1=infiltrative"]==1)).sum()
else:
cnt = (df[column]==df.loc[lesion_id, column]).sum()
return mapping[df.loc[lesion_id, column]] + " (n=%d)" % cnt
|
{"hexsha": "814fa66e0112bd1f5f72ed41f82571d6f49546b8", "size": 10949, "ext": "py", "lang": "Python", "max_stars_repo_path": "lipiodol_vis.py", "max_stars_repo_name": "clintonjwang/lipiodol", "max_stars_repo_head_hexsha": "4952f56e7bda44135615c19bb982556be3767f94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lipiodol_vis.py", "max_issues_repo_name": "clintonjwang/lipiodol", "max_issues_repo_head_hexsha": "4952f56e7bda44135615c19bb982556be3767f94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lipiodol_vis.py", "max_forks_repo_name": "clintonjwang/lipiodol", "max_forks_repo_head_hexsha": "4952f56e7bda44135615c19bb982556be3767f94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5270758123, "max_line_length": 145, "alphanum_fraction": 0.6680062106, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3441}
|
# -*- coding: utf-8 -*-
__version__ = '0.1.10'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of bear when
# the binaries are not built
__BEAR_SETUP__
except NameError:
__BEAR_SETUP__ = False
if __BEAR_SETUP__:
import sys as _sys
_sys.stdout.write('Partial import of bear during the build process.\n')
del _sys
else:
__all__ = [
# submodules
'core',
'templates',
'utils'
]
# function for finding the package
def package_location():
import os
return os.path.abspath(os.path.dirname(__file__))
def setup_module(module):
# Fixture to assure global seeding of RNG
import numpy as np
import random
_random_seed = int(np.random.uniform() * (2 ** 31 - 1))
np.random.seed(_random_seed)
random.seed(_random_seed)
|
{"hexsha": "00e023f689e7b1f6839a71e7d3cd1bfedbd757e1", "size": 889, "ext": "py", "lang": "Python", "max_stars_repo_path": "bear/__init__.py", "max_stars_repo_name": "tgsmith61591/bear", "max_stars_repo_head_hexsha": "153fc6e8cb01427958a949eab0a270110d8044e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-31T01:56:18.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-31T01:56:18.000Z", "max_issues_repo_path": "bear/__init__.py", "max_issues_repo_name": "tgsmith61591/bear", "max_issues_repo_head_hexsha": "153fc6e8cb01427958a949eab0a270110d8044e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-24T18:29:14.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-24T18:29:14.000Z", "max_forks_repo_path": "bear/__init__.py", "max_forks_repo_name": "tgsmith61591/bear", "max_forks_repo_head_hexsha": "153fc6e8cb01427958a949eab0a270110d8044e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.225, "max_line_length": 75, "alphanum_fraction": 0.6625421822, "include": true, "reason": "import numpy", "num_tokens": 230}
|
[STATEMENT]
lemma phi0: "Phi 0 = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Phi> 0 = 0
[PROOF STEP]
unfolding Phi_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. E (map_pmf (\<phi> 0) (config'_rand BIT (fst BIT init \<bind> (\<lambda>is. return_pmf (init, is))) (take 0 qs))) = 0
[PROOF STEP]
by (simp add: bind_return_pmf map_pmf_def bind_assoc_pmf BIT_init_def)
|
{"llama_tokens": 180, "file": "List_Update_BIT", "length": 2}
|
# Convolutional Neural Network
from scipy.io import loadmat
import numpy as np
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense, Flatten,Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
from keras.layers.merge import concatenate
from keras.layers.merge import add
from keras.optimizers import SGD, Adam
import keras.layers
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
#from keras.layers.merge import
import patch_trial
import rotate
import matplotlib.pyplot as plt
import yaml
kk = patch_trial.get_data()
kkk = patch_trial.final_data(kk)
k = patch_trial.final_data_aug(kkk)
#x_train, y_train = k['train']['data'], k['train']['labels']
#x_val, y_val = k['val']['data'], k['val']['labels']
#x_test, y_test = k['test']['data'], k['test']['labels']
pred_img_data = kk['data']
print "**********************",pred_img_data.shape
#print x_train.shape, y_train.shape, x_test.shape, y_test.shape
'''
H, W = 5, 5
#input Layer:
inp = Input(shape=(H,W,200))
dropout1 = Dropout(0.5)
dropout2 = Dropout(0.5)
optimizr = Adam(lr = 0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
#5x5 conv filter
conv_5x5 = Conv2D(128, kernel_size=(5,5), activation='relu')(inp)
#3x3 conv filter
conv_3x3 = Conv2D(128, kernel_size=(3,3), activation='relu')(inp)
pool_3x3 = MaxPooling2D(pool_size=(3,3),strides=1)(conv_3x3)
#1x1 conv filter
conv_1x1 = Conv2D(128, kernel_size=(1,1), activation='relu')(inp)
pool_1x1 = MaxPooling2D(pool_size=(5,5),strides=1)(conv_1x1)
#Multi_Scale_Filter_Bank
first_concat_output = concatenate([conv_5x5, pool_3x3, pool_1x1])
norm_layer = keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)(first_concat_output)
#First_Residual Unit
conv_1x1_1 = Conv2D(128, kernel_size=(1,1),activation='relu')(norm_layer)
norm_layer1 = keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)(conv_1x1_1)
conv_1x1_2 = Conv2D(128, kernel_size=(1,1), activation='relu')(norm_layer1)
conv_1x1_3 = Conv2D(128, kernel_size=(1,1), activation='relu')(conv_1x1_2)
first_ReLU_sum = add([conv_1x1_1, conv_1x1_3])
#Second_Residual Unit
conv_1x1_4 = Conv2D(128, kernel_size=(1,1),activation='relu')(first_ReLU_sum)
conv_1x1_5 = Conv2D(128, kernel_size=(1,1), activation='relu')(conv_1x1_4)
second_ReLU_sum = add([first_ReLU_sum, conv_1x1_5])
conv_1x1_6 = Conv2D(128, kernel_size=(1,1),activation='relu')(second_ReLU_sum)
dropped_conv_1x1_6 = dropout1(conv_1x1_6)
conv_1x1_7 = Conv2D(128, kernel_size=(1,1),activation='relu')(dropped_conv_1x1_6)
dropped_conv_1x1_7 = dropout2(conv_1x1_7)
conv_1x1_8 = Conv2D(128, kernel_size=(1,1),activation='relu')(dropped_conv_1x1_7)
flat_vector = Flatten()(conv_1x1_8)
fcn1 = Dense(64, activation='relu')(flat_vector)
fcn = Dense(8, activation='softmax')(flat_vector)
model = Model(inputs=inp, outputs=fcn)
model.compile(optimizer=optimizr,loss='categorical_crossentropy',metrics=['accuracy'])
#checkpoint = ModelCheckpoint( 'model_CNN.h5' , monitor= 'val_loss' , verbose=0,
#save_best_only=True, mode= 'min' )
#history = model.fit(x_train, y_train, epochs = 600, batch_size=16, shuffle=True, verbose=2, validation_split=0.1, callbacks=[checkpoint])#, validation_data=(x_val, y_val))
#score = model.evaluate(x_test, y_test, batch_size=32)
#print score
'''
model_loaded = load_model('model_CNN_old.h5')
preds = []
def model_predict(model, data):
preds = []
predicts = model.predict(data, batch_size=data.shape[0])#, batch_size=53336, verbose=2)
print "Predicts"
#print type(predicts), predicts.shape, predicts[0]
for i in range(predicts.shape[0]):
preds.append(np.argmax(predicts[i]))
#print preds[i]
preds = np.array(preds)
# preds = np.reshape(preds, (145,145,))
return preds
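# model_predict returns a 1-D array of class indices, one per input patch
# (cf. the commented reshape to the 145x145 scene above).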
#prediction = model_predict(model_loaded, pred_img_data)
#print prediction.shape, prediction[0][0]
#print model.summary()
def get_data_1(data,target_mat):
data_dic = {}
#data = loadmat("Indian_pines_corrected.mat")['indian_pines_corrected']
#target_mat = scipy.io.loadmat("Indian_pines_gt.mat")['indian_pines_gt']
target_mat = np.array(target_mat)
labels = []
for i in range(145):
for j in range(145):
labels.append(target_mat[i , j])
labels = np.array(labels)
#print max(labels), min(labels)
#labels = target_mat #keras.utils.to_categorical(labels)
#labels = np.reshape(target_mat, (21025,1))
#print labels.shape
d = data
#d1 = np.pad(d, ((2,2), (2,2), (0,0)), mode='constant', constant_values=0)
#print d1.shape, d1
d= np.array(d)
d = d.astype(float)
d -= np.min(d)
d /= np.max(d)
y = []
for i in range(d.shape[2]):
dd = np.pad(d[0:d.shape[0],0:d.shape[1],i], [(2,2),(2,2)], mode='constant')
y.append(dd)
y = np.array(y)
#print y[0]
#d_p1 = np.dstack((y))
#print y.shape
y1 = []
for i in range(2, y.shape[1]-2):
for j in range(2, y.shape[2]-2):
y1.append(y[:, i-2:i+3, j-2:j+3])
yy = np.array(y1)
y1 = np.array(y1)
#print y1.shape,y1[0,:,2,2]
y1 = np.transpose(y1, (0,2,3,1))
#print y1.shape, yy[0,:,2,2] == y1[0,2,2,:]
data = y1
#print data.shape
data_dic['data'] = data
data_dic['labels'] = labels
#print labels.shape
#y_train = keras.utils.to_categorical(y_train)
return data_dic
d1 = {'data0':loadmat("Indian_pines_corrected.mat")['indian_pines_corrected']}
d2 = {'data0':loadmat("Indian_pines_gt.mat")['indian_pines_gt']}
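# Build inputs for a downstream LSTM decoder (editor's summary of the loop below):
# for i = 6..8 the Indian Pines cube is rotated i times, 5x5 patches are
# re-extracted, and the CNN's per-pixel argmax predictions are stored under key
# i+1. Note: the `time_steps` argument is currently unused.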
def get_LSTM_inputs(time_steps, model):
lstm_inputs = {}
cnn_inputs = {}
for i in range(6,9):
print i
inp = {'data0':d1['data0']}
targ = {'data0':d2['data0']}
for j in range(i):
inp = rotate.rotate_image(inp)
targ = rotate.rotate_image(targ)
data_to_cnn = get_data_1(inp['data0'],targ['data0'])
data_to_cnn_new = patch_trial.final_data(data_to_cnn)
cnn_inputs[i+1] = []
for item in data_to_cnn_new['train']['data']:
cnn_inputs[i+1].append(item)
for item in data_to_cnn_new['test']['data']:
cnn_inputs[i+1].append(item)
cnn_inputs[i+1] = np.array(cnn_inputs[i+1])
#print data_to_cnn_new['train']['data'].shape,data_to_cnn_new['test']['data'].shape,temp.shape
lstm_inputs[i+1] = model_predict(model,cnn_inputs[i+1])
return lstm_inputs
'''inp_lstm = get_LSTM_inputs(5, model_loaded)
#print inp_lstm
with open('decoder_data.yml', 'w') as outfile:
yaml.dump(inp_lstm, outfile, default_flow_style=False)'''
'''plot_model(model, to_file='dcn.png')
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
'''
#model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#model.fit(training_data, training_labels)
#argmax layer
#argmax_layer = K.argmax(conv_1x1_8, axis=-1)'''
|
{"hexsha": "67e3987216236c27b0fde7328ab7665dfa3eef65", "size": 7731, "ext": "py", "lang": "Python", "max_stars_repo_path": "final_yr_proj/ker_fnc.py", "max_stars_repo_name": "kauku123/Undergraduate_Fin_Proj_2018", "max_stars_repo_head_hexsha": "e635d03c05785ca898c7a6bc48261de81318be26", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "final_yr_proj/ker_fnc.py", "max_issues_repo_name": "kauku123/Undergraduate_Fin_Proj_2018", "max_issues_repo_head_hexsha": "e635d03c05785ca898c7a6bc48261de81318be26", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "final_yr_proj/ker_fnc.py", "max_forks_repo_name": "kauku123/Undergraduate_Fin_Proj_2018", "max_forks_repo_head_hexsha": "e635d03c05785ca898c7a6bc48261de81318be26", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.347826087, "max_line_length": 342, "alphanum_fraction": 0.7088345622, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2343}
|
! { dg-do compile }
! { dg-options "-fcoarray=single" }
!
! PR fortran/18918
!
! Was failing before as the "x%a()[]" was
! regarded as coindexed
subroutine test2()
type t
integer, allocatable :: a(:)[:]
end type t
type(t), SAVE :: x
allocate(x%a(1)[*])
end subroutine test2
module m
integer, allocatable :: a(:)[:]
end module m
! Was failing as "a" was allocatable but
! as->cotype was not AS_DEFERERED.
use m
end
|
{"hexsha": "637750a6121ec502bce8a7a729cf75e36943c040", "size": 432, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/coarray_19.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/coarray_19.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/coarray_19.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 16.6153846154, "max_line_length": 41, "alphanum_fraction": 0.6435185185, "num_tokens": 142}
|
r"""Self attention block of the Perceiver model."""
from typing import Any, Optional
from functools import partial
import jax.numpy as jnp
from flax import linen as nn
from flax_extra import combinator as cb
from flax_extra.layer._feedforward import FeedForward, FeedForwardCt
from flax_extra.layer._attention import SelfAttention, SelfAttentionCt
Array = jnp.ndarray
Precision = Any
class SelfAttentionBlock(nn.Module):
r"""A block of a self-attention module and following
feed-forward module.
.. math::
\begin{aligned}
& \textrm{SelfAttentionBlock}( \\
& \quad x \in \sR^{\nBatchSize \times \nSeqLen_{x} \times d_{x}} \\
& \quad \_ \\
& \quad \theta \gets LayerNorm() \\
        & \quad \theta \gets SelfAttention() \\
& \quad \theta \gets LayerNorm() \\
& \quad \theta \gets FeedForward() \\
& ) \\
& \rightarrow \sR^{\nBatchSize \times \nSeqLen_{x} \times d_{x}}
\end{aligned}
Args:
latents: latent features.
mask: a mask tensor with boolean values indicating whether
a particular query attends to a particular key.
Returns:
latent features.
"""
attention: SelfAttentionCt = SelfAttention
r"""a type of the self-attention."""
feed_forward: FeedForwardCt = FeedForward
r"""a type of the feed-forward."""
dropout_rate: float = 0.0
r"""probababilistic rate for dropout."""
deterministic: bool = True
r"""whether to perform deterministically or not."""
precision: Optional[Precision] = None
r"""numerical precision of the computation.
See :attr:`jax.lax.Precision` for details."""
@nn.compact
def __call__( # type: ignore[override] # pylint: disable=arguments-differ
self,
latents: Array,
mask: Optional[Array],
) -> Array:
block = cb.serial(
cb.residual(
nn.LayerNorm(epsilon=1e-5),
partial(
self.attention,
deterministic=self.deterministic,
precision=self.precision,
)(),
nn.Dropout(
rate=self.dropout_rate,
deterministic=self.deterministic,
),
),
cb.residual(
nn.LayerNorm(epsilon=1e-5),
self.feed_forward(),
nn.Dropout(rate=self.dropout_rate, deterministic=self.deterministic),
),
)
return block(latents, mask) # type: ignore
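# Minimal usage sketch (editor's illustration; the array shapes and the absence
# of a mask are assumptions, not part of the original module):
#
#     import jax
#     block = SelfAttentionBlock()
#     latents = jnp.ones((2, 16, 64))                    # [batch, seq, dim]
#     params = block.init(jax.random.PRNGKey(0), latents, None)
#     out = block.apply(params, latents, None)           # same shape as latents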
|
{"hexsha": "207621d39de0d356ae2870d4d07f2403b709c707", "size": 2608, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/flax_extra/model/perceiver/_self_attention_block.py", "max_stars_repo_name": "manifest/flax-extra", "max_stars_repo_head_hexsha": "e19de992c7acefefca9ed4c9f7ce3e092943363a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/flax_extra/model/perceiver/_self_attention_block.py", "max_issues_repo_name": "manifest/flax-extra", "max_issues_repo_head_hexsha": "e19de992c7acefefca9ed4c9f7ce3e092943363a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-10T04:59:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-07T21:10:37.000Z", "max_forks_repo_path": "src/flax_extra/model/perceiver/_self_attention_block.py", "max_forks_repo_name": "manifest/flax-extra", "max_forks_repo_head_hexsha": "e19de992c7acefefca9ed4c9f7ce3e092943363a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6823529412, "max_line_length": 85, "alphanum_fraction": 0.5835889571, "include": true, "reason": "import jax", "num_tokens": 593}
|
import math
import random
import numpy as np
# helper function
def rand_tuple(lower, higher):
y = random.randint(lower, higher)
x = random.randint(lower, higher)
return (y,x)
# I reused most of my MDP code on grildworld from previous homeworks
class Gridworld:
name = "gridworld"
state_size_y = 5
state_size_x = 5
start_state = (0,0)
water_state = (4,2)
goal_state = (4,4)
obstacles = [(2,2),(3,2)]
goal_reward = 10
water_reward = -10
actions = [
(0, "\u2192"),
(math.pi/2, "\u2191"),
(math.pi, "\u2190"),
(-math.pi/2, "\u2193")
]
actions_length = 4
    # stochastic transition model: (veer angle, probability) pairs; the
    # sentinel angle -1 means the agent stays in its current state
    veer_transitions = [(0, 0.8), (-1, 0.1), (math.pi/2, 0.05), (-math.pi/2, 0.05)]
def __init__(self, discount):
self.discount = discount
self.state_features_length = self.state_size_y * self.state_size_x
self.complexity = None
def get_start_state(self):
return self.start_state
def get_next_state(self, state, action):
action_angle = self.actions[action][0]
transition_probability = random.random()
prob_sum = 0
for veer_transition in self.veer_transitions:
prob_sum += veer_transition[1]
if transition_probability < prob_sum:
if(veer_transition[0] == -1):
next_state = state
else:
next_state = self.get_next_position(state, action_angle, veer_transition[0])
return next_state
def get_next_position(self, state, action_angle, veer_angle):
angle = action_angle + veer_angle
direction = (int(math.sin(angle) * -1), int(math.cos(angle)))
next_state = (state[0] + direction[0], state[1] + direction[1])
# boundaries
if(next_state[0] < 0 or next_state[0] >= self.state_size_y or next_state[1] < 0 or next_state[1] >= self.state_size_x) \
or next_state in self.obstacles:
return state
else:
return next_state
# gridworld reward only depends on the next state we are entering
def get_reward(self, next_state):
if(next_state == self.goal_state):
return self.goal_reward
if(next_state == self.water_state):
return self.water_reward
return 0
def episode_over(self, state):
return state == self.goal_state
def get_state_features(self, state):
state = np.reshape(state, (-1,2))
y = state[:,0]
x = state[:,1]
# return a 1d vector of length width*height with 1 at the agent's location and 0 elsewhere
features = np.zeros( (state.shape[0], self.state_size_y * self.state_size_x ) )
        features[:, y * self.state_size_x + x] = 1
return features
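# Minimal rollout sketch (editor's illustration; the discount value is an assumption):
#
#     world = Gridworld(discount=0.9)
#     s = world.get_start_state()            # (0, 0)
#     s2 = world.get_next_state(s, 0)        # action 0 = "→"; veers or stays w.p. 0.2
#     r = world.get_reward(s2)               # 10 at goal, -10 in water, 0 otherwise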
# I reused most of my MDP code on mountain_car from previous homeworks
class MountainCar:
name = "mountain_car"
all_reward = -1
goal_reward = 0
x_max = 0.5
x_min = -1.2
v_max = 0.7
v_min = -0.7
actions = [
(-1, "R"), # reverse
(0, "N"), # neutral
(1, "F") # forward
]
actions_length = 3
def __init__(self, discount, feature_type, complexity):
self.discount = discount
self.feature_type = feature_type
self.complexity = complexity
if feature_type == "fourier":
self.state_features_length = 1 + 2 * complexity
def get_start_state(self):
initial_x = random.uniform(-0.6, -0.4)
return (initial_x, 0)
def get_next_state(self, state, action):
x = state[0]
v = state[1]
acceleration = self.actions[action][0]
v_next = v + (0.001 * acceleration) - (0.0025 * math.cos(3 * x))
x_next = x + v_next
x_next = max(min(x_next, self.x_max), self.x_min)
v_next = max(min(v_next, self.v_max), self.v_min)
if x_next == self.x_min or x_next == self.x_max:
v_next = 0
return (x_next, v_next)
def get_reward(self, next_state):
if next_state[0] == self.x_max:
return self.goal_reward
else:
return self.all_reward
def episode_over(self, state):
return state[0] == self.x_max
def get_state_features(self, state):
if self.feature_type == "fourier":
return self.fourier_cos( np.reshape(state, (-1,2) ))
def fourier_cos(self, state):
x = state[:,0]
v = state[:,1]
x = np.reshape((x - self.x_min) / (self.x_max - self.x_min), (-1,1)) # 0 to 1 range
v = np.reshape((v - self.v_min) / (self.v_max - self.v_min), (-1,1))
#φ(s) = [1, cos(1πx), cos(2πx), . . . , cos(Mπx), cos(1πv), cos(2πv), . . . cos(Mπv)]>.
fourier_x = np.cos(np.reshape(np.arange(self.complexity) + 1, (1,-1)) * np.pi * x)
fourier_v = np.cos(np.reshape(np.arange(self.complexity) + 1, (1,-1)) * np.pi * v)
fourier = np.concatenate( (np.ones((state.shape[0], 1)), fourier_x, fourier_v), 1 )
return fourier
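# Fourier-feature sketch (editor's illustration; constructor arguments are assumptions):
#
#     car = MountainCar(discount=1.0, feature_type="fourier", complexity=4)
#     phi = car.get_state_features(car.get_start_state())
#     # phi.shape == (1, 1 + 2*4): a constant term plus cos(k*pi*x) and cos(k*pi*v), k = 1..4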
|
{"hexsha": "4ea8d11c4f184475e641b0149fe5e9296c318452", "size": 5108, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/mdp.py", "max_stars_repo_name": "samkovaly/PolicyGradientsNumpy", "max_stars_repo_head_hexsha": "8048c828804b3c96669b6c0aa06f705e2c6df974", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-17T04:32:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T04:32:25.000Z", "max_issues_repo_path": "source/mdp.py", "max_issues_repo_name": "samkovaly/PolicyGradientsNumpy", "max_issues_repo_head_hexsha": "8048c828804b3c96669b6c0aa06f705e2c6df974", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/mdp.py", "max_forks_repo_name": "samkovaly/PolicyGradientsNumpy", "max_forks_repo_head_hexsha": "8048c828804b3c96669b6c0aa06f705e2c6df974", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.925, "max_line_length": 128, "alphanum_fraction": 0.5806577917, "include": true, "reason": "import numpy", "num_tokens": 1415}
|
SUBROUTINE UpdateGhostLayerNCurvilinear(var,Sx,Sy,NNx,NNy,NNz,CompGrid,alpha,beta,GhostGridX,GhostGridY)
USE Precision
USE DataTypes
IMPLICIT NONE
TYPE (Level_def) :: CompGrid
INTEGER :: NNx, NNy, NNz, rank, alpha, beta, i,j,k, GhostGridX, GhostGridY, diffb
REAL(KIND=long), DIMENSION(NNx,NNy) :: var, Sx, Sy
REAL(KIND=long), DIMENSION(2*alpha+1) :: Stenciln
REAL(KIND=long) :: xn, yn, Source, NormalX, NormalY, nx, ny
INTEGER, DIMENSION(2*beta+1) :: idx
IF (alpha/=beta) THEN
    print*,'ERROR: alpha/=beta. (UpdateGhostLayerNCurvilinear)'
STOP
END IF
rank = 2*beta+1
! On the free surface
k = NNz
! two boundaries
j=1
DO i = 1+GhostGridX, NNx-GhostGridX
Stenciln = CompGrid%CurvilinearStuff%DiffStencils%StencilG(1:rank,1+GhostGridY,1)
!! Source terms on the boundary ! FIXME: check the formula...
!xn = CompGrid%CurvilinearStuff%xn(i,j+GhostGridY)
!yn = CompGrid%CurvilinearStuff%yn(i,j+GhostGridY)
!Source = xn*Sx(i,j+GhostGridY) + yn*Sy(i,j+GhostGridY)
! use geometric factors for wall boundary
nx = CompGrid%CurvilinearStuff%nx(i,j+GhostGridY)
ny = CompGrid%CurvilinearStuff%ny(i,j+GhostGridY)
! Normal vectors are defined at the ghost points used to impose the kinematic boundary conditions
NormalX = CompGrid%CurvilinearStuff%NormalX(k,i,j)
NormalY = CompGrid%CurvilinearStuff%NormalY(k,i,j)
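    ! Combine the two normal directions so that the normal derivative at the
    ! ghost point matches the boundary data:
    !   Source = (NormalX*Sx + NormalY*Sy) / (NormalX*nx + NormalY*ny)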
Source = (NormalX*Sx(i,j+GhostGridY) + NormalY*Sy(i,j+GhostGridY))/(NormalX*nx+NormalY*ny)
!
var(i,j) = (Source-DOT_PRODUCT(Stenciln(2:rank),var(i,2:rank))) / Stenciln(1)
END DO
j = NNy
DO i = 1+GhostGridX, NNx-GhostGridX
Stenciln = CompGrid%CurvilinearStuff%DiffStencils%StencilG(1:rank,rank-GhostGridY,1)
!! Source terms on the boundary! FIXME: check the formula...
!xn = CompGrid%CurvilinearStuff%xn(i,j-GhostGridY)
!yn = CompGrid%CurvilinearStuff%yn(i,j-GhostGridY)
!Source = xn*Sx(i,j-GhostGridY) + yn*Sy(i,j-GhostGridY)
! use geometric factors for wall boundary
nx = CompGrid%CurvilinearStuff%nx(i,j-GhostGridY)
ny = CompGrid%CurvilinearStuff%ny(i,j-GhostGridY)
NormalX = CompGrid%CurvilinearStuff%NormalX(k,i,j)
NormalY = CompGrid%CurvilinearStuff%NormalY(k,i,j)
Source = (NormalX*Sx(i,j-GhostGridY) + NormalY*Sy(i,j-GhostGridY))/(NormalX*nx+NormalY*ny)
!
var(i,j) = (Source-DOT_PRODUCT(Stenciln(1:rank-1),var(i,j-rank+1:j-1))) / Stenciln(rank)
END DO
END SUBROUTINE UpdateGhostLayerNCurvilinear
|
{"hexsha": "917f34167570ef456969780c4102bf798f7198ee", "size": 2388, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/functions/UpdateGhostLayerNCurvilinear.f90", "max_stars_repo_name": "apengsigkarup/OceanWave3D", "max_stars_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2016-01-08T12:36:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:56:45.000Z", "max_issues_repo_path": "src/functions/UpdateGhostLayerNCurvilinear.f90", "max_issues_repo_name": "apengsigkarup/OceanWave3D", "max_issues_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-10-10T19:45:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-07T07:37:11.000Z", "max_forks_repo_path": "src/functions/UpdateGhostLayerNCurvilinear.f90", "max_forks_repo_name": "apengsigkarup/OceanWave3D", "max_forks_repo_head_hexsha": "91979da3ede3215b2ae65bffab89b695ff17f112", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-10-01T12:17:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T16:23:37.000Z", "avg_line_length": 44.2222222222, "max_line_length": 104, "alphanum_fraction": 0.7319932998, "num_tokens": 842}
|
import tensorflow as tf
import numpy as np
import mycommon as mc
class BAGRNN_Model:
def __init__(self,
bag_num = 50,
enc_dim = 256,
embed_dim = 200,
rel_dim = None,
cat_n = 5,
sent_len = 120,
word_n = 80000,
extra_n = 3,
word_embed = None,
dropout = None,
cell_type = 'gru',
adv_eps = None,
adv_type = 'sent',
tune_embed = False,
use_softmax_loss = None,
sampled_sigmoid_loss = False,
max_dist_embed = None,
excl_na_loss = True,
only_perturb_pos_rel = False):
self.bag_num = bag_num # total number of bags
self.enc_dim = enc_dim
if rel_dim is None:
self.rel_dim = 3 * enc_dim if cell_type == 'pcnn' else 2 * enc_dim
else:
self.rel_dim = rel_dim
self.embed_dim = embed_dim
self.cat_n = cat_n
self.sent_len = sent_len
self.pretrain_word_embed = word_embed
self.word_n = word_n
self.extra_n = extra_n
self.dropout = dropout
self.cell_type = cell_type
self.adv_eps = adv_eps # eps for adversarial training, if None, classical feedfwd net
self.adv_type = adv_type # type of adversarial perturbation: batch, bag, sent
self.tune_embed = tune_embed
self.use_softmax_loss = (use_softmax_loss is not None) # whether to use softmax loss or sigmoid loss
self.use_full_softmax = self.use_softmax_loss and (use_softmax_loss > 0)
self.sampled_sigmoid_loss = sampled_sigmoid_loss and not use_softmax_loss
self.max_dist_embed = max_dist_embed
        self.use_pcnn = (cell_type == 'pcnn') # PCNN encoder when cell_type == 'pcnn'; otherwise a bidirectional RNN encoder
self.excl_na_loss = excl_na_loss # exclude NA in the loss function, only effective for sigmoid loss
self.only_perturb_pos_rel = only_perturb_pos_rel
def build(self, is_training,
ent_dim = 3,
dropout_embed = True):
self.is_training = is_training
bag_num = self.bag_num
cat_n = self.cat_n
L = self.sent_len
rel_dim = self.rel_dim
enc_dim = self.enc_dim
cell_type = self.cell_type
dropout = self.dropout
###################################
# create placeholders
#####
# data shape info
self.shapes = shapes = tf.placeholder(tf.int32, [self.bag_num + 1])
# input data
self.X = tf.placeholder(tf.int32, [None, L])
self.ent = tf.placeholder(tf.int32, [None, L])
if self.max_dist_embed is not None:
self.ent2 = tf.placeholder(tf.int32, [None, L])
# labels
self.Y = ph_Y = tf.placeholder(tf.float32, [bag_num, cat_n])
# sentence length
self.length = length = tf.placeholder(tf.int32, [None])
# sentence mask
self.mask = mask = tf.placeholder(tf.float32, [None, L])
# adversarial eps
if self.adv_eps is not None:
self.adv_eps = tf.placeholder(tf.float32, shape=())
# loss mask
if self.sampled_sigmoid_loss:
self.loss_mask = loss_mask = tf.placeholder(tf.float32, [bag_num, cat_n])
else:
loss_mask = None
if self.use_pcnn:
self.pcnn_mask = tf.placeholder(tf.float32, [None, 3, L])
pcnn_pos_mask = tf.expand_dims(tf.transpose(self.pcnn_mask, [0, 2, 1]), axis=1) # [batch, 1, L, 3]
if self.use_full_softmax:
self.diag = tf.expand_dims(tf.eye(cat_n, dtype=tf.float32), axis=0)
#################################
# create embedding variables
####
self.exclude_clip_vars = set()
# pre-process entity embedding
if self.max_dist_embed is None:
self.ent_embed = tf.constant(np.array([[0] * ent_dim * 2, [1] * ent_dim + [0] * ent_dim, [0] * ent_dim + [1] * ent_dim],
dtype=np.float32),
dtype=tf.float32)
else:
self.ent_embed = tf.get_variable('dist_embed', [2 * self.max_dist_embed + 1, ent_dim],
initializer=tf.random_normal_initializer(0, 0.01))
self.exclude_clip_vars.add(self.ent_embed)
if self.pretrain_word_embed is not None:
if self.tune_embed:
pretrain_embed = tf.get_variable('pretrain_embed',
initializer=self.pretrain_word_embed)
self.exclude_clip_vars.add(pretrain_embed)
else:
pretrain_embed = tf.constant(self.pretrain_word_embed,dtype=tf.float32)
extra_embed = tf.get_variable('extra_embed', [self.extra_n,
self.embed_dim],
initializer=tf.random_normal_initializer(0,0.01))
self.exclude_clip_vars.add(extra_embed)
self.word_embed = tf.concat([pretrain_embed, extra_embed], axis=0)
else:
self.word_embed = tf.get_variable('word_embed', [self.word_n+self.extra_n,
self.embed_dim],
initializer=tf.random_normal_initializer(0,0.01))
self.exclude_clip_vars.add(self.word_embed)
################################
# discriminative model
#####
self.orig_inputs = orig_inputs = mc.get_embedding(self.X, self.word_embed,
self.dropout if dropout_embed else None, self.is_training)
if self.max_dist_embed is not None:
dist1_embed = mc.get_embedding(self.ent, self.ent_embed,
self.dropout if dropout_embed else None, self.is_training)
dist2_embed = mc.get_embedding(self.ent2, self.ent_embed,
self.dropout if dropout_embed else None, self.is_training)
ent_inputs = tf.concat([dist1_embed, dist2_embed], axis=2)
else:
ent_inputs = mc.get_embedding(self.ent, self.ent_embed) # [batch,L,dim]
use_softmax_loss = self.use_softmax_loss
use_full_softmax = self.use_full_softmax
use_pcnn = self.use_pcnn
pcnn_feat_size = self.enc_dim
def discriminative_net(word_inputs, name = 'discriminative-net', reuse = False,
only_pos_rel_loss = False):
with tf.variable_scope(name, reuse=reuse):
if only_pos_rel_loss:
pos_rel_mask = ph_Y
# when y = [0, 0, ..., 0]: pos_rel_mask = [1, 1, ..., 1]
# o.w. pos_rel_mask = y
#na_flag = 1 - tf.reduce_max(ph_Y, axis=1, keep_dims=True)
#pos_rel_mask = ph_Y + na_flag
inputs = tf.concat([word_inputs, ent_inputs], axis = 2) # [batch, L, dim]
if not use_pcnn: # use RNN
outputs, states = mc.mybidrnn(inputs, length, enc_dim,
cell_name = cell_type,
scope = 'bidirect-rnn')
# sentence information
V = tf.concat(states, axis=1) # [batch, rel_dim]
V_dim = enc_dim * 2
else:
# use pcnn
feat_size = pcnn_feat_size
window_size = 3
inputs = tf.expand_dims(inputs, axis=1) # [batch, 1, L, dim]
conv_out = tf.squeeze(tf.nn.relu(
tf.layers.conv2d(inputs, feat_size, [1, window_size], 1, padding='same',
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d())
)) # [batch, L, feat_size]
conv_out = tf.expand_dims(tf.transpose(conv_out, [0, 2, 1]), axis=-1) # [batch, feat, L, 1]
pcnn_pool = tf.reduce_max(conv_out * pcnn_pos_mask, axis=2) # [batch, feat, 3]
V = tf.reshape(pcnn_pool, [-1, feat_size * 3])
V_dim = feat_size * 3
if V_dim != rel_dim:
V = mc.linear(V, rel_dim, scope='embed_proj')
if dropout:
V = tf.layers.dropout(V, rate=dropout,
training=is_training)
#################################
# Multi Label Multi Instance Learning
#####
Q = tf.get_variable('relation_embed', [rel_dim, cat_n],
initializer=tf.random_normal_initializer(0, 0.01))
if use_full_softmax:
A = tf.get_variable('classify-proj', [rel_dim, cat_n],
initializer=tf.random_normal_initializer(0, 0.01))
else:
A = tf.get_variable('classify-proj', [cat_n, rel_dim],
initializer=tf.random_normal_initializer(0, 0.01))
alpha = tf.matmul(tf.nn.tanh(V), Q) # [batch, cat_n]
# process bags
logits_list = []
for i in range(bag_num):
n = shapes[i+1] - shapes[i]
curr_V = V[shapes[i]:shapes[i+1], :] # [n, rel_dim]
curr_alpha = alpha[shapes[i]:shapes[i+1], :] # [n, cat_n]
weight = tf.nn.softmax(tf.transpose(curr_alpha, [1, 0])) # [cat_n, n]
full_weight = tf.tile(tf.expand_dims(weight, axis=-1), [1, 1, rel_dim]) # [cat_n, n, dim]
full_V = tf.tile(tf.expand_dims(curr_V, axis=0), [cat_n, 1, 1])
V_att = tf.reduce_sum(full_weight * full_V, axis=1) # [cat_n, dim]
if use_full_softmax:
cat_logits = tf.matmul(V_att, A) # [cat_n, cat_n]
else:
cat_logits = tf.reduce_sum(V_att * A, axis=1) # [cat_n]
logits_list.append(cat_logits)
logits = tf.stack(logits_list) # [bag_num, cat_n] or [bag_num, cat_n, cat_n]
if use_softmax_loss:
probs = tf.nn.softmax(logits)
if use_full_softmax:
# probs: [bag_num, cat_n, cat_n] last dimension normalized
probs = tf.reduce_sum(probs * self.diag, axis=-1) # [bag_num, cat_n]
# optimize the sum of softmax-loss for each positive rel
loss = -tf.reduce_mean(tf.reduce_sum(tf.log(probs + 1e-20) * ph_Y, axis=1))
else:
# add all the probs, output the joint log probability
loss = -tf.reduce_mean(tf.log(tf.reduce_sum(probs * ph_Y, axis=1) + 1e-20))
else:
probs = tf.nn.sigmoid(logits)
if loss_mask is not None:
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=ph_Y, logits=logits)
if only_pos_rel_loss:
loss = loss * pos_rel_mask
loss = tf.reduce_sum(loss * loss_mask, axis=1)
weight = tf.reduce_sum(loss_mask, axis=1)
# weighted average of the individual sigmoid loss, rescale to full_sigmoid_loss
#coef = cat_n - 1 if self.excl_na_loss else cat_n
#loss = tf.reduce_mean(loss / weight) * coef
loss = tf.reduce_mean(loss) # * coef
else:
if self.excl_na_loss:
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=ph_Y, logits=logits) # [bag, cat_n]
if only_pos_rel_loss:
loss = loss * pos_rel_mask
loss = loss[:, 1:] # exclude NA
loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))
else:
if only_pos_rel_loss:
loss = tf.losses.sigmoid_cross_entropy(ph_Y, logits, weights=pos_rel_mask)
else:
loss = tf.losses.sigmoid_cross_entropy(ph_Y, logits)
return probs, loss
self.probs, self.raw_loss = discriminative_net(orig_inputs, reuse=False,
only_pos_rel_loss=(self.adv_eps is not None) and (self.only_perturb_pos_rel) and (not use_softmax_loss))
if self.adv_eps is None:
self.loss = self.raw_loss
else: # adversarial training
raw_perturb = tf.gradients(self.raw_loss, orig_inputs)[0] # [batch, L, dim]
if self.adv_type == 'sent':
# normalize per sentence
self.perturb = perturb = self.adv_eps * tf.stop_gradient(
tf.nn.l2_normalize(raw_perturb * tf.expand_dims(mask, axis=-1), dim=[1, 2]))
elif self.adv_type == 'batch':
# normalize the whole batch
self.perturb = perturb = self.adv_eps * tf.stop_gradient(
tf.nn.l2_normalize(raw_perturb * tf.expand_dims(mask, axis=-1), dim=[0,1,2]))
else: # bag-level normalization
raw_perturb = tf.stop_gradient(raw_perturb * tf.expand_dims(mask, axis=-1)) # [batch, L, dim]
perturb_list = []
for i in range(bag_num):
curr_pt = raw_perturb[shapes[i]:shapes[i+1], :, :] # [bag_size, L, dim]
perturb_list.append(self.adv_eps * tf.nn.l2_normalize(curr_pt, dim=[0,1,2]))
self.perturb = perturb = tf.concat(perturb_list, axis=0) # [batch, L, dim]
self.perturb_inputs = perturb_inputs = orig_inputs + perturb
self.perturb_probs, self.loss = discriminative_net(perturb_inputs, reuse=True) # optimize the loss with perturbed loss
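# Minimal driver sketch (editor's illustration, not part of the original file;
# `embed_matrix`, `offsets`, `lens`, `sent_mask` are assumed inputs). `shapes`
# holds cumulative sentence offsets, so bag i owns rows shapes[i]:shapes[i+1]
# of X / ent / length / mask:
#
#     model = BAGRNN_Model(bag_num=50, cat_n=5, word_embed=embed_matrix)
#     model.build(is_training=True)
#     train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss)
#     # sess.run(train_op, feed_dict={model.X: X, model.ent: ent, model.Y: Y,
#     #     model.shapes: offsets, model.length: lens, model.mask: sent_mask})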
|
{"hexsha": "b45e0f9d4b0ba076b8802af61f49735e0329703e", "size": 14477, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/bag_model.py", "max_stars_repo_name": "jxwuyi/AtNRE", "max_stars_repo_head_hexsha": "2fe1d95bc361645e9f8105e56e64786cae4ab040", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 119, "max_stars_repo_stars_event_min_datetime": "2017-07-27T13:44:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T07:38:16.000Z", "max_issues_repo_path": "code/bag_model.py", "max_issues_repo_name": "jxwuyi/AtNRE", "max_issues_repo_head_hexsha": "2fe1d95bc361645e9f8105e56e64786cae4ab040", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-01-16T03:37:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-31T05:29:32.000Z", "max_forks_repo_path": "code/bag_model.py", "max_forks_repo_name": "jxwuyi/AtNRE", "max_forks_repo_head_hexsha": "2fe1d95bc361645e9f8105e56e64786cae4ab040", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2017-09-20T01:51:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T05:13:32.000Z", "avg_line_length": 53.4206642066, "max_line_length": 159, "alphanum_fraction": 0.5155764316, "include": true, "reason": "import numpy", "num_tokens": 3163}
|
import numpy as np
import cv2
class BackSub:
def __init__(self, firstFrame):
# running-average parameters; with avg_frames = 1 the "background"
# model is effectively just the first frame (no temporal averaging)
self.avg_frames = 1
self.alpha = 1 / self.avg_frames
self.backGroundModel = firstFrame
self.counter = 0
def getForeground(self, frame):
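# Note: despite the class name, this does chroma keying rather than
# background differencing; pixels whose HSV value falls in the green range
# below are masked out and everything else is returned as "foreground".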
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
green = cv2.inRange(hsv, np.array((50., 10., 0.)), np.array((200., 255, 170.0)))
not_green = cv2.bitwise_not(green)
not_green = cv2.cvtColor(not_green, cv2.COLOR_GRAY2BGR)
return not_green
# Just a simple function to perform some filtering before any further processing.
def denoise(frame):
frame = cv2.medianBlur(frame, 5)
frame = cv2.GaussianBlur(frame, (5, 5), 0)
return frame
if __name__ == "__main__":
def onmouse(event, x, y, flags, param):
# increases or decreases threshold.
global thresh
if event == cv2.EVENT_LBUTTONDOWN:
if thresh < 255:
print('up!')
thresh += 1
elif event == cv2.EVENT_RBUTTONDOWN:
if thresh > 0:
print('down!')
thresh -= 1
cam = cv2.VideoCapture(0)
ret, frame = cam.read()
cam_height, cam_width, _channels = frame.shape
if ret:
backSubtractor = BackSub(denoise(frame))
run = True
else:
run = False
# read image to put background over
img = cv2.imread('backgrounds/paris-1-.png')
img = cv2.resize(img, (cam_width, cam_height))
cv2.namedWindow('mask')
cv2.setMouseCallback('mask', onmouse)
thresh = 45
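# Note: 'thresh' can be raised/lowered with the mouse buttons via onmouse,
# but it is not actually used in the masking pipeline below.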
while(run):
# Read a frame from the camera
ret, frame = cam.read()
# If the frame was properly read.
if ret:
# Get the foreground mask from the denoised frame
fgmask = backSubtractor.getForeground(denoise(frame))
# foreGround = cv2.cvtColor(foreGround, cv2.COLOR_BGR2GRAY)
frame &= fgmask
mask_inv = cv2.bitwise_not(fgmask)
masked_img = img & mask_inv
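# Composite: 'frame & fgmask' keeps the non-green (foreground) pixels from
# the camera, 'img & mask_inv' keeps the replacement background where the
# green screen was, and cv2.add merges the two.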
final = cv2.add(frame, masked_img)
# Display the composited result.
# Note: the mask is handled as an RGB image; you can work in grayscale
# instead by converting 'fgmask' to a single channel before masking.
cv2.imshow('mask', final)
key = cv2.waitKey(10) & 0xFF
else:
break
if key == 27:
break
cam.release()
cv2.destroyAllWindows()
|
{"hexsha": "b8d06e45ba28abcc2453a2e5745d446320545b20", "size": 2689, "ext": "py", "lang": "Python", "max_stars_repo_path": "background_subtraction.py", "max_stars_repo_name": "cynic64/theremin", "max_stars_repo_head_hexsha": "3e67285ea6d571e255ba3c61e0a34eac95dddd1a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "background_subtraction.py", "max_issues_repo_name": "cynic64/theremin", "max_issues_repo_head_hexsha": "3e67285ea6d571e255ba3c61e0a34eac95dddd1a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "background_subtraction.py", "max_forks_repo_name": "cynic64/theremin", "max_forks_repo_head_hexsha": "3e67285ea6d571e255ba3c61e0a34eac95dddd1a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4387755102, "max_line_length": 88, "alphanum_fraction": 0.5842320565, "include": true, "reason": "import numpy", "num_tokens": 666}
|
# -*- coding: utf-8 -*-
from os import cpu_count
import pytest
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.NodeMat import NodeMat
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.SolutionMat import SolutionMat
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
import numpy as np
from os.path import join
from Tests import save_plot_path as save_path
@pytest.mark.long_5s
@pytest.mark.long_1m
@pytest.mark.MagFEMM
@pytest.mark.SPMSM
def test_SPMSM015_plot_contour_B_FEMM():
"""Validation of the implementaiton of periodic angle axis in Magnetic (MagFEMM) and Force (ForceMT) modules"""
SPMSM_015 = load(join(DATA_DIR, "Machine", "SPMSM_015.json"))
simu = Simu1(name="test_FEMM_periodicity_time_no_periodicity_a", machine=SPMSM_015)
# Definition of the enforced output of the electrical module
I0_rms = 250 / np.sqrt(2)
Phi0 = 140 * np.pi / 180 # Maximum Torque Per Amp
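# Id/Iq below are the real and imaginary parts of the current phasor
# I0_rms * exp(j*Phi0), i.e. the d- and q-axis currents at this angle.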
Id_ref = (I0_rms * np.exp(1j * Phi0)).real
Iq_ref = (I0_rms * np.exp(1j * Phi0)).imag
simu.input = InputCurrent(
Id_ref=Id_ref,
Iq_ref=Iq_ref,
Na_tot=252 * 9,
Nt_tot=4 * 9,
N0=1000,
)
# Definition of the magnetic simulation: with periodicity
simu.mag = MagFEMM(
type_BH_stator=1,
type_BH_rotor=1,
is_periodicity_a=False,
is_periodicity_t=True,
nb_worker=cpu_count(),
is_get_meshsolution=True,
Kmesh_fineness=0.5,
)
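# Angular periodicity is disabled here, presumably so the mesh (and the
# contour plots below) cover the full machine; time periodicity is still
# exploited to shorten the FEMM run.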
out = simu.run()
out.mag.meshsolution.plot_contour(is_show_fig=False, save_path=join(save_path, "plot_mesh.png"))
out.mag.meshsolution.plot_contour(
group_names="stator core",
is_show_fig=False,
save_path=join(save_path, "plot_mesh_stator.png"),
)
out.mag.meshsolution.plot_contour(
is_animated=True,
group_names="stator core",
is_show_fig=False,
save_path=join(save_path, "plot_mesh_stator.gif"),
)
pass
def test_Benchmark_plot_contour_B_FEMM():
"""Validation of the implementaiton of periodic angle axis in Magnetic (MagFEMM) and Force (ForceMT) modules"""
Benchmark = load(join(DATA_DIR, "Machine", "Benchmark.json"))
simu = Simu1(name="test_FEMM_compare_Toyota_Prius", machine=Benchmark)
simu.input = InputCurrent(
Id_ref=0,
Iq_ref=0,
Na_tot=2048,
Nt_tot=50,
N0=2504,
)
# Definition of the magnetic simulation: with periodicity
simu.mag = MagFEMM(
type_BH_stator=0,
type_BH_rotor=0,
is_periodicity_a=False,
is_periodicity_t=True,
is_get_meshsolution=True,
nb_worker=cpu_count(),
)
out = simu.run()
out.plot_B_mesh(save_path=join(save_path, "plot_B_mesh.png"))
out.plot_B_mesh(group_names="stator core", is_animated=True, is_show_fig=False, save_path=join(save_path, "plot_B_mesh.gif"),)
out.mag.meshsolution.plot_contour(
group_names=["rotor magnets","rotor core"],
is_show_fig=False,
save_path=join(save_path, "plot_mesh_stator.png"),
)
pass
if __name__ == "__main__":
test_Benchmark_plot_contour_B_FEMM()
|
{"hexsha": "e8c8f7810d260db1b46140a8b923b8b78c975d24", "size": 3391, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tests/Plot/Magnetics/test_plot_contour.py", "max_stars_repo_name": "EmileDvs/pyleecan", "max_stars_repo_head_hexsha": "ad2f5f25c089a981f373557a198da51c62407928", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tests/Plot/Magnetics/test_plot_contour.py", "max_issues_repo_name": "EmileDvs/pyleecan", "max_issues_repo_head_hexsha": "ad2f5f25c089a981f373557a198da51c62407928", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tests/Plot/Magnetics/test_plot_contour.py", "max_forks_repo_name": "EmileDvs/pyleecan", "max_forks_repo_head_hexsha": "ad2f5f25c089a981f373557a198da51c62407928", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7456140351, "max_line_length": 130, "alphanum_fraction": 0.6974343851, "include": true, "reason": "import numpy", "num_tokens": 952}
|
# Import pyNeuroChem
from __future__ import print_function
# Neuro Chem
from ase_interface import ANI
import pyNeuroChem as pync
import hdnntools as gt
import numpy as np
import matplotlib.pyplot as plt
import time as tm
from scipy import stats as st
import time
import hdnntools as hdt
from rdkit import Chem
from rdkit.Chem import AllChem
# ASE
import ase
from ase.io import read, write
from ase.optimize import BFGS, LBFGS
from ase.vibrations import Vibrations
from ase.thermochemistry import IdealGasThermo
from ase.units import Bohr
from ase.calculators.calculator import Calculator, all_changes
from ase import Atoms
def formatsmilesfile(file):
ifile = open(file, 'r')
contents = ifile.read()
ifile.close()
p = re.compile(r'([^\s]*).*\n')
smiles = p.findall(contents)
ofile = open(file, 'w')
for mol in smiles:
ofile.write(mol + '\n')
ofile.close()
#def make_atoms
#--------------Parameters------------------
smfile = '/home/jujuman/Research/RawGDB11Database/gdb11_size06.smi' # Smiles file
wkdir1 = '/home/jujuman/Dropbox/ChemSciencePaper.AER/networks/ANI-c08f-ntwk-cv/'
wkdir2 = '/home/jujuman/Dropbox/ChemSciencePaper.AER/networks/ANI-c08f09bad-ntwk-cv/'
wkdir3 = '/home/jujuman/Dropbox/ChemSciencePaper.AER/networks/ANI-c08f09dd-ntwk-cv/'
wkdir4 = '/home/jujuman/Dropbox/ChemSciencePaper.AER/networks/ANI-c08f09div-ntwk-cv/'
cnstfile = 'rHCNO-4.6A_16-3.1A_a4-8.params'
saefile = 'sae_6-31gd.dat'
At = ['C', 'O', 'N'] # Hydrogens added after check
Nnc = 5
#-------------------------------------------
#nnfdir = wkdir + 'cv_c08e_ntw_' + str(0) + '/networks/'
# Construct pyNeuroChem classes
nc1 = [pync.molecule(wkdir1 + cnstfile, wkdir1 + saefile, wkdir1 + 'cv_c08e_ntw_' + str(l) + '/networks/', 0) for l in range(Nnc)]
nc2 = [pync.molecule(wkdir2 + cnstfile, wkdir2 + saefile, wkdir2 + 'cv_c08e_ntw_' + str(l) + '/networks/', 0) for l in range(Nnc)]
nc3 = [pync.molecule(wkdir3 + cnstfile, wkdir3 + saefile, wkdir3 + 'cv_c08e_ntw_' + str(l) + '/networks/', 0) for l in range(Nnc)]
nc4 = [pync.molecule(wkdir4 + cnstfile, wkdir4 + saefile, wkdir4 + 'cv_c08e_ntw_' + str(l) + '/networks/', 0) for l in range(Nnc)]
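# Each wkdir holds a committee of Nnc = 5 cross-validation networks; below,
# the standard deviation of their predicted energies on a conformer is used
# as a disagreement measure (> 5 kcal/mol flags a "bad" molecule).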
molecules = Chem.SmilesMolSupplier(smfile, nameColumn=0)
total_mol = 0
total_bad = 0
#mols = [molecules[i] for i in range(217855,217865)]
f1 = open('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/gdb-06-cvsdev_c08f.dat','w')
f2 = open('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/gdb-06-cvsdev_c08f09bad.dat','w')
f3 = open('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/gdb-06-cvsdev_c08f09dd.dat','w')
f4 = open('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/gdb-06-cvsdev_c08f09div.dat','w')
for k,m in enumerate(molecules):
if m is None: continue
typecount = 0
#print (Chem.MolToSmiles(m))
typecheck = False
for a in m.GetAtoms():
sym = str(a.GetSymbol())
count = 0
for i in At:
if i == sym:
count = 1
if count == 0:
typecheck = True
if typecheck is False:
total_mol = total_mol + 1
#print('total_mol: ',total_mol)
m = Chem.AddHs(m) # Add Hydrogens
embed = AllChem.EmbedMolecule(m, useRandomCoords=True)
if embed == 0: # Embed in 3D Space was successful
check = AllChem.MMFFOptimizeMolecule(m, maxIters=1000) # Classical Optimization
xyz = np.zeros((m.GetNumAtoms(),3),dtype=np.float32)
spc = []
Na = m.GetNumAtoms()
for i in range (0,Na):
pos = m.GetConformer().GetAtomPosition(i)
sym = m.GetAtomWithIdx(i).GetSymbol()
spc.append(sym)
xyz[i, 0] = pos.x
xyz[i, 1] = pos.y
xyz[i, 2] = pos.z
mol = Atoms(symbols=spc, positions=xyz)
#mol.set_calculator(ANI(False))
#mol.calc.setnc(nc1[0])
xyzi = np.array(mol.get_positions(),dtype=np.float32).reshape(xyz.shape[0],3)
#dyn = LBFGS(mol,logfile='logfile.txt')
#dyn.run(fmax=0.001,steps=10000)
#conv = True if dyn.get_number_of_steps() == 10000 else False
#stps = dyn.get_number_of_steps()
stps = 0
xyz = np.array(mol.get_positions(),dtype=np.float32).reshape(xyz.shape[0],3)
#if conv:
# print('Failed to converge!!!')
energies = np.zeros((Nnc),dtype=np.float64)
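# For each of the four ensembles: predict the energy with all 5 networks,
# and if the spread exceeds 5 kcal/mol, dump the geometry to an xyz file
# and log the molecule.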
#------------ CV NETWORKS 1 -----------
N = 0
for comp in nc1:
comp.setMolecule(coords=xyz, types=list(spc))
energies[N] = comp.energy()[0]
N = N + 1
if np.std(hdt.hatokcal * energies) > 5.0:
hdt.writexyzfile('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/CV1bmol-'+str(total_mol)+'.xyz',xyz.reshape(1,xyz.shape[0],xyz.shape[1]),spc)
total_bad = total_bad + 1
perc = int(100.0 * total_bad / float(total_mol))
output = ' ' + str(k) + ' ' + str(total_bad) + '/' + str(total_mol) + ' ' + str(perc) + '% (' + str(
Na) + ') : stps=' + str(stps) + ' : ' + str(energies) + ' : std(kcal/mol)=' + str(
np.std(hdt.hatokcal * energies)) + ' : ' + Chem.MolToSmiles(m)
if np.std(hdt.hatokcal*energies) > 5.0:
print("CV1:", output)
f1.write(output + '\n')
#------------ CV NETWORKS 2 -----------
N = 0
for comp in nc2:
comp.setMolecule(coords=xyz, types=list(spc))
energies[N] = comp.energy()[0]
N = N + 1
if np.std(hdt.hatokcal * energies) > 5.0:
hdt.writexyzfile('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/CV2bmol-'+str(total_mol)+'.xyz',xyz.reshape(1,xyz.shape[0],xyz.shape[1]),spc)
total_bad = total_bad + 1
perc = int(100.0 * total_bad / float(total_mol))
output = ' ' + str(k) + ' ' + str(total_bad) + '/' + str(total_mol) + ' ' + str(perc) + '% (' + str(
Na) + ') : stps=' + str(stps) + ' : ' + str(energies) + ' : std(kcal/mol)=' + str(
np.std(hdt.hatokcal * energies)) + ' : ' + Chem.MolToSmiles(m)
if np.std(hdt.hatokcal*energies) > 5.0:
print("CV2:", output)
f2.write(output + '\n')
#------------ CV NETWORKS 3 -----------
N = 0
for comp in nc3:
comp.setMolecule(coords=xyz, types=list(spc))
energies[N] = comp.energy()[0]
N = N + 1
if np.std(hdt.hatokcal * energies) > 5.0:
hdt.writexyzfile('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/CV3bmol-'+str(total_mol)+'.xyz',xyz.reshape(1,xyz.shape[0],xyz.shape[1]),spc)
total_bad = total_bad + 1
perc = int(100.0 * total_bad / float(total_mol))
output = ' ' + str(k) + ' ' + str(total_bad) + '/' + str(total_mol) + ' ' + str(perc) + '% (' + str(
Na) + ') : stps=' + str(stps) + ' : ' + str(energies) + ' : std(kcal/mol)=' + str(
np.std(hdt.hatokcal * energies)) + ' : ' + Chem.MolToSmiles(m)
if np.std(hdt.hatokcal*energies) > 5.0:
print("CV3:", output)
f3.write(output + '\n')
#------------ CV NETWORKS 4 -----------
N = 0
for comp in nc4:
comp.setMolecule(coords=xyz, types=list(spc))
energies[N] = comp.energy()[0]
N = N + 1
if np.std(hdt.hatokcal * energies) > 5.0:
hdt.writexyzfile('/home/jujuman/Research/CrossValidation/GDB-06-High-sdev/CV4bmol-'+str(total_mol)+'.xyz',xyz.reshape(1,xyz.shape[0],xyz.shape[1]),spc)
total_bad = total_bad + 1
perc = int(100.0 * total_bad / float(total_mol))
output = ' ' + str(k) + ' ' + str(total_bad) + '/' + str(total_mol) + ' ' + str(perc) + '% (' + str(
Na) + ') : stps=' + str(stps) + ' : ' + str(energies) + ' : std(kcal/mol)=' + str(
np.std(hdt.hatokcal * energies)) + ' : ' + Chem.MolToSmiles(m)
if np.std(hdt.hatokcal*energies) > 5.0:
print("CV4:", output)
f4.write(output + '\n')
print('Total Molecs: ', total_mol)
print('Total Bad (std > 5 kcal/mol): ', total_bad)
print('Percent Bad: ', int(100.0 * total_bad/float(total_mol)), '%')
f1.close()
f2.close()
f3.close()
f4.close()
#print('End...')
|
{"hexsha": "1fb36ecff62b458d83b7be1ec9cc60d3f362456c", "size": 8636, "ext": "py", "lang": "Python", "max_stars_repo_path": "activelearning/chemsearch/ani-cross-valid-gdb-set.py", "max_stars_repo_name": "plin1112/ANI-Tools", "max_stars_repo_head_hexsha": "76280c918fc79fee8c266b8bc9ab57f86104ec99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-10-30T16:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-08T01:44:41.000Z", "max_issues_repo_path": "activelearning/chemsearch/ani-cross-valid-gdb-set.py", "max_issues_repo_name": "plin1112/ANI-Tools", "max_issues_repo_head_hexsha": "76280c918fc79fee8c266b8bc9ab57f86104ec99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "activelearning/chemsearch/ani-cross-valid-gdb-set.py", "max_forks_repo_name": "plin1112/ANI-Tools", "max_forks_repo_head_hexsha": "76280c918fc79fee8c266b8bc9ab57f86104ec99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-04-05T15:51:12.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-23T21:38:31.000Z", "avg_line_length": 37.547826087, "max_line_length": 167, "alphanum_fraction": 0.5594025012, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2692}
|
(**********************************************************************************
* KSSem.v *
* Formalizing Domains, Ultrametric Spaces and Semantics of Programming Languages *
* Nick Benton, Lars Birkedal, Andrew Kennedy and Carsten Varming *
* Jan 2012 *
* Build with Coq 8.3pl2 plus SSREFLECT *
**********************************************************************************)
(* semantics of types for the kitchen sink language *)
Require Export MetricRec mpremet.
Require Export Fin KSTy KSTm KSTyping KSOp.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Open Scope C_scope.
Open Scope O_scope.
Open Scope M_scope.
Definition CValue := Tm.Value O.
Definition CExp := Tm.Exp O.
(*=MetBF *)
Definition BF : BiFunctor pcmECatType := findomBF
((BiComp (halveBF idBF) (constBF (upred_pcmType CValue)) BiArrow))
[compType of nat]. (*CLEAR*)
Lemma BF_ob A B : ob BF A B = findom_pcmType [compType of nat] (halve_pcmType A -=> upred_pcmType CValue).
by [].
Qed.
Lemma morph_contractive A B C D : contractive (MetricRec.morph BF A B C D).
unfold BF. apply findom_BF_contractive.
apply comp_BF_contractive ; last by apply constBF_contractive. by apply halve_morph_contractive.
Qed.
Module Type RecMet.
Variable W : pcmType.
Variable Unfold : W =-> findom_pcmType [compType of nat] (halve_pcmType W -=> upred_pcmType CValue).
Variable Fold : findom_pcmType [compType of nat] (halve_pcmType W -=> upred_pcmType CValue) =-> W.
Variable FU_id : Fold << Unfold =-= Id.
Variable UF_id : Unfold << Fold =-= Id.
End RecMet.
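(* W solves the recursive world equation
     W  ~=  nat -fin-> (halve W -=> UPred(CValue))
   via the inverse-limit construction DInf on the contractive bifunctor BF;
   Fold and Unfold witness the two directions of the isomorphism. *)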
Module Solution : RecMet.
(*CLEARED*)
Definition W : pcmType := @DInf BF morph_contractive.
Definition Unfold := @Unfold BF morph_contractive :
W =-> findom_pcmType [compType of nat] (halve_pcmType W -=> upred_pcmType CValue).
Definition Fold := @Fold BF morph_contractive :
findom_pcmType [compType of nat] (halve_pcmType W -=> upred_pcmType CValue) =-> W.
(*=End *)
Lemma FU_id : Fold << Unfold =-= Id.
apply (@FU_iso BF morph_contractive).
Qed.
Lemma UF_id : Unfold << Fold =-= Id.
apply (@UF_iso BF morph_contractive).
Qed.
End Solution.
Export Solution.
Lemma Fold_monic x y : Fold x <= Fold y -> x <= y.
move => X. have M:=fmonotonic Unfold X. have e:= (UF_id x). have e':=UF_id y.
apply (pcm_respect e e' M).
Qed.
Lemma Unfold_monic x y : Unfold x <= Unfold y -> x <= y.
move => X. have M:=fmonotonic Fold X. have e:= (FU_id x). have e':=FU_id y.
apply (pcm_respect e e' M).
Qed.
Lemma Unfold_antinonexp n (w w':W) : Unfold w = n = Unfold w' -> w = n = w'.
move => X. have Y:=fnonexpansive Fold X. clear X.
apply (Mrel_trans (Msym (proj2 (Mrefl _ _) (FU_id w) n))).
refine (Mrel_trans _ (proj2 (Mrefl _ _) (FU_id w') n)). by apply Y.
Qed.
Lemma Fold_antinonexp n w w' : Fold w = n = Fold w' -> w = n = w'.
move => X. have Y:=fnonexpansive Unfold X. clear X.
apply (Mrel_trans (Msym (proj2 (Mrefl _ _) (UF_id w) n))).
refine (Mrel_trans _ (proj2 (Mrefl _ _) (UF_id w') n)). by apply Y.
Qed.
Lemma less_nil j (P:j < 0) : False.
move: P ; by rewrite ltn0.
Qed.
(*=TV *)
Definition TV := halve_pcmType W -=> upred_pcmType CValue.
Fixpoint TVal (n:nat) : cmetricType :=
match n with | O => One | S n => TVal n * TV end.
Fixpoint pick n (j : Fin n) : cmetricCatType (TVal n) TV :=
match j with
| FinZ _ => pi2
| FinS _ v => pick v << pi1
end.
(*=End *)
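(* TVal n is the space of semantic environments for n type variables;
   pick j projects out the interpretation of variable j. *)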
Lemma upred_unit_down : downclosed (fun kt => match snd kt : Tm.Value O with | Tm.UNIT => True | _ => False end).
move => k m v l. simpl. by case: v.
Qed.
Definition upred_unit : upred_pcmType CValue := Eval hnf in mk_upred upred_unit_down.
Lemma upred_prod_down (X : upred_metricType CValue * upred_metricType CValue) :
downclosed (fun kt => let v := snd kt in
match v with
| Tm.PAIR v0 v1 => (fst X) (fst kt, v0) /\ (snd X) (fst kt, v1)
| _ => False
end).
move => k k'. case => v L; try by []. simpl.
move => P [A B].
split ; first by apply (upred_downclosed (ltnW P) A). by apply (upred_downclosed (ltnW P) B).
Qed.
Definition upred_prod (p : (upred_cmetricType CValue) * (upred_cmetricType CValue) )
: upred_cmetricType CValue := Eval hnf in mk_upred (@upred_prod_down p).
Lemma upred_product_ne : nonexpansive upred_prod.
move => n.
case => x0 x1. case => y0 y1. case => e0 e1.
move => k v L. case v ; try by [].
move => v0 v1.
specialize (e0 k).
specialize (e1 k). simpl in e0,e1. simpl. rewrite e0. by rewrite e1. by [].
Qed.
Definition upred_productN : metricCatType (upred_pcmType CValue * upred_pcmType CValue) (upred_pcmType CValue) := Eval hnf in mk_fmet upred_product_ne.
Lemma upred_product_mono : monotonic upred_productN.
case => x0 x1. case => y0 y1. case => e0 e1. case => k. case; try by [].
move => v0 v1. specialize (e0 (k,v0)).
specialize (e1 (k,v1)). move => [A B]. split ; by [apply e0 | apply e1].
Qed.
Definition upred_product : upred_pcmType CValue * upred_pcmType CValue =-> upred_pcmType CValue :=
Eval hnf in mk_fpcm upred_product_mono.
Add Parametric Morphism : @upred_product
with signature (@tset_eq ((upred_pcmType CValue * upred_pcmType CValue))) ==> (@tset_eq (upred_pcmType CValue))
as upred_product_eq_compat.
case => x y. case => x' y'. case => e0 e1. case => k v. simpl. case: v; try done.
move => v0 v1. simpl in e1,e0. specialize (e0 (k,v0)).
specialize (e1 (k,v1)). rewrite e0. by rewrite e1.
Qed.
Lemma upred_sumP (X:upred_cmetricType CValue * upred_cmetricType CValue) :
downclosed (fun kt => let v := snd kt:CValue in
match v with
| Tm.INL p0 => (fst X) (fst kt, p0)
| Tm.INR p0 => (snd X) (fst kt, p0)
| _ => False end).
move => k k'. case => v L; simpl; try by []; move => A; apply (upred_downclosed (ltnW L) A).
Qed.
Definition upred_sumt (p:upred_cmetricType CValue * upred_cmetricType CValue) : upred_cmetricType CValue :=
Eval hnf in mk_upred (@upred_sumP p).
Lemma upred_sum_ne : nonexpansive upred_sumt.
move => n.
case => x0 x1. case => y0 y1. case => e0 e1.
move => k. case => v; simpl; try by [].
- move => L; by rewrite (e0 k v L).
- move => L. by rewrite (e1 k v L).
Qed.
Definition upred_sumN : metricCatType (upred_pcmType CValue * upred_pcmType CValue) (upred_pcmType CValue) := Eval hnf in mk_fmet upred_sum_ne.
Lemma upred_sum_mono : monotonic upred_sumN.
case => x0 x1. case => y0 y1. case => e0 e1. move => [k v]. destruct v; simpl; try by [].
- move => H. by apply (e0 (k,v)).
- move => H. by apply (e1 (k,v)).
Qed.
Definition upred_sum : (upred_pcmType CValue * upred_pcmType CValue =-> upred_pcmType CValue) :=
Eval hnf in mk_fpcm upred_sum_mono.
Add Parametric Morphism : @upred_sum
with signature (@tset_eq ((upred_pcmType CValue * upred_pcmType CValue))) ==> (@tset_eq (upred_pcmType CValue))
as upred_sum_eq_compat.
case => x y. case => x' y'. case => e0 e1. move => [k v]. destruct v; simpl; try by [].
Qed.
(*=upred_mu_down *)
Lemma upred_mu_down n
(R: TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue) (s:TVal n)
(P:(halve_pcmType W =-> upred_pcmType CValue) * halve_pcmType W) :
downclosed (fun kt => let: v' := snd kt in
match fst kt, v' with
| O, Tm.FOLD v => True
| S k, Tm.FOLD v => (R ((s,fst P)) (snd P)) (k,v)
| _, _ => False
end).
(*=End *)
case ; first by []. move => m k v. destruct v; simpl; try done.
move => L. destruct k; first by [].
refine (upred_downclosed _). by apply (ssrnat.ltn_trans (ltnSn k) L).
Qed.
(*=upred_mut *)
Definition upred_mut n R s w : upred_pcmType CValue :=
Eval hnf in mk_upred (@upred_mu_down n R s w).
(*=End *)
Definition ne_mono (M:cmetricType) (P Q:pcmType) (f:M * P -> Q) :=
(nonexpansive f) /\ forall m, monotonic (fun x => f (m,x)).
Lemma ne_mono_ne2 (M:cmetricType) (P Q:pcmType) (f:M * P -> Q) (X:ne_mono f) (m:M) : nonexpansive (fun x : P => f (m, x)).
move => n x x' e. simpl. by apply (proj1 X).
Qed.
Definition ne_monoN M P Q f (X:@ne_mono M P Q f) (m:M) : P =-> Q := Eval hnf in mk_fpcmX (ne_mono_ne2 X m) ((proj2 X) m).
Lemma mk_nemon_ne M P Q f (X:@ne_mono M P Q f) : nonexpansive (ne_monoN X).
move => n m m' e x. simpl. by apply ((proj1 X)).
Qed.
Definition mk_nemon M P Q f (X:@ne_mono M P Q f) : M =-> (P -=> Q) := Eval hnf in mk_fmet (mk_nemon_ne X).
(*=upred_muS *)
Lemma upred_mu_ne n R s : ne_mono (@upred_mut n R s).
(*=End *)
Proof.
split.
- move => m. case => f w. case => f' w'. case. case:m ; first by [].
move => m e e' k. simpl. case => v; try done.
simpl. have d:(R (s, f)) = m.+1 = (R (s, f')) by apply fnonexpansive.
specialize (d w). have dd:=fnonexpansive ( (R (s, f'))) e'.
case: k ; first by []. move => k L. apply ((Mrel_trans d dd) k v). by apply (ssrnat.ltn_trans (ltnSn k) L).
- move => f. simpl in f. move => w w' L. case => k. case; simpl; try done.
move => v. case: k; first by []. move => k. simpl in w,w'.
have e:(R (s, f)) w <= (R (s, f)) w' by apply: fmonotonic.
by apply (e _).
Qed.
Definition upred_mup n (R:TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue) (s:TVal n) :
cmetricCatType (halve_pcmType W -=> upred_pcmType CValue) (halve_pcmType W -=> (upred_pcmType CValue)) :=
Eval hnf in mk_nemon (upred_mu_ne R s).
Lemma upred_muC n R (s:TVal n) : contractive (upred_mup R s).
move => m. move => f g e.
move => w. simpl. move => k. case ; try done. case: k ; first by []. move => k v L.
have d:(R (s, f)) = m = (R (s, g)) by apply fnonexpansive.
apply: d. by apply L.
Qed.
Definition upred_muc n (R: TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue) (s:TVal n) :
morphc_pcmType (halve_pcmType W -=> upred_pcmType CValue) (halve_pcmType W -=> upred_pcmType CValue) :=
exist _ (upred_mup R s) (upred_muC R s).
Lemma upred_mun n R : nonexpansive (@upred_muc n R).
move => m s s' e f w. case: m e ; first by []. move => m e k. simpl.
case ; try done. move => v. case: k ; first by []. move => k L.
simpl. have d:(R (s, f)) = m.+1 = (R (s', f)) by apply fnonexpansive.
apply (d w k _). by apply (ssrnat.ltn_trans (ltnSn k) L).
Qed.
(*=upred_mu *)
Definition upred_mu n (R: TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue) :
TVal n =-> morphc_pcmType TV TV :=
Eval hnf in mk_fmet (@upred_mun n R).
(*=End *)
Definition bool_option (b:bool) : b + not b.
case: b. by left.
by right.
Defined.
(*=heap_world *)
Definition heap_world k (h:Heap) (w:W) :=
forall j, j < k -> dom h =i dom (Unfold w) /\
forall l, match Unfold w l, h l with Some w',Some v' => w' w (j,v')
| _,_ => True end.
(*=End *)
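(* heap_world k h w: below index k, the heap h and the world w agree on
   their domains, and each stored value satisfies, at world w, the semantic
   type that w assigns to its location. *)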
(*=IExp *)
Definition IExp (f:TV) (k:nat) (e:Tm.Exp 0) (w:W) :=
forall j, (j <= k) ->
forall (h h':Heap) v (D:EV j e h v h'), heap_world k h w ->
exists w':W, w <= w' /\ heap_world (k - j) h' w' /\ (f w') (k-j,v).
(*=End *)
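(* IExp f k e w: whenever e evaluates in j <= k steps from a heap realising
   w, there is a future world w' whose heap typing holds for the result heap
   at index k - j and whose result value satisfies f at k - j steps. *)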
Lemma IExp_respect (f f':TV) (k:nat) (e:Tm.Exp 0) (w:W) : f =-= f' -> IExp f k e w =-= IExp f' k e w.
move => E. unfold IExp. split.
- move => X j L h h' v ev hv. specialize (X j L h h' v ev hv). case: X => w' [LL [HV Y]].
exists w'. split ; first by []. split ; first by []. specialize (E w'). simpl in E.
specialize (E (k - j, v)). rewrite <- E. by apply Y.
- move => X j L h h' v ev hv. specialize (X j L h h' v ev hv). case: X => w' [LL [HV Y]].
exists w'. split ; first by []. split ; first by []. specialize (E w' (k-j,v)). simpl in E.
rewrite E. by apply Y.
Qed.
Lemma upred_all_down n (R:TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue)
(s:TVal n * halve_pcmType W) : downclosed (fun kt => let: (k,v) := kt in
match v with
| Tm.TLAM v => forall (r:TV) (w':W) j, (j <= k) -> snd s <= w' ->
(IExp (R (fst s,r)) j v w')
| _ => False
end).
move => m k v L. simpl. case: v; try done. case: s => e w.
move => E X r w' j L'. simpl. move => LL. simpl in X. apply X. by apply (leqW (leq_ltn_trans L' L)). by apply LL.
Qed.
Definition upred_allt n R s : upred_pcmType CValue := mk_upred (@upred_all_down n R s).
Lemma world_extend n (w0 w1 v0 : W) : w0 = n = v0 -> w0 <= w1 -> exists v1:W, w1 = n = v1 /\ v0 <= v1.
case: n w0 w1 v0. move => w0 w1 v0 _ L. by exists v0.
move => n w0 w1 v0 e l.
exists (Fold (create_findomF (Unfold w1) (fun x => match Unfold v0 x with | None => Unfold w1 x | Some v => Some v end))).
have e':=fnonexpansive ( Unfold) e. clear e. split.
- have A:=proj1 e'. have B:=proj2 e'. clear e'.
apply Unfold_antinonexp.
refine (Mrel_trans _ (proj2 (Mrefl _ _) (tset_sym (UF_id _)) _)).
split.
+ move => i. rewrite dom_create_findomF. rewrite in_filter.
specialize (A i). case: (Unfold v0 i). simpl. by rewrite andbT.
rewrite findom_indom. by case: (Unfold w1 i).
+ move => i I I'. specialize (B i). rewrite create_findomF_simpl. do 2 rewrite findom_indom in B.
move: B. case_eq (Unfold v0 i).
* move => s U. rewrite U. simpl. move => X.
specialize (A i). do 2 rewrite findom_indom in A. rewrite U in A. rewrite A in X.
specialize (X is_true_true is_true_true). clear A. rewrite I. rewrite <- X.
have ll:=fmonotonic Unfold l. clear l. specialize (ll i).
case: (Unfold w0 i) X ll ; last by []. move => w0i e X. specialize (X _ (refl_equal _)). case: X => m [e0 e1].
rewrite e0. apply Msym. by apply: (proj2 (Mrefl _ _) e1 _).
* move => e _. specialize (A i). do 2 rewrite findom_indom in A. rewrite e in A. simpl in A.
rewrite findom_indom. by case: (Unfold w1 i).
- apply Unfold_monic.
refine (@pcm_respect _ _ _ _ _ (tset_refl _) (tset_sym (UF_id _)) _).
move => i f E. have ll:=fmonotonic Unfold l. specialize (ll i). clear l.
have A:=proj2 e' i. do 2 rewrite findom_indom in A. rewrite E in A.
rewrite create_findomF_simpl. rewrite E. exists f. split ; last by [].
have e0:=proj1 e' i. do 2 rewrite -> findom_indom in e0. rewrite E in e0.
rewrite e0 in A. specialize (A is_true_true is_true_true). case: (Unfold w0 i) A ll ; last by [].
move => w0i A ll. specialize (ll w0i (refl_equal _)). case: ll => w1i [P Q].
rewrite findom_indom. by rewrite P.
Qed.
Lemma heap_world0 w h : heap_world 0 h w.
by [].
Qed.
Hint Resolve heap_world0.
Lemma heap_world_eq m h w w' j : (j <= m) -> w = m = w' -> heap_world j h w -> heap_world j h w'.
rewrite natO_le. case:m h w w' j ; first by move => h w w' j L E _ ; rewrite leqn0 in L ; rewrite (eqP L).
move => m h w w'. case ; first by [].
move => j L E. move => X k l. specialize (X k l). case: X => D X.
have E':=fnonexpansive Unfold E. case: E' => D' E'.
split ; first by move => i ; rewrite -> D ; apply D'.
move => i. specialize (X i). case_eq (Unfold w' i) ; last by [].
move => wi' e'. specialize (D' i). do 2 rewrite findom_indom in D'. rewrite e' in D'.
specialize (E' i). do 2 rewrite findom_indom in E'. rewrite e' in E'. rewrite D' in E'.
specialize (E' is_true_true is_true_true). clear D'. case: (Unfold w i) X E' ; last by [].
move => wi. case: (h i) ; last by []. move => v A X. specialize (X w'). simpl in X.
have Y:=fnonexpansive wi. specialize (Y m.+2 w w' E). rewrite <- (Mmono Y) in X.
specialize (X k v). specialize (X (leq_trans l L)). by rewrite <- X.
Qed.
Lemma upred_allN n R : ne_mono (@upred_allt n R).
split.
- move => m.
case => x0 x1. case => y0 y1. case:m ; first by [].
move => m. case => e e'. move => k. case ; try done.
move => ee Lk. split.
+ move => A r w j Lj. specialize (A r). move => Lw i Li h h' v ev hv.
simpl in e'. destruct (world_extend (Msym e') Lw) as [w' [X L']].
specialize (A w' j Lj L' i Li). specialize (A h h' v ev).
have hv':= heap_world_eq (leq_trans Lj Lk) X hv.
specialize (A hv'). destruct A as [w0 [lw0 [hv0 A]]].
destruct (world_extend (Msym X) lw0) as [w1 [E1 L1]]. exists w1. split ; first by [].
split ; first by apply: (heap_world_eq (leq_trans (leq_subr _ _) (leq_trans Lj Lk)) E1 hv0).
have EE:(R ((x0, r))) = m.+1 = R ((y0, r)) by apply fnonexpansive.
specialize (EE w0). have e0:R ((y0, r)) w0 = m.+1 = R ((y0, r)) w1. apply: fnonexpansive. by apply E1.
have e1:=Mrel_trans EE e0. clear EE e0.
specialize (e1 (j-i) v).
specialize (e1 (leq_trans (leq_subr _ _) (leq_trans Lj Lk))). by rewrite <- e1.
+ move => A r w j Lj. specialize (A r). move => Lw i Li h h' v ev hv.
simpl in e'. destruct (world_extend e' Lw) as [w' [X L']].
specialize (A w' j Lj L' i Li). specialize (A h h' v ev).
have hv':= heap_world_eq (leq_trans Lj Lk) X hv.
specialize (A hv'). destruct A as [w0 [lw0 [hv0 A]]].
destruct (world_extend (Msym X) lw0) as [w1 [E1 L1]]. exists w1. split ; first by [].
split ; first by apply: (heap_world_eq (leq_trans (leq_subr _ _) (leq_trans Lj Lk)) E1 hv0).
have EE:(R ((x0, r))) = m.+1 = R ((y0, r)) by apply fnonexpansive. specialize (EE w1).
have e0:R (y0, r) w1 = m.+1 = R (y0, r) w0 by apply: fnonexpansive ; apply (Msym E1).
have e1:=Mrel_trans EE e0. clear EE e0.
specialize (e1 (j-i) v (leq_trans (leq_subr _ _) (leq_trans Lj Lk))). by rewrite e1.
- move => s w w' L. case => k v. simpl. case: v ; try done.
move => v X f we' j Lj Lw'. specialize (X f we' j Lj (Ole_trans L Lw')).
move => i Li h h' v' ev hv. specialize (X i Li h h' v' ev hv).
destruct X as [we [Lw [hv' X]]]. exists we. split ; first by []. split ; first by []. by apply X.
Qed.
Definition upred_all n (R : TVal n.+1 =-> halve_pcmType W -=> upred_pcmType CValue) :
(TVal n) =-> (halve_pcmType W -=> upred_pcmType CValue) :=
Eval hnf in mk_nemon (upred_allN R).
(*=upred_ref_down *)
Lemma upred_ref_down n
(R : TVal n =-> halve_pcmType W -=> upred_pcmType CValue)
(s:TVal n) (w: halve_pcmType W) :
(*=End *)
downclosed (fun kt => let: (k,v) := kt in
match k,(v:Tm.Value O) with
| O,Tm.LOC l => True
| S k,Tm.LOC l => Unfold w l = k.+1 = Some (R s)
| _,_ => False
end).
move => m k v L. case: m L ; first by []. move => m L. case v ; try done.
move => l P. case: k L ; first by []. move => k L. by apply (Mrel_mono (ltnW L)).
Qed.
(*=upred_reft *)
Definition upred_reft n R w : upred_pcmType CValue :=
mk_upred (@upred_ref_down n R (fst w) (snd w)).
(*=End *)
Lemma upred_refN n R : ne_mono (@upred_reft n R).
split.
- move => m.
case => s w. case => s' w'. case: m ; first by []. move => m. case => e e'. move => k v.
case: k ; first by []. move => k.
case: v; try done. move => l. have X:=fnonexpansive Unfold e'. clear e'.
simpl in e. simpl in X. case: m e X ; first by []. move => m e X L. simpl.
have e0:=proj2 X l. have e1:=fnonexpansive R e. clear e. rewrite <- (proj1 X) in e0.
rewrite findom_indom in e0. move: (proj1 X l). do 2 rewrite findom_indom.
case: (Unfold w l) e0 ; last by simpl ; case: (Unfold w' l).
move => wl. simpl. move => Y. specialize (Y is_true_true is_true_true).
unfold Mrel. simpl. clear X. case: (Unfold w' l) Y ; last by [].
move => wl' Y _. unfold Mrel in Y. simpl in Y. rewrite -> (@Mrel_mono _ _ _ k.+1 m.+1 L Y).
by rewrite -> (@Mrel_mono _ _ _ k.+1 m.+1 L (Mmono e1)).
- move => s. move => w w' Lw. simpl. case => k v. simpl. case: k ; first by []. case: v ; try done.
move => l k. have L:=fmonotonic Unfold Lw. clear Lw. specialize (L l).
case: (Unfold w l) L ; last by []. move => wl L. specialize (L wl (refl_equal _)).
case: L => wl' [e e']. rewrite e. unfold Mrel ; simpl. move => A. rewrite <- A. apply Msym.
by apply (proj2 (Mrefl _ _) e' k.+1).
Qed.
(*=upred_ref *)
Definition upred_ref n
(R : TVal n =-> halve_pcmType W -=> upred_pcmType CValue) :
(TVal n) =-> (halve_pcmType W -=> upred_pcmType CValue) :=
Eval hnf in mk_nemon (upred_refN R).
(*=End *)
(*=upred_int_down *)
Lemma upred_int_down :
downclosed (fun kt => match snd kt : Tm.Value O with
| Tm.INT i => True | _ => False end). (*CLEAR*)
move => k m v l. simpl. by case: v.
Qed.
(*CLEARED*)
Definition upred_int : upred_pcmType CValue := Eval hnf in mk_upred upred_int_down.
(*=End *)
(*=upred_arrow_down *)
Lemma upred_arrow_down n
(R0 R1: TVal n =-> halve_pcmType W -=> upred_pcmType CValue)
(s:TVal n) (w: halve_pcmType W) : downclosed (fun kt => let: (k,v) := kt in
match v with
| Tm.LAM e => forall w' j (va:Tm.Value O), w <= w' -> (j <= k) ->
(R0 s w') (j,va) -> IExp (R1 s) j (Tm.subExp (cons va (@Tm.idSub _)) e) w'
| _ => False end).
(*=End *)
move => m k. simpl. case ; try done. move => e L X.
move => w' j va Lw Lj. by apply (X w' j va Lw (ssrnat.leq_trans Lj (leqW L))).
Qed.
(*=upred_arrowt *)
Definition upred_arrowt n R0 R1 s w : upred_pcmType CValue :=
Eval hnf in mk_upred (@upred_arrow_down n R0 R1 s w).
(*=End *)
Lemma IExp_nonexp m (f f':TV) (k:nat) (e:Tm.Exp 0) (w w':halve_cmetricType W) : f = m = f' -> w = m = w' -> k < m -> IExp f k e w =-= IExp f' k e w'.
move => E Ew l. unfold IExp. case: m E Ew l ; first by []. move => m E Ew l. split.
- move => X j L h h' v ev hv. specialize (X j L h h' v ev).
have hv':=heap_world_eq _ (Msym Ew) hv. specialize (X (hv' l)). destruct X as [w0 [LL [HV Y]]].
case: (world_extend Ew LL) => w0' [Ew' LL']. exists w0'. split ; first by [].
split ; first by apply: (heap_world_eq (ssrnat.leq_trans (leq_subr _ _) _) Ew').
specialize (E w0). simpl in E.
have E':f w0 = m.+1 = f' w0' by apply Mrel_trans with (y:=f' w0) ; [by apply E | apply: fnonexpansive].
specialize (E' (k - j) (v)).
specialize (E' (leq_ltn_trans (leq_subr j k) l)). rewrite <- E'. by apply Y.
- move => X j L h h' v ev hv. specialize (X j L h h' v ev).
have hv':=heap_world_eq _ Ew hv. specialize (X (hv' l)). destruct X as [w0 [LL [HV Y]]].
case: (world_extend (Msym Ew) LL) => w0' [Ew' LL']. exists w0'. split ; first by [].
split ; first by apply: (heap_world_eq (ssrnat.leq_trans (leq_subr _ _) _) Ew').
have E':f w0' = m.+1 = f' w0 by apply Mrel_trans with (y:=f' w0') ; [by apply E | apply: fnonexpansive ; apply (Msym Ew')].
specialize (E' (k - j) v).
specialize (E' (leq_ltn_trans (leq_subr j k) l)). rewrite -> E'. by apply Y.
Qed.
Lemma upred_arrowN n R : ne_mono (fun a => @upred_arrowt n (fst R) (snd R) (fst a) (snd a)).
split.
- case: R => R0 R1 m x y E. case: m E ; first by []. move => m E. move => k v. simpl. case: v ; try done.
move => e.
move => Lk. split ; move => B w' j va lw Lj T I.
+ have E':=proj2 E. simpl in E'. case: (world_extend (Msym E') lw) => x' [E0 L0]. specialize (B x' j va L0 Lj).
apply (proj1 (IExp_nonexp (Tm.subExp (cons va _) e) (Mrel_refl m.+1 (R1 (fst y))) (Msym E0) (ssrnat.leq_trans Lj Lk))).
have e2:(R1 (fst x)) = m.+1 = (R1 (fst y)) by apply: (fnonexpansive R1) ; apply (proj1 E).
apply (IExp_nonexp _ e2 (Mrel_refl _ x')). by apply (leq_ltn_trans Lj Lk). apply B.
have E1:R0 (fst x) = m.+1 = R0 (fst y) by apply fnonexpansive ; apply (proj1 E).
specialize (E1 x'). have E1':=fnonexpansive ((R0 (fst y))). specialize (E1' m.+1 x' w' (Msym E0)).
have E2:=Mrel_trans E1 E1'. clear E1 E1'. specialize (E2 j va (leq_ltn_trans Lj Lk)). by rewrite E2.
+ have E':=proj2 E. simpl in E'. case: (world_extend E' lw) => x' [E0 L0]. specialize (B x' j va L0 Lj).
apply (proj2 (IExp_nonexp (Tm.subExp (cons va _) e) (Mrel_refl m.+1 (R1 (fst x))) (E0) (ssrnat.leq_trans Lj Lk))).
have e2:(R1 (fst x)) = m.+1 = (R1 (fst y)) by apply: (fnonexpansive R1) ; apply (proj1 E).
apply (IExp_nonexp _ e2 (Mrel_refl _ x')). by apply (leq_ltn_trans Lj Lk). apply B.
have E1:R0 (fst y) = m.+1 = R0 (fst x) by apply fnonexpansive ; apply (Msym E). specialize (E1 x').
have E1':R0 (fst x) x' = m.+1 = R0 (fst x) w' by apply (fnonexpansive (R0 (fst x))) ; apply (Msym E0).
have E2:=Mrel_trans E1 E1'. clear E1 E1'. specialize (E2 j va (leq_ltn_trans Lj Lk)). by rewrite E2.
- case: R => R0 R1. move => s w w' Lw. case => k v. simpl. case: v; try done. move => e0 X w1 j va Lw1 Lj.
by apply (X w1 j va (Ole_trans Lw Lw1) Lj).
Qed.
(*=upred_arrow *)
Definition upred_arrow n
(R0 R1:TVal n =-> halve_pcmType W -=> upred_pcmType CValue) :
cmetricCatType (TVal n) (halve_pcmType W -=> upred_pcmType CValue) :=
Eval hnf in mk_nemon (upred_arrowN (R0,R1)).
(*=End *)
Add Parametric Morphism n : (@upred_arrow n)
with signature (@tset_eq ((metricCatType (TVal n) TV))) ==>
(@tset_eq ((metricCatType (TVal n) TV))) ==>
(@tset_eq ((metricCatType (TVal n) TV)))
as upred_arrow_eq_compat.
move => x x' e y y' e'. move => s w. case => k. simpl. case ; try done.
move => ee. split ; move => X w' j va Lw Lj ; specialize (X w' j va Lw Lj).
- move => Y. apply: (proj1 (IExp_respect _ _ _ _) (X _)) ; first by move => w0 ; apply: e'.
specialize (e s w' (j,va)). by rewrite e.
- move => Y. apply: (proj2 (IExp_respect _ _ _ _) (X _)) ; first by move => w0 ; apply: e'.
specialize (e s w' (j,va)). by rewrite <- e.
Qed.
Fixpoint Prod (T:Type) n : Type :=
match n with
| O => unit
| S n => (Prod T n * T)%type
end.
Lemma Prod_constP n T (p:(upred_pcmType (Prod T n) * upred_pcmType T)) :
downclosed (fun kt => fst p (fst kt,fst (snd kt)) /\ snd p (fst kt,snd (snd kt))).
case:p => A B.
move => m k. simpl. case => te t l. simpl. move => [C D].
by split ; [apply (upred_downclosed (ltnW l) C) | apply (upred_downclosed (ltnW l) D)].
Qed.
Definition Prod_const n T (p:(upred_pcmType (Prod T n) * upred_pcmType T)) : upred_pcmType (Prod T n.+1) :=
Eval hnf in mk_upred (@Prod_constP n T p).
Lemma Prod_consN n T : nonexpansive (@Prod_const n T).
move => m. case => x y. case => x' y'. case: m ; first by [].
move => m. case => e0 e1. move => k. simpl. case => te t l. simpl.
specialize (e0 k te l). specialize (e1 k t l). rewrite e0. by rewrite e1.
Qed.
Lemma Prod_consM n T : monotonic (mk_fmet (@Prod_consN n T)).
case => x t. case => x' t'. case => e0 e1. case => k v. simpl. simpl in v. case: v => vt v.
specialize (e0 (k,vt)). specialize (e1 (k,v)). by move => [A B] ; split ; [apply e0 | apply e1].
Qed.
Definition Prod_cons n T : upred_pcmType (Prod T n) * upred_pcmType T =-> upred_pcmType (Prod T n.+1) :=
Eval hnf in mk_fpcm (@Prod_consM n T).
Implicit Arguments Prod_cons [n T].
(*=IVal *)
Import Ty.
Fixpoint IVal n (t:Ty.Ty n) : cmetricCatType (TVal n) TV :=
match t with
| TVar J => pick J
| Int => mconst _ (pconst _ upred_int)
| Unit => mconst _ (pconst _ upred_unit)
| Mu t => FIXP << upred_mu (IVal t)
| t ** t' => (exp_fun Pcomp upred_product : metricCatType _ _) << pprod_fun_ne
<< <|IVal t,IVal t'|>
| Sum t t' => (exp_fun Pcomp upred_sum : metricCatType _ _) << Pprod_fun
<< <|IVal t, IVal t'|>
| All t => upred_all (IVal t)
| t --> t' => upred_arrow (IVal t) (IVal t')
| Ref t => upred_ref (IVal t)
end.
(*=End *)
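(* IVal sends each syntactic type to a nonexpansive map from semantic
   environments to world-indexed value predicates; Mu is interpreted by the
   metric fixed point FIXP of the contractive upred_mu. *)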
(*=IEnv *)
Fixpoint IEnv E n :
TEnv E n -> TVal E =-> halve_pcmType W -=> upred_pcmType (Prod CValue n) :=
match n with
| 0 => fun env => mconst _ (pconst _ (upred_empty unit))
| n.+1 => fun env => (pcompM _ _ _ << ppair _ Prod_cons << Pprod_fun) <<
<| IEnv (tl env), (IVal (hd env)) |>
end.
(*=End *)
(*=IStore *)
Lemma IStore_down E (Se:StoreType E) (s:TVal E) :
downclosed (fun kt => forall l t, Se l = t ->
(IVal (Ref t) s (snd kt)) (fst kt, Tm.LOC l)). (*CLEAR*)
move => m k w Lk. move => X.
move => l t E'. specialize (X l t E').
by apply (upred_downclosed (ltnW Lk) X).
Qed.
(*CLEARED*)
Definition IStore E (Se:StoreType E) (s:TVal E) : upred_pcmType W :=
Eval hnf in mk_upred (@IStore_down E Se s).
(*=End *)
Definition emptyMap T : FMap 0 T. intros var. Require Import Program. dependent destruction var. Defined.
Fixpoint Prod_subst T n : Prod T n -> FMap n T :=
match n with
| O => fun _ => emptyMap T
| S n => fun P => cons (snd P) (Prod_subst (fst P))
end.
(*=VRel *)
Definition VRel E n (env:TEnv E n) (Se:StoreType E) (v:Tm.Value n) (t:Ty.Ty E) :=
forall k (s:TVal E) g w,
(IEnv env s w) (k,g) -> (IStore Se s) (k,w) ->
(IVal t s w) (k, Tm.subVal (Prod_subst g) v).
Definition ERel E n (env:TEnv E n) (Se:StoreType E) (e:Tm.Exp n) (t:Ty.Ty E) :=
forall k (s:TVal E) g w,
(IEnv env s w) (k,g) -> (IStore Se s) (k,w) ->
IExp (IVal t s) k (Tm.subExp (Prod_subst g) e) w.
(*=End *)
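(* VRel and ERel are the semantic typing judgements for values and
   expressions; the fundamental theorem FT below shows every syntactically
   well-typed term inhabits them. *)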
Lemma FT_var E n m (env:TEnv E n) s w k g : (IEnv env s w) (k, g) -> IVal (env m) s w (k,nth m (Prod_subst g)).
Proof.
dependent induction m.
- simpl. by case => A B ; apply B.
- simpl. case => A B. rewrite tlCons. by apply (IHm (tl env)).
Qed.
Lemma IVal_mapTy P (ops:Map.Ops P) n (t:Ty n) s m s' (a:FMap n (P m)) :
(forall n (t:P n) s s', IVal (Map.vl ops t) s' =-= IVal (Map.vl ops (Map.wk ops t)) (s', s)) ->
(forall i, pick i s =-= (@IVal m (Map.vl ops (a i)) s')) ->
IVal t s =-= IVal (Map.mapTy ops a t) s'.
move => Y. elim: n / t s m s' a.
- simpl. move => e i s m s' a X. by apply X.
- by [].
- by [].
- move => E t IH t' IH' s m s' a X. simpl. specialize (IH s m s' a X). specialize (IH' s m s' a X).
move => aa. simpl. specialize (IH aa). specialize (IH' aa). simpl in IH. simpl in IH'.
by apply (frespect upred_product (pair_eq_compat IH IH')).
- move => E t IH t' IH' s m s' a X. simpl. specialize (IH s m s' a X). specialize (IH' s m s' a X).
move => aa. simpl. specialize (IH aa). specialize (IH' aa). simpl in IH. simpl in IH'.
by apply (frespect upred_sum (pair_eq_compat IH IH')).
- move => E t IH s m s' a X. simpl. apply: frespect. move => ss. simpl. move => bb. simpl. case => k v. simpl.
case: k ; first by []. case: v ; try done. move => v n.
specialize (IH (s,ss) m.+1 (s',ss)). specialize (IH (Map.lift ops a)).
apply IH. move => i. clear IH. move: Y X. clear. move => Y X. rewrite Map.liftAsCons.
dependent destruction i ; simpl ; first by rewrite Map.vlvr. unfold Map.shift. rewrite -> (X i).
by apply Y.
- move => E t IH s m s' a X. simpl. move => w. simpl. case => k v. case: v ; try done.
move => e. simpl. split.
+ move => A r w' j Lj Lw. specialize (IH (s,r) m.+1 (s',r) (Map.lift ops a)).
specialize (A r w' j Lj Lw). apply: (proj1 (IExp_respect _ _ _ (IH _)) A).
move => i. dependent destruction i ; simpl ; first by rewrite Map.vlvr.
rewrite -> (X i). by apply Y.
+ move => A r w' j Lj Lw. specialize (IH (s,r) m.+1 (s',r) (Map.lift ops a)).
specialize (A r w' j Lj Lw). apply: (proj2 (IExp_respect _ _ _ (IH _)) A).
move => i. dependent destruction i ; simpl ; first by rewrite Map.vlvr.
rewrite -> (X i). by apply Y.
- move => E t IH t' IH' s m s' a X w. simpl. case => k v. simpl. case: v ; try done.
move => e. split.
+ move => A w' j v Lw Lj Iv. specialize (A w' j v Lw Lj).
specialize (IH s m s' a X w' (j,v)). rewrite <- IH in Iv. simpl in Iv. specialize (A Iv).
specialize (IH' s m s' a X). by apply(proj1 (IExp_respect _ _ _ IH') A).
+ move => A w' j v Lw Lj Iv. specialize (A w' j v Lw Lj).
specialize (IH s m s' a X w' (j,v)). rewrite -> IH in Iv. simpl in Iv. specialize (A Iv).
specialize (IH' s m s' a X). by apply(proj2 (IExp_respect _ _ _ IH') A).
- move => E t IH s m s' a X w. simpl. case => k v. simpl. case: k ; first by [].
case: v ; try done. move => l k. specialize (IH s m s' a X). split.
+ move => A. rewrite -> A. by apply: (proj2 (Mrefl _ _) IH).
+ move => A. rewrite -> A. by apply: (proj2 (Mrefl _ _) (tset_sym IH)).
Qed.
Lemma IVal_shiftTy n (t:Ty n) s s' : IVal t s =-= IVal (shiftTy t) (s,s').
by apply IVal_mapTy.
Qed.
(*=IValSubst *)
Lemma IVal_substTy n (t:Ty n) s m s' a :
(forall i, pick i s =-= (@IVal m (a i) s')) -> IVal t s =-= IVal (subTy a t) s'.
(*=End *)
move => X. apply IVal_mapTy ; simpl ; last by [].
clear. move => n t s s'. by apply: IVal_shiftTy.
Qed.
Lemma IEnv_shiftTy E n (env:TEnv E n) (s:TVal E) w r :
IEnv (fun v : Fin n => shiftTy (env v)) (s, r) w =-= IEnv env s w.
dependent induction n ; first by [].
simpl. specialize (IHn (tl env) s w r).
case => k. case => p v. simpl. split.
- case => A B. split ; first by apply (proj1 (IHn (k,p)) A).
have X:=IVal_shiftTy (hd env) s r w (k,v). rewrite X. by apply B.
- case => A B. split ; first by apply (proj2 (IHn (k,p)) A).
have X:=IVal_shiftTy (hd env) s r w (k,v). rewrite <- X. by apply B.
Qed.
Lemma heap_world_down j k h w : j <= k -> heap_world k h w -> heap_world j h w.
move => L X i Li. specialize (X i (ssrnat.leq_trans Li L)). split ; case: X ; move => X Y ; first by apply X.
move => l. specialize (Y l). by apply Y.
Qed.
Lemma IStore_upclosed n t (s:TVal n) j w w' : w <= w' -> (IStore t s) (j,w) -> (IStore t s) (j,w').
move => L. unfold IStore. simpl. move => X l t' E. specialize (X l t' E).
case: j X ; first by []. move => j. have ll:=fmonotonic Unfold L. clear L.
move => A. rewrite <- A. specialize (ll l). case: (Unfold w l) ll A ; last by [].
move => wl ll _. specialize (ll _ (refl_equal _)). case: ll => wl' [e0 e1].
rewrite e0. apply Msym. by apply: (proj2 (Mrefl _ _) e1 _).
Qed.
Lemma IVal_upclosed n (t:Ty n) s w w' k v : w <= w' -> (IVal t s w) (k,v) -> (IVal t s w') (k,v).
move => Lw. apply (fmonotonic (IVal t s) Lw (k,v)).
Qed.
Lemma IEnv_upclosed E n (env:TEnv E n) (s:TVal E) k v w w' : w <= w' -> (IEnv env s w) (k,v) -> (IEnv env s w') (k,v).
elim: n env v ; first by [].
move => n IH env v L. simpl. case => A B.
split.
- specialize (IH (tl env) (fst v) L). by apply IH.
- by apply (IVal_upclosed L B).
Qed.
Lemma IExp_eq f k w e e' : e = e' -> IExp f k e w -> IExp f k e' w.
move => E. by rewrite E.
Qed.
Lemma world_update (w:W) f l : l \notin dom (Unfold w) -> w <= (Fold (updMap l f (Unfold w))).
move => X.
apply Unfold_monic. have ee:=UF_id (updMap l f (Unfold w)).
refine (pcm_respect (tset_refl _) (tset_sym ee) _). clear ee. simpl.
move => l0 m e. case_eq (l0 == l) => A.
- rewrite <-(eqP A) in X. rewrite findom_indom in X. by rewrite e in X.
- have B:(l0 != l) by rewrite A. clear A.
exists m. rewrite updMap_simpl2 ; last by apply B. by rewrite e.
Qed.
Lemma csubst_var n m (g:Prod CValue n) : Tm.subVal (Prod_subst g) (Tm.VAR m) = nth m (Prod_subst g).
Proof.
dependent induction m; first by []. simpl. specialize (IHm (fst g)).
unfold Tm.subVal, Tm.Map.mapVal. simpl. by rewrite tlCons.
Qed.
Lemma negbI (b:bool) : (b -> False) -> ~~ b.
case:b ; last by [].
move => X. simpl. by case: (X is_true_true).
Qed.
(*=Fundamental *)
Lemma FT E (se:StoreType E) :
(forall n (env : TEnv E n) v t, VTy se env v t -> VRel env se v t) /\
(forall n (env : TEnv E n) e t, ETy se env e t -> ERel env se e t).
(*=End *)
move: E se ; apply (@Typing_ind) => E se n env.
(* VAR *)
- move => m k s g w. move => X Y. rewrite csubst_var. by apply FT_var.
(* LOC *)
- move => l k s g w. Tm.UnfoldRenSub. unfold Tm.Map.mapVal. case: k ; first by [].
move => k Ig Is. simpl. by apply: Is.
(* INT *)
- by [].
(* UNIT *)
- by [].
(* LAM *)
- move => t0 t1 e D R. unfold VRel. move => k s g w Ie Is. unfold Tm.subVal. rewrite Tm.Map.mapLAM.
fold Tm.liftSub. fold Tm.subExp. simpl.
move => w' j v l L Iv. unfold ERel in R. specialize (R j). rewrite <- (proj2 (Tm.applyComposeSub _)).
rewrite Tm.composeSingleSub. specialize (R s). specialize (R (g,v)). simpl in R. apply R.
rewrite tlCons. rewrite hdCons. split ; last by []. apply (IEnv_upclosed l). by apply (upred_downclosed L Ie).
move => ll t sll. clear R. specialize (Is ll _ sll). simpl in Is.
case: k Ie Is L.
+ rewrite natO_le. rewrite leqn0. move => _ _ ee. by rewrite (eqP ee).
+ move => k Ie Is L. clear Iv. case: j L ; first by [].
move => j L. case_eq (Unfold w ll) ; last by move => ee ; rewrite ee in Is.
move => wll wle. case: (fmonotonic Unfold l ll wll wle) => wl'. case => wle' ee.
rewrite wle'. rewrite wle in Is. rewrite <- (Mrel_mono L Is). unfold Mrel. simpl.
by apply (proj2 (Mrefl _ _) (tset_sym ee)).
(* TLAM *)
- move => t'. move => e X Y k ts g w. move => A B. unfold Tm.subVal. autorewrite with mapHints.
fold Tm.subExp. simpl. move => r w' j Lj Lw. specialize (Y j (ts,r) g). apply Y.
+ apply: (IEnv_upclosed Lw). apply (upred_downclosed Lj).
by apply (proj2 (IEnv_shiftTy _ _ _ _ _) A).
+ simpl. move => l t ee. simpl in B. specialize (B l (se l) (refl_equal _)).
case: k A B Lj.
* rewrite natO_le. rewrite leqn0. move => _ _ ke. by rewrite (eqP ke).
* move => k A B. clear Y. case: j ; first by [].
move => j Lj. rewrite <- ee. clear t ee. have Lww:=fmonotonic Unfold Lw. clear Lw.
specialize (Lww l). case: (Unfold w l) B Lww ; last by [].
move => wl B Lw. specialize (Lw wl (refl_equal _)). case: Lw => wl'. case => ee e'.
rewrite ee. unfold Mrel. simpl. unfold Mrel in B. simpl in B. apply (Mrel_mono Lj).
have aa:= Mrel_trans (proj2 (Mrefl _ _) (tset_sym e') k.+1) B. apply (Mrel_trans aa). clear.
by apply (proj2 (Mrefl _ _) (IVal_shiftTy (se l) _ _)).
(* PAIR *)
- move => t0 t1 e0 e1. move => _ IH0 _ IH1. move => k s g w Ig Is. simpl.
specialize (IH0 k s g w Ig Is). specialize (IH1 k s g w Ig Is). split.
+ by apply IH0. + by apply IH1.
(* INL *)
- move => t0 t1 e _ IH0. move => k s g w Ig Is.
by apply (IH0 k s g w Ig Is).
(* INR *)
- move => t0 t1 e _ IH0. move => k s g w Ig Is.
by apply (IH0 k s g w Ig Is).
(* FOLD *)
- move => e t' td IH. move => k s g w Ig Is.
have ee:=FIXP_fp (upred_mu (IVal t') s).
specialize (ee w (k, Tm.subVal (Prod_subst g) (Tm.FOLD e))). apply (proj2 ee). clear ee.
case: k Ig Is ; first by []. move => k Ig Is. unfold Tm.subVal. autorewrite with mapHints. fold Tm.subVal.
specialize (IH k.+1 s g w Ig Is). simpl.
have A:=proj2 (IVal_substTy _ _ _ _) IH.
specialize (A (s,FIXP (upred_mu (IVal t') s))).
apply: (upred_downclosed (leqW (leqnn _)) (A _)).
move => i. by dependent destruction i.
(* VAL *)
- move => v t td IH k s g w Ig Is. specialize (IH k s g w Ig Is).
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal.
move => j Lj h h' v0 ev. inversion ev. clear h0 H0. clear v1 H1.
move: ev. move => ev.
move: ev. rewrite <- H2. clear v0 H2. rewrite <- H. clear Lj H j.
move => ev hv. exists w. split ; first by []. rewrite subn0. split ; first by [].
by apply IH.
(* LET *)
- move => e0 e1 t0 t1 _ IH0 _ IH1. move => k s g w Ig Is.
move => j Lj h h' v. unfold Tm.subExp. autorewrite with mapHints. fold Tm.subExp. move => ev.
inversion ev. clear h0 H0. clear v1 H3. clear e2 H1 e3 H2 H4.
move => hv. have Ln0 := leq_addr n1 n0. rewrite H in Ln0.
specialize (IH0 k s g w Ig Is n0 (leq_trans Ln0 Lj) h _ _ X hv).
case: IH0 => w1 [Lw [hv0 IH0]]. specialize (IH1 (k-n0) s (g,v0) w1).
have A:IEnv (cons t0 env) s w1 (k - n0, (g, v0)). split ; simpl ; last by apply IH0.
rewrite tlCons. by apply: (IEnv_upclosed Lw) ; apply: (upred_downclosed _ Ig) ; apply leq_subr.
specialize (IH1 A).
have B: (IStore se s) (k - n0, w1) by apply (IStore_upclosed Lw) ; apply: (upred_downclosed _ Is) ; apply leq_subr.
specialize (IH1 B n1).
have Ln1:(n1 <= k - n0). rewrite natO_le. rewrite <- (leq_add2l n0). rewrite H. rewrite subnKC ; first by [].
by apply (leq_trans Ln0).
specialize (IH1 Ln1 h1 h'). unfold subSingle in X0. rewrite <- (proj2 (Tm.applyComposeSub _)) in X0.
rewrite -> Tm.composeSingleSub in X0. specialize (IH1 _ X0 hv0). case: IH1 => wr. case => Lw' R.
exists wr. split ; first by apply (Ole_trans Lw Lw').
by rewrite subnDA.
(* FST *)
- move => v t1 t2 td IH k s g w Ig Is. move => j Lj h h' v'.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev. clear h0 H0.
specialize (IH k s g w Ig Is). move: ev. rewrite <- H. clear j H Lj. rewrite H2 in H1. clear v0 H2.
move => ev hv. exists w. split ; first by []. rewrite subn0. split ; first by [].
rewrite <- H1 in IH. simpl in IH. by case: IH.
(* SND *)
- move => v t1 t2 td IH k s g w Ig Is. move => j Lj h h' v'.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev. clear h0 H0.
specialize (IH k s g w Ig Is). move: ev. rewrite <- H. clear j H Lj. rewrite H2 in H1. clear v1 H2.
move => ev hv. exists w. split ; first by []. rewrite subn0. split ; first by [].
rewrite <- H1 in IH. simpl in IH. by case: IH.
(* OP *)
- move => op v. move => A B. move => k s g w Ie Is. simpl. unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal.
move => j Lj h h' vv ev. inversion ev. clear op0 H1. rewrite <- H in Lj, ev. clear j H ev.
clear h0 H0. rewrite <- H4. clear h' H4. clear vv H3. rewrite subn0. simpl. move => hv. exists w. by split ; last split.
(* UNFOLD *)
- move => t v _ IH. move => k s g w Ie Is. move => j' Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
rewrite -> H2 in H1. clear v0 H2. rewrite <- H in Lj. clear ev j' H. clear h0 H0.
rewrite <- H3. clear h' H3. move => hw. specialize (IH k s g w Ie Is). simpl in IH. rewrite <- H1 in IH.
clear Ie Is H1 v g. case: k Lj IH hw ; first by [].
move => k _ IH hw. have IIH:= proj1 (FIXP_fp (upred_muc (IVal t) s) w (k.+1, Tm.FOLD vr)) IH. clear IH.
simpl in IIH. rewrite subSS. rewrite subn0. unfold unfoldTy. unfold subOneTy.
exists w ; split ; first by []. split ; first by apply (heap_world_down (leqW (leqnn k))).
apply: (proj1 (IVal_substTy t _ w _) IIH) => i. by dependent destruction i.
(* REF *)
- move => v t _ IH. move => k s g w Ie Is. move => j' Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
rewrite <- H0 in Lj. clear H0 j' ev. clear vr H3. clear v0 H. clear h0 H2. clear h' H4.
move => hw. case: k Ie Is Lj hw ; first by [].
move => k Ie Is _ hw. rewrite subSS. rewrite subn0. simpl.
exists (Fold (updMap l (IVal t s) (Unfold w))).
have Lw:w <= Fold (updMap l (IVal t s) (Unfold w)).
+ apply: Unfold_monic. move => ll m e. have F:=proj1 (hw k (ltnSn _)). specialize (F l). rewrite F in H1.
case_eq (l == ll) => ee.
* rewrite <- (eqP ee) in e. rewrite findom_indom in H1. by rewrite e in H1.
* case_eq (Unfold (Fold (updMap l (IVal t s) (Unfold w))) ll).
- move => ufwl eaa. exists ufwl. split ; first by [].
have xxx:Some m =-= Some ufwl ; last by apply xxx. rewrite <- eaa. clear ufwl eaa.
have aaa:= (UF_id (updMap l (IVal t s) (Unfold w))). simpl in aaa. unfold Datatypes.id in aaa.
have bb:= (proj2 aaa ll). rewrite -> bb. rewrite updMap_simpl2. by rewrite e.
apply negbI => ea. rewrite (eqP ea) in ee. by rewrite eq_refl in ee.
clear bb. rewrite (proj1 aaa). rewrite indomUpdMap. rewrite findom_indom. rewrite e. simpl. by rewrite orbT.
- move => a. have b:=UF_id (updMap l (IVal t s) (Unfold w)). simpl in b. unfold Datatypes.id in b.
have ne:= findom_indom (Unfold (Fold (updMap l (IVal t s) (Unfold w)))) ll. rewrite a in ne. simpl in ne.
rewrite -> (proj1 b) in ne. rewrite indomUpdMap in ne. rewrite findom_indom in ne. rewrite e in ne.
simpl in ne. by rewrite orbT in ne.
split ; first by apply Lw.
+ split. move => j' Lj. split.
* move => ll. rewrite indomUpdMap.
have b:=UF_id (updMap l (IVal t s) (Unfold w)). simpl in b. unfold Datatypes.id in b.
rewrite (proj1 b ll). rewrite indomUpdMap. have e0:=proj1 (hw k (ltnSn _)) ll. by rewrite e0.
* move => l0. case_eq (Unfold (Fold (updMap l (IVal t s) (Unfold w))) l0) ; last by [].
move => ufl0 e0. have e1:=UF_id (updMap l (IVal t s) (Unfold w)). simpl in e1.
unfold Datatypes.id in e1. have b1:=proj2 e1 l0. rewrite findom_indom in b1. rewrite e0 in b1.
specialize (b1 is_true_true). clear e0. clear e1. specialize (hw k (ltnSn _)).
case: hw => dhe hw. specialize (hw l0). case_eq (l == l0).
- move => e. rewrite (eqP e). simpl.
rewrite (updMap_simpl l0 (Tm.subVal (Prod_subst g) v) h). rewrite -> (eqP e) in b1.
rewrite (updMap_simpl l0) in b1. unfold tset_eq in b1. simpl in b1. simpl.
apply (proj2 ((b1 (Fold (updMap l0 ((IVal t) s) (Unfold w)))) (j', Tm.subVal (Prod_subst g) v))).
clear ufl0 b1.
specialize (IH j' s g w). rewrite <- (eqP e). apply: (IVal_upclosed Lw). apply IH.
by apply (upred_downclosed (leqW (ltnW Lj)) Ie). by apply (upred_downclosed (leqW (ltnW Lj)) Is).
- move => ee. simpl. rewrite updMap_simpl2.
rewrite updMap_simpl2 in b1. specialize (dhe l0). do 2 rewrite findom_indom in dhe.
case_eq (h l0) ; last by move => F ; rewrite F.
move => hl0 e. rewrite e. case: (Unfold w l0) hw b1 ; last by [].
move => uwl0 hw b1. rewrite e in hw. unfold tset_eq in b1. simpl in b1.
specialize (b1 w (k,hl0)). simpl in b1. have hh := proj2 b1 hw. clear uwl0 b1 hw.
apply (fmonotonic ufl0 Lw (j',hl0)). by apply (@upred_downclosed _ _ j' k _ (leqW Lj) hh).
apply negbI => e. rewrite (eqP e) in ee. by rewrite eq_refl in ee.
apply negbI => e. rewrite (eqP e) in ee. by rewrite eq_refl in ee.
+ case: k Ie Is hw ; first by [].
move => k Iw Is hw. have ee:=UF_id (updMap l (IVal t s) (Unfold w)). simpl in ee. unfold Datatypes.id in ee.
have aa:=(proj2 ee l _). rewrite (proj1 ee) in aa. rewrite indomUpdMap in aa. rewrite eq_refl in aa.
specialize (aa is_true_true).
rewrite -> (proj2 (Mrefl _ _) aa). by rewrite updMap_simpl.
(* DEREF *)
- move => v t _ IH. move => k s g w Ie Is. move => j Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
clear h0 H2. clear v0 H3. rewrite <- H0 in Lj. clear j H0 ev. rewrite <- H4. rewrite <- H4 in H1. clear h' H4.
move => hw. exists w ; split ; first by []. split.
+ case: k Lj Ie Is hw ; first by [].
move => k _ Ie Is hw. rewrite subSS. rewrite subn0. by apply: (heap_world_down (leqW (leqnn k)) hw).
+ specialize (IH k s g w Ie Is). rewrite <- H in IH. simpl in IH.
case: k IH Lj Ie Is hw ; first by []. move => k IH _ Ie Is hw.
rewrite subSS. rewrite subn0. specialize (hw k (ltnSn _)). case: hw => D hw.
specialize (hw l). case: (Unfold w l) IH hw ; last by [].
move => wl IH hw. rewrite H1 in hw. unfold Mrel in IH. simpl in IH.
specialize (IH w k vr (ltnSn _)). simpl in IH. by apply (proj1 IH hw).
(* ASSIGN *)
- move => v0 v1 t _ IH0 _ IH1. move => k s g w Ie Is. move => j Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
rewrite <- H0 in Lj. clear j ev H0. clear v H2. clear h0 H1. clear vr H3. clear h' H4.
specialize (IH0 k s g w Ie Is). rewrite <- H in IH0. clear v0 H.
specialize (IH1 k s g w Ie Is). simpl in IH0. case: k IH0 IH1 Ie Is Lj ; first by [].
move => k IH0 IH1 Ie Is _ hw. rewrite subSS. rewrite subn0. exists w ; split ;first by [].
split ; last by [].
move => j Lj. split.
+ move => ll. rewrite indomUpdMap. specialize (hw k (ltnSn _)). rewrite <- (proj1 hw).
case_eq (ll == l) => ee ; last by []. rewrite (eqP ee). rewrite findom_indom. by rewrite H5.
+ move => l0. case_eq (Unfold w l0) ; last by []. move => wl0 e0.
case_eq (l == l0) => e.
* rewrite <- (eqP e). rewrite updMap_simpl.
rewrite <- (eqP e) in e0. clear l0 e. rewrite e0 in IH0.
specialize (IH0 w j (Tm.subVal (Prod_subst g) v1) (ltnW Lj)). simpl in IH0. apply (proj2 IH0). clear IH0 wl0 e0.
apply: (upred_downclosed _ IH1). by apply (leqW (leqW Lj)).
* rewrite updMap_simpl2. case_eq (h l0) ; last by move => e1.
move => hl0 e1. specialize (hw j (leqW Lj)). case: hw => D hw.
specialize (hw l0). rewrite e0 in hw. rewrite e1 in hw. by apply hw.
apply negbI => e1. rewrite (eqP e1) in e. by rewrite eq_refl in e.
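(* APP *)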
- move => t0 t1 v0 v1 _ IH0 _ IH1. move => k s g w Ie Is. specialize (IH0 k s g w Ie Is).
specialize (IH1 k s g w Ie Is). move => j Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
clear n0 H. clear h0 H0. clear v2 H3. clear h1 H4. clear v H2. clear ev. move => hw.
rewrite <- H1 in IH0. clear H1 v0. simpl in IH0. have IH0':= (IH0 w k _ (Ole_refl _) (leqnn _) IH1). clear IH0.
specialize (IH0' j Lj h h'). unfold subSingle in X. by apply (IH0' _ X hw).
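(* TAPP *)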
- move => t v t' _ IH. move => k s g w Ie Is. specialize (IH k s g w Ie Is).
move => j Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subVal. move => ev. inversion ev.
clear n0 H. clear h0 H0. clear v0 H2. clear h1 H3. rewrite <- H1 in IH. clear H1 v ev.
move => hw. simpl in IH. specialize (IH (IVal t' s) w k (leqnn _) (Ole_refl _) j Lj h h' _ X hw).
unfold subOneTy. case: IH => w'. case => Lw. case => hw' IH. exists w'. split ; first by []. split ; first by [].
apply: (proj1 (IVal_substTy t _ _ _) IH). move => i. by dependent destruction i.
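(* CASE *)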
- move => t0 t1 v e0 e1 t' _ IH0 _ IH1 _ IH2. move => k s g w Ie Is.
specialize (IH0 k s g w Ie Is). move => j Lj h h' vr.
unfold Tm.subExp. autorewrite with mapHints. fold Tm.subExp. fold Tm.subVal. move => ev. inversion ev.
+ clear n0 H. clear e2 H2. clear e3 H3. clear h0 H0. clear v1 H4. clear h1 H5. rewrite <- H1 in IH0. clear v H1 ev.
simpl in IH0. specialize (IH1 k s (g,v0) w).
have IE2:IEnv (cons t0 env) s w (k, (g, v0)). simpl. rewrite tlCons. split ; first by []. by apply IH0.
specialize (IH1 IE2 Is j Lj h h'). apply IH1.
unfold subSingle in X. rewrite <- (proj2 (Tm.applyComposeSub _)) in X. fold Tm.liftSub in X.
rewrite Tm.composeSingleSub in X. by apply X.
+ clear n0 H. clear e2 H2. clear e3 H3. clear h0 H0. clear v1 H4. clear h1 H5. rewrite <- H1 in IH0. clear v H1 ev.
simpl in IH0. specialize (IH2 k s (g,v0) w).
have IE2:IEnv (cons t1 env) s w (k, (g, v0)). simpl. rewrite tlCons. split ; first by []. by apply IH0.
specialize (IH2 IE2 Is j Lj h h'). apply IH2.
unfold subSingle in X. rewrite <- (proj2 (Tm.applyComposeSub _)) in X. fold Tm.liftSub in X.
rewrite Tm.composeSingleSub in X. by apply X.
Qed.
|
{"author": "nbenton", "repo": "coqdomains", "sha": "1ae7ec4af95e4fa44d35d7a5b2452ad123b3a75d", "save_path": "github-repos/coq/nbenton-coqdomains", "path": "github-repos/coq/nbenton-coqdomains/coqdomains-1ae7ec4af95e4fa44d35d7a5b2452ad123b3a75d/src/KSSem.v"}
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import mpl_toolkits.mplot3d.art3d as art3d
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
# blue hex colors: #14b4ff #008ed1 #076794
# green hex colors: #b8d6b4 #a2bd9f #758a72
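# draw three filled circles at z = 100, 50 and 0 to form the stacked slices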
p = Circle((50, 50), 30, color='#14b4ff', fill=True)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=100, zdir="z")
p = Circle((50, 50), 40, color='#008ed1', fill=True)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=50, zdir="z")
p = Circle((50, 50), 30, color='#076794', fill=True)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="z")
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
ax.set_zlim(0, 100)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
# plt.show()
plt.savefig('../images/slices.eps', format='eps', transparent=True)
|
{"hexsha": "df981a786ca9580ff22c9c4280fd0a33b453fa6a", "size": 1139, "ext": "py", "lang": "Python", "max_stars_repo_path": "plots/circle.py", "max_stars_repo_name": "folkertsman/Fourier-Fingerprint-Search", "max_stars_repo_head_hexsha": "40db8f6b07556677732f73ac5160d083e9bff422", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plots/circle.py", "max_issues_repo_name": "folkertsman/Fourier-Fingerprint-Search", "max_issues_repo_head_hexsha": "40db8f6b07556677732f73ac5160d083e9bff422", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plots/circle.py", "max_forks_repo_name": "folkertsman/Fourier-Fingerprint-Search", "max_forks_repo_head_hexsha": "40db8f6b07556677732f73ac5160d083e9bff422", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.119047619, "max_line_length": 67, "alphanum_fraction": 0.7164179104, "include": true, "reason": "import numpy", "num_tokens": 430}
|
import tensorflow as tf
import numpy as np
import input_data
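# Multiply two scalar placeholders and evaluate them with feed_dict.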
a = tf.placeholder("float")
b = tf.placeholder("float")
y = tf.mul(a, b)
sess = tf.Session()
print "%f should equal 2.0" % sess.run(y, feed_dict={a: 1, b: 2})
print "%f should equal 9.0" % sess.run(y, feed_dict={a: 3, b: 3})
a = tf.placeholder("int32")
b = tf.placeholder("int32")
y = tf.mul(a, b)
sess = tf.Session()
print "%d should equal 2" % sess.run(y, feed_dict={a: 1, b: 2})
print "%d should equal 9" % sess.run(y, feed_dict={a: 3, b: 3})
trX = np.linspace(-1, 1, 1000)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33
X = tf.placeholder("float")
Y = tf.placeholder("float")
def model(X, w):
return tf.mul(X, w)
w = tf.Variable(0.0, name="weights")
y_model = tf.mul(X, w)
cost = (tf.pow(Y - y_model, 2))
train_op = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(100):
print str(i) + ": " + str(sess.run(w))
for (x, y) in zip(trX, trY):
sess.run(train_op, feed_dict={X: x, Y: y})
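# Logistic regression on MNIST: a single weight matrix trained with softmax cross-entropy.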
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def model(X, w):
return tf.matmul(X, w)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
w = init_weights([784, 10])
py_x = model(X, w)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
predict_op = tf.argmax(py_x, 1)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
print i, np.mean(np.argmax(teY, axis=1) ==
sess.run(predict_op, feed_dict={X: teX, Y: teY}))
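# Multilayer perceptron: one sigmoid hidden layer of 625 units.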
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def model(X, w_h, w_o):
h = tf.nn.sigmoid(tf.matmul(X, w_h))
return tf.matmul(h, w_o)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
w_h = init_weights([784, 625])
w_o = init_weights([625, 10])
py_x = model(X, w_h, w_o)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
predict_op = tf.argmax(py_x, 1)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
print i, np.mean(np.argmax(teY, axis=1) == sess.run(predict_op, feed_dict={X: teX, Y: teY}))
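# Convolutional network: three conv/max-pool blocks with dropout, then a dense layer.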
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
l1a = tf.nn.relu(tf.nn.conv2d(X, w, [1, 1, 1, 1], 'SAME'))
l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
l1 = tf.nn.dropout(l1, p_keep_conv)
l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, [1, 1, 1, 1], 'SAME'))
l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
l2 = tf.nn.dropout(l2, p_keep_conv)
l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, [1, 1, 1, 1], 'SAME'))
l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])
l3 = tf.nn.dropout(l3, p_keep_conv)
l4 = tf.nn.relu(tf.matmul(l3, w4))
l4 = tf.nn.dropout(l4, p_keep_hidden)
pyx = tf.matmul(l4, w_o)
return pyx
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1)
teX = teX.reshape(-1, 28, 28, 1)
X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])
w = init_weights([3, 3, 1, 32])
w2 = init_weights([3, 3, 32, 64])
w3 = init_weights([3, 3, 64, 128])
w4 = init_weights([128 * 4 * 4, 625])
w_o = init_weights([625, 10])
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
p_keep_conv: 0.8, p_keep_hidden: 0.5})
test_indices = np.arange(len(teX)) # Get A Test Batch
np.random.shuffle(test_indices)
test_indices = test_indices[0:256]
print i, np.mean(np.argmax(teY[test_indices], axis=1) ==
sess.run(predict_op, feed_dict={X: teX[test_indices],
Y: teY[test_indices],
p_keep_conv: 1.0,
p_keep_hidden: 1.0}))
|
{"hexsha": "cf37b3f4fdebc00d28dacb81c2bea9590c34d405", "size": 5688, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_tut/tensorflow_tutorial.py", "max_stars_repo_name": "yantraman/musings", "max_stars_repo_head_hexsha": "d1b7069ee740729c23c4ee4acbc08f706b0308b8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_tut/tensorflow_tutorial.py", "max_issues_repo_name": "yantraman/musings", "max_issues_repo_head_hexsha": "d1b7069ee740729c23c4ee4acbc08f706b0308b8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_tut/tensorflow_tutorial.py", "max_forks_repo_name": "yantraman/musings", "max_forks_repo_head_hexsha": "d1b7069ee740729c23c4ee4acbc08f706b0308b8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2527472527, "max_line_length": 97, "alphanum_fraction": 0.6246483826, "include": true, "reason": "import numpy", "num_tokens": 1827}
|
"""Module that defines common errors for parameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from parameters.classifier import constants
def check_valid_value(value, name, valid_list):
"""Raises a ValueError exception if value not in valid_list"""
if value not in valid_list:
msg = constants.ERROR_INVALID % (valid_list, name, value)
raise ValueError(msg)
def check_nan_metric(metric, metric_mean):
"""Raises a ValueError exception if metric value is nan"""
if np.isnan(metric_mean):
msg = constants.ERROR_NAN_METRIC % (metric)
raise ValueError(msg)
|
{"hexsha": "c7f060c35f6ddb886f012303d06b4d6971da7a73", "size": 688, "ext": "py", "lang": "Python", "max_stars_repo_path": "parameters/classifier/errors.py", "max_stars_repo_name": "ReyesDeJong/AnomalyDetectionTransformations", "max_stars_repo_head_hexsha": "c60b1adaf0065b684d76ecacabed1eae39a4e3a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "parameters/classifier/errors.py", "max_issues_repo_name": "ReyesDeJong/AnomalyDetectionTransformations", "max_issues_repo_head_hexsha": "c60b1adaf0065b684d76ecacabed1eae39a4e3a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parameters/classifier/errors.py", "max_forks_repo_name": "ReyesDeJong/AnomalyDetectionTransformations", "max_forks_repo_head_hexsha": "c60b1adaf0065b684d76ecacabed1eae39a4e3a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2727272727, "max_line_length": 66, "alphanum_fraction": 0.7572674419, "include": true, "reason": "import numpy", "num_tokens": 143}
|
import os
import numpy as np
import csv
import argparse
def extract_experiment_setting(experiment_name):
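    """Recover hyperparameter settings (here, C) from the experiment folder name."""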
print('Passed in experiment_name is {}'.format(experiment_name), flush = True)
hyper_parameter_dict = {}
#hyperparameter to extract
C = experiment_name.split('C')[-1]
#record to dict
hyper_parameter_dict['C'] = C
#print values
header = ' checking experiment '.center(100, '-')
print(header)
print('C: {}'.format(C))
print('\n')
return hyper_parameter_dict
def extract_experiment_performance(experiment_dir, experiment_name):
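    """Read validation/test accuracy from the run's result_analysis/performance.txt."""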
performance_file_fullpath = os.path.join(experiment_dir, experiment_name, 'result_analysis/performance.txt')
returned_file = None
with open(performance_file_fullpath, 'r') as f: #only read mode, do not modify
returned_file = f.read()
validation_accuracy = round(float(returned_file.split('highest validation accuracy: ')[1].split('\n')[0]), 3)
test_accuracy = returned_file.split('corresponding test accuracy: ')[1].split('\n')[0]
print('validation_accuracy: {}'.format(validation_accuracy))
print('test_accuracy: {}'.format(test_accuracy))
return returned_file, validation_accuracy, test_accuracy
def main(experiment_dir, summary_save_dir):
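    """Summarize every run under experiment_dir into hypersearch_summary.csv, listing unfinished runs separately."""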
experiments = os.listdir(experiment_dir)
incomplete_experiment_writer = open(os.path.join(summary_save_dir, 'incomplete_experiment_list.txt'), 'w')
summary_filename = os.path.join(summary_save_dir, 'hypersearch_summary.csv')
with open(summary_filename, mode='w') as csv_file:
fieldnames = ['validation_accuracy', 'test_accuracy', 'C', 'performance_string', 'experiment_folder', 'status']
fileEmpty = os.stat(summary_filename).st_size==0
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
if fileEmpty:
writer.writeheader()
for experiment_name in experiments:
if experiment_name !='hypersearch_summary':
experiment_folder = os.path.join(experiment_dir, experiment_name)
experiment_summary = extract_experiment_setting(experiment_name)
try:
returned_file, validation_accuracy, test_accuracy = extract_experiment_performance(experiment_dir, experiment_name)
print('Able to extract performance', flush = True)
experiment_summary.update(validation_accuracy=validation_accuracy, test_accuracy=test_accuracy, performance_string=returned_file, experiment_folder=experiment_folder, status='Completed')
print('Able to update experiment_summary\n\n')
                except Exception:
print(' NOT ABLE TO PROCESS {} \n\n'.format(experiment_dir + '/' + experiment_name).center(100, '-'), flush=True)
incomplete_experiment_writer.write(f"{experiment_name}\n\n")
experiment_summary.update(validation_accuracy='NA', test_accuracy='NA', performance_string='NA', experiment_folder=experiment_folder, status='Incompleted')
writer.writerow(experiment_summary)
incomplete_experiment_writer.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(description='synthesizing hyperparameter search results')
parser.add_argument('--experiment_dir')
#parse args
args = parser.parse_args()
experiment_dir = args.experiment_dir
assert os.path.exists(experiment_dir),'The passed in experiment_dir {} does not exist'.format(experiment_dir)
summary_save_dir = os.path.join(experiment_dir, 'hypersearch_summary')
if not os.path.exists(summary_save_dir):
os.makedirs(summary_save_dir)
main(experiment_dir, summary_save_dir)
|
{"hexsha": "5dbe2f26f96fcafb274aebf12c2f2659630267d2", "size": 4122, "ext": "py", "lang": "Python", "max_stars_repo_path": "synthesizing_results/domain_adaptation/synthesize_hypersearch_LR_for_a_subject.py", "max_stars_repo_name": "tufts-ml/fNIRS-mental-workload-classifiers", "max_stars_repo_head_hexsha": "b5199d6184e659152d1fe650db48eba53a221186", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-22T12:04:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T20:02:21.000Z", "max_issues_repo_path": "synthesizing_results/SelectWindowSize/synthesize_hypersearch_LR_for_a_subject.py", "max_issues_repo_name": "tufts-ml/fNIRS-mental-workload-classifiers", "max_issues_repo_head_hexsha": "b5199d6184e659152d1fe650db48eba53a221186", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synthesizing_results/SelectWindowSize/synthesize_hypersearch_LR_for_a_subject.py", "max_forks_repo_name": "tufts-ml/fNIRS-mental-workload-classifiers", "max_forks_repo_head_hexsha": "b5199d6184e659152d1fe650db48eba53a221186", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-29T09:02:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T22:15:40.000Z", "avg_line_length": 35.2307692308, "max_line_length": 206, "alphanum_fraction": 0.6387675885, "include": true, "reason": "import numpy", "num_tokens": 783}
|
function makespecops(ecut,Ω,basis)
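    # Build spectral operators for a 1-3D harmonic trap in the given basis:
    # returns the cutoff projector P, the retained mode energies en, and the
    # position and momentum operators for each dimension.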
if basis=="Hermite"
dim=length(Ω)
if dim==1
ωx = Ω[1]
e0 = 0.5*ωx
ecut < e0 && error("ecut must exceed the zero point energy.")
Mx,nx,en = nenergy(ecut,e0,ωx,basis)
P = en .< ecut
en = P.*en
ax = sqrt(1/ωx) #in dimensionless units
X,Px = ladderops(Mx,ax)
return P,en,X,Px
elseif dim==2
ωx,ωy = Ω
e0 = 0.5(ωx+ωy)
ecut < e0 && error("ecut must exceed the zero point energy.")
Mx,nx,ex = nenergy(ecut,e0,ωx,basis)
My,ny,ey = nenergy(ecut,e0,ωy,basis)
en = [ex[i+1]+ey[j+1] for i in nx, j in ny]
P = en .< ecut
en = P.*en
ax = sqrt(1/ωx); ay = sqrt(1/ωy)
X,Px = ladderops(Mx,ax)
Y,Py = ladderops(My,ay)
return P,en,X,Px,Y,Py
elseif dim==3
ωx,ωy,ωz = Ω
e0 = 0.5(ωx+ωy+ωz)
ecut < e0 && error("ecut must exceed the zero point energy.")
Mx,nx,ex = nenergy(ecut,e0,ωx,basis)
My,ny,ey = nenergy(ecut,e0,ωy,basis)
Mz,nz,ez = nenergy(ecut,e0,ωz,basis)
en = [ex[i+1]+ey[j+1]+ez[k+1] for i in nx, j in ny, k in nz]
P = en .< ecut
en = P.*en
ax = sqrt(1/ωx); ay = sqrt(1/ωy); az = sqrt(1/ωz)
X,Px = ladderops(Mx,ax)
Y,Py = ladderops(My,ay)
Z,Pz = ladderops(Mz,az)
return P,en,X,Px,Y,Py,Z,Pz
end
end
end
|
{"hexsha": "9cf1b127d1f726cf603a854fc36214b1589c796e", "size": 1278, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/makespecops.jl", "max_stars_repo_name": "AshtonSBradley/ProjectedGPE.jl", "max_stars_repo_head_hexsha": "16623c1e00bbbae73e7448bd9b38f5b7e97979e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-06-05T09:44:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T17:59:48.000Z", "max_issues_repo_path": "src/makespecops.jl", "max_issues_repo_name": "AshtonSBradley/ProjectedGPE.jl", "max_issues_repo_head_hexsha": "16623c1e00bbbae73e7448bd9b38f5b7e97979e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-30T05:46:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-30T05:46:58.000Z", "max_forks_repo_path": "src/makespecops.jl", "max_forks_repo_name": "AshtonSBradley/ProjectedGPE.jl", "max_forks_repo_head_hexsha": "16623c1e00bbbae73e7448bd9b38f5b7e97979e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-06-05T09:53:03.000Z", "max_forks_repo_forks_event_max_datetime": "2017-06-05T09:53:03.000Z", "avg_line_length": 27.1914893617, "max_line_length": 65, "alphanum_fraction": 0.5625978091, "num_tokens": 574}
|
import numpy as np
import abc
from precodita import Backend, Dispatchable
class ArrayLike(abc.ABC):
"""
Simple ABC to show off that it is possible to provide generic
implementations
"""
@classmethod
def __subclasshook__(cls, other):
if hasattr(other, "__array_function__"):
return True
return NotImplemented
# ----------------------------------
# Lets define a couple of "backends"
# ----------------------------------
# I use the `uarray` definition of backend here, i.e. a type-dispatching target
# one is also a backend. One used for "backend-selection" is one that has
# the `opt_in=True` set.
# However, backends do not do much work, they say what types they work with
# and get a priority assigned. Some are "opt in", they are disabled and set
# to "highest priority" using a with-statement (nesting multiple should work).
# I decided for now to always have backends. This gives a nice way to
# define what typoes to support.
b1 = Backend("NumPy", np.ndarray, ())
# Add a backend for NumPy matrices (which also accept ndarray though!)
b2 = Backend("Matrix", np.matrix, (np.ndarray,))
# b3 will be auto-registered in its callback. The thought is that this
# could be useful to allow lazy-imports.
# This is probably a bad idea, I think I now prefer the thought of adding a
# `LazyDispatchable(mod, qualname)`. This would use a global list and
# events on each Dispatchable creation to register the function.
# (A callback may still be useful, e.g. for debug-tracing.)
def register_late(b, func):
print("auto-registering backend:", b, func)
func.register(b)(generic_)
b3 = Backend("generic", ArrayLike, (), callback=register_late)
# The following is disabled by default, but can be opted in and prioritized
# by using `with b4:`
b4 = Backend("Mat2", np.matrix, (np.ndarray,), opt_in=True)
# Will be auto-registered:
def generic_(*args, **kwargs):
return b3, args, kwargs
# -----------------------------------
# Using a function without a fallback
# -----------------------------------
@Dispatchable
def func(a, b, c=None):
"""
The "extractor", similar to `__array_function__`, we use this only to
define the parameters which are dispatched on. In this case, `a` and `c`.
`None` will be ignored during dispatch.
There is no "fallback" here when there are no matches, see below for
an example that does this.
Further, `uarray` has the "replacer" dynamic, which is convenient.
Replacers could be used for extraction of parameters as well, so we could
allow to provide an extractor only for speed (or even create a C-extractor
for shaving off a tiny bit more).
"""
return a, c
@func.register(b1)
def _(a, b, c=None):
return b1, a, b, c
@func.register(b2)
def _(a, b, c=None):
return b2, a, b, c
# b3 was auto-registered.
@func.register(b4)
def _(a, b, c=None):
return b4, a, b, c
# The NumPy backend of course:
print(func(np.array(1), 2, np.array(3)))
# Matrix, since numpy cannot match it:
print(func(np.matrix(1), 2, np.array(3)))
# Only the generic one `ArrayLike` can match this one:
print(func(np.ma.array(1), 2, np.matrix(3)))
# Enable and enforce preference of the `Mat2` backend:
with b4:
print(func(np.matrix(1), 2, np.array(3)))
# --------------------------------
# Using a function with a fallback
# --------------------------------
def _new_func_extractor(a):
return (a,) # Take care: must be a sequence!
@Dispatchable.from_fallback(_new_func_extractor)
def new_func(a):
"""This is the original function, that will always be used if things
fail otherwise.
"""
return "original", a
@new_func.register(b1)
def _(a):
return b1, a
# definitely no match for string input...
print(new_func("asdf"))
# Actually, remember that the ArrayLike one got auto-registered?
print(new_func(np.matrix(1)))
# ------------------------------------------------
# Using a function without dispatchable parameters
# ------------------------------------------------
def _creation_extractor(shape, like=None):
return (like,)
@Dispatchable.from_fallback(_creation_extractor)
def new(shape, like=None):
return "Default version!", np.ones(shape)
@new.register(b2) # matrix!
def _(shape, like=None):
return "Matrix version!", np.ones(shape).view(np.matrix)
# The default version (of course):
print(new((2, 2)))
# We can get a matrix result in two ways:
print(new.invoke(np.matrix)((2, 2)))
print(new((2, 2), like=np.matrix([2])))
|
{"hexsha": "98984f83b6469a77e3427838d2ef6897d09b50b3", "size": 4535, "ext": "py", "lang": "Python", "max_stars_repo_path": "example.py", "max_stars_repo_name": "seberg/precodita", "max_stars_repo_head_hexsha": "b02ea2d6f859d705fa26f3124b2244f777bead6b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example.py", "max_issues_repo_name": "seberg/precodita", "max_issues_repo_head_hexsha": "b02ea2d6f859d705fa26f3124b2244f777bead6b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example.py", "max_forks_repo_name": "seberg/precodita", "max_forks_repo_head_hexsha": "b02ea2d6f859d705fa26f3124b2244f777bead6b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0705128205, "max_line_length": 79, "alphanum_fraction": 0.6504961411, "include": true, "reason": "import numpy", "num_tokens": 1136}
|
/*
* Copyright (C) 2021 FISCO BCOS.
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @brief ABI data structure used in transaction construction
* @file Abi.cpp
* @author: catli
* @date: 2021-09-11
*/
#include "Abi.h"
#include <json/forwards.h>
#include <json/json.h>
#include <sys/types.h>
#include <boost/algorithm/string/predicate.hpp>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
using namespace std;
using namespace bcos::executor;
ParameterAbi parseParameter(const Json::Value& input)
{
auto paramType = input["type"].asString();
auto components = vector<ParameterAbi>();
if (boost::starts_with(paramType, "tuple"))
{
auto& paramComponents = input["components"];
assert(!paramComponents.isNull());
components.reserve(paramComponents.size());
for (auto& component : paramComponents)
{
components.emplace_back(parseParameter(component));
}
}
auto parameterAbi = ParameterAbi(paramType, components);
return parameterAbi;
}
vector<string> flattenStaticParameter(const ParameterAbi& param)
{ // TODO: return vector<std::pair<string, vector<uint8>>>, pair is type and access path
const auto TUPLE_STR = "tuple";
auto flatTypes = vector<string>();
if (boost::starts_with(param.type, TUPLE_STR))
{
for (auto i = (size_t)0; i < param.components.size(); i++)
{
auto types = flattenStaticParamter(param.components[i]);
flatTypes.insert(flatTypes.end(), types.begin(), types.end());
}
}
else if (boost::algorithm::contains(param.type, "[") &&
!boost::algorithm::contains(param.type, "[]"))
{
        auto bracketPos = param.type.find("[");
        auto type = param.type.substr(0, bracketPos);
        // parse the fixed array length between the brackets, e.g. "uint256[3]" -> 3
        size_t len = std::stoi(param.type.substr(bracketPos + 1, param.type.find("]") - bracketPos - 1));
        flatTypes.insert(flatTypes.end(), len, type);
}
else
{
flatTypes.push_back(param.type);
}
return flatTypes;
}
unique_ptr<FunctionAbi> FunctionAbi::deserialize(
string_view abiStr, const bytes& expected, bool isSMCrypto)
{
assert(expected.size() == 4);
Json::Reader reader;
Json::Value root;
if (!reader.parse(abiStr.begin(), abiStr.end(), root))
{
BCOS_LOG(DEBUG) << LOG_BADGE("EXECUTOR") << LOG_DESC("unable to parse contract ABI")
<< LOG_KV("abiStr", abiStr);
return nullptr;
}
if (!root.isArray())
{
BCOS_LOG(DEBUG) << LOG_BADGE("EXECUTOR") << LOG_DESC("contract ABI is not an array")
<< LOG_KV("abiStr", abiStr);
return nullptr;
}
for (auto& function : root)
{
auto& type = function["type"];
if (type.isNull() || type.asString() != "function")
{
continue;
}
if (!function["constant"].isNull())
{ // liquid
if (function["constant"].asBool())
{
continue;
}
}
else if (!function["stateMutability"].isNull())
{ // solidity
if (function["stateMutability"].asString() == "view" ||
function["stateMutability"].asString() == "pure")
{
continue;
}
}
else
{
continue;
}
auto& functionName = function["name"];
assert(!functionName.isNull());
uint32_t selector = 0;
if (!function["selector"].isNull() && function["selector"].isArray())
{
if (isSMCrypto)
{
selector = (uint32_t)function["selector"][1].asUInt();
}
else
{
selector = (uint32_t)function["selector"][0].asUInt();
}
}
auto expectedSelector = *((uint32_t*)expected.data());
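        // byte-swap the loaded selector so it matches the byte order of the JSON-encoded value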
expectedSelector = ((expectedSelector & 0xff) << 24) | ((expectedSelector & 0xff00) << 8) |
((expectedSelector & 0xff0000) >> 8) |
((expectedSelector & 0xff000000) >> 24);
if (expectedSelector != selector)
{
BCOS_LOG(DEBUG) << LOG_BADGE("EXECUTOR") << LOG_DESC("selector mismatch")
<< LOG_KV("name", functionName)
<< LOG_KV("expected selector", expectedSelector)
<< LOG_KV("selector", selector);
continue;
}
auto& functionConflictFields = function["conflictFields"];
auto conflictFields = vector<ConflictField>();
conflictFields.reserve(functionConflictFields.size());
if (!functionConflictFields.isNull())
{
for (auto& conflictField : functionConflictFields)
{
auto value = vector<uint8_t>();
if (!conflictField["value"].isNull())
{
value.reserve(conflictField["value"].size());
for (auto& pathItem : conflictField["value"])
{
value.emplace_back(static_cast<uint8_t>(pathItem.asUInt()));
}
}
std::optional<uint8_t> slot = std::nullopt;
if (!conflictField["slot"].isNull())
{
slot = std::optional<uint8_t>(conflictField["slot"].asInt());
}
conflictFields.emplace_back(ConflictField{
static_cast<uint8_t>(conflictField["kind"].asUInt()), value, slot});
}
}
auto& functionInputs = function["inputs"];
assert(!functionInputs.isNull());
auto inputs = vector<ParameterAbi>();
inputs.reserve(functionInputs.size());
auto flatInputs = vector<string>();
for (auto i = (Json::ArrayIndex)0; i < functionInputs.size(); ++i)
{
auto param = parseParameter(functionInputs[i]);
            auto flatTypes = flattenStaticParameter(param);
flatInputs.insert(flatInputs.end(), flatTypes.begin(), flatTypes.end());
inputs.emplace_back(std::move(param));
}
return unique_ptr<FunctionAbi>(
new FunctionAbi{functionName.asString(), inputs, selector, conflictFields, flatInputs});
}
BCOS_LOG(ERROR) << LOG_BADGE("EXECUTOR") << LOG_DESC("expected selector not found")
<< LOG_KV("selector", toHexStringWithPrefix(expected));
return nullptr;
}
|
{"hexsha": "f779c9dadb095f7af5a44b39b5f87b7616b84ac8", "size": 7085, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "bcos-executor/src/dag/Abi.cpp", "max_stars_repo_name": "contropist/FISCO-BCOS", "max_stars_repo_head_hexsha": "1605c371448b410674559bb1c9e98bab722f036b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bcos-executor/src/dag/Abi.cpp", "max_issues_repo_name": "contropist/FISCO-BCOS", "max_issues_repo_head_hexsha": "1605c371448b410674559bb1c9e98bab722f036b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bcos-executor/src/dag/Abi.cpp", "max_forks_repo_name": "contropist/FISCO-BCOS", "max_forks_repo_head_hexsha": "1605c371448b410674559bb1c9e98bab722f036b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5609756098, "max_line_length": 100, "alphanum_fraction": 0.5675370501, "num_tokens": 1548}
|
from common.vec_env.vec_logger import VecLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
GAMMA = 0.99
TAU = 1.00
N_STEPS = 5
CLIP_GRAD = 50
COEF_VALUE = 0.5
COEF_ENTROPY = 0.01
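# TAU is the GAE lambda; CLIP_GRAD bounds the global gradient norm before each optimizer step.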
def train(args, venv, model, path, device):
N = args.num_processes
net = model(venv.observation_space.shape[0], venv.action_space.n).to(device)
net.train()
optimizer = optim.Adam(net.parameters(), lr=args.lr, amsgrad=args.amsgrad)
vlogger = VecLogger(N=N, path=path)
vlogger.add_model(net)
state = venv.reset()
state_v = torch.from_numpy(state).float().to(device)
hx = torch.zeros(N, 512).to(device)
cx = torch.zeros(N, 512).to(device)
t = 0
while t < args.num_timesteps:
# Reset gradients
loss_value_v = torch.zeros(1, 1).to(device)
loss_policy_v = torch.zeros(1, 1).to(device)
loss_entropy_v = torch.zeros(1, 1).to(device)
gae_v = torch.zeros(N, 1).to(device)
hx.detach_()
cx.detach_()
reward_vs = []
done_vs = []
value_vs = []
log_prob_action_vs = []
entropy_vs = []
for step in range(N_STEPS):
# Perform action according to policy
value_v, logit_v, (hx, cx) = net(state_v, (hx, cx))
prob_v = F.softmax(logit_v, dim=1)
action_v = prob_v.multinomial(num_samples=1)
action = action_v.data.cpu().numpy()
log_prob_v = F.log_softmax(logit_v, dim=1)
log_prob_action_v = log_prob_v.gather(1, action_v)
entropy_v = -(log_prob_v * prob_v).sum(dim=1, keepdim=True)
# Receive reward and new state
state, reward, done, info = venv.step(action)
t += N
reward = np.expand_dims(reward, axis=1)
done = np.expand_dims(done, axis=1)
info = np.expand_dims(info, axis=1)
vlogger.log(t, reward, info)
state_v = torch.from_numpy(state).float().to(device)
reward_v = torch.from_numpy(reward).float().to(device)
done_v = torch.from_numpy(done.astype('int')).float().to(device)
reward_vs.append(reward_v)
done_vs.append(done_v)
value_vs.append(value_v)
log_prob_action_vs.append(log_prob_action_v)
entropy_vs.append(entropy_v)
# Reset the LSTM state if done
hx = (1 - done_v) * hx
cx = (1 - done_v) * cx
        # bootstrap R from the critic's value of the final state (zero if the episode ended)
R_v = (1 - done_v) * net(state_v, (hx, cx))[0]
value_vs.append(R_v)
for i in reversed(range(len(reward_vs))):
R_v = (1 - done_vs[i]) * GAMMA * R_v + reward_vs[i]
# Accumulate gradients
adv_v = R_v.detach() - value_vs[i]
            # Generalized Advantage Estimation
delta_t = reward_vs[i] + (1 - done_vs[i]) * GAMMA * value_vs[i + 1] - value_vs[i]
gae_v = gae_v * (1 - done_vs[i]) * GAMMA * TAU + delta_t
loss_value_v += (0.5 * adv_v.pow(2)).sum()
loss_policy_v -= (log_prob_action_vs[i] * gae_v.detach()).sum() # cautious: detach()
loss_entropy_v -= (entropy_vs[i]).sum()
net.zero_grad()
loss_v = COEF_VALUE * loss_value_v + loss_policy_v + COEF_ENTROPY * loss_entropy_v
loss_v.backward()
nn.utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
optimizer.step()
venv.close()
|
{"hexsha": "0e670d751ab0559d5023f87c52f7debdeed88251", "size": 3478, "ext": "py", "lang": "Python", "max_stars_repo_path": "a2c/train.py", "max_stars_repo_name": "liuyuezhangadam/pyrl", "max_stars_repo_head_hexsha": "e6fe907b39315be80ccd7133e9bf3b18a71b01e0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-14T16:39:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-16T11:16:01.000Z", "max_issues_repo_path": "a2c/train.py", "max_issues_repo_name": "liuyuezhang/pyrl", "max_issues_repo_head_hexsha": "e6fe907b39315be80ccd7133e9bf3b18a71b01e0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a2c/train.py", "max_forks_repo_name": "liuyuezhang/pyrl", "max_forks_repo_head_hexsha": "e6fe907b39315be80ccd7133e9bf3b18a71b01e0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7787610619, "max_line_length": 97, "alphanum_fraction": 0.5853939045, "include": true, "reason": "import numpy", "num_tokens": 936}
|
import os
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import image as mpimg
import random
class DataGenerator:
def __init__(self, config):
self.config = config
path = self.config.test_data_path
self.y_raw = pd.read_csv(self.config.labels_file)
files = [x for x in os.listdir(path) if x[-3:] == 'jpg']
if config.debug == 1:
files = files[0:2]
print(files)
self.input = files
else:
self.input = files
# first assume one batch will do it all
    def one_batch(self, n=0, i=0):
if n > 0:
batch_size = self.config.batch_size
filename_nopath = self.input[batch_size*i:batch_size*(i+1)]
filenames = [self.config.test_data_path + x for x in filename_nopath]
else:
filename_nopath = self.input[0:5]
filenames = [self.config.test_data_path + x for x in filename_nopath]
input = self.read_images(filenames)
y_data = []
y_reg = []
        # labels must line up with the images actually read for this batch
        for file in filename_nopath:
out_y_data = self.get_y_data(file)
y_data.append(out_y_data[0])
y_reg.append(out_y_data[1])
y_data = self.padder(y_data)
y_reg = self.padder_coord(y_reg)
return input, y_data, y_reg, filename_nopath
def read_images(self, filenames):
'''
Function applied to every data entry to load the data. The output of this is the input format
to the model.
        :param filenames: list of image file paths to load
        :return: np array of loaded images
'''
imgs = []
for i in range(len(filenames)):
print(filenames[i])
imgs.append(mpimg.imread(filenames[i]))
imgs = np.asarray(imgs)
return imgs
def get_y_data(self, filename):
'''
Converts a filename into an array, with one channel for each boat in the image
:param filename: filename string
        :return: tuple (y_map of shape [h, w, n_boats], y_reg of shape [h, w, n_boats, 4])
'''
ground_truths = self.y_raw[self.y_raw.ImageId == filename]
array_of_coords = np.array(ground_truths[['lt_x', 'lt_y', 'rb_x', 'rb_y']])
# array_of_coords is of shape [n_boxes, 4]
n_boxes = array_of_coords.shape[0]
y_map = np.zeros((768, 768, n_boxes))
y_reg = np.zeros((768, 768, n_boxes, 4))
for box_idx in range(n_boxes):
# Loop over amount of boats per image ~ of order 10.
box = array_of_coords[box_idx, :]
y_map[box[0]:box[2], box[1]:box[3], box_idx] = 1
# Add boat box coordinates to y_reg
y_reg[:, :, box_idx, 0] = round((box[2] + box[0]) / 2) # x-centre
y_reg[:, :, box_idx, 1] = round((box[3] + box[1]) / 2) # y-centre
y_reg[:, :, box_idx, 2] = box[2] - box[0] # width
y_reg[:, :, box_idx, 3] = box[3] - box[1] # height
return y_map, y_reg
def padder(self, list_of_arr):
'''
Pads each list of boat maps so all have the same depth (which is the max amount of
boats across all images) and creates a numpy array of the result.
:param list_of_arr: list of arrays, each shaped [768, 768, n_boats_in_this_image]
:return: numpy array of shape [len(list_of_arr), h, w, maximum_n_boats]
'''
maximum_n_boats = max([x.shape[2] for x in list_of_arr])
dim_arr = list_of_arr[0].shape
self.n_box_max = maximum_n_boats
b = np.zeros([len(list_of_arr), dim_arr[0], dim_arr[1], maximum_n_boats])
for i, arr in enumerate(list_of_arr):
b[i, :, :, :arr.shape[2]] = arr
return b
def padder_coord(self, list_of_arr):
'''
Pads each list of boat maps so all have the same depth (which is the max amount of
boats across all images) and creates a numpy array of the result.
:param list_of_arr: list of arrays, each shaped [768, 768, n_boats_in_this_image, depth] (depth typically 4)
:return: numpy array of shape [len(list_of_arr), h, w, maximum_n_boats, depth]
'''
maximum_n_boats = max([x.shape[2] for x in list_of_arr])
dim_arr = list_of_arr[0].shape
depth = dim_arr[-1]
b = np.zeros([len(list_of_arr), dim_arr[0], dim_arr[1], maximum_n_boats, depth])
for i, arr in enumerate(list_of_arr):
b[i, :, :, :arr.shape[2], :] = arr
return b
|
{"hexsha": "6194411e5d9dfe9d965a543a1c4833023f2bbb36", "size": 4569, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_loader/faster_rcnn_test_loader.py", "max_stars_repo_name": "mmr12/DeepLearning18", "max_stars_repo_head_hexsha": "3e683c570ea8f5e224767a41a0e152267cfd08e7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_loader/faster_rcnn_test_loader.py", "max_issues_repo_name": "mmr12/DeepLearning18", "max_issues_repo_head_hexsha": "3e683c570ea8f5e224767a41a0e152267cfd08e7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-12-21T09:58:40.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-19T19:34:40.000Z", "max_forks_repo_path": "data_loader/faster_rcnn_test_loader.py", "max_forks_repo_name": "mmr12/DeepLearning18", "max_forks_repo_head_hexsha": "3e683c570ea8f5e224767a41a0e152267cfd08e7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-28T09:18:50.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-28T09:18:50.000Z", "avg_line_length": 36.2619047619, "max_line_length": 116, "alphanum_fraction": 0.5944407967, "include": true, "reason": "import numpy", "num_tokens": 1208}
|
#include <boost/metaparse/transform_error_message.hpp>
|
{"hexsha": "3cbc11d9ea211183e909e9cf0809a4baebd786bd", "size": 55, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_metaparse_transform_error_message.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_metaparse_transform_error_message.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_metaparse_transform_error_message.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.5, "max_line_length": 54, "alphanum_fraction": 0.8545454545, "num_tokens": 12}
|
import pytest
import numpy as np
from matplotlib import pyplot as plt
from pyrado.utils.functions import rosenbrock
from pyrado.plotting.surface import render_surface
@pytest.mark.visualization
@pytest.mark.parametrize(
'x, y, data_format', [
(np.linspace(-2, 2, 30, True), np.linspace(-1, 3, 30, True), 'numpy'),
(np.linspace(-2, 2, 30, True), np.linspace(-1, 3, 30, True), 'torch'),
], ids=['numpy', 'torch']
)
def test_surface(x, y, data_format):
render_surface(x, y, rosenbrock, 'x', 'y', 'z', data_format)
plt.show()
|
{"hexsha": "07bfe09b111918be6ed37819275717693fb20931", "size": 581, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pyrado/tests/test_plotting.py", "max_stars_repo_name": "jacarvalho/SimuRLacra", "max_stars_repo_head_hexsha": "a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pyrado/tests/test_plotting.py", "max_issues_repo_name": "jacarvalho/SimuRLacra", "max_issues_repo_head_hexsha": "a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pyrado/tests/test_plotting.py", "max_forks_repo_name": "jacarvalho/SimuRLacra", "max_forks_repo_head_hexsha": "a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5789473684, "max_line_length": 86, "alphanum_fraction": 0.6368330465, "include": true, "reason": "import numpy", "num_tokens": 164}
|
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX
class SARIMA(object):
"""A Wrapper for the statsmodels.tsa.statespace.sarimax.SARIMAX class."""
def __init__(self, p, d, q, s, steps):
"""Initialize the SARIMA object.
Args:
p (int):
Integer denoting the order of the autoregressive model.
d (int):
Integer denoting the degree of differencing.
q (int):
Integer denoting the order of the moving-average model.
s (int):
Integer denoting the periodicity of data for the moving-average model.
steps (int):
Integer denoting the number of time steps to predict ahead.
"""
self.p = p
self.d = d
self.q = q
self.s = s
self.steps = steps
def predict(self, X):
"""Predict values using the initialized object.
Args:
X (ndarray):
N-dimensional array containing the input sequences for the model.
Returns:
ndarray:
N-dimensional array containing the predictions for each input sequence.
"""
sarima_results = list()
dimensions = len(X.shape)
if dimensions > 2:
raise ValueError("Only 1D or 2D arrays are supported")
if dimensions == 1 or X.shape[1] == 1:
X = np.expand_dims(X, axis=0)
num_sequences = len(X)
for sequence in range(num_sequences):
sarima = SARIMAX(X[sequence],
order=(self.p, self.d, self.q),
seasonal_order=(self.p, self.d, self.q, self.s))
sarima_fit = sarima.fit(disp=0)
sarima_results.append(sarima_fit.forecast(self.steps)[0])
sarima_results = np.asarray(sarima_results)
if dimensions == 1:
sarima_results = sarima_results[0]
return sarima_results
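# Minimal usage sketch (illustrative data only; assumes statsmodels is installed):
if __name__ == '__main__':
    # fit a small SARIMA model on a random 1D sequence and forecast ahead
    model = SARIMA(p=1, d=0, q=1, s=2, steps=3)
    print(model.predict(np.random.randn(100)))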
|
{"hexsha": "37c6eb1e4861f26eeb98abaae2fef6c83b4bd4a7", "size": 2007, "ext": "py", "lang": "Python", "max_stars_repo_path": "orion/primitives/sarima.py", "max_stars_repo_name": "ajayarora1235/Orion", "max_stars_repo_head_hexsha": "69e258ebcb2c19e63054453b3cb2cd74043ef433", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "orion/primitives/sarima.py", "max_issues_repo_name": "ajayarora1235/Orion", "max_issues_repo_head_hexsha": "69e258ebcb2c19e63054453b3cb2cd74043ef433", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "orion/primitives/sarima.py", "max_forks_repo_name": "ajayarora1235/Orion", "max_forks_repo_head_hexsha": "69e258ebcb2c19e63054453b3cb2cd74043ef433", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.45, "max_line_length": 87, "alphanum_fraction": 0.5565520678, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 435}
|
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = {
'bids_regdup': pd.read_csv('data/as_bids_REGUP.csv'),
'bids_regdown': pd.read_csv('data/as_bids_REGDOWN.csv'),
'plans': pd.read_csv('data/as_plan.csv'),
'energy_prices':pd.read_csv('data/energy_price.csv'),
'price_vol': pd.read_csv('data/as_price_vol.csv'),
'generation': pd.read_csv('data/generation.csv'),
'weather': pd.read_csv('data/weather_forecast_ercot.csv'),
'wind': pd.read_csv('data/wind.csv')
}
#clean up df-specific bits
data['generation'].drop(columns=['Imports','Other','ST'],inplace=True)
data['generation'].fillna(0, inplace=True)
data['wind'].drop(columns=['ACTUAL_SYSTEM_WIDE'], inplace=True)
# %%
def create_dt(input_df,date_col,hr_col,tz ='America/Chicago'):
#TODO runs quite slow, optimize if possible.
'''create a datetime index for a dataframe from multiple cols
'''
    #TODO this would allow idempotence, but it in turn doesn't allow modifying the global from data[key]
input_df = input_df.copy()
#input_df = input_df.drop_duplicates()
if isinstance(input_df.index, pd.DatetimeIndex):
return input_df
#TODO raise exceptions for multiple dt string formats in a column
if input_df[hr_col].astype(str).str.len().nunique() == 1:
        #hr_col is probably already in an inferable dt string format
input_df[hr_col] = pd.to_datetime(input_df[hr_col]).dt.time
else:
input_df[hr_col] = pd.to_datetime(input_df[hr_col],format='%H').dt.time
dt_index = pd.to_datetime(input_df[date_col].astype(str)+'T'+input_df[hr_col].astype(str))
#dt_index = pd.DatetimeIndex(dt_index,ambiguous='NaT',tz=tz,freq='H',)
input_df = input_df.set_index(dt_index)
input_df = input_df.drop(columns=[date_col,hr_col])
#TODO this fails
#input_df = input_df.asfreq('H')
return input_df
# %%
#process the rest of the dfs.
for key, df in data.items():
data[key] = create_dt(data[key],'date','hr_beg')
# %%
#TODO refactor into function
start_time = []
end_time = []
for key, df in data.items():
start_ts = df.index.sort_values()[0]
end_ts = df.index.sort_values()[-1]
    #TODO can this be done in a single line?
start_time.append(start_ts)
end_time.append(end_ts)
mask = df.index.to_series().diff() > pd.Timedelta('01:00:00')
missing_ts = df[mask].index
nan_cols = df.describe().loc['count'] < df.shape[0]
nan_cols = df.describe().loc['count'][nan_cols].sort_values()
na = df.isna().any(axis=1)
res = df[na]
print(f'{key} Summary:\n'
f"ts range: {start_ts} - {end_ts}\n"
f'Total Rows: {df.shape[0]}\n'
f'Rows NaN count: {res.shape[0]}\n'
f'Cols with NaN: \n{nan_cols}\n'
f'position of ts gaps: \n{missing_ts}\n'
f'Partial Describe:\n{df.describe().iloc[:,:3]}\n'
)
# %%
#attempt to join on full data
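#intersection = hours covered by every dataset; union = hours covered by any dataset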
intersect_dates = pd.date_range(max(start_time), min(end_time),freq='H')
union_dates = pd.date_range(min(start_time), max(end_time),freq='H')
intersect_df = pd.DataFrame(intersect_dates, columns=["dt"])
union_df = pd.DataFrame(union_dates, columns=["dt"])
#%%
for key in data.keys():
intersect_df = intersect_df.merge(data[key], how='left',left_on='dt',
right_on=data[key].index,left_index=True)
union_df = union_df.merge(data[key], how='left',left_on='dt',
right_on=data[key].index,left_index=True)
intersect_df.reset_index(inplace=True,drop=True)
union_df.reset_index(inplace=True,drop=True)
# %%
#find ts of rows with NaN:
intersect_df.loc[pd.isnull(intersect_df).any(1), :].index.values
#drop duplicate timestamps
intersect_df = intersect_df.groupby(['dt']).first()
intersect_df = intersect_df.interpolate()
#TODO interpolate intersect part of union_df
#%%
#write to disk
union_df = union_df.set_index('dt')
intersect_df.to_csv('data/intersect.csv')
union_df.to_csv('data/union.csv')
#%%
intersect_nan = intersect_df.describe().loc['count'] < intersect_df.shape[0]
intersect_nan = intersect_df.describe().loc['count'][intersect_nan].sort_values()
intersect_na = intersect_df.isna().any(axis=1)
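# Quick check (my addition): surface the columns and row count still
# containing NaN after interpolation.
print(f'cols still containing NaN:\n{intersect_nan}\n'
      f'rows still containing NaN: {intersect_na.sum()}')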
# %%
|
{"hexsha": "ad93b63f249eca219a68255b80d572fcf15e97d9", "size": 4253, "ext": "py", "lang": "Python", "max_stars_repo_path": "merge.py", "max_stars_repo_name": "nickolasclarke/anciML", "max_stars_repo_head_hexsha": "365e46a7042e358c9288ec67c2eb744b4fbdea1a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-11T14:11:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-18T02:39:15.000Z", "max_issues_repo_path": "merge.py", "max_issues_repo_name": "nickolasclarke/anciML", "max_issues_repo_head_hexsha": "365e46a7042e358c9288ec67c2eb744b4fbdea1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-30T20:17:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-30T20:17:20.000Z", "max_forks_repo_path": "merge.py", "max_forks_repo_name": "nickolasclarke/anciML", "max_forks_repo_head_hexsha": "365e46a7042e358c9288ec67c2eb744b4fbdea1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-26T12:50:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T12:50:01.000Z", "avg_line_length": 38.6636363636, "max_line_length": 103, "alphanum_fraction": 0.6689395721, "include": true, "reason": "import numpy", "num_tokens": 1115}
|
import Op
import numpy as np
from pxr import Usd, UsdGeom
class Stage(Op.Op):
def __init__(self, name='/UsdStage', locations='/root', filename=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('filename', 'USD Filename', 'USD Filename (.usda)', 'string', filename, {})
]
        super(Stage, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
if not attrs['filename']:
self.logger.error('No filename specified')
return
filename = self.resolvePath(attrs['filename'])
stage = Usd.Stage.Open(filename)
if stage is None:
self.logger.error('Error loading USD stage: %s' % filename)
return
        for prim in stage.Traverse():
            childName = prim.GetName()
            typeName = prim.GetTypeName()
            propertyNames = prim.GetPropertyNames()
            childType = typeName
            # fresh dict per prim; avoids shadowing cook's attrs parameter
            childAttrs = {}
            # childAttrs['visibility'] = prim.GetAttribute('visibility').Get() if 'visibility' in propertyNames else True
            xform = interface.attr('xform')
            if xform is None: xform = np.eye(4, 4, dtype=np.float32)
            if 'xformOp:translate' in propertyNames:
                translate = np.float32(prim.GetAttribute('xformOp:translate').Get())
                xform[:3, 3] = translate
            childAttrs['xform'] = xform
            if typeName == 'Xform':
                childType = 'group'
            elif typeName == 'Sphere':
                childType = 'primitive'
                childAttrs['primitiveType'] = typeName
                childAttrs['radius'] = float(prim.GetAttribute('radius').Get())
            elif typeName == 'Cube':
                childType = 'primitive'
                childAttrs['primitiveType'] = typeName
                childAttrs['size'] = float(prim.GetAttribute('size').Get())
            # print(childName, childType)
            # print(prim.GetPath().pathString)
            # print('/root' + prim.GetPath().pathString)
            interface.createChild(prim.GetPath().pathString[1:], childType, attrs=childAttrs)
# xformSphere = np.eye(4, 4, dtype=np.float32)
# xformCube = np.eye(4, 4, dtype=np.float32)
#
# xformSphere[:3, 3] = [1000, 0, 0]
# xformCube[:3, 3] = [-1000, 1000, 500]
#
# sphereAttrs = {
# 'primitiveType': 'sphere', 'radius': 1000., 'slices': 100, 'stacks': 100,
# 'colour': (0, 1, 0, 0.7), 'xform': xformSphere
# }
# interface.createChild('sphere', 'primitive', attrs=sphereAttrs)
#
# cubeAttrs = {
# 'primitiveType': 'cube', 'size': 800.,
# 'colour': (0, 0, 1, 0.7), 'xform': xformCube
# }
# interface.createChild('cube', 'primitive', attrs=cubeAttrs)
# Register Ops
import Registry
Registry.registerOp('Import USD Stage', Stage)
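# Hedged extension sketch (my addition, not part of the original Op): other
# common xformOps could be folded into the same 4x4 the way xformOp:translate
# is above. The attribute names follow the USD schema; applying scale by
# multiplying the columns of the linear part assumes an M @ diag(s) order.
def compose_translate_scale(prim, xform):
    '''fold translate/scale ops from a prim into xform (rotation omitted)'''
    names = prim.GetPropertyNames()
    if 'xformOp:scale' in names:
        s = np.float32(prim.GetAttribute('xformOp:scale').Get())
        xform[:3, :3] = xform[:3, :3] * s  # scale each basis column by s[j]
    if 'xformOp:translate' in names:
        xform[:3, 3] = np.float32(prim.GetAttribute('xformOp:translate').Get())
    return xform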
|
{"hexsha": "dd0e7ad4550b5bceccaa3a816a05d82576ce8e74", "size": 2525, "ext": "py", "lang": "Python", "max_stars_repo_path": "Ops/USD.py", "max_stars_repo_name": "davidsoncolin/IMS", "max_stars_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Ops/USD.py", "max_issues_repo_name": "davidsoncolin/IMS", "max_issues_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Ops/USD.py", "max_forks_repo_name": "davidsoncolin/IMS", "max_forks_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-18T12:11:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T12:11:53.000Z", "avg_line_length": 30.7926829268, "max_line_length": 101, "alphanum_fraction": 0.6491089109, "include": true, "reason": "import numpy", "num_tokens": 780}
|